Example no. 1
def _apply_saved_variables(config_data, group_data, apply_data, dry_run,
                           task_type, source_file_dir, wtf_attr):
    """
    Render the Jinja2 SavedVariables templates listed in each task and write
    the results into every member character's WTF directory (located via
    ``wtf_attr``). With ``dry_run=True``, only the plan is printed.
    """
    wow_dir_path = config_data[constant.Syntax.WOW_DIR_PATH]
    tasks = apply_data[task_type]
    target_file_duplicate_filter = set()  # type: typing.Set[str]
    final_content_cache = dict()  # type: typing.Dict[str, str]

    all_wtf_char_list = list()  # type: typing.List[wtf.WtfCharacter]
    for member in group_data["_all"]:
        account, realm, char = member.split(".")
        wtf_char = wtf.WtfCharacter(wow_dir_path=wow_dir_path,
                                    account=account,
                                    realm=realm,
                                    char=char)
        all_wtf_char_list.append(wtf_char)

    for task_data in tasks:
        members = evaluate_members(task_data=task_data, group_data=group_data)
        allow_addons = task_data[constant.Syntax.ALLOW_ADDONS]

        wtf_char_list = list()  # type: typing.List[wtf.WtfCharacter]
        for member in members:
            account, realm, char = member.split(".")
            wtf_char = wtf.WtfCharacter(wow_dir_path=wow_dir_path,
                                        account=account,
                                        realm=realm,
                                        char=char)
            wtf_char_list.append(wtf_char)

        for wtf_char in wtf_char_list:
            for addon_sv_file in allow_addons:
                source_file = Path(source_file_dir, addon_sv_file)
                validate_exists(source_file)
                target_file = Path(getattr(wtf_char, wtf_attr), addon_sv_file)

                # NOTE: the cache is keyed by source path only; if the same
                # template shows up in several tasks, the first task's
                # rendered content is reused.
                if source_file.abspath not in final_content_cache:
                    tpl = jinja2.Template(
                        source_file.read_text(encoding="utf-8"))
                    final_content = tpl.render(
                        characters=wtf_char_list,
                        all_characters=all_wtf_char_list)
                    final_content_cache[source_file.abspath] = final_content
                else:
                    final_content = final_content_cache[source_file.abspath]

                if target_file.abspath not in target_file_duplicate_filter:
                    print(
                        f"render '{source_file}' -- write to -> '{target_file.abspath}'"
                    )
                    if not dry_run:
                        target_file.atomic_write_text(final_content,
                                                      overwrite=True)
                    target_file_duplicate_filter.add(target_file.abspath)
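# Below is a minimal, standalone sketch of the render step used above. The
# template text and FakeChar class are hypothetical; only the ``characters``
# and ``all_characters`` context names come from the snippet itself.
import jinja2

class FakeChar:  # stand-in for wtf.WtfCharacter
    def __init__(self, char):
        self.char = char

tpl = jinja2.Template(
    'MyAddonDB = { {% for c in characters %}"{{ c.char }}", {% endfor %}}')
print(tpl.render(characters=[FakeChar("alice"), FakeChar("bob")],
                 all_characters=[]))
# -> MyAddonDB = { "alice", "bob", }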
    def load_pathlib_mate_path(self,
                               dct,
                               class_name=pathlib_mate_path_class_name):
        """
        ``pathlib_mate.PathCls`` loader.
        """
        return PathCls(dct["$" + class_name])
    def plan(self, workspace_dir):
        """
        This method

        :param workspace_dir:
        :return:

        **中文文档**

        此方法将 ``master_tier``, ``tier``, ``config_dir``, ``plan_file`` 中的
        所有信息汇总, 在 ``workspace_dir`` 下生成多个文件夹, 每个文件夹都是一个单独的
        ``aws cloudformation deploy`` 所需要的的文件.
        """
        env_tag_list = extract_all_env_tag(self._plan)
        config_data_mapper = OrderedDict()  # type: OrderedDict[str, dict]
        for env_tag in env_tag_list:
            p = Path(self._config_dir, "{}.json".format(env_tag))
            if not p.exists():
                raise FileNotFoundError(
                    "the config file for environment `{}` does not exist at '{}'".format(env_tag, p))
            config_data_mapper[env_tag] = json.load(
                p.abspath, ignore_comments=True, verbose=False)

        pipeline = resolve_pipeline(self._plan)

        workspace_dir = Path(workspace_dir)
        workspace_dir.mkdir(parents=True, exist_ok=True)

        deploy_execution_counter = 0
        for can_id_list, env_tag in pipeline:
            # the counter is used in the deploy workspace directory name
            deploy_execution_counter += 1
            deploy_workspace_dir = Path(
                workspace_dir,
                "{}-{}".format(str(deploy_execution_counter).zfill(3), env_tag)
            )
            deploy_workspace_dir.mkdir(parents=True, exist_ok=True)
            config_data = config_data_mapper[env_tag]

            # collect template instance and file path
            # so we can generate final template files at once
            template_file_list = list()  # type: List[TemplateFile]

            master_can = self._master_tier(**config_data)
            master_can.create_template()

            master_template_path = Path(
                deploy_workspace_dir, master_can.rel_path)
            template_file_list.append(
                TemplateFile(
                    template=master_can.template,
                    filepath=master_template_path,
                )
            )
Example no. 4
    def get_title(self):
        """
        Get title line from .rst file.

        **中文文档**

        从一个 ``_filename`` 所指定的 .rst 文件中, 找到顶级标题.
        也就是第一个 ``====`` 或 ``----`` 或 ``~~~~`` 上面一行.
        """
        header_bar_char_list = "=-~+*#^"

        lines = list()
        for cursor_line in textfile.readlines(self.rst_path,
                                              strip="both",
                                              encoding="utf-8"):
            if cursor_line.startswith(".. include::"):
                relative_path = cursor_line.split("::")[-1].strip()
                included_path = Path(
                    Path(self.rst_path).parent.abspath, relative_path)
                if included_path.exists():
                    cursor_line = included_path.read_text(encoding="utf-8")
            lines.append(cursor_line)
        rst_content = "\n".join(lines)

        cursor_previous_line = None
        for cursor_line in rst_content.split("\n"):
            for header_bar_char in header_bar_char_list:
                if cursor_line.startswith(header_bar_char):
                    flag_full_bar_char = cursor_line == header_bar_char * len(
                        cursor_line)
                    flag_line_not_empty = len(cursor_line) >= 1
                    flag_previous_line_not_empty = bool(cursor_previous_line)
                    if flag_full_bar_char \
                            and flag_line_not_empty \
                            and flag_previous_line_not_empty:
                        return cursor_previous_line.strip()
            cursor_previous_line = cursor_line

        msg = "Warning, this document doesn't have any %s header!" % header_bar_char_list
        return None
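# A condensed, runnable restatement of the detection rule above: the title is
# the line right above the first full bar of header characters.
def _first_title(rst_content, header_bar_chars="=-~+*#^"):
    prev = None
    for line in rst_content.split("\n"):
        if line and line[0] in header_bar_chars \
                and line == line[0] * len(line) \
                and prev:
            return prev.strip()
        prev = line
    return None

assert _first_title("Hello RST\n=========\n\nbody\n") == "Hello RST"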
Example no. 5
def _apply(config_data, group_data, apply_data, dry_run, task_type,
           source_file_dir, wtf_attr):
    """
    Copy each task's source file verbatim into every member character's WTF
    directory (located via ``wtf_attr``). With ``dry_run=True``, only the
    plan is printed.
    """
    wow_dir_path = config_data[constant.Syntax.WOW_DIR_PATH]
    tasks = apply_data[task_type]
    target_file_duplicate_filter = set()
    for task_data in tasks:
        members = evaluate_members(task_data=task_data, group_data=group_data)
        source_file = Path(source_file_dir, task_data[constant.Syntax.FILE])
        source_file_content = source_file.read_text(encoding="utf-8")
        for member in members:
            account, realm, char = member.split(".")
            wtf_char = wtf.WtfCharacter(wow_dir_path=wow_dir_path,
                                        account=account,
                                        realm=realm,
                                        char=char)
            target_file = getattr(wtf_char, wtf_attr)
            if target_file.abspath not in target_file_duplicate_filter:
                print(f"copy '{source_file}' ---> '{target_file.abspath}'")
                if not dry_run:
                    target_file.atomic_write_text(source_file_content,
                                                  overwrite=True)
                target_file_duplicate_filter.add(target_file.abspath)
Example no. 6
    def sub_article_folders(self):
        """
        Returns all valid ArticleFolder sitting inside of
        :attr:`ArticleFolder.dir_path`.
        """
        sub_folders = list()
        for p in Path.sort_by_fname(
                Path(self.dir_path).select_dir(recursive=False)):
            af = ArticleFolder(index_file=self.index_file, dir_path=p.abspath)
            try:
                if af.title is not None:
                    sub_folders.append(af)
            except Exception:
                pass
        return sub_folders
def generate_terraform_script(tf_dir):
    """
    Look for ``<name>.tf.tpl`` files (main, variables, backend, output) in
    ``tf_dir`` and use the jinja2 template engine to generate the real .tf
    files, passing in the ``config`` object to dynamically inject values.

    :param tf_dir: terraform workspace directory.
    """
    tf_dir = Path(tf_dir)
    if not tf_dir.is_dir():
        raise TypeError("{} is not a directory!".format(tf_dir))

    tf_files = ["main", "variables", "backend", "output"]

    for file in tf_files:
        tpl_file = Path(tf_dir, file + ".tf.tpl")
        tf_file = Path(tf_dir, file + ".tf")
        if tpl_file.exists():
            tpl = jinja2.Template(tpl_file.read_text(encoding="utf-8"))
            content = tpl.render(config=config)
            tf_file.write_text(content, encoding="utf-8")
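# A self-contained sketch of the ``<name>.tf.tpl -> <name>.tf`` convention the
# loop above relies on, using a temporary workspace and a hypothetical
# ``config`` stand-in:
import tempfile

import jinja2
from pathlib_mate import PathCls as Path

class _FakeConfig:  # stand-in for the real config object
    AWS_REGION = "us-east-1"

with tempfile.TemporaryDirectory() as tmp:
    tpl_file = Path(tmp, "main.tf.tpl")
    tpl_file.write_text('provider "aws" { region = "{{ config.AWS_REGION }}" }',
                        encoding="utf-8")
    tpl = jinja2.Template(tpl_file.read_text(encoding="utf-8"))
    tf_file = Path(tmp, "main.tf")
    tf_file.write_text(tpl.render(config=_FakeConfig), encoding="utf-8")
    print(tf_file.read_text(encoding="utf-8"))
    # -> provider "aws" { region = "us-east-1" }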
try:
    from pathlib import Path

    pathlib_path_class_name = get_class_name(Path(__file__))
except ImportError:
    try:
        from pathlib2 import Path

        pathlib_path_class_name = get_class_name(Path(__file__))
    except ImportError:
        pass

pathlib_mate_path_class_name = "pathlib_mate.pathlib2.Path"
try:
    from pathlib_mate import PathCls

    pathlib_mate_path_class_name = get_class_name(PathCls(__file__))
except ImportError:
    pass


class SupportPathlib(object):
    def dump_pathlib_path(self, obj, class_name=pathlib_path_class_name):
        """
        ``pathlib.Path`` or ``pathlib2.Path`` dumper.
        """
        return {"$" + class_name: str(obj)}

    def load_pathlib_path(self, dct, class_name=pathlib_path_class_name):
        """
        ``pathlib.Path`` or ``pathlib2.Path`` loader.
        """
        # completion by analogy with ``load_pathlib_mate_path`` above
        return Path(dct["$" + class_name])
                    class_name = line.replace("class", "") \
                        .replace("(AWSObject):", "") \
                        .strip()
                    aws_object_class = getattr(imported_module, class_name)
                    tag_property_name = _get_tags_attr(aws_object_class)
                    if tag_property_name is not None:
                        try:
                            tag_property_name_mapper[
                                aws_object_class.
                                resource_type] = tag_property_name
                        except Exception:
                            pass
    return tag_property_name_mapper


tag_property_name_mapper_cache_file = Path(__file__).change(
    new_basename="tag_property_name_mapper.json")
if tag_property_name_mapper_cache_file.exists():
    tag_property_name_mapper = json.loads(
        tag_property_name_mapper_cache_file.read_text())
else:  # pragma: no cover
    tag_property_name_mapper = get_tag_property_name_mapper()
    tag_property_name_mapper_cache_file.write_text(
        json.dumps(tag_property_name_mapper))


def get_tags_attr(resource):
    """
    Quickly find the property name for tags using Cache.

    :type resource: AWSObject
    :rtype: str
    """
    # assumed completion: look up the cached mapper by CloudFormation resource type
    return tag_property_name_mapper.get(resource.resource_type)
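# Illustrative lookup with a hand-made mapper entry instead of the scanned
# cache (the real mapper is built by scanning troposphere's AWSObject
# subclasses):
_demo_mapper = {"AWS::S3::Bucket": "Tags"}

class _FakeBucket:  # stand-in for a troposphere AWSObject
    resource_type = "AWS::S3::Bucket"

assert _demo_mapper.get(_FakeBucket.resource_type) == "Tags"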
Example no. 10
    def plan(self, temp_dir):
        """
        Generate one deploy workspace directory per pipeline execution under
        ``temp_dir`` and return the list of created directories.
        """
        pipeline = resolve_pipeline([(note.can_id, note.env_tag)
                                     for note in self.notes])

        nested_can_mapper = dict()  # type: Dict[str, Canned]

        returned_list = list()

        counter = 0
        for can_id_list, env_tag in pipeline:
            counter += 1

            deploy_workspace_dir = Path(
                temp_dir, "{}-{}".format(str(counter).zfill(3), env_tag))
            deploy_workspace_dir.mkdir(parents=True, exist_ok=True)
            returned_list.append(deploy_workspace_dir)

            template_file_list = list()
            config_data = self.config_data_mapper[env_tag].data

            master_can_label = self.canlabel_mapper[self.master_canlabel_id]
            master_can = master_can_label.can_class(**config_data)
            master_can.CONFIG_DIR = deploy_workspace_dir.abspath
            master_can.create_template()

            master_template_path = Path(deploy_workspace_dir,
                                        master_can_label.filename)
            template_file_list.append(
                TemplateFile(
                    template=master_can.template,
                    filepath=master_template_path,
                ))

            # construct the resource filter based on two criteria:
            # 1. the current execution job's ``CanLabel.logic_id``
            #    (nested stack resource logic id)
            # 2. the environment-specific config data's ``TIER_LIST_TO_DEPLOY``

            allowed_stack_id_list = [
                resource_id for resource_id in can_id_list
                if resource_id in master_can.TIER_LIST_TO_DEPLOY.get_value()
            ]
            r_filter = ResourceFilter(allowed_stack_id_list)

            # remove ignored stacks
            for resource_id, resource in list(
                    master_can.template.resources.items()):
                keep_this_flag = r_filter.filter(resource, master_can.template)
                if not keep_this_flag:
                    master_can.template.remove_resource(resource)
                else:
                    if resource_id in self.canlabel_mapper:
                        nested_canlabel = self.canlabel_mapper[resource_id]
                        nested_can = nested_canlabel.can_class(**config_data)
                        nested_can.create_template()
                        nested_can_mapper[resource_id] = nested_can

                        template_file = TemplateFile(
                            template=nested_can.template,
                            filepath=Path(deploy_workspace_dir,
                                          nested_canlabel.filename))
                        template_file_list.append(template_file)

            # construct ExecutionJob
            print("=" * 10)
            print(can_id_list, env_tag)

            master_can.dump_cloudformation_json_config_file()

            for template_file in template_file_list:
                template_file.make_file(json_or_yml="json")

        return returned_list
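# A worked example of the two filter criteria described in the comments above
# (stack ids are illustrative):
can_id_list = ["vpc", "sg", "rds"]    # stacks in this execution job
tier_list_to_deploy = ["vpc", "sg"]   # environment config's TIER_LIST_TO_DEPLOY
allowed_stack_id_list = [
    resource_id for resource_id in can_id_list
    if resource_id in tier_list_to_deploy
]
assert allowed_stack_id_list == ["vpc", "sg"]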
Example no. 11
    then put everything into ``dev.sh`` and use the "Example: run shell script
    remotely" snippet to run the shell script on the remote machine.
"""

from fabric2 import Connection
from invoke import Result
from paramiko import SFTPClient
from patchwork.transfers import rsync
from pathlib_mate import PathCls as Path

# --- Config ---
HOST = "ec2-111-222-333-444.compute-1.amazonaws.com"
OS_USERNAME = "******"  # for Amazon Linux and RedHat it's ec2-user; for Ubuntu it's ubuntu; for other systems see: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connection-prereqs.html#connection-prereqs-get-info-about-instance

# all paths are absolute
HOME = Path.home()  # $HOME directory
HERE = Path(__file__).parent  # where this dev.py script is located
REPO_ROOT = HERE.parent  # the path of this repository on the local laptop
REPO_ROOT_REMOTE = "/tmp/{}".format(REPO_ROOT.basename)  # the path of this repository on the remote server, when using rsync

PEM_PATH = Path(HOME, "path-to-my-pem-file.pem").abspath

# reference the optional imports so linters don't flag them as unused
_ = Result
_ = SFTPClient
_ = rsync

# config connection
with Connection(
        host=HOST,
        user=OS_USERNAME,
        connect_kwargs=dict(
    def teardown_class(cls):
        p = Path(__file__).change(new_basename="template.json")
        if p.exists():
            p.remove()
                            "codebuild:CreateReport",
                            "codebuild:UpdateReport",
                            "codebuild:BatchPutTestCases"
                        ],
                        "Resource": [
                            f"arn:aws:codebuild:{config.AWS_REGION.get_value()}:{config.AWS_ACCOUNT_ID.get_value()}:report-group/{config.CODE_BUILD_PROJECT_NAME.get_value()}-*"
                        ]
                    }
                ]
            }
        ),
    ],
)

# --- ECR Repository
cft_dir = Path(__file__).parent
repos_dir = cft_dir.change(new_basename="repos")

DEFAULT_UNTAGGED_IMAGE_EXPIRE_DAYS = 30

repo_names = list()
for subfolder in repos_dir.select_dir(recursive=False):
    repo_config_file = Path(subfolder, "config.json")
    repo_basename = subfolder.basename
    if repo_config_file.exists():
        repo_config_data = json.loads(strip_comments(repo_config_file.read_text("utf-8")))
        untagged_image_expire_days = repo_config_data.get(
            "untagged_image_expire_days", DEFAULT_UNTAGGED_IMAGE_EXPIRE_DAYS)
    else:
Example no. 14
def validate_exists(path):
    p = Path(path)
    if not p.exists():
        raise RuntimeError(f"{p.abspath} does not exist!")
Example no. 15
"""

from __future__ import print_function

import requests

from pathlib_mate import PathCls as Path

try:
    from .pkg.atomicwrites import atomic_write
    from .pkg.sqlalchemy_mate import engine_creator
except ImportError:
    from uszipcode.pkg.atomicwrites import atomic_write
    from uszipcode.pkg.sqlalchemy_mate import engine_creator

db_file_dir = Path.home().append_parts(".uszipcode")
db_file_dir.mkdir(exist_ok=True)

simple_db_file_path = db_file_dir.append_parts("simple_db.sqlite")
db_file_path = db_file_dir.append_parts("db.sqlite")


def is_simple_db_file_exists():
    # the simple db file counts as present only if it exists and is at least
    # 5 MB; a smaller file is likely an incomplete download
    if simple_db_file_path.exists():
        if simple_db_file_path.size >= 5 * 1000 * 1000:
            return True
    return False


def is_db_file_exists():
    if db_file_path.exists():
# -*- coding: utf-8 -*-
"""
dir settings
"""

from __future__ import unicode_literals
from pathlib_mate import PathCls as Path

HOME = Path.home()
"""
User home directory:

- Windows: C:\\Users\\<username>
- MacOS: /Users/<username>
- Ubuntu: /home/<username> 
"""

ALFRED_FTS = Path(HOME, ".alfred-fts")
"""
Alfred Full Text Search Data Folder: ${HOME}/.alfred-fts
"""

if not ALFRED_FTS.exists():
    ALFRED_FTS.mkdir()
Example no. 17
# -*- coding: utf-8 -*-

from fabric2 import Connection
from invoke import Result
from patchwork.transfers import rsync
from pathlib_mate import PathCls as Path

_ = Result

HOST = "ec2-54-165-167-139.compute-1.amazonaws.com"
USER = "******"
PEM_PATH = Path.home().append_parts("ec2-pem", "eq-sanhe-dev.pem").abspath

HERE = Path(__file__).parent

# open a ssh session and close automatically
with Connection(host=HOST,
                user=USER,
                connect_kwargs={"key_filename": [
                    PEM_PATH,
                ]}) as conn:
    # sync folder from local to remote, like google drive
    rsync(conn, source=Path(HERE, "test-folder"), target="/tmp")
    conn.run('cat /tmp/test-folder/index.html')

    # sync folder from remote to local, like google drive
    conn.run('mkdir -p /tmp/test-folder-on-remote')
    conn.run(
        'echo "<html>This is a folder from remote server</html>" > /tmp/test-folder-on-remote/index.html'
    )
    rsync(conn,
Example no. 18
import requests
from pathlib_mate import PathCls as Path

p = Path("test.html")

url = "https://www.seedinvest.com/have.need/pre.seed/founders"
res = requests.get(url)

p.write_bytes(res.content)
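# A slightly more defensive variant of the download above: fail loudly on
# HTTP errors before writing to disk (raise_for_status is part of requests).
res = requests.get(url)
res.raise_for_status()
p.write_bytes(res.content)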
def create_rst_file(dir_path):
    p = Path(dir_path)
    relpath = p.relative_to(dir_images)
    rst_dirpath = Path(dir_here, "99-附录 (Appendix)", "01-常用图标外链查询", relpath)

    if not rst_dirpath.exists():
        rst_dirpath.mkdir()
    rst_path = Path(rst_dirpath, "index.rst")

    lines = list()
    lines.append(p.basename)
    lines.append("=" * 80)
    lines.append(".. contents:: 索引")
    lines.append("    :local:")

    sub_p_list = Path.sort_by_abspath(p.select_dir(recursive=False))
    if len(sub_p_list):
        lines.append("\n**目录**:\n")
        lines.append("\n.. articles::\n")

    for p_png in Path.sort_by_abspath(p.select_image(recursive=False)):
        lines.append("\n" + p_png.fname)
        lines.append("-" * 80)
        url = "/" + str(p_png.relative_to(dir_here))
        directive = ".. image:: {}".format(url)
        lines.append(directive)

    content = "\n".join(lines)
    rst_path.write_text(content, "utf-8")

    if len(sub_p_list):
        for sub_p in sub_p_list:
            create_rst_file(sub_p)
"""
Automatically generate index.rst files to display images (except the 'icon' dir).
"""

try:
    from pathlib_mate import PathCls as Path
except ImportError:
    from pathlib_mate import Path

dir_here = Path(__file__).parent
"""
wotlkdoc_images-project/docs/source
"""
dir_static = Path(dir_here, "_static")
dir_images = Path(dir_static, "image")


import bs4
import json
from pathlib_mate import PathCls as Path

doc_source_dir = Path(
    "/Users/sanhehu/Downloads/aws-cloudformation-user-guide-master/doc_source")
resources_data = list()

RESOURCE_PATTERN = "aws-resource"
PROPERTY_PATTERN = "aws-properties"

for p in doc_source_dir.select_file():
    flag = False
    if p.fname.startswith(RESOURCE_PATTERN):
        entity_type = "Resource"
        fname_prefix = RESOURCE_PATTERN
        flag = True
    elif p.fname.startswith(PROPERTY_PATTERN):
        entity_type = "Property"
        fname_prefix = PROPERTY_PATTERN
        flag = True
    if flag:
        entity = p.fname.replace(fname_prefix + "-", "").replace("-", " ").title()
        title = "{}: {}".format(entity_type, entity)
        url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/{}.html".format(
            p.fname)
        subtitle = "Open {}".format(url)
        arg = url
        dct = dict(title=title, subtitle=subtitle, arg=arg)
        resources_data.append(dct)
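# Worked example of the fname -> item transformation above, for a
# hypothetical doc source file named "aws-resource-ec2-instance.md":
_fname = "aws-resource-ec2-instance"
_entity = _fname.replace(RESOURCE_PATTERN + "-", "").replace("-", " ").title()
assert _entity == "Ec2 Instance"
# resulting item:
#   title    = "Resource: Ec2 Instance"
#   subtitle = "Open https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-instance.html"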
# -*- coding: utf-8 -*-
"""
此模块用于枚举所有的账号密码, 避免将敏感信息写入代码, 并安全地在 Hotkeynet 脚本中引用这些
信息.
"""

import json
import typing

import attr
from pathlib_mate import PathCls as Path
from enum import Enum

# put credentials.json file in the git repo root folder
credentials_file = Path(__file__).parent.parent.parent.parent.change(
    new_basename="credentials.json")
assert credentials_file.parent.basename == "hotkeynet-project"
credentials_data = json.loads(credentials_file.read_text(encoding="utf-8"))


@attr.s
class Credential:
    username = attr.ib()
    password = attr.ib()


# Enumerate all username/password data objects, so they can be referenced later
class Credentials(Enum):
    cred_fatmulti1 = Credential(username="******",
                                password=credentials_data["fatmulti1"])
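# Sanity-check sketch of the expected credentials.json shape, inferred from
# the lookup above (the key comes from the snippet, the value is a placeholder):
_example_credentials = {"fatmulti1": "********"}
_cred = Credential(username="******",
                   password=_example_credentials["fatmulti1"])
assert _cred.password == "********"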
# -*- coding: utf-8 -*-

from fabric2 import Connection
from invoke import Result
from pathlib_mate import PathCls as Path

_ = Result

HOST = "ec2-54-165-167-139.compute-1.amazonaws.com"
USER = "******"
PEM_PATH = Path.home().append_parts("ec2-pem", "eq-sanhe-dev.pem").abspath

# open a ssh session and close automatically
with Connection(host=HOST,
                user=USER,
                connect_kwargs={"key_filename": [
                    PEM_PATH,
                ]}) as conn:
    # example 1: send a command that creates an html file, index.html
    # conn.run('echo "<html>Hello World</html>" > /tmp/index.html') # type: Result
    # conn.run('cat /tmp/index.html') # type: Result

    # example 2: process the standard output afterwards
    result = conn.run('ls /', hide=True)  # type: Result
    # access the standard output, capture those strings and process them in Python
    print(result.stdout)
    # print(result.stderr)
    # print(result.encoding)
    # print(result.command)
    # print(result.shell) # the shell you use to execute this command
# -*- coding: utf-8 -*-

import pandas as pd
from pathlib_mate import PathCls as Path
from wotlkdoc.df_to_list_table import df_to_list_table

p = Path(__file__).change(new_basename="Stat-Abbreviation.tsv")
df = pd.read_csv(p.abspath, sep="\t")
lt = df_to_list_table(df)  # convert the DataFrame into an RST list-table
p = p.change(new_ext=".rst")  # same basename, .rst extension
p.write_text(lt.render(), encoding="utf-8")
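# A minimal, self-contained illustration of the DataFrame round trip above,
# using an in-memory table instead of the real TSV (column names illustrative):
import io

demo_df = pd.read_csv(io.StringIO("Abbr\tStat\nAP\tAttack Power\n"), sep="\t")
print(demo_df.shape)  # -> (1, 2)
# df_to_list_table(demo_df).render() would then yield the matching RST list-table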