def create_rst_file(dir_path):
    # Recursively generate an ``index.rst`` under the Appendix folder for
    # ``dir_path`` and every sub directory, listing each image with an
    # ``.. image::`` directive.
    p = Path(dir_path)
    relpath = p.relative_to(dir_images)
    rst_dirpath = Path(dir_here, "99-附录 (Appendix)", "01-常用图标外链查询", relpath)

    if not rst_dirpath.exists():
        rst_dirpath.mkdir()
    rst_path = Path(rst_dirpath, "index.rst")

    lines = list()
    lines.append(p.basename)
    lines.append("=" * 80)
    lines.append(".. contents:: 索引")
    lines.append("    :local:")

    sub_p_list = Path.sort_by_abspath(p.select_dir(recursive=False))
    if len(sub_p_list):
        lines.append("\n**目录**:\n")
        lines.append("\n.. articles::\n")

    for p_png in Path.sort_by_abspath(p.select_image(recursive=False)):
        lines.append("\n" + p_png.fname)
        lines.append("-" * 80)
        url = "/" + str(p_png.relative_to(dir_here))
        directive = ".. image:: {}".format(url)
        lines.append(directive)

    content = "\n".join(lines)
    rst_path.write_text(content, "utf-8")

    for sub_p in sub_p_list:
        create_rst_file(sub_p)
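A hedged usage sketch for the generator above. It assumes ``dir_here`` and ``dir_images`` are module level pathlib_mate ``Path`` objects pointing at the Sphinx source root and the image root; their real definitions are not shown in the example:

from pathlib_mate import PathCls as Path

dir_here = Path(__file__).parent                # assumed: Sphinx source root
dir_images = Path(dir_here, "icon-images")      # assumed: root folder of the icon images

create_rst_file(dir_images)                     # builds index.rst files for the whole tree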
Example #2
    def plan(self, workspace_dir):
        """
        Aggregate the information stored in ``master_tier``, ``tier``,
        ``config_dir`` and ``plan_file``, then generate one sub folder per
        deploy execution under ``workspace_dir``. Each sub folder contains the
        files required by a single ``aws cloudformation deploy`` run.

        :param workspace_dir: directory under which the deploy workspaces
            are generated.
        """
        env_tag_list = extract_all_env_tag(self._plan)
        config_data_mapper = OrderedDict()  # type: OrderedDict[str, dict]
        for env_tag in env_tag_list:
            p = Path(self._config_dir, "{}.json".format(env_tag))
            if not p.exists():
                raise FileNotFoundError(
                    "the config file for environment `{}` does not exist at '{}'".format(env_tag, p))
            config_data_mapper[env_tag] = json.load(
                p.abspath, ignore_comments=True, verbose=False)

        pipeline = resolve_pipeline(self._plan)

        workspace_dir = Path(workspace_dir)
        workspace_dir.mkdir(parents=True, exist_ok=True)

        deploy_execution_counter = 0
        for can_id_list, env_tag in pipeline:
            # the counter is used as the numeric prefix of the deploy workspace dir name
            deploy_execution_counter += 1
            deploy_workspace_dir = Path(
                workspace_dir,
                "{}-{}".format(str(deploy_execution_counter).zfill(3), env_tag)
            )
            deploy_workspace_dir.mkdir(parents=True, exist_ok=True)
            config_data = config_data_mapper[env_tag]

            # collect template instances and file paths so we can generate the
            # final template files in one pass
            template_file_list = list()  # type: List[TemplateFile]

            master_can = self._master_tier(**config_data)
            master_can.create_template()

            # master_can_label = self.canlabel_mapper[self.master_canlabel_id]
            # master_can = master_can_label.can_class(**config_data)
            # master_can.CONFIG_DIR = deploy_workspace_dir.abspath
            master_template_path = Path(
                deploy_workspace_dir, master_can.rel_path)
            template_file_list.append(
                TemplateFile(
                    template=master_can.template,
                    filepath=master_template_path,
                )
            )
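A small standalone illustration of how the deploy workspace folder names are built, using the same zero-padded-counter-plus-environment-tag scheme as the loop above (the pipeline values are made up):

pipeline = [(["can-a", "can-b"], "dev"), (["can-a"], "prod")]   # illustrative values

for counter, (can_id_list, env_tag) in enumerate(pipeline, start=1):
    print("{}-{}".format(str(counter).zfill(3), env_tag))
# 001-dev
# 002-prod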
Example #3
    def get_title(self):
        """
        Get the title line from a .rst file.

        From the .rst file specified by ``_filename``, find the top level
        title, i.e. the line right above the first full header bar such as
        ``====``, ``----`` or ``~~~~``.
        """
        header_bar_char_list = "=-~+*#^"

        lines = list()
        for cursor_line in textfile.readlines(self.rst_path,
                                              strip="both",
                                              encoding="utf-8"):
            if cursor_line.startswith(".. include::"):
                relative_path = cursor_line.split("::")[-1].strip()
                included_path = Path(
                    Path(self.rst_path).parent.abspath, relative_path)
                if included_path.exists():
                    cursor_line = included_path.read_text(encoding="utf-8")
            lines.append(cursor_line)
        rst_content = "\n".join(lines)

        cursor_previous_line = None
        for cursor_line in rst_content.split("\n"):
            for header_bar_char in header_bar_char_list:
                if cursor_line.startswith(header_bar_char):
                    flag_full_bar_char = cursor_line == header_bar_char * len(
                        cursor_line)
                    flag_line_length_greater_than_1 = len(cursor_line) > 1
                    flag_previous_line_not_empty = bool(cursor_previous_line)
                    if flag_full_bar_char \
                            and flag_line_length_greater_than_1 \
                            and flag_previous_line_not_empty:
                        return cursor_previous_line.strip()
            cursor_previous_line = cursor_line

        msg = "Warning, this document doesn't have any %s header!" % header_bar_char_list
        return None
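To make the detection rule concrete, here is a small self-contained sketch (separate from the class method above) that applies the same scan to an in-memory string:

def find_top_title(rst_content):
    # return the line right above the first full header bar, or None
    previous_line = None
    for line in rst_content.split("\n"):
        for bar_char in "=-~+*#^":
            if line == bar_char * len(line) and len(line) > 1 and previous_line:
                return previous_line.strip()
        previous_line = line
    return None

print(find_top_title("Overview\n========\n\nSome body text."))  # -> Overview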
def generate_terraform_script(tf_dir):
    """
    This function looking for ``main.tf.tpl``, ``variables.tf.tpl`` files in
    ``tf_dir``. And use jinja2 template engine to generate the real tf files.
    It pass in the config object to dynamically inject values.

    :param tf_dir: terraform workspace directory.
    """

    tf_dir = Path(tf_dir)
    if not tf_dir.is_dir():
        raise TypeError("{} is not a directory!".format(tf_dir))

    tf_files = ["main", "variables", "backend", "output"]

    for file in tf_files:
        tpl_file = Path(tf_dir, file + ".tf.tpl")
        tf_file = Path(tf_dir, file + ".tf")
        if tpl_file.exists():
            tpl = jinja2.Template(tpl_file.read_text(encoding="utf-8"))
            content = tpl.render(config=config)
            tf_file.write_text(content, encoding="utf-8")
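A hedged illustration of the render step, with a made-up ``main.tf.tpl`` body and a stand-in for the project's ``config`` object (both are assumptions, not taken from the real project):

import jinja2

tpl_source = 'bucket = "{{ config.bucket_name }}"'   # pretend content of main.tf.tpl

class DummyConfig:                                   # stand-in for the real config object
    bucket_name = "my-tf-state-bucket"

print(jinja2.Template(tpl_source).render(config=DummyConfig()))
# bucket = "my-tf-state-bucket"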
Example #5
                        .strip()
                    aws_object_class = getattr(imported_module, class_name)
                    tag_property_name = _get_tags_attr(aws_object_class)
                    if tag_property_name is not None:
                        try:
                            resource_type = aws_object_class.resource_type
                            tag_property_name_mapper[resource_type] = tag_property_name
                        except AttributeError:
                            # the subclass may not define ``resource_type``
                            pass
    return tag_property_name_mapper


tag_property_name_mapper_cache_file = Path(__file__).change(
    new_basename="tag_property_name_mapper.json")
if tag_property_name_mapper_cache_file.exists():
    tag_property_name_mapper = json.loads(
        tag_property_name_mapper_cache_file.read_text())
else:  # pragma: no cover
    tag_property_name_mapper = get_tag_property_name_mapper()
    tag_property_name_mapper_cache_file.write_text(
        json.dumps(tag_property_name_mapper))


def get_tags_attr(resource):
    """
    Quickly find the property name for tags using the cache.

    :type resource: AWSObject
    :rtype: str
    """
    # body reconstructed for illustration (assumption): look the resource type
    # up in the cached mapper built above
    return tag_property_name_mapper.get(resource.resource_type)
Example #6
def validate_exists(path):
    p = Path(path)
    if not p.exists():
        raise RuntimeError(f"{p.abspath} does not exist!")
# -*- coding: utf-8 -*-
"""
dir settings
"""

from __future__ import unicode_literals
from pathlib_mate import PathCls as Path

HOME = Path.home()
"""
User home directory:

- Windows: C:\\Users\\<username>
- MacOS: /Users/<username>
- Ubuntu: /home/<username> 
"""

ALFRED_FTS = Path(HOME, ".alfred-fts")
"""
Alfred Full Text Search Data Folder: ${HOME}/.alfred-fts
"""

if not ALFRED_FTS.exists():
    ALFRED_FTS.mkdir()
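A hedged usage sketch for the data folder above; the file name is made up for illustration:

data_file = Path(ALFRED_FTS, "movies.json")     # hypothetical dataset file
data_file.write_text('[{"title": "Inception"}]', encoding="utf-8")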
            }
        ),
    ],
)

# --- ECR Repository
cft_dir = Path(__file__).parent
repos_dir = cft_dir.change(new_basename="repos")

DEFAULT_UNTAGGED_IMAGE_EXPIRE_DAYS = 30

repo_names = list()
for subfolder in repos_dir.select_dir(recursive=False):
    repo_config_file = Path(subfolder, "config.json")
    repo_basename = subfolder.basename
    if repo_config_file.exists():
        repo_config_data = json.loads(
            strip_comments(repo_config_file.read_text("utf-8")))
        untagged_image_expire_days = repo_config_data.get(
            "untagged_image_expire_days", DEFAULT_UNTAGGED_IMAGE_EXPIRE_DAYS)
    else:
        untagged_image_expire_days = DEFAULT_UNTAGGED_IMAGE_EXPIRE_DAYS

    repo_logic_id = f"EcrRepo{camelcase(repo_basename)}"
    repo_name = f"{config.ENVIRONMENT_NAME.get_value()}-{repo_basename}"
    repo_names.append(repo_name)

    ecr_lifecycle_policy = {
        "rules": [
            {
    def teardown_class(cls):
        p = Path(__file__).change(new_basename="template.json")
        if p.exists():
            p.remove()