Example #1
def load_yaml(fname: str, round_trip: bool = False) -> JSON_TYPE:
    """Load a YAML file."""
    if round_trip:
        yaml = YAML(typ='rt')
        yaml.preserve_quotes = True
    else:
        if not hasattr(ExtSafeConstructor, 'name'):
            ExtSafeConstructor.name = fname
        yaml = YAML(typ='safe')
        yaml.Constructor = ExtSafeConstructor

    try:
        with open(fname, encoding='utf-8') as conf_file:
            # If configuration file is empty YAML returns None
            # We convert that to an empty dict
            return yaml.load(conf_file) or OrderedDict()
    except YAMLError as exc:
        _LOGGER.error("YAML error in %s: %s", fname, exc)
        raise HomeAssistantError(exc)
    except UnicodeDecodeError as exc:
        _LOGGER.error("Unable to read file %s: %s", fname, exc)
        raise HomeAssistantError(exc)
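For context, a minimal self-contained sketch of the round-trip behaviour the round_trip branch relies on: with typ='rt' and preserve_quotes enabled, ruamel.yaml keeps the original quoting and comments when the data is written back out (the input string below is illustrative only).

from io import StringIO
from ruamel.yaml import YAML

source = 'greeting: "hello"  # quotes and comments survive the round trip\n'

yaml = YAML(typ='rt')        # round-trip loader/dumper
yaml.preserve_quotes = True  # keep the double quotes around "hello"

data = yaml.load(source)     # a CommentedMap, not a plain dict
out = StringIO()
yaml.dump(data, out)
print(out.getvalue())        # identical to the input, quotes and comment intact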
Example #2
    is_url_or_abbrev,
    DEFAULT_URL_ABBREVIATIONS_REPO,
    calculate_cache_location_for_url,
)
from frutils.exceptions import FrklException

from frutils.frutils import auto_parse_string
from frutils.tasks.callback import load_callback
from frutils.tasks.tasks import Tasks
from ting.tings import TingTings

log = logging.getLogger("freckles")

yaml = YAML()
yaml.default_flow_style = False
yaml.preserve_quotes = True
yaml.width = 4096


def startup_housekeeping():

    if not os.path.exists(FRECKLES_CONFIG_DIR):
        os.makedirs(FRECKLES_CONFIG_DIR)
    else:
        if not os.path.isdir(os.path.realpath(FRECKLES_CONFIG_DIR)):
            raise Exception(
                "Freckles config location exists and is not a directory: '{}'".
                format(FRECKLES_CONFIG_DIR))

    if not os.path.exists(FRECKLES_SHARE_DIR):
        os.makedirs(FRECKLES_SHARE_DIR)
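As a rough illustration of what the module-level dumper settings above buy you (the data below is made up): default_flow_style=False keeps plain Python containers in block style, and the large width stops ruamel.yaml from folding long scalars across multiple lines.

from io import StringIO
from ruamel.yaml import YAML

yaml = YAML()
yaml.default_flow_style = False   # block style for dicts/lists
yaml.preserve_quotes = True
yaml.width = 4096                 # do not wrap long lines

data = {"endpoint": "https://example.invalid/" + "segment/" * 20}

out = StringIO()
yaml.dump(data, out)
print(out.getvalue())             # the long URL stays on a single line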
Example #3
from importlib.util import spec_from_file_location
from importlib.util import module_from_spec
import os
import sys
import json
import string
from io import StringIO, IOBase
from ruamel.yaml import YAML

# A shiny global ruamel.yaml obj with sane options (dumps should pass yamllint)
YM = YAML()
YM.indent(mapping=2, sequence=4, offset=2)
YM.explicit_start = True
YM.explicit_end = True
YM.allow_unicode = True
YM.preserve_quotes = True

# global formatting (any changes *should* propagate to later directives)
EOL = '\n'  # line separator
RENDER_JS = False


##
#   substitution
##
def sub_stream(strm, meta, method):
    """
    substitute strings in strm
    return (the same? or a new) stream with substituted values
    """
    strm = strm or StringIO()
Example #4
    def extract_to_package_format(self) -> int:
        """Extracts the self.input yml file into several files according to the Demisto standard of the package format.

        Returns:
             int. status code for the operation.
        """
        try:
            output_path = self.get_output_path()
        except ValueError as ex:
            print_error(str(ex))
            return 1
        self.print_logs("Starting migration of: {} to dir: {}".format(self.input, output_path), log_color=LOG_COLORS.NATIVE)
        os.makedirs(output_path, exist_ok=True)
        base_name = os.path.basename(output_path) if not self.base_name else self.base_name
        code_file = "{}/{}".format(output_path, base_name)
        self.extract_code(code_file)
        script = self.yml_data['script']
        lang_type: str = script['type'] if self.file_type == 'integration' else self.yml_data['type']
        code_file = f"{code_file}{TYPE_TO_EXTENSION[lang_type]}"
        self.extract_image("{}/{}_image.png".format(output_path, base_name))
        self.extract_long_description("{}/{}_description.md".format(output_path, base_name))
        yaml_out = "{}/{}.yml".format(output_path, base_name)
        self.print_logs("Creating yml file: {} ...".format(yaml_out), log_color=LOG_COLORS.NATIVE)
        ryaml = YAML()
        ryaml.preserve_quotes = True
        with open(self.input, 'r') as yf:
            yaml_obj = ryaml.load(yf)
        script_obj = yaml_obj

        if self.file_type == 'integration':
            script_obj = yaml_obj['script']
            if 'image' in yaml_obj:
                del yaml_obj['image']
            if 'detaileddescription' in yaml_obj:
                del yaml_obj['detaileddescription']
        script_obj['script'] = SingleQuotedScalarString('')
        code_type = script_obj['type']
        if code_type == TYPE_PWSH and not yaml_obj.get('fromversion'):
            self.print_logs("Setting fromversion for PowerShell to: 5.5.0", log_color=LOG_COLORS.NATIVE)
            yaml_obj['fromversion'] = "5.5.0"
        with open(yaml_out, 'w') as yf:
            ryaml.dump(yaml_obj, yf)
        # check if there is a README and if found, set found_readme to True
        found_readme = False
        if self.readme:
            yml_readme = os.path.splitext(self.input)[0] + '_README.md'
            readme = output_path + '/README.md'
            if os.path.exists(yml_readme):
                found_readme = True
                self.print_logs(f"Copying {readme} to {readme}", log_color=LOG_COLORS.NATIVE)
                shutil.copy(yml_readme, readme)
            else:
                # open an empty file
                with open(readme, 'w'):
                    pass

        # Python code formatting and dev env setup
        if code_type == TYPE_PYTHON:
            if self.basic_fmt:
                self.print_logs("Running autopep8 on file: {} ...".format(code_file), log_color=LOG_COLORS.NATIVE)
                try:
                    subprocess.call(["autopep8", "-i", "--max-line-length", "130", code_file])
                except FileNotFoundError:
                    self.print_logs("autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
                                    "Make sure to install it with: pip install autopep8.\n"
                                    "Then run: autopep8 -i {}".format(code_file), LOG_COLORS.YELLOW)
            if self.pipenv:
                if self.basic_fmt:
                    self.print_logs("Running isort on file: {} ...".format(code_file), LOG_COLORS.NATIVE)
                    try:
                        subprocess.call(["isort", code_file])
                    except FileNotFoundError:
                        self.print_logs("isort skipped! It doesn't seem you have isort installed.\n"
                                        "Make sure to install it with: pip install isort.\n"
                                        "Then run: isort {}".format(code_file), LOG_COLORS.YELLOW)

                self.print_logs("Detecting python version and setting up pipenv files ...", log_color=LOG_COLORS.NATIVE)
                docker = get_all_docker_images(script_obj)[0]
                py_ver = get_python_version(docker, self.config.log_verbose)
                pip_env_dir = get_pipenv_dir(py_ver, self.config.envs_dirs_base)
                self.print_logs("Copying pipenv files from: {}".format(pip_env_dir), log_color=LOG_COLORS.NATIVE)
                shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
                shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
                env = os.environ.copy()
                env["PIPENV_IGNORE_VIRTUALENVS"] = "1"
                try:
                    subprocess.call(["pipenv", "install", "--dev"], cwd=output_path, env=env)
                    self.print_logs("Installing all py requirements from docker: [{}] into pipenv".format(docker),
                                    LOG_COLORS.NATIVE)
                    requirements = get_pip_requirements(docker)
                    fp = tempfile.NamedTemporaryFile(delete=False)
                    fp.write(requirements.encode('utf-8'))
                    fp.close()

                    try:
                        subprocess.check_call(["pipenv", "install", "-r", fp.name], cwd=output_path, env=env)

                    except Exception:
                        self.print_logs("Failed installing requirements in pipenv.\n "
                                        "Please try installing manually after extract ends\n", LOG_COLORS.RED)

                    os.unlink(fp.name)
                    self.print_logs("Installing flake8 for linting", log_color=LOG_COLORS.NATIVE)
                    subprocess.call(["pipenv", "install", "--dev", "flake8"], cwd=output_path, env=env)
                except FileNotFoundError as err:
                    self.print_logs("pipenv install skipped! It doesn't seem you have pipenv installed.\n"
                                    "Make sure to install it with: pip3 install pipenv.\n"
                                    f"Then run in the package dir: pipenv install --dev\n.Err: {err}", LOG_COLORS.YELLOW)
                arg_path = os.path.relpath(output_path)
                self.print_logs("\nCompleted: setting up package: {}\n".format(arg_path), LOG_COLORS.GREEN)
                next_steps: str = "Next steps: \n" \
                                  "* Install additional py packages for unit testing (if needed): cd {};" \
                                  " pipenv install <package>\n".format(arg_path) if code_type == TYPE_PYTHON else ''
                next_steps += "* Create unit tests\n" \
                              "* Check linting and unit tests by running: demisto-sdk lint -i {}\n".format(arg_path)
                next_steps += "* When ready, remove from git the old yml and/or README and add the new package:\n" \
                              "    git rm {}\n".format(self.input)
                if found_readme:
                    next_steps += "    git rm {}\n".format(os.path.splitext(self.input)[0] + '_README.md')
                next_steps += "    git add {}\n".format(arg_path)
                self.print_logs(next_steps, log_color=LOG_COLORS.NATIVE)

            else:
                self.print_logs("Skipping pipenv and requirements installation - Note: no Pipfile will be created",
                                log_color=LOG_COLORS.YELLOW)

        self.print_logs(f"Finished splitting the yml file - you can find the split results here: {output_path}",
                        log_color=LOG_COLORS.GREEN)
        return 0
Example #5
def pretty_format_yaml(argv=None):
    # type: (typing.Optional[typing.List[typing.Text]]) -> int
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--autofix",
        action="store_true",
        dest="autofix",
        help="Automatically fixes encountered not-pretty-formatted files",
    )
    parser.add_argument(
        "--indent",
        type=int,
        help=
        ("The number of indent spaces or a string to be used as delimiter"
         ' for indentation level e.g. 4 or "\t". Overrides offset, sequence, and mapping values if set.'
         ),
    )
    parser.add_argument(
        "--mapping",
        type=int,
        default="2",
        help="Indentation of mapping values (Default: 2)",
    )
    parser.add_argument(
        "--offset",
        type=int,
        default="2",
        help="Map items offset (Default: 2)",
    )
    parser.add_argument(
        "--sequence",
        type=int,
        default="4",
        help=
        "Sequence indentation. Best to have sequence >= offset + 2 (Default: 4)",
    )
    parser.add_argument(
        "--preserve-quotes",
        action="store_true",
        dest="preserve_quotes",
        help="Keep existing string quoting",
    )

    parser.add_argument("filenames", nargs="*", help="Filenames to fix")
    args = parser.parse_args(argv)

    status = 0

    yaml = YAML()
    if args.indent:
        yaml.indent = args.indent
    else:
        yaml.indent(mapping=args.mapping,
                    sequence=args.sequence,
                    offset=args.offset)
    yaml.preserve_quotes = args.preserve_quotes
    # Prevent ruamel.yaml from wrapping yaml lines
    yaml.width = maxsize

    separator = "---\n"

    for yaml_file in set(args.filenames):
        with open(yaml_file) as input_file:
            string_content = "".join(input_file.readlines())

        # Split multi-document file into individual documents
        #
        # Not using yaml.load_all() because it reformats primitive (non-YAML) content. It removes
        # newline characters.
        separator_pattern = r"^---\s*\n"
        original_docs = re.split(separator_pattern,
                                 string_content,
                                 flags=re.MULTILINE)

        # A valid multi-document YAML file might start with the separator.
        # In this case the first document of original_docs will be empty and should not be considered.
        if string_content.startswith("---"):
            original_docs = original_docs[1:]

        pretty_docs = []

        try:
            for doc in original_docs:
                content = _process_single_document(doc, yaml)
                if content is not None:
                    pretty_docs.append(content)

            # Start multi-doc file with separator
            pretty_content = "" if len(pretty_docs) == 1 else separator
            pretty_content += separator.join(pretty_docs)

            if string_content != pretty_content:
                print("File {} is not pretty-formatted".format(yaml_file))

                if args.autofix:
                    print("Fixing file {}".format(yaml_file))
                    with io.open(yaml_file, "w",
                                 encoding="UTF-8") as output_file:
                        output_file.write(text_type(pretty_content))

                status = 1
        except YAMLError:  # pragma: no cover
            print(
                "Input File {} is not a valid YAML file, consider using check-yaml"
                .format(yaml_file, ), )
            return 1

    return status
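The _process_single_document helper is not shown in this excerpt; a minimal sketch of what such a helper could look like under the same assumptions (round-trip one document through the configured YAML instance and skip empty documents) is:

import io

def _process_single_document(document, yaml):
    # Hypothetical helper: parse one YAML document and re-serialize it
    # through the configured ruamel.yaml instance.
    content = yaml.load(document)
    if content is None:
        # Empty document (e.g. a bare "---" separator): nothing to emit.
        return None
    buf = io.StringIO()
    yaml.dump(content, buf)
    return buf.getvalue()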
Example #6
def exportPipelineToZip(pipelineId):

    try:

        usingS3 = (settings.DEFAULT_FILE_STORAGE ==
                   "gremlin.utils.storages.MediaRootS3Boto3Storage")

        pipeline = Pipeline.objects.select_related('root_node').get(
            pk=pipelineId)
        print("Fetched pipeline obj...")

        nodes = []
        edges = []
        scripts = {}
        script_dict = {}

        zip_bytes = io.BytesIO()
        myYaml = io.StringIO()

        yaml = YAML()
        yaml.preserve_quotes = False

        zip_file = zipfile.ZipFile(zip_bytes,
                                   mode='w',
                                   compression=zipfile.ZIP_DEFLATED)
        print("Zip file created")

        for node in PipelineNode.objects.filter(parent_pipeline=pipeline):

            print(f"Look over {node}")

            if node.script:

                print(f"Node ID {node.script.id} has script: {node.script}")
                print(f"Scripts was: {script_dict}")

                script_dict[node.script.id] = exportScriptYAMLObj(
                    node.script.id)

                print(f"Script YAML created...")

                if node.script.data_file and node.script.data_file.data_file:

                    print("Script had a data file")

                    # The precise field with the valid filename / path depends on the storage adapter, so handle accordingly
                    if usingS3:
                        filename = node.script.data_file.data_file.name

                    else:
                        filename = node.script.data_file.data_file.path

                    data_file_bytes = default_storage.open(filename,
                                                           mode='rb').read()
                    zip_file.writestr(
                        f"/data/{node.script.data_file.uuid}.zip",
                        data_file_bytes)

        scripts = list(script_dict.values())

        print(f"Scripts are: {scripts}")

        for edge in Edge.objects.filter(parent_pipeline=pipeline):
            edges.append(exportPipelineEdgeToYAMLObj(edge.id))
        print("Edges converted to YAML")

        for node in PipelineNode.objects.filter(parent_pipeline=pipeline):
            nodes.append(exportPipelineNodeToYAMLObj(node.id))
        print("Nodes converted to YAML")

        pipeline_meta = {
            'name': pipeline.name,
            'description': LiteralScalarString(pipeline.description),
            'input_json_schema': pipeline.input_json_schema,
            'root_node': pipeline.root_node.id,
            'scale': pipeline.scale,
            'x_offset': pipeline.x_offset,
            'y_offset': pipeline.y_offset,
        }

        data = {
            'pipeline': pipeline_meta,
            'scripts': scripts,
            'edges': edges,
            'nodes': nodes,
        }

        print("Dump YAML")
        yaml.dump(data, myYaml)

        zip_file.writestr("pipeline_export.yaml", myYaml.getvalue())
        zip_file.close()
        zip_bytes.seek(io.SEEK_SET)

        print("Done with zip_bytes... returning")

        return zip_bytes

    except Exception as e:
        print(f"Error exporting Pipeline to archive: {e}")
        return None
Example #7
    def __yaml_parser(self, kind: str) -> YAML:
        '''Get yaml parser.'''
        yaml = YAML(typ=kind)
        yaml.explicit_start = True  # type: ignore
        yaml.preserve_quotes = True  # type: ignore
        return yaml
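For illustration only, a call site for the parser above (the method name, file path and kind are assumptions), written as another method of the same class:

    def load_config(self, path: str) -> dict:
        # Hypothetical caller: round-trip load a file while preserving quotes.
        yaml = self.__yaml_parser('rt')
        with open(path) as f:
            return yaml.load(f)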
Example #8
def main(argv):
    args = parse_args()

    ## check the yaml of these files because the ruamel python lib has issues with loading them
    yaml_files_check_list = [
        'ml-operator/values.yaml', 'emailnotifier/values.yaml'
    ]

    ports_array = {
        "simapi": "3000",
        "reportapi": "3002",
        "testapi": "3003",
        "https": "80",
        "http": "80",
        "http-admin": "4001",
        "http-api": "4002",
        "mysql": "3306",
        "mongodb": "27017",
        "inboundapi": "{{ $config.config.schemeAdapter.env.INBOUND_LISTEN_PORT }}",
        "outboundapi": "{{ $config.config.schemeAdapter.env.OUTBOUND_LISTEN_PORT }}"
    }

    p = Path() / args.directory
    print(f"Processing helm charts in directory: [{args.directory}]")
    yaml = YAML()
    yaml.allow_duplicate_keys = True
    yaml.preserve_quotes = True
    yaml.width = 4096

    # walk the directory structure and process all the values.yaml files
    # replace solsson kafka with kymeric
    # replace kafka start-up check with netcat test (TODO check to see if this is ok)
    # replace mysql with arm version of mysql and adjust tag on the following line (TODO: check that latest docker mysql/mysql-server latest tag is ok )
    # TODO: maybe don't do this line by line but rather read in the entire file => can match across lines and avoid the next_line_logic
    # for now disable metrics and metrics exporting
    # replace the mojaloop images with the locally built  ones

    if (args.all or args.values):
        print(
            "\n\n============================================================="
        )
        print("Processing values.yaml files.. ")
        print("=============================================================")

        for vf in p.rglob('*/values.yaml'):
            backupfile = Path(vf.parent) / f"{vf.name}_bak"
            print(f"{vf} : {backupfile}")
            copyfile(vf, backupfile)
            with FileInput(files=[vf], inplace=True) as f:
                next_line_is_mojaloop_tag = False
                for line in f:
                    line = line.rstrip()

                    # now update the mojaloop images
                    if (next_line_is_mojaloop_tag):
                        line = re.sub("tag:.*$", "tag: latest", line)
                        next_line_is_mojaloop_tag = False
                    # TODO : check that there is no mojaloop image with > 3 parts to its name i.e. > 3 hyphens
                    if re.match(r"(\s+)repository:\s*mojaloop", line):
                        line = re.sub(
                            r"(\s+)repository:\s*mojaloop/(\w+)-(\w+)-(\w+)-(\w+)",
                            r"\1repository: \2_\3_\4_\5_local", line)
                        line = re.sub(
                            r"(\s+)repository:\s*mojaloop/(\w+)-(\w+)-(\w+)",
                            r"\1repository: \2_\3_\4_local", line)
                        line = re.sub(
                            r"(\s+)repository:\s*mojaloop/(\w+)-(\w+)",
                            r"\1repository: \2_\3_local", line)
                        line = re.sub(r"(\s+)repository:\s*mojaloop/(\w+)",
                                      r"\1repository: \2_local", line)
                        next_line_is_mojaloop_tag = True

                    print(line)

    ## TODO  Need to modify the kafka requirements.yaml to update the zookeeper image
    ##       if I am fully automating this
    # walk the directory structure and process all the requirements.yaml files
    # kafka => local kafka chart
    # mysql/percona => local mysql chart with later arm64 based image
    # zookeeper => local zookeeper (this is in the requirements.yaml of the kafka local chart)

    if (args.all or args.requirements):
        print(
            "\n\n============================================================="
        )
        print("Processing requirements.yaml files ")
        print("=============================================================")
        for rf in p.rglob('*/requirements.yaml'):
            backupfile = Path(rf.parent) / f"{rf.name}_bak"
            print(f"{rf} : {backupfile}")
            copyfile(rf, backupfile)
            with open(rf) as f:
                reqs_data = yaml.load(f)
                #print(reqs_data)
            try:
                dlist = reqs_data['dependencies']
                for i in range(len(dlist)):
                    if (dlist[i]['name'] == "percona-xtradb-cluster"):
                        print(f"old was: {dlist[i]}")
                        dlist[i]['name'] = "mysql"
                        dlist[i]['version'] = "1.0.0"
                        dlist[i]['repository'] = "file://../mysql"
                        dlist[i]['alias'] = "mysql"
                        dlist[i]['condition'] = "enabled"
                        print(f"new is: {dlist[i]}")

                    if (dlist[i]['name'] == "kafka"):
                        print(f"old was: {dlist[i]}")
                        dlist[i]['repository'] = "file://../kafka"
                        dlist[i]['version'] = "1.0.0"
                        print(f"new is: {dlist[i]}")

                    if (dlist[i]['name'] == "zookeeper"):
                        print(f"old was: {dlist[i]}")
                        dlist[i]['version'] = "1.0.0"
                        dlist[i]['repository'] = "file://../zookeeper"
                        print(f"new is: {dlist[i]}")

                    if (dlist[i]['name'] == "mongodb"):
                        print(f"old was: {dlist[i]}")
                        dlist[i]['version'] = "1.0.0"
                        dlist[i]['repository'] = "file://../mongodb"
                        print(f"new is: {dlist[i]}")
            except Exception:
                continue
            #print(yaml.dump(reqs_data))
            with open(rf, "w") as f:
                yaml.dump(reqs_data, f)

    if (args.testonly):
        print(
            "\n\n==============================================================="
        )
        print("running toms code tests")
        print(
            "===============================================================")

        for vf in p.rglob('*/values.yaml'):
            backupfile = Path(vf.parent) / f"{vf.name}_bak"
            # print(f"{vf} : {backupfile}")
            copyfile(vf, backupfile)

            with open(vf) as f:
                skip = False
                for fn in yaml_files_check_list:
                    if vf == Path(fn):
                        print(
                            f"This yaml file needs checking skipping load/processing for now =>  {Path(fn)} "
                        )
                        skip = True
                if not skip:
                    print(f"      Loading yaml for ==> {vf.parent}/{vf.name}",
                          end="")
                    data = yaml.load(f)
                    print("  :[ok]")

            # update kafka settings
            count = 0
            for x, value in lookup("kafka", data):
                #print_debug(x,value)
                list(
                    update_key(
                        'command',
                        'until nc -vz -w 1 $kafka_host $kafka_port; do echo waiting for Kafka; sleep 2; done;',
                        value))
                list(update_key('repository', 'kymeric/cp-kafka', value))
                list(update_key('image', 'kymeric/cp-kafka', value))
                list(update_key('imageTag', 'latest', value))

            # turn off prometheus jmx and kafka exporter
            for x, value in lookup("prometheus", data):
                #print_debug(x,value , 2)
                if isinstance(value, dict):
                    if value.get("jmx"):
                        value['jmx']['enabled'] = False
                    if value.get("kafka"):
                        value['kafka']['enabled'] = False

            # update mysql settings
            for x, value in lookup("mysql", data):
                list(update_key('repository', 'mysql/mysql-server', value))
                list(update_key('tag', '8.0.28-1.2.7-server', value))
                if value.get("image"):
                    del value['image']
                    value['image'] = "mysql/mysql-server"
                    value['imageTag'] = "8.0.28-1.2.7-server"
                    value['pullPolicy'] = "ifNotPresent"

            # turn the side car off for the moment
            for x, value in lookup("sidecar", data):
                list(update_key('enabled', False, value))

            # turn metrics off
            # The simulator has metrics clause with no enabled setting  => hence need to test
            for x, value in lookup("metrics", data):
                try:
                    if value.get("enabled"):
                        value['enabled'] = False
                except Exception:
                    continue

            with open(vf, "w") as f:
                yaml.dump(data, f)

    if (args.ingress):
        print(
            "\n\n======================================================================================"
        )
        print(" Modify charts to implement networking/v1 ")
        print(
            " and to use bitnami mysql rather than percona (percona / busybox is broken on containerd) "
        )
        print(
            "==========================================================================================="
        )

        # modify the template files
        for vf in p.rglob('*.tpl'):
            backupfile = Path(vf.parent) / f"{vf.name}_bak"
            #print(f"{vf} : {backupfile}")
            #copyfile(vf, backupfile)
            with FileInput(files=[vf], inplace=True) as f:
                #with fileinput.input(files=([vf]), inplace=True)  as f:
                for line in f:
                    line = line.rstrip()
                    #replace networking v1beta1
                    line = re.sub(r"networking.k8s.io/v1beta1",
                                  r"networking.k8s.io/v1", line)
                    line = re.sub(r"extensions/v1beta1",
                                  r"networking.k8s.io/v1", line)
                    print(line)

        # modify the ingress.yaml files
        for vf in p.rglob('*/ingress.yaml'):
            backupfile = Path(vf.parent) / f"{vf.name}_bak"
            #print(f"{vf} : {backupfile}")
            #copyfile(vf, backupfile)

            with FileInput(files=[vf], inplace=True) as f:
                for line in f:
                    line = line.rstrip()
                    if re.search("path:", line):
                        line_dup = line
                        line_dup = re.sub(
                            r"- path:.*$",
                            r"  pathType: ImplementationSpecific", line_dup)
                        print(line)
                        print(line_dup)
                    elif re.search("serviceName:", line):
                        line_dup = line
                        line_dup = re.sub(r"serviceName:.*$", r"service:",
                                          line_dup)
                        print(line_dup)
                        line = re.sub(r"serviceName:", r"  name:", line)
                        print(line)
                    elif re.search("servicePort:", line):
                        line_dup = line
                        line_dup = re.sub(r"servicePort:.*$", r"  port:",
                                          line_dup)
                        line = re.sub(r"servicePort: ", r"    number: ", line)
                        # need to replace port names with numbers
                        for pname, pnum in ports_array.items():
                            line = re.sub(f"number: {pname}$",
                                          f"number: {pnum}", line)
                        print(line_dup)
                        print(line)
                        #servicePort {{ .Values.containers.api.service.ports.api.externalPort }}
                    elif re.search("spec:", line):
                        print(line)
                        print(
                            "  ingressClassName: public"
                        )  # well at least it is "public" for microk8s v1.22 => TODO: fully figure out the changes and settings here and simplify!
                    else:
                        print(line)

        for vf in p.rglob('*/values.yaml'):
            with open(vf) as f:

                #print(f"{vf.parent}/{vf.name}")
                skip = False
                for fn in yaml_files_check_list:
                    if vf == Path(fn):
                        print(
                            f"This yaml file needs checking skipping load/processing for now =>  {Path(fn)} "
                        )
                        skip = True
                if not skip:
                    #print(f"      Loading yaml for ==> {vf.parent}/{vf.name}", end="")
                    data = yaml.load(f)
                    #print("  :[ok]")

                for x, value in lookup("mysql", data):
                    list(update_key('enabled', 'true', value))
                # => use these for now
                # TODO: update to later DB and get rid of default passwords
                for x, value in lookup("mysql", data):
                    list(update_key('repository', 'mysql/mysql-server', value))
                    list(update_key('tag', '5.6', value))
                    if value.get("image"):
                        del value['image']
                        value['image'] = "mysql"
                        value['imageTag'] = '8.0'
                        value['pullPolicy'] = "ifNotPresent"

                ### need to set nameOverride  for mysql for ml-testing-toolkit as it appears to be missing
                if vf == Path('mojaloop/values.yaml'):
                    print("Updating the ml-testing-toolkit / mysql config ")
                    for x, value in lookup("ml-testing-toolkit", data):
                        value['mysql'] = {"nameOverride": "ttk-mysql"}

            with open(vf, "w") as f:
                yaml.dump(data, f)

        # versions of k8s -> 1.20 use containerd not docker and the percona chart
        # or at least the busybox dependency of the percona chart has an issue
        # so just replace the percona chart with the mysql charts
        #  for now using the old one because it deploys => TODO fix this and update
        for rf in p.rglob('*/requirements.yaml'):
            with open(rf) as f:
                reqs_data = yaml.load(f)
                #print(reqs_data)
            try:
                dlist = reqs_data['dependencies']
                for i in range(len(dlist)):
                    if (dlist[i]['name'] == "percona-xtradb-cluster"):
                        print(f"old was: {dlist[i]}")
                        dlist[i]['name'] = "mysql"
                        #dlist[i]['version'] = "8.8.8"
                        #dlist[i]['repository'] = "https://charts.bitnami.com/bitnami"
                        dlist[i]['version'] = 8.0
                        dlist[i][
                            'repository'] = "https://charts.bitnami.com/bitnami"
                        dlist[i]['alias'] = "mysql"
                        dlist[i]['condition'] = "enabled"
                        print(f"new is: {dlist[i]}")

                    # if (dlist[i]['name'] == "mongodb"):
                    #     print(f"old was: {dlist[i]}")
                    #     dlist[i]['version'] = "11.1.7"
                    #     dlist[i]['repository'] = "file://../mongodb"
                    #     print(f"new is: {dlist[i]}")
            except Exception:
                continue

            with open(rf, "w") as f:
                yaml.dump(reqs_data, f)
Example #9
def update_files():
    """check the filepath extension and update the respective file"""
    global datasource_paths
    global user_value

    datasource_paths = DATASOURCE_PATHS[product_id]
    print("Datasource paths: " + str(datasource_paths))
    for data_source in datasource_paths:
        print("Datasource is " + data_source)
        print("storage dist abs path is : " + str(storage_dist_abs_path))
        file_path = Path(workspace + "/" + product_id + "/" + data_source)
        print("file_path is : " + str(file_path))
        if str(file_path).endswith('.yaml'):
            print("filepath file is yaml")
            yaml = YAML()
            yaml.preserve_quotes = True
            doc = Path(file_path)
            obj = yaml.load(doc)
            print("Current username is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['username'])
            print("Current password is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['password'])
            print("Current jdbcurl is : " + obj['wso2.datasources']
                  ['dataSources'][0]['definition']['configuration']['jdbcUrl'])
            print("Current driver name is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['driverClassName'])
            print("Current connection query value is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['connectionTestQuery'])

            obj['wso2.datasources']['dataSources'][0]['definition'][
                'configuration']['password'] = password_value
            obj['wso2.datasources']['dataSources'][0]['definition'][
                'configuration']['username'] = user_value
            obj['wso2.datasources']['dataSources'][0]['definition'][
                'configuration']['jdbcUrl'] = url_value
            obj['wso2.datasources']['dataSources'][0]['definition'][
                'configuration']['driverClassName'] = drive_class_name_value
            if ORACLE_DB_ENGINE == database_config['db_engine'].upper():
                obj['wso2.datasources']['dataSources'][0]['definition'][
                    'configuration'][
                        'connectionTestQuery'] = validation_query_value

            print("Changed username is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['username'])
            print("Changed password is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['password'])
            print("Changed  jdbcurl is : " + obj['wso2.datasources']
                  ['dataSources'][0]['definition']['configuration']['jdbcUrl'])
            print("Changed driver name is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['driverClassName'])
            print("Changed connection query value is : " +
                  obj['wso2.datasources']['dataSources'][0]['definition']
                  ['configuration']['connectionTestQuery'])

            yaml.dump(obj, doc)

        elif str(file_path).endswith('.xml'):
            print("filepath is xml")
            modify_datasources()
        else:
            print("extension not matched")
Example #10
    def run(self):
        """
        This function will try to load integration/script yml file.
        Creates test playbook, and converts each command to automation task in test playbook and generates verify
        outputs task from command outputs.

        All the tasks eventually will be linked to each other:
        playbook_start_task => delete_context(all) => task1 => verify_outputs_task1 => task2 => verify_outputs_task2
            => task_end

        At the end the functions dumps the new test playbook to the outdir if set, otherwise file will be created in
        local directory

        """
        if self.outdir:
            if not os.path.isdir(self.outdir):
                print_error(f'Directory not exist: {self.outdir}')
                return

        ryaml = YAML()
        ryaml.preserve_quotes = True
        try:
            with open(self.integration_yml_path, 'r') as yf:
                yaml_obj = ryaml.load(yf)

                yaml_obj.get('name')
        except FileNotFoundError as ex:
            if self.verbose:
                raise

            print_error(str(ex))
            return
        except AttributeError:
            print_error(f'Error - failed to parse: {self.integration_yml_path}.\nProbably invalid yml file')
            return

        test_playbook = Playbook(
            name=self.name,
            fromversion='4.5.0'
        )

        if self.file_type == ContentItemType.INTEGRATION:
            for command in yaml_obj.get('script').get('commands'):
                create_automation_task_and_verify_outputs_task(
                    test_playbook=test_playbook,
                    command=command,
                    item_type=ContentItemType.INTEGRATION,
                    no_outputs=self.no_outputs
                )

        elif self.file_type == ContentItemType.SCRIPT:
            create_automation_task_and_verify_outputs_task(
                test_playbook=test_playbook,
                command=yaml_obj,
                item_type=ContentItemType.INTEGRATION,
                no_outputs=self.no_outputs
            )

        test_playbook.add_task(create_end_task(test_playbook.task_counter))

        with open(self.test_playbook_yml_path, 'w') as yf:
            ryaml.dump(test_playbook.to_dict(), yf)

            print_color(f'Test playbook yml was saved at:\n{self.test_playbook_yml_path}', LOG_COLORS.GREEN)
Example #11
"""
heluxup parses the flux control repository for HelmReleases and checks whether updates of the charts are
available. If updates are available, heluxup updates the yaml files in the flux control repository accordingly.
"""

import os

import click
import semver
import urllib3
from ruamel.yaml import YAML

HTTP = urllib3.PoolManager()
YAML_PARSER = YAML()
YAML_PARSER.preserve_quotes = True
YAML_PARSER.explicit_start = True
YAML_PARSER.width = 8000
REPO_CACHE = {}


class HelmRelease:
    """
    HelmRelease represents a HelmRelease object that is used by flux.
    """
    def __init__(self, release_name, chart_name, repository, git_version):
        self.release_name = release_name
        self.chart_name = chart_name
        self.repository = repository.rstrip('/')
        self.git_version = git_version

        if self.repository not in REPO_CACHE:
Example #12
# All the beat types that will be processed
beats = ['winlogbeat',
         'filebeat',
         'metricbeat',
         'packetbeat']

# Start of the program
ipAddr = 'asdf' # Remove the fake IP, only here for testing
port = ''
while not valid_ip(ipAddr):
    ipAddr = input("Enter logstash host IP: ")
    port = input("Enter port [5044]: ")
if port == '':
    # If nothing entered, use default port for beats input to logstash
    port = '5044'

# Setting up the ruamel.yaml to properly read libbeat configs
yaml = YAML()
yaml.default_flow_style = False
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.preserve_quotes = True

# Get directories to work with
rootDir = '.'
for dirName, subdirList, fileList in os.walk(rootDir):
    for file in fileList:
        for beat in beats:
            if file == beat + '.yml':   #startswith(beat):
                print(f"Updating: {dirName}/{beat}.yml")
                update_config(beat, dirName+'/')
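update_config is referenced above but not defined in this excerpt; a hypothetical sketch, assuming it simply points the beat's Logstash output at the host and port collected earlier (output.logstash/hosts is a standard libbeat setting; everything else is illustrative):

def update_config(beat, directory):
    # Hypothetical: rewrite <beat>.yml so it ships events to the chosen Logstash host.
    cfg_path = directory + beat + '.yml'
    with open(cfg_path) as f:
        cfg = yaml.load(f)          # uses the globally configured ruamel.yaml instance
    cfg.setdefault('output.logstash', {})['hosts'] = [f"{ipAddr}:{port}"]
    with open(cfg_path, 'w') as f:
        yaml.dump(cfg, f)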
Example #13
def migrate(yml_path,
            output_path,
            demisto_mock,
            commonserver=None,
            yml_type=None):
    try:
        from ruamel.yaml import YAML
        from ruamel.yaml.scalarstring import SingleQuotedScalarString
    except Exception as ex:
        print(
            "Failed importing ruamel.yaml. Migrate requires ruamel.yaml to work cleanly.\n"
            "Install via: pip3 install ruamel.yaml.\nErr: {}".format(ex))
        return 1
    print("Starting migration of: {} to dir: {}".format(yml_path, output_path))
    arg_path = output_path
    output_path = os.path.abspath(output_path)
    os.makedirs(output_path, exist_ok=True)
    base_name = os.path.basename(output_path)
    yml_type = get_yml_type(yml_path, yml_type)
    code_file = "{}/{}.py".format(output_path, base_name)
    extract_code(yml_path, code_file, demisto_mock, commonserver, yml_type)
    extract_image(yml_path, "{}/{}_image.png".format(output_path, base_name),
                  yml_type)
    yaml_out = "{}/{}.yml".format(output_path, base_name)
    print("Creating yml file: {} ...".format(yaml_out))
    ryaml = YAML()
    ryaml.preserve_quotes = True
    with open(yml_path, 'r') as yf:
        yaml_obj = ryaml.load(yf)
    script_obj = yaml_obj
    if yml_type == INTEGRATION:
        script_obj = yaml_obj['script']
        del yaml_obj['image']
    if script_obj['type'] != 'python':
        print('Script is not of type "python". Found type: {}. Nothing to do.'.
              format(script_obj['type']))
        return 1
    script_obj['script'] = SingleQuotedScalarString('')
    with open(yaml_out, 'w') as yf:
        ryaml.dump(yaml_obj, yf)
    print("Running autopep8 on file: {} ...".format(code_file))
    try:
        subprocess.call(
            ["autopep8", "-i", "--max-line-length", "130", code_file])
    except FileNotFoundError:
        print_color(
            "autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
            "Make sure to install it with: pip install autopep8.\n"
            "Then run: autopep8 -i {}".format(code_file), LOG_COLORS.YELLOW)
    print("Detecting python version and setting up pipenv files ...")
    docker = get_docker_images(script_obj)[0]
    py_ver = get_python_version(docker)
    pip_env_dir = get_pipenv_dir(py_ver)
    print("Copying pipenv files from: {}".format(pip_env_dir))
    shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
    shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
    try:
        subprocess.call(["pipenv", "install", "--dev"], cwd=output_path)
        print("Installing all py requirements from docker: [{}] into pipenv".
              format(docker))
        requirements = subprocess.check_output(
            [
                "docker", "run", "--rm", docker, "pip", "freeze",
                "--disable-pip-version-check"
            ],
            universal_newlines=True,
            stderr=subprocess.DEVNULL).strip()
        fp = tempfile.NamedTemporaryFile(delete=False)
        fp.write(requirements.encode('utf-8'))
        fp.close()
        subprocess.check_call(["pipenv", "install", "-r", fp.name],
                              cwd=output_path)
        os.unlink(fp.name)
    except FileNotFoundError:
        print_color(
            "pipenv install skipped! It doesn't seem you have pipenv installed.\n"
            "Make sure to install it with: pip3 install pipenv.\n"
            "Then run in the package dir: pipenv install --dev",
            LOG_COLORS.YELLOW)
    print_color("\nCompleted: setting up package: {}\n".format(arg_path),
                LOG_COLORS.GREEN)
    print(
        "Next steps: \n"
        "* Install additional py packages for unit testsing (if needed): pipenv install <package>\n"
        "* Create unit tests\n"
        "* Check linting and unit tests by running: ./Tests/scripts/pkg_dev_test_tasks.py -d {}\n"
        .format(arg_path))
    return 0
Example #14
def yaml():
    y = YAML()
    y.explicit_start = True
    y.explicit_end = False
    y.preserve_quotes = True
    return y
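A small usage sketch (illustrative only), assuming the factory above is called directly: with explicit_start on and explicit_end off, every dump begins with '---' and is not terminated with '...'.

from io import StringIO

y = yaml()
buf = StringIO()
y.dump({"enabled": True}, buf)
print(buf.getvalue())
# ---
# enabled: true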
Example #15
    INDICATOR_FIELDS_DIR, INTEGRATIONS_DIR, JSON_ALL_INDICATOR_TYPES_REGEXES,
    LAYOUTS_DIR, PACKAGE_SUPPORTING_DIRECTORIES, PACKAGE_YML_FILE_REGEX,
    PACKS_DIR, PACKS_DIR_REGEX, PACKS_README_FILE_NAME, PLAYBOOKS_DIR,
    RELEASE_NOTES_DIR, RELEASE_NOTES_REGEX, REPORTS_DIR, SCRIPTS_DIR,
    SDK_API_GITHUB_RELEASES, TEST_PLAYBOOKS_DIR, TYPE_PWSH, UNRELEASE_HEADER,
    WIDGETS_DIR, FileType)
from ruamel.yaml import YAML

# disable insecure warnings
urllib3.disable_warnings()

# initialize color palette
colorama.init()

ryaml = YAML()
ryaml.preserve_quotes = True  # type: ignore
ryaml.allow_duplicate_keys = True


class LOG_COLORS:
    NATIVE = colorama.Style.RESET_ALL
    RED = colorama.Fore.RED
    GREEN = colorama.Fore.GREEN
    YELLOW = colorama.Fore.YELLOW
    WHITE = colorama.Fore.WHITE


LOG_VERBOSE = False

LAYOUT_CONTAINER_FIELDS = {
    'details', 'detailsV2', 'edit', 'close', 'mobile', 'quickView',
Example #16
def merge_yamls(rule_file, tag_file, output_file):

    yaml = YAML(typ='rt')
    yaml.width = 4096  # prevent line wrap
    yaml.preserve_quotes = True

    indexed_rules_tags = {}
    with open(tag_file, "r") as file:
        rules_tags = yaml.load(file)
        for rule in rules_tags:
            indexed_rules_tags[rule['rule']] = rule['tags']

    stats = {
        'lists': 0,
        'macros': 0,
        'rules': 0,
        'rules_unmodified': 0,
        'rules_modified': 0,
        'rules_notfound': 0,
        'rules_notags': 0,
        'other': 0
    }
    other_items = []
    rules_not_found = []
    rules_no_tags_key = []
    required_engine_version = 0

    with open(rule_file, "r") as file:
        falco_doc = yaml.load(file)

        for item in falco_doc:
            if item.get("list") != None:
                stats['lists'] += 1
                continue
            if item.get("macro") != None:
                stats['macros'] += 1
                continue
            if item.get("required_engine_version") != None:
                required_engine_version = item.get("required_engine_version")
                continue

            if item.get("rule") == None:
                # Something that is not a rule, a macro or a list
                stats['other'] += 1
                other_items.append(item)
                continue

            # A rule
            stats['rules'] += 1

            if item.get("tags") == None:
                # Rule doesn't have a 'tags' key
                stats['rules_notags'] += 1
                rules_no_tags_key.append(item.get("rule"))
                continue
            if item.get("rule") not in indexed_rules_tags.keys():
                # Tags file doesn't have a rule with same name
                rules_not_found.append(item.get("rule"))
                continue
            if len(indexed_rules_tags[item.get("rule")]) == 0:
                # Tag file doesn't have new tags for this rule
                stats['rules_unmodified'] += 1
                continue

            # Append non existing tags
            for newtag in indexed_rules_tags[item.get("rule")]:
                if newtag not in item['tags']:
                    item['tags'].append(newtag)

            stats['rules_modified'] += 1

        # Write output file

        with open(output_file, "w") as stream:
            stream.write('# Merged tags from ' + os.path.basename(tag_file) +
                         '\n\n')
            yaml.dump(falco_doc, stream)
            stream.close()

        # Output results

        if (len(rules_not_found) > 0):
            print("\nRules not found:")
            for rule in rules_not_found:
                print(rule)

        if (len(rules_no_tags_key) > 0):
            print("\nRules without 'tags' keyword:")
            for rule in rules_no_tags_key:
                print(rule)

        if (len(other_items) > 0):
            print("\nOther elements:")
            for item in other_items:
                print(item)

        print("\nLists: ", stats['lists'])
        print("Macros: ", stats['macros'])
        print("Rules: ", stats['rules'])
        print("  Modified rules: ", stats['rules_modified'])
        print("  Unmodified rules: ", stats['rules_unmodified'])
        print("  Rules not found: ", len(rules_not_found))
        print("  Rules no tags key: ", stats['rules_notags'])
        print("required_engine_version: ", required_engine_version)
        print("Other: ", len(other_items))

        if (len(rules_not_found) > 0):
            sys.exit(1)
        sys.exit(0)
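A possible invocation (the file names are hypothetical); note that merge_yamls calls sys.exit itself, so it is best wrapped behind a main guard:

if __name__ == "__main__":
    # Merge the tags from falco_rules_tags.yaml into falco_rules.yaml
    # and write the combined document to falco_rules_merged.yaml.
    merge_yamls("falco_rules.yaml", "falco_rules_tags.yaml",
                "falco_rules_merged.yaml")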
Example #17
def main(argv):
    args = parse_args()

    ingress_cn = set_ingressclassname(args.kubernetes)
    script_path = Path(__file__).absolute()
    mysql_values_file = script_path.parent.parent / "./etc/mysql_values.yaml"
    db_pass = gen_password()
    if (args.verbose):
        print(f"mysql_values_file  is {mysql_values_file}")
        print(f"mysql password is {db_pass}")

    ## check the yaml of these files because the ruamel python lib has issues with loading them
    yaml_files_check_list = [
        'ml-operator/values.yaml', 'emailnotifier/values.yaml'
    ]

    ports_array = {
        "simapi": "3000",
        "reportapi": "3002",
        "testapi": "3003",
        "https": "80",
        "http": "80",
        "http-admin": "4001",
        "http-api": "4002",
        "mysql": "3306",
        "mongodb": "27017",
        "inboundapi": "{{ $config.config.schemeAdapter.env.INBOUND_LISTEN_PORT }}",
        "outboundapi": "{{ $config.config.schemeAdapter.env.OUTBOUND_LISTEN_PORT }}"
    }

    ingress_cn = set_ingressclassname(args.kubernetes)
    print(f"ingressclassname in main is {ingress_cn}")
    p = Path() / args.directory
    print(f"Processing helm charts in directory: [{args.directory}]")
    yaml = YAML()
    yaml.allow_duplicate_keys = True
    yaml.preserve_quotes = True
    yaml.width = 4096

    # walk the directory structure and process all the values.yaml files
    # replace solsson kafka with kymeric
    # replace kafa start up check with netcat test (TODO check to see if this is ok)
    # replace mysql with arm version of mysql and adjust tag on the following line (TODO: check that latest docker mysql/mysql-server latest tag is ok )
    # TODO: maybe don't do this line by line but rather read in the entire file => can match across lines and avoid the next_line_logic
    # for now disable metrics and metrics exporting
    # replace the mojaloop images with the locally built  ones

    print(
        " ==> mod_local_miniloop : Modify helm template files (.tpl) to implement networking/v1"
    )
    # modify the template files
    for vf in p.rglob('*.tpl'):
        backupfile = Path(vf.parent) / f"{vf.name}_bak"
        with FileInput(files=[str(vf)], inplace=True) as f:
            for line in f:
                line = line.rstrip()
                #replace networking v1beta1
                line = re.sub(r"networking.k8s.io/v1beta1",
                              r"networking.k8s.io/v1", line)
                line = re.sub(r"extensions/v1beta1", r"networking.k8s.io/v1",
                              line)
                print(line)

    # modify the ingress.yaml files
    print(
        " ==> mod_local_miniloop : Modify helm template ingress.yaml files to implement newer ingress"
    )
    print(
        f" ==> mod_local_miniloop : Modify helm template ingress.yaml implement correct ingressClassName [{ingress_cn}]"
    )
    for vf in p.rglob('*/ingress.yaml'):
        backupfile = Path(vf.parent) / f"{vf.name}_bak"

        with FileInput(files=[str(vf)], inplace=True) as f:
            for line in f:
                line = line.rstrip()
                if re.search("path:", line):
                    line_dup = line
                    line_dup = re.sub(r"- path:.*$",
                                      r"  pathType: ImplementationSpecific",
                                      line_dup)
                    print(line)
                    print(line_dup)
                elif re.search("serviceName:", line):
                    line_dup = line
                    line_dup = re.sub(r"serviceName:.*$", r"service:",
                                      line_dup)
                    print(line_dup)
                    line = re.sub(r"serviceName:", r"  name:", line)
                    print(line)
                elif re.search("servicePort:", line):
                    line_dup = line
                    line_dup = re.sub(r"servicePort:.*$", r"  port:", line_dup)
                    line = re.sub(r"servicePort: ", r"    number: ", line)
                    # need to replace port names with numbers
                    for pname, pnum in ports_array.items():
                        line = re.sub(f"number: {pname}$", f"number: {pnum}",
                                      line)
                    print(line_dup)
                    print(line)
                elif re.search("ingressClassName", line):
                    # skip any ingressClassname already set => we can re-run program without issue
                    continue
                elif re.search("spec:", line):
                    print(line)
                    print(f"  ingressClassName: {ingress_cn}")
                else:
                    print(line)

    # put the database password file into the mysql helm chart values file
    print(f" ==> mod_local_miniloop : generating a new database password")
    print(
        f" ==> mod_local_miniloop : insert new pw into [{mysql_values_file}]")
    with FileInput(files=[str(mysql_values_file)], inplace=True) as f:
        for line in f:
            line = line.rstrip()
            line = re.sub(r"password: .*$", r"password: '******'",
                          line)
            line = re.sub(r"mysql_native_password BY .*$",
                          r"mysql_native_password BY '" + db_pass + "';", line)
            print(line)

    print(
        " ==> mod_local_miniloop : Modify helm values to implement single mysql database"
    )
    for vf in p.glob('**/*values.yaml'):
        with open(vf) as f:
            if (args.verbose):
                print(f"===> Processing file < {vf.parent}/{vf.name} > ")
            skip = False
            for fn in yaml_files_check_list:
                if vf == Path(fn):
                    if (args.verbose):
                        print(
                            f"This yaml file needs checking skipping load/processing for now =>  {Path(fn)} "
                        )
                    skip = True
            if not skip:
                data = yaml.load(f)

            for x, value in lookup("mysql", data):
                if (value.get("name") == "wait-for-mysql"):
                    value['repository'] = "mysql"
                    value['tag'] = '8.0'
                if value.get("mysqlDatabase"):
                    value['enabled'] = False

            # update the values files to use a mysql instance that has already been deployed
            # and that uses a newly generated database password
            for x, value in lookup("config", data):
                if isinstance(value, dict):
                    if (value.get('db_type')):
                        value['db_host'] = 'mldb'
                        value['db_password'] = db_pass

            ### need to set nameOverride  for mysql for ml-testing-toolkit as it appears to be missing
            # if vf == Path('mojaloop/values.yaml') :
            #     print("Updating the ml-testing-toolkit / mysql config ")
            #     for x, value in lookup("ml-testing-toolkit", data):
            #         value['mysql'] = { "nameOverride" : "ttk-mysql" }

        with open(vf, "w") as f:
            yaml.dump(data, f)

    # now that the generated password can contain special characters it is necessary to ensure
    # that $db_password is single quoted in the values files.
    print(
        " ==> mod_local_miniloop : Modify helm values, single quote db_password field to enable secure database password"
    )
    for vf in p.glob('**/*values.yaml'):
        with FileInput(files=[str(vf)], inplace=True) as f:
            for line in f:
                line = line.rstrip()
                line = re.sub(r"\'\$db_password\'", r"$db_password",
                              line)  # makes this re-runnable.
                line = re.sub(r'\$db_password', r"'$db_password'", line)
                print(line)

    # Kubernetes versions from 1.20 onwards use containerd rather than docker, and the percona
    # chart (or at least its busybox dependency) has an issue there.
    # So here we update the chart dependencies to ensure a current mysql chart is configured
    # using the bitnami helm chart. BUT as we are disabling the database in the
    # values files and relying on a separately deployed database, this update is not really
    # doing anything. See the mini-loop scripts dir for where and how the database deployment
    # is now done.
    print(
        " ==> mod_local_miniloop : Modify helm requirements.yaml replace deprecated percona chart with current mysql"
    )
    for rf in p.rglob('**/*requirements.yaml'):
        with open(rf) as f:
            reqs_data = yaml.load(f)
            #print(reqs_data)
        try:
            dlist = reqs_data['dependencies']
            for i in range(len(dlist)):
                if (dlist[i]['name'] in ["percona-xtradb-cluster", "mysql"]):
                    dlist[i]['name'] = "mysql"
                    dlist[i]['version'] = 8.0
                    dlist[i][
                        'repository'] = "https://charts.bitnami.com/bitnami"
                    dlist[i]['alias'] = "mysql"
                    dlist[i]['condition'] = "mysql.enabled"

        except Exception:
            continue

        with open(rf, "w") as f:
            yaml.dump(reqs_data, f)
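        # Net effect on each requirements.yaml (illustrative): any dependency named
        # percona-xtradb-cluster or mysql is rewritten to
        #   - name: mysql
        #     version: 8.0
        #     repository: https://charts.bitnami.com/bitnami
        #     alias: mysql
        #     condition: mysql.enabled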

    print(
        f"Sucessfully finished processing helm charts in directory: [{args.directory}]"
    )
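
# The lookup() generator used above is defined elsewhere in this script; a
# minimal sketch consistent with how it is called here (walk a nested dict/list
# structure and yield (path, value) for every mapping entry whose key matches)
# might look like:
def lookup(sk, d, path=None):
    """Yield (path, value) for each occurrence of key sk anywhere inside d."""
    path = path or []
    if isinstance(d, dict):
        for k, v in d.items():
            if k == sk:
                yield (path + [k], v)
            yield from lookup(sk, v, path + [k])
    elif isinstance(d, list):
        for item in d:
            yield from lookup(sk, item, path)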
Ejemplo n.º 18
0
    async def update_build_data(self, advisories: Dict[str, int],
                                jira_issue_key: Optional[str]):
        if not advisories and not jira_issue_key:
            return False
        repo = self.working_dir / "ocp-build-data-push"
        if not repo.exists():
            await self.clone_build_data(repo)

        if not self.assembly or self.assembly == "stream":
            # update advisory numbers in group.yml
            with open(repo / "group.yml", "r") as f:
                group_config = f.read()
            for kind, advisory in advisories.items():
                new_group_config = re.sub(fr"^(\s+{kind}:)\s*[0-9]+$",
                                          fr"\1 {advisory}",
                                          group_config,
                                          count=1,
                                          flags=re.MULTILINE)
                group_config = new_group_config
            # freeze automation
            group_config = re.sub(r"^freeze_automation:.*",
                                  "freeze_automation: scheduled",
                                  group_config,
                                  count=1,
                                  flags=re.MULTILINE)
            # update group.yml
            with open(repo / "group.yml", "w") as f:
                f.write(group_config)
        else:
            # update releases.yml (if we are operating on a non-stream assembly)
            yaml = YAML(typ="rt")
            yaml.preserve_quotes = True
            async with aiofiles.open(repo / "releases.yml", "r") as f:
                old = await f.read()
            releases_config = yaml.load(old)
            group_config = releases_config["releases"][
                self.assembly].setdefault("assembly",
                                          {}).setdefault("group", {})
            group_config["advisories"] = advisories
            group_config["release_jira"] = jira_issue_key
            out = StringIO()
            yaml.dump(releases_config, out)
            async with aiofiles.open(repo / "releases.yml", "w") as f:
                await f.write(out.getvalue())

        cmd = ["git", "-C", str(repo), "--no-pager", "diff"]
        await exectools.cmd_assert_async(cmd)
        cmd = ["git", "-C", str(repo), "add", "."]
        await exectools.cmd_assert_async(cmd)
        cmd = ["git", "-C", str(repo), "diff-index", "--quiet", "HEAD"]
        rc = await exectools.cmd_assert_async(cmd, check=False)
        if rc == 0:
            _LOGGER.warn("Skip saving advisories: No changes.")
            return False
        cmd = [
            "git", "-C",
            str(repo), "commit", "-m", f"Prepare release {self.release_name}"
        ]
        await exectools.cmd_assert_async(cmd)
        if not self.dry_run:
            _LOGGER.info("Pushing changes to upstream...")
            cmd = ["git", "-C", str(repo), "push", "origin", self.group_name]
            await exectools.cmd_assert_async(cmd)
        else:
            _LOGGER.warn("Would have run %s", cmd)
            _LOGGER.warn("Would have pushed changes to upstream")
        return True
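
# A self-contained sketch of the group.yml advisory substitution used above,
# with hypothetical advisory kinds and numbers (not taken from a real group.yml):
import re

group_config = "advisories:\n  image: 111\n  rpm: 222\n"
for kind, advisory in {"image": 333, "rpm": 444}.items():
    group_config = re.sub(fr"^(\s+{kind}:)\s*[0-9]+$", fr"\1 {advisory}",
                          group_config, count=1, flags=re.MULTILINE)
# group_config is now "advisories:\n  image: 333\n  rpm: 444\n"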
Ejemplo n.º 19
0
def main(docname):

    with open(docname, "r") as fi:
        lines = fi.readlines()
    context = {}
    rest_lines = []
    for line in lines:
        # print(line)
        if "{%" in line:
            set_expr = re.search("{%(.*)%}", line)
            set_expr = set_expr.group(1)
            set_expr = set_expr.replace("set", "", 1).strip()
            exec(set_expr, globals(), context)
        else:
            rest_lines.append(line)

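    # At this point `context` holds the "{% set key = value %}" assignments,
    # e.g. the line '{% set version = "1.2.3" %}' yields
    # context == {"version": "1.2.3"}.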
    yaml = YAML(typ="rt")
    yaml.preserve_quotes = True
    yaml.default_flow_style = False
    yaml.indent(sequence=4, offset=2)
    yaml.width = 1000
    yaml.Representer = MyRepresenter
    yaml.Loader = ruamel.yaml.RoundTripLoader

    result_yaml = CommentedMap()
    result_yaml["context"] = context

    def has_selector(s):
        return s.strip().endswith("]")

    quoted_lines = []
    for line in rest_lines:
        if has_selector(line):
            selector_start = line.rfind("[")
            selector_end = line.rfind("]")
            selector_content = line[selector_start + 1 : selector_end]

            if line.strip().startswith("-"):
                line = (
                    line[: line.find("-") + 1]
                    + f" sel({selector_content}): "
                    + line[
                        line.find("-") + 1 : min(line.rfind("#"), line.rfind("["))
                    ].strip()
                    + "\n"
                )
        quoted_lines.append(line)
    rest_lines = quoted_lines

    def check_if_quoted(s):
        s = s.strip()
        return s.startswith('"') or s.startswith("'")

    quoted_lines = []
    for line in rest_lines:
        if "{{" in line:
            # make sure that jinja stuff is quoted
            if line.find(":") != -1:
                idx = line.find(":")
            elif line.strip().startswith("-"):
                idx = line.find("-")
            rest = line[idx + 1 :]

            if not check_if_quoted(rest):
                if "'" in rest:
                    rest = rest.replace("'", '"')

                line = line[: idx + 1] + f" '{rest.strip()}'\n"
        quoted_lines.append(line)
    rest_lines = quoted_lines

    skips, wo_skip_lines = [], []
    for line in rest_lines:
        if line.strip().startswith("skip"):
            parts = line.split(":")
            rhs = parts[1].strip()
            if rhs.startswith("true"):
                selector_start = line.rfind("[")
                selector_end = line.rfind("]")
                selector_content = line[selector_start + 1 : selector_end]
                skips.append(selector_content)
            else:
                print("ATTENTION skip: false not handled!")
        else:
            wo_skip_lines.append(line)

    rest_lines = wo_skip_lines
    result_yaml.update(
        ruamel.yaml.load("".join(rest_lines), ruamel.yaml.RoundTripLoader)
    )

    if len(skips) != 0:
        result_yaml["build"]["skip"] = skips

    if result_yaml.get("outputs"):
        for o in result_yaml["outputs"]:
            name = o["name"]
            package = {"name": name}
            del o["name"]
            if o.get("version"):
                package["version"] = o["version"]
                del o["version"]

            build = {}
            if o.get("script"):
                build["script"] = o["script"]
                del o["script"]

            o["package"] = package
            o["build"] = build

        for d in result_yaml["outputs"]:
            print(order_output_dict(d))
        result_yaml["outputs"] = [order_output_dict(d) for d in result_yaml["outputs"]]

    from io import StringIO

    output = StringIO()
    yaml.dump(result_yaml, output)

    # Hacky way to insert an empty line after the context-key-object
    context_output = StringIO()
    yaml.dump(context, context_output)
    context_output = context_output.getvalue()
    context_output_len = len(context_output.split("\n"))

    final_result = output.getvalue()
    final_result_lines = final_result.split("\n")
    final_result_lines.insert(context_output_len, "")

    print("\n".join(final_result_lines))
Ejemplo n.º 20
0
def store_iam_resources_in_git(
    iam_resources,
    account_id,
    git_url=config.get("cache_iam_resources_for_account.store_in_git.repo"),
    git_message="[Automated] Update IAM Cache",
):
    """
    Experimental function to force-push discovered IAM resources into a Git repository's master branch.
    Use at your own risk.
    """
    accounts_d = async_to_sync(get_account_id_to_name_mapping)()
    tempdir = tempfile.mkdtemp()
    try:
        repo = clone_repo(git_url, tempdir)

        expected_entries = {
            "UserDetailList": {
                "category": "iam_users",
                "resource_name_key": "UserName",
            },
            "GroupDetailList": {
                "category": "iam_groups",
                "resource_name_key": "GroupName",
            },
            "RoleDetailList": {
                "category": "iam_roles",
                "resource_name_key": "RoleName",
            },
            "Policies": {
                "category": "iam_policies",
                "resource_name_key": "PolicyName"
            },
        }

        for key, settings in expected_entries.items():
            category = settings["category"]
            for resource in iam_resources[key]:
                if key == "RoleDetailList":
                    resource.pop("RoleLastUsed", None)
                resource_name = resource[settings["resource_name_key"]]
                yaml = YAML()
                yaml.preserve_quotes = True  # type: ignore
                yaml.indent(mapping=2, sequence=4, offset=2)

                account_name = accounts_d.get(account_id, account_id)
                if not account_name:
                    account_name = "unknown"
                path_in_repo = os.path.join(
                    repo.working_dir,
                    f"{account_name}/{category}/{resource_name}.yaml")
                os.makedirs(Path(path_in_repo).parent.absolute(),
                            exist_ok=True)

                should_write = True
                to_write = sort_dict(resource)
                if os.path.exists(path_in_repo):
                    with open(path_in_repo, "r") as f:
                        # Unfortunately at the time of writing, ruamel.yaml loads this into ordered dictionaries.
                        # We want this to be the same type as `to_write`, so we use the builtin yaml library to load it
                        existing = builtin_yaml.safe_load(f)
                    if not DeepDiff(to_write, existing, ignore_order=True):
                        should_write = False
                if should_write:
                    with open(path_in_repo, "w") as f:
                        yaml.dump(to_write, f)
        repo.git.add("*")
        if repo.index.diff("HEAD"):
            repo.index.commit(git_message)
            origin = repo.remote("origin")
            origin.pull()
            origin.push("master", force=True)
    except Exception:  # noqa
        sentry_sdk.capture_exception()
    shutil.rmtree(tempdir)
Ejemplo n.º 21
0
from github import Github
from iteration_utilities import unique_everseen
from ruamel.yaml import YAML

GH_TOKEN = os.environ["GH_TOKEN"]
GH_ORG_NAME = os.getenv("GH_ORG_NAME", "cloudposse")
GH_SEARCH_PATTERN = os.getenv("GH_SEARCH_PATTERN", "terraform-")
TF_MODULE_PATH = os.getenv("TF_MODULE_PATH", ".")
TF_CONFIG_INSPECT_BINARY_PATH = os.getenv("TF_CONFIG_INSPECT_BINARY_PATH",
                                          "terraform-config-inspect")
TF_REGISTRY_URL = "https://registry.terraform.io"

gh = Github(GH_TOKEN)
yaml = YAML(typ="rt")
yaml.default_flow_style = False
yaml.preserve_quotes = False


def parse_gh():
    gh_repos = []
    for repo in gh.get_organization(GH_ORG_NAME).get_repos():
        if GH_SEARCH_PATTERN in repo.name:
            repo_object = {}
            repo_object["name"] = repo.name
            repo_object["description"] = repo.description
            repo_object["url"] = repo.html_url
            gh_repos.append(repo_object)
    return gh_repos


def tf_config_inspect():
Ejemplo n.º 22
0
    def extract_to_package_format(self) -> int:
        """Extracts the self.yml_path into several files according to the Demisto standard of the package format.

        Returns:
             int. status code for the operation.
        """
        print("Starting migration of: {} to dir: {}".format(self.yml_path, self.dest_path))
        arg_path = self.dest_path
        output_path = os.path.abspath(self.dest_path)
        os.makedirs(output_path, exist_ok=True)
        base_name = os.path.basename(output_path)
        yml_type = self.get_yml_type()
        code_file = "{}/{}.py".format(output_path, base_name)
        self.extract_code(code_file)
        self.extract_image("{}/{}_image.png".format(output_path, base_name))
        self.extract_long_description("{}/{}_description.md".format(output_path, base_name))
        yaml_out = "{}/{}.yml".format(output_path, base_name)
        print("Creating yml file: {} ...".format(yaml_out))
        ryaml = YAML()
        ryaml.preserve_quotes = True
        with open(self.yml_path, 'r') as yf:
            yaml_obj = ryaml.load(yf)
        script_obj = yaml_obj
        if yml_type == INTEGRATION:
            script_obj = yaml_obj['script']
            del yaml_obj['image']
            if 'detaileddescription' in yaml_obj:
                del yaml_obj['detaileddescription']
        if script_obj['type'] != 'python':
            print('Script is not of type "python". Found type: {}. Nothing to do.'.format(script_obj['type']))
            return 1
        script_obj['script'] = SingleQuotedScalarString('')
        with open(yaml_out, 'w') as yf:
            ryaml.dump(yaml_obj, yf)
        print("Running autopep8 on file: {} ...".format(code_file))
        try:
            subprocess.call(["autopep8", "-i", "--max-line-length", "130", code_file])
        except FileNotFoundError:
            print_color("autopep8 skipped! It doesn't seem you have autopep8 installed.\n"
                        "Make sure to install it with: pip install autopep8.\n"
                        "Then run: autopep8 -i {}".format(code_file), LOG_COLORS.YELLOW)
        print("Detecting python version and setting up pipenv files ...")
        docker = get_docker_images(script_obj)[0]
        py_ver = get_python_version(docker, self.config.log_verbose)
        pip_env_dir = get_pipenv_dir(py_ver, self.config.envs_dirs_base)
        print("Copying pipenv files from: {}".format(pip_env_dir))
        shutil.copy("{}/Pipfile".format(pip_env_dir), output_path)
        shutil.copy("{}/Pipfile.lock".format(pip_env_dir), output_path)
        try:
            subprocess.call(["pipenv", "install", "--dev"], cwd=output_path)
            print("Installing all py requirements from docker: [{}] into pipenv".format(docker))
            requirements = subprocess.check_output(["docker", "run", "--rm", docker,
                                                    "pip", "freeze", "--disable-pip-version-check"],
                                                   universal_newlines=True, stderr=subprocess.DEVNULL).strip()
            fp = tempfile.NamedTemporaryFile(delete=False)
            fp.write(requirements.encode('utf-8'))
            fp.close()

            try:
                subprocess.check_call(["pipenv", "install", "-r", fp.name], cwd=output_path)

            except Exception:
                print_color("Failed installing requirements in pipenv.\n "
                            "Please try installing manually after extract ends\n", LOG_COLORS.RED)

            os.unlink(fp.name)
            print("Installing flake8 for linting")
            subprocess.call(["pipenv", "install", "--dev", "flake8"], cwd=output_path)
        except FileNotFoundError:
            print_color("pipenv install skipped! It doesn't seem you have pipenv installed.\n"
                        "Make sure to install it with: pip3 install pipenv.\n"
                        "Then run in the package dir: pipenv install --dev", LOG_COLORS.YELLOW)
        # check if there is a changelog
        yml_changelog = os.path.splitext(self.yml_path)[0] + '_CHANGELOG.md'
        changelog = arg_path + '/CHANGELOG.md'
        if os.path.exists(yml_changelog):
            shutil.copy(yml_changelog, changelog)
        else:
            with open(changelog, 'wt', encoding='utf-8') as changelog_file:
                changelog_file.write("## [Unreleased]\n-\n")
        print_color("\nCompleted: setting up package: {}\n".format(arg_path), LOG_COLORS.GREEN)
        print("Next steps: \n",
              "* Install additional py packages for unit testing (if needed): cd {}; pipenv install <package>\n".format(
                  arg_path),
              "* Create unit tests\n",
              "* Check linting and unit tests by running: ./Tests/scripts/pkg_dev_test_tasks.py -d {}\n".format(
                  arg_path),
              "* When ready rm from git the source yml and add the new package:\n",
              "    git rm {}\n".format(self.yml_path),
              "    git add {}\n".format(arg_path),
              sep=''
              )
        return 0
Ejemplo n.º 23
0
def yaml() -> YAML:
    """Return default YAML parser."""
    yamlp = YAML(typ='safe', pure=True)
    yamlp.preserve_quotes = True
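    # Note: preserve_quotes is only honoured by the round-trip (typ='rt')
    # loader, so with typ='safe' this setting appears to have no effect.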
    yamlp.default_flow_style = False
    return yamlp
Ejemplo n.º 24
0
from ruamel.yaml import YAML

from chartpress import _check_call
from chartpress import _get_git_remote_url
from chartpress import _get_identifier_from_parts
from chartpress import _get_image_build_args
from chartpress import _get_latest_commit_tagged_or_modifying_paths
from chartpress import _image_needs_pushing
from chartpress import _strip_build_suffix_from_identifier
from chartpress import Builder
from chartpress import GITHUB_TOKEN_KEY

# use safe roundtrip yaml loader
yaml = YAML(typ="rt")
yaml.preserve_quotes = True  ## avoid mangling of quotes
yaml.indent(mapping=2, offset=2, sequence=4)


def test__strip_build_suffix_from_identifier():
    assert (_strip_build_suffix_from_identifier(
        identifier="0.1.2-n005.hasdf1234") == "0.1.2")
    assert (_strip_build_suffix_from_identifier(
        identifier="0.1.2-alpha.1.n005.hasdf1234") == "0.1.2-alpha.1")


def test__get_identifier_from_parts():
    assert (_get_identifier_from_parts(tag="0.1.2",
                                       n_commits="0",
                                       commit="asdf123",
                                       long=True) == "0.1.2-n000.hasdf123")
    assert (_get_identifier_from_parts(tag="0.1.2",
Ejemplo n.º 25
0
from typing import Union

import demisto_sdk.commands.common.content.errors as exc
from ruamel.yaml import YAML
from ruamel.yaml.scanner import ScannerError
from wcmatch.pathlib import EXTGLOB, NEGATE, Path

from .dictionary_based_object import DictionaryBasedObject

RUYAML = YAML(typ='rt')
RUYAML.preserve_quotes = True  # type: ignore
RUYAML.width = 50000  # type: ignore


class YAMLObject(DictionaryBasedObject):
    def __init__(self, path: Union[Path, str], file_name_prefix: str = ""):
        super().__init__(path=path, file_name_prefix=file_name_prefix)

    @staticmethod
    def _fix_path(path: Union[Path, str]):
        """Find and validate object path is valid.

        Rules:
            1. Path exists.
            2. One of the following options:
                a. Path is a file.
                b. Path is directory and file with a yml/yaml suffix exists in the given directory.
            3. File suffix equal "yml" or "yaml".

        Returns:
            Path: valid file path.
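
        # A sketch of how these rules might be applied, inferred from the
        # docstring above (not necessarily the library's actual implementation):
        #   path = Path(path)
        #   if path.is_dir():
        #       found = sorted(path.glob("*.yml")) + sorted(path.glob("*.yaml"))
        #       if not found:
        #           raise exc.ContentInitializeError(...)  # args omitted
        #       path = found[0]
        #   elif not path.exists() or path.suffix not in (".yml", ".yaml"):
        #       raise exc.ContentInitializeError(...)  # args omitted
        #   return path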
Ejemplo n.º 26
0
def pretty_format_yaml(argv=None):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--autofix',
        action='store_true',
        dest='autofix',
        help='Automatically fixes encountered not-pretty-formatted files',
    )
    parser.add_argument(
        '--indent',
        type=int,
        default='2',
        help=('The number of indent spaces or a string to be used as delimiter'
              ' for indentation level e.g. 4 or "\t" (Default: 2)'),
    )
    parser.add_argument(
        '--preserve-quotes',
        action='store_true',
        dest='preserve_quotes',
        help='Keep existing string quoting',
    )

    parser.add_argument('filenames', nargs='*', help='Filenames to fix')
    args = parser.parse_args(argv)

    status = 0

    yaml = YAML()
    yaml.indent = args.indent
    yaml.preserve_quotes = args.preserve_quotes
    # Prevent ruamel.yaml from wrapping long yaml lines
    yaml.width = maxsize

    separator = '---\n'

    for yaml_file in set(args.filenames):
        with open(yaml_file) as f:
            string_content = ''.join(f.readlines())

        # Split multi-document file into individual documents
        #
        # Not using yaml.load_all() because it reformats primitive (non-YAML) content. It removes
        # newline characters.
        separator_pattern = r'^---\s*\n'
        original_docs = re.split(separator_pattern,
                                 string_content,
                                 flags=re.MULTILINE)

        # A valid multi-document YAML file might start with the separator.
        # In this case the first document of original_docs will be empty and should not be considered
        if string_content.startswith('---'):
            original_docs = original_docs[1:]

        pretty_docs = []

        try:
            for doc in original_docs:
                content = _process_single_document(doc, yaml)
                if content is not None:
                    pretty_docs.append(content)

            # Start multi-doc file with separator
            pretty_content = '' if len(pretty_docs) == 1 else separator
            pretty_content += separator.join(pretty_docs)

            if string_content != pretty_content:
                print('File {} is not pretty-formatted'.format(yaml_file))

                if args.autofix:
                    print('Fixing file {}'.format(yaml_file))
                    with io.open(yaml_file, 'w', encoding='UTF-8') as f:
                        f.write(text_type(pretty_content))

                status = 1
        except YAMLError:  # pragma: no cover
            print(
                'Input File {} is not a valid YAML file, consider using check-yaml'
                .format(yaml_file, ), )
            return 1

    return status
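
# _process_single_document() is not shown in this snippet; a minimal helper
# consistent with how it is used above (re-dump one document with the
# configured YAML instance, skipping empty documents) might look like:
from io import StringIO


def _process_single_document(document, yaml):
    content = yaml.load(document)
    if content is None:
        # empty document: nothing to pretty-print
        return None
    out = StringIO()
    yaml.dump(content, out)
    return out.getvalue()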
Ejemplo n.º 27
0
def load(obj_str: str, default_flow_style=False) -> dict:
    yaml = YAML()
    yaml.default_flow_style = default_flow_style
    yaml.preserve_quotes = True
    return yaml.load(obj_str)
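
# Usage sketch with a hypothetical document string:
#   cfg = load("retries: 3\nname: 'demo'\n")
#   cfg["retries"]  -> 3
# Because preserve_quotes is enabled, the single quotes around 'demo' survive a
# later round-trip dump of the returned data.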
Ejemplo n.º 28
0
    def create_compose_file(self):
        ds = {
            'version': '3',
            'networks': {
                'ssonet': {
                    'ipam': {
                        'config': [{
                            'subnet': '172.23.0.0/24'
                        }]
                    }
                }
            },
            'volumes': {
                'local_postgres_data': {},
                'local_postgres_data_backups': {},
                'local_zookeeper_data': {},
                'local_kafka_data': {}
            },
            'services': {
                'sso.local.redhat.com': {
                    'container_name': 'sso.local.redhat.com',
                    'image': 'quay.io/keycloak/keycloak:11.0.0',
                    'environment': {
                        'DB_VENDOR': 'h2',
                        'PROXY_ADDRESS_FORWARDING': "true",
                        'KEYCLOAK_USER': '******',
                        'KEYCLOAK_PASSWORD': '******',
                    },
                    #'ports': ['8443:8443'],
                    'expose': [8443],
                    'networks': {
                        'ssonet': {
                            'ipv4_address': '172.23.0.3'
                        }
                    }
                },
                'kcadmin': {
                    'container_name':
                    'kcadmin',
                    'image':
                    'python:3',
                    'build': {
                        'context':
                        f"{os.path.join(self.checkouts_root, 'keycloak_admin')}",
                    },
                    'volumes': [
                        f"./{os.path.join(self.checkouts_root, 'keycloak_admin')}:/app"
                    ],
                    'depends_on': ['sso.local.redhat.com'],
                    #'command': '/bin/bash -c "cd /app && pip install -r requirements.txt && flask run --host=0.0.0.0 --port=80"'
                    'command':
                    '/bin/bash -c "cd /app && pip install -r requirements.txt && python -c \'from kchelper import init_realm; init_realm()\' && flask run --host=0.0.0.0 --port=80"',
                    'networks': {
                        'ssonet': {
                            'ipv4_address': '172.23.0.4'
                        }
                    }
                },
                'insights_proxy': {
                    'container_name':
                    'insights_proxy',
                    'image':
                    'redhatinsights/insights-proxy',
                    #'ports': ['1337:1337'],
                    'ports': ['8443:8443'],
                    'environment': {
                        'PLATFORM': 'linux',
                        'CUSTOM_CONF': 'true',
                        'SPANDX_PORT': 8443
                    },
                    'security_opt': ['label=disable'],
                    'extra_hosts': ['prod.foo.redhat.com:127.0.0.1'],
                    'volumes': [
                        f'./{os.path.join(self.checkouts_root, "www", "spandx.config.js")}:/config/spandx.config.js'
                    ]
                },
                'webroot': {
                    'container_name':
                    'webroot',
                    'image':
                    'nginx',
                    'volumes': [
                        f"./{os.path.join(self.checkouts_root, 'www')}:/usr/share/nginx/html",
                        f"./{os.path.join(self.checkouts_root, 'nginx.conf.d')}:/etc/nginx/conf.d"
                    ],
                    'command': ['nginx-debug', '-g', 'daemon off;']
                },
                'chrome': {
                    'container_name':
                    'chrome',
                    'image':
                    'nginx',
                    'volumes': [
                        f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                    ],
                    'command': ['nginx-debug', '-g', 'daemon off;']
                },
                'chrome_beta': {
                    'container_name':
                    'chrome_beta',
                    'image':
                    'nginx',
                    'volumes': [
                        f"./{os.path.join(self.checkouts_root, 'insights-chrome')}:/usr/share/nginx/html"
                    ],
                    'command': ['nginx-debug', '-g', 'daemon off;']
                },
                'entitlements': {
                    'container_name':
                    'entitlements',
                    'image':
                    'python:3',
                    'build': {
                        'context':
                        f"{os.path.join(self.checkouts_root, 'entitlements')}",
                    },
                    'volumes': [
                        f"./{os.path.join(self.checkouts_root, 'entitlements')}:/app"
                    ],
                    'command':
                    '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
                },
                'rbac': {
                    'container_name':
                    'rbac',
                    'image':
                    'python:3',
                    'build': {
                        'context':
                        f"{os.path.join(self.checkouts_root, 'rbac')}",
                    },
                    'volumes':
                    [f"./{os.path.join(self.checkouts_root, 'rbac')}:/app"],
                    'command':
                    '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
                }
            }
        }

        ds['services'].update(self.get_tower_analytics_frontend_service())
        ds['services'].update(self.get_landing_services())
        #import epdb; epdb.st()

        # macs can't do static IPs
        if platform.system().lower() == 'darwin':
            ds.pop('networks', None)

        # Add squid for the mac users who can't directly connect to containers
        if not self.args.integration:
            squid_logs = os.path.join(self.checkouts_root, 'squid', 'logs')
            squid_conf = os.path.join(self.checkouts_root, 'squid', 'conf')
            if not os.path.exists(squid_logs):
                os.makedirs(squid_logs)
            ds['services']['squid'] = {
                'container_name':
                'squid',
                'image':
                'datadog/squid',
                'ports': ['3128:3128'],
                'volumes': [
                    f"./{squid_conf}:/etc/squid",
                    f"./{squid_logs}:/var/log/squid",
                ]
            }

        if True:
            ds['services']['kcadmin'].pop('networks', None)
            ds['services']['sso.local.redhat.com'].pop('networks', None)
            ds['services']['sso.local.redhat.com'].pop('depends_on', None)

            pf = copy.deepcopy(ds['services']['insights_proxy'])
            pf['container_name'] = 'prod.foo.redhat.com'
            ds['services'].pop('insights_proxy', None)
            ds['services']['prod.foo.redhat.com'] = pf

        # if static, chrome/landing/frontend should be compiled and put into wwwroot
        if self.args.static:

            if 'all' in self.args.static or 'chrome' in self.args.static:
                ds['services'].pop('chrome', None)
                ds['services'].pop('chrome_beta', None)
            if 'all' in self.args.static or 'landing' in self.args.static:
                ds['services'].pop('landing', None)
            if 'all' in self.args.static or 'tower-analytics-frontend' in self.args.static:
                ds['services'].pop('aafrontend', None)

            for fc in self.frontend_services:
                if 'all' in self.args.static or fc.www_app_name in self.args.static:
                    for dp in fc.www_deploy_paths:
                        src = os.path.join(fc.srcpath, fc.distdir)
                        dst = f"/usr/share/nginx/html/{dp}"
                        volume = f"./{src}:{dst}"
                        ds['services']['webroot']['volumes'].append(volume)
                        #import epdb; epdb.st()

        # build the backend?
        if self.args.backend_mock:
            aa_be_srcpath = os.path.join(self.checkouts_root,
                                         'aa_backend_mock')
            bs = {
                'container_name':
                'aabackend',
                'image':
                'python:3',
                'build': {
                    'context': f"./{aa_be_srcpath}"
                },
                'environment': {
                    'API_SECURE': '1',
                },
                'volumes': [f"./{aa_be_srcpath}:/app"],
                'command':
                '/bin/bash -c "cd /app && pip install -r requirements.txt && python api.py"'
            }
            ds['services']['aabackend'] = bs
        elif self.args.backend_address:
            pass
        else:
            svcs = self.get_backend_compose_services()
            ds['services'].update(svcs)
            #import epdb; epdb.st()

        if self.args.integration:
            ds['services']['integration'] = self.get_integration_compose()

        yaml = YAML(typ='rt', pure=True)
        yaml.preserve_quotes = False
        yaml.indent = 4
        yaml.block_seq_indent = 4
        yaml.explicit_start = True
        yaml.width = 1000
        yaml.default_flow_style = False

        with open('genstack.yml', 'w') as f:
            yaml.dump(ds, f)

        # fix port quoting for sshd: an unquoted 2222:22 mapping can be read as a
        # YAML 1.1 base-60 integer, so force single quotes around it
        with open('genstack.yml', 'r') as f:
            fyaml = f.read()
        fyaml = fyaml.replace('2222:22', '\'2222:22\'')
        with open('genstack.yml', 'w') as f:
            f.write(fyaml)
Ejemplo n.º 29
0
import sys
from pathlib import Path
from ruamel.yaml import YAML

val = sys.argv[1]
subst = sys.argv[2]
file_name = Path(sys.argv[3])

def update(d, val, sub):
    if isinstance(d, dict):
        for k in d:
            v = d[k]
            if v == val:
                d[k] = sub
            else:
                update(v, val, sub)
    elif isinstance(d, list):
        for item in d:
            update(item, val, sub)

yaml = YAML()
yaml.preserve_quotes = True  # to preserve superfluous quotes in the input
data = yaml.load(file_name)
update(data, val, subst)
yaml.dump(data, file_name)
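
# Usage sketch (assuming the script above is saved as, say, replace_value.py):
#   python replace_value.py OLD NEW path/to/file.yaml
# Every mapping value equal to OLD anywhere in the document (including inside
# nested lists) is replaced by NEW; quoting and comments are preserved by the
# round-trip dump.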
Ejemplo n.º 30
0
import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, PLAYBOOKS_DIR
from demisto_sdk.commands.common.content.errors import (ContentInitializeError,
                                                        ContentSerializeError)
from demisto_sdk.commands.common.content.objects.abstract_objects import \
    YAMLObject
from demisto_sdk.commands.common.tools import src_root
from ruamel.yaml import YAML

TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'
TEST_VALID_YAML = TEST_CONTENT_REPO / PACKS_DIR / 'Sample01' / PLAYBOOKS_DIR / 'playbook-sample_new.yml'
TEST_NOT_VALID_YAML = TEST_DATA / 'malformed.yaml'

RUYAML = YAML(typ='rt')
RUYAML.preserve_quotes = True
RUYAML.width = 50000


class TestValidYAML:
    def test_valid_yaml_file_path(self):
        obj = YAMLObject(TEST_VALID_YAML)
        assert obj.to_dict() == RUYAML.load(TEST_VALID_YAML.open())

    def test_get_item(self):
        obj = YAMLObject(TEST_VALID_YAML)

        assert obj["fromversion"] == RUYAML.load(
            TEST_VALID_YAML.open())["fromversion"]

    @pytest.mark.parametrize(argnames="default_value",
Ejemplo n.º 31
0
def experiment(args):
    print(f"Running experiment with args: {args}")
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    args.train_batch_num -= 1
    args.val_batch_num -= 1
    args.eval_batch_num -= 1

    # TODO (JON): What is yaml for right now?
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.boolean_representation = ['False', 'True']

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    if args.cuda: torch.cuda.manual_seed(args.seed)

    ########## Setup dataset
    # TODO (JON): Verify that the loaders are shuffling the validation / test sets.
    if args.dataset == 'MNIST':
        num_train = args.datasize
        if num_train == -1: num_train = 50000
        train_loader, val_loader, test_loader = load_mnist(
            args.batch_size,
            subset=[args.datasize, args.valsize, args.testsize],
            num_train=num_train)
        in_channel = 1
        imsize = 28
        fc_shape = 800
        num_classes = 10
    else:
        raise Exception("Must choose MNIST dataset")
    # TODO (JON): Right now we are not using the test loader for anything.  Should evaluate it occasionally.

    ##################### Setup model
    if args.model == "mlp":
        model = Net(args.num_layers,
                    args.dropout,
                    imsize,
                    in_channel,
                    args.l2,
                    num_classes=num_classes)
    else:
        raise Exception("bad model")

    hyper = init_hyper_train(args, model)  # We need this when doing all_weight
    if args.cuda:
        model = model.cuda()
        model.weight_decay = model.weight_decay.cuda()
        # model.Gaussian.dropout = model.Gaussian.dropout.cuda()

    ############ Setup Optimizer
    # TODO (JON):  Add argument for other optimizers?
    init_optimizer = torch.optim.Adam(model.parameters(),
                                      lr=args.lr)  # , momentum=0.9)
    hyper_optimizer = torch.optim.RMSprop([get_hyper_train(
        args, model)])  # , lr=args.lrh)  # try 0.1 as lr

    ############## Setup Inversion Algorithms
    KFAC_damping = 1e-2
    kfac_opt = KFACOptimizer(model, damping=KFAC_damping)  # sec_optimizer

    ########### Perform the training
    global_step = 0
    hp_k, update = 0, 0
    for epoch_h in range(0, args.hepochs + 1):
        print(f"Hyper epoch: {epoch_h}")
        if (epoch_h) % args.hyper_log_interval == 0:
            if args.hyper_train == 'opt_data':
                if args.dataset == 'MNIST':
                    save_learned(
                        get_hyper_train(args,
                                        model).reshape(args.batch_size, imsize,
                                                       imsize), True,
                        args.batch_size, args)
            elif args.hyper_train == 'various':
                print(
                    f"saturation: {torch.sigmoid(model.various[0])}, brightness: {torch.sigmoid(model.various[1])}, decay: {torch.exp(model.various[2])}"
                )
            eval_train_corr, eval_train_loss = evaluate(
                args, model, global_step, train_loader, 'train')
            # TODO (JON):  I don't know if we want normal train loss, or eval?
            eval_val_corr, eval_val_loss = evaluate(args, model, epoch_h,
                                                    val_loader, 'valid')
            eval_test_corr, eval_test_loss = evaluate(args, model, epoch_h,
                                                      test_loader, 'test')
            if args.break_perfect_val and eval_val_corr >= 0.999 and eval_train_corr >= 0.999:
                break

        min_loss = 10e8
        elementary_epochs = args.epochs
        if epoch_h == 0:
            elementary_epochs = args.init_epochs
        if True:  # epoch_h == 0:
            optimizer = init_optimizer
        # else:
        #    optimizer = sec_optimizer
        for epoch in range(1, elementary_epochs + 1):
            global_step, epoch_train_loss = train(args, model, train_loader,
                                                  optimizer, train_loss_func,
                                                  kfac_opt, epoch, global_step)
            if np.isnan(epoch_train_loss):
                print("Loss is nan, stop the loop")
                break
            elif False:  # epoch_train_loss >= min_loss:
                print(
                    f"Breaking on epoch {epoch}. train_loss = {epoch_train_loss}, min_loss = {min_loss}"
                )
                break
            min_loss = epoch_train_loss
        # if epoch_h == 0:
        #     continue

        hp_k, update = KFAC_optimize(args, model, train_loader, val_loader,
                                     hyper_optimizer, kfac_opt, KFAC_damping,
                                     epoch_h)
Ejemplo n.º 32
0
def yaml() -> YAML:
    """Return default YAML parser."""
    yamlp = YAML(typ='safe', pure=True)
    yamlp.preserve_quotes = True
    yamlp.default_flow_style = False
    return yamlp
Ejemplo n.º 33
0
    def _parse_sequence(self):
        """Parse a sequence file to be able to provide information about it.

        Make sure the sequence file is valid before calling this function.
        File can be checked with `SequenceReader.check_sequence`.
        """
        with open(self._seq_file_path) as f:
            yaml = YAML(typ='safe')
            yaml.preserve_quotes = True
            loaded = yaml.load(f)

        # Collect constants
        if 'constants' in loaded['sequence']:
            self._constants = loaded['sequence']['constants']
        else:
            self._constants = dict()

        # Create node objects
        for node_dict in loaded['sequence']['nodes']:
            ntype = node_dict['type']

            if ntype == "function":
                # list of wrappers is converted into an OrderedDict
                wrapper_list = node_dict.get('wrappers')
                if wrapper_list:
                    wrapper_dict = OrderedDict()
                    for wrapper in wrapper_list:
                        # A wrapper can be written as a simple string (name)
                        # or as a dict if some arguments are given.
                        if isinstance(wrapper, str):
                            wrapper_dict[wrapper] = {}
                        elif isinstance(wrapper, dict):
                            wrapper_dict.update(wrapper)
                        else:
                            raise SequenceFileError(
                                ('The following wrapper is neither a str '
                                 'nor a dict: {}').format(wrapper))
                else:
                    wrapper_dict = None
                # create function node
                new_node = FunctionNode(
                    nid=node_dict.get('id'),
                    name=node_dict.get('name'),
                    transitions=node_dict.get('transitions'),
                    function_name=node_dict.get('function'),
                    function_kwargs=node_dict.get('arguments'),
                    timeout=node_dict.get('timeout'),
                    return_var_name=node_dict.get('return'),
                    wrappers=wrapper_dict)

            elif ntype == "start":
                new_node = StartNode(nid=node_dict.get('id'),
                                     name=node_dict.get('name'),
                                     transitions=node_dict.get('transitions'))

            elif ntype == "stop":
                new_node = StopNode(nid=node_dict.get('id'),
                                    name=node_dict.get('name'))

            elif ntype == "variable":
                new_node = VariableNode(
                    nid=node_dict.get('id'),
                    name=node_dict.get('name'),
                    transitions=node_dict.get('transitions'),
                    variables=node_dict.get('variables'))

            elif ntype == "parallel_split":
                new_node = ParallelSplitNode(
                    nid=node_dict.get('id'),
                    name=node_dict.get('name'),
                    transitions=node_dict.get('transitions'))

            elif ntype == "parallel_sync":
                new_node = ParallelSyncNode(
                    nid=node_dict.get('id'),
                    name=node_dict.get('name'),
                    transitions=node_dict.get('transitions'))

            else:
                raise SequenceFileError("Node n°{} has an unknown type "
                                        "{}.".format(node_dict['id'],
                                                     node_dict['type']))
            # Add the new node to set of nodes in the SequenceReader
            self._nodes.add(new_node)
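
        # Shape of a sequence file this parser accepts, inferred from the keys
        # read above (illustrative only; ids, names and the exact transitions
        # format are hypothetical):
        #
        #   sequence:
        #     constants:
        #       retry_delay: 5
        #     nodes:
        #       - id: 0
        #         type: start
        #         transitions: [1]
        #       - id: 1
        #         type: function
        #         function: do_work
        #         arguments: {count: 3}
        #         return: result
        #         transitions: [2]
        #       - id: 2
        #         type: stop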