Example #1
0
def write_output_document(
    args: argparse.Namespace, log: ConsolePrinter, yaml_editor: YAML,
    docs: List[Merger]
) -> None:
    """Write all merged documents to the requested output target.

    Saves a backup of the overwrite file when requested, then dumps every
    document either to ``args.output`` (when set) or to STDOUT, as JSON or
    YAML depending on how the first document prepares for dumping.

    Parameters:
    1. args (argparse.Namespace) parsed command-line arguments
    2. log (ConsolePrinter) console logger
    3. yaml_editor (YAML) ruamel.yaml document processor
    4. docs (List[Merger]) merged documents to write

    Returns: N/A
    """
    # Save a backup of the overwrite file, if requested.
    if args.backup:
        backup_file = args.overwrite + ".bak"
        log.verbose(
            "Saving a backup of {} to {}."
            .format(args.overwrite, backup_file))
        if exists(backup_file):
            remove(backup_file)
        copy2(args.overwrite, backup_file)

    # The first document determines whether output is JSON or YAML.
    document_is_json = (
        docs[0].prepare_for_dump(yaml_editor, args.output)
        is OutputDocTypes.JSON)

    dumps = []
    for doc in docs:
        doc.prepare_for_dump(yaml_editor, args.output)
        dumps.append(doc.data)

    # Identical dump logic for file and STDOUT targets; only the stream
    # differs, so it is factored into _dump_documents.
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as out_fhnd:
            _dump_documents(yaml_editor, dumps, document_is_json, out_fhnd)
    else:
        _dump_documents(yaml_editor, dumps, document_is_json, sys.stdout)


def _dump_documents(yaml_editor, dumps, document_is_json, stream) -> None:
    """Dump prepared document data to an open stream as JSON or YAML."""
    if document_is_json:
        if len(dumps) > 1:
            # Multi-document JSON output: one JSON text per line.
            for dump in dumps:
                print(
                    json.dumps(Parsers.jsonify_yaml_data(dump)),
                    file=stream)
        else:
            json.dump(Parsers.jsonify_yaml_data(dumps[0]), stream)
    else:
        if len(dumps) > 1:
            # Explicit end markers separate multiple YAML documents.
            yaml_editor.explicit_end = True  # type: ignore
            yaml_editor.dump_all(dumps, stream)
        else:
            yaml_editor.dump(dumps[0], stream)
def _init_yaml() -> YAML:
    """Build a YAML serializer preconfigured for these documents.

    Registers representers for the custom/third-party types that appear in
    the documents (FileFormat, UUID, datetime, paths, and the numpy scalar
    and array types WAGL emits), then applies yamllint-friendly formatting.
    """
    yaml = YAML()

    yaml.representer.add_representer(FileFormat, _format_representer)
    yaml.representer.add_multi_representer(UUID, _uuid_representer)
    yaml.representer.add_representer(datetime, represent_datetime)
    yaml.representer.add_multi_representer(PurePath, represent_paths)

    # WAGL spits out many numpy primitives in docs.
    # NOTE: numpy.int and numpy.float were mere aliases of the builtins and
    # were removed in NumPy 1.24; registering them raised AttributeError on
    # modern NumPy, so only the sized scalar types are mapped here.
    for np_int_type in (numpy.int8, numpy.uint8, numpy.int16, numpy.uint16,
                        numpy.int32, numpy.uint32, numpy.int64, numpy.uint64):
        yaml.representer.add_representer(np_int_type,
                                         Representer.represent_int)
    for np_float_type in (numpy.float32, numpy.float64):
        yaml.representer.add_representer(np_float_type,
                                         Representer.represent_float)
    yaml.representer.add_representer(numpy.ndarray, Representer.represent_list)
    yaml.representer.add_representer(numpy.datetime64,
                                     represent_numpy_datetime)

    # Match yamllint default expectations. (Explicit start/end are
    # recommended to tell if a file is cut off.)
    yaml.width = 80
    yaml.explicit_start = True
    yaml.explicit_end = True

    return yaml
def _watcher(osde2ectl_cmd, account_config, my_path, cluster_count, delay,
             my_uuid):
    """Poll osde2ectl periodically and log aggregate cluster state/status.

    Dumps the account config to disk, then loops: runs ``osde2ectl list``,
    counts cluster states and statuses from its output, and logs a summary
    every ``delay`` seconds. Runs until the owning thread's ``run``
    attribute is set to a falsy value.
    """
    logging.info('Watcher thread started')
    # Lazy %-style args instead of eager string formatting.
    logging.info('Getting status every %d seconds', int(delay))
    yaml = YAML(pure=True)
    yaml.default_flow_style = False
    yaml.explicit_start = False
    yaml.explicit_end = False
    yaml.allow_duplicate_keys = True
    account_config_path = my_path + "/account_config.yaml"
    # Close file handles deterministically (the original leaked both).
    with open(account_config_path, 'w') as config_out:
        yaml.dump(account_config, config_out)
    with open(account_config_path) as config_in:
        my_config = yaml.load(config_in)
    # current_thread(): currentThread() is deprecated since Python 3.10.
    my_thread = threading.current_thread()
    cmd = [osde2ectl_cmd, "list", "--custom-config", "account_config.yaml"]
    # To stop the watcher we expect the run attribute to be not True
    while getattr(my_thread, "run", True):
        logging.debug(cmd)
        process = subprocess.Popen(cmd,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   cwd=my_path,
                                   universal_newlines=True)
        stdout, stderr = process.communicate()

        cluster_count = 0
        state = {}
        status = {}
        error = []
        # Count the various states/statuses and report them to logging
        for line in stdout.splitlines():
            if my_config['ocm']['userOverride'] in line:
                cluster_count += 1
                # Split once; columns 2 and 3 hold state and status.
                fields = line.split()
                state_key = fields[2]
                status_key = fields[3]
                state[state_key] = state.get(state_key, 0) + 1
                status[status_key] = status.get(status_key, 0) + 1

                if state_key == "error":
                    error.append(fields[1])
                    logging.debug(fields[1])

        logging.info('Requested Clusters for test %s: %d',
                     my_uuid, cluster_count)
        if cluster_count != 0:
            logging.debug(state.items())
            logging.debug(status.items())
            state_output = "Current clusters state: " + str(
                cluster_count) + " clusters"
            status_output = "Current clusters status: " + str(
                cluster_count) + " clusters"
            for state_key, count in state.items():
                state_output += " (" + str(state_key) + ": " + str(count) + ")"
            for status_key, count in status.items():
                status_output += (
                    " (" + str(status_key) + ": " + str(count) + ")")
            logging.info(state_output)
            logging.info(status_output)
            if error:
                logging.warning('Clusters in error state: %s', error)

        time.sleep(delay)
    logging.info('Watcher exiting')
def main(v6_prefix):
    """
    Execution begins here.
    """

    # Load MAC addresses from file
    with open("input_macs.txt", "r") as handle:
        lines = handle.readlines()

    # Ansible inventory skeleton; host entries are filled in below
    host_map = {}
    ansible_inv = {"all": {"children": {"remotes": {"hosts": host_map}}}}

    # Iterate over the lines read from file
    for index, line in enumerate(lines):

        # Normalize: strip whitespace, lowercase, drop common delimiters
        mac = line.strip().lower()
        for delim in ("-", ":", "."):
            mac = mac.replace(delim, "")

        # If MAC is invalid, skip it and continue with the next MAC
        if not is_valid_mac(mac):
            continue

        # Low-order 64 bits: split the MAC in half and insert ff:fe
        host_addr = f"{mac[:4]}:{mac[4:6]}ff:fe{mac[6:8]}:{mac[8:]}"

        # Flip the universal/local bit (7th bit of the first byte, i.e.
        # 3rd bit of the second nibble) using xor
        flipped_nibble = hex(int(host_addr[1], 16) ^ 2)[-1]

        # Re-assemble host bits with the flipped nibble plus IPv6 prefix
        eui64_addr = f"{v6_prefix}{host_addr[:1]}{flipped_nibble}{host_addr[2:]}"

        # Display MAC address and newly-computed EUI-64 IPv6 address
        print(mac, eui64_addr)

        # Record the host. The hostname is "node_" plus the 1-based input
        # line number (user can modify). The IPv6 address is the address to
        # which Ansible connects and the original MAC is retained for
        # documentation/troubleshooting
        host_map[f"node_{index + 1}"] = {
            "ansible_host": DoubleQuotedScalarString(eui64_addr),
            "original_mac": DoubleQuotedScalarString(mac),
        }

    # YAML writer preserving quotes, with explicit start (---) and
    # end (...) markers
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.explicit_start = True
    yaml.explicit_end = True

    # Dump the Ansible inventory to a new file for use later
    with open("eui64_hosts.yml", "w") as handle:
        yaml.dump(ansible_inv, handle)
Example #5
0
    def __open_yaml():
        """Return a safe ruamel YAML instance: block style, explicit markers."""
        from ruamel.yaml import YAML

        safe_yaml = YAML(typ="safe", pure=False)
        safe_yaml.default_flow_style = False
        safe_yaml.explicit_start = True
        safe_yaml.explicit_end = True
        safe_yaml.indent(mapping=4, sequence=4, offset=2)
        return safe_yaml
Example #6
0
 def list_products_yaml(self, hostname, system):
     """Dump the system's refhost data, tagged with hostname, to self.output."""
     from ruamel.yaml import YAML

     writer = YAML(typ='safe', pure=False)
     writer.default_flow_style = False
     writer.explicit_start = True
     writer.explicit_end = True
     writer.indent(mapping=4, sequence=4, offset=2)
     payload = system.to_refhost_dict()
     payload["name"] = str(hostname)
     writer.dump(payload, self.output)
Example #7
0
 def list_products_yaml(self, hostname, system):
     """Write the refhost description of *system*, named *hostname*, as YAML."""
     from ruamel.yaml import YAML

     emitter = YAML(typ='safe', pure=False)
     emitter.default_flow_style = False
     emitter.explicit_end = True
     emitter.explicit_start = True
     emitter.indent(mapping=4, sequence=4, offset=2)
     refhost_data = system.to_refhost_dict()
     refhost_data["name"] = str(hostname)
     emitter.dump(refhost_data, self.output)
Example #8
0
def main(mgmt_prefix):
    """
    Execution starts here.
    """

    # Network object used to test whether learned prefixes fall inside
    # the management range
    mgmt_net = IPv6Network(mgmt_prefix)

    # Open an SSH session to the device via netmiko
    conn = Netmiko(
        host="192.0.2.1",
        username="******",
        password="******",
        device_type="cisco_ios",
    )

    # Should be using "show bgp ipv6 unicast" but code has bug
    # https://github.com/CiscoTestAutomation/genieparser/issues/362
    resp = conn.send_command("show bgp all", use_genie=True)
    v6_rte = resp["vrf"]["default"]["address_family"]["ipv6 unicast"]["routes"]

    # Ansible inventory skeleton; host entries are filled in below
    host_map = {}
    ansible_inv = {"all": {"children": {"remotes": {"hosts": host_map}}}}

    # Walk every BGP prefix the device reported
    for index, prefix in enumerate(v6_rte.keys()):

        prefix_net = IPv6Network(prefix.lower())

        # Keep only /128 host routes contained in the management network
        if not (prefix_net.subnet_of(mgmt_net) and prefix.endswith("/128")):
            continue

        # Record the host address, double-quoted for the YAML dump
        prefix_str = DoubleQuotedScalarString(prefix_net.network_address)
        host_map[f"node_{index + 1}"] = {"ansible_host": prefix_str}
        print(prefix_str)

    # Close connection when finished
    conn.disconnect()

    # YAML writer preserving quotes, with explicit start (---) and
    # end (...) markers
    yaml = YAML()
    yaml.preserve_quotes = True
    yaml.explicit_start = True
    yaml.explicit_end = True

    # Dump the Ansible inventory to a new file for use later
    with open("bgp_hosts.yml", "w") as handle:
        yaml.dump(ansible_inv, handle)
Example #9
0
def test_qualified_name01(tmpdir):
    """issue 214"""
    from ruamel.yaml import YAML
    import ruamel.yaml.comments
    from ruamel.yaml.compat import StringIO

    dumper = YAML(typ='unsafe', pure=True)
    dumper.explicit_end = True
    target = ruamel.yaml.comments.CommentedBase.yaml_anchor
    stream = StringIO()
    dumper.dump(target, stream)
    serialized = stream.getvalue()
    expected = "!!python/name:ruamel.yaml.comments.CommentedBase.yaml_anchor ''\n...\n"
    assert serialized == expected
    # Round-trip: the qualified name must load back to the same callable.
    assert dumper.load(serialized) == target
Example #10
0
def write_secrets(template_file, secrets_file):
    """Fill empty values in a YAML secrets template with random passwords.

    Reads ``template_file``, generates a cryptographically secure random
    password for every key whose value is missing or empty, makes a
    timestamped backup of any existing ``secrets_file``, and writes the
    completed data to ``secrets_file``.
    """
    # Local import: secure randomness for password generation ('random' is
    # not suitable for secrets).
    import secrets

    template_path = Path(template_file)
    secrets_path = Path(secrets_file)
    #
    # Configure ruamel YAML to:
    #  * preserve comments from the template.
    #  * preserve the order of items in the template
    #  * preserve quotes for value.
    #
    yaml = YAML()
    yaml.default_flow_style = False
    yaml.explicit_start = True
    yaml.explicit_end = True
    yaml.preserve_quotes = True
    #
    # Read YAML template.
    #
    data = yaml.load(template_path)
    #
    # Append new random passwords.
    #
    # '\\|' keeps the original character set (a literal backslash and a
    # pipe) while avoiding the invalid "\|" escape-sequence warning.
    charset = (string.ascii_letters + string.digits
               + '!?@%&[]^_+-{}<=>~.,;:\\|/')
    for key in data.keys():
        if data[key] is None or data[key] == '':
            print('INFO: generating new password for key "' + key + '" ...')
            #
            # Length of generated passwords is semi random too for extra
            # complexity. We use a minimum password length of 60 and a max
            # length of 80 (inclusive).
            #
            pass_length = 60 + secrets.randbelow(21)
            data[key] = ''.join(
                secrets.choice(charset) for _ in range(pass_length))
        else:
            print('INFO: preserving existing value "' + data[key] + '" for key "' + key + '".')
    #
    # Make numbered backups of the secrets file.
    #
    if path.isfile(secrets_file):
        # astimezone() yields an offset-aware local time so %z is non-empty
        # (on a naive datetime it silently produced an empty string).
        timestamp_object = datetime.now().astimezone()
        timestamp_string = timestamp_object.strftime(".%Y-%m-%dT%H:%M:%S%z")
        rename(secrets_file, secrets_file + timestamp_string)
    #
    # Write new secrets.yml file.
    #
    yaml.dump(data, secrets_path)
Example #11
0
def get_opinionated_yaml_writer(
    yamkix_config: YamkixConfig,
) -> YAML:
    """Configure a yaml parser/formatter the yamkix way.

    Args:
        yamkix_config: a YamkixConfig instance
    Returns:
        a ruamel.yaml YAML instance
    """
    writer = YAML(typ=yamkix_config.parsing_mode)
    writer.width = yamkix_config.line_width
    writer.default_flow_style = yamkix_config.default_flow_style
    writer.preserve_quotes = yamkix_config.quotes_preserved
    writer.explicit_start = yamkix_config.explicit_start
    writer.explicit_end = yamkix_config.explicit_end
    # Optional "dash inwards" sequence indentation style.
    if yamkix_config.dash_inwards:
        writer.indent(mapping=2, sequence=4, offset=2)
    return writer
Example #12
0
def write_yaml(filename: str, dictionary: dict):
    """Serialize a dictionary to a YAML file.

    Args:
        filename: path of the output file.
        dictionary: data to serialize.

    Raises:
        TypeError: if ``dictionary`` is not a dict.
    """
    yml = YAML()
    yml.explicit_start = True
    yml.default_flow_style = False
    yml.encoding = "utf-8"     # default when using YAML() or YAML(typ="rt")
    yml.allow_unicode = True   # always default in the new API
    yml.errors = "strict"
    yml.indent(sequence=4, offset=2)
    yml.explicit_end = True

    # Validate before touching the filesystem so a bad call does not leave
    # an empty file behind. TypeError is the conventional exception here
    # (and remains catchable by callers handling Exception).
    if not isinstance(dictionary, dict):
        raise TypeError(
            "expected a dict, got " + type(dictionary).__name__)

    with open(filename, 'w') as outfile:
        yml.dump(dictionary, outfile)
Example #13
0
def yaml():
    """Return a YAML instance: explicit '---', no '...', quotes preserved."""
    writer = YAML()
    writer.preserve_quotes = True
    writer.explicit_start = True
    writer.explicit_end = False
    return writer
Example #14
0
from importlib import import_module
from importlib.util import spec_from_file_location
from importlib.util import module_from_spec
import os
import sys
import json
import string
from io import StringIO, IOBase
from ruamel.yaml import YAML

# A shiny global ruamel.yaml obj with sane options (dumps should pass yamllint)
YM = YAML()
YM.indent(mapping=2, sequence=4, offset=2)  # yamllint-style block indentation
YM.explicit_start = True   # emit leading '---'
YM.explicit_end = True     # emit trailing '...'
YM.allow_unicode = True
YM.preserve_quotes = True  # round-trip the quoting style of loaded docs

# global formatting (any changes *should* propagate to later directives)
EOL = '\n'  # line separator
RENDER_JS = False  # presumably toggles JS rendering downstream — TODO confirm


##
#   substitution
##
def sub_stream(strm, meta, method):
    """
    substitute strings in strm
    return (the same? or a new) stream with substituted values
def _build_cluster(osde2e_cmnd, osde2ectl_cmd, account_config, my_path, es,
                   index, my_uuid, my_inc, cluster_count, timestamp, dry_run,
                   index_retry, skip_health_check, must_gather,
                   es_ignored_metadata):
    cluster_start_time = time.strftime("%Y-%m-%dT%H:%M:%S")
    success = True
    # osde2e takes a relative path to the account file so we need to create it in a working dir and
    # pass that dir as the cwd to subproccess
    cluster_path = my_path + "/" + account_config['cluster']['name']
    os.mkdir(cluster_path)
    yaml = YAML(pure=True)
    yaml.default_flow_style = False
    yaml.explicit_start = False
    yaml.explicit_end = False
    yaml.allow_duplicate_keys = True
    yaml.dump(account_config, open(cluster_path + "/cluster_account.yaml",
                                   'w'))
    cluster_env = os.environ.copy()
    cluster_env["REPORT_DIR"] = cluster_path
    if "expiration" in account_config['ocm'].keys():
        cluster_env["CLUSTER_EXPIRY_IN_MINUTES"] = str(
            account_config['ocm']['expiration'])
    logging.debug('Attempting cluster installation')
    logging.debug('Output directory set to %s' % cluster_path)
    cluster_cmd = [
        osde2e_cmnd, "test", "--custom-config", "cluster_account.yaml"
    ]
    cluster_cmd.append('--skip-health-check') if skip_health_check else None
    cluster_cmd.append('--must-gather=false') if not must_gather else None
    if args.wildcard_options:
        for param in args.wildcard_options.split():
            cluster_cmd.append(param)
    if not dry_run:
        logging.debug(cluster_cmd)
        installation_log = open(cluster_path + "/" + 'installation.log', 'w')
        process = subprocess.Popen(cluster_cmd,
                                   stdout=installation_log,
                                   stderr=installation_log,
                                   env=cluster_env,
                                   cwd=cluster_path)
        logging.info(
            'Started cluster %s (%d of %d)' %
            (account_config['cluster']['name'], my_inc, cluster_count))
        stdout, stderr = process.communicate()
        cluster_end_time = time.strftime("%Y-%m-%dT%H:%M:%S")
        if process.returncode != 0:
            logging.error('Failed to build cluster %d: %s' %
                          (my_inc, account_config['cluster']['name']))
            logging.error(
                'Check installation.log and test_output.log files on %s for errors'
                % (cluster_path + "/"))
            success = False
        logging.debug('Attempting to load metadata json')
        metadata = {}
        try:
            metadata = json.load(open(cluster_path + "/metadata.json"))
        except Exception as err:
            logging.error(err)
            logging.error('Failed to load metadata.json file located %s' %
                          cluster_path)
        metadata["cluster_start_time"] = cluster_start_time
        metadata["cluster_end_time"] = cluster_end_time
        metadata["install_successful"] = success
        metadata["uuid"] = my_uuid
        metadata['cluster_name'] = account_config['cluster']['name']
        metadata['multiAZ'] = 'True' if account_config['cluster'][
            'multiAZ'] else None
        metadata["install_counter"] = my_inc
        try:
            with open(cluster_path + "/metadata.json", "w") as metadata_file:
                json.dump(metadata, metadata_file)
        except Exception as err:
            logging.error(err)
            logging.error('Failed to write metadata.json file located %s' %
                          cluster_path)
        kubeconfig_path = _download_kubeconfig(
            osde2ectl_cmd, account_config['cluster']['name'], cluster_path)
        _add_machinepool(osde2ectl_cmd, kubeconfig_path,
                         cluster_path) if args.machinepool_name else None
        if es is not None:
            metadata["timestamp"] = time.strftime("%Y-%m-%dT%H:%M:%S")
            common._index_result(es, index, metadata, es_ignored_metadata,
                                 index_retry)