Example #1
0
    def setup_yaml(self):
        """Configure pyyaml so strings load as unicode and lists dump indented.

        Registers a plain-scalar str constructor on both Loader and
        SafeLoader, a representer that emits untagged string nodes, and
        patches both dumpers so block sequences are indented under their
        parent key.
        """

        def _str_constructor(loader, node):
            # Return the scalar unchanged so strings come back as unicode.
            return loader.construct_scalar(node)

        for loader_cls in (yaml.Loader, yaml.SafeLoader):
            loader_cls.add_constructor(u'tag:yaml.org,2002:str', _str_constructor)

        def _plain_str_representer(dumper, text):
            # Build the node directly so no !!python/unicode tag is attached.
            return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=text)

        yaml.add_representer(str, _plain_str_representer)

        def _force_indent(original):
            # Wrap increase_indent so block sequences are never "indentless".
            def increase_indent(emitter, flow=False, indentless=False):
                original(emitter, flow, False)

            return increase_indent

        yaml.Dumper.increase_indent = _force_indent(yaml.Dumper.increase_indent)
        yaml.SafeDumper.increase_indent = _force_indent(yaml.SafeDumper.increase_indent)
Example #2
0
def print_desc(topic, txt, post):
    """Print a feed post's title and a YAML dump of its remaining fields.

    :param topic: post title to display
    :param txt: unused; kept for interface compatibility with callers
    :param post: feedparser entry; a deep copy is dumped after stripping
        noisy keys and reducing 'content' entries to cleaned text
    """
    with c.info:
        try:

            print('\n\nTitle : {}:'.format(topic))

        except UnicodeEncodeError:
            # Best effort: skip titles the console encoding cannot render.
            pass
    with c.dark_gray:
        yaml.add_representer(
            tuple, yaml.representer.SafeRepresenter.represent_list)
        yaml.add_representer(feedparser.FeedParserDict,
                             yaml.representer.SafeRepresenter.represent_dict)

        import copy
        # Deep copy so the pruning below never mutates the caller's post.
        post_copy = copy.deepcopy(post)

        for key in list(post_copy.keys()):
            if '_parsed' in key \
                    or '_detail' in key \
                    or key in ('guidislink', 'link', 'links', 'id', 'summary', 'authors', 'title'):
                del post_copy[key]
            # BUG FIX: original tested `'content' is key` (object identity),
            # which is not a reliable string comparison; use equality.
            if key == 'content':
                for i in range(len(post_copy[key])):
                    post_copy[key][i] = clean_txt(post_copy[key][i]['value'])
        print(yaml.dump(post_copy))
Example #3
0
def display_yaml(metrics, pretty):
    """Dump *metrics* to stdout as YAML; block style when *pretty* is set."""
    yaml.add_representer(Severity, Severity.yaml_repr, yaml.SafeDumper)
    dump_options = {'default_flow_style': False} if pretty else {}
    print(yaml.safe_dump(metrics, **dump_options))
def main(flowcell_dir, csv_sample_sheet):
    """Convert an Illumina CSV sample sheet into a bcbio-style YAML config.

    NOTE(review): Python 2 only -- uses the `print` statement and the
    `unicode` builtin.

    :param flowcell_dir: flowcell directory containing Data/Intensities/...
    :param csv_sample_sheet: CSV sample sheet path, read as latin-1
    """
    multiplex_dir = os.path.join(flowcell_dir, "Data/Intensities/BaseCalls/Multiplex")
    fileObj = codecs.open( csv_sample_sheet, "r", "latin-1" )
    lane = 1
    samples = False  # flips True once the "Sample_ID" header row is seen
    mp = dict(details=list())
    for row in fileObj.readlines():
        if samples:
            s = dict()
            rdata = row.split(",")
            # First space-separated token of the first column, as ascii bytes.
            sample = rdata[0].split(" ")[0].encode("ascii")
            s['description'] = sample
            s['lane'] = str(lane)
            s['files'] = get_files(multiplex_dir,sample)
            s['analysis'] = 'Align_standard'
            s['genome_build'] = 'hg19'
            s['hybrid_target'] = '/proj/a2010002/projects/data/seqcap/agilent/targets/SureSelect_All_Exon_50mb_with_annotation_hg19_bed.interval_list'
            s['hybrid_bait'] = '/proj/a2010002/projects/data/seqcap/agilent/probes/SureSelect_All_Exon_50mb_with_annotation_hg19_bed.interval_list'
            # One lane per sample row, assigned sequentially.
            lane = lane + 1
            mp["details"].append(s)
        if row.startswith("Sample_ID"):
            header = row.split(",")  # NOTE(review): assigned but never used
            samples = True
    # Dump unicode values as plain YAML strings (no !!python/unicode tag).
    yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
    print yaml.dump(mp)
    def dump_spec(self, spec_file, filter=None):
        """
        Dump Specification (Get specifications from DB)

        :param spec_file: path of the YAML file to write.
        :param filter: optional list of type names to restrict the query.
        :raises Exception: on DB query failure or when the output file
            cannot be written.
        """
        yaml.add_representer(unicode, self.unicode_representer)
        conf = InventoryConfiguration()
        connection = conf.get_backend_db(CONFIG_FILE)
        connection.connect()
        if filter:
            query = {"type_name__in": filter}
        else:
            query = {}

        # Close the connection on every exit path (the previous version
        # leaked it when find_entities() raised).
        try:
            try:
                specs = connection.find_entities(self.collection, query)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # are not swallowed.
                raise Exception("Error during obtaining specifications from DB")

            # Materialize results; renamed from `list`, which shadowed the
            # builtin.
            entities = [item for item in specs]

            try:
                spec = open(spec_file, 'w')
                try:
                    yaml.dump({"specifications": entities}, spec)
                finally:
                    # Close the file even when the dump fails partway.
                    spec.close()
            except Exception:
                raise Exception("Can't create specification file %s. Please, check permissions and path to file."
                                % spec_file)
        finally:
            connection.close()
Example #6
0
def pytest_collection_modifyitems(items):
    """Pytest hook: dump per-test name, docstring and metadata to doc_data.yaml.

    NOTE(review): base64.b64encode() of a str works on Python 2 only; on
    Python 3 it requires bytes. item._metadata is a private plugin attribute
    -- confirm which plugin supplies it.
    """
    output = {}
    for item in items:
        # location[0] is the test file path; turn it into a dotted module path.
        item_class = item.location[0]
        item_class = item_class[:item_class.rfind('.')].replace('/', '.')
        item_name = item.location[2]
        # Strip any parametrization suffix like "[param1-param2]".
        item_param = re.findall('\.*(\[.*\])', item_name)
        if item_param:
            item_name = item_name.replace(item_param[0], '')
        node_name = '{}.{}'.format(item_class, item_name)
        output[node_name] = {}
        output[node_name]['docstring'] = base64.b64encode(getattr(item.function, '__doc__') or '')
        output[node_name]['name'] = item_name

        # This is necessary to convert AttrDict in metadata, or even metadict(previously)
        # into serializable data as builtin doesn't contain instancemethod and gives us issues.
        doc_meta = {k: v for k, v in item._metadata.get('from_docs', {}).items()}
        output[node_name]['metadata'] = {'from_docs': doc_meta}

    with open('doc_data.yaml', 'w') as f:
        # NOTE(review): dice_representer is defined but never registered.
        def dice_representer(dumper, data):
            return dumper.represent_scalar("chew", "me")
        import lya
        from yaml.representer import SafeRepresenter
        # Dump lya AttrDicts as plain mappings so pyyaml can serialize them.
        yaml.add_representer(lya.lya.AttrDict, SafeRepresenter.represent_dict)
        yaml.dump(output, f)
    def write(self, correlation_iterable, stream):
        """Serialize every correlation in *correlation_iterable* to *stream*,
        one YAML document per correlation.

        :type correlation_iterable: Iterable
        """
        yaml.add_representer(Correlation,
                             CorrelationSerializerYaml._correlation_yaml_representer)
        yaml.dump_all(correlation_iterable, stream=stream,
                      default_flow_style=False)
Example #8
0
    def __init__(self, *args, **kwargs):
        """Initialize the printer and register the literal-block string representer."""
        super(YamlPrinter, self).__init__(*args, **kwargs)

        def _literal_presenter(dumper, text):
            # Emit _LiteralString values in YAML literal block style ('|').
            return dumper.represent_scalar("tag:yaml.org,2002:str", text, style="|")

        yaml.add_representer(YamlPrinter._LiteralString, _literal_presenter,
                             Dumper=yaml.dumper.SafeDumper)
Example #9
0
def __init_module__():
    """Register config-specific YAML representers, constructors and resolvers."""
    for klass, representer in ((uuid.UUID, uuid_representer),
                               (ConfigPath, path_representer)):
        yaml.add_representer(klass, representer, Dumper=ConfigDumper)
    yaml.add_constructor('!uuid', uuid_constructor, Loader=ConfigLoader)
    yaml.add_constructor('!create-if-needed', path_constructor,
                         Loader=ConfigLoader)
    uuid_add_implicit_resolver()
Example #10
0
def _get_yaml_config(substructure, config=None):
    """
    Extract the keys from the config in substructure, which may be a nested
    dictionary.

    Raises a ``unittest.SkipTest`` if the substructure is not found in the
    configuration.

    This can be used to load credentials all at once for testing purposes.
    """
    if config is None:
        config = _load_config_from_yaml()
    try:
        return _extract_substructure(config, substructure)
    except MissingConfigError as e:
        # Render _Optional placeholders via repr() so the example structure
        # printed below marks which keys are optional.
        yaml.add_representer(
            _Optional,
            lambda d, x: d.represent_scalar(u'tag:yaml.org,2002:str', repr(x)))
        # NOTE(review): e.message exists on Python 2 (or if MissingConfigError
        # defines it explicitly); on Python 3 str(e) would be needed -- confirm.
        print (
            'Skipping test: could not get configuration: {}\n\n'
            'In order to run this test, add ensure file at $ACCEPTANCE_YAML '
            'has structure like:\n\n{}'.format(
                e.message,
                yaml.dump(substructure, default_flow_style=False)))
        raise unittest.SkipTest()
Example #11
0
def setup_yaml():
    """Configure pyyaml to serialize OrderedDict as a plain, ordered mapping.

    Based on http://stackoverflow.com/a/8661021
    """
    def _ordered_dict_representer(dumper, data):  # noqa
        # Emit items in insertion order instead of pyyaml's default sorting.
        return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())

    yaml.add_representer(OrderedDict, _ordered_dict_representer)
Example #12
0
def _install_customer_representers():
    """Install custom YAML representers so yaml.dump() can print our custom
    classes like LineStr, LineDict, LineList."""

    # TODO(jroovers): we need to support different yaml types like Float, Int, etc so that we don't print
    # everything like a string (i.e. wrap atomic type in quotes)

    def linestr_representer(dumper, data):
        node = dumper.represent_str(str(data))
        # If the linestring was entered as a string starting with |, keep the
        # '|' style on output, otherwise pyyaml inserts too many newlines.
        if hasattr(data, 'style') and '|' == data.style:
            node.style = data.style
        return node

    yaml.add_representer(LineInt, lambda dumper, data: dumper.represent_int(data))
    yaml.add_representer(LineFloat, lambda dumper, data: dumper.represent_float(data))
    yaml.add_representer(LineStr, linestr_representer)
    yaml.add_representer(LineDict, lambda dumper, data: dumper.represent_dict(data))
    yaml.add_representer(LineList, lambda dumper, data: dumper.represent_list(data))
def main():
    """Validate asset URLs in assets.yaml and refresh their hash metadata."""
    requestsexceptions.squelch_warnings()
    # Preserve mapping order on load by constructing all maps via
    # construct_yaml_map (presumably an OrderedDict constructor -- confirm).
    yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                         construct_yaml_map)

    yaml.add_representer(OrderedDict, project_representer,
                         Dumper=IndentedDumper)

    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input; the file handle is also never closed.
    data = yaml.load(open('openstack_catalog/web/static/assets.yaml'))

    assets = []
    for a in data['assets']:
        url = a.get('attributes', {}).get('url')
        if not a.get('active', True) or not url:
            # Inactive or URL-less assets pass through unchanged.
            assets.append(a)
            continue

        # Mark the asset inactive when its URL no longer resolves.
        r = requests.head(url, allow_redirects=True)
        if r.status_code != 200:
            a['active'] = False
        else:
            hash_url = a.get('hash_url')
            if hash_url:
                hashes = get_hashes(hash_url)
                filename = url.split("/")[-1]
                a['attributes']['hash'] = hashes.get(filename, 'unknown')

        assets.append(a)

    output = {'assets': assets}
    with open('openstack_catalog/web/static/assets.yaml', 'w') as out:
        out.write(yaml.dump(output, default_flow_style=False,
                            Dumper=IndentedDumper, width=80,
                            indent=2))
Example #14
0
def _write_tool(step_dir, name, inputs, outputs, parallel):
    """Write a CWL CommandLineTool description for *name* into *step_dir*.

    Returns the tool file's path relative to the workflow steps directory.
    """
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    tool = {"class": "CommandLineTool",
            "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
            "inputs": [],
            "outputs": []}
    if not parallel:
        # Non-parallel tools get a sentinel array input up front.
        sentinel = {"id": "#sentinel",
                    "type": {"type": "array", "items": "string"},
                    "default": ["multisample"]}
        inputs = [sentinel] + inputs
    for position, inp in enumerate(inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = "#%s" % base_id
        binding = {"prefix": "%s=" % base_id, "separate": False,
                   "itemSeparator": ";;", "position": position}
        if "secondaryFiles" in inp_tool:
            binding["secondaryFiles"] = inp_tool.pop("secondaryFiles")
        # Parallel tools bind at the input level; otherwise on the type.
        if parallel:
            inp_tool["inputBinding"] = binding
        else:
            inp_tool["type"]["inputBinding"] = binding
        tool["inputs"].append(inp_tool)
    # XXX Need to generalize outputs, just a hack for now to test align_prep
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool["id"] = "#%s" % workflow.get_base_id(outp["id"])
        tool["outputs"].append(outp_tool)

    def str_presenter(dumper, data):
        # Multiline strings dump as literal blocks; everything else plain.
        style = '|' if len(data.splitlines()) > 1 else None
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style=style)

    with open(out_file, "w") as out_handle:
        yaml.add_representer(str, str_presenter)
        yaml.dump(tool, out_handle, default_flow_style=False, allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
Example #15
0
def _write_tool(step_dir, name, inputs, outputs, parallel):
    """Write the CWL CommandLineTool file for step *name*.

    Returns the tool file's path relative to the workflow steps directory.
    """
    out_file = os.path.join(step_dir, "%s.cwl" % name)
    tool = {"class": "CommandLineTool",
            "baseCommand": ["bcbio_nextgen.py", "runfn", name, "cwl"],
            "inputs": [],
            "outputs": []}
    # Every tool carries a sentinel input recording its parallelization mode.
    all_inputs = [{"id": "#sentinel-parallel", "type": "string",
                   "default": parallel}] + inputs
    for position, inp in enumerate(all_inputs):
        base_id = workflow.get_base_id(inp["id"])
        inp_tool = copy.deepcopy(inp)
        inp_tool["id"] = "#%s" % base_id
        binding = {"prefix": "%s=" % base_id, "separate": False,
                   "itemSeparator": ";;", "position": position}
        inp_tool = _place_input_binding(inp_tool, binding, parallel)
        inp_tool = _place_secondary_files(inp_tool, binding)
        tool["inputs"].append(inp_tool)
    for outp in outputs:
        outp_tool = copy.deepcopy(outp)
        outp_tool["id"] = "#%s" % workflow.get_base_id(outp["id"])
        tool["outputs"].append(outp_tool)

    def str_presenter(dumper, data):
        # Use YAML literal block style ('|') for multiline strings.
        if len(data.splitlines()) > 1:
            return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
        return dumper.represent_scalar('tag:yaml.org,2002:str', data)

    with open(out_file, "w") as out_handle:
        yaml.add_representer(str, str_presenter)
        yaml.dump(tool, out_handle, default_flow_style=False, allow_unicode=False)
    return os.path.join("steps", os.path.basename(out_file))
Example #16
0
def export(options):
    """Export WordPress reading-log posts as YAML to options.output or stdout.

    NOTE(review): Python 2 only (uses the `unicode` builtin). The output
    file handle is opened but never explicitly closed.
    """
    log_entries = []

    cn = MySQLdb.connect(host=options.host, user=options.username, 
            passwd=options.password, db=options.db, use_unicode=True)

    cur = cn.cursor()
    # Published posts in taxonomy term 14 (presumably the reading-log
    # category -- confirm against the blog's term tables), oldest first.
    cur.execute('SELECT id, post_title, post_date, guid FROM wp_posts '
            'INNER JOIN wp_term_relationships ON '
            'wp_term_relationships.object_id = wp_posts.id '
            'WHERE post_status = %s AND term_taxonomy_id = %s '
            'ORDER BY post_date ASC', ('publish', 14))
    for row in cur.fetchall():
        id, title, date, guid = row
        entry = {'Title': title, 'Date': date, 'GUID': guid}
        subcur = cn.cursor()
        # Pull the reading-log custom fields attached to this post.
        subcur.execute('SELECT meta_key, meta_value FROM wp_postmeta '
                'WHERE post_id = %s', (id,))
        for key, value in subcur.fetchall():
            if key == '_readinglog_url': entry['URL'] = value
            elif key == '_readinglog_author': entry['Author'] = value
            elif key == '_readinglog_rating': entry['Rating'] = float(value)
        log_entries.append(entry)

    if options.output is not None:
        f = open(options.output, 'w')
    else:
        f = sys.stdout
    
    # Dump unicode values as plain YAML strings (no !!python/unicode tag).
    yaml.add_representer(unicode, lambda dumper, value: 
            dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
    yaml.dump_all(log_entries, f, default_flow_style=False, allow_unicode=True)
Example #17
0
def main(args):
    """CLI entry point: load a W3D model and export it as three.js JSON.

    NOTE(review): the `args` parameter is never used; options come from
    argparse instead.
    """
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("--debug", "-d", dest="debug", action="store_true", default=False, help="Verbose output")
    parser.add_argument("--file", required=True, type=str)
    parser.add_argument("--texture-path", type=str, nargs="*")
    parser.add_argument("--threejs", type=str, help="Export as threejs object")
    options = parser.parse_args()

    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    import w3d
    model = w3d.W3DModel.from_file(options.file)

    # Disabled debug path: dump the raw model as YAML and exit.
    if 0:
        import yaml
        from mathutils import Vector, Quaternion
        yaml.add_representer(Vector, lambda dumper, vector: dumper.represent_scalar('Vector', '%s, %s, %s' % (vector.x, vector.y, vector.z)))
        yaml.add_representer(Quaternion, lambda dumper, q: dumper.represent_scalar('Quaternion', '%s, %s, %s, %s' % (q.w, q.x, q.y, q.z)))
        print(yaml.dump(model))
        exit(0)

    if options.threejs:
        data = model.as_threejs(texture_paths=options.texture_path)
        with open(options.threejs, "w") as fw:
            fw.write(json.dumps(data, indent=2))
            if options.debug:
                print(json.dumps(data, indent=2))

    else:
        print(json.dumps(model.as_threejs(), indent=2))
Example #18
0
    def tearDown(self):
        """
        This method is run every time a test is finished.

        It is kind of a save_settings(method) and what it actually
        does is take the changes made to the self.test_cofig dict and
        save them to the yaml file.

        NOTE(review): Python 2 only -- subclasses the `unicode` builtin.
        """
        class folded_unicode(unicode): pass
        class literal_unicode(unicode): pass

        def literal_unicode_representer(dumper, data):
            # '|' literal block style keeps embedded newlines readable.
            return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')

        def folded_unicode_representer(dumper, data):
            # NOTE(review): defined but never registered below.
            return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>')

        def unicode_representer(dumper, uni):
            # Plain (untagged) string node -- avoids !!python/unicode tags.
            node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
            return node

        yaml.add_representer(unicode, unicode_representer)
        yaml.add_representer(literal_unicode, literal_unicode_representer)

        config_file = open(self.path, 'w')
        yaml.dump(self.test_config, config_file, default_flow_style=False, )
        config_file.close()
Example #19
0
def split_files(file_location, output_location,
                archive_location=None):
    """Split a multi-document yaml submission into per-table files.

    :param file_location: input yaml file location
    :param output_location: output directory path
    :param archive_location: if present will create a zipped
    representation of the split files
    """
    try:
        # Prefer the C-accelerated loader when pyyaml was built with
        # libyaml; fall back to the pure-Python safe loader otherwise
        # (replaces the previous bare-except fallback).
        loader = getattr(yaml, 'CSafeLoader', yaml.SafeLoader)
        # Read the whole file up front so the handle is closed promptly --
        # the previous open() calls leaked the file objects.
        with open(file_location, 'r') as input_file:
            file_documents = yaml.load_all(input_file.read(), Loader=loader)

        # make a submission directory where all the files will be stored.
        # delete a directory in the event that it exists.
        if os.path.exists(output_location):
            shutil.rmtree(output_location)

        os.makedirs(output_location)

        with open(os.path.join(output_location, "submission.yaml"),
                  'w') as submission_yaml:
            for document in file_documents:
                if "record_ids" in document:
                    # Metadata document: goes straight into submission.yaml.
                    write_submission_yaml_block(
                        document, submission_yaml)
                else:
                    # Data table document: written to its own file, named
                    # after the table with spaces stripped.
                    file_name = document["name"].replace(' ', '') + ".yaml"
                    document["data_file"] = file_name

                    with open(os.path.join(output_location, file_name),
                              'w') as data_file:
                        yaml.add_representer(str, str_presenter)
                        yaml.dump(
                            {"independent_variables":
                                cleanup_data_yaml(
                                    document["independent_variables"]),
                                "dependent_variables":
                                    cleanup_data_yaml(
                                        document["dependent_variables"])},
                            data_file, allow_unicode=True)

                    write_submission_yaml_block(document,
                                                submission_yaml,
                                                type="record")

        if archive_location:
            if os.path.exists(archive_location):
                os.remove(archive_location)

            # Close the archive even if zipping fails; note zipdir() relies
            # on the working-directory change below.
            with zipfile.ZipFile(archive_location, 'w') as zipf:
                os.chdir(output_location)
                zipdir(".", zipf)
    except Exception as e:
        current_app.logger.exception(e)
        # str(e) instead of e.message: BaseException.message was removed in
        # Python 3 (deprecated since 2.6).
        current_app.logger.error(
            'Error parsing {0}, {1}'.format(file_location, str(e)))
Example #20
0
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True):
    """Update the juju config and state in a yaml file.

    This includes any current relation-get data, and the charm
    directory.

    This function was created for the ansible and saltstack
    support, as those libraries can use a yaml file to supply
    context to templates, but it may be useful generally to
    create and update an on-disk cache of all the config, including
    previous relation data.

    By default, hyphens are allowed in keys as this is supported
    by yaml, but for tools like ansible, hyphens are not valid [1].

    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name

    NOTE(review): Python 2 only -- registers a representer for the
    `unicode` builtin.
    """
    config = charmhelpers.core.hookenv.config()

    # Add the charm_dir which we will need to refer to charm
    # file resources etc.
    config['charm_dir'] = charm_dir
    config['local_unit'] = charmhelpers.core.hookenv.local_unit()

    # Add any relation data prefixed with the relation type.
    relation_type = charmhelpers.core.hookenv.relation_type()
    if relation_type is not None:
        relation_data = charmhelpers.core.hookenv.relation_get()
        # Namespace each relation key as "<relation_type><sep><key>",
        # with hyphens in the relation type normalized to underscores.
        relation_data = dict(
            ("{relation_type}{namespace_separator}{key}".format(
                relation_type=relation_type.replace('-', '_'),
                key=key,
                namespace_separator=namespace_separator), val)
            for key, val in relation_data.items())
        config.update(relation_data)

    # Don't use non-standard tags for unicode which will not
    # work when salt uses yaml.load_safe.
    yaml.add_representer(unicode, lambda dumper,
                         value: dumper.represent_scalar(
                             u'tag:yaml.org,2002:str', value))

    yaml_dir = os.path.dirname(yaml_path)
    if not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)

    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input, and returns None for an empty file -- confirm
            # callers never hit either case.
            existing_vars = yaml.load(existing_vars_file.read())
    else:
        existing_vars = {}

    if not allow_hyphens_in_keys:
        config = dict(
            (key.replace('-', '_'), val) for key, val in config.items())
    existing_vars.update(config)
    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars))
Example #21
0
def main(arguments=None):
    """
    Runs digestive.

    :param arguments: Commandline arguments, passed to parse_arguments.
    """

    # add an order-retaining representer for OrderedDict (default impl calls sorted())
    yaml.add_representer(OrderedDict, lambda dumper, data: MappingNode(
        'tag:yaml.org,2002:map',
        # create a represented list of tuples, in order or data.items()
        [(dumper.represent_data(key), dumper.represent_data(value)) for key, value in data.items()]
    ))

    arguments = parse_arguments(arguments)
    # create the output generator
    output = output_to_file(arguments.output)
    # initialize output (moves it to the first occurrence of yield)
    next(output)
    # leading metadata document: tool version and start timestamp
    info = OrderedDict()
    info['digestive'] = str(digestive.__version__)
    info['started'] = datetime.now(tz=timezone.utc)
    output.send(info)

    with ThreadPoolExecutor(arguments.jobs) as executor:
        # TODO: globs like tests/files/file.* includes both file.E01 and file.E02
        # TODO: only file.E01 will get treated as ewf, file.E02 should be removed from sources
        for file in files(arguments.sources, arguments.recursive):
            with get_source(file, arguments.format) as source:
                # instantiate sinks from requested types
                sinks = [sink() for sink in arguments.sinks]
                # flush initial status line to force it to show in something like | less
                print('{} ({})'.format(source, file_size(len(source))), flush=True)

                # show a progress bar only for interactive runs
                if arguments.progress and sys.stdout.isatty():
                    with Progress(source, arguments.progress) as progress:
                        size = process_source(executor, source, sinks, arguments.block_size, progress=progress)
                else:
                    size = process_source(executor, source, sinks, arguments.block_size)

                results = OrderedDict((sink.name, sink.result()) for sink in sinks)
                for name, result in results.items():
                    if result is not None:  # exclude Nones from results
                        print('  {:<12} {}'.format(name, result))

                # create meta data leader
                # TODO: using kwargs here would be nice, but that destroys order :( (see PEP-468)
                info = OrderedDict((
                    ('source', file),
                    ('size', size),
                    ('completed', datetime.now(tz=timezone.utc))
                ))
                # add results
                info.update(results)
                # send info to the output collector
                output.send(info)

    # close the output collector, which in turn closes the output stream
    output.close()
Example #22
0
def apply_ordered_dict_patch():
    """Monkey-patch pyyaml so OrderedDicts dump as plain, ordered mappings."""
    yaml.add_representer(OrderedDict,Representer.represent_dict)


    # Globally replace mapping representation to preserve key order.
    yaml.representer.BaseRepresenter.represent_mapping = represent_ordered_mapping

    # Also cover the backported OrderedDict implementation.
    yaml.representer.Representer.add_representer(collection_backport.OrderedDict,
            yaml.representer.SafeRepresenter.represent_dict)
Example #23
0
def __init_yaml():
    """Lazy init yaml because canmatrix might not be fully loaded when loading this format."""
    global _yaml_initialized
    if _yaml_initialized:
        return
    _yaml_initialized = True
    yaml.add_constructor(u'tag:yaml.org,2002:Frame', _frame_constructor)
    yaml.add_constructor(u'tag:yaml.org,2002:Signal', _signal_constructor)
    yaml.add_representer(canmatrix.Frame, _frame_representer)
Example #24
0
def generate_script_content(networkconfig):
    """Render *networkconfig* as a YAML string with ordered mappings and
    flow-style FlowList sequences."""
    def _flow_list_representer(dumper, data):
        # FlowList sequences are emitted inline ([a, b, c]).
        return dumper.represent_sequence(u'tag:yaml.org,2002:seq', data,
                                         flow_style=True)

    def _ordered_dict_representer(dumper, data):
        # Preserve key insertion order instead of pyyaml's default sorting.
        return dumper.represent_mapping(u'tag:yaml.org,2002:map', data.items())

    yaml.add_representer(fixtures.FlowList, _flow_list_representer)
    yaml.add_representer(OrderedDict, _ordered_dict_representer)

    return yaml.dump(networkconfig, default_flow_style=False)
Example #25
0
def setup_yaml():
    """Teach pyyaml to dump OrderedDict as a plain block-style mapping,
    preserving insertion order. See https://stackoverflow.com/a/8661021
    """
    yaml.add_representer(
        OrderedDict,
        lambda dumper, data: dumper.represent_mapping(
            'tag:yaml.org,2002:map', data.items(), flow_style=False
        ),
    )
Example #26
0
    def dump2yaml(self, dest_filepath=os.getcwd()):
        """Dump this object as UTF-16 YAML to <dest_filepath>/<input stem>.yaml.

        NOTE(review): Python 2 only (print statement / `unicode` builtin).
        The default dest_filepath=os.getcwd() is evaluated once at import
        time, not at call time -- confirm that is intended.
        """
        file_name = os.path.join(dest_filepath, os.path.splitext(os.path.basename(self.filepath))[0])

        full_path = file_name + self.yaml_extension
        print "Creating %s" % full_path
        # Dump unicode values as plain YAML strings (no !!python/unicode tag).
        yaml.add_representer(unicode, lambda dumper, value: dumper.represent_scalar(u"tag:yaml.org,2002:str", value))
        with codecs.open(full_path, "wb", "utf-16") as f:
            yaml.dump(self, f, allow_unicode=True, encoding="utf-16")
Example #27
0
def save_settings(request):
    """Stores settings to settings.yaml local file.

    This is useful for using mist.io UI to configure your installation. It
    includes some yaml dump magic in order for the dumped private ssh keys
    to be in a valid string format.

    NOTE(review): Python 2 only -- subclasses the `unicode` builtin.
    """

    class folded_unicode(unicode):
        pass

    class literal_unicode(unicode):
        pass

    def literal_unicode_representer(dumper, data):
        # '|' literal block style keeps multi-line ssh keys valid YAML.
        return dumper.represent_scalar(u"tag:yaml.org,2002:str", data, style="|")

    def folded_unicode_representer(dumper, data):
        # NOTE(review): folded_unicode and this representer are never
        # registered below.
        return dumper.represent_scalar(u"tag:yaml.org,2002:str", data, style=">")

    def unicode_representer(dumper, uni):
        # Plain (untagged) string node -- avoids !!python/unicode tags.
        node = yaml.ScalarNode(tag=u"tag:yaml.org,2002:str", value=uni)
        return node

    yaml.add_representer(unicode, unicode_representer)
    yaml.add_representer(literal_unicode, literal_unicode_representer)

    base_path = os.environ.get("OPENSHIFT_DATA_DIR", "")
    yaml_path = base_path + "settings.yaml"
    config_file = open(yaml_path, "w")

    settings = request.registry.settings

    # Wrap key material in literal_unicode so it dumps in block style.
    keypairs = {}
    for key in settings["keypairs"].keys():
        keypairs[key] = {
            "public": literal_unicode(settings["keypairs"][key]["public"]),
            "private": literal_unicode(settings["keypairs"][key]["private"]),
            "machines": settings["keypairs"][key].get("machines", []),
            "default": settings["keypairs"][key].get("default", False),
        }

    payload = {
        "keypairs": keypairs,
        "backends": settings["backends"],
        "core_uri": settings["core_uri"],
        "js_build": settings["js_build"],
        "js_log_level": settings["js_log_level"],
    }

    # Credentials are persisted only when both are present.
    if settings.get("email", False) and settings.get("password", False):
        payload["email"] = settings["email"]
        payload["password"] = settings["password"]

    yaml.dump(payload, config_file, default_flow_style=False)

    config_file.close()
Example #28
0
def juju_state_to_yaml(yaml_path, namespace_separator=':',
                       allow_hyphens_in_keys=True, mode=None):
    """Update the juju config and state in a yaml file.

    This includes any current relation-get data, and the charm
    directory.

    This function was created for the ansible and saltstack
    support, as those libraries can use a yaml file to supply
    context to templates, but it may be useful generally to
    create and update an on-disk cache of all the config, including
    previous relation data.

    By default, hyphens are allowed in keys as this is supported
    by yaml, but for tools like ansible, hyphens are not valid [1].

    [1] http://www.ansibleworks.com/docs/playbooks_variables.html#what-makes-a-valid-variable-name

    :param yaml_path: file to create or update.
    :param namespace_separator: joins relation type and key in relation vars.
    :param allow_hyphens_in_keys: when False, hyphens in keys are replaced.
    :param mode: optional permission bits to apply to yaml_path.
    """
    config = charmhelpers.core.hookenv.config()

    # Add the charm_dir which we will need to refer to charm
    # file resources etc.
    config['charm_dir'] = charm_dir
    config['local_unit'] = charmhelpers.core.hookenv.local_unit()
    config['unit_private_address'] = charmhelpers.core.hookenv.unit_private_ip()
    config['unit_public_address'] = charmhelpers.core.hookenv.unit_get(
        'public-address'
    )

    # Don't use non-standard tags for unicode which will not
    # work when salt uses yaml.load_safe.
    yaml.add_representer(six.text_type,
                         lambda dumper, value: dumper.represent_scalar(
                             six.u('tag:yaml.org,2002:str'), value))

    yaml_dir = os.path.dirname(yaml_path)
    if not os.path.exists(yaml_dir):
        os.makedirs(yaml_dir)

    if os.path.exists(yaml_path):
        with open(yaml_path, "r") as existing_vars_file:
            # safe_load: this file only ever holds standard tags (see the
            # representer above) and must not construct arbitrary objects.
            # `or {}` guards against an empty file, which loads as None and
            # previously crashed the .update() below.
            existing_vars = yaml.safe_load(existing_vars_file.read()) or {}
    else:
        with open(yaml_path, "w+"):
            pass
        existing_vars = {}

    if mode is not None:
        os.chmod(yaml_path, mode)

    if not allow_hyphens_in_keys:
        config = dict_keys_without_hyphens(config)
    existing_vars.update(config)

    update_relations(existing_vars, namespace_separator)

    with open(yaml_path, "w+") as fp:
        fp.write(yaml.dump(existing_vars, default_flow_style=False))
Example #29
0
 def _dump(self, stream):
     """Write self._data to *stream*, as YAML when self._use_yaml else JSON."""
     if self._use_yaml:
         import yaml
         def config_representer(dumper, obj):
             # Represent nested config objects as plain YAML mappings
             # built from their underlying _data items.
             return dumper.represent_mapping(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, obj._data.items())
         yaml.add_representer(self.__class__, config_representer)
         yaml.dump(self._data, stream)
     else:
         # JSON fallback: nested config objects serialize via their _data dict.
         json.dump(self._data, stream, default=lambda obj: obj._data)
Example #30
0
def serializable(cls):
    """Register representer method of decorated class with YAML.

    The class must provide either ``_representer`` (registered for the
    exact type) or ``_multi_representer`` (registered for the type and
    its subclasses).

    Returns:
        The class itself, so this works as a decorator.

    Raises:
        RuntimeError: if the class defines neither hook.  The original
            raised a bare RuntimeError with no message; a diagnostic is
            added here (the exception type is unchanged).
    """
    if hasattr(cls, '_representer'):
        yaml.add_representer(cls, cls._representer)
    elif hasattr(cls, '_multi_representer'):
        yaml.add_multi_representer(cls, cls._multi_representer)
    else:
        raise RuntimeError(
            'cannot make %r serializable: it defines neither _representer '
            'nor _multi_representer' % cls)
    return cls
Example #31
0

yaml.add_constructor(u"tag:yaml.org,2002:opencv-matrix", opencv_matrix_constructor)


# A yaml representer is for dumping structs into a yaml node.
# So for an opencv_matrix type (to be compatible with c++'s FileStorage) we save the rows, cols, type and flattened-data
def opencv_matrix_representer(dumper, mat):
    """Represent a numpy array in OpenCV FileStorage layout.

    Emits a mapping with rows, cols, dtype code 'd' and the flattened
    data, tagged !!opencv-matrix for C++ FileStorage compatibility.
    """
    if mat.ndim > 1:
        rows, cols = mat.shape[0], mat.shape[1]
        flat = mat.reshape(-1).tolist()
    else:
        rows, cols = mat.shape[0], 1
        flat = mat.tolist()
    fields = {'rows': rows, 'cols': cols, 'dt': 'd', 'data': flat}
    return dumper.represent_mapping(u"tag:yaml.org,2002:opencv-matrix", fields)


yaml.add_representer(np.ndarray, opencv_matrix_representer)


def get_rect_parameters(width, height, int_fn, ext_fn):
    with open(int_fn, 'r') as f:
        int_param = yaml.load(f)
        K_l = int_param["M1"]
        K_r = int_param["M2"]
        D_l = int_param["D1"]
        D_r = int_param["D2"]

    with open(ext_fn, 'r') as f:
        ext_param = yaml.load(f)
        P_l = ext_param["P1"]
        P_r = ext_param["P2"]
        R_l = ext_param["R1"]
Example #32
0
    ]
else:
    paths = [Path(pathname) for pathname in args.inputs]


def print_verbose(*arg, **kwarg):
    """Forward everything to print() only when --verbose was requested."""
    if not args.verbose:
        return
    print(*arg, **kwarg)


# Define order-preserving representer from dicts/maps
def yaml_preserve_order(self, dict_data):
    """Represent mappings in insertion order instead of sorting the keys."""
    pairs = dict_data.items()
    return self.represent_mapping("tag:yaml.org,2002:map", pairs)


yaml.add_representer(dict, yaml_preserve_order)

uuids = set()
passed = True
for path in paths:
    print_verbose("Rule {}".format(str(path)))
    with path.open("r") as f:
        rules = list(yaml.safe_load_all(f))

    if args.verify:
        i = 1
        for rule in rules:
            if "title" in rule:  # Rule with a title should also have a UUID
                try:
                    UUID(rule["id"])
                except ValueError:  # id is not a valid UUID
Example #33
0
    return [0, yamlobj]


#############################################################################
# repack verb #
###############


def repack_verb(args):
    """Handle the 'repack' subcommand.

    Not implemented yet: always raises CustomException so the CLI
    reports a clean error message instead of doing nothing silently.
    """
    # TODO
    raise CustomException("Sorry, this option is not implemented yet")


############
# __main__ #
############

if __name__ == '__main__':

    # Teach YAML to round-trip mappings in file order via the helpers
    # defined elsewhere in this file.
    # NOTE(review): the constructor is attached to the *seq* tag while the
    # representer handles OrderedDict (a mapping) — confirm the seq tag is
    # intentional and not meant to be tag:yaml.org,2002:map.
    yaml.add_constructor(u'tag:yaml.org,2002:seq', ordereddict_constructor)
    yaml.add_representer(OrderedDict, represent_ordereddict)

    parser, args = parse_args()
    # Dispatch on the chosen subcommand; an unknown/missing subcommand
    # prints usage and exits 20, success paths exit 0.
    if (args.subcommand == "unpack"): unpack_verb(args)
    elif (args.subcommand == "replace"): replace_verb(args)
    elif (args.subcommand == "repack"): repack_verb(args)
    else:
        parser.print_usage()
        sys.exit(20)
    sys.exit(0)
Example #34
0
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""Configuration control."""

import collections
import copy
import yaml
import pprint

from yaml.representer import Representer
yaml.add_representer(collections.defaultdict, Representer.represent_dict)

__all__ = ("Config", )

# UserDict and yaml have defined metaclasses and Python 3 does not allow multiple
# inheritance of classes with distinct metaclasses. We therefore have to
# create a new baseclass that Config can inherit from. This is because the metaclass
# syntax differs between versions


class _ConfigMeta(type(collections.UserDict), type(yaml.YAMLObject)):
    """Combined metaclass of UserDict and YAMLObject.

    Python forbids inheriting from bases with distinct metaclasses unless
    a common sub-metaclass is provided; this class is that sub-metaclass.
    """
    pass


class _ConfigBase(collections.UserDict, yaml.YAMLObject,
                  metaclass=_ConfigMeta):
    # YAML construction hook, invoked by the loader as f(loader, node).
    # NOTE(review): not decorated with @classmethod/@staticmethod, so it
    # only works when referenced directly as a loader callback (see the
    # yaml.add_constructor call below that uses CaseInput.from_yaml) —
    # confirm this is intended.
    def from_yaml(loader, node):
        # value = CaseDefinition()
        # yield value
        # node_map = loader.construct_mapping(node, deep=True)
        # value.update(**node_map)

        # deep=True fully constructs nested nodes before CaseInput is built.
        node_map = loader.construct_mapping(node, deep=True)
        return CaseInput(boundary_conditions=node_map['boundary_conditions'],
                         geometry=node_map['geometry'],
                         discretization=node_map['discretization'],
                         physics_parameters=node_map['physics_parameters'],
                         xs_file=node_map['xs_file'],
                         numerics=node_map['numerics'])


yaml.add_representer(CaseInput, CaseInput.to_yaml, Dumper=yaml.SafeDumper)
yaml.add_constructor(CaseInput.YAMLTag, CaseInput.from_yaml, Loader=yaml.SafeLoader)


class CaseDefinition:

    def __init__(self, case_input):
        self.input = case_input

        # Duplicate entries in order to add keys without altering original input
        self.boundary_conditions = dict(case_input.boundary_conditions)
        self.geometry = dict(case_input.geometry)
        self.discretization = dict(case_input.discretization)
        self.physics_parameters = dict(case_input.physics_parameters)

        # Get XS library file name
Example #36
0
    def __init__(self, full):
        """Initialize the YAML output backend and register representers.

        Text (six.text_type and str — the same type on Python 3) is emitted
        through string_presenter, and OrderedDict through dict_representer
        so insertion order is preserved in the dump.
        """
        super(YamlOutput, self).__init__(full)

        yaml.add_representer(six.text_type, self.string_presenter)
        yaml.add_representer(str, self.string_presenter)
        yaml.add_representer(collections.OrderedDict, self.dict_representer)
# -*- coding: utf-8 -*-

import collections
import yaml


#https://stackoverflow.com/questions/9951852/pyyaml-dumping-things-backwards
def order_rep(dumper, data):
    """Dump a mapping in insertion order, one key per line (block style)."""
    ordered_pairs = data.items()
    return dumper.represent_mapping(u'tag:yaml.org,2002:map',
                                    ordered_pairs,
                                    flow_style=False)


yaml.add_representer(collections.OrderedDict, order_rep)

# Build the resume as nested OrderedDicts so the YAML dump keeps the
# sections and fields in the order they are written here.
resume = collections.OrderedDict()

contact = collections.OrderedDict()
contact['name'] = 'Ryan Compton'
contact['email'] = '*****@*****.**'
contact['website'] = 'http://ryancompton.net/'
resume['contact'] = contact

employment = collections.OrderedDict()
employment[
    'current'] = "Data Scientist, Clarifai, Designed and constructed datasets in order to improve the performance of deep learning-powered image recognition systems"
employment[
    '2012-2015'] = "Research Staff, Howard Hughes Research Laboratories, Worked on social media data mining for early detection of newsworthy events."
resume['employment'] = employment

edu = collections.OrderedDict()
Example #38
0
    option_list = []

    for key, value in options.items():
        option_list.append({
            "name": key,
            "description": value["Description"],
            "required": value["Required"],
            "value":
            value["Value"],  # todo should value really be defaultValue?
        })

    return {"options": option_list}


if __name__ == "__main__":
    yaml.add_representer(type(None), represent_none)
    root_path = f"../../modules/python"
    pattern = "*.py"
    count = 0
    for root, dirs, files in os.walk(root_path):
        for filename in fnmatch.filter(files, pattern):
            file_path = os.path.join(root, filename)
            print(file_path)

            # if 'eventvwr' not in file_path and 'seatbelt' not in file_path and 'logonpasswords' not in file_path \
            #         and 'invoke_assembly' not in file_path.lower() and 'sherlock' not in file_path and 'kerberoast' not in file_path \
            #         and 'watson' not in file_path and 'message.py' not in file_path and 'rick_astley' not in file_path \
            #         and 'portscan' not in file_path and 'say.py' not in file_path and 'prompt' not in file_path and 'screenshot' not in file_path\
            #         and 'clipboard' not in file_path:
            #     continue
Example #39
0
        """
        return the saved space in percent
        """
        return (1. - float(self.output_file_size) /
                float(self.input_file_size)) * 100.

    @staticmethod
    def to_yaml(dumper, data):
        """
        dump the file to yaml

        Serializes *data* as a mapping tagged with its class YAMLTag so a
        matching constructor can rebuild the object on load.
        """
        return dumper.represent_mapping(data.YAMLTag, data.as_dict())


yaml.add_representer(TranscoderFileStats,
                     TranscoderFileStats.to_yaml,
                     Dumper=yaml.SafeDumper)


class TranscoderStats:
    """
    transcoder Stats class
    """

    YAMLTag = u"!TranscoderStats"

    def __init__(self):
        # define collected stats
        self.total_saved = 0.0

    def increment_total_saved(self, value):
Example #40
0
def _represent_omap(dumper, data):
    """Represent the object as a standard YAML map via its to_omap() pairs."""
    omap_pairs = data.to_omap()
    return dumper.represent_mapping(u'tag:yaml.org,2002:map', omap_pairs)


def _unicode_representer(dumper, uni):
    # Build the ScalarNode directly (instead of represent_scalar) so text
    # is emitted as a plain YAML string without a !!python/unicode tag.
    node = yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value=uni)
    return node


class _IndentDumper(yaml.Dumper):
    # Force indentless=False so block sequences are indented under their
    # parent key instead of flush-left with it.
    def increase_indent(self, flow=False, indentless=False):
        return super(_IndentDumper, self).increase_indent(flow, False)


yaml.add_representer(_MetaYaml, _represent_omap)
if PY3:
    yaml.add_representer(str, _unicode_representer)
    unicode = None  # silence pyflakes about unicode not existing in py3
else:
    yaml.add_representer(unicode, _unicode_representer)


def output_yaml(metadata, filename=None):
    utils.trim_empty_keys(metadata.meta)
    if metadata.meta.get('outputs'):
        del metadata.meta['outputs']
    output = yaml.dump(_MetaYaml(metadata.meta),
                       Dumper=_IndentDumper,
                       default_flow_style=False,
                       indent=4)
Example #41
0
"""Uttilities for pretty printing to yaml."""
import yaml
from yaml.representer import SafeRepresenter
import numpy as np


class folded_str(str):
    """Marker type: strings to dump with the folded ('>') YAML style."""


class literal_str(str):
    """Marker type: strings to dump with the literal ('|') YAML style."""


def change_style(style, representer):
    """Wrap *representer* so the scalar node it builds uses *style*."""
    def styled(dumper, data):
        node = representer(dumper, data)
        node.style = style
        return node

    return styled


# represent_str does handle some corner cases, so use that
# instead of calling represent_scalar directly
represent_folded_str = change_style('>', SafeRepresenter.represent_str)
represent_literal_str = change_style('|', SafeRepresenter.represent_str)

yaml.add_representer(folded_str, represent_folded_str)
yaml.add_representer(literal_str, represent_literal_str)
Example #42
0
from pygments import highlight
from pygments.lexers import HtmlLexer, DjangoLexer
from pygments.formatters import HtmlFormatter

# preserve key order when parsing YAML – http://stackoverflow.com/a/21048064/147318


def dict_representer(dumper, data):
    """Represent a mapping by its item pairs, preserving insertion order.

    Fix: ``data.iteritems()`` exists only on Python 2 and raises
    AttributeError at dump time on Python 3; fall back to ``items()``
    (same compatibility shim this codebase uses elsewhere).
    """
    items = data.iteritems() if hasattr(data, "iteritems") else data.items()
    return dumper.represent_dict(items)


def dict_constructor(loader, node):
    """Construct a YAML mapping node as an OrderedDict (keeps key order)."""
    pairs = loader.construct_pairs(node)
    return OrderedDict(pairs)


yaml.add_representer(OrderedDict, dict_representer)
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                     dict_constructor)


class ToolkitEnvironment(Environment):
    """Override join_path() to strip out 'toolkit/' string from template paths."""
    def join_path(self, template, parent):
        # Note: replace() removes *every* occurrence of 'toolkit/', not only
        # the leading prefix, although only prefixed names reach this branch.
        if template.startswith('toolkit/'):
            template = template.replace('toolkit/', '')
        return super(ToolkitEnvironment, self).join_path(template, parent)


class Styleguide_publisher(object):
    "publish a styleguide for the toolkit"
Example #43
0
# from http://stackoverflow.com/questions/5121931/in-python-how-can-you-load-yaml-mappings-as-ordereddicts

_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG


def dict_representer(dumper, data):
    """Represent any mapping by its item pairs (Python 2/3 compatible)."""
    if hasattr(data, "iteritems"):
        pairs = data.iteritems()
    else:
        pairs = data.items()
    return dumper.represent_dict(pairs)


def dict_constructor(loader, node):
    """Build loaded YAML mappings as OrderedDicts to keep key order."""
    pairs = loader.construct_pairs(node)
    return collections.OrderedDict(pairs)


yaml.add_representer(collections.OrderedDict, dict_representer,
                     yaml.SafeDumper)
yaml.add_constructor(_mapping_tag, dict_constructor, yaml.SafeLoader)


def check_memory_usage(bytes_needed, confirm):
    """Check whether an action fits in memory, asking the user when not.

    Args:
        bytes_needed: estimated bytes the action requires.
        confirm: callback (title, text) -> bool used to ask the user.

    Returns:
        True when enough RAM is free; otherwise the user's answer — either
        to proceed using swap (slower) or to try despite insufficient
        swap.
    """
    if bytes_needed > psutil.virtual_memory().available:
        # Fits only if swap is counted in: warn that it will be slow.
        if bytes_needed < (psutil.virtual_memory().available +
                           psutil.swap_memory().free):
            text = "Action requires %s, you have enough swap memory available but it will make your computer slower, do you want to continue?" % (
                filesize_format(bytes_needed), )
            return confirm("Memory usage issue", text)
        else:
            # Not even swap can cover it: let the user force the attempt.
            text = "Action requires %s, you do not have enough swap memory available, do you want try anyway?" % (
                filesize_format(bytes_needed), )
            return confirm("Memory usage issue", text)
    return True
Example #44
0
                r"\1" + (" " * self.tab_replacement_spaces) + r"\3", line)
            if replaced != line:
                line = replaced
                if self.warn_on_tab_replacement:
                    self.log.warning(
                        "Replaced leading tabs in file %s, line %s", fname,
                        num)
                    self.log.warning("Line content is: %s", replaced.strip())
                    self.log.warning(
                        "Please remember that YAML spec does not allow using tabs for indentation"
                    )
            res += line
        return res


yaml.add_representer(Configuration, SafeRepresenter.represent_dict)
yaml.add_representer(BetterDict, SafeRepresenter.represent_dict)
if PY2:
    yaml.add_representer(text_type, SafeRepresenter.represent_unicode)
yaml.add_representer(str, str_representer)

# dirty hack from http://stackoverflow.com/questions/1447287/format-floats-with-standard-json-module
encoder.FLOAT_REPR = lambda o: format(o, '.3g')


class EngineModule(object):
    """
    Base class for any BZT engine module

    :type engine: Engine
    :type settings: BetterDict
Example #45
0
def _get_config_to_yaml(config, comments=True):
    """Converts a configuration from dict to a yaml string

    Auxiliary method to :func:`get_default_config`.

    Parameters
    ----------
    config : dict
        A dict of parameters, one key per processor and
        postprocessors. For each key, the value is a dict as well,
        with a mapping (parameter name: parameter value).
    comments : bool, optional
        When True, include the parameters docstrings as comments
        in the yaml string, default to True

    Returns
    -------
    yaml : str
        A string formatted to the YAML format, ready to be written
        to a file

    """
    # inform yaml to not sort keys by alphabetical order
    # NOTE(review): both add_representer calls below mutate global yaml
    # module state and affect every later dump in the process.
    yaml.add_representer(
        dict,
        lambda self, data: yaml.representer.SafeRepresenter.represent_dict(
            self, data.items()))

    # inform yaml to represent numpy floats as standard floats
    yaml.add_representer(np.float32,
                         yaml.representer.Representer.represent_float)

    # store the pitch processor (if any)
    try:
        pitch_processor = config['pitch']['processor']
    except KeyError:
        pitch_processor = None

    # build the yaml formated multiline string
    # (rebinds the 'config' parameter to its dumped text form)
    config = yaml.dump(config).strip()

    if not comments:
        return config + '\n'

    # incrust the parameters docstrings as comments in the yaml
    # 'processors' is a stack of section names mirroring the current YAML
    # nesting; assumes 2-space indentation per level.
    config_commented = []
    processors = []
    prev_offset = 0
    for line in config.split('\n'):
        # offset = width of the leading indentation before the key
        offset = len(line.split(': ')[0]) - len(line.split(': ')[0].strip())
        # dedent: pop one section per 2-space level we just left
        for _ in range((prev_offset - offset) // 2):
            processors.pop()
        if line.endswith(':'):
            processor = line[:-1].strip()
            # special case of pitch_postprocessor
            if processor == 'postprocessing':
                processor = f'{processors[-1]}_post'
            processors.append(processor)

            # special case here when '   vad:' we are in the ubm section of
            # vtln: no need to append this comment
            if processor == 'vad' and offset != 4:
                config_commented.append(
                    "  # The vad options are not used if 'with_vad' is false")

            config_commented.append(line)
        else:
            param = line.split(': ')[0].strip()
            default = line.split(': ')[1].strip()
            processor = processors[-1]

            if processor == 'cmvn' and param == 'by_speaker':
                docstring = ('If false, do normalization by utterance, '
                             'if true do normalization by speaker.')
            elif processor == 'cmvn' and param == 'with_vad':
                docstring = (
                    'If true do normalization only on frames where '
                    'voice activity has been detected, if false do not '
                    'consider voice activity for normalization.')
            elif param == 'features' and default == 'default':
                # custom docstring when using VTLN with default features
                docstring = (
                    'Features extraction configuration. Default is to use '
                    'MFCCs with default parameters. Regenerate this '
                    'configuration file with "speech-features config" using '
                    'the "--vtln-full" option to expose all the parameters.')
            elif processor == 'pitch' and param == 'processor':
                docstring = f'Computing pitch using {pitch_processor}'
            elif 'pitch' in processor and param != 'processor':
                docstring = PipelineManager.get_docstring(
                    pitch_processor + '_' + processor, param, default)
            else:
                docstring = PipelineManager.get_docstring(
                    processor, param, default)

            # wrap the docstring to the remaining line width and indent it
            # to align with the parameter it documents
            config_commented += [
                ' ' * offset + '# ' + w
                for w in textwrap.wrap(docstring, width=68 - offset)
            ]
            config_commented.append(line)
        prev_offset = offset

    return '\n'.join(config_commented) + '\n'
Example #46
0
from collections import OrderedDict

import yaml


# <https://stackoverflow.com/a/16782282>
def represent_ordereddict(dumper, data):
    """Represent an OrderedDict as a plain YAML map, preserving key order."""
    node_pairs = [
        (dumper.represent_data(item_key), dumper.represent_data(item_value))
        for item_key, item_value in data.items()
    ]
    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', node_pairs)

yaml.add_representer(OrderedDict, represent_ordereddict)


# <https://stackoverflow.com/a/38370522>
def quoted_presenter(dumper, data):
    """Force explicit double quotes around every string scalar."""
    str_tag = 'tag:yaml.org,2002:str'
    return dumper.represent_scalar(str_tag, data, style='"')

yaml.add_representer(str, quoted_presenter)


__all__ = []
Example #47
0
def write_yaml(data, yml_file):
    """Dump *data* to *yml_file* as YAML with ordered, alias-free output.

    Fix: the original assigned ``ignore_aliases`` and registered the
    OrderedDict representer directly on the shared ``yaml.SafeDumper``
    class, leaking that behavior into every other SafeDumper user in the
    process.  A private subclass keeps the output identical while
    scoping the customization to this function.
    """
    class _OrderedSafeDumper(yaml.SafeDumper):
        pass

    # Never emit &anchor/*alias pairs for repeated objects.
    _OrderedSafeDumper.ignore_aliases = lambda self, data: True
    yaml.add_representer(OrderedDict, represent_ordereddict,
                         Dumper=_OrderedSafeDumper)
    return yaml.dump(data, yml_file, default_flow_style=False,
                     Dumper=_OrderedSafeDumper)
Example #48
0
TEST = (os.environ.get('TEST', 'false') == 'true')

_TOP_LEVEL_KEYS = ['settings', 'filegroups', 'libs', 'targets', 'vspackages']
_VERSION_KEYS = ['major', 'minor', 'micro', 'build']
_ELEM_KEYS = [
    'name', 'flaky', 'build', 'run', 'language', 'public_headers', 'headers',
    'src', 'deps'
]


def repr_ordered_dict(dumper, odict):
    """Dump an OrderedDict as a standard YAML map in insertion order."""
    mapping_tag = u'tag:yaml.org,2002:map'
    return dumper.represent_mapping(mapping_tag, odict.items())


yaml.add_representer(collections.OrderedDict, repr_ordered_dict)


def rebuild_as_ordered_dict(indict, special_keys):
    """Return an OrderedDict copy of *indict* with a deterministic order.

    Keys containing '#' (comment entries) come first, sorted; then the
    *special_keys* in their given order; then all remaining keys, sorted.
    """
    outdict = collections.OrderedDict()
    for key in sorted(k for k in indict if '#' in k):
        outdict[key] = indict[key]
    for key in special_keys:
        if key in indict:
            outdict[key] = indict[key]
    for key in sorted(indict):
        if key in special_keys or '#' in key:
            continue
        outdict[key] = indict[key]
    return outdict
Example #49
0
from flask import current_app as app

print(">> %s" % BASE_DIR)

LOGGER = logging.getLogger("business-glossary.dump_data")


class literal(str):
    """Marker subclass of str rendered as a literal ('|') block scalar."""


def literal_representer(dumper, data):
    """Emit a ``literal`` string using the '|' block style."""
    str_tag = 'tag:yaml.org,2002:str'
    return dumper.represent_scalar(str_tag, data, style='|')


yaml.add_representer(literal, literal_representer)


def return_categories(term):
    '''Return category names as a list when dumping terms'''
    return [category.name for category in term.categories]


def return_terms(rule):
    '''Return the terms a rule belongs to as a list'''
    terms = []
    for term in rule.terms:
        terms.append(term.name)
Example #50
0
            'package_version': PACKAGE_VERSION,
        },
    }
elif PACKAGE_NAME == 'apache-airflow-providers':
    jinja_contexts = {
        'official_download_page': {
            'all_providers': ALL_PROVIDER_YAMLS,
        },
    }
elif PACKAGE_NAME == 'helm-chart':

    def _str_representer(dumper, data):
        # Multi-line strings render as block scalars ('|'); single-line
        # strings keep the default style.
        block_style = "|" if "\n" in data else None
        return dumper.represent_scalar("tag:yaml.org,2002:str", data, block_style)

    yaml.add_representer(str, _str_representer)

    def _format_default(value: Any) -> str:
        """Render a chart parameter's default value for display."""
        if value is None:
            return '~'
        if value == "":
            return '""'
        return str(value)

    def _format_examples(param_name: str, schema: dict) -> Optional[str]:
        if not schema.get("examples"):
            return None

        # Nicer to have the parameter name shown as well
        out = ""
        for ex in schema["examples"]:
Example #51
0
from dictlib import Obj as Dict
import yaml
from .gql import parse as gql_parse
from .dex import dex_transpile
from .util.out import debug, notify, header, error, abort  # pylint: disable=unused-import


def multiline_yaml(dumper, data):
    """Represent multi-line strings as YAML block scalars ('|')."""
    scalar_style = '|' if len(data.splitlines()) > 1 else None
    return dumper.represent_scalar('tag:yaml.org,2002:str',
                                   data,
                                   style=scalar_style)


yaml.add_representer(str, multiline_yaml)

# things always needed within a container
GLOBAL_PIP = [
    'boto3',  # React Arc: AWS boto3 into dynamo
    'jwt',  # React Arc: Handling authentication tokens
    'dictlib',  # general polyform utility
    'datacleaner'
]


################################################################################
class Config():
    """
    Polyform.yml configuration object
    """
Example #52
0
    def config(self,
               ref,
               diff=False,
               config=None,
               update=False,
               set_env=None,
               **kwargs):
        """Show, diff or update an application's configuration.

        Python 2 code: uses ``unicode`` and print statements, and is a
        generator-based coroutine — presumably Twisted inlineCallbacks
        style, where each ``yield`` drives a remote call (TODO confirm).

        ref      -- application reference, parsed via parse_app_ref()
        diff     -- show a unified diff of remote vs local config
        config   -- path to a local config file (default 'mcloud.yml')
        update   -- push the local config to the remote
        set_env  -- switch the environment used for parsing/updating
        """
        app, service = self.parse_app_ref(ref, kwargs, app_only=True)

        app_config = yield self._remote_exec('config', app)

        parser_env = set_env or app_config['env']

        # Need the current remote config rendered as YAML for both the
        # plain "show" mode and the diff mode.
        if diff or (not update and not set_env):
            old_config = YamlConfig(source=unicode(app_config['source']),
                                    app_name=app,
                                    env=parser_env)
            old_config.load(process=False)
            from collections import OrderedDict

            yaml.add_representer(
                unicode, yaml.representer.SafeRepresenter.represent_unicode)
            yaml.add_representer(OrderedDict, self.represent_ordereddict)
            olds = yaml.dump(old_config.config, default_flow_style=False)

        if not update and not diff and not set_env:
            # Show mode: print config, environment and path in a table.
            x = PrettyTable(["Name", "Value"],
                            hrules=ALL,
                            align='l',
                            header=False)
            x.align = "l"
            x.add_row(['Config', olds])
            x.add_row(['Environment', app_config['env']])
            x.add_row(['Path', app_config['path']])
            print(x)

        else:
            if config:
                config_file = os.path.expanduser(config)
            else:
                config_file = 'mcloud.yml'

            new_config = YamlConfig(file=config_file,
                                    app_name=app,
                                    env=parser_env)
            new_config.load(process=False)

            if diff:
                yaml.add_representer(
                    unicode,
                    yaml.representer.SafeRepresenter.represent_unicode)
                yaml.add_representer(OrderedDict, self.represent_ordereddict)
                news = yaml.dump(new_config.config, default_flow_style=False)

                if olds == news:
                    print('Configs are identical.')
                else:

                    # Colorize the diff: additions green, removals red.
                    for line in unified_diff(olds.splitlines(1),
                                             news.splitlines(1)):
                        if line.endswith('\n'):
                            line = line[0:-1]
                        if line.startswith('+'):
                            print color_text(line, color='green')
                        elif line.startswith('-'):
                            print color_text(line, color='red')
                        else:
                            print line
            else:
                # Update mode: env-only switch or full config push.
                if set_env and not update:
                    yield self._remote_exec('update', app, env=set_env)
                else:
                    yield self._remote_exec('update',
                                            app,
                                            config=new_config.export(),
                                            env=set_env)
    flags, pattern = value.split(':', 1)
    return re.compile(pattern, int(flags or 0))


def _wa_level_constructor(loader, node):
    # Scalar format is "<name>:<value>"; split on the first ':' only so the
    # value part may itself contain colons.
    value = loader.construct_scalar(node)
    name, value = value.split(':', 1)
    return level(name, value)


def _wa_cpu_mask_constructor(loader, node):
    # Rebuild a cpu_mask object from its scalar YAML form (the inverse of
    # the matching representer registered below).
    value = loader.construct_scalar(node)
    return cpu_mask(value)


_yaml.add_representer(OrderedDict, _wa_dict_representer)
_yaml.add_representer(regex_type, _wa_regex_representer)
_yaml.add_representer(level, _wa_level_representer)
_yaml.add_representer(cpu_mask, _wa_cpu_mask_representer)
_yaml.add_constructor(_regex_tag, _wa_regex_constructor, Loader=_yaml_loader)
_yaml.add_constructor(_level_tag, _wa_level_constructor, Loader=_yaml_loader)
_yaml.add_constructor(_cpu_mask_tag,
                      _wa_cpu_mask_constructor,
                      Loader=_yaml_loader)
_yaml.add_constructor(_mapping_tag, _wa_dict_constructor, Loader=_yaml_loader)


class yaml(object):
    @staticmethod
    def dump(o, wfh, *args, **kwargs):
        return _yaml.dump(o, wfh, *args, **kwargs)
Example #54
0
    def service_get(ctx, network, name):
        """
        Get details about a service in this profile.

        Prints a YAML document describing the service, its connectivity
        (paths to/from it) and its network/subnetwork/instance layout.
        """

        # See
        # https://stackoverflow.com/questions/16782112/can-pyyaml-dump-dict-items-in-non-alphabetical-order
        def represent_ordereddict(dumper, data):
            value = []

            for item_key, item_value in data.items():
                node_key = dumper.represent_data(item_key)
                node_value = dumper.represent_data(item_value)

                value.append((node_key, node_value))

            return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)

        yaml.add_representer(OrderedDict, represent_ordereddict)

        # Collect "who can reach this service / whom can it reach" by
        # scanning all paths in the same network.
        def get_paths_info_for_service(service):
            paths = ctx.obj['CLIENT'].paths.list()
            has_access_to = ["default-all-outgoing-allowed"]
            is_accessible_from = []
            for path in paths:
                if path.network.name != service.network.name:
                    continue
                if path.destination.name == service.name:
                    if path.source.name:
                        is_accessible_from.append(
                            "%s:%s:%s" %
                            (path.network.name, path.source.name, path.port))
                    else:
                        # Unnamed source: report its CIDR blocks instead.
                        cidr_blocks = [
                            subnetwork.cidr_block
                            for subnetwork in path.source.subnetworks
                        ]
                        cidr_blocks_string = ",".join(cidr_blocks)
                        is_accessible_from.append(
                            "external:%s:%s" % (cidr_blocks_string, path.port))
                elif path.source.name == service.name:
                    has_access_to.append(
                        "%s:%s:%s" %
                        (path.network.name, path.destination.name, path.port))
            return {
                "has_access_to": has_access_to,
                "is_accessible_from": is_accessible_from
            }

        service = get_service_for_cli(ctx, network, name)
        paths_info = get_paths_info_for_service(service)
        # Build the report with OrderedDicts so the YAML keeps this layout.
        service_info = OrderedDict()
        service_info['name'] = service.name
        service_info['has_access_to'] = paths_info['has_access_to']
        service_info['is_accessible_from'] = paths_info['is_accessible_from']
        network_info = OrderedDict()
        network_info['name'] = service.network.name
        network_info['id'] = service.network.network_id
        network_info['block'] = service.network.cidr_block
        network_info['region'] = service.network.region
        network_info['subnetworks'] = []
        service_info['network'] = network_info
        for subnetwork in service.subnetworks:
            subnetwork_info = OrderedDict()
            subnetwork_info['name'] = subnetwork.name
            subnetwork_info['id'] = subnetwork.subnetwork_id
            subnetwork_info['block'] = subnetwork.cidr_block
            subnetwork_info['region'] = subnetwork.region
            subnetwork_info['availability_zone'] = subnetwork.availability_zone
            subnetwork_info['instances'] = []
            for instance in subnetwork.instances:
                instance_info = OrderedDict()
                instance_info['id'] = instance.instance_id
                instance_info['public_ip'] = instance.public_ip
                instance_info['private_ip'] = instance.private_ip
                instance_info['state'] = instance.state
                instance_info['availability_zone'] = instance.availability_zone
                subnetwork_info["instances"].append(instance_info)
            service_info["network"]["subnetworks"].append(subnetwork_info)
        click.echo(yaml.dump(service_info, default_flow_style=False))
Example #55
0
# To ensure that the dicts are always output in the same order
# we setup a representation for dict objects and register it
# with the yaml class.
def represent_dict(self, data):
    # Python 2 only: the tuple-unpacking parameter below and sorting
    # dict.items() in place are not valid/possible on Python 3.
    def key_function((key, value)):
        # Prioritizes certain keys when sorting.
        prio = {"version": 0, "projects": 1, "repo": 2, "hash": 3}.get(key, 99)
        return (prio, key)

    items = data.items()
    items.sort(key=key_function)
    return self.represent_mapping(u'tag:yaml.org,2002:map', items)


yaml.add_representer(dict, represent_dict)


# sourced from
# http://stackoverflow.com/questions/25108581/python-yaml-dump-bad-indentation
def yaml_dump(dump, indentSize=2):
    stream = StringIO(dump)
    out = StringIO()
    pat = re.compile('(\s*)([^:]*)(:*)')
    last = None

    prefix = 0
    for s in stream:
        indent, key, colon = pat.match(s).groups()
        if indent == "" and key[0] != '-':
            prefix = 0
Example #56
0
def setup_yaml():
    """ https://stackoverflow.com/a/8661021 """
    def _ordered_map_representer(dumper, data):
        return dumper.represent_mapping('tag:yaml.org,2002:map', data.items())

    yaml.add_representer(OrderedDict, _ordered_map_representer)
Example #57
0
################################################################################


# Add a custom string representer to use block literals for multiline strings
def str_repr(dumper, data):
    """
    Represent a string scalar, switching to the block-literal style ('|')
    whenever the string spans more than one line.
    """
    is_multiline = len(data.splitlines()) > 1
    if is_multiline:
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data)


yaml.add_representer(str, str_repr)


class ArkaneSpecies(RMGObject):
    """
    A class for archiving an Arkane species including its statmech data into .yml files
    """
    # NOTE(review): this excerpt is truncated — the remaining __init__
    # parameters and the class body continue beyond the visible lines.
    def __init__(self,
                 species=None,  # species object to archive — TODO confirm expected type
                 conformer=None,
                 author='',
                 level_of_theory='',
                 model_chemistry='',
                 frequency_scale_factor=None,
                 use_hindered_rotors=None,
                 use_bond_corrections=None,
Example #58
0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client utilities."""

import uuid

import click
import yaml

from renku.api import LocalClient


def _uuid_representer(dumper, data):
    """Add UUID serializer for YAML."""
    return dumper.represent_str(str(data))


# Make yaml.dump able to serialize uuid.UUID values via the representer above.
yaml.add_representer(uuid.UUID, _uuid_representer)

# Click decorator injecting a shared LocalClient into commands;
# ensure=True makes click create the instance if one is not on the context.
pass_local_client = click.make_pass_decorator(LocalClient, ensure=True)
Example #59
0
        return "{}({!r})".format(self.__class__.__name__, self.__dict__)

    def __eq__(self, other: Any) -> bool:
        return (self.__class__ == other.__class__
                and
                self.__dict__ == other.__dict__)

    def one_line_str(self) -> str:
        """Compact one-line summary naming the class and its service name."""
        return '<{} for service_name={}>'.format(
            self.__class__.__name__, self.service_name())

    @staticmethod
    def yaml_representer(dumper: 'yaml.SafeDumper', data: 'ServiceSpec') -> Any:
        return dumper.represent_dict(data.to_json().items())


yaml.add_representer(ServiceSpec, ServiceSpec.yaml_representer)


class NFSServiceSpec(ServiceSpec):
    # NOTE(review): this excerpt is truncated — the super().__init__ call's
    # arguments and any further methods continue beyond the visible lines.
    def __init__(self,
                 service_type: str = 'nfs',
                 service_id: Optional[str] = None,
                 pool: Optional[str] = None,
                 namespace: Optional[str] = None,
                 placement: Optional[PlacementSpec] = None,
                 unmanaged: bool = False,
                 preview_only: bool = False,
                 config: Optional[Dict[str, str]] = None,
                 ):
        # Only 'nfs' is accepted; the service_type default must not be
        # overridden by callers.
        assert service_type == 'nfs'
        super(NFSServiceSpec, self).__init__(
Example #60
0
import logging
import yaml
from collections import OrderedDict
from ignition.service.framework import Service, Capability, interface
from ignition.service.logging import log_type
from ignition.model.progress_events import ResourceTransitionProgressEvent, BASE_EVENT_TYPE

# Allow OrderedDict to be dumped as YAML: represent it as a plain mapping
# node so no !!python/... tag appears in the output.
yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()))
# Register on SafeDumper as well, so yaml.safe_dump (used by the serializer
# below) also accepts OrderedDict.
yaml.add_representer(OrderedDict, lambda dumper, data: dumper.represent_mapping('tag:yaml.org,2002:map', data.items()), Dumper=yaml.SafeDumper)

# Module-level logger, following the standard per-module convention.
logger = logging.getLogger(__name__)

class ProgressEventLogSerializerCapability(Capability):
    """Capability interface for serializing a progress event to a string."""

    @interface
    def serialize(self, event):
        """Serialize ``event``; implementations return the serialized form
        (see YAMLProgressEventLogSerializer below)."""
        pass

class ProgressEventLogWriterCapability(Capability):
    """Capability interface for recording progress events."""

    @interface
    def add(self, event):
        """Record ``event``; no implementation is visible in this excerpt."""
        pass

class YAMLProgressEventLogSerializer(Service, ProgressEventLogSerializerCapability):

    def serialize(self, event):
        """Return the event (via its dict form) as a YAML document string."""
        return yaml.safe_dump(event.to_dict())