def save_yaml(
    dictionary: Dict,
    path: str,
    encoding: str = "utf-8",
    pretty: bool = False,
    sortkeys: bool = False,
) -> None:
    """Save dictionary to YAML file preserving order if it is an OrderedDict.

    Args:
        dictionary (Dict): Python dictionary to save
        path (str): Path to YAML file
        encoding (str): Encoding of file. Defaults to utf-8.
        pretty (bool): Whether to pretty print. Defaults to False.
        sortkeys (bool): Whether to sort dictionary keys. Defaults to False.

    Returns:
        None
    """
    with open(path, "w", encoding=encoding) as f:
        # NOTE(review): `representers` is a module-level two-level lookup
        # keyed as [sortkeys][pretty]; presumably each entry is a ruamel
        # Representer subclass — confirm against the module header.
        representer = representers[sortkeys][pretty]
        yaml = YAML(typ="rt")  # round-trip mode keeps mapping order
        yaml.Representer = representer
        # Serialize OrderedDict like a plain mapping (no !!omap tag).
        add_representer(OrderedDict, representer.represent_dict,
                        representer=representer)
        if pretty:
            yaml.indent(offset=2)
        else:
            # None lets ruamel choose flow/block style per node.
            yaml.default_flow_style = None
        # How None is rendered comes from the selected representer class.
        yaml.representer.add_representer(type(None), representer.represent_none)
        yaml.dump(dictionary, f)
def setup_yaml():
    """Register a representer so OrderedDict dumps as a plain YAML mapping.

    Based on https://stackoverflow.com/a/8661021.
    """
    def _ordered_mapping(dumper, mapping):
        return dumper.represent_mapping('tag:yaml.org,2002:map', mapping.items())

    yaml.add_representer(OrderedDict, _ordered_mapping)
def _enable_ordered_dict_yaml_dumping() -> None:
    """Dump ``OrderedDict`` instances as ordinary mappings, keeping key order."""
    # Reusing the round-trip dict representer avoids the !!omap tag.
    target_cls = OrderedDict
    as_plain_dict = RoundTripRepresenter.represent_dict
    yaml.add_representer(target_cls, as_plain_dict,
                         representer=RoundTripRepresenter)
def _write_pb_to_yaml(pb, output):
    """Serialize protobuf message *pb* into file *output* as YAML."""
    # Representer (from https://stackoverflow.com/questions/16782112) lets
    # yaml.dump handle OrderedDict.
    yaml.add_representer(OrderedDict, _represent_ordereddict)
    as_json = MessageToJson(pb)
    ordered = _order_dict(json.loads(as_json))
    with open(output, 'w') as outfile:
        yaml.dump(ordered, outfile, default_flow_style=False)
def write_parameter_output_file(filename, parameter_dict):
    """Serialize *parameter_dict* into *filename* as block-style YAML."""
    # Teach the dumper to emit an OrderedDict exactly like a regular dict.
    represent_plain = yaml.representer.SafeRepresenter.represent_dict
    yaml.add_representer(OrderedDict, represent_plain)
    with open(filename, 'w') as fh:
        # default_flow_style=False keeps the standard "key: value" block
        # layout; True would make the output look like JSON, so leave it off.
        yaml.dump(parameter_dict, fh, default_flow_style=False)
def setup_yaml():
    """Make OrderedDict serialize as a plain YAML mapping.

    See http://stackoverflow.com/a/8661021
    """
    def _as_mapping(dumper, ordered):
        return dumper.represent_mapping('tag:yaml.org,2002:map', ordered.items())

    yaml3ed.add_representer(OrderedDict, _as_mapping)
def _enable_ordered_dict_yaml_dumping() -> None:
    """Dump ``OrderedDict`` values as block-style mappings, preserving key order."""
    def _ordered_mapping_rep(dumper: yaml.Representer, _data: Dict[Any, Any]) -> Any:
        # flow_style=False forces block layout instead of inline {k: v}.
        return dumper.represent_mapping("tag:yaml.org,2002:map", _data.items(), flow_style=False)

    yaml.add_representer(OrderedDict, _ordered_mapping_rep)
def setup_yaml():
    """Register OrderedDict with yaml3ed so it dumps as an ordinary mapping.

    References:
        https://stackoverflow.com/a/31609484/4709370
        http://stackoverflow.com/a/8661021
    """
    def _ordered_as_map(dmp, od):
        return dmp.represent_mapping('tag:yaml.org,2002:map', od.items())

    yaml3ed.add_representer(OrderedDict, _ordered_as_map)
def build_index():
    """Create the index of all (YAML) sheets available."""
    from mathmaker import settings
    from ruamel import yaml
    # Below snippet from https://stackoverflow.com/a/21048064/3926735
    # to load roadmap.yaml using OrderedDict instead of dict
    _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG

    def dict_representer(dumper, data):
        # Emit OrderedDict as a plain mapping (no !!omap tag).
        return dumper.represent_dict(data.items())

    def dict_constructor(loader, node):
        # Build mappings as OrderedDict to keep key order on load.
        return OrderedDict(loader.construct_pairs(node))

    # NOTE(review): add_constructor targets the default loader, but the load
    # below uses yaml.safe_load (SafeLoader) — the constructor may never
    # apply; the explicit OrderedDict(...) wrap below compensates. Confirm.
    yaml.add_representer(OrderedDict, dict_representer)
    yaml.add_constructor(_mapping_tag, dict_constructor)
    index = dict()
    themes_dirs = [x for x in os.listdir(settings.frameworksdir)
                   if os.path.isdir(settings.frameworksdir + x)]
    for theme in themes_dirs:
        folder_path = os.path.join(settings.frameworksdir, theme)
        folder_files = glob(folder_path + '/*.yaml')
        # NOTE(review): the loop variable shadows the outer `folder_path`.
        for folder_path in folder_files:
            subtheme = os.path.splitext(os.path.basename(folder_path))[0]
            with open(folder_path) as f:
                loaded_data = yaml.safe_load(f)
                if loaded_data is not None:
                    folder = OrderedDict(loaded_data)
                    for sheet_name in folder:
                        # Directive name, e.g. "<subtheme>_<sheet>".
                        directive = '_'.join([subtheme, sheet_name])
                        index[directive] = (theme, subtheme, sheet_name)
                        # Automatic add possibly missing sheet integration test
                        sheet_test_dir = Path(os.path.join(settings.testsdir,
                                                           'integration',
                                                           theme, subtheme))
                        file_name = subtheme + '_' + sheet_name
                        sheet_file = Path(os.path.join(sheet_test_dir,
                                                       'test_{}.py'
                                                       .format(file_name)))
                        if not sheet_file.is_file():
                            sheet_test_dir.mkdir(parents=True, exist_ok=True)
                            template = TESTFILE_TEMPLATE
                            if (theme == 'mental_calculation'
                                    and not sheet_name.startswith('W')):
                                template += \
                                    MENTAL_CALCULATION_TESTFILE_TEMPLATE_ADDENDUM
                            with open(sheet_file, 'w') as f:
                                f.write(template.format(theme=theme,
                                                        subtheme=subtheme,
                                                        sheet_name=sheet_name))
    # Persist the directive -> (theme, subtheme, sheet) index as JSON.
    with open(settings.index_path, 'w') as f:
        json.dump(index, f, indent=4)
        f.write('\n')
def dump(filename):
    """Write every monitor, normalized and sorted by name, to *filename* as YAML."""
    yaml.add_representer(OrderedDict, ordered_dict_representer)
    yaml.add_representer(LiteralString, literal_string_representer)
    monitors = sorted((normalize(m) for m in read_all()),
                      key=lambda m: m['name'])
    print('INFO: writing {} monitors to {}.'.format(len(monitors), filename))
    with open(filename, 'w') as out:
        yaml.dump({'monitors': monitors}, stream=out, indent=2,
                  default_flow_style=False)
def apply_before_dump(data: Any, rule: FormattingRule, text: str, rules: dict) -> Any:
    """Install empty-value representers before dumping, based on *rule*.

    Returns *data* unchanged when one null style covers everything, otherwise
    a version with empty values replaced by Null/EmptyNull markers.
    """
    # Start from the project-wide defaults for both options.
    forbid_in_block_mappings = DEFAULT.get("forbid-in-block-mappings")
    forbid_in_flow_mappings = DEFAULT.get("forbid-in-flow-mappings")
    if rule is not None:
        # NOTE(review): when `rule` is falsy (e.g. False or {}), these become
        # `rule` itself rather than the defaults — the branches below rely on
        # that truthiness; confirm it is intentional.
        forbid_in_block_mappings = rule and rule.get(
            "forbid-in-block-mappings", forbid_in_block_mappings)
        forbid_in_flow_mappings = rule and rule.get("forbid-in-flow-mappings",
                                                    forbid_in_flow_mappings)
    if (rule is None or rule) and (forbid_in_block_mappings
                                   and forbid_in_flow_mappings):
        # no need to go through all of them
        # just represent them as `null`
        add_representer(type(None), represent_null, Dumper=RoundTripDumper)
        return data
    elif (rule is not None and not rule) or (not forbid_in_flow_mappings
                                             and not forbid_in_block_mappings):
        # no need to go through all of them
        # just represent them as ``
        add_representer(type(None), represent_empty_null, Dumper=RoundTripDumper)
        return data
    # Mixed case: walk the data and tag empty values individually.
    add_representer(Null, represent_null, Dumper=RoundTripDumper)
    add_representer(EmptyNull, represent_empty_null, Dumper=RoundTripDumper)
    return replace_empty_values(data, forbid_in_block_mappings,
                                forbid_in_flow_mappings)
def dump_yaml(d: dict):
    """Dump *d* to stdout as round-trip-style YAML.

    If *d* is a graphql ``ExecutionResult`` it is first converted to a plain
    dict via ``to_dict()``; any other value is dumped as-is.
    """
    import sys
    from collections import OrderedDict

    import ruamel.yaml as yaml
    from ruamel.yaml import RoundTripRepresenter as RTR, RoundTripDumper as RTD

    # graphql is only needed when an ExecutionResult is passed; keep it
    # optional so plain dicts can be dumped without graphql installed.
    try:
        from graphql.execution.base import ExecutionResult
    except ImportError:
        ExecutionResult = None

    if ExecutionResult is not None and isinstance(d, ExecutionResult):
        d = d.to_dict()
    # Render OrderedDicts as plain mappings instead of !!omap sequences.
    yaml.add_representer(OrderedDict, RTR.represent_dict, Dumper=RTD)
    yaml.dump(d, sys.stdout, Dumper=RTD)
def register_xxx(**kw):
    """Register the custom !xxx tag with ruamel's round-trip loader/dumper."""
    # NOTE(review): `kw` is accepted but unused here — confirm callers.
    from ruamel import yaml

    class XXX(yaml.comments.CommentedMap):
        @staticmethod
        def yaml_dump(dumper, data):
            # Represent an XXX instance as a mapping tagged !xxx.
            return dumper.represent_mapping(u'!xxx', data)

        @classmethod
        def yaml_load(cls, constructor, node):
            # Two-step generator construction: yield the empty object first so
            # ruamel can resolve anchors/recursive references, then fill it.
            data = cls()
            yield data
            constructor.construct_mapping(node, data)

    yaml.add_constructor(u'!xxx', XXX.yaml_load, constructor=yaml.RoundTripConstructor)
    yaml.add_representer(XXX, XXX.yaml_dump, representer=yaml.RoundTripRepresenter)
def ordered_dump(data, stream=None, *args, **kwargs):
    """Dump *data* with IndentedDumper; returns the text unless *stream* is given."""
    dumper = IndentedDumper
    # Template expansion into a project would otherwise make the dumper emit
    # YAML aliases referencing the expanded jobs, so disable aliases entirely.
    dumper.ignore_aliases = lambda self, data: True
    yaml.add_representer(collections.OrderedDict, project_representer,
                         Dumper=IndentedDumper)
    dumped = yaml.dump(data, default_flow_style=False, Dumper=dumper,
                       width=80, *args, **kwargs)
    # Blank line between top-level sequence entries for readability.
    output = dumped.replace('\n-', '\n\n-')
    if not stream:
        return output
    stream.write(output)
def dump_resource(self, item):
    """Write a supported OpenShift resource *item* to a YAML file.

    Returns a descriptor dict for the written file, or None when the item is
    filtered out or its kind is not supported.
    """
    # https://www.programcreek.com/python/example/104725/yaml.add_representer
    def str_presenter(dumper, data):
        # Rewrite "$ {x}"-style parameter refs ("${x}" -> "{x}") before
        # emitting the scalar — presumably for later template substitution;
        # confirm against the consuming templating step.
        pattern = r'(.*)\$({\w*})(.*)'
        match_obj = re.match(pattern, data)
        if match_obj:
            data = "{0}{{{1}}}{2}".format(match_obj.group(1),
                                          match_obj.group(2),
                                          match_obj.group(3))
        return dumper.represent_scalar('tag:yaml.org,2002:str', data.strip())

    # Resource kinds this exporter knows how to persist.
    managed_resources = [
        'Route', 'DeploymentConfig', 'Service', 'PersistentVolumeClaim'
    ]
    metadata_name_ = item['metadata']['name']
    # Optional filter: only keep resources whose name contains the app name.
    if self.filter_app_resource and not self.application_name in metadata_name_:
        return None
    if item['kind'] in managed_resources:
        print("SUPPORTED {}/{}".format(item['kind'], metadata_name_))
        registry = "docker-registry.default.svc:5000"
        name = "{0}-{1}".format(item['kind'], metadata_name_).lower()
        filename = "{1}/{0}.yaml".format(name, self.work_directory).lower()
        if item['kind'] == 'DeploymentConfig':
            # Rewrite the container image to the internal registry with a
            # version placeholder, and write a .bak file instead.
            image = "{}/coolstore-dev/{}:@APP_VERSION@".format(
                registry,
                item['spec']['template']['spec']['containers'][0]['image'])
            print('image is {} '.format(image))
            item['spec']['template']['spec']['containers'][0][
                'image'] = image
            filename = "{1}/{0}.yaml.bak".format(
                name, self.work_directory).lower()
        with open(filename, 'w') as outfile:
            # Global registration: affects all subsequent str dumps too.
            yaml.add_representer(str, str_presenter)
            yaml.dump(item, outfile, default_flow_style=False)
        return {
            'type': 'openshift.ResourcesFile',
            'name': name,
            'file': filename
        }
    else:
        print("NOT SUPPORTED {}/{}".format(item['kind'], metadata_name_))
        return None
def serialize(cls, data):
    """Render *data* as YAML text, keeping key order and writing None as empty."""
    import yaml

    def _empty_none(dumper, _value):
        # Emit None as nothing at all rather than the literal "null".
        return dumper.represent_scalar('tag:yaml.org,2002:null', '')

    yaml.add_representer(type(None), _empty_none)

    class Dumper(yaml.Dumper):
        def increase_indent(self, flow=False, indentless=False):
            # Optionally force indented block sequences, per project settings.
            if settings.INDENT_YAML_BLOCKS:
                indentless = False
            return super().increase_indent(flow=flow, indentless=indentless)

    return yaml.dump(data, Dumper=Dumper, sort_keys=False,
                     default_flow_style=False)
def main():
    """Main application entry point."""
    if len(sys.argv) != 3:
        print("Usage: yc-calc <input-file> <output-file>")
        sys.exit(1)
    infile = sys.argv[1]
    outfile = sys.argv[2]
    # Load mappings/sequences as CalcDict/CalcList and dump them back as
    # ordinary YAML nodes.
    mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
    sequence_tag = yaml.resolver.BaseResolver.DEFAULT_SEQUENCE_TAG
    yaml.add_constructor(mapping_tag, dict_constructor,
                         Loader=RoundTripConstructor)
    yaml.add_constructor(sequence_tag, list_constructor,
                         Loader=RoundTripConstructor)
    yaml.add_representer(CalcDict, dict_representer,
                         Dumper=RoundTripRepresenter)
    yaml.add_representer(CalcList, list_representer,
                         Dumper=RoundTripRepresenter)
    try:
        with open(infile) as infp:
            top = YAML().load(infp)
            if not isinstance(top, CalcDict):
                type_name = type(top).__name__
                err("Top level element should be dict not {0}".format(type_name))
            defs = {}
            defs_str = top.get("DEFS", "")
            try:
                # SECURITY NOTE(review): executes arbitrary Python from the
                # input file's DEFS key — only run trusted input files.
                exec(defs_str, defs)
            except Exception as exc:
                err("Error executing DEFS: {0}".format(exc))
            CalcContainer.set_top(defs, top)
            write(top, outfile)
    except IOError as exc:
        err("Error opening file: {0}".format(exc))
    except yaml.YAMLError as exc:
        err("Error parsing input: {0}".format(exc))
def save(self, fname, encoding='utf-8'):
    """Save the workflow as a CWL file runnable by a CWL runner.

    Args:
        fname (str): file to save the workflow to; missing parent
            directories are created.
        encoding (str): file encoding to use (default: utf-8).
    """
    target_dir = os.path.dirname(os.path.abspath(fname))
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    yaml.add_representer(str, str_presenter)
    with codecs.open(fname, 'wb', encoding=encoding) as out:
        out.write('#!/usr/bin/env cwl-runner\n')
        out.write(yaml.dump(self.to_obj(), Dumper=yaml.RoundTripDumper))
def ordered_dump(data, stream=None, *args, **kwargs):
    """Serialize *data* via IndentedDumper, blank-line separating list items."""
    dumper = IndentedDumper
    # Template expansion into a project would otherwise yield YAML references
    # to the expanded jobs, so alias emission is switched off.
    dumper.ignore_aliases = lambda self, data: True
    yaml.add_representer(collections.OrderedDict, project_representer,
                         Dumper=IndentedDumper)
    raw = yaml.dump(data, default_flow_style=False, Dumper=dumper, width=80,
                    *args, **kwargs)
    output = raw.replace('\n-', '\n\n-')
    if stream:
        stream.write(output)
        return None
    return output
def _represents(func):
    # Decorator: register `func` as the YAML representer for `cls` (captured
    # from the enclosing scope, not visible here — confirm against the outer
    # definition) and return it unchanged so the decorated name still refers
    # to the original function.
    yaml.add_representer(cls, func)
    return func
from SimulationFramework.Codes.MAD8.MAD8 import * import progressbar from munch import Munch, unmunchify _mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG def dict_representer(dumper, data): return dumper.represent_dict(iter(list(data.items()))) def dict_constructor(loader, node): return OrderedDict(loader.construct_pairs(node)) yaml.add_representer(OrderedDict, dict_representer) yaml.add_constructor(_mapping_tag, dict_constructor) class Framework(Munch): def __init__(self, directory='test', master_lattice=None, overwrite=None, runname='CLARA_240', clean=False, verbose=True, sddsindex=0): super(Framework, self).__init__() # global master_lattice_location self.global_parameters = {
from __future__ import absolute_import
from __future__ import division

import collections

from googlecloudsdk.core import exceptions
from ruamel import yaml
from typing import Any, AnyStr, Generator, IO, Iterable, Optional, Union  # pylint: disable=unused-import, for pytype

# YAML unfortunately uses a bunch of global class state for this kind of stuff.
# We don't have to do it at import but the other option would be to do it every
# time we try to dump something (which is worse for performance than just
# doing it once). This allows OrderedDicts to be serialized as if they were
# normal dicts.
yaml.add_representer(
    collections.OrderedDict,
    yaml.dumper.SafeRepresenter.represent_dict,
    Dumper=yaml.dumper.SafeDumper)


class Error(exceptions.Error):
  """Top level error for this module.

  Attributes:
    inner_error: Exception, The original exception that is being wrapped. This
      will always be populated.
    file: str, The path to the thing being loaded (if applicable). This is not
      necessarily a literal file (it could be a URL or any hint the calling
      code passes in). It should only be used for more descriptive error
      messages.
  """
def setup_yaml_customobjects():
    """Wire up the custom !yaml tag: constructor for loading, representer for dumping."""
    yaml.add_constructor(u'!yaml', constructor_yamlfile)
    yaml.add_representer(YAMLFile, representer_yamlfile)
""" return len(s.splitlines()) > 1 def str_presenter(dmpr, data): """Return correct str_presenter to write multiple lines to a yaml field. Source: http://stackoverflow.com/a/33300001 """ if is_multiline(data): return dmpr.represent_scalar('tag:yaml.org,2002:str', data, style='|') return dmpr.represent_scalar('tag:yaml.org,2002:str', data) def save_yaml(fname, wf, inline, pack, relpath, wd, encoding='utf-8'): with codecs.open(fname, 'wb', encoding=encoding) as yaml_file: yaml_file.write('#!/usr/bin/env cwl-runner\n') yaml_file.write( yaml.dump(wf.to_obj(inline=inline, pack=pack, relpath=relpath, wd=wd), Dumper=yaml.RoundTripDumper)) yaml.add_representer(str, str_presenter, Dumper=yaml.RoundTripDumper) yaml.add_representer(Reference, reference_presenter, Dumper=yaml.RoundTripDumper)
for :class:`Thing` instances. """ # https://stackoverflow.com/questions/16782112/can-pyyaml-dump-dict-items-in-non-alphabetical-order value = [] for item_key, item_value in thing.items(): node_key = rep.represent_data(item_key) node_value = rep.represent_data(item_value) value.append((node_key, node_value)) return yaml.nodes.MappingNode( yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, value) yaml.add_representer(Thing, _thing_to_yaml) def yaml_dump(obj, stream=None): """Dump an object as YAML to an output stream. The default destination, if ``stream`` is omitted or ``None``, is ``sys.stdout``. """ return yaml.dump(obj, stream or sys.stdout, default_flow_style=False, block_seq_indent=2)
import collections from googlecloudsdk.core import exceptions from googlecloudsdk.core.util import files from googlecloudsdk.core.util import typing # pylint: disable=unused-import from ruamel import yaml # YAML unfortunately uses a bunch of global class state for this kind of stuff. # We don't have to do it at import but the other option would be to do it every # time we try to dump something (which is worse for performance that just # doing it once). This allows OrderedDicts to be serialized as if they were # normal dicts. yaml.add_representer(collections.OrderedDict, yaml.dumper.SafeRepresenter.represent_dict, Dumper=yaml.dumper.SafeDumper) yaml.add_representer(collections.OrderedDict, yaml.dumper.RoundTripRepresenter.represent_dict, Dumper=yaml.dumper.RoundTripDumper) class Error(exceptions.Error): """Top level error for this module. Attributes: inner_error: Exception, The original exception that is being wrapped. This will always be populated. file: str, The path to the thing being loaded (if applicable). This is not necessarily a literal file (it could be a URL or any hint the calling code passes in). It should only be used for more descriptive error
def convert(xml):
    """Serialize the parsed document to block-style YAML without line wrapping."""
    add_representer(OrderedDict, represent_ordereddict)
    no_wrap = float("inf")  # unbounded width: never fold long lines
    return dump(xml, width=no_wrap, default_flow_style=False)
# - Run this script import sys sys.path.insert(1, '../_ext') from collections import OrderedDict from utils import slugify import json from ruamel import yaml from ruamel.yaml.representer import RoundTripRepresenter import markdown yamldoc = [] # Prevent OrderedDict from being presented as YAML OMAP - we just want a regular YAML dict. yaml.add_representer(OrderedDict, RoundTripRepresenter.represent_dict, representer=RoundTripRepresenter) def convert_to_yaml(year, series, slug, pretalx_results, yaml_filename): with open(pretalx_results) as json_file: talks = json.load(json_file) for index, talk in enumerate(talks['results']): slug = slugify(talk['title'] + '-' + talk['speakers'][0]['name']) yamldoc.append( OrderedDict([ ('title', talk['title']), ('slug', slug), ('series', series), ('series_slug', slug),
return dumper.represent_scalar( yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG, data) def carry_over_compose_document(self): self.get_event() node = self.compose_node(None, None) self.get_event() # this prevents cleaning of anchors between documents in **one stream** # self.anchors = {} return node yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor) yaml.add_representer(collections.OrderedDict, dict_representer) yaml.add_representer(str, literal_str_representer) yaml.composer.Composer.compose_document = carry_over_compose_document @app.context_processor def inject_sysinfo(): return dict(sysinfo=dict(build=__version__)) @app.context_processor def inject_user(): return dict(user=session['me']) if 'me' in session else dict(user=None) @app.template_filter('maxlength')
import json from googlecloudsdk.core import exceptions from googlecloudsdk.core.util import files from googlecloudsdk.core.util import typing # pylint: disable=unused-import from ruamel import yaml import six # YAML unfortunately uses a bunch of global class state for this kind of stuff. # We don't have to do it at import but the other option would be to do it every # time we try to dump something (which is worse for performance that just # doing it once). This allows OrderedDicts to be serialized as if they were # normal dicts. yaml.add_representer(collections.OrderedDict, yaml.dumper.SafeRepresenter.represent_dict, Dumper=yaml.dumper.SafeDumper) yaml.add_representer(collections.OrderedDict, yaml.dumper.RoundTripRepresenter.represent_dict, Dumper=yaml.dumper.RoundTripDumper) # Always output None as "null", instead of just empty. yaml.add_representer( type(None), lambda self, _: self.represent_scalar('tag:yaml.org,2002:null', 'null'), Dumper=yaml.dumper.RoundTripDumper) class Error(exceptions.Error): """Top level error for this module.
return dumper.represent_mapping(u'tag:yaml.org,2002:seq', data, flow_style=False) class flowmap(dict): pass def flowmap_rep(dumper, data): return dumper.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True) yaml.add_representer(blockseq, blockseq_rep) yaml.add_representer(flowmap, flowmap_rep) def dump(tileset, *xargs, **pargs): """ stxg.dump( tileset, ... ) is a wrapper for yaml.dump that slightly prettifies the output taking into account what tilesets usually look like. It passes all arguments onto yaml.dump. """ # Make tile and bond sections flow-style tileset['tiles'] = [flowmap(x) for x in tileset['tiles']] tileset['bonds'] = [flowmap(x) for x in tileset['bonds']] # If xgrowargs is there, make it block-style
import itertools import numpy as np import scipy.misc import png import ruamel.yaml as yaml # import yaml # Set representation of the floating point numbers in YAML files def float_representer(dumper, value): text = '{0:.8f}'.format(value) return dumper.represent_scalar(u'tag:yaml.org,2002:float', text) yaml.add_representer(float, float_representer) def load_yaml(path): with open(path, 'r') as f: content = yaml.load(f, Loader=yaml.CLoader) return content def save_yaml(path, content): with open(path, 'w') as f: yaml.dump(content, f, Dumper=yaml.CDumper, width=10000) def load_cam_params(path): with open(path, 'r') as f:
import uuid import collections import copy import fileinput import csv from yaml.representer import Representer from ruamel import yaml import pprint pp = pprint.PrettyPrinter(indent=4) yaml.add_representer(collections.defaultdict, Representer.represent_dict) class PacConfigEditor: def __init__(self, yaml_file): with open(yaml_file, 'r') as f: self.doc = yaml.load(f.read(), yaml.RoundTripLoader) def get_node_by_id(self, node_id): return self.doc['environments'][node_id] def get_nodes_by_name(self, name): return [k for k, n in self.doc['environments'].iteritems() if 'name' in n and n['name'] == name] def get_parents(self, node_id):
return os.path.exists(file_path) def read_local_file(filename): file_path = os.path.join(".local", filename) # Checking if file exists abs_file_path = os.path.join( os.path.dirname(inspect.getfile(sys._getframe(1))), file_path) # If not exists read from home directory if not file_exists(abs_file_path): init_obj = get_init_data() file_path = os.path.join(init_obj["LOCAL_DIR"].get("location"), filename) return read_file(file_path, 0).rstrip() # To remove \n, use rstrip return read_file(file_path, depth=2) def str_presenter(dumper, data): """For handling multiline strings""" if len(data.splitlines()) > 1: # check for multiline string return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|") return dumper.represent_scalar("tag:yaml.org,2002:str", data) yaml.add_representer(str, str_presenter)
# -*- coding: utf-8 -*- ''' output ''' from ruamel import yaml def str_presenter(dumper, data): str_tag = 'tag:yaml.org,2002:str' if len(data.splitlines()) > 1: return dumper.represent_scalar(str_tag, data, style='|') return dumper.represent_scalar(str_tag, data) yaml.add_representer(str, str_presenter) def list_presenter(dumper, data): list_tag = 'tag:yaml.org,2002:seq' if len(data) > 1: if all([isinstance(item, str) for item in data]): return dumper.represent_sequence(list_tag, data, flow_style=False) return dumper.represent_sequence(list_tag, data) yaml.add_representer(list, list_presenter) def yaml_format(obj): class MyDumper(yaml.Dumper):
if sys.version_info < (3, ): text_type = unicode # noqa: F821 else: text_type = str class folded_str(str): pass def represent_folded(dumper, data): # type: (yaml.Dumper, folded_str) -> yaml.Node return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='>') yaml.add_representer(folded_str, represent_folded) def represent_student(dumper, data): # type: (yaml.Dumper, Student) -> yaml.Node return dumper.represent_mapping(u'!Student', { u'id': data.id, u'name': data.name, u'email': data.email }) def construct_student(loader, node): # type: (yaml.Loader, yaml.Node) -> Student return Student(**loader.construct_mapping(node))
---------------------------------------------------------------------------- "THE BEER-WARE LICENSE" (Revision 42): <*****@*****.**> wrote this file. As long as you retain this notice you can do whatever you want with this stuff. If we meet some day, and you think this stuff is worth it, you can buy me a beer in return. ---------------------------------------------------------------------------- """ import re, ruamel.yaml as yaml from scriptler.model import Source, Script, Config def _represent_model(t): def represent_subclass(dumper, data): return dumper.represent_mapping(data.__class__.__name__, dict(data)) return represent_subclass yaml.add_representer(Source, _represent_model(Source)) yaml.add_representer(Script, _represent_model(Script)) yaml.add_representer(Config, _represent_model(Config)) def load(stream): return yaml.safe_load(stream) def dump(dictionary, indent=2): dump = yaml.dump(dictionary, indent=indent, default_flow_style=False) dump = re.sub(r'!<.*>', '', dump) return dump.strip()