def write_dictionaries_into_yaml(yaml_dict,output_file):
    """Dump a list of Kubernetes manifest dicts to *output_file* as multi-doc YAML.

    String values of Ingress annotations are wrapped in ``QuotedString`` so the
    custom representer emits them quoted; the
    ``ingress.citrix.com/secure-backend`` annotation is deliberately left as-is.

    NOTE(review): relies on module globals ``logger`` and ``tier1_ingress_name``
    being set by the caller — confirm.
    """
    # Make QuotedString values dump as quoted scalars.
    yaml.add_representer(QuotedString, quoted_scalar)
    if(yaml_dict):
        with open(output_file, 'w') as stream_write:
            for y in yaml_dict:
                if y["kind"] == "Ingress":
                    if y["metadata"]["annotations"] is not None:
                        for k,v in y["metadata"]["annotations"].items():
                            # secure-backend must keep its original (unquoted) form.
                            if k == "ingress.citrix.com/secure-backend":
                                continue
                            y["metadata"]["annotations"][k] = QuotedString(v)
            stream_write.write(yaml.dump_all(yaml_dict, default_flow_style=False, sort_keys=False))
            logger.info("Please note Tier-1 ADC VPX ingress "+tier1_ingress_name+" is created with basic config. Please edit it as per your requirements")
            logger.info("ServiceMesh Lite YAMLs are created and is present in \"smlite-all-in-one.yaml\" file.")
    def handle(self, **options):
        """Deduplicate an OpenAPI spec against a shared "common" spec.

        Downloads the common spec from ``settings.COMMON_SPEC``, removes any
        component in the local spec that is byte-identical to the common one,
        rewrites internal ``$ref``s to point at the common spec URL, and writes
        the result to the ``output`` path.
        """
        source = options["api-spec"]
        output = options["output"]
        common_url = settings.COMMON_SPEC
        try:
            response = requests.get(common_url)
            response.raise_for_status()
            common_yaml = response.text
        except requests.exceptions.RequestException:
            # NOTE(review): a network failure aborts silently with no output file — confirm intended.
            return

        common_spec = yaml.safe_load(common_yaml)
        common_components = common_spec["components"]

        with open(source, "r", encoding="utf8") as infile:
            spec = yaml.safe_load(infile)
            components = spec["components"]
            # Maps internal "#/components/<scope>/<item>" refs to their common-spec URLs.
            refs = {}

            for scope, scope_items in components.items():
                if scope not in common_components:
                    continue

                # Iterate a copy: items may be deleted from the live dict below.
                for item, item_spec in scope_items.copy().items():
                    if item not in common_components[scope]:
                        continue

                    common_item_spec = common_components[scope][item]
                    # Only replace when the definitions match exactly.
                    if item_spec == common_item_spec:
                        # add ref to replace
                        ref = f"#/components/{scope}/{item}"
                        refs[ref] = f"{common_url}{ref}"

                        # remove item from internal components
                        del components[scope][item]

            # remove empty components
            for scope, scope_items in components.copy().items():
                if not scope_items:
                    del components[scope]

            # replace all refs
            replace_refs(spec, refs)

        with open(output, "w", encoding="utf8") as outfile:
            yaml.add_representer(QuotedString, quoted_scalar)
            yaml.dump(spec, outfile, default_flow_style=False)
Ejemplo n.º 3
0
    def _setattr_to_namespace(namespace, display_yaml_attr_name, key, value):
        """Set *key* on *namespace*, mirroring displayable values into its display-yaml dict.

        Raises ``RuntimeError`` when overwriting an existing option unless the
        enclosing-scope ``overwriting`` flag is set.

        NOTE(review): ``overwriting``, ``options``, ``yaml_specialized_superclasses``
        and ``yaml_specialized_classes`` come from an enclosing scope not visible
        here — confirm their semantics against the caller.
        """
        if hasattr(namespace, key) and not overwriting:
            raise RuntimeError("cannot overwrite config option '{}'".format(key))

        display_yaml = getattr(namespace, display_yaml_attr_name, {})
        if options.options_parsed() and key in display_yaml and not key.startswith('_'):
            # `display_yaml` being `None` means that the config is not yet
            # parsed, and that things are still moving.

            # Somehow yaml-ing a plain torch.nn.Module or a torch.distribution.Distribution
            # breaks with a weird error similar to https://github.com/pytorch/pytorch/issues/11172
            # Register a one-off representer per concrete class, exactly once.
            if isinstance(value, yaml_specialized_superclasses) and value.__class__ not in yaml_specialized_classes:
                yaml.add_representer(value.__class__, yaml_module_representer)
                yaml_specialized_classes.add(value.__class__)

            display_yaml[key] = value

        return super(namespace.__class__, namespace).__setattr__(key, value)
Ejemplo n.º 4
0
                        #print("Decoding %r" % (d))
                        # Texture2D objects are flipped
                        img = ImageOps.flip(image)
                        # PIL has no method to write to a string :/
                        output = BytesIO()
                        img.save(output, format="png")
                        self.write_to_file(
                            filename, output.getvalue(), mode="wb")


def asset_representer(dumper, data):
    """Represent an Asset as a ``!asset``-tagged scalar holding its name."""
    tag = "!asset"
    return dumper.represent_scalar(tag, data.name)


yaml.add_representer(Asset, asset_representer)


def objectpointer_representer(dumper, data):
    """Represent an ObjectPointer as a YAML sequence [path_id, file_id]."""
    str_tag = u'tag:yaml.org,2002:str'
    path_node = yaml.ScalarNode(tag=str_tag, value=str(data.path_id))
    file_node = yaml.ScalarNode(tag=str_tag, value=str(data.file_id))
    return yaml.SequenceNode(tag=u'tag:yaml.org,2002:seq', value=[path_node, file_node])


    # return yaml.ScalarNode(tag=u'tag:yaml.org,2002:str', value="[" + str(data.file_id)+","+ str(data.path_id)+"]")
    # return dumper.represent_sequence("!PPtr", [data.file_id, data.path_id])
# Register the sequence representer for ObjectPointer instances.
yaml.add_representer(ObjectPointer, objectpointer_representer)


def unityobj_representer(dumper, data):
    """Represent a unitypack object as a mapping tagged with its class name."""
    tag = "!unitypack:%s" % data.__class__.__name__
    return dumper.represent_mapping(tag, data._obj)
Ejemplo n.º 5
0
        # create "opener" (OpenerDirector instance)
        opener = urllib2.build_opener(handler)
    else:
        opener = urllib2.build_opener()

    res = opener.open(url, data=data)

    headers = res.info()

    with open(filename, "wb") as fp:
        fp.write(res.read())

    return filename, headers


# Python 2/3 compatibility shims for JSON errors and text types.
try:
    from json import JSONDecodeError
except ImportError:
    # Python 2
    JSONDecodeError = ValueError

try:
    text_type = unicode
except NameError:
    text_type = str
else:
    # Python 2 only: make oyaml emit unicode values as plain str scalars.
    oyaml.add_representer(
        unicode,
        lambda d, s: oyaml.ScalarNode(tag="tag:yaml.org,2002:str", value=s))
Ejemplo n.º 6
0
def parse(path, code_only, format, profiler, bench, **kwargs):
    """Parse SQL files and just spit out the result.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    # Initialise the benchmarker
    bencher = BenchIt()  # starts the timer
    c = get_config(**kwargs)
    # We don't want anything else to be logged if we want a yaml output
    lnt = get_linter(c, silent=format in ('json', 'yaml'))
    verbose = c.get('verbose')
    recurse = c.get('recurse')

    config_string = format_config(lnt, verbose=verbose)
    if len(config_string) > 0:
        lnt.log(config_string)

    # TODO: do this better
    # nv counts parsing violations across all inputs; non-zero drives exit code 66.
    nv = 0
    if profiler:
        # Set up the profiler if required
        try:
            import cProfile
        except ImportError:
            lnt.log('The cProfiler is not available on your platform.')
            sys.exit(1)
        pr = cProfile.Profile()
        pr.enable()

    bencher("Parse setup")
    try:
        # handle stdin if specified via lone '-'
        if '-' == path:
            # put the parser result in a list to iterate later
            config = lnt.config.make_child_from_path('stdin')
            result = [lnt.parse_string(
                sys.stdin.read(),
                'stdin',
                verbosity=verbose,
                recurse=recurse,
                config=config
            )]
        else:
            # A single path must be specified for this command
            result = lnt.parse_path(path, verbosity=verbose, recurse=recurse)

        # iterative print for human readout
        if format == 'human':
            for parsed, violations, time_dict in result:
                if parsed:
                    lnt.log(parsed.stringify(code_only=code_only))
                else:
                    # TODO: Make this prettier
                    lnt.log('...Failed to Parse...')
                nv += len(violations)
                for v in violations:
                    lnt.log(format_violation(v, verbose=verbose))
                if verbose >= 2:
                    lnt.log("==== timings ====")
                    lnt.log(cli_table(time_dict.items()))
                bencher("Output details for file")
        else:
            # collect result and print as single payload
            # will need to zip in the file paths
            filepaths = ['stdin'] if '-' == path else lnt.paths_from_path(path)
            result = [
                dict(
                    filepath=filepath,
                    segments=parsed.as_record(code_only=code_only, show_raw=True)
                )
                for filepath, (parsed, _, _) in zip(filepaths, result)
            ]

            if format == 'yaml':
                # For yaml dumping always dump double quoted strings if they contain tabs or newlines.
                def quoted_presenter(dumper, data):
                    """Representer which always double quotes string values needing escapes."""
                    if '\n' in data or '\t' in data:
                        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
                    else:
                        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='')

                yaml.add_representer(str, quoted_presenter)

                click.echo(yaml.dump(result))
            elif format == 'json':
                click.echo(json.dumps(result))
    except IOError:
        click.echo(colorize('The path {0!r} could not be accessed. Check it exists.'.format(path), 'red'))
        sys.exit(1)

    if profiler:
        pr.disable()
        profiler_buffer = StringIO()
        ps = pstats.Stats(
            pr, stream=profiler_buffer
        ).sort_stats('cumulative')
        ps.print_stats()
        lnt.log("==== profiler stats ====")
        # Only print the first 50 lines of it
        lnt.log('\n'.join(profiler_buffer.getvalue().split('\n')[:50]))

    if bench:
        lnt.log("\n\n==== bencher stats ====")
        bencher.display()

    # Exit 66 signals "parsing violations found" to calling scripts/CI.
    if nv > 0:
        sys.exit(66)
    else:
        sys.exit(0)
Ejemplo n.º 7
0
def parse(
    path,
    code_only,
    include_meta,
    format,
    profiler,
    bench,
    nofail,
    logger=None,
    **kwargs,
):
    """Parse SQL files and just spit out the result.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    c = get_config(**kwargs)
    # We don't want anything else to be logged if we want json or yaml output
    non_human_output = format in ("json", "yaml")
    lnt, formatter = get_linter_and_formatter(c, silent=non_human_output)
    verbose = c.get("verbose")
    recurse = c.get("recurse")

    formatter.dispatch_config(lnt)

    # Set up logging.
    set_logging_level(verbosity=verbose, logger=logger, stderr_output=non_human_output)

    # TODO: do this better
    # nv counts parsing violations; with nofail unset, non-zero exits with code 66.
    nv = 0
    if profiler:
        # Set up the profiler if required
        try:
            import cProfile
        except ImportError:  # pragma: no cover
            click.echo("The cProfiler is not available on your platform.")
            sys.exit(1)
        pr = cProfile.Profile()
        pr.enable()

    try:
        # Wall-clock timing around the whole parse, reported under verbose/bench.
        t0 = time.monotonic()
        # handle stdin if specified via lone '-'
        if "-" == path:
            # put the parser result in a list to iterate later
            result = [
                lnt.parse_string(
                    sys.stdin.read(), "stdin", recurse=recurse, config=lnt.config
                ),
            ]
        else:
            # A single path must be specified for this command
            result = lnt.parse_path(path, recurse=recurse)
        total_time = time.monotonic() - t0

        # iterative print for human readout
        if format == "human":
            timing = TimingSummary()
            for parsed_string in result:
                timing.add(parsed_string.time_dict)
                if parsed_string.tree:
                    click.echo(parsed_string.tree.stringify(code_only=code_only))
                else:
                    # TODO: Make this prettier
                    click.echo("...Failed to Parse...")  # pragma: no cover
                nv += len(parsed_string.violations)
                if parsed_string.violations:
                    click.echo("==== parsing violations ====")  # pragma: no cover
                for v in parsed_string.violations:
                    click.echo(format_violation(v))  # pragma: no cover
                # Warn when violations occurred under the default (ansi) dialect:
                # the user may simply have the wrong dialect configured.
                if (
                    parsed_string.violations
                    and parsed_string.config.get("dialect") == "ansi"
                ):
                    click.echo(format_dialect_warning())  # pragma: no cover
                if verbose >= 2:
                    click.echo("==== timings ====")
                    click.echo(cli_table(parsed_string.time_dict.items()))
            if verbose >= 2 or bench:
                click.echo("==== overall timings ====")
                click.echo(cli_table([("Clock time", total_time)]))
                timing_summary = timing.summary()
                for step in timing_summary:
                    click.echo(f"=== {step} ===")
                    click.echo(cli_table(timing_summary[step].items()))
        else:
            # Machine-readable output: reshape into {filepath, segments} records.
            result = [
                dict(
                    filepath=linted_result.fname,
                    segments=linted_result.tree.as_record(
                        code_only=code_only, show_raw=True, include_meta=include_meta
                    )
                    if linted_result.tree
                    else None,
                )
                for linted_result in result
            ]

            if format == "yaml":
                # For yaml dumping always dump double quoted strings if they contain tabs or newlines.
                yaml.add_representer(str, quoted_presenter)

                click.echo(yaml.dump(result))
            elif format == "json":
                click.echo(json.dumps(result))
    except OSError:  # pragma: no cover
        click.echo(
            colorize(
                f"The path {path!r} could not be accessed. Check it exists.",
                "red",
            ),
            err=True,
        )
        sys.exit(1)

    if profiler:
        pr.disable()
        profiler_buffer = StringIO()
        ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative")
        ps.print_stats()
        click.echo("==== profiler stats ====")
        # Only print the first 50 lines of it
        click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50]))

    # Exit 66 signals "parsing violations found" unless --nofail suppresses it.
    if nv > 0 and not nofail:
        sys.exit(66)  # pragma: no cover
    else:
        sys.exit(0)
Ejemplo n.º 8
0
import json
import re
import os

yaml.SafeDumper.org_represent_str = yaml.SafeDumper.represent_str


def repr_str(dumper, data):
    """Dump multiline strings in literal block style ('|'); otherwise defer to
    the dumper's original str representer."""
    if '\n' not in data:
        return dumper.org_represent_str(data)
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')


# Apply the literal-block str representer to the SafeDumper (used by safe_dump).
yaml.add_representer(str, repr_str, Dumper=yaml.SafeDumper)

ps = PorterStemmer()  # shared stemmer instance for the module


class Odinson:
    """Builds Odinson rules from target variables and writes them out.

    NOTE(review): ``process_targets`` and ``write_tmp_rules`` are not visible
    in this chunk — presumably defined further down the class; confirm.
    """
    def __init__(self, targets):
        # targets: expected to carry a 'target_variables' mapping (empty dict fallback).
        self.targets = targets.get('target_variables', {})
        self.rules_shell = self.load_rule_shell()
        self.rules = []
        self.process_targets()
        self.write_tmp_rules()

    def load_rule_shell(self):
        """Load the rule template YAML shipped with the project."""
        with open("src/odinson-rules/rules.yaml") as f:
            return yaml.safe_load(f)
# https://stackoverflow.com/questions/37200150/can-i-dump-blank-instead-of-null-in-yaml-pyyaml
def represent_none(self, _):
    """Dump ``None`` as an empty scalar instead of the literal ``null``."""
    blank = ""
    return self.represent_scalar("tag:yaml.org,2002:null", blank)


def dump_data(data):
    """Write *data* to data.yaml, preserving unicode and avoiding line wraps.

    NOTE(review): ``Dumper`` is not defined in this chunk — presumably a custom
    dumper configured elsewhere in the file; confirm.
    """
    with open("data.yaml", "w") as file:
        yaml.dump(data, file, allow_unicode=True, width=10000, Dumper=Dumper)


# Browser-like User-Agent so review sites don't reject the scraping requests.
headers = {
    "User-Agent":
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:85.0) Gecko/20100101 Firefox/85.0"
}
# Emit None values as blank scalars when dumping YAML.
yaml.add_representer(type(None), represent_none)

with open("data.yaml", "r") as stream:
    try:
        data = yaml.safe_load(stream)

        jobs = data["jobs"]

        for index, job in enumerate(jobs):
            url = job["review"]
            job["rating"] = None

            if url:
                print(url + ": ", end="", flush=True)
                response = requests.get(url, headers=headers)
                content = response.text
Ejemplo n.º 10
0
    return result


def Parameter_representer(dumper, param):
    """Represent a Parameter as a plain mapping of its serialized fields."""
    serialized = serialize_parameter(param)
    return dumper.represent_dict(serialized.items())


def ParameterNode_representer(dumper, node):
    """Represent a ParameterNode as a plain mapping of its serialized fields."""
    serialized = serialize_parameter_node(node)
    return dumper.represent_dict(serialized.items())


def represent_str(dumper, data):
    """Use literal block style ('|') for multiline strings, plain style otherwise."""
    tag = 'tag:yaml.org,2002:str'
    if '\n' not in data:
        return dumper.represent_scalar(tag, data)
    return dumper.represent_scalar(tag, data, style='|')


# Register custom representers: parameter objects dump as mappings,
# strings pick literal block style when multiline.
yaml.add_representer(Parameter, Parameter_representer)
yaml.add_representer(ParameterNode, ParameterNode_representer)
yaml.add_representer(str, represent_str)


def to_yaml(param):
    """Serialize *param* to YAML, stripping quotes around ISO dates (YYYY-MM-DD)."""
    dumped = yaml.dump(param, default_flow_style=False, allow_unicode=True)
    return re.sub(r"'(\d{4}-\d{2}-\d{2})'", r"\1", dumped)
Ejemplo n.º 11
0
"""


# credit to stackoverflow user's solutions for yaml representation of quoted and blank values
# https://stackoverflow.com/questions/67476386/python-yaml-script-and-double-quotes
# https://stackoverflow.com/questions/30134110/how-can-i-output-blank-value-in-python-yaml-file/37445121
class DoubleQuoted(str):
    """Marker string subclass for values that must dump with double quotes."""
    pass


def represent_double_quoted(dumper, data):
    """Force double-quote style for DoubleQuoted strings."""
    default_tag = yaml.resolver.BaseResolver.DEFAULT_SCALAR_TAG
    return dumper.represent_scalar(default_tag, data, style='"')


# DoubleQuoted strings dump with explicit double quotes.
yaml.add_representer(DoubleQuoted, represent_double_quoted)
# None dumps as a blank value rather than "null".
yaml.add_representer(
    type(None), lambda dumper, value: dumper.represent_scalar(
        u'tag:yaml.org,2002:null', ''))


def flatten(dict_item):
    """Lift nested 'params' entries to the top level of a copy of *dict_item*.

    Returns ``(id, quoted_item)`` where the item has 'id' (and, when present
    and non-empty, 'params') removed.
    """
    item = dict_item.copy()
    identifier = item['id']
    if item.get('params'):
        # Merge the nested params into the top level, then drop the container.
        item.update(item['params'])
        del item['params']
    del item['id']
    return identifier, quote_values(item)
Ejemplo n.º 12
0
 def _get_or_write_config(self):
     '''Generate a template config file (and exit) when none exists; otherwise
     read, validate and return the parsed config.

     [To Do] 
         - write_config has to be moved somewhere
         - numerical params: equal values mean equality (simplifies equation).
         - boundary params: need equality list, parameters in equality list MUST have equal values and are then collapsed.
     '''
     if self.config_file is None:
         print("[-] No config file found.")
         print("[+] Generating config file for model %r" % self.model_file)
         '''for now we use the following dict until columns are fixed in gimble model'''
         config = {
             'version': self._VERSION,
             'random_seed': 12345,
             'precision': 25,
             #'model' : self.model_file,
             'population_ids': collections.defaultdict(dict),
             'k_max': collections.defaultdict(dict),
             'parameters': collections.defaultdict(dict),
             'boundaries': collections.defaultdict(list),
         }
         config['parameters']['theta'] = 'FLOAT'
         # Seed template entries from the model file's header columns:
         # C_* -> coalescence params, M_/m_* -> migration params / k_max.
         for column in self._parse_model_file(target='header'):
             if column.startswith('C_'):
                 config['parameters'][column] = 'FLOAT'
                 population_id = column.replace('C_', '')
                 if len(population_id) == 1:
                     config['population_ids'][population_id] = 'STRING'
             if column.startswith('M_'):
                 config['parameters'][column] = 'FLOAT'
             elif column.startswith('m_'):
                 config['k_max'][column] = 'INT'
         config['parameters']['T'] = 'FLOAT'
         for parameter in config['parameters']:
             config['boundaries'][parameter] = ['MIN', 'MAX']
         config_file = pathlib.Path(
             self.model_file).with_suffix('.config.yaml')
         # defaultdicts must dump as plain YAML mappings.
         yaml.add_representer(collections.defaultdict,
                              yaml.representer.Representer.represent_dict)
         with open(config_file, 'w') as fh:
             yaml.dump(config, fh)
         print("[+] Wrote file %r" % str(config_file))
         # Intentional hard stop: the user must fill in the template first.
         sys.exit("[X] Please specify parameters in config file %r" %
                  str(config_file))
     else:
         print("[+] Reading config %r" % self.config_file)
         config_raw = yaml.safe_load(open(self.config_file, 'r'))
         config = {}
         for k, v in config_raw.items():
             if k == 'version':
                 config[k] = v
             elif k == 'population_ids':
                 config[k] = v
             #elif k == 'model':
             #    config[k] = v
             elif isinstance(v, str):
                 # A top-level string value means a placeholder was never replaced.
                 sys.exit(
                     "[X] Config file error: %r should be a number (not %r)."
                     % (k, v))
             elif k == 'parameters':
                 config['parameters'], config['boundaries'] = {}, {}
                 for v_k, v_v in config_raw[k].items():
                     if isinstance(v_v, str):  # parameter not set
                         # Unset parameter is OK only if numeric boundaries are given.
                         if any([
                                 isinstance(bound, str)
                                 for bound in config_raw['boundaries'][v_k]
                         ]):
                             sys.exit(
                                 "[X] Config file error: set parameter or boundaries for %r (not %r)."
                                 % (v_k, v_v))
                         else:
                             config['boundaries'][v_k] = config_raw[
                                 'boundaries'][v_k]
                     else:
                         config[k][v_k] = v_v
             elif k == 'boundaries':
                 # Handled together with 'parameters' above.
                 pass
             elif k == 'k_max':
                 config['k_max'] = {}
                 for v_k, v_v in config_raw[k].items():
                     if isinstance(v_v, int):  # k_max not set
                         config[k][v_k] = v_v
                     else:
                         sys.exit(
                             "[X] Config file error: set value for k_max %r (not %r)."
                             % (v_k, v_v))
             else:
                 config[k] = v
         return config
Ejemplo n.º 13
0

# Codefresh variables may need quotes: adjust yaml dump accordingly
def literal_presenter(dumper, data):
    """Pick a scalar style for Codefresh strings: literal block for multiline
    text, single quotes for ${{...}} variables, plain style otherwise."""
    tag = 'tag:yaml.org,2002:str'
    if isinstance(data, str) and "\n" in data:
        return dumper.represent_scalar(tag, data, style='|')
    if isinstance(data, str) and data.startswith('${{'):
        return dumper.represent_scalar(tag, data, style="'")
    return dumper.represent_scalar(tag, data)


yaml.add_representer(str, literal_presenter)


def create_codefresh_deployment_scripts(root_paths,
                                        env,
                                        include=(),
                                        exclude=(),
                                        template_name=CF_TEMPLATE_PATH,
                                        base_image_name=None,
                                        values_manual_deploy=None,
                                        save=True):
    """
    Entry point to create deployment scripts for codefresh: codefresh.yaml and helm chart
    """
    template_name = f"codefresh-template-{env}.yaml"
    out_filename = f"codefresh-{env}.yaml"
Ejemplo n.º 14
0
"""Utility to generate yml files for all the parsing examples."""
import os

import oyaml as yaml

from sqlfluff.core.parser import Parser, Lexer
from sqlfluff.core import FluffConfig
from sqlfluff.cli.commands import quoted_presenter

from dialects.parse_fixtures import get_parse_fixtures, load_file

# Use sqlfluff's presenter so strings needing escapes dump double-quoted.
yaml.add_representer(str, quoted_presenter)

parse_success_examples, _ = get_parse_fixtures()

for example in parse_success_examples:
    dialect, sqlfile = example
    config = FluffConfig(overrides=dict(dialect=dialect))
    # Load the SQL
    raw = load_file(dialect, sqlfile)
    # Lex and parse the file
    tokens, _ = Lexer(config=config).lex(raw)
    tree = Parser(config=config).parse(tokens)
    r = None
    if tree:
        r = tree.as_record(code_only=True, show_raw=True)
    # Remove the .sql file extension
    root = sqlfile[:-4]
    path = os.path.join("test", "fixtures", "parser", dialect, root + ".yml")
    with open(path, "w", newline="\n") as f:
        if r:
from pathlib import Path

import oyaml as yaml
from pyparsing import unicode


class LiteralUnicode(unicode):
    """Marker subclass for strings to dump in YAML literal block style."""
    pass


def literal_unicode_represnter(dumper, data):
    """Emit LiteralUnicode values as literal block ('|') scalars.

    (Name keeps its original spelling: it is referenced at registration time.)
    """
    block_style = '|'
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style=block_style)


yaml.add_representer(LiteralUnicode, literal_unicode_represnter)


class GithubAction:
    def __init__(self, name: str):
        """
        Creates a new Github Action.
        :param name: Name of the action.  Will be displayed in the action file.
        """
        self.name = name
        self.action_data = {
            'name': self.name,
            'on': '[push]',
            'jobs': {
                'build': {
                    'runs-on':
                    'ubuntu-latest',
Ejemplo n.º 16
0
def parse(path,
          code_only,
          format,
          profiler,
          bench,
          nofail,
          logger=None,
          **kwargs):
    """Parse SQL files and just spit out the result.

    PATH is the path to a sql file or directory to lint. This can be either a
    file ('path/to/file.sql'), a path ('directory/of/sql/files'), a single ('-')
    character to indicate reading from *stdin* or a dot/blank ('.'/' ') which will
    be interpreted like passing the current working directory as a path argument.
    """
    # Initialise the benchmarker
    bencher = BenchIt()  # starts the timer
    c = get_config(**kwargs)
    # We don't want anything else to be logged if we want a yaml output
    lnt, formatter = get_linter_and_formatter(c,
                                              silent=format
                                              in ("json", "yaml"))
    verbose = c.get("verbose")
    recurse = c.get("recurse")

    formatter.dispatch_config(lnt)

    # Set up logging.
    set_logging_level(verbosity=verbose, logger=logger)

    # TODO: do this better
    # nv counts parsing violations; with nofail unset, non-zero exits with code 66.
    nv = 0
    if profiler:
        # Set up the profiler if required
        try:
            import cProfile
        except ImportError:
            click.echo("The cProfiler is not available on your platform.")
            sys.exit(1)
        pr = cProfile.Profile()
        pr.enable()

    bencher("Parse setup")
    try:
        # handle stdin if specified via lone '-'
        if "-" == path:
            # put the parser result in a list to iterate later
            config = lnt.config.make_child_from_path("stdin")
            # Append the config so the human-readout tuple unpacks to four items.
            result = [(
                # TODO: Remove verbose
                *lnt.parse_string(
                    sys.stdin.read(), "stdin", recurse=recurse, config=config),
                config,
            )]
        else:
            # A single path must be specified for this command
            # TODO: Remove verbose
            result = lnt.parse_path(path, recurse=recurse)

        # iterative print for human readout
        if format == "human":
            for parsed, violations, time_dict, f_cfg in result:
                if parsed:
                    click.echo(parsed.stringify(code_only=code_only))
                else:
                    # TODO: Make this prettier
                    click.echo("...Failed to Parse...")
                nv += len(violations)
                if violations:
                    click.echo("==== parsing violations ====")
                for v in violations:
                    click.echo(format_violation(v))
                # Violations under the default (ansi) dialect may just mean a
                # wrong dialect setting — warn the user.
                if violations and f_cfg.get("dialect") == "ansi":
                    click.echo(format_dialect_warning())
                if verbose >= 2:
                    click.echo("==== timings ====")
                    click.echo(cli_table(time_dict.items()))
                bencher("Output details for file")
        else:
            # collect result and print as single payload
            # will need to zip in the file paths
            filepaths = ["stdin"] if "-" == path else lnt.paths_from_path(path)
            result = [
                dict(
                    filepath=filepath,
                    segments=parsed.as_record(code_only=code_only,
                                              show_raw=True),
                ) for filepath, (parsed, _, _, _) in zip(filepaths, result)
            ]

            if format == "yaml":
                # For yaml dumping always dump double quoted strings if they contain tabs or newlines.
                def quoted_presenter(dumper, data):
                    """Representer which always double quotes string values needing escapes."""
                    if "\n" in data or "\t" in data or "'" in data:
                        return dumper.represent_scalar("tag:yaml.org,2002:str",
                                                       data,
                                                       style='"')
                    else:
                        return dumper.represent_scalar("tag:yaml.org,2002:str",
                                                       data,
                                                       style="")

                yaml.add_representer(str, quoted_presenter)

                click.echo(yaml.dump(result))
            elif format == "json":
                click.echo(json.dumps(result))
    except IOError:
        click.echo(
            colorize(
                "The path {0!r} could not be accessed. Check it exists.".
                format(path),
                "red",
            ))
        sys.exit(1)

    if profiler:
        pr.disable()
        profiler_buffer = StringIO()
        ps = pstats.Stats(pr, stream=profiler_buffer).sort_stats("cumulative")
        ps.print_stats()
        click.echo("==== profiler stats ====")
        # Only print the first 50 lines of it
        click.echo("\n".join(profiler_buffer.getvalue().split("\n")[:50]))

    if bench:
        click.echo("\n\n==== bencher stats ====")
        bencher.display()

    # Exit 66 signals "parsing violations found" unless --nofail suppresses it.
    if nv > 0 and not nofail:
        sys.exit(66)
    else:
        sys.exit(0)
Ejemplo n.º 17
0
            with param.open() as p:
                params[param.name.split(".")[0]] = extract_meta(p)
        data["components"]["parameters"] = params

        # Endpoints:
        endpoints = {}
        for ep in (root / "src" / "endpoints").iterdir():
            point = "/" + ep.name

            # Sanity check: this ensures that we're ignoring irrelevant
            # files such as `.DS_STORE`.
            if not ep.is_dir():
                continue

            endpoints[point] = {}
            for desc in ep.glob("**/*.md"):
                method = desc.as_posix().split("/")[-1].split(".")[0]
                with desc.open() as d:
                    endpoints[point][method] = extract_meta(d)
        data["paths"] = endpoints

        # Write our OAS3-compliant specification.
        compiled.write_text(
            yaml.dump(data, allow_unicode=True, default_flow_style=False))
        print("Successfully compiled an OAS3-compliant specification.")


if __name__ == "__main__":
    # Install the custom str representer before generating the spec files.
    yaml.add_representer(str, str_presenter)
    generate(pathlib.Path("website/static/api"))