Example #1
0
def merge(
    *,
    files: list,
    dst: str,
    style: str,  # "ref" or "whole"
    strict: bool = False,
    wrap: str = None,
    wrap_section: str = "definitions"
):
    """Merge multiple definition files into a single document written to dst.

    style="ref"   -- keep each definition in its source file and emit
                     {"$ref": "<relpath>#/<ns>/<name>"} stubs pointing at it.
    style="whole" -- deep-merge all files into one inlined document.
    strict        -- with style="ref", raise if the same ns/name appears in
                     more than one source file.
    wrap          -- if given, add an object schema under
                     r[wrap_section][wrap] whose properties reference every
                     entry of wrap_section.
    """
    from dictknife.langhelpers import make_dict, as_jsonpointer
    from dictknife import deepmerge

    if style == "ref":
        # base directory used to relativize $ref targets (falsy when dst is None)
        dstdir = dst and os.path.dirname(dst)

        r = make_dict()
        seen = {}  # ns -> name -> source file, for the strict-mode error message
        for src in files:
            d = loading.loadfile(src)
            for ns, sd in d.items():
                for name in sd:
                    if ns not in r:
                        r[ns] = make_dict()
                        seen[ns] = make_dict()
                    if strict and name in r[ns]:
                        raise RuntimeError(
                            "{name} is already existed, (where={where} and {where2})".format(
                                name=name, where=seen[ns][name], where2=src
                            )
                        )
                    if dst is None:
                        # no destination: emit refs relative to nothing
                        where = ""
                    else:
                        where = os.path.relpath(src, start=dstdir)
                    r[ns][name] = {
                        "$ref": "{where}#/{ns}/{name}".format(
                            where=where, ns=ns, name=as_jsonpointer(name)
                        )
                    }
                    seen[ns][name] = src
    elif style == "whole":
        # TODO: strict support?
        data = [loading.loadfile(src) for src in files]
        r = deepmerge(*data, override=True)
    else:
        raise RuntimeError("invalid style: {}".format(style))

    if wrap is not None:
        # NOTE(review): assumes r[wrap_section] already exists; if the merged
        # document lacks that section this raises KeyError below -- confirm.
        wd = make_dict()
        wd["type"] = "object"
        wd["properties"] = make_dict()
        for name in r.get(wrap_section) or {}:
            wd["properties"][name] = {
                "$ref": "#/{wrap_section}/{name}".format(
                    wrap_section=wrap_section, name=name
                )
            }
        r[wrap_section][wrap] = wd
    loading.dumpfile(r, dst)
Example #2
0
def loadfile_with_jsonref(ref):
    """Load *ref*; a "#/<json-pointer>" suffix selects a subtree of the doc."""
    if "#/" not in ref:
        # plain filename, nothing to resolve
        return loading.loadfile(ref)

    filename, query = ref.split("#")
    document = loading.loadfile(filename)
    return access_by_json_pointer(document, query)
Example #3
0
def run(*, config: str) -> None:
    """Load *config*, overlay $OVERWRITE_CONFIG if set, and dump the result."""
    merged = loading.loadfile(config)

    overwrite_file = os.environ.get("OVERWRITE_CONFIG")
    if overwrite_file is not None:
        overlay = loading.loadfile(overwrite_file)
        merged = deepmerge(merged, overlay, method="replace")
    loading.dumpfile(merged)
Example #4
0
def run(*, schema: str, data: str, cache: str) -> None:
    """Validate *data* against *schema*, caching the compiled validator.

    The Draft7 validator is pickled to *cache* on first use and reloaded on
    later runs, so the schema file is only parsed once.  Prints the list of
    validation errors (empty list when the data is valid).
    """
    data = loading.loadfile(data)

    try:
        # bugfix: pickle requires a binary-mode file object; the original
        # text-mode open() made pickle.load fail with a TypeError.
        with open(cache, "rb") as rf:
            validator = pickle.load(rf)
    except FileNotFoundError:
        schema = loading.loadfile(schema)
        # jsonschema.Draft7Validator.check_schema(schema)
        validator = jsonschema.Draft7Validator(schema)
        with open(cache, "wb") as wf:
            pickle.dump(validator, wf)
    print(list(validator.iter_errors(data)))
Example #5
0
def main():
    """CLI: merge an extracted doc with a jsonschema doc and dump the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--extracted", required=True)
    parser.add_argument("--jsonschema", required=True)
    parser.add_argument(
        "-o", "--output-format", default="yaml", choices=["json", "yaml"]
    )
    args = parser.parse_args()

    merged = merge(
        extracted=loading.loadfile(args.extracted),
        jsonschema=loading.loadfile(args.jsonschema),
    )
    loading.dumpfile(merged, format=args.output_format)
Example #6
0
def main():
    """CLI: read TSV from a file (or stdin when omitted) and print a table."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("src", nargs="?", default=None)
    args = parser.parse_args()

    rows = loading.loadfile(args.src, format="tsv")
    table_lines = to_table(rows)
    print("\n".join(table_lines))
Example #7
0
def run(src):
    """Print every enum found in *src*: parent key, then one line per value."""
    document = loading.loadfile(src)
    for path, node in LooseDictWalkingIterator(["enum"]).iterate(document):
        print(path[-2])
        for value in node["enum"]:
            print("- {}".format(value))
        print("")
Example #8
0
def run(*, config_path: str) -> None:
    """Poll the configured SQS queue once, print the received messages, and
    delete them in a single batch.

    Reads QueueUrl from the config file at *config_path*.
    """
    # Create SQS client
    c = loading.loadfile(config_path)
    sqs = boto3.client("sqs")
    queue_url = c["QueueUrl"]

    # Long poll for message on provided SQS queue
    response = sqs.receive_message(
        QueueUrl=queue_url,
        AttributeNames=["SentTimestamp"],
        MaxNumberOfMessages=10,
        MessageAttributeNames=["All"],
        WaitTimeSeconds=20,
        VisibilityTimeout=1,
    )
    # bugfix: boto3 omits the "Messages" key entirely when the queue is empty,
    # so the previous `response["Messages"]` raised KeyError on an empty poll.
    messages = response.get("Messages", [])
    print(len(messages))
    if not messages:
        # delete_message_batch rejects an empty Entries list, so stop here
        return

    will_be_deleted = []
    for msg in messages:
        print(f"{msg['MessageId']}: {msg['Body']}")
        will_be_deleted.append({
            "Id": msg["MessageId"],
            "ReceiptHandle": msg["ReceiptHandle"]
        })
        # delete
    # import inspect
    # print(inspect.getfullargspec(sqs.delete_message_batch))
    print(sqs.delete_message_batch(QueueUrl=queue_url,
                                   Entries=will_be_deleted))
def run(filename: str) -> None:
    """Validate a sample config document against the Draft4 schema in
    *filename* and print the cause/location details of each error.
    """
    schema = loading.loadfile(filename)
    # bugfix: a first sample dict (region us-west-1) was assigned and then
    # immediately overwritten; the dead assignment is removed.
    d = {
        "config": {
            "variables": {"env": "production"},
            "providers": {"aws": {"region": "us-west-2"}},
        }
    }

    cls = jsonschema.Draft4Validator
    cls.check_schema(schema)  # fail fast if the schema itself is invalid
    for e in cls(schema).iter_errors(d):
        print("cause?")
        print("	context", e.context)
        print("	cause", e.cause)
        print("where?")
        print("	instance_path", e.path)
        print("	schema_path", e.schema_path)
        print("	validator", e.validator)
Example #10
0
def transform(
    *,
    src: str,
    dst: str,
    code: str,
    functions: str,
    input_format: str,
    output_format: str,
    format: str,
    sort_keys: str
):
    """transform dict

    Loads src, applies a transformation, and writes the result to dst.
    The transformation is chosen by priority:
      1. code      -- a Python expression eval'd into a callable
      2. functions -- symbol names resolved via magicalimport, applied in order
      3. otherwise -- the identity function
    """
    from magicalimport import import_symbol

    if code is not None:
        # SECURITY: eval() of user-supplied code -- only acceptable because
        # this is a trusted local CLI; never feed it untrusted input.
        transform = eval(code)
    elif functions:

        def transform(d):
            # bare names default to the dictknife.transform module
            for fn in functions:
                if "." not in fn:
                    fn = "dictknife.transform:{}".format(fn)
                d = import_symbol(fn)(d)
            return d

    else:
        transform = lambda x: x  # NOQA

    # `format` is the shared fallback for both input and output formats
    input_format = input_format or format
    data = loading.loadfile(src, input_format)
    result = transform(data)
    loading.dumpfile(
        result, dst, format=output_format or input_format or format, sort_keys=sort_keys
    )
Example #11
0
def transform(*, src: str, dst: str, config: str, config_file: str, code: str,
              functions: str, input_format: str, output_format: str,
              format: str, sort_keys: str):
    """transform dict

    Like the plain transform command, but the chosen function is called with
    keyword arguments parsed from --config (an inline literal) deep-merged
    with the contents of --config-file (the file wins on conflicts).
    """
    from magicalimport import import_symbol
    from dictknife import deepmerge

    if code is not None:
        # SECURITY: eval() of user-supplied code; trusted-CLI use only.
        transform = eval(code)
    elif functions:

        def transform(d):
            # bare names default to the dictknife.transform module
            for fn in functions:
                if "." not in fn:
                    fn = "dictknife.transform:{}".format(fn)
                d = import_symbol(fn)(d)
            return d

    else:
        transform = lambda x: x  # NOQA

    # `format` is the fallback for the input side
    input_format = input_format or format
    kwargs = loading.loads(config, format=input_format)

    if config_file:
        with open(config_file) as rf:
            kwargs = deepmerge(kwargs, loading.load(rf, format=input_format))

    data = loading.loadfile(src, input_format)
    result = transform(data, **kwargs)
    # NOTE(review): unlike the sibling command, the output format falls back
    # to `format` directly (not input_format) -- confirm this is intended.
    loading.dumpfile(result,
                     dst,
                     format=output_format or format,
                     sort_keys=sort_keys)
Example #12
0
def examples(
    *,
    src: str,
    dst: str = None,
    ref: str,
    limit: int,
    input_format: str,
    output_format: str,
    format: str,
    use_expand: bool = False,
):
    """output sample value from swagger's spec

    With use_expand the spec is bundled and all $refs expanded before the
    example is extracted; otherwise the file is loaded as-is.  `ref` (or a
    "#/..." fragment embedded in src) selects a subtree via JSON pointer.
    """
    from dictknife.jsonknife import extract_example
    from dictknife.jsonknife.accessor import access_by_json_pointer

    if use_expand:
        from dictknife.jsonknife import bundle, expand

        if ref is not None:
            # fold the pointer into the src location so bundle() resolves it
            src = "{prefix}#/{name}".format(prefix=src, name=ref.lstrip("#/"))
        data = bundle(src, format=input_format or format)
        data = expand(None, doc=data)
    else:
        data = loading.loadfile(src, format=input_format or format)

    # a fragment embedded in src wins over the ref argument
    if src and "#/" in src:
        _, ref = src.split("#/", 1)
    if ref is not None:
        # NOTE(review): with use_expand the pointer was already folded into
        # src above, so it may be applied again to the expanded doc -- confirm.
        data = access_by_json_pointer(data, ref)
    d = extract_example(data, limit=limit)
    loading.dumpfile(d, dst, format=output_format or format or "json")
Example #13
0
def run(*, config_path: str):
    """Exercise send_sqs_message()

    Sends five numbered test messages (delayed 10s each) to the queue named
    in the config file, as a single batch, and prints the batch response.
    """

    # Assign this value before running the program
    c = loading.loadfile(config_path)
    sqs_queue_url = c["QueueUrl"]
    sqs_client = boto3.client("sqs")

    # Send some SQS messages
    entries = []
    for i in range(1, 6):
        entries.append({
            "Id": f"m{i}",
            "MessageBody": f"SQS message #{i}",
            "DelaySeconds": 10
        })

    try:
        # guard against an empty batch (the API rejects it, see message below)
        assert (
            entries
        ), "!! An error occurred (AWS.SimpleQueueService.EmptyBatchRequest) when calling the SendMessageBatch operation: There should be at least one SendMessageBatchRequestEntry in the request."

        response = sqs_client.send_message_batch(QueueUrl=sqs_queue_url,
                                                 Entries=entries)
        print(response)
    except ClientError as e:
        print("!!", e)
Example #14
0
def main():
    """CLI: load a spec, walk it into a package, and emit generated code."""
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("src", nargs="?", default=None)
    parser.add_argument("--package", default=None)
    parser.add_argument("--position", default=None)
    parser.add_argument("--writer", default="goaway.writer:Writer")
    parser.add_argument("--emitter", default="goaway.emitter:Emitter")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    loading.setup()

    repository = get_repository(
        writer_cls=import_symbol(args.writer),
        emitter_cls=import_symbol(args.emitter),
    )

    data = loading.loadfile(args.src)
    package = repository.package(args.package or "main")
    walk(data, package, repository)

    position = repository.resolve_package_path(args.position, package)
    repository.emitter.emit_package(package, d=position)
Example #15
0
def run(filename: str) -> None:
    """Report services whose exec path does not start with the service name.

    For each services/<name>/vars/exec entry, dumps a JSON record unless the
    exec path is one of the known exceptions or already matches the
    normalized service name.

    bugfix: the return annotation said ``str`` but the function never returns
    a value; corrected to ``None``.  The walk variable no longer shadows the
    walker object.
    """
    d = loading.loadfile(filename)
    walker = DictWalker(["vars", "exec"])
    for path, found in walker.walk(d):
        # path: services/<service_name>/vars/exec
        service_name = path[-3]
        exec_path = found["exec"]
        if exec_path == "./enduser_server":
            continue
        if exec_path == "./tuner_server":
            continue

        if _normalize(_normalize2(exec_path)).startswith(
                _normalize(_normalize2(service_name))
        ):
            continue
        loading.dumpfile(
            {
                "must_include_part": _normalize(_normalize2(service_name)),
                "ref": f"#/{'/'.join(path[:-2])}",
                "exec_path": exec_path,
            },
            format="json",
        )
        print("")
Example #16
0
def run(src):
    """Dump every enum in *src*: the parent key, then "- value" lines."""
    loaded = loading.loadfile(src)
    iterator = LooseDictWalkingIterator(["enum"])
    for path, found in iterator.iterate(loaded):
        print(path[-2])
        lines = ["- {}".format(v) for v in found["enum"]]
        print("\n".join(lines))
        print("")
Example #17
0
def run(filename: str) -> None:
    """Load *filename*, visit it with a fresh Context, and print the emission."""
    document = loading.loadfile(filename)
    ctx = Context()
    visit(ctx, document)

    print("@", ctx)
    print("----------------------------------------")
    print(emit(ctx))
Example #18
0
def run(*, path: str, disable_docstring: bool) -> None:
    """Generate a typed Python wrapper module from the discovery-style
    document at *path* and print the generated source.

    One class is emitted per resource, with one method per API method, plus a
    Service facade and a build() helper wrapping googleapiclient.
    """
    d = loading.loadfile(path)
    m = Module()
    a = Accessor(d, m, disable_docstring=disable_docstring)

    m.import_("typing", as_="t")
    m.sep()
    # placeholder aliases until real types are generated
    m.stmt("AnyService = t.Any  # TODO")
    m.stmt("AnyResource = t.Any  # TODO")
    m.sep()
    for rname, resource in a.resources.items():
        with m.class_(titleize(rname), ""):

            with m.method("__init__", "resource: AnyResource"):
                m.stmt("self.internal = resource")

            m.stmt("# methods")
            for mname, method, params in a.iterate_methods(resource):
                with m.method(mname, params):
                    a.emit_docstring(method["description"])
                    m.stmt(
                        f"""# {method["httpMethod"]}: {method["flatPath"]}""")
                    m.stmt(f"""# id: {method["id"]}""")
                    m.stmt(f"return self.internal.{mname}({params})")

            m.stmt("# nested resources")
            for srname, subresource in a.iterate_nested_resources(resource):
                with m.method(srname):
                    # NOTE(review): `params` here is left over from the last
                    # iteration of the methods loop above -- looks unintended
                    # (see the commented-out rebuild below); confirm.
                    m.stmt(f"return self.internal.{srname}({params})")

            # m.stmt("# nested resources")
            # for mname, subresource in resource.get("resources", {}).items():
            #     params = LParams()
            #     for is_positional, (pname, parameter) in itertools.zip_longest(subresource.get("parameterOrder", []), subresource.get("parameters", {}).items()):
            #         if is_positional:
            #             params.append(pname)  # TODO type:
            #         else:
            #             params[pname] = None  # TODO type:
            #     with m.method(mname, params):
            #         docstring(subresource["description"])
            #         m.stmt(f"""# id: {subresource["id"]}""")
            #         m.stmt(f"return self.{mname}({params})")

    with m.class_("Service"):
        with m.method("__init__", "service: AnyService"):
            m.stmt("self.internal = service")

        # one accessor method per resource, returning the wrapper class
        for rname in a.resources.keys():
            with m.method(rname, return_type=titleize(rname)):
                m.stmt(f"return {titleize(rname)}(self.internal.{rname}())")

    with m.def_("build", "*args", "**kwargs", return_type="Service"):
        m.stmt("# TODO: use the signature of googleapiclient.discovery.build")
        m.submodule().from_("googleapiclient.discovery", "build")
        m.stmt(
            f"return Service(build({a.name!r}, {a.version!r}, *args, **kwargs))"
        )

    print(m)
Example #19
0
def flatten(*, src: str, dst: str, input_format: str, output_format: str, format: str):
    """flatten jsonschema sub definitions"""
    from dictknife.swaggerknife.flatten import flatten

    in_fmt = input_format or format
    loaded = loading.loadfile(src, format=in_fmt)
    flattened = flatten(loaded)
    loading.dumpfile(flattened, dst, format=output_format or format)
Example #20
0
def json2swagger(
    *,
    files,
    dst: str,
    output_format: str,
    name: str,
    detector,
    emitter,
    annotate,
    emit,
    with_minimap: bool,
    without_example: bool
):
    """Infer a swagger-style schema named *name* from sample JSON files.

    detector/emitter are symbol names resolved relative to
    dictknife.swaggerknife.json2swagger; annotate is an optional annotations
    file passed to the emitter.  With emit == "info" only the raw detection
    info is dumped, otherwise the emitted schema document is written to dst.
    """
    from prestring import Module
    from dictknife import DictWalker

    if annotate is not None:
        annotate = loading.loadfile(annotate)
    else:
        annotate = {}

    ns = "dictknife.swaggerknife.json2swagger"
    detector = import_symbol(detector, ns=ns)()
    emitter = import_symbol(emitter, ns=ns)(annotate)

    # fold every sample file into a single accumulated detection result
    info = None
    for src in files:
        data = loading.loadfile(src)
        info = detector.detect(data, name, info=info)

    if emit == "info":
        loading.dumpfile(info, filename=dst)
    else:
        m = Module(indent="  ")
        m.stmt(name)
        emitter.emit(info, m)
        if with_minimap:
            # echo the generated structure as "# "-prefixed comment lines
            print("# minimap ###")
            print("# *", end="")
            print("\n# ".join(str(m).split("\n")))

        if without_example:
            # strip collected example values from the emitted document
            for _, d in DictWalker(["example"]).walk(emitter.doc):
                d.pop("example")
        loading.dumpfile(emitter.doc, filename=dst, format=output_format)
Example #21
0
 def data(self):
     """Deep-merge all files in self.data_path_list (later files win);
     if self.format is set, overlay data read from stdin on top."""
     data = deepmerge(
         *[loading.loadfile(d) for d in self.data_path_list], override=True
     )
     if self.format is not None:
         # stdin provides the highest-priority overlay
         data = deepmerge(
             data, loading.load(sys.stdin, format=self.format), override=True
         )
     return data
Example #22
0
def run(src):
    """Print name and description of every boolean-typed node in *src*."""
    document = loading.loadfile(src)
    for path, node in LooseDictWalkingIterator(["type"]).iterate(document):
        if node["type"] != "boolean":
            continue
        # prefer the node's own name; fall back to its parent key in the path
        name = node["name"] if "name" in node else path[-2]
        print(name, node.get("description"))
Example #23
0
def run(filename: str, output: t.Optional[str] = None):
    """Replace each {"allOf": [x, ...]} node with its first element and dump."""
    document = loading.loadfile(filename)
    for path, found in DictWalker(["allOf"]).walk(document):
        # walk down to the parent container of the allOf node
        parent = document
        for key in path[:-2]:
            parent = parent[key]
        assert parent[path[-2]] == found
        parent[path[-2]] = found.pop("allOf")[0]
    loading.dumpfile(document, output)
Example #24
0
def cut(*, src, dst, refs):
    """Remove each JSON-pointer in *refs* from the document at *src*."""
    from dictknife.accessing import Accessor

    document = loading.loadfile(src)
    accessor = Accessor(make_dict)
    for ref in refs:
        # accept both "#/a/b" and "a/b" forms
        pointer = ref[2:] if ref.startswith("#/") else ref
        accessor.maybe_remove(document, pointer.split("/"))
    loading.dumpfile(document, dst)
Example #25
0
def annotate(filename: str, *, use_fullname: bool = False) -> None:
    """Detect the structure of *filename* and dump generated annotations."""
    from dictknife import loading
    from detector import detect, generate_annotations

    detected = detect(loading.loadfile(filename))
    annotations = generate_annotations(
        detected, use_fullname=use_fullname, toplevel_name="toplevel"
    )
    loading.dumpfile(annotations)
Example #26
0
def run(*, config: str) -> None:
    """Load *config*, validate it through the Config schema, and print it.

    Validation errors are printed to stderr and exit with status 1.
    """
    import sys
    from dictknife import loading

    raw = loading.loadfile(config)
    try:
        validated = Config().load(raw)
    except marshmallow.ValidationError as e:
        print(e.normalized_messages(), file=sys.stderr)
        sys.exit(1)
    print(validated)
Example #27
0
def run(*, config: str) -> None:
    """Load *config* into a pydantic Config model and print it.

    Validation errors are printed (as JSON) to stderr and exit with status 1.
    """
    import sys
    from dictknife import loading

    raw = loading.loadfile(config)
    try:
        parsed = Config(**raw)
    except pydantic.ValidationError as e:
        print(e.json(), file=sys.stderr)
        sys.exit(1)
    print(parsed)
Example #28
0
def main():
    """CLI: load a definition file and print generated Go source."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("src", nargs="?", default=None)
    parser.add_argument("--package", default="autogenerated")
    args = parser.parse_args()

    loading.setup()
    module = go.Module()
    module.package(args.package)
    emit(loading.loadfile(args.src), module)
    print(module)
Example #29
0
def main():
    """CLI entry point: generate Go source from a loaded definition file."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("src", nargs="?", default=None)
    arg_parser.add_argument("--package", default="autogenerated")
    options = arg_parser.parse_args()

    loading.setup()
    data = loading.loadfile(options.src)

    module = go.Module()
    module.package(options.package)
    emit(data, module)
    print(module)
Example #30
0
 def _load_data(self, name_or_data, *, cache):
     """Normalize *name_or_data* into a mapping, loading files through *cache*.

     Accepts None (-> {}), a list/tuple (each item normalized recursively and
     deep-merged), an already-loaded mapping (anything with .get, returned
     as-is), or a filename (loaded once and memoized in *cache*).
     """
     if name_or_data is None:
         return {}
     elif isinstance(name_or_data, (list, tuple)):
         return deepmerge(
             *[self._load_data(d, cache=cache) for d in name_or_data])
     elif hasattr(name_or_data, "get"):
         # duck-typed mapping: already loaded
         return name_or_data
     else:
         # filename: load on first use, then reuse the cached document
         r = cache.get(name_or_data)
         if r is None:
             r = cache[name_or_data] = loading.loadfile(name_or_data)
         return r
Example #31
0
def run(filename: str, *, verbose: bool = False, profile: bool = False) -> None:
    """Run main() on the loaded file, optionally under cProfile.

    With profile=True, stats are written to metashape-inputs-openapi.prof.
    """
    prof = None
    if profile:
        import cProfile
        import pstats

        prof = cProfile.Profile()
        prof.enable()

    main(loading.loadfile(filename), verbose=verbose)

    if prof is not None:
        prof.disable()
        pstats.Stats(prof).dump_stats("metashape-inputs-openapi.prof")
Example #32
0
def main():
    """CLI: render *template* with data merged from --data files and stdin."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data", action="append", help="support yaml, json, toml", default=[]
    )
    parser.add_argument("template")
    parser.add_argument("--input-format", default=None)
    parser.add_argument("--output-format", default="raw")
    parser.add_argument("--dst", default=None)
    args = parser.parse_args()

    loading.setup()

    data = deepmerge(*[loading.loadfile(d) for d in args.data], override=True)
    if args.input_format is not None:
        # stdin is the highest-priority overlay
        stdin_data = loading.load(sys.stdin, format=args.input_format)
        data = deepmerge(data, stdin_data, override=True)

    result = run(args.template, data)
    loading.dumpfile(result, args.dst, format=args.output_format)
Example #33
0
def run(
    filename: str,
    *,
    schema: t.Union[None, str, dict] = None,
    guess_schema: bool,
    always_success: bool,
    output: OutputType,
) -> int:
    """Lint *filename* against a JSON schema and print formatted events.

    schema may be None, a dict, a local filename, or an http(s) URL; with
    guess_schema it is inferred via a .schemalint.py next to the file.
    Returns 0 on success (or always when always_success), else 1.
    """
    filepath = os.path.abspath(filename)
    s = streams.from_filename(filepath)

    if guess_schema:
        # collect the guesser's log messages so they surface as lint events
        wlogger = LoggerWithCollectMessage(logger, {})
        schema = guess.guess_schema(
            filepath,
            code=".schemalint.py",
            current=os.path.dirname(filename),
            logger=wlogger,
        )
        s = streams.append_messages(s, messages=wlogger.messages)

    if schema is not None:
        if isinstance(schema, str):
            if schema.startswith(("https://", "http://")):
                import requests

                schema = requests.get(schema).json()
            else:
                from dictknife import loading

                schema = loading.loadfile(schema)

        validator = get_validator(schema,
                                  check_schema=True)  # TODO: skip_check option
        s = streams.with_validator(s, validator)
    formatter = get_formatter(filepath,
                              lookup=s.context.lookup,
                              output_type=output)

    success = True
    for ev in s:
        # NOTE(review): failure is recorded when has_soft_error is False,
        # which reads inverted -- confirm the intended flag semantics.
        if not always_success and not ev.has_soft_error:
            success = False
        print(formatter.format(ev))
    return 0 if success else 1
Example #34
0
def run(
    *,
    file: str,
    pattern: str = None,
    ignore: str = None,
    format: str = "json",
    shy: bool = False,
    with_request_type: bool = False,
    with_response_type: bool = False,
    ignore_cookies: bool = False,
    include_all: bool = False,
) -> None:
    """Aggregate a captured request log per domain and print YAML summaries.

    pattern/ignore are regexes that include/exclude domains respectively.
    """
    import re

    include_rx = re.compile(pattern) if pattern else None
    exclude_rx = re.compile(ignore) if ignore else None

    document = loading.loadfile(file, format=format)
    for domain, entries in classify(document).items():
        if include_rx is not None and include_rx.search(domain) is None:
            continue
        if exclude_rx is not None and exclude_rx.search(domain) is not None:
            continue

        print("##", domain)
        print("")
        print("```yaml")
        aggregated = aggregate(entries, shy=shy)
        loading.dumpfile(
            transform(
                aggregated,
                with_request_type=with_request_type,
                with_response_type=with_response_type,
                with_cookies=not ignore_cookies,
                include_all=include_all,
            )
        )
        print("```")
        print("")
def run(src):
    """Collect package info from *src* and dump the arranged result as TOML."""
    loading.setup()
    document = loading.loadfile(src)
    arranged = arrange(collect(document["package"]))
    loading.dumpfile(arranged, format="toml")
def run(*, src: str) -> None:
    """Re-emit the document at *src* as YAML."""
    loading.dumpfile(loading.loadfile(src), format="yaml")
def run(path: str) -> None:
    """Print the first three rows of the CSV at *path* as JSON."""
    import itertools

    rows = loading.loadfile(path, format="csv")
    head = itertools.islice(rows, 3)
    loading.dumpfile(head, format="json")
def run(args):
    """Render the config's template string with a Context built from *args*."""
    config = loading.loadfile(args.config)
    ctx = Context(args, config.get("defaults") or {})
    template = config["template"].strip()
    print(template.format(c=ctx))
from nbreversible import (code, )
import pandas as pd
from dictknife import loading
# %matplotlib inline

# Scatter-plot the points in points.json, labelling each marker with its name.
with code():
    df = pd.DataFrame.from_dict(loading.loadfile("points.json")).set_index("name")
    df

with code():
    ax = df.plot(kind="scatter", x="x", y="y", s=40)
    for _, row in df.iterrows():
        # annotate each point with its index name, anchored top-left
        ax.annotate(
            row.name, (row.x, row.y),
            color="k",
            weight="semibold",
            size="medium",
            horizontalalignment="left",
            verticalalignment="top"
        )
Example #40
0
from dictknife import loading
from zenmai.naming import snakecase

loading.setup()


# print the snake_cased "name" field of every entry under the top-level "X" map
path = "./app.yaml"
d = loading.loadfile(path)
print("\n".join([snakecase(v["name"]) for v in d["X"].values()]))
Example #41
0
from dictknife import loading
import jsonschema
import argparse


# CLI: validate --data against the Draft4 JSON schema in --schema.
parser = argparse.ArgumentParser()
parser.add_argument("--schema", required=True)
parser.add_argument("--data", required=True)
args = parser.parse_args()

# fail fast if the schema itself is malformed
schema = loading.loadfile(args.schema)
jsonschema.Draft4Validator.check_schema(schema)

validator = jsonschema.Draft4Validator(schema)
data = loading.loadfile(args.data)
for err in validator.iter_errors(data):
    print(err)

Example #42
0
from dictknife.transform import flatten
from dictknife import loading

def _show(path):
    # dump the document as-is, then its flattened form
    doc = loading.loadfile(path)
    loading.dumpfile(doc, format="json")
    loading.dumpfile(flatten(doc), format="json")


_show("00person.json")

print("----------------------------------------")

_show("02person.json")
def main(*, src: str, dst: str) -> None:
    """Compute and dump the JSON patches turning *src*'s doc into *dst*'s."""
    before = loading.loadfile(src)
    after = loading.loadfile(dst)
    patches = list(unpatch.unpatch(before, after, verbose=True))
    loading.dumpfile(patches, format="json")
Example #44
0
import sys
import urllib.parse as p
from collections import defaultdict
from dictknife import loading

# Group JSON-responding HAR entries by domain and dump the result.
if len(sys.argv) == 1:
    # sys.argv.append("data.har")
    sys.argv.append(None)

document = loading.loadfile(sys.argv[1], format="json")
grouped = defaultdict(list)
for entry in document["log"]["entries"]:
    parsed_url = p.urlparse(entry["request"]["url"])
    mime_type = entry["response"]["content"]["mimeType"]
    if "application/json" in mime_type.lower():
        grouped[parsed_url.netloc].append(entry)
loading.dumpfile(grouped)
def run(*, file: str, name: str = "response") -> None:
    """Generate a schema named *name* from the document in *file* and dump it."""
    document = loading.loadfile(file)
    loading.dumpfile(makeschema(document, name=name))
def main(src):
    """Emit a GraphQL module generated from the document at *src*."""
    document = loading.loadfile(src)
    module = GraphQLModule()
    emit(module, document)
    print(module)
Example #47
0
def run(src):
    """Dig into the model section of the loaded module document."""
    document = loading.loadfile(src)
    dig(document["module"]["model"])
Example #48
0
from dictknife import loading, pp
from schema import Conf

# Load a sample config, validate it through the Conf schema, and either
# pretty-print the errors or dump the validated data.
loading.setup()
raw = loading.loadfile("./config-sample.json")
validated, err = Conf().load(raw)
if not err:
    loading.dumpfile(validated)
else:
    pp(err)