def transform(
    *,
    src: str,
    dst: str,
    config: str,
    config_file: str,
    code: str,
    functions: str,
    input_format: str,
    output_format: str,
    format: str,
    sort_keys: bool,
):
    """transform dict"""
    from magicalimport import import_symbol
    from dictknife import deepmerge

    if code is not None:
        transform = eval(code)
    elif functions:

        def transform(d):
            for fn in functions:
                if "." not in fn:
                    fn = "dictknife.transform:{}".format(fn)
                d = import_symbol(fn)(d)
            return d

    else:
        transform = lambda x: x  # NOQA

    input_format = input_format or format
    kwargs = loading.loads(config, format=input_format)
    if config_file:
        with open(config_file) as rf:
            kwargs = deepmerge(kwargs, loading.load(rf, format=input_format))

    data = loading.loadfile(src, input_format)
    result = transform(data, **kwargs)
    loading.dumpfile(result, dst, format=output_format or format, sort_keys=sort_keys)
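# Minimal usage sketch (hypothetical file names and lambda; in dictknife this
# is wired up as the `transform` CLI subcommand):
#
#   transform(
#       src="people.yaml", dst="people.json",
#       config="{}", config_file=None,
#       code="lambda d, **kwargs: {'items': d}",
#       functions=None,
#       input_format="yaml", output_format="json", format=None,
#       sort_keys=False,
#   )
#
# When `code` is given, the eval'd callable receives the loaded data plus any
# keyword arguments merged from `config`/`config_file`.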
def main(src):
    Base = automap_base()
    engine = create_engine(src)
    Base.prepare(engine, reflect=True)

    collector = Collector(Resolver())
    d = collector.collect(Base.classes)
    loading.dumpfile(d, format="json")
def merge(
    *,
    files: list,
    dst: str,
    style: str,  # flavor? strategy?
    strict: bool = False,
    wrap: str = None,
    wrap_section: str = "definitions",
):
    """merge files"""
    from dictknife.langhelpers import make_dict, as_jsonpointer
    from dictknife import deepmerge

    if style == "ref":
        dstdir = dst and os.path.dirname(dst)

        r = make_dict()
        seen = {}
        for src in files:
            d = loading.loadfile(src)
            for ns, sd in d.items():
                for name in sd:
                    if ns not in r:
                        r[ns] = make_dict()
                        seen[ns] = make_dict()
                    if strict and name in r[ns]:
                        raise RuntimeError(
                            "{name} already exists (where={where} and {where2})".format(
                                name=name, where=seen[ns][name], where2=src
                            )
                        )
                    if dst is None:
                        where = ""
                    else:
                        where = os.path.relpath(src, start=dstdir)
                    r[ns][name] = {
                        "$ref": "{where}#/{ns}/{name}".format(
                            where=where, ns=ns, name=as_jsonpointer(name)
                        )
                    }
                    seen[ns][name] = src
    elif style == "whole":
        # TODO: strict support?
        data = [loading.loadfile(src) for src in files]
        r = deepmerge(*data, override=True)
    else:
        raise RuntimeError("invalid style: {}".format(style))

    if wrap is not None:
        wd = make_dict()
        wd["type"] = "object"
        wd["properties"] = make_dict()
        for name in r.get(wrap_section) or {}:
            wd["properties"][name] = {
                "$ref": "#/{wrap_section}/{name}".format(
                    wrap_section=wrap_section, name=name
                )
            }
        r[wrap_section][wrap] = wd
    loading.dumpfile(r, dst)
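# Sketch of what style="ref" emits (hypothetical inputs a.yaml and b.yaml,
# each holding a top-level "definitions" section): bodies are not copied;
# every name becomes a $ref back to the file it came from, e.g.
#
#   definitions:
#     person:
#       $ref: "a.yaml#/definitions/person"
#     group:
#       $ref: "b.yaml#/definitions/group"
#
# style="whole", by contrast, deep-merges the documents themselves.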
def run(filename: str) -> str:
    d = loading.loadfile(filename)
    walker = DictWalker(["vars", "exec"])
    for path, sd in walker.walk(d):
        # path: services/<service_name>/vars/exec
        service_name = path[-3]
        exec_path = sd["exec"]
        if exec_path == "./enduser_server":
            continue
        if exec_path == "./tuner_server":
            continue
        if _normalize(_normalize2(exec_path)).startswith(
            _normalize(_normalize2(service_name))
        ):
            continue
        loading.dumpfile(
            {
                "must_include_part": _normalize(_normalize2(service_name)),
                "ref": f"#/{'/'.join(path[:-2])}",
                "exec_path": exec_path,
            },
            format="json",
        )
        print("")
def _migrate(self, doc=None, *, where=None, savedir=None):
    where = where or os.getcwd()
    doc = doc or self.resolver.doc
    self._prepare(doc=doc, where=where)

    yield self.updater

    for r in self.updater.resolvers:
        relpath = os.path.relpath(r.filename, start=where)
        savepath = r.filename
        if savedir:
            savepath = os.path.join(
                savedir,
                os.path.relpath(
                    savepath, start=os.path.dirname(self.resolver.filename)
                ),
            )
            savepath = os.path.relpath(savepath, start=where)

        diff = "\n".join(self.differ.diff(r, where=where))
        if not diff:
            if savedir is None:
                logger.debug("skip file %s", relpath)
            else:
                logger.info("copy file %s -> %s", relpath, (savepath or relpath))
                try:
                    shutil.copy(r.filename, savepath)
                except FileNotFoundError:
                    os.makedirs(os.path.dirname(savepath))
                    shutil.copy(r.filename, savepath)
            continue

        logger.info("update file %s -> %s", relpath, (savepath or relpath))
        loading.dumpfile(
            self.transform(self.differ.after_data(r.doc)),
            savepath,
            **self.dump_options,
        )
def run(): data = { "people": [ { "name": "boo", "age": 20 }, { "name": "foo", "age": 20, "nickname": "F" }, { "name": "bar", "age": 20 }, ] } q = """\ { foo: person(name: "foo") { name, age, nickname }, xxx: person(name: "xxx") { name, age, nickname }, } """ result = g.graphql_sync(schema, q, Root(data)) print(result.errors) loading.dumpfile(result.data)
def mkdict(
    *,
    output_format: str,
    separator: str,
    delimiter: str,
    sort_keys: bool,
    squash: bool,
    extra,
):
    from dictknife.mkdict import mkdict

    if not extra:
        r = []
        variables = {}
        for code in sys.stdin:
            d = mkdict(code, separator=separator, shared=variables)
            if not d:
                continue
            if isinstance(d, list):
                r.extend(d)
            else:
                r.append(d)
        if len(r) == 1:
            r = r[0]
    else:
        args = []
        for x in extra:
            if "=" not in x:
                args.append(repr(x))
            else:
                for e in x.split("=", 1):
                    args.append(repr(e))
        r = mkdict(" ".join(args), separator=separator)

    if squash:
        for row in r:
            loading.dumpfile(row, format=output_format, sort_keys=sort_keys)
            sys.stdout.write("\n")
    else:
        loading.dumpfile(r, format=output_format, sort_keys=sort_keys)
        sys.stdout.write("\n")
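# Rough sketch of the mkdict mini-language (illustrative values; the exact
# DSL lives in dictknife.mkdict): tokens are read as alternating key/value
# pairs, and the separator (default "/") spells nested paths.
#
#   $ echo "name foo age 20" | dictknife mkdict
#   {"name": "foo", "age": "20"}
#
#   $ echo "person/name foo" | dictknife mkdict
#   {"person": {"name": "foo"}}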
def main(secret_path, credentials_path):
    storage = Storage(credentials_path)
    credentials = storage.get()
    if credentials is None:
        scopes = ["https://www.googleapis.com/auth/analytics.readonly"]
        flow = client.flow_from_clientsecrets(secret_path, scopes)
        args = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
        args.auth_host_port = [44444]
        args.logging_level = "DEBUG"
        credentials = tools.run_flow(flow, storage, args)

    http = credentials.authorize(httplib2.Http())
    print("0@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    cache = InMemoryCache()
    resource1 = build("analytics", "v3", http=http, cache_discovery=True, cache=cache)
    print("1@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    resource2 = build("analytics", "v3", http=http, cache_discovery=True, cache=cache)
    print("2@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    accounts = resource1.management().accounts().list().execute()
    loading.dumpfile(accounts, None)
    print("3@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    accounts = resource2.management().accounts().list().execute()
    loading.dumpfile(accounts, None)
def shrink(
    *,
    files: list,
    input_format: str,
    output_format: str,
    max_length_of_string: int,
    cont_suffix: str,
    max_length_of_list: int,
    with_tail: bool,
):
    """shrink"""
    from dictknife.transform import shrink

    for f in files:
        with _open(f) as rf:
            d = loading.load(rf, format=input_format)
        format = output_format or input_format or loading.guess_format(f)
        r = shrink(
            d,
            max_length_of_list=max_length_of_list,
            max_length_of_string=max_length_of_string,
            cont_suffix=cont_suffix,
            with_tail=with_tail,
        )
        loading.dumpfile(r, format=format)
def run():
    schema.get_type("Team").fields["members"].resolve = Team.members
    data = {
        "teams": [
            {
                "name": "x",
                "members": [
                    {"age": 10, "name": "a"},
                    {"age": 10, "name": "b"},
                    {"age": 10, "name": "z"},
                ],
            },
            {
                "name": "y",
                "members": [
                    {"age": 10, "name": "i"},
                    {"age": 10, "name": "j"},
                    {"age": 10, "name": "k"},
                ],
            },
        ]
    }
    q = """\
    {
      teams { name, members(name: "i") { name } }
    }
    """
    result = g.graphql_sync(schema, q, Root(data))
    print(result.errors)
    loading.dumpfile(result.data)
def run(): data = { "people": [ { "name": "boo", "age": 20 }, { "name": "foo", "age": 20, "nickname": "F" }, { "name": "bar", "age": 20 }, ] } q = """\ { desc_people: people(order_by: name_DESC) { name, age, nickname }, asc_people: people(order_by: name_ASC) { name, age, nickname } } """ result = g.graphql_sync(schema, q, Root(data)) loading.dumpfile(result.data)
def emit(*, title: str = "egoist", version: str = "0.0.0") -> None:
    from emit import emit, get_walker
    from dictknife import loading

    w = get_walker([Article])
    root = emit(w, app.routes, title=title, version=version)
    loading.dumpfile(root)
def transform(
    *,
    src: str,
    dst: str,
    code: str,
    functions: str,
    input_format: str,
    output_format: str,
    format: str,
    sort_keys: bool,
):
    """transform dict"""
    from magicalimport import import_symbol

    if code is not None:
        transform = eval(code)
    elif functions:

        def transform(d):
            for fn in functions:
                if "." not in fn:
                    fn = "dictknife.transform:{}".format(fn)
                d = import_symbol(fn)(d)
            return d

    else:
        transform = lambda x: x  # NOQA

    input_format = input_format or format
    data = loading.loadfile(src, input_format)
    result = transform(data)
    loading.dumpfile(
        result,
        dst,
        format=output_format or input_format or format,
        sort_keys=sort_keys,
    )
async def main() -> None:
    from async_asgi_testclient import TestClient
    from dictknife import loading

    async with TestClient(app) as client:
        response = await client.get("/openapi.json")
        loading.dumpfile(response.json())
def examples(
    *,
    src: str,
    dst: str = None,
    ref: str,
    limit: int,
    input_format: str,
    output_format: str,
    format: str,
    use_expand: bool = False,
):
    """output sample value from swagger's spec"""
    from dictknife.jsonknife import extract_example
    from dictknife.jsonknife.accessor import access_by_json_pointer

    if use_expand:
        from dictknife.jsonknife import bundle, expand

        if ref is not None:
            src = "{prefix}#/{name}".format(prefix=src, name=ref.lstrip("#/"))
        data = bundle(src, format=input_format or format)
        data = expand(None, doc=data)
    else:
        data = loading.loadfile(src, format=input_format or format)
        if src and "#/" in src:
            _, ref = src.split("#/", 1)
        if ref is not None:
            data = access_by_json_pointer(data, ref)

    d = extract_example(data, limit=limit)
    loading.dumpfile(d, dst, format=output_format or format or "json")
def flatten(*, src: str, dst: str, input_format: str, output_format: str, format: str):
    """flatten jsonschema sub definitions"""
    from dictknife.swaggerknife.flatten import flatten

    input_format = input_format or format
    data = loading.loadfile(src, format=input_format)
    d = flatten(data)
    loading.dumpfile(d, dst, format=output_format or format)
def run(*, filename):
    def onload(d, resolver, w=DictWalker(["$include"])):
        for _, sd in w.walk(d):
            subresolver, jsref = resolver.resolve(sd.pop("$include"))
            sd.update(subresolver.access_by_json_pointer(jsref))

    resolver = get_resolver(filename, onload=onload)
    loading.dumpfile(resolver.doc)
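# Sketch of the input this onload hook rewrites (hypothetical file names):
#
#   # main.yaml
#   definitions:
#     person:
#       $include: "./models.yaml#/definitions/person"
#
# On load, each "$include" key is popped, its target is resolved, and the
# referenced subtree is merged into the containing dict in place.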
def run(*, config: str) -> None:
    c = loading.loadfile(config)
    overwrite_file = os.environ.get("OVERWRITE_CONFIG")
    if overwrite_file is not None:
        c2 = loading.loadfile(overwrite_file)
        c = deepmerge(c, c2, method="replace")
    loading.dumpfile(c)
def run(filename: str, output: t.Optional[str] = None):
    d = loading.loadfile(filename)
    for path, sd in DictWalker(["allOf"]).walk(d):
        parent = d
        for name in path[:-2]:
            parent = parent[name]
        assert parent[path[-2]] == sd
        parent[path[-2]] = sd.pop("allOf")[0]
    loading.dumpfile(d, output)
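# Effect sketch: given a node like
#     {"allOf": [{"type": "object"}, {"description": "..."}]}
# the walker replaces it in place with the *first* allOf element only,
# i.e. {"type": "object"}; the remaining allOf entries and any sibling
# keys of the node are dropped.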
def emitfiles(d, format=None, position="."):
    if isinstance(d, (list, tuple)):
        for x in d:
            emitfiles(x, format=format, position=position)
    else:
        for name, data in d.items():
            fpath = os.path.join(position, name)
            logger.info("emit:%s", os.path.normpath(fpath))
            loading.dumpfile(data, fpath, format=format)
def dump(self, commands, outdir):
    outdir = outdir or "."
    for t, cmd, data in commands:
        result = _render_with_newline(t, data)
        outpath = os.path.join(outdir, cmd["dst"])
        logger.info("rendering %s (template=%s)", outpath, t.name)
        fmt = cmd.get("format") or self.format or "raw"
        if fmt != "raw":
            result = loading.loads(result, format=fmt)
        loading.dumpfile(result, outpath, format=fmt)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("files", nargs="*", type=argparse.FileType("r"))
    args = parser.parse_args()

    files = args.files
    if len(files) == 0:
        files = [sys.stdin]
    for f in files:
        loading.dumpfile(extract(loading.load(f)), format="json")
def annotate(filename: str, *, use_fullname: bool = False) -> None:
    from dictknife import loading
    from detector import detect, generate_annotations

    d = loading.loadfile(filename)
    result = detect(d)
    r = generate_annotations(
        result, use_fullname=use_fullname, toplevel_name="toplevel"
    )
    loading.dumpfile(r)
def cut(*, src, dst, refs):
    from dictknife.accessing import Accessor

    d = loading.loadfile(src)
    accessor = Accessor(make_dict)
    for ref in refs:
        if ref.startswith("#/"):
            ref = ref[2:]
        accessor.maybe_remove(d, ref.split("/"))
    loading.dumpfile(d, dst)
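# Usage sketch (hypothetical file and ref): each ref is a "/"-separated path
# into the document; a leading "#/" is stripped before splitting.
#
#   cut(src="swagger.yaml", dst="out.yaml", refs=["#/definitions/unused"])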
def main(*, src: str) -> None:
    precompile_ref_walker = DictWalker(["$precompile-ref"])

    def onload(d, resolver):
        for path, sd in precompile_ref_walker.walk(d):
            subresolver, query = resolver.resolve(sd.pop("$precompile-ref"))
            value = subresolver.access(subresolver.doc, query)
            jsref = path_to_json_pointer(path[:-1])
            resolver.assign(d, jsref, value)

    d = bundle(src, onload=onload)
    loading.dumpfile(d)
def main(*, src: str) -> None:
    precompile_ref_walker = DictWalker(["$precompile-ref"])
    accessor = Accessor()

    def onload(d, subresolver):
        for path, sd in precompile_ref_walker.walk(d):
            sdoc, query = subresolver.resolve(sd.pop("$precompile-ref"))
            sresolved = access_by_json_pointer(sdoc.doc, query)
            accessor.assign(d, path[:-1], sresolved)

    resolver = get_resolver_from_filename(src, onload=onload)
    d = Bundler(resolver).bundle()
    loading.dumpfile(d)
def run(): data = { "teams": [ { "name": "x", "members": [ { "age": 10, "name": "a" }, { "age": 10, "name": "b" }, { "age": 10, "name": "z" }, ], }, { "name": "y", "members": [ { "age": 10, "name": "i" }, { "age": 10, "name": "j" }, { "age": 10, "name": "k" }, ], }, ] } q = """\ { teams { name, members(name: "i") { name }} } """ result = g.graphql_sync(schema, q, Root(data)) print(result.errors) loading.dumpfile(result.data)
def run():
    import time

    dotenv.load_dotenv()
    token = os.environ["TOKEN"]

    with contextlib.ExitStack() as stack:
        stack.enter_context(handle_error_response())
        s = stack.enter_context(requests.Session())
        s.headers["Authorization"] = f"Bearer {token}"

        user_id = "podhmo"
        url = f"{base_url}/api/v2/users/{user_id}/stocks"

        r = []
        while url is not None:
            time.sleep(0.1)
            print(f"get url {url}", file=sys.stderr)

            response = s.get(url, params={"per_page": 100})
            response.raise_for_status()

            output = {}
            output["headers"] = dict(response.headers)
            output["response"] = {}
            output["response"]["url"] = response.url
            output["response"]["status_code"] = response.status_code
            output["response"]["content"] = response.json()
            r.append(output)

            link = response.headers.get("Link")
            if link is None:
                break

            next_url = None
            for string, kind in re.findall(r'<\s*([^;]+)>; rel="([^,]+),?', link):
                kind = kind.strip('"')
                if kind == "next":
                    next_url = string.strip("<>")
                    break
            if next_url is None:
                break
            url = next_url

    from dictknife import loading

    loading.dumpfile(r)
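# The Link header being followed here looks like (RFC 5988 style):
#   <https://qiita.com/api/v2/users/podhmo/stocks?page=2>; rel="next", <...>; rel="last"
# The loop keeps requesting the rel="next" target until no Link header (or
# no "next" relation) remains.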
def main(): parser = argparse.ArgumentParser() parser.add_argument("--extracted", required=True) parser.add_argument("--jsonschema", required=True) parser.add_argument("-o", "--output-format", default="yaml", choices=["json", "yaml"]) args = parser.parse_args() r = merge( extracted=loading.loadfile(args.extracted), jsonschema=loading.loadfile(args.jsonschema), ) loading.dumpfile(r, format=args.output_format)
def diff(
    *,
    normalize: bool,
    sort_keys: bool,
    skip_empty: bool,
    left: str,
    right: str,
    n: int,
    input_format: str,
    output_format: str = "diff",
    verbose: bool = False,
):
    """diff dict"""
    from dictknife.diff import diff, diff_rows, make_jsonpatch

    with open(left) as rf:
        left_data = loading.load(rf, format=input_format)
    with open(right) as rf:
        right_data = loading.load(rf, format=input_format)

    if output_format == "diff":
        for line in diff(
            left_data,
            right_data,
            fromfile=left,
            tofile=right,
            n=n,
            normalize=normalize,
            sort_keys=sort_keys,
        ):
            print(line)
    elif output_format == "jsonpatch":
        r = make_jsonpatch(left_data, right_data, verbose=verbose)
        loading.dumpfile(list(r), format="json")
    else:
        if output_format == "dict":
            output_format = "json"
        diff_key = "diff"
        rows = diff_rows(
            left_data,
            right_data,
            fromfile=left,
            tofile=right,
            diff_key=diff_key,
            normalize=normalize,
        )
        if skip_empty:
            rows = [row for row in rows if row[diff_key] not in ("", 0)]
        loading.dumpfile(rows, format=output_format)
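# Usage sketch (hypothetical files). The three output styles are:
#   output_format="diff"       -> unified-diff text, printed line by line
#   output_format="jsonpatch"  -> a JSON Patch-style list of operations
#   anything else (e.g. "tsv") -> row-per-path records from diff_rows()
#
#   diff(normalize=False, sort_keys=False, skip_empty=False,
#        left="a.yaml", right="b.yaml", n=3,
#        input_format="yaml", output_format="diff")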
def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data", action="append", help="support yaml, json, toml", default=[]
    )
    parser.add_argument("template")
    parser.add_argument("--input-format", default=None)
    parser.add_argument("--output-format", default="raw")
    parser.add_argument("--dst", default=None)
    args = parser.parse_args()

    loading.setup()
    data = deepmerge(*[loadfile(d) for d in args.data], override=True)
    if args.input_format is not None:
        data = deepmerge(
            data, loading.load(sys.stdin, format=args.input_format), override=True
        )
    result = run(args.template, data)
    loading.dumpfile(result, args.dst, format=args.output_format)
def run(
    *,
    file: str,
    pattern: str = None,
    ignore: str = None,
    format: str = "json",
    shy: bool = False,
    with_request_type: bool = False,
    with_response_type: bool = False,
    ignore_cookies: bool = False,
    include_all: bool = False,
) -> None:
    import re

    pattern_rx = pattern and re.compile(pattern)
    ignore_rx = ignore and re.compile(ignore)

    d = loading.loadfile(file, format=format)
    for domain, entries in classify(d).items():
        if pattern_rx and pattern_rx.search(domain) is None:
            continue
        if ignore_rx and ignore_rx.search(domain) is not None:
            continue

        print("##", domain)
        print("")
        print("```yaml")
        r = aggregate(entries, shy=shy)
        loading.dumpfile(
            transform(
                r,
                with_request_type=with_request_type,
                with_response_type=with_response_type,
                with_cookies=not ignore_cookies,
                include_all=include_all,
            )
        )
        print("```")
        print("")
parser.add_argument("--logging", default="INFO", choices=list(logging._nameToLevel.keys())) parser.add_argument("--emit", default="schema", choices=["schema", "info"]) parser.add_argument("--dst", type=argparse.FileType('w'), default=None) parser.add_argument("src", type=argparse.FileType('r')) args = parser.parse_args() logging.basicConfig(level=args.logging) annotations = {} if args.annotations: annotations = loading.load(args.annotations) detector = Detector() emitter = Emitter(annotations) loading.setup() data = loading.load(args.src) info = detector.detect(data, args.name) if args.emit == "info": loading.dumpfile(info, filename=args.dst) else: m = Module(indent=" ") m.stmt(args.name) emitter.emit(info, m) if args.show_minimap: print("# minimap ###") print("# *", end="") print("\n# ".join(str(m).split("\n"))) loading.dumpfile(emitter.doc, filename=args.dst)
from dictknife.transform import flatten
from dictknife import loading

d = loading.loadfile("00person.json")
loading.dumpfile(d, format="json")
loading.dumpfile(flatten(d), format="json")
print("----------------------------------------")
d = loading.loadfile("02person.json")
loading.dumpfile(d, format="json")
loading.dumpfile(flatten(d), format="json")
def run(*, src: str) -> None:
    data = loading.loadfile(src)
    loading.dumpfile(data, format="yaml")
from dictknife import loading
from dictknife.jsonknife import get_resolver
from dictknife.jsonknife import Bundler

filename = "./src/main.json"
r = get_resolver(filename)
r.doc = {
    "definitions": {
        "person": r.access_by_json_pointer("/definitions/person"),
    }
}
b = Bundler(r)
d = b.bundle()
loading.dumpfile(d)
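# Note: narrowing r.doc to just "/definitions/person" before bundling means
# Bundler only pulls in the $refs reachable from that subtree, rather than
# bundling all of ./src/main.json.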
def main(*, src: str, dst: str) -> None:
    src_data = loading.loadfile(src)
    dst_data = loading.loadfile(dst)
    patches = list(unpatch.unpatch(src_data, dst_data, verbose=True))
    loading.dumpfile(patches, format="json")
import sys
import urllib.parse as p
from collections import defaultdict
from dictknife import loading

if len(sys.argv) == 1:
    # sys.argv.append("data.har")
    sys.argv.append(None)

d = loading.loadfile(sys.argv[1], format="json")
r = defaultdict(list)
for entry in d["log"]["entries"]:
    parsed = p.urlparse(entry["request"]["url"])
    if "application/json" in entry["response"]["content"]["mimeType"].lower():
        r[parsed.netloc].append(entry)
loading.dumpfile(r)
import uuid
import sys
import dictknife.loading as loading


def randname():
    return uuid.uuid4().hex[:6]


for i in range(5):
    d = {"name": randname(), "i": i}
    loading.dumpfile(d, format="json")
    sys.stdout.flush()
if f == "markdown": # xxx: continue m = import_module(f"dictknife.loading.{f}") ex_parser = ex_parsers.add_parser(f) setup = getattr(m, "setup_parser", None) if setup is None: print(f"{m.__name__} doesn't have setup_parser() function", file=sys.stderr) continue setup(ex_parser) args, rest = parser.parse_known_args() extra, _ = ex_parsers.parse_args(args.format, rest) print(args, extra) L = [ { "name": "foo", "age": 20 }, { "name": "bar", "age": 21, "nickname": "B" }, { "name": "boo" }, ] loading.dumpfile(L, **vars(args), extra=vars(extra))
def run(path: str) -> None:
    rows = loading.loadfile(path, format="csv")

    import itertools

    rows = itertools.islice(rows, 3)
    loading.dumpfile(rows, format="json")
def do_mkdict(self, line):
    loading.dumpfile(mkdict(line, shared=self.d), format="json")
    print("")
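# Illustrative session (assumes this handler lives in a cmd.Cmd subclass and
# that self.d is the variable dict shared across invocations):
#
#   (repl) mkdict name foo
#   {"name": "foo"}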
    person: Person  # (tail of the preceding class definition in this excerpt)


class People(Array):
    items = Person


# todo: default,example,validation
# todo: string as type (order of reference)
with Namespace("components") as components:
    with components.namespace("schemas") as schemas:
        # schemas.mount(Person)
        schemas.mount(People)
        schemas.mount(XPerson)

assert get_resolver().lookup.lookup(components, "schemas/Person") is not None
loading.dumpfile(components.as_dict(), format="json")
# expected output (truncated):
# {
#     "components": {
#         "schemas": {
#             "Person": {
#                 "type": "object",
#                 "description": "person",
#                 "properties": {
#                     "name": {
#                         "type": "string"
#                     },
#                     "age": {
#                         "type": "integer"
#                     }
#                 },
from tempfile import TemporaryDirectory
from pathlib import Path

import dictknife.loading as loading
from dictknife.pp import pp, indent

with TemporaryDirectory() as d:
    d = Path(d)
    data = {
        "definitions": {
            "person": {"$ref": "./models/person.json#/definitions/person"}
        }
    }
    loading.dumpfile(data, str(d.joinpath("./swagger.json")))

    person = {
        "definitions": {
            "person": {
                "properties": {
                    "name": {"$ref": "../primitives/name.json"},
                    "age": {"$ref": "../primitives/age.json"},
                }
            }
        }
    }
def run(src):
    loading.setup()
    d = loading.loadfile(src)
    store = collect(d["package"])
    r = arrange(store)
    loading.dumpfile(r, format="toml")
def run(*, file: str, name: str = "response") -> None:
    d = loading.loadfile(file)
    loading.dumpfile(makeschema(d, name=name))
from dictknife import loading, pp
from schema import Conf

loading.setup()
data = loading.loadfile("./config-sample.json")
data, err = Conf().load(data)
if err:
    pp(err)
else:
    loading.dumpfile(data)