def migrate_for_subfile(u, *, scope):
    """Migrate swagger-2.0 response bodies to the openapi-3.x ``content`` layout.

    For every sub-resolver of *u* that owns a ``paths`` section, each
    operation's ``responses[...]["schema"]`` is popped and re-inserted as
    ``responses[...]["content"][<mime-type>]["schema"]``, with one entry per
    mime type taken from the operation's own ``produces`` list (pushed onto
    *scope*) or inherited from an outer scope frame.

    NOTE(review): relies on ``path_to_json_pointer`` and the ``u``/``scope``
    protocols defined elsewhere in this project — assumed, not visible here.
    """
    # Hoisted from inside the loop: the import only needs to happen once per
    # call, not once per matching resolver.
    from dictknife import DictWalker, Or

    for resolver in u.resolvers:
        uu = u.new_child(resolver)
        if not uu.has("paths", resolver=resolver):
            continue

        method_walker = DictWalker(
            [Or(["get", "post", "put", "delete", "patch", "head"])]
        )
        schema_walker = DictWalker(["schema"])
        for path, sd in method_walker.walk(uu.resolver.doc["paths"]):
            operation = sd[path[-1]]  # hoisted: was looked up three times
            # Pop "produces" off the operation and push it onto the scope so
            # nested lookups fall back through outer frames.
            frame = {}
            if "produces" in operation:
                ref = path_to_json_pointer(["paths", *path, "produces"])
                frame["produces"] = uu.pop(ref)
            with scope.scope(frame or None):
                if "responses" in operation:
                    # the walked sub-dict is unused; only the path matters
                    for spath, _ in schema_walker.walk(operation["responses"]):
                        fullpath = ["paths", *path, "responses", *spath]
                        ref = path_to_json_pointer(fullpath)
                        schema = uu.pop(ref)
                        content = uu.make_dict()
                        for produce in scope[["produces"]]:
                            content[produce] = {"schema": schema}
                        ref = path_to_json_pointer([*fullpath[:-1], "content"])
                        uu.update(ref, content)
def migrate_for_subfile(u, *, scope):
    """Rewrite each sub-file's swagger-2.0 response ``schema`` entries into
    the openapi-3.x ``content`` layout, producing one mime-type entry per
    ``produces`` value found on the operation or inherited from the
    surrounding scope frame.
    """
    for res in u.resolvers:
        child = u.new_child(res)
        if child.has("paths", resolver=res):
            from dictknife import DictWalker, Or

            methods = DictWalker(
                [Or(["get", "post", "put", "delete", "patch", "head"])]
            )
            schemas = DictWalker(["schema"])
            for path, node in methods.walk(child.resolver.doc["paths"]):
                # parameters / responses
                pushed = {}
                if "produces" in node[path[-1]]:
                    pointer = path_to_json_pointer(["paths", *path, "produces"])
                    pushed["produces"] = child.pop(pointer)
                with scope.scope(pushed or None):
                    if "responses" in node[path[-1]]:
                        for spath, snode in schemas.walk(node[path[-1]]["responses"]):
                            full = ["paths", *path, "responses", *spath]
                            pointer = path_to_json_pointer(full)
                            body = child.pop(pointer)
                            mapping = child.make_dict()
                            for mime in scope[["produces"]]:
                                mapping[mime] = {"schema": body}
                            pointer = path_to_json_pointer([*full[:-1], "content"])
                            child.update(pointer, mapping)
def run(filename: str) -> None:
    """Report services whose ``vars.exec`` path does not match the service name.

    Walks the document loaded from *filename* for every ``vars``/``exec``
    pair, skips two known-good binaries, and dumps a JSON report for each
    remaining entry whose normalized exec path does not start with the
    normalized service name.

    Fixes vs. original: the loop variable no longer shadows the walker
    (``for path, w in w.walk(d)``), and the return annotation is corrected
    from ``-> str`` to ``-> None`` (nothing is returned).

    NOTE(review): ``loading``, ``_normalize`` and ``_normalize2`` are defined
    elsewhere in this module — their exact semantics are assumed here.
    """
    d = loading.loadfile(filename)
    walker = DictWalker(["vars", "exec"])
    for path, sd in walker.walk(d):
        # path: services/<service_name>/vars/exec
        service_name = path[-3]
        exec_path = sd["exec"]
        if exec_path == "./enduser_server":
            continue
        if exec_path == "./tuner_server":
            continue
        if _normalize(_normalize2(exec_path)).startswith(
            _normalize(_normalize2(service_name))
        ):
            continue
        loading.dumpfile(
            {
                "must_include_part": _normalize(_normalize2(service_name)),
                "ref": f"#/{'/'.join(path[:-2])}",
                "exec_path": exec_path,
            },
            format="json",
        )
        print("")
# List every "$ref" in a JSON document together with its path.
# Sample document from: https://github.com/BigstickCarpet/json-schema-ref-parser
from dictknife import DictWalker
from dictknife import loading  # fix: `loading` was used but never imported

d = loading.loads(
    """
{
  "definitions": {
    "person": {
      "$ref": "schemas/people/Bruce-Wayne.json"
    },
    "place": {
      "$ref": "schemas/places.yaml#/definitions/Gotham-City"
    },
    "thing": {
      "$ref": "http://wayne-enterprises.com/things/batmobile"
    },
    "color": {
      "$ref": "#/definitions/thing/properties/colors/black-as-the-night"
    }
  }
}
""",
    format="json",
)

walker = DictWalker(["$ref"])
# fix: `path[:]` made a needless copy before joining; `path` joins directly
refs = [("/".join(path), sd["$ref"]) for path, sd in walker.walk(d)]
for path, ref in refs:
    print(path, ref)
# Read a notebook-like JSON document from stdin, collect the source lines of
# every python cell, and emit them as a module wrapped in nbreversible code().
from prestring.python import Module
from dictknife import DictWalker
from dictknife import loading

walker = DictWalker(["lines"])
doc = loading.loadfile(format="json")

cell_sources = []
for _, cell in walker.walk(doc):
    # fix: loop variable no longer shadows the document being walked
    # fix: membership test replaces the duplicated equality comparisons
    if cell["language"] in ("python", "py"):
        cell_sources.append(cell["lines"])

m = Module()
m.from_("nbreversible", "code")
for lines in cell_sources:
    with m.with_("code()"):
        for line in lines:
            if line.startswith("%"):
                # ipython magics are not plain python; emit them commented out
                m.stmt("#{}", line)
            else:
                m.stmt(line.rstrip())
    m.sep()
print(m)