def collatz(ctx, value, macro_code, macro_params):
    """Macro body: build one step of a Collatz series as a nested macro chain.

    Each invocation computes the next Collatz value and instantiates a child
    macro (running this same code, via ``macro_code``) for it, then prepends
    ``value`` to the child's series with a transformer.  Recursion bottoms out
    at ``value == 1``, where ``ctx.series`` is set directly to ``[1]``.

    ``ctx`` is the macro's generated context; ``cell``, ``macro`` and
    ``transformer`` come from the seamless macro namespace (not visible here).
    """
    print("COLLATZ", value)
    # Output cell of this macro level: the Collatz series starting at `value`.
    ctx.series = cell()
    if value == 1:
        # Base case: series is just [1]; no child macro needed.
        ctx.series.set([1])
        return
    # Collatz step: 3n+1 for odd, n/2 for even.
    if value % 2:
        newvalue = value * 3 + 1
    else:
        newvalue = value // 2
    ctx.value = cell("int").set(value)
    ctx.newvalue = cell("int").set(newvalue)
    ctx.macro_params = cell().set(macro_params)
    # Child macro for the next Collatz value; it runs this same macro code.
    m = ctx.macro = macro(macro_params)
    ctx.newvalue.connect(m.value)
    ctx.macro_code = cell("macro").set(macro_code)
    ctx.macro_code.connect(m.code)
    # The code is also passed *as a parameter* so the child can recurse again.
    ctx.macro_code.connect(m.macro_code)
    ctx.macro_params.connect(m.macro_params)
    # Transformer prepends our value to the child's series: c = [b] + a.
    ctx.tf = transformer({"a": "input", "b": "input", "c": "output"})
    ctx.a = cell()
    ctx.a.connect(ctx.tf.a)
    ctx.b = cell("int")
    ctx.b.connect(ctx.tf.b)
    # NOTE(review): reaches into the child macro's generated context (m.ctx)
    # for its `series` cell — assumes the child has been instantiated
    # synchronously by this point; confirm against the seamless macro contract.
    m.ctx.series.connect(ctx.a)
    ctx.value.connect(ctx.b)
    ctx.tf.code.set("c = [b] + a")
    ctx.tf.c.connect(ctx.series)
    print("/COLLATZ", value)
def collatz(ctx, value, macro_code, macro_params): import sys #kludge print("COLLATZ", value) ctx.series = cell("json") if value == 1: ctx.series.set([1]) return if value % 2: newvalue = value * 3 + 1 else: newvalue = value // 2 ###ctx.value = cell("int").set(value) ###ctx.newvalue = cell("int").set(newvalue) ctx.value = cell().set(value) ctx.newvalue = cell().set(newvalue) ctx.macro_params = cell("json").set(macro_params) m = ctx.macro = macro(ctx.macro_params.value) ctx.newvalue.connect(m.value) ctx.macro_code = cell("macro").set(macro_code) ctx.macro_code.connect(m.code) ctx.macro_code.connect(m.macro_code) ctx.macro_params.connect(m.macro_params) if sys.USE_TRANSFORMER_CODE: ###transformer code version ctx.tf = transformer({"a": "input", "b": "input", "c": "output"}) m.gen_context.series.connect(ctx.tf.a) ctx.value.connect(ctx.tf.b) ctx.tf.code.set("c = [b] + a") ctx.tf.c.connect(ctx.series) print("/COLLATZ", value) else: #no transformer code, exploits that macro is synchronous series = [value] + m.gen_context.series.value ctx.series.set(series) print("/COLLATZ", series)
def collatz(ctx, value, macro_code, macro_params):
    """Macro body: build one step of a Collatz series via a nested macro.

    Same structure as the other ``collatz`` variants: compute the next value,
    instantiate a child macro running this same code for it, and prepend
    ``value`` to the child's series with a transformer.
    """
    print("COLLATZ", value)
    ctx.series = cell()
    if value == 1:
        # Base case: series is just [1].
        ctx.series.set([1])
        return
    # Collatz step: 3n+1 for odd, n/2 for even.
    if value % 2:
        newvalue = value * 3 + 1
    else:
        newvalue = value // 2
    ###ctx.value = cell("int").set(value)
    ###ctx.newvalue = cell("int").set(newvalue)
    ctx.value = cell().set(value)
    ctx.newvalue = cell().set(newvalue)
    ctx.macro_params = cell().set(macro_params)
    m = ctx.macro = macro(macro_params)
    ctx.newvalue.connect(m.value)
    ctx.macro_code = cell("macro").set(macro_code)
    ctx.macro_code.connect(m.code)
    ctx.macro_code.connect(m.macro_code)
    ctx.macro_params.connect(m.macro_params)
    ctx.tf = transformer({"a": "input", "b": "input", "c": "output"})
    ctx.a = cell()
    ctx.a.connect(ctx.tf.a)
    ctx.b = cell()
    ctx.b.connect(ctx.tf.b)
    m.ctx.series.connect(ctx.a)
    ctx.value.connect(ctx.b)
    # NOTE(review): transformer code here is the bare expression "[b] + a",
    # not "c = [b] + a" as in the sibling variant — presumably this relies on
    # expression-valued transformer code being assigned to the output pin;
    # verify this against the seamless transformer semantics.
    ctx.tf.code.set("[b] + a")
    ctx.tf.c.connect(ctx.series)
    print("/COLLATZ", value)
def main():
    """Build the top-level graph: a 'compute' library macro selecting 'pi'.

    Publishes ``ctx`` (toplevel context) and ``compute`` (the macro's
    generated context) as module globals for interactive inspection.
    Depends on module-level ``select_params`` and a registered 'compute'
    library (defined elsewhere in the file — not visible here).
    """
    global ctx, compute
    ctx = context(toplevel=True)
    # Macro code comes from the library cell "compute.select".
    ctx.select_compute = libcell("compute.select")
    ctx.compute = macro(select_params, lib="compute")
    ctx.select_compute.connect(ctx.compute.code)
    # Select the 'pi' algorithm via the macro's `which` parameter pin.
    ctx.compute.which.cell().set("pi")
    compute = ctx.compute.gen_context
    ctx.iterations = cell().set(10000)
    ctx.iterations.connect(compute.iterations)
    ctx.result = cell()
    compute.result.connect(ctx.result)
def select(ctx, which):
    """Library macro body: load either the 'pi' or the 'e' compute algorithm.

    Instantiates an inner macro whose code is pulled from the library
    (``.pi.load`` or ``.e.load``) and re-exports its ``iterations`` input and
    ``result`` output on this context.

    ``which`` must be "pi" or "e" (asserted; not user-input validation).
    """
    assert which in ("pi", "e")
    ctx.readme = libcell(".readme")
    ctx.loader = macro({})
    # Pick the algorithm's loader code from the library namespace.
    if which == "pi":
        ctx.load = libcell(".pi.load")
    else:
        ctx.load = libcell(".e.load")
    ctx.load.connect(ctx.loader.code)
    # NOTE(review): accesses the generated context as `.ctx` here, while the
    # sibling variant uses `.gen_context` — presumably two API generations;
    # confirm which attribute is correct for this seamless version.
    compute = ctx.loader.ctx
    ctx.iterations = cell()
    ctx.iterations.connect(compute.iterations)
    ctx.result = cell()
    compute.result.connect(ctx.result)
def select(ctx, which):
    """Library macro body: load either the 'pi' or the 'e' compute algorithm.

    Instantiates an inner macro whose code comes from the library
    (``.pi.load`` or ``.e.load``) and re-exports its ``iterations`` input and
    ``result`` output on this context.  Uses ``.gen_context`` to reach the
    inner macro's generated context (cf. the sibling variant using ``.ctx``).
    """
    assert which in ("pi", "e")
    ctx.readme = libcell(".readme")
    ctx.loader = macro({})
    # Pick the algorithm's loader code from the library namespace.
    if which == "pi":
        ctx.load = libcell(".pi.load")
    else:
        ctx.load = libcell(".e.load")
    ctx.load.connect(ctx.loader.code)
    compute = ctx.loader.gen_context
    ctx.iterations = cell()
    ctx.iterations.connect(compute.iterations)
    ctx.result = cell()
    compute.result.connect(ctx.result)
def main():
    """Build the top-level graph: a 'compute' library macro selecting 'pi'.

    Same as the sibling ``main`` variant, except the macro's ``which``
    parameter is fed through an explicit cell (marked KLUDGE) because
    ``ctx.compute.which.cell()`` was malfunctioning, and the generated
    context is reached via ``.ctx`` instead of ``.gen_context``.
    """
    global ctx, compute
    ctx = context(toplevel=True)
    ctx.select_compute = libcell("compute.select")
    ctx.compute = macro(select_params, lib="compute")
    ctx.select_compute.connect(ctx.compute.code)
    ###ctx.compute.which.cell().set("pi") # TODO: malfunctioning
    ### KLUDGE
    # Workaround: drive the `which` parameter from a dedicated cell.
    ctx.which_cell = cell().set("pi")
    ctx.which_cell.connect(ctx.compute.which)
    ### /KLUDGE
    compute = ctx.compute.ctx
    ctx.iterations = cell().set(10000)
    ctx.iterations.connect(compute.iterations)
    ctx.result = cell()
    compute.result.connect(ctx.result)
import seamless #seamless.core.cache.use_caching = False ### from seamless.core import macro_mode_on from seamless.core import context, cell, transformer, pymacrocell, \ macro, link, path with macro_mode_on(): ctx = context(toplevel=True) ctx.mount("/tmp/mount-test", persistent=None) ctx.param = cell("json").set(0) ctx.macro = macro({ "param": "copy", }) ctx.param.connect(ctx.macro.param) ctx.macro_code = pymacrocell().set(""" ctx.sub = context(context=ctx,name="sub") ctx.a = cell("json").set(1000 + param) ctx.b = cell("json").set(2000 + param) ctx.result = cell("json") ctx.tf = transformer({ "a": "input", "b": "input", "c": "output" }) ctx.a.connect(ctx.tf.a) ctx.b.connect(ctx.tf.b) ctx.code = cell("transformer").set("c = a + b") ctx.code.connect(ctx.tf.code) ctx.tf.c.connect(ctx.result)
ctx.run = cell("transformer").set(run) ctx.run.connect(tf.code) ctx.a = cell() ctx.b = cell() ctx.a.connect(tf.a) ctx.b.connect(tf.b) ctx.c = cell() tf.c.connect(ctx.c) ctx.param = cell().set(param) with macro_mode_on(): ctx = context(toplevel=True) ctx.macro = macro({ "param": "plain", "run": "text", }) ctx.macro_code = cell("macro").set(build) ctx.macro_code.connect(ctx.macro.code) ctx.run = cell("transformer").set(run) ctx.run.connect(ctx.macro.run) ctx.param = cell().set("PARAM") ctx.param.connect(ctx.macro.param) ctx.a = cell().set(1) ctx.b = cell().set(2) ctx.c = cell() ctx.a.connect(ctx.macro.ctx.a) ctx.b.connect(ctx.macro.ctx.b) ctx.macro.ctx.c.connect(ctx.c) ctx.compute()
import seamless from seamless.core import macro_mode_on from seamless.core import context, cell, transformer, pymacrocell, pythoncell, macro with macro_mode_on(): ctx = context(toplevel=True) ctx.param = cell("json").set(1) ctx.macro = macro({ "param": "copy", "testmodule": ("ref", "module", "python"), }) ctx.param.connect(ctx.macro.param) ctx.macro_code = pymacrocell().set(""" print("macro execute") print(testmodule) print(testmodule.a) from .testmodule import a print(a) import sys print([m for m in sys.modules if m.find("testmodule") > -1]) print("/macro execute") """) ctx.macro_code.connect(ctx.macro.code) ctx.testmodule = pythoncell().set("a = 10") ctx.testmodule.connect(ctx.macro.testmodule) ctx.macro2 = macro({ "testmodule2": ("ref", "module", "python"),
# Script fragment: finish building an authored ("auth") test graph, register
# it as library "test", then reload it into a fresh toplevel context and
# inspect the loaded structured cells.  `create`, `ctx`, `library`, `libcell`,
# `context` and `macro` are defined/imported earlier in the file (not visible
# in this chunk).

# Non-authoritative structured cell without buffer/schema, with one inchannel.
create(ctx.nauth, with_buffer=False, with_schema=False, inchannels=[("z",)])
nauth = ctx.nauth.hub
nauth.handle["a"] = "value of nauth.a"
nauth.handle["b"] = "value of nauth.b"
print(nauth.value)

# Snapshot the authored context and register it as library "test".
lib = library.build(ctx)
library.register("test", lib)

print()
print("!" * 80)
print("LOAD")
print("!" * 80)
# Re-instantiate the library in a fresh toplevel context via a macro whose
# code is the library's "load" cell.
ctx2 = context(toplevel=True)
ctx2.load_test = libcell("test.load")
ctx2.test = macro({}, lib="test")
ctx2.load_test.connect(ctx2.test.code)
test = ctx2.test.gen_context
print(test.readme.value)

print("!" * 80)
print("INIT")
print("!" * 80)
# Inspect the loaded structured cells through their hubs.
auth_json = test.auth_json.hub
print(auth_json.value)
auth = test.auth.hub
print(auth.value, auth.schema.value)
err = test.err.hub
print("VALUE", err.value, "HANDLE", err.handle, err.schema.value)
# Mutate the *original* context's cell (not the loaded copy).
ctx.auth.hub.handle.a.set("updated value of auth")
def define_ctx2():
    """Populate the module-level ``ctx`` with a macro driven by a shared cell.

    Uses module-level ``ctx`` and ``macro_code`` (defined elsewhere in the
    file, not visible in this chunk).  ``param_a`` is exposed over the
    seamless share mechanism as read-write, so external clients can update it
    and re-trigger the macro.
    """
    ctx.macro = macro({"param_a": "int"})
    ctx.macro.code.cell().set(macro_code)
    ctx.param_a = cell().set(42)
    # Writable share: external updates to param_a re-execute the macro.
    ctx.param_a.share(readonly=False)
    ctx.param_a.connect(ctx.macro.param_a)
# Test script: connecting *into* and *out of* a macro's generated context via
# `path` objects, and what happens when the upstream value becomes None.
import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, \
    macro, path


def run_macro(ctx):
    """Macro body: create a single empty cell `mycell` in the generated context."""
    ctx.mycell = cell()


with macro_mode_on():
    ctx = context(toplevel=True)
    ctx.macro = macro({})
    ctx.macro.code.cell().set(run_macro)
    ctx.a = cell().set(1)
    # `path` lets us address ctx.macro.ctx.mycell before/independently of its
    # creation by the macro.
    p = path(ctx.macro.ctx).mycell
    ctx.a.connect(p)      # feed into the generated context
    ctx.aa = cell()
    p.connect(ctx.aa)     # and read back out of it

# NOTE(review): reconstructed from a whitespace-collapsed source; the
# statements below are placed outside the macro-mode block (computation is
# deferred while macro mode is active) — confirm against the original layout.
ctx.compute()
print(ctx.macro.ctx.mycell.value)
print(ctx.aa.value)
# Setting the upstream to None: observe propagation through the path.
ctx.a.set(None)
ctx.compute()
print(ctx.macro.ctx.mycell.value)
print(ctx.aa.value)
"header": "input", "code_": { "io": "input", "as": "code", }, "result": "output" }) ctx.header.connect(ctx.tf.header) ctx.code.connect(ctx.tf.code_) ctx.tf_code.connect(ctx.tf.code) ctx.tf.result.connect(ctx.result) ctx.macro = macro({ "header": "str", "code_": { "celltype": "str", "as": "code", }, }) def run(ctx, code, header): ctx.result = cell("str").set("MACRO: " + header + code) ctx.macro.code.set(run) ctx.code.connect(ctx.macro.code_) ctx.header.connect(ctx.macro.header) ctx.result2 = cell("str") mctx = path(ctx.macro.ctx) mctx.result.connect(ctx.result2) ctx.reactor = reactor({ "header": "input", "code_": {
import seamless #seamless.core.cache.use_caching = False ### from seamless.core import macro_mode_on from seamless.core import context, cell, transformer, pymacrocell, macro with macro_mode_on(): ctx = context(toplevel=True) ctx.param = cell("json").set(1) ctx.macro = macro({ "param": "copy", }) ctx.param.connect(ctx.macro.param) ctx.inp = cell("text").set("INPUT") ctx.macro_code = pymacrocell().set(""" print("Execute macro") ctx.submacro = macro({ "inp": "copy" }) ctx.submacro_code = pymacrocell().set(''' print("Execute submacro") ctx.inp = cell("text").set(inp + "!!!") ''') ctx.submacro_code.connect(ctx.submacro.code) """) ctx.macro_code.connect(ctx.macro.code) ctx.inp.connect(ctx.macro.ctx.submacro.inp) print(ctx.macro.ctx.submacro.ctx.inp.value) ctx.macro.ctx.submacro.ctx.inp.set(10)
def map_list_N_nested(ctx, elision_chunksize, inp_prefix, graph, inp, *,
                      map_list_N_nested_code, macro_code_lib_code,
                      macro_code_lib0=None):
    """Recursively chunk a map-list-N computation to enable elision.

    If the input length exceeds ``elision_chunksize``, the input lists are
    split into chunks and one elidable sub-macro (running this same code,
    via ``map_list_N_nested_code``) is created per chunk; the per-chunk
    subresult checksums are merged by a transformer and converted back into
    a deep-list mixed cell by a helper macro.  Otherwise delegates directly
    to ``macro_code_lib.map_list_N``.

    ctx: the macro's generated context (graph is built onto it).
    elision_chunksize: max chunk length before recursion kicks in.
    inp_prefix: prefix stripped from input cell names for error messages.
    graph: the highlevel graph to instantiate per list element.
    inp: dict of equally-long lists (validated below; ValueError otherwise).
    macro_code_lib0: module object fallback, stored into the module-level
        ``macro_code_lib`` global when provided (macro re-entry kludge —
        presumably because the module global is absent on first call;
        TODO confirm).
    Returns ctx.
    """
    global macro_code_lib
    if macro_code_lib0 is not None:
        macro_code_lib = macro_code_lib0
    from seamless.core import cell, macro, context, path, transformer
    first_k = list(inp.keys())[0]
    length = len(inp[first_k])
    print("NEST", length, inp[first_k][0])
    first_k = first_k[len(inp_prefix):]
    # Validate that all input lists have identical length.
    for k0 in inp:
        k = k0[len(inp_prefix):]
        if len(inp[k0]) != length:
            err = "all cells in inp must have the same length, but '{}' has length {} while '{}' has length {}"
            raise ValueError(err.format(k, len(inp[k0]), first_k, length))
    if length > elision_chunksize:
        # Code for the transformer that concatenates subresults in key order.
        merge_subresults = """def merge_subresults(**subresults):
    result = []
    for k in sorted(subresults.keys()):
        v = subresults[k]
        result += v
    return result"""
        ctx.macro_code = cell("python").set(map_list_N_nested_code)
        ctx.macro_code_lib_code = cell("plain").set(macro_code_lib_code)
        # Module descriptor so sub-macros can import the helper library.
        ctx.macro_code_lib = cell("plain").set({
            "type": "interpreted",
            "language": "python",
            "code": macro_code_lib_code
        })
        ctx.graph = cell("plain").set(graph)
        ctx.inp_prefix = cell("str").set(inp_prefix)
        ctx.elision_chunksize = cell("int").set(elision_chunksize)
        chunk_index = 0
        macro_params = {
            'inp_prefix': {
                'celltype': 'str'
            },
            'elision_chunksize': {
                'celltype': 'int'
            },
            'graph': {
                'celltype': 'plain'
            },
            'inp': {
                'celltype': 'plain'
            },
            "map_list_N_nested_code": {
                'celltype': 'python'
            },
            "macro_code_lib": {
                'celltype': 'plain',
                'subcelltype': 'module'
            },
            "macro_code_lib_code": {
                'celltype': 'plain'
            },
        }
        subresults = {}
        # Grow the chunk size geometrically so the recursion depth stays
        # logarithmic in `length`.
        chunksize = elision_chunksize
        while chunksize * elision_chunksize < length:
            chunksize *= elision_chunksize
        for n in range(0, length, chunksize):
            chunk_inp = {}
            for k in inp:
                chunk_inp[k] = inp[k][n:n + chunksize]
            chunk_index += 1
            subresult = cell("checksum")
            """ # The following will work, but make it un-elidable
            setattr(ctx, "chunk_%d" % chunk_index, context())
            chunk_ctx = getattr(ctx, "chunk_%d" % chunk_index)
            macro_code_lib.map_list_N(chunk_ctx, inp_prefix, graph, chunk_inp)
            # """
            # One elidable sub-macro per chunk, running this same code.
            m = macro(macro_params)
            elision = {"macro": m, "input_cells": {}, "output_cells": {}}
            m.allow_elision = True
            setattr(ctx, "m{}".format(chunk_index), m)
            ctx.macro_code.connect(m.code)
            ctx.inp_prefix.connect(m.inp_prefix)
            ctx.elision_chunksize.connect(m.elision_chunksize)
            ctx.graph.connect(m.graph)
            ctx.macro_code.connect(m.map_list_N_nested_code)
            ctx.macro_code_lib.connect(m.macro_code_lib)
            ctx.macro_code_lib_code.connect(m.macro_code_lib_code)
            m.inp.cell().set(chunk_inp)
            subr = "subresult{}".format(chunk_index)
            setattr(ctx, subr, subresult)
            subresults[subr] = subresult
            result_path = path(m.ctx).result
            result_path.connect(subresult)
            elision["output_cells"][subresult] = result_path
            ctx._get_manager().set_elision(**elision)
        # Merge the per-chunk subresult checksums.
        transformer_params = {}
        for subr in subresults:
            transformer_params[subr] = {"io": "input", "celltype": "checksum"}
        transformer_params["result"] = {"io": "output", "celltype": "checksum"}
        ctx.merge_subresults = transformer(transformer_params)
        ctx.merge_subresults.code.cell().set(merge_subresults)
        tf = ctx.merge_subresults
        for subr, c in subresults.items():
            c.connect(getattr(tf, subr))
        ctx.all_subresults = cell("plain")
        tf.result.connect(ctx.all_subresults)
        # ctx.all_subresults has the correct checksum, but there is no valid conversion
        # (because it is unsafe).
        # Use a macro to do it
        ctx.get_result = macro(
            {"result_checksum": {
                "io": "input",
                "celltype": "checksum"
            }})
        get_result = """def get_result(ctx, result_checksum):
    ctx.result = cell("mixed", hash_pattern={"!": "#"})
    ctx.result.set_checksum(result_checksum)"""
        ctx.get_result.code.cell().set(get_result)
        ctx.all_subresults.connect(ctx.get_result.result_checksum)
        p = path(ctx.get_result.ctx).result
        ctx.result = cell("mixed", hash_pattern={"!": "#"})
        p.connect(ctx.result)
    else:
        # Small enough: run the non-nested implementation directly.
        macro_code_lib.map_list_N(ctx, inp_prefix, graph, inp)
    return ctx
# Test script (modern API variant): a macro that defines a sub-macro whose
# code is parameterized by `param`; the outer input cell is routed to the
# sub-macro via an intermediate cell `inp2` inside the macro's context.
import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer, macro

with macro_mode_on():
    ctx = context(toplevel=True)
    ctx.param = cell("plain").set(1)
    ctx.mymacro = macro({
        "param": "plain",
    })
    ctx.param.connect(ctx.mymacro.param)
    ctx.inp = cell("text").set("INPUT")
    # Macro code (note: %s is substituted with `param` at macro build time,
    # so the submacro code changes whenever param changes).
    ctx.mymacro_code = cell("macro").set("""
print("Executing 'mymacro'...")
ctx.submacro = macro({
    "inp": "plain"
})
ctx.submacro_code = cell("macro").set('''
print("Executing 'submacro, param = %s'...")
ctx.myinp = cell("text").set(inp + "!!!")
''' % param)
ctx.submacro_code.connect(ctx.submacro.code)
ctx.inp2 = cell("text")
ctx.inp2.connect(ctx.submacro.inp)
""")
    ctx.mymacro_code.connect(ctx.mymacro.code)
    # Feed the outer input into the macro-internal relay cell.
    ctx.inp.connect(ctx.mymacro.ctx.inp2)

# NOTE(review): reconstructed from a whitespace-collapsed source; compute()
# placed outside the macro-mode block — confirm layout.
ctx.compute()
ctx.a = cell() ctx.a.connect(ctx.tf.a) ctx.b = cell() ctx.b.connect(ctx.tf.b) m.ctx.series.connect(ctx.a) ctx.value.connect(ctx.b) ctx.tf.code.set("[b] + a") ctx.tf.c.connect(ctx.series) print("/COLLATZ", value) ctx.start = cell() ctx.code = cell("macro").set(collatz) macro_params = {k: "ref" for k in ("value", "macro_code", "macro_params")} ctx.macro_params = cell().set(macro_params) m = ctx.macro = macro(ctx.macro_params.value) ctx.start.connect(m.value) ctx.code.connect(m.code) ctx.code.connect(m.macro_code) ctx.macro_params.connect(m.macro_params) ctx.series = cell() start = 27 ###start = 10 #7-level nesting ###start = 12 #10-level nesting ###start = 23 #16-level nesting ###start = 27 #111-level nesting refe = refe_collatz(start) ctx.start.set(start) print("building done") ctx.equilibrate()
ctx.code.connect(ctx.tf.code) ctx.iterations = link(ctx.tf.iterations) ctx.result = link(ctx.tf.pi) lctx = context(toplevel=True) lctx.readme = cell("text").set("Compute pi iteratively") lctx.code = cell("python").set(compute_pi) lctx.load = cell("macro").set(load) lctx.equilibrate() lib = library.build(lctx) library.register("compute", lib) ctx = context(toplevel=True) ctx.load_compute = libcell("compute.load") ctx.compute = macro({}, lib="compute") ctx.load_compute.connect(ctx.compute.code) compute = ctx.compute.gen_context print(compute.readme.value) ctx.iterations = cell().set(10000) ctx.iterations.connect(compute.iterations) ctx.result = cell() print("START") print() compute.result.connect(ctx.result) ctx.equilibrate() print(ctx.status()) print(ctx.result.value) print()
ctx.tf = link(tf) ctx.run = pytransformercell().set(run) ctx.run.connect(tf.code) ctx.a = cell() ctx.a.connect(tf.a) ctx.b = cell() ctx.b.connect(tf.b) ctx.c = cell() tf.c.connect(ctx.c) ctx.param = cell().set(param) with macro_mode_on(): ctx = context(toplevel=True) ctx.macro = macro({ "param":"copy", "run": ("copy", "text"), }) ctx.macro_code = pytransformercell().set(build) ctx.macro_code.connect(ctx.macro.code) ctx.run = pytransformercell().set(run) ctx.run.connect(ctx.macro.run) ctx.param = cell().set("PARAM") ctx.param.connect(ctx.macro.param) ctx.a = cell().set(1) ctx.b = cell().set(2) ctx.c = cell() ctx.a.connect(ctx.macro.ctx.a) ctx.b.connect(ctx.macro.ctx.b) ctx.macro.ctx.c.connect(ctx.c)
ctx.value.connect(ctx.b) ctx.tf.code.set("c = [b] + a") ctx.tf.c.connect(ctx.series) print("/COLLATZ", value) ctx.start = cell() ctx.code = cell("macro").set(collatz) macro_params = { "value": "int", "macro_params": "plain", "macro_code": ("python", "macro") } ctx.macro_params = cell().set(macro_params) ctx.compute() m = ctx.macro = macro(ctx.macro_params.value) ctx.start.connect(m.value) ctx.code.connect(m.code) ctx.code.connect(m.macro_code) ctx.macro_params.connect(m.macro_params) ctx.series = cell() with macro_mode_on(None): ctx.macro.ctx.series.connect(ctx.series) start = 27 ###start = 10 #7-level nesting ###start = 12 #10-level nesting ###start = 35 #12-level nesting ###start = 23 #16-level nesting ###start = 27 #111-level nesting refe = refe_collatz(start)
inchannels=[("z", )]) nauth = ctx.nauth.hub nauth.handle["a"] = "value of nauth.a" nauth.handle["b"] = "value of nauth.b" print(nauth.value) lib = library.build(ctx) library.register("test", lib) print() print("!" * 80) print("LOAD") print("!" * 80) ctx2 = context(toplevel=True) ctx2.load_test = libcell("test.load") ctx2.test = macro({}, lib="test") ctx2.load_test.connect(ctx2.test.code) test = ctx2.test.gen_context print(test.readme.value) print("!" * 80) print("INIT") print("!" * 80) auth_json = test.auth_json.hub print(auth_json.value) auth = test.auth.hub print(auth.value, auth.schema.value) err = test.err.hub print("VALUE", err.value, "HANDLE", err.handle, err.schema.value) ctx.auth.hub.handle.a.set("updated value of auth")
def map_dict_chunk(ctx, chunksize, graph, inp, keyorder, has_uniform, elision,
                   lib_module_dict):
    """Instantiate `graph` once per chunk of `inp` and merge the results.

    The input dict is split into chunks of ``chunksize`` keys (in
    ``keyorder`` order).  For every chunk a ``HighLevelContext`` copy of
    ``graph`` is created; the chunk is fed to its 'inp' cell as a deep
    ("*": "#" hash-pattern) cell, and its 'result' is routed as a checksum
    into one inchannel of a StructuredCell.  The collected subresults are
    merged by a helper transformer and converted back into a deep mixed
    cell by a helper macro (checksum->deep conversion is unsafe, hence the
    macro detour).

    ctx: the macro's generated context.
    graph: highlevel graph; must expose 'inp' and 'result' mixed cells
        (and 'uniform' when has_uniform) — validated on the first chunk only.
    inp: dict of inputs; keyorder: iteration order of its keys.
    has_uniform: route a shared ctx.uniform cell into each instance.
    elision: when False, record pseudo-connections for introspection.
    lib_module_dict: dict of helper code strings ('helper' section used here).
    """
    #print("map_dict_chunk", inp)
    from seamless.core import Cell as CoreCell
    from seamless.core import cell, context, macro, path, transformer
    from seamless.core.structured_cell import StructuredCell
    from seamless.core.HighLevelContext import HighLevelContext
    from seamless.core.unbound_context import UnboundContext
    import math
    pseudo_connections = []
    ctx.sc_data = cell("mixed")
    ctx.sc_buffer = cell("mixed")
    inpkeys = keyorder
    nchunks = math.ceil(len(inpkeys) / chunksize)
    # Structured cell gathering one subresult per chunk (inchannels 1..nchunks).
    ctx.sc = StructuredCell(
        data=ctx.sc_data,
        buffer=ctx.sc_buffer,
        inchannels=[(n + 1, ) for n in range(nchunks)],
        outchannels=[()],
    )
    if has_uniform:
        ctx.uniform = cell("mixed")
    first = True
    for n in range(nchunks):
        pos = chunksize * n
        hc = HighLevelContext(graph)
        subctx = "subctx_" + str(n + 1)
        setattr(ctx, subctx, hc)
        # Interface validation only on the first instance (all are copies
        # of the same graph).
        if first:
            if not hasattr(hc, "inp"):
                raise TypeError(
                    "map_dict_chunk context must have a cell called 'inp'")
        hci = hc.inp
        if has_uniform:
            if first:
                if not hasattr(hc, "uniform"):
                    raise TypeError(
                        "map_dict_chunk context must have a cell called 'uniform'"
                    )
                if isinstance(hc.uniform, StructuredCell):
                    raise TypeError(
                        "map_dict_chunk context has a cell called 'uniform', but its celltype must be mixed, not structured"
                    )
                if not isinstance(hc.uniform, CoreCell):
                    raise TypeError(
                        "map_dict_chunk context must have an attribute 'uniform' that is a cell, not a {}"
                        .format(type(hc.uniform)))
            ctx.uniform.connect(hc.uniform)
            con = ["..uniform"], ["ctx", subctx, "uniform"]
            pseudo_connections.append(con)
        if first:
            if isinstance(hci, StructuredCell):
                raise TypeError(
                    "map_dict_chunk context has a cell called 'inp', but its celltype must be mixed, not structured"
                )
            if not isinstance(hci, CoreCell):
                raise TypeError(
                    "map_dict_chunk context must have an attribute 'inp' that is a cell, not a {}"
                    .format(type(hci)))
            if hci.celltype != "mixed":
                raise TypeError(
                    "map_dict_chunk context has a cell called 'inp', but its celltype must be mixed, not {}"
                    .format(hci.celltype))
        con = ["..inp"], ["ctx", subctx, "inp"]
        pseudo_connections.append(con)
        inputchunk = {k: inp[k] for k in inpkeys[pos:pos + chunksize]}
        #print("CHUNK", list(inputchunk.keys()))
        chunk_ctx = context()
        setattr(ctx, "chunk_%d" % (n + 1), chunk_ctx)
        # Deep cell -> plain -> checksum chain for the chunk input.
        chunk_ctx.inputchunk_deep = cell("mixed", hash_pattern={"*": "#"})
        chunk_ctx.inputchunk_deep.set(inputchunk)
        chunk_ctx.inputchunk_deep2 = cell("plain")
        chunk_ctx.inputchunk_deep.connect(chunk_ctx.inputchunk_deep2)
        chunk_ctx.inputchunk_checksum = cell("checksum")
        chunk_ctx.inputchunk_deep2.connect(chunk_ctx.inputchunk_checksum)
        # chunk_ctx.inputchunk_checksum has the correct checksum, but there is no valid conversion
        # (because it is unsafe).
        # Use a macro to do it
        chunk_ctx.get_inputchunk = macro(
            {"inputchunk_checksum": {
                "io": "input",
                "celltype": "checksum"
            }})
        get_inputchunk = lib_module_dict["helper"]["get_inputchunk_dict"]
        chunk_ctx.get_inputchunk.code.cell().set(get_inputchunk)
        chunk_ctx.inputchunk_checksum.connect(
            chunk_ctx.get_inputchunk.inputchunk_checksum)
        p = path(chunk_ctx.get_inputchunk.ctx).inputchunk
        chunk_ctx.inputchunk = cell("mixed", hash_pattern={"*": "#"})
        p.connect(chunk_ctx.inputchunk)
        if first:
            if not hasattr(hc, "result"):
                raise TypeError(
                    "map_dict_chunk context must have a cell called 'result'")
            if isinstance(hc.result, StructuredCell):
                raise TypeError(
                    "map_dict_chunk context has a cell called 'result', but its celltype must be mixed, not structured"
                )
            if not isinstance(hc.result, CoreCell):
                raise TypeError(
                    "map_dict_chunk context must have an attribute 'result' that is a cell, not a {}"
                    .format(type(hc.result)))
        # Wire chunk input into the instance, and its result (as a checksum)
        # into this chunk's inchannel of the structured cell.
        chunk_ctx.inputchunk.connect(hci)
        chunk_ctx.result = cell("mixed", hash_pattern={"*": "#"})
        chunk_ctx.result_deep = cell("checksum")
        hc.result.connect(chunk_ctx.result)
        chunk_ctx.result.connect(chunk_ctx.result_deep)
        chunk_ctx.result_deep.connect(ctx.sc.inchannels[(n + 1, )])
        con = ["ctx", subctx, "result"], ["..result"]
        pseudo_connections.append(con)
        first = False
    ctx.subresults = cell("plain")
    ctx.sc.outchannels[()].connect(ctx.subresults)
    # Merge all per-chunk result dicts into one (helper code from the library).
    merge_subresults = lib_module_dict["helper"]["merge_subresults_chunk"]
    ctx.merge_subresults = transformer({
        "subresults": {
            "io": "input",
            "celltype": "plain"
        },
        "result": {
            "io": "output",
            "celltype": "plain"
        }
    })
    ctx.merge_subresults.code.cell().set(merge_subresults)
    ctx.subresults.connect(ctx.merge_subresults.subresults)
    ctx.result_deep = cell("plain")
    ctx.merge_subresults.result.connect(ctx.result_deep)
    # ctx.result_deep has the correct checksum, but there is no valid conversion
    # (because it is unsafe).
    # Use a macro to do it
    ctx.get_result = macro(
        {"result_checksum": {
            "io": "input",
            "celltype": "checksum"
        }})
    get_result = lib_module_dict["helper"]["get_result_dict"]
    ctx.get_result.code.cell().set(get_result)
    ctx.result_deep.connect(ctx.get_result.result_checksum)
    p = path(ctx.get_result.ctx).result
    ctx.result = cell("mixed", hash_pattern={"*": "#"})
    p.connect(ctx.result)
    if not elision:
        ctx._pseudo_connections = pseudo_connections
def translate_macro(node, root, namespace, inchannels, outchannels):
    """Translate a highlevel macro node into a low-level context.

    Creates a sub-context at node["path"], builds one cell per pin,
    gathers 'parameter' pins into a structured "param" cell with
    inter-channels, instantiates the low-level macro, wires non-parameter
    pins to paths inside the macro's generated context (registering them
    for elision when enabled), sets initial checksums, and records all
    connection endpoints in `namespace` for the surrounding translation
    pass.  Helpers (`get_path`, `as_tuple`, `cell_setattr`,
    `build_structured_cell`, `core_path`, `convert_checksum_dict`) are
    defined elsewhere in this module (not visible in this chunk).
    """
    from .translate import set_structured_cell_from_checksum
    parent = get_path(root, node["path"][:-1], None, None)
    name = node["path"][-1]
    ctx = context(toplevel=False)
    setattr(parent, name, ctx)
    param_name = node["PARAM"]
    all_inchannels = set(inchannels)
    param_inchannels = []
    interchannels = []
    pin_cells = {}       # pinname -> low-level cell
    pin_mpaths0 = {}     # non-parameter pinname -> True if input/edit
    for pinname in list(node["pins"].keys()):
        pin = node["pins"][pinname]
        if pin["io"] == "parameter":
            # Parameter pins get a shadow "<name>_PARAM" cell and an
            # interchannel on the structured param cell.
            pin_cell_name = pinname + "_PARAM"
            assert pin_cell_name not in node["pins"]
            assert pin_cell_name not in all_inchannels
            pinname2 = as_tuple(pinname)
            interchannels.append(pinname2)
            if pinname2 in inchannels:
                param_inchannels.append(pinname2)
        elif pin["io"] in ("input", "output", "edit"):
            pin_cell_name = pinname
        else:
            raise ValueError((pin["io"], pinname))
        pin_hash_pattern = pin.get("hash_pattern")
        celltype = pin.get("celltype", "mixed")
        # hash_pattern is only meaningful for mixed cells.
        if celltype == "mixed":
            pin_cell = cell(celltype, hash_pattern=pin_hash_pattern)
        else:
            pin_cell = cell(celltype)
        cell_setattr(node, ctx, pin_cell_name, pin_cell)
        pin_cells[pinname] = pin_cell
        if pin["io"] != "parameter":
            pin_mpaths0[pinname] = (pin["io"] in ("input", "edit"))
    mount = node.get("mount", {})
    param = None
    if len(interchannels):
        # Structured cell that funnels all parameter pins into the macro.
        param, param_ctx = build_structured_cell(ctx, param_name,
                                                 param_inchannels,
                                                 interchannels,
                                                 return_context=True,
                                                 fingertip_no_remote=False,
                                                 fingertip_no_recompute=False,
                                                 hash_pattern={"*": "#"})
        setattr(ctx, param_name, param)
        namespace[node["path"] + ("SCHEMA", ), "source"] = param.schema, node
        if "param_schema" in mount:
            param_ctx.schema.mount(**mount["param_schema"])
    # The low-level macro only sees the parameter pins, all as inputs.
    param_pins = {}
    for pinname, pin in node["pins"].items():
        if pin["io"] != "parameter":
            continue
        p = {"io": "input"}
        p.update(pin)
        param_pins[pinname] = p
    ctx.macro = macro(param_pins)
    if node.get("elision"):
        ctx.macro.allow_elision = True
    elision = {"macro": ctx.macro, "input_cells": {}, "output_cells": {}}
    # Wire non-parameter pins to same-named paths inside the macro's
    # generated context; record them for elision bookkeeping.
    for pinname in pin_mpaths0:
        is_input = pin_mpaths0[pinname]
        pin_mpath = getattr(core_path(ctx.macro.ctx), pinname)
        pin_cell = pin_cells[pinname]
        if is_input:
            if node["pins"][pinname]["io"] == "edit":
                pin_cell.bilink(pin_mpath)
            else:
                pin_cell.connect(pin_mpath)
            elision["input_cells"][pin_cell] = pin_mpath
        else:
            pin_mpath.connect(pin_cell)
            elision["output_cells"][pin_cell] = pin_mpath
    ctx._get_manager().set_elision(**elision)
    ctx.code = cell("macro")
    if "code" in mount:
        ctx.code.mount(**mount["code"])
    ctx.code.connect(ctx.macro.code)
    checksum = node.get("checksum", {})
    if "code" in checksum:
        ctx.code._set_checksum(checksum["code"], initial=True)
    if param is not None:
        param_checksum = convert_checksum_dict(checksum, "param")
        set_structured_cell_from_checksum(param, param_checksum)
    namespace[node["path"] + ("code", ), "target"] = ctx.code, node
    namespace[node["path"] + ("code", ), "source"] = ctx.code, node
    # Expose every pin endpoint under its highlevel path.
    for pinname in node["pins"]:
        path = node["path"] + as_tuple(pinname)
        pin = node["pins"][pinname]
        if pin["io"] == "parameter":
            pinname2 = as_tuple(pinname)
            if pinname2 in inchannels:
                namespace[path, "target"] = param.inchannels[pinname], node
            target = getattr(ctx.macro, pinname)
            assert target is not None, pinname
            pin_cell = pin_cells[pinname]
            # Route: structured param outchannel -> shadow cell -> macro pin.
            param.outchannels[pinname2].connect(pin_cell)
            pin_cell.connect(target)
        elif pin["io"] == "edit":
            namespace[path, "edit"] = pin_cells[pinname], node
        else:
            cmode = "target" if pin["io"] == "input" else "source"
            namespace[path, cmode] = pin_cells[pinname], node
# Test script: basic macro with a Python-function code cell; the macro body
# builds a small transformer pipeline (answer -> double -> result).
from seamless.core import macro_mode, context, cell, macro

with macro_mode.macro_mode_on(None):
    ctx = context(toplevel=True)
    ###ctx.mount("/tmp/test-mount-macro")
    ctx.macro = macro({
        "a": "mixed",
    })
    ctx.a = cell().set(42)

    def code(ctx, a):
        """Macro body: double `a` via a transformer (ctx = generated context).

        `cell` and `transformer` are provided by the macro execution
        namespace, not by this module's imports.
        """
        ctx.answer = cell().set(a)
        ctx.double = transformer({"test": "input", "result": "output"})
        ctx.answer.connect(ctx.double.test)
        # NOTE(review): expression-form transformer code ("test * 2", no
        # assignment to `result`) — presumably the expression value becomes
        # the output; confirm against seamless transformer semantics.
        ctx.double.code.cell().set("test * 2")
        ctx.result = cell()
        ctx.double.result.connect(ctx.result)

    ctx.code = cell("macro").set(code)
    ctx.a.connect(ctx.macro.a)
    ctx.code.connect(ctx.macro.code)

# NOTE(review): reconstructed from a whitespace-collapsed source; the
# statements below are placed after the macro-mode block — confirm layout.
ctx.compute()
print(ctx.macro.status)
print(ctx.macro.exception)
print(ctx.macro.ctx.answer.value)
print(ctx.macro.ctx.result.value)
# Connect an output of the generated context back to the toplevel context.
ctx.result0 = cell()
ctx.macro.ctx.result.connect(ctx.result0)
print(func is func2) print(func2()) print(testmodule.testvalue) from .testmodule import mod3 print(mod3.testvalue) print(mod3.testfunc(99)) result = 0 print("/execute") """ with macro_mode_on(): ctx = context(toplevel=True) ctx.param = cell("plain").set(1) ctx.macro = macro({ "param": "plain", "testmodule": ("plain", "module"), }) ctx.param.connect(ctx.macro.param) ctx.macro_code = cell("macro").set(code) ctx.macro_code.connect(ctx.macro.code) ctx.testmodule = cell("plain").set(testmodule) ctx.testmodule.connect(ctx.macro.testmodule) print("START") ctx.compute() print(ctx.macro.exception) with macro_mode_on(): ctx = context(toplevel=True)
def top(ctx, elision_, elision_chunksize, graph, lib_module_dict,
        lib_codeblock, inp, keyorder, has_uniform):
    """Top-level macro body for the elidable map-dict construct.

    Wraps the real map-dict implementation (``lib_module_dict["map_dict"]
    ["main"]``) in a single inner macro ``ctx.top``, feeding it the graph,
    the inputs and the helper-library module, and registers the inner
    macro for elision with its result (and optional uniform input) paths.

    ctx: this macro's generated context.
    elision_: enable elision on the inner macro (trailing underscore —
        presumably to avoid clashing with the `elision` name inside the
        inner macro's namespace; TODO confirm).
    inp / keyorder: set directly as parameter-pin values (not connected
        cells), so they are baked into the inner macro instantiation.
    has_uniform: when True, expose ctx.uniform and route it into the
        inner generated context via a path.
    """
    ctx.lib_module_dict = cell("plain").set(lib_module_dict)
    ctx.lib_codeblock = cell("plain").set(lib_codeblock)
    ctx.main_code = cell("python").set(lib_module_dict["map_dict"]["main"])
    # Module descriptor for the helper library, importable by the inner macro.
    ctx.lib_module = cell("plain").set({
        "type": "interpreted",
        "language": "python",
        "code": lib_codeblock
    })
    ctx.graph = cell("plain").set(graph)
    ctx.elision = cell("bool").set(elision_)
    ctx.elision_chunksize = cell("int").set(elision_chunksize)
    ctx.has_uniform = cell("bool").set(has_uniform)
    if has_uniform:
        ctx.uniform = cell("mixed")
    macro_params = {
        'elision_': {
            'celltype': 'bool'
        },
        'elision_chunksize': {
            'celltype': 'int'
        },
        'graph': {
            'celltype': 'plain'
        },
        'lib_module_dict': {
            'celltype': 'plain'
        },
        'lib_codeblock': {
            'celltype': 'plain'
        },
        'lib': {
            'celltype': 'plain',
            'subcelltype': 'module'
        },
        'inp': {
            'celltype': 'plain'
        },
        'keyorder': {
            'celltype': 'plain'
        },
        'has_uniform': {
            'celltype': 'bool'
        },
    }
    ctx.top = macro(macro_params)
    m = ctx.top
    m.allow_elision = elision_
    ctx.main_code.connect(m.code)
    ctx.elision.connect(m.elision_)
    ctx.elision_chunksize.connect(m.elision_chunksize)
    ctx.has_uniform.connect(m.has_uniform)
    ctx.graph.connect(m.graph)
    ctx.lib_module_dict.connect(m.lib_module_dict)
    ctx.lib_codeblock.connect(m.lib_codeblock)
    ctx.lib_module.connect(m.lib)
    # Direct pin values (no upstream cells) for the data inputs.
    m.inp.cell().set(inp)
    m.keyorder.cell().set(keyorder)
    # Route the inner context's `result` out, and register for elision.
    result_path = path(m.ctx).result
    ctx.result = cell("mixed", hash_pattern={"*": "#"})
    result_path.connect(ctx.result)
    input_cells = {}
    if has_uniform:
        uniform_path = path(m.ctx).uniform
        ctx.uniform.connect(uniform_path)
        input_cells = {ctx.uniform: uniform_path}
    ctx._get_manager().set_elision(macro=m,
                                   input_cells=input_cells,
                                   output_cells={
                                       ctx.result: result_path,
                                   })
def map_dict_nested(ctx, elision, elision_chunksize, graph, inp, keyorder, *,
                    lib_module_dict, lib_codeblock, lib, has_uniform):
    """Recursively chunk a map-dict computation to enable elision.

    When elision is on and the input is larger than ``elision_chunksize``,
    the keys (in ``keyorder``) are split into geometrically-sized chunks,
    one elidable sub-macro per chunk (running this same code via the
    library's main code cell); the per-chunk subresult checksums are merged
    by a transformer and converted back into a deep mixed cell by a helper
    macro (checksum->deep conversion is unsafe, hence the macro detour).
    Otherwise delegates directly to ``lib.map_dict``.

    ctx: the macro's generated context.
    inp: dict of inputs; keyorder: ordering of its keys (lengths asserted
        equal).
    lib_module_dict / lib_codeblock / lib: helper library in dict, code
        and imported-module form respectively.
    has_uniform: route a shared ctx.uniform cell into every sub-macro.
    Returns ctx.
    """
    from seamless.core import cell, macro, context, path, transformer
    assert len(inp) == len(keyorder)
    length = len(inp)
    #print("NEST", length, keyorder[0])
    if elision and elision_chunksize > 1 and length > elision_chunksize:
        merge_subresults = lib_module_dict["helper"]["merge_subresults_dict"]
        ctx.lib_module_dict = cell("plain").set(lib_module_dict)
        ctx.lib_codeblock = cell("plain").set(lib_codeblock)
        ctx.main_code = cell("python").set(lib_module_dict["map_dict"]["main"])
        # Module descriptor so sub-macros can import the helper library.
        ctx.lib_module = cell("plain").set({
            "type": "interpreted",
            "language": "python",
            "code": lib_codeblock
        })
        ctx.graph = cell("plain").set(graph)
        ctx.elision = cell("bool").set(elision)
        ctx.elision_chunksize = cell("int").set(elision_chunksize)
        ctx.has_uniform = cell("bool").set(has_uniform)
        chunk_index = 0
        macro_params = {
            'elision_': {
                'celltype': 'bool'
            },
            'elision_chunksize': {
                'celltype': 'int'
            },
            'graph': {
                'celltype': 'plain'
            },
            "lib_module_dict": {
                'celltype': 'plain'
            },
            "lib_codeblock": {
                'celltype': 'plain'
            },
            "lib": {
                'celltype': 'plain',
                'subcelltype': 'module'
            },
            'inp': {
                'celltype': 'plain'
            },
            'has_uniform': {
                'celltype': 'bool'
            },
            'keyorder': {
                'celltype': 'plain'
            },
        }
        if has_uniform:
            ctx.uniform = cell("mixed")
        subresults = {}
        # Grow the chunk size geometrically so the recursion depth stays
        # logarithmic in `length`.
        chunksize = elision_chunksize
        while chunksize * elision_chunksize < length:
            chunksize *= elision_chunksize
        for n in range(0, length, chunksize):
            chunk_keyorder = keyorder[n:n + chunksize]
            chunk_inp = {k: inp[k] for k in chunk_keyorder}
            chunk_index += 1
            subresult = cell("checksum")
            # One elidable sub-macro per chunk, running this same code.
            m = macro(macro_params)
            m.allow_elision = True
            setattr(ctx, "m{}".format(chunk_index), m)
            ctx.main_code.connect(m.code)
            ctx.elision.connect(m.elision_)
            ctx.elision_chunksize.connect(m.elision_chunksize)
            ctx.has_uniform.connect(m.has_uniform)
            ctx.graph.connect(m.graph)
            ctx.lib_module_dict.connect(m.lib_module_dict)
            ctx.lib_codeblock.connect(m.lib_codeblock)
            ctx.lib_module.connect(m.lib)
            # Chunk data are direct pin values, not connected cells.
            m.inp.cell().set(chunk_inp)
            m.keyorder.cell().set(chunk_keyorder)
            subr = "subresult{}".format(chunk_index)
            setattr(ctx, subr, subresult)
            subresults[subr] = subresult
            result_path = path(m.ctx).result
            result_path.connect(subresult)
            input_cells = {}
            if has_uniform:
                uniform_path = path(m.ctx).uniform
                ctx.uniform.connect(uniform_path)
                input_cells = {ctx.uniform: uniform_path}
            ctx._get_manager().set_elision(macro=m,
                                           input_cells=input_cells,
                                           output_cells={
                                               subresult: result_path,
                                           })
        # Merge the per-chunk subresult checksums.
        transformer_params = {}
        for subr in subresults:
            transformer_params[subr] = {"io": "input", "celltype": "checksum"}
        transformer_params["result"] = {"io": "output", "celltype": "checksum"}
        ctx.merge_subresults = transformer(transformer_params)
        ctx.merge_subresults.code.cell().set(merge_subresults)
        tf = ctx.merge_subresults
        for subr, c in subresults.items():
            c.connect(getattr(tf, subr))
        ctx.all_subresults = cell("plain")
        tf.result.connect(ctx.all_subresults)
        # ctx.all_subresults has the correct checksum, but there is no valid conversion
        # (because it is unsafe).
        # Use a macro to do it
        ctx.get_result = macro(
            {"result_checksum": {
                "io": "input",
                "celltype": "checksum"
            }})
        get_result = lib_module_dict["helper"]["get_result_dict"]
        ctx.get_result.code.cell().set(get_result)
        ctx.all_subresults.connect(ctx.get_result.result_checksum)
        p = path(ctx.get_result.ctx).result
        ctx.result = cell("mixed", hash_pattern={"*": "#"})
        p.connect(ctx.result)
    else:
        # Small enough (or elision off): run the non-nested implementation.
        lib.map_dict(ctx, graph, inp, has_uniform, elision)
    return ctx
# Test script: passing a Python cell to a macro as an importable *module* pin
# ("ref", "module", "python"); the macro code imports names from it via a
# relative import (`from .testmodule import a`).
import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer, pymacrocell, pythoncell, macro

with macro_mode_on():
    ctx = context(toplevel=True)
    ctx.param = cell("json").set(1)
    ctx.macro = macro({
        "param": "copy",
        "testmodule": ("ref", "module", "python"),
    })
    ctx.param.connect(ctx.macro.param)
    # Macro code: inspect the injected module and how it shows up in
    # sys.modules.
    ctx.macro_code = pymacrocell().set("""
print("macro execute")
print(testmodule)
print(testmodule.a)
from .testmodule import a
print(a)
import sys
print([m for m in sys.modules if m.find("testmodule") > -1])
print("/macro execute")
""")
    ctx.macro_code.connect(ctx.macro.code)
    # Module source is a plain python cell with a single name `a`.
    ctx.testmodule = pythoncell().set("a = 10")
    ctx.testmodule.connect(ctx.macro.testmodule)
    # Second macro with a module pin (wiring presumably continues beyond
    # this chunk).
    ctx.macro2 = macro({
        "testmodule2": ("ref", "module", "python"),
    })
# Test script (older API variant, pymacrocell/"copy" pins): a macro defining
# a parameterized sub-macro; the outer input is routed through a relay cell
# `inp2` inside the macro's generated context.
import seamless
from seamless.core import macro_mode_on
from seamless.core import context, cell, transformer, pymacrocell, macro

with macro_mode_on():
    ctx = context(toplevel=True)
    ctx.param = cell("plain").set(1)
    ctx.mymacro = macro({
        "param": "copy",
    })
    ctx.param.connect(ctx.mymacro.param)
    ctx.inp = cell("text").set("INPUT")
    # Macro code: %s is substituted with `param` when the macro executes,
    # so the submacro's code (and hence its output) depends on param.
    ctx.mymacro_code = pymacrocell().set("""
print("Executing 'mymacro'...")
ctx.submacro = macro({
    "inp": "copy"
})
ctx.submacro_code = pymacrocell().set('''
print("Executing 'submacro, param = %s'...")
ctx.myinp = cell("text").set(inp + "!!!")
''' % param)
ctx.submacro_code.connect(ctx.submacro.code)
ctx.inp2 = cell("text")
ctx.inp2.connect(ctx.submacro.inp)
""")
    ctx.mymacro_code.connect(ctx.mymacro.code)
    ctx.inp.connect(ctx.mymacro.ctx.inp2)

# NOTE(review): reconstructed from a whitespace-collapsed source; the print
# is placed after the macro-mode block — confirm layout.
print(ctx.mymacro.ctx.submacro.ctx.myinp.value)
else: ctx.tf_alt2 = tf ctx.tf = link(tf) ctx.run = pytransformercell().set(run) ctx.run.connect(tf.code) ctx.a = link(tf.a).export() ctx.b = link(tf.b).export() ctx.c = link(tf.c).export() ctx.param = cell("json").set(param) with macro_mode_on(): ctx = context(toplevel=True) ctx.macro = macro({ "param": "copy", "run": ("copy", "text"), }) ctx.macro_code = pytransformercell().set(build) ctx.macro_code.connect(ctx.macro.code) ctx.run = pytransformercell().set(run) ctx.run.connect(ctx.macro.run) ctx.param = cell("json").set("PARAM") ctx.param.connect(ctx.macro.param) ctx.a = cell("json").set(1) ctx.b = cell("json").set(2) ctx.c = cell() ctx.a.connect(ctx.macro.ctx.a) ctx.b.connect(ctx.macro.ctx.b) ctx.macro.ctx.c.connect(ctx.c) ctx.equilibrate(0.5)