def demo_graph_fixture():
    """Provide a main graph and a sub graph for the demo tests.

    +---main----+          +---sub----+
    |    A      |          |    C1    |
    |-----------|          |----------|
    o in_<>     |     +--->o in_<>    |
    |     out<> o-----+    |    out<> o
    +-----------+     |    +----------+
                      |    +---sub----+
                      |    |    C2    |
                      |    |----------|
                      +--->o in_<>    |
                           |    out<> o
                           +----------+
    """
    # The sub graph holds the two consumer nodes.
    sub = Graph("sub")
    consumer_1 = DemoNode(graph=sub, name="C1")
    consumer_2 = DemoNode(graph=sub, name="C2")

    # The main graph holds a single producer node.
    main = Graph("main")
    DemoNode(graph=main, name="A")

    # Expose both consumer inputs of the sub graph under one grouped input.
    InputPlugGroup(
        "graph_in",
        sub,
        [consumer_1.inputs["in_"], consumer_2.inputs["in_"]],
    )
    return sub, main
def test_access_node_of_subgraph_by_key():
    """Subgraph nodes are reachable from the parent via dotted keys."""
    main = Graph("main")
    node_in_main = DemoNode(name="node", graph=main)
    sub = Graph("sub")
    node_in_sub = DemoNode(name="node", graph=sub)

    # Connecting the graphs makes "sub" a subgraph of "main".
    main["node"].outputs["out"] >> sub["node"].inputs["in_"]

    assert main["node"] == node_in_main
    assert main["sub.node"] == node_in_sub
def get_facematch_graph(threshold):
    """Set up face matching, e.g. with parameters taken from a config.

    Args:
        threshold: Match threshold handed to the MatchNode.

    Returns:
        The assembled graph with the match result promoted as "result".
    """
    facematch_graph = Graph()

    image_node = EmbeddingNode(
        input_name="image",
        output_name="image_emb",
        graph=facematch_graph,
        name="ImageEmbeddings",
    )
    reference_node = EmbeddingNode(
        input_name="reference",
        output_name="reference_emb",
        graph=facematch_graph,
        name="ReferenceEmbeddings",
    )
    match_node = MatchNode(threshold=threshold, graph=facematch_graph)

    # Feed both embeddings into the matcher.
    image_node.outputs["image_emb"] >> match_node.inputs["image_emb"]
    reference_node.outputs["reference_emb"] >> match_node.inputs["reference_emb"]

    # Expose the match result at graph level.
    match_node.outputs["facematch"].promote_to_graph("result")
    return facematch_graph
def complex_cg_render(frames, batch_size):
    """Build and print a batched CG rendering graph.

    Each batch of frames gets a Maya render feeding an image check; the
    checked images flow into the slapcomp and the database update, and a
    second pass comps each batch in Nuke into a quicktime.
    """
    graph = Graph(name='Rendering')
    slapcomp = CreateSlapComp(graph=graph, template='nuke_template.nk')
    update_database = UpdateDatabase(graph=graph, id_=123456)

    # One MayaRender/CheckImages pair per batch of frames.
    for start in range(0, frames, batch_size):
        end = start + batch_size
        maya_render = MayaRender(
            name='MayaRender{0}-{1}'.format(start, end),
            graph=graph,
            frames=range(start, end),
            scene_file='/scene/for/rendering.ma')
        check_images = CheckImages(
            name='CheckImages{0}-{1}'.format(start, end), graph=graph)
        maya_render.outputs['renderings'].connect(
            check_images.inputs['images'])
        check_images.outputs['images'].connect(
            slapcomp.inputs['images'][str(start)])
        check_images.outputs['images'].connect(
            update_database.inputs['images'][str(start)])

    # NOTE(review): Quicktime is created without graph=graph, unlike every
    # other node here — confirm that is intended.
    quicktime = Quicktime()

    # Comp each batch in Nuke and gather the results into the quicktime.
    for start in range(0, frames, batch_size):
        end = start + batch_size
        nuke_render = NukeRender(
            name='NukeRender{0}-{1}'.format(start, end),
            graph=graph,
            frames=range(start, end))
        slapcomp.outputs['slapcomp'].connect(nuke_render.inputs['scene_file'])
        nuke_render.outputs['renderings'].connect(
            quicktime.inputs['images'][str(start)])

    print(graph)
def __init__( self, name, parent_graph=None, create_input_node=True, percentiles_to_compute=JOIN_PERCENTILES, **kwargs ): self.subgraphs = [] self.parallel_steps = dict() self.serial_steps = [] self.parent_graph = parent_graph self.graph = Graph(name=name) self.percentiles_to_compute = percentiles_to_compute # All inputs come through here... if create_input_node: input_node_name = "Latency" self.input_node = InputNode(graph=self.graph, name=input_node_name) self.input_node.inputs[LATENCIES].promote_to_graph(name=LATENCIES) # Everything in this subgraph will end up feeding into the Join join_output_name = "Upper Bound" self.join_output = JoinUpper( graph=self.graph, name=join_output_name, percentiles_to_compute=percentiles_to_compute, ) self.join_output.outputs[LATENCIES].promote_to_graph(name=LATENCIES)
def test_serialize_nested_graph_to_json():
    """A nested graph survives a JSON serialization round trip."""
    graph = _nested_graph()
    serialized = graph.to_json()
    round_tripped = Graph.from_json(serialized).to_json()
    assert round_tripped == serialized
def test_plugs_can_be_promoted_to_graph_level_under_new_name():
    """Promotion exposes a node plug on the graph, optionally renamed."""
    main = Graph("main")
    DemoNode(name="node1", graph=main)
    node = main["node1"]

    node.inputs["in_"].promote_to_graph()
    node.outputs["out"].promote_to_graph(name="graph_out")

    # The graph-level plugs are the very same objects as the node plugs.
    assert main.inputs["in_"] is node.inputs["in_"]
    assert main.outputs["graph_out"] is node.outputs["out"]
class Workflow(object):
    """Abstract base class defining a workflow, based on a flowpipe graph.

    The Workflow holds a graph and provides two ways to evaluate the
    graph, locally and remotely.
    """

    def __init__(self):
        # The graph owning all of this workflow's nodes.
        self.graph = Graph()

    def evaluate_locally(self):
        """Evaluate the graph in the current process."""
        self.graph.evaluate()

    def evaluate_remotely(self):
        """Evaluate on a render farm; intentionally left unimplemented here.

        See examples/vfx_render_farm_conversion.py on how to implement a
        conversion from flowpipe graphs to your render farm.
        """
        pass
def test_subgraph_names_need_to_be_unique():
    """Two different subgraphs may not be attached under the same name.

    A first graph named "sub" connects fine; connecting plugs (or any of
    their subplugs) into a second, distinct graph also named "sub" must
    raise a ValueError.
    """
    main = Graph("main")
    DemoNode(name="node1", graph=main)
    DemoNode(name="node2", graph=main)

    sub1 = Graph("sub")
    DemoNode(name="node1", graph=sub1)
    DemoNode(name="node2", graph=sub1)

    sub2 = Graph("sub")
    DemoNode(name="node1", graph=sub2)
    DemoNode(name="node2", graph=sub2)

    # The first graph named "sub" attaches without error.
    main["node1"].outputs["out"] >> sub1["node1"].inputs["in_"]

    # Any connection into the second "sub" graph must fail, whether whole
    # plugs or subplugs are involved on either side.
    source = main["node1"]
    target = sub2["node1"]
    with pytest.raises(ValueError):
        source.outputs["out"] >> target.inputs["in_"]
    with pytest.raises(ValueError):
        source.outputs["out"]["a"] >> target.inputs["in_"]
    with pytest.raises(ValueError):
        source.outputs["out"]["a"] >> target.inputs["in_"]["a"]
    with pytest.raises(ValueError):
        source.outputs["out"] >> target.inputs["in_"]["a"]

    # Connecting to the same graph does not throw an error
    # main["node1"].outputs["out"] >> sub1["node2"].inputs["in_"]
def test_plugs_can_only_be_promoted_once_to_graph_level():
    """Re-promoting an already promoted plug raises, even under a new name."""
    main = Graph("main")
    DemoNode(name="node1", graph=main)
    node = main["node1"]

    node.inputs["in_"].promote_to_graph()
    node.outputs["out"].promote_to_graph()

    for plug in (node.inputs["in_"], node.outputs["out"]):
        with pytest.raises(ValueError):
            plug.promote_to_graph(name="different_name")
def implicit_batching(frames, batch_size):
    """Batches are created during the farm conversion.

    A single MayaRender node carries all frames; the batch size travels
    along as node metadata for the converter to act on.
    """
    graph = Graph(name='Rendering')
    render = MayaRender(
        graph=graph,
        frames=list(range(frames)),
        scene_file='/scene/for/rendering.ma',
        metadata={'batch_size': batch_size})
    update = UpdateDatabase(graph=graph, id_=123456)
    render.outputs['renderings'].connect(update.inputs['images'])

    print(graph)
    print(json.dumps(convert_graph_to_job(graph), indent=2))
def test_subplugs_can_not_be_promoted_individually():
    """Only whole plugs can be promoted; subplugs raise a TypeError."""
    main = Graph("main")
    DemoNode(name="node1", graph=main)
    node = main["node1"]

    with pytest.raises(TypeError):
        node.inputs["in_"]["sub"].promote_to_graph()
    with pytest.raises(TypeError):
        node.outputs["out"]["sub"].promote_to_graph()

    # Promoting the main plug will of course give access to subplugs as well
    node.inputs["in_"].promote_to_graph()
    assert main.inputs["in_"]["sub"] == node.inputs["in_"]["sub"]
def _nested_graph():
    """Create a graph with three levels of nested subgraphs.

    "main" holds one DemoNode; each subgraph "sub0".."sub2" holds two
    DemoNodes, and each level's "DemoNode" output feeds the next level's
    "DemoNode" input, forming a chain of four connected nodes.
    """
    main = Graph("main")
    DemoNode(graph=main)
    parent = main
    for level in range(3):
        sub = Graph("sub{0}".format(level))
        DemoNode(graph=sub)
        DemoNode(graph=sub, name="sub{0}-2".format(level))
        parent["DemoNode"].outputs["out"] >> sub["DemoNode"].inputs["in_"]
        parent = sub
    return main
def explicit_batching(frames, batch_size):
    """Batches are already part of the graph.

    One MayaRender node is created per batch of frames, each feeding its
    renderings into the database update under the batch's start frame.
    """
    graph = Graph(name='Rendering')
    update_database = UpdateDatabase(graph=graph, id_=123456)

    for start in range(0, frames, batch_size):
        end = start + batch_size
        maya_render = MayaRender(
            name='MayaRender{0}-{1}'.format(start, end),
            graph=graph,
            frames=list(range(start, end)),
            scene_file='/scene/for/rendering.ma')
        maya_render.outputs['renderings'].connect(
            update_database.inputs['images'][str(start)])

    print(graph)
    print(json.dumps(convert_graph_to_job(graph), indent=2))
def complex_cg_render(frames, batch_size):
    """Assemble and print the full CG rendering pipeline graph.

    Maya renders each batch, an image check gates the results into the
    slapcomp and the database update, and a Nuke pass comps every batch
    into a quicktime.
    """
    graph = Graph(name="Rendering")
    slapcomp = CreateSlapComp(graph=graph, template="nuke_template.nk")
    update_database = UpdateDatabase(graph=graph, id_=123456)

    batch_starts = list(range(0, frames, batch_size))

    # Render each batch in Maya and route it through an image check.
    for start in batch_starts:
        end = start + batch_size
        maya_render = MayaRender(
            name="MayaRender{0}-{1}".format(start, end),
            graph=graph,
            frames=range(start, end),
            scene_file="/scene/for/rendering.ma",
        )
        check_images = CheckImages(
            name="CheckImages{0}-{1}".format(start, end), graph=graph)
        maya_render.outputs["renderings"].connect(
            check_images.inputs["images"])
        check_images.outputs["images"].connect(
            slapcomp.inputs["images"][str(start)])
        check_images.outputs["images"].connect(
            update_database.inputs["images"][str(start)])

    # NOTE(review): Quicktime is created without graph=graph, unlike every
    # other node here — confirm that is intended.
    quicktime = Quicktime()

    # Comp each batch in Nuke and gather the results into the quicktime.
    for start in batch_starts:
        end = start + batch_size
        nuke_render = NukeRender(
            name="NukeRender{0}-{1}".format(start, end),
            graph=graph,
            frames=range(start, end),
        )
        slapcomp.outputs["slapcomp"].connect(nuke_render.inputs["scene_file"])
        nuke_render.outputs["renderings"].connect(
            quicktime.inputs["images"][str(start)])

    print(graph)
@Node(outputs=['file'])
def MyNode(file):
    """Placeholder node that passes the given file through unchanged."""
    # Something is done in here ...
    return {'file': file}


# A graph that fixes an incoming file, cleaning up messy names etc.
#
# +-----------------------+          +-------------------------+
# |   Cleanup Filename    |          |   Change Lineendings    |
# |-----------------------|          |-------------------------|
# o file<>                |     +--->o file<>                  |
# |                file   o-----+    |                  file   o
# +-----------------------+          +-------------------------+
fix_file = Graph(name="fix_file")
cleanup_filename = MyNode(name="Cleanup Filename", graph=fix_file)
change_lineendings = MyNode(name="Change Lineendings", graph=fix_file)
cleanup_filename.outputs["file"].connect(change_lineendings.inputs["file"])

# A second graph finds files and extracts their contents into a database.
# NOTE(review): "udpate" is a typo, kept because the module-level name and
# graph name may be referenced elsewhere.
#
# +----------------+     +----------------------------+     +----------------+
# |   Find File    |     |   Read Values from File    |     |   Update DB    |
# |----------------|     |----------------------------|     |----------------|
# o file<>         |  +->o file<>                     |  +->o file<>         |
# |         file   o--+  |                     file   o--+  |         file   o
# +----------------+     +----------------------------+     +----------------+
udpate_db_from_file = Graph(name="udpate_db_from_file")
find_file = MyNode(name="Find File", graph=udpate_db_from_file)
values_from_file = MyNode(name="Read Values from File",
                          graph=udpate_db_from_file)
    The wrapped function is used as the compute method.
    """
    print('{0} are building the {1}'.format(', '.join(workers.values()),
                                            section))
    return {'workers.{0}'.format(i): worker for i, worker in workers.items()}


@Node()
def Party(attendees):
    """Nodes do not necessarily need to have output or input plugs."""
    print('{0} and {1} are having a great party!'.format(
        ', '.join(list(attendees.values())[:-1]),
        list(attendees.values())[-1]))


# Assemble the house-building demo graph.
graph = Graph(name='Build a House')
workers = HireWorkers(graph=graph, amount=4)
build_walls = Build(graph=graph, name='Build Walls', section='walls')
build_roof = Build(graph=graph, name='Build Roof', section='roof')
party = Party(graph=graph, name='Housewarming Party')

# Nodes are connected via their input/output plugs.
workers.outputs['workers']['0'].connect(build_walls.inputs['workers']['0'])
workers.outputs['workers']['1'].connect(build_walls.inputs['workers']['1'])
workers.outputs['workers']['2'].connect(build_roof.inputs['workers']['0'])
workers.outputs['workers']['3'].connect(build_roof.inputs['workers']['1'])

# Connecting nodes can be done via the bit shift operator as well
build_walls.outputs['workers']['0'] >> party.inputs['attendees']['0']
build_walls.outputs['workers']['1'] >> party.inputs['attendees']['2']
build_roof.outputs['workers']['0'] >> party.inputs['attendees']['1']
def compute(self, time, timezone): return {"converted_time": time + timezone * 60 * 60} @Node() def ShowTimes(times): """Nodes do not necessarily have to define output and input plugs.""" print("-- World Clock -------------------") for location, t in times.items(): print("It is now: {time:%H:%M} in {location}".format( time=datetime.fromtimestamp(t), location=location)) print("----------------------------------") # The Graph holds the nodes graph = Graph(name="World Clock") current_time = CurrentTime(graph=graph) van = ConvertTime(name="Vancouver", timezone=-8, graph=graph) ldn = ConvertTime(name="London", timezone=0, graph=graph) muc = ConvertTime(name="Munich", timezone=1, graph=graph) world_clock = ShowTimes(graph=graph) # Connecting nodes can be done via the bit shift operator as well current_time.outputs["time"].connect(van.inputs["time"]) current_time.outputs["time"].connect(ldn.inputs["time"]) current_time.outputs["time"].connect(muc.inputs["time"]) van.outputs["converted_time"] >> world_clock.inputs["times"]["Vancouver"] ldn.outputs["converted_time"] >> world_clock.inputs["times"]["London"] muc.outputs["converted_time"] >> world_clock.inputs["times"]["Munich"] # Display the graph
    """
    print("{0} are building the {1}".format(", ".join(workers.values()),
                                            section))
    return {"workers.{0}".format(i): worker for i, worker in workers.items()}


@Node()
def Party(attendees):
    """Nodes do not necessarily need to have output or input plugs."""
    print("{0} and {1} are having a great party!".format(
        ", ".join(list(attendees.values())[:-1]),
        list(attendees.values())[-1],
    ))


# Assemble the house-building demo graph.
graph = Graph(name="Build a House")
workers = HireWorkers(graph=graph, amount=4)
build_walls = Build(graph=graph, name="Build Walls", section="walls")
build_roof = Build(graph=graph, name="Build Roof", section="roof")
party = Party(graph=graph, name="Housewarming Party")

# Nodes are connected via their input/output plugs.
workers.outputs["workers"]["0"].connect(build_walls.inputs["workers"]["0"])
workers.outputs["workers"]["1"].connect(build_walls.inputs["workers"]["1"])
workers.outputs["workers"]["2"].connect(build_roof.inputs["workers"]["0"])
workers.outputs["workers"]["3"].connect(build_roof.inputs["workers"]["1"])

# Connecting nodes can be done via the bit shift operator as well
build_walls.outputs["workers"]["0"] >> party.inputs["attendees"]["0"]
build_walls.outputs["workers"]["1"] >> party.inputs["attendees"]["2"]
build_roof.outputs["workers"]["0"] >> party.inputs["attendees"]["1"]
def __init__(self): self.graph = Graph()
def compute(self, time, timezone): return {'converted_time': time + datetime.timedelta(hours=timezone)} @Node() def ShowTimes(times): """Nodes do not necessarily have to define output and input plugs.""" print('-- World Clock -------------------') for location, t in times.items(): print('It is now: {time} in {location}'.format( time=t.strftime("%Y-%m-%d %H:%M:%S"), location=location)) print('----------------------------------') # The Graph holds the nodes graph = Graph(name='World Clock') current_time = CurrentTime(graph=graph) van = ConvertTime(name='Vancouver', timezone=-8, graph=graph) ldn = ConvertTime(name='London', timezone=0, graph=graph) muc = ConvertTime(name='Munich', timezone=1, graph=graph) world_clock = ShowTimes(graph=graph) # Connecting nodes can be done via the bit shift operator as well current_time.outputs['time'].connect(van.inputs['time']) current_time.outputs['time'].connect(ldn.inputs['time']) current_time.outputs['time'].connect(muc.inputs['time']) van.outputs['converted_time'] >> world_clock.inputs['times']['Vancouver'] ldn.outputs['converted_time'] >> world_clock.inputs['times']['London'] muc.outputs['converted_time'] >> world_clock.inputs['times']['Munich'] # Display the graph