import bpy
import numpy as np
import blendtorch.btb as btb


def main():
    # Parse script arguments passed via blendtorch launcher
    btargs, remainder = btb.parse_blendtorch_args()

    cam = bpy.context.scene.camera
    cube = bpy.data.objects["Cube"]

    def pre_frame():
        # Randomize cube rotation
        cube.rotation_euler = np.random.uniform(0, np.pi, size=3)

    def post_frame(off, pub, anim, cam):
        # Called after Blender has finished processing a frame.
        # The published data is sent to one of the connected remote
        # dataset listeners.
        pub.publish(
            image=off.render(),
            xy=cam.object_to_pixel(cube),
            frameid=anim.frameid
        )

    # Data source
    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)

    # Setup default image rendering
    cam = btb.Camera()
    off = btb.Renderer(btargs.btid, camera=cam, mode='rgb')

    # Setup the animation and run endlessly
    anim = btb.AnimationController()
    anim.pre_frame.add(pre_frame)
    anim.post_frame.add(post_frame, off, pub, anim, cam)
    anim.play(frame_range=(0, 100), num_episodes=-1,
              use_offline_render=False)


main()

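# For context, a minimal sketch of the PyTorch side that consumes what the
# publisher above sends. It assumes the script above is launched together
# with a matching .blend scene; the file names 'cube.blend' and
# 'cube.blend.py' are illustrative, not taken from the listing above.
import torch.utils.data as data
import blendtorch.btt as btt


def main():
    launch_args = dict(
        scene='cube.blend',          # assumed scene file name
        script='cube.blend.py',      # assumed name of the script above
        num_instances=2,             # number of Blender processes to spawn
        named_sockets=['DATA'],      # must match btargs.btsockets['DATA']
    )
    with btt.BlenderLauncher(**launch_args) as bl:
        addr = bl.launch_info.addresses['DATA']
        ds = btt.RemoteIterableDataset(addr, max_items=16)
        dl = data.DataLoader(ds, batch_size=4, num_workers=0)

        for item in dl:
            # Each item carries the keys published by post_frame above.
            print(item['image'].shape, item['xy'].shape, item['frameid'])


main()
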
import bpy
import numpy as np
import blendtorch.btb as btb


def main():
    args, remainder = btb.parse_blendtorch_args()

    np.random.seed(args.btseed)
    cam = bpy.context.scene.camera

    # Assign random colors to all cubes
    cubes = list(bpy.data.collections['Cubes'].objects)
    for idx, c in enumerate(cubes):
        mat = bpy.data.materials.new(name=f'random{idx}')
        mat.diffuse_color = np.concatenate((np.random.random(size=3), [1.]))
        c.data.materials.append(mat)
        c.active_material = mat

    def pre_anim():
        # Randomize cube poses before each animation episode
        xyz = np.random.uniform((-3, -3, 6), (3, 3, 12.), size=(len(cubes), 3))
        rot = np.random.uniform(-np.pi, np.pi, size=(len(cubes), 3))
        for idx, c in enumerate(cubes):
            c.location = xyz[idx]
            c.rotation_euler = rot[idx]

    def post_frame(anim, off, pub, cam):
        pub.publish(
            image=off.render(),
            xy=cam.object_to_pixel(*cubes),
            frameid=anim.frameid
        )

    pub = btb.DataPublisher(args.btsockets['DATA'], args.btid)

    cam = btb.Camera()
    off = btb.OffScreenRenderer(camera=cam, mode='rgb')
    off.set_render_style(shading='RENDERED', overlays=False)

    anim = btb.AnimationController()
    anim.pre_animation.add(pre_anim)
    anim.post_frame.add(post_frame, anim, off, pub, cam)
    anim.play(frame_range=(0, 100), num_episodes=-1)


main()

import bpy
import numpy as np
import blendtorch.btb as btb


def main():
    btargs, remainder = btb.parse_blendtorch_args()

    def post_frame(pub, anim):
        # Publish a dummy image per frame
        pub.publish(
            frameid=anim.frameid,
            img=np.zeros((64, 64), dtype=np.uint8)
        )

    # Data source: add a linger period so queued data is still sent
    # when Blender closes.
    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid,
                            lingerms=5000)

    anim = btb.AnimationController()
    anim.post_frame.add(post_frame, pub, anim)
    anim.play(frame_range=(1, 3), num_episodes=-1,
              use_animation=not bpy.app.background)


main()

import bpy
import blendtorch.btb as btb

# SHAPE (supershape resolution) and NSHAPES (number of shapes) are
# module-level constants defined elsewhere in the original script.


def main():
    # Update python-path with current blend file directory
    btb.add_scene_dir_to_path()
    import scene_helpers as scene

    def pre_anim(meshes):
        # Called before each animation: randomize supershapes
        for m in meshes:
            scene.update_mesh(m, sshape_res=SHAPE)

    def post_frame(render, pub, animation):
        # Called after each frame; publish only the first frame of an episode.
        if animation.frameid == 1:
            imgs = render.render()
            pub.publish(normals=imgs['normals'], depth=imgs['depth'])

    # Parse script arguments passed via blendtorch launcher
    btargs, _ = btb.parse_blendtorch_args()

    # Fetch camera
    cam = bpy.context.scene.camera

    bpy.context.scene.rigidbody_world.time_scale = 100
    bpy.context.scene.rigidbody_world.substeps_per_frame = 300

    # Setup supershapes
    meshes = scene.prepare(NSHAPES, sshape_res=SHAPE)

    # Data source
    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)

    # Setup default image rendering
    cam = btb.Camera()
    render = btb.CompositeRenderer(
        [
            btb.CompositeSelection('normals', 'Out1', 'Normals', 'RGB'),
            btb.CompositeSelection('depth', 'Out1', 'Depth', 'V'),
        ],
        btid=btargs.btid,
        camera=cam,
    )

    # Setup the animation and run endlessly
    anim = btb.AnimationController()
    anim.pre_animation.add(pre_anim, meshes)
    anim.post_frame.add(post_frame, render, pub, anim)
    anim.play(frame_range=(0, 1), num_episodes=-1,
              use_offline_render=False, use_physics=True)


main()

import bpy
import blendtorch.btb as btb

# `sshape` (mesh helpers) and `generate_supershape` (a generator yielding
# (params, idx, coords) tuples) are provided elsewhere in the original script.


def main():
    btargs, remainder = btb.parse_blendtorch_args()

    uvshape = (100, 100)
    obj = sshape.make_bpy_mesh(uvshape)

    idx = None
    coords = None
    params = None
    gen = None

    def pre_frame(duplex):
        nonlocal gen, params, coords, idx
        # Check for a new message from the controlling process
        msg = duplex.recv(timeoutms=0)
        if msg is not None:
            gen = generate_supershape(msg, shape=uvshape)
        if gen is not None:
            try:
                params, idx, coords = next(gen)
                sshape.update_bpy_mesh(*coords, obj)
            except StopIteration:
                gen = None

    def post_frame(off, pub):
        if gen is not None:
            pub.publish(image=off.render(), shape_id=idx)

    # Data source
    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid)
    duplex = btb.DuplexChannel(btargs.btsockets['CTRL'], btargs.btid)

    # Setup default image rendering
    cam = btb.Camera()
    off = btb.OffScreenRenderer(camera=cam, mode='rgb')
    off.set_render_style(shading='SOLID', overlays=False)

    # Setup the animation and run endlessly
    anim = btb.AnimationController()
    anim.pre_frame.add(pre_frame, duplex)
    anim.post_frame.add(post_frame, off, pub)
    anim.play(frame_range=(0, 10000), num_episodes=-1)


main()

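# A minimal sketch of the controlling PyTorch side for the script above. It
# assumes blendtorch exposes a btt.DuplexChannel mirroring the btb counterpart
# (keyword-message send/recv); scene/script names and the 'shape_params'
# message key are illustrative only, since generate_supershape (defined
# elsewhere in the original script) determines which fields are expected.
import blendtorch.btt as btt


def main():
    launch_args = dict(
        scene='supershape.blend',        # assumed scene file name
        script='supershape.blend.py',    # assumed name of the script above
        num_instances=1,
        named_sockets=['DATA', 'CTRL'],  # must match the btsockets used above
    )
    with btt.BlenderLauncher(**launch_args) as bl:
        # One duplex channel per Blender instance.
        remotes = [btt.DuplexChannel(addr)
                   for addr in bl.launch_info.addresses['CTRL']]

        # Request new supershapes; pre_frame() above picks this up via recv().
        for remote in remotes:
            remote.send(shape_params=[7.0, 1.0, 1.0, 3.0, 3.0, 3.0])

        ds = btt.RemoteIterableDataset(bl.launch_info.addresses['DATA'],
                                       max_items=8)
        for item in ds:
            print(item['shape_id'], item['image'].shape)


main()
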
import bpy
import blendtorch.btb as btb


def main():
    btargs, remainder = btb.parse_blendtorch_args()

    # Record the order in which animation callbacks fire
    seq = []

    def pre_play(anim):
        seq.extend(['pre_play', anim.frameid])

    def pre_animation(anim):
        seq.extend(['pre_animation', anim.frameid])

    def pre_frame(anim):
        seq.extend(['pre_frame', anim.frameid])

    def post_frame(anim):
        seq.extend(['post_frame', anim.frameid])

    def post_animation(anim):
        seq.extend(['post_animation', anim.frameid])

    def post_play(anim, pub):
        seq.extend(['post_play', anim.frameid])
        pub.publish(seq=seq)

    # Data source: add a linger period so queued data is still sent
    # when Blender closes.
    pub = btb.DataPublisher(btargs.btsockets['DATA'], btargs.btid,
                            lingerms=5000)

    anim = btb.AnimationController()
    anim.pre_play.add(pre_play, anim)
    anim.pre_animation.add(pre_animation, anim)
    anim.pre_frame.add(pre_frame, anim)
    anim.post_frame.add(post_frame, anim)
    anim.post_animation.add(post_animation, anim)
    anim.post_play.add(post_play, anim, pub)
    anim.play(frame_range=(1, 3), num_episodes=2,
              use_animation=not bpy.app.background)


main()