def parallel_playback(cfg, fast=True, segment_time=2.0, constrained_timeranges=False):
    """
    Parallel media playback, flipping between the two sources.

    The fast version makes sure the textures continue to be updated even
    though they are not displayed. On the other hand, the slow version will
    update the textures only when needed to be displayed, causing potential
    seek in the underlying media, and thus undesired delays.
    """
    # Two independent decode contexts of the same file
    media_a = ngl.Media(cfg.medias[0].filename, label="media #1")
    media_b = ngl.Media(cfg.medias[0].filename, label="media #2")

    tex_a = ngl.Texture2D(data_src=media_a, label="texture #1")
    tex_b = ngl.Texture2D(data_src=media_b, label="texture #2")

    # Overlay a label on top of each render to identify the active source
    text_settings = {
        "box_corner": (-1, 1 - 0.2, 0),
        "box_height": (0, 0.2, 0),
        "aspect_ratio": cfg.aspect_ratio,
    }
    render_a = ngl.Group(children=(ngl.RenderTexture(tex_a), ngl.Text("media #1", **text_settings)))
    render_b = ngl.Group(children=(ngl.RenderTexture(tex_b), ngl.Text("media #2", **text_settings)))

    filter_a = ngl.TimeRangeFilter(render_a)
    filter_b = ngl.TimeRangeFilter(render_b)

    if constrained_timeranges:
        # Tighten the prefetch/idle windows so the media prefetch/release
        # mechanism is exercised within a single segment
        for trf in (filter_a, filter_b):
            trf.set_prefetch_time(segment_time / 3.0)
            trf.set_max_idle_time(segment_time / 2.0)

    # Alternate which source is displayed every segment_time
    ranges_a, ranges_b = [], []
    start = 0
    while start < cfg.duration:
        ranges_a += [ngl.TimeRangeModeCont(start), ngl.TimeRangeModeNoop(start + segment_time)]
        ranges_b += [ngl.TimeRangeModeNoop(start), ngl.TimeRangeModeCont(start + segment_time)]
        start += 2 * segment_time
    filter_a.add_ranges(*ranges_a)
    filter_b.add_ranges(*ranges_b)

    root = ngl.Group()
    root.add_children(filter_a, filter_b)
    if fast:
        # Keep the textures in the graph as direct children so they stay
        # updated even while their render branch is time-disabled
        root.add_children(tex_a, tex_b)
    return root
def simple_transition(cfg, transition_start=2, transition_duration=4):
    """Fading transition between two medias"""
    cfg.duration = transition_start * 2 + transition_duration

    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))

    # Program mixing the two textures according to an animated delta
    prog_mix = ngl.Program(vertex=cfg.get_vert("dual-tex"), fragment=cfg.get_frag("tex-mix"))
    prog_mix.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_tex1_coord=ngl.IOVec2())

    media_a = ngl.Media(cfg.medias[0].filename, label="media #1")
    media_b = ngl.Media(cfg.medias[1 % len(cfg.medias)].filename, label="media #2")

    # Delay the 2nd media so it starts playing when the transition begins
    animkf_m2 = [
        ngl.AnimKeyFrameFloat(transition_start, 0),
        ngl.AnimKeyFrameFloat(transition_start + cfg.duration, cfg.duration),
    ]
    media_b.set_time_anim(ngl.AnimatedTime(animkf_m2))

    tex_a = ngl.Texture2D(data_src=media_a, label="texture #1")
    tex_b = ngl.Texture2D(data_src=media_b, label="texture #2")

    render_a = ngl.RenderTexture(tex_a, label="render #1")
    render_b = ngl.RenderTexture(tex_b, label="render #2")

    # delta fades from 1 to 0 across the transition window
    delta_animkf = [
        ngl.AnimKeyFrameFloat(transition_start, 1.0),
        ngl.AnimKeyFrameFloat(transition_start + transition_duration, 0.0),
    ]
    render_mix = ngl.Render(quad, prog_mix, label="transition")
    render_mix.update_frag_resources(tex0=tex_a, tex1=tex_b)
    render_mix.update_frag_resources(delta=ngl.AnimatedFloat(delta_animkf))

    # Timeline: media #1 alone, then the mix, then media #2 alone
    ranges_a = [ngl.TimeRangeModeNoop(transition_start)]
    ranges_b = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_start + transition_duration),
    ]
    ranges_mix = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_start),
        ngl.TimeRangeModeNoop(transition_start + transition_duration),
    ]

    rf_a = ngl.TimeRangeFilter(render_a, ranges=ranges_a)
    rf_b = ngl.TimeRangeFilter(render_b, ranges=ranges_b)
    rf_mix = ngl.TimeRangeFilter(render_mix, ranges=ranges_mix)

    root = ngl.Group()
    root.add_children(rf_a, rf_mix, rf_b)
    return root
def media_timeranges_rtt(cfg):
    """Exercise the media prefetch/release mechanism through RTT proxies and time ranges."""
    m0 = cfg.medias[0]
    cfg.duration = d = 10
    cfg.aspect_ratio = (m0.width, m0.height)

    # Use a media/texture as leaf to exercise its prefetch/release mechanism
    texture = ngl.Texture2D(data_src=ngl.Media(m0.filename))

    # We want to make sure the idle times are enough to exercise the
    # prefetch/release mechanism
    prefetch_time = 1
    assert prefetch_time < d / 5

    # Diamond tree on the same media texture: each half goes through its own
    # intermediate RTT "proxy" (exercising prefetch/release at that level as
    # well), then is rendered on one half of the screen. The presentation is
    # split in 5 segments such that there are inactive times, prefetch times
    # and both overlapping and non-overlapping times for the RTTs.
    specs = (
        # (quad x-corner, uv u-corner, activation segment, deactivation segment, label)
        (-1, 0, 1, 3, "left"),
        (0, 0.5, 2, 4, "right"),
    )
    halves = []
    for idx, (x0, u0, seg_on, seg_off, label) in enumerate(specs):
        leaf = ngl.RenderTexture(texture, label="leaf %d" % idx)
        dst_tex = ngl.Texture2D(width=m0.width, height=m0.height)
        rtt = ngl.RenderToTexture(leaf, [dst_tex])
        quad = ngl.Quad((x0, -1, 0), (1, 0, 0), (0, 2, 0), uv_corner=(u0, 0), uv_width=(0.5, 0))
        rtt_render = ngl.RenderTexture(dst_tex, geometry=quad, label="render RTT %d" % idx)
        proxy = ngl.Group(children=(rtt, rtt_render), label="proxy %d" % idx)
        ranges = (
            ngl.TimeRangeModeNoop(0),
            ngl.TimeRangeModeCont(seg_on / 5 * d),
            ngl.TimeRangeModeNoop(seg_off / 5 * d),
        )
        halves.append(ngl.TimeRangeFilter(proxy, ranges=ranges, prefetch_time=prefetch_time, label=label))

    return ngl.Group(children=halves)
def queued_medias(cfg, overlap_time=1.0, dim=3):
    """Queue of medias, mainly used as a demonstration for the prefetch/release mechanism"""
    nb_videos = dim * dim
    filters = []
    grid = AutoGrid(range(nb_videos))
    for video_id, _, col, pos in grid:
        # Each video gets its own slot of the total duration
        start = video_id * cfg.duration / nb_videos

        # Remap the media time so playback begins at the slot start
        animkf = [
            ngl.AnimKeyFrameFloat(start, 0),
            ngl.AnimKeyFrameFloat(start + cfg.duration, cfg.duration),
        ]
        media = ngl.Media(cfg.medias[video_id % len(cfg.medias)].filename, time_anim=ngl.AnimatedTime(animkf))
        media.set_label("media #%d" % video_id)

        render = ngl.RenderTexture(ngl.Texture2D(data_src=media))
        render.set_label("render #%d" % video_id)
        render = grid.place_node(render, (col, pos))

        # Active from its slot start until slightly past the slot end
        # (overlap_time), so consecutive medias overlap
        trf = ngl.TimeRangeFilter(render)
        if start:
            trf.add_ranges(ngl.TimeRangeModeNoop(0))
        trf.add_ranges(
            ngl.TimeRangeModeCont(start),
            ngl.TimeRangeModeNoop(start + cfg.duration / nb_videos + overlap_time),
        )
        filters.append(trf)

    return ngl.Group(children=filters)
def _get_time_scene(cfg):
    """Build a time-remapped media render wrapped in a time range filter."""
    m0 = cfg.medias[0]

    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    # Timeline: noop | prefetch | freeze (first frame) | playback | freeze (last frame) | noop
    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration

    cfg.duration = range_stop + noop_duration
    cfg.aspect_ratio = (m0.width, m0.height)

    # Seek into the media and only advance its time during [play_start, play_stop]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    media = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
    render = ngl.RenderTexture(ngl.Texture2D(data_src=media))

    ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    return ngl.TimeRangeFilter(render, ranges=ranges, prefetch_time=prefetch_duration)
def rtt_clear_attachment_with_timeranges(cfg):
    """Check RTT attachment clearing when combined with time ranges and a no-op RTT."""
    cfg.aspect_ratio = (1, 1)

    # Time-disabled full screen white quad
    white = ngl.RenderColor(COLORS.white)
    time_range_filter = ngl.TimeRangeFilter(white)
    time_range_filter.add_ranges(ngl.TimeRangeModeNoop(0))

    # Intermediate no-op RTT to force the use of a different render pass internally
    noop_tex = ngl.Texture2D(width=32, height=32)
    rtt_noop = ngl.RenderToTexture(ngl.Identity(), [noop_tex])

    # Centered rotating quad
    quad = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))
    animkf = [ngl.AnimKeyFrameFloat(0, 0), ngl.AnimKeyFrameFloat(cfg.duration, -360)]
    rotated = ngl.Rotate(
        ngl.RenderColor(COLORS.orange, geometry=quad),
        angle=ngl.AnimatedFloat(animkf),
    )

    scene = ngl.Group(children=(time_range_filter, rtt_noop, rotated))

    # Root RTT
    root_tex = ngl.Texture2D(width=512, height=512)
    rtt = ngl.RenderToTexture(scene, [root_tex])

    # Full screen render of the root RTT result
    return ngl.Group(children=(rtt, ngl.RenderTexture(root_tex)))
def buffer_dove(cfg, bgcolor1=(0.6, 0, 0), bgcolor2=(0.8, 0.8, 0), bilinear_filtering=True):
    """Blending of a Render using a Buffer as data source"""
    cfg.duration = 3.0

    # Credits: https://icons8.com/icon/40514/dove
    # (Raw data is the premultiplied)
    icon_filename = op.join(op.dirname(__file__), "data", "icons8-dove.raw")
    cfg.files.append(icon_filename)

    w, h = (96, 96)
    cfg.aspect_ratio = (w, h)

    # The icon texture is fed directly from the raw RGBA buffer
    img_buf = ngl.BufferUBVec4(filename=icon_filename, label="icon raw buffer")
    img_tex = ngl.Texture2D(data_src=img_buf, width=w, height=h)
    if bilinear_filtering:
        img_tex.set_mag_filter("linear")

    quad = ngl.Quad((-0.5, -0.5, 0.1), (1, 0, 0), (0, 1, 0))
    icon_render = ngl.RenderTexture(img_tex, geometry=quad, blending="src_over")

    # Background circle oscillating between the two background colors
    shape_bg = ngl.Circle(radius=0.6, npoints=256)
    color_animkf = [
        ngl.AnimKeyFrameColor(0, bgcolor1),
        ngl.AnimKeyFrameColor(cfg.duration / 2.0, bgcolor2),
        ngl.AnimKeyFrameColor(cfg.duration, bgcolor1),
    ]
    bg_render = ngl.RenderColor(ngl.AnimatedColor(color_animkf), geometry=shape_bg, label="background")

    return ngl.Group(children=(bg_render, icon_render))
def lut3d(cfg, xsplit=0.3, trilinear=True):
    """Lookup Table 3D using a Texture3D"""
    level = 6
    level2 = level**2

    # Generated with `ffmpeg -f lavfi -i haldclutsrc=6,curves=vintage,format=rgba
    # -f rawvideo -frames:v 1 lut3d.raw`
    lut3d_filename = op.join(op.dirname(__file__), "data", "lut3d.raw")
    cfg.files.append(lut3d_filename)

    # Upload the Hald CLUT raw data as a 3D texture
    lut3d_tex = ngl.Texture3D(
        data_src=ngl.BufferUBVec4(filename=lut3d_filename),
        width=level2,
        height=level2,
        depth=level2,
    )
    if trilinear:
        lut3d_tex.set_min_filter("linear")
        lut3d_tex.set_mag_filter("linear")

    m0 = cfg.medias[0]
    cfg.duration = m0.duration
    cfg.aspect_ratio = (m0.width, m0.height)

    video_tex = ngl.Texture2D(data_src=ngl.Media(m0.filename))

    # One side shows the untouched video...
    scene_tex = ngl.RenderTexture(video_tex)

    # ...the other shows the video filtered through the LUT
    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    prog_lut = ngl.Program(fragment=cfg.get_frag("lut3d"), vertex=cfg.get_vert("lut3d"))
    prog_lut.update_vert_out_vars(var_uvcoord=ngl.IOVec2(), var_tex0_coord=ngl.IOVec2())
    scene_lut = ngl.Render(quad, prog_lut)
    scene_lut.update_frag_resources(tex0=video_tex, lut3d=lut3d_tex)

    return compare(cfg, scene_tex, scene_lut, xsplit)
def _get_random_layer(cfg, rng, t0, t1, enable_computes, layer=4):
    """
    Recursively build one layer of a randomized scene graph active within [t0, t1].

    Each layer contains 2 to 5 children; exactly 2 of the slots recurse into a
    sub-layer (rendered through a small RTT), the others become random leaf
    renders. `layer` is the remaining recursion depth; at 0 every slot is a
    leaf. All time-range boundaries are drawn from [t0, t1].

    NOTE(review): the rng call order is part of the behavior (same seed must
    reproduce the same graph), so statements must not be reordered.
    """
    nb_elems = rng.randint(2, 5)
    children = []
    # Pick which 2 of the slots will recurse into a sub-layer
    sub_layers = rng.sample(range(nb_elems), 2)
    for i in range(nb_elems):
        if i in sub_layers and layer != 0:
            # Recursively create another layer
            child = _get_random_layer(cfg, rng, t0, t1, enable_computes, layer=layer - 1)
            child = _get_random_transform(rng, t0, t1, child)
            child.set_label(f"layer={layer}")

            # Create a (small) RTT of the children
            rtt_tex = ngl.Texture2D(
                width=rng.randint(50, 90),
                height=rng.randint(15, 70),
            )
            rtt = ngl.RenderToTexture(
                child,
                clear_color=_get_random_color(rng) + (1,),
            )
            rtt.add_color_textures(rtt_tex)
            rtt_render = ngl.RenderTexture(
                rtt_tex,
                geometry=_get_random_geometry(rng),
                blending="src_over",
            )
            rtt_render = _get_random_transform(rng, t0, t1, rtt_render)
            t_start, t_end = _get_random_time_range(rng, t0, t1)
            rtt_group = ngl.Group(children=(rtt, rtt_render))
            # The RTT branch is only active within [t_start, t_end]
            t_filter = ngl.TimeRangeFilter(rtt_group)
            t_filter.add_ranges(
                ngl.TimeRangeModeNoop(0),
                ngl.TimeRangeModeCont(t_start),
                ngl.TimeRangeModeNoop(t_end),
            )

            # Draw both the children and the rendered texture
            child = ngl.Group(children=(t_filter, child))
        else:
            # We are at a leaf (last layer) so we create a random render
            t_start, t_end = _get_random_time_range(rng, t0, t1)
            child = _get_random_render(cfg, rng, t_start, t_end, enable_computes)
            # 1 chance out of 3 to also wrap the leaf in a random transform
            if rng.random() < 1 / 3:
                child = _get_random_transform(rng, t_start, t_end, child)
            child = ngl.TimeRangeFilter(child)
            child.add_ranges(
                ngl.TimeRangeModeNoop(0),
                ngl.TimeRangeModeCont(t_start),
                ngl.TimeRangeModeNoop(t_end),
            )
        children.append(child)
    return ngl.Group(children=children)
def media_clamp(cfg):
    """Render a media through a texture with video clamping enabled."""
    m0 = cfg.medias[0]
    cfg.duration = m0.duration
    cfg.aspect_ratio = (m0.width, m0.height)
    tex = ngl.Texture2D(data_src=ngl.Media(m0.filename), clamp_video=True)
    return ngl.RenderTexture(tex)
def _rtt_load_attachment_nested(samples=0):
    """Wrap the attachment-load scene in an extra RTT level (optionally multisampled)."""
    inner = _rtt_load_attachment()
    dst_tex = ngl.Texture2D(width=16, height=16)
    rtt = ngl.RenderToTexture(inner, [dst_tex], samples=samples)
    return ngl.Group(children=(rtt, ngl.RenderTexture(dst_tex)))
def histogram(cfg):
    """Histogram using compute shaders"""
    m0 = cfg.medias[0]
    cfg.duration = m0.duration
    cfg.aspect_ratio = (m0.width, m0.height)
    g = ngl.Group()

    m = ngl.Media(m0.filename)  # reuse m0 instead of re-indexing cfg.medias
    t = ngl.Texture2D(data_src=m)

    # Histogram storage: one 256-bin buffer per channel plus the overall maximum
    h = ngl.Block(label="histogram_block", layout="std430")
    h.add_fields(
        ngl.BufferUInt(256, label="r"),
        ngl.BufferUInt(256, label="g"),
        ngl.BufferUInt(256, label="b"),
        ngl.UniformUInt(label="maximum"),
    )

    # Downscale the video into a small proxy texture to reduce the compute load
    r = ngl.RenderTexture(t)
    proxy_size = 128
    proxy = ngl.Texture2D(width=proxy_size, height=proxy_size)
    rtt = ngl.RenderToTexture(r)
    rtt.add_color_textures(proxy)
    g.add_children(rtt)

    # Pass 1: reset the histogram buffers
    compute_program = ngl.ComputeProgram(cfg.get_comp("histogram-clear"), workgroup_size=(1, 1, 1))
    compute_program.update_properties(hist=ngl.ResourceProps(writable=True))
    compute = ngl.Compute(workgroup_count=(256, 1, 1), program=compute_program, label="histogram-clear")
    compute.update_resources(hist=h)
    g.add_children(compute)

    # Pass 2: accumulate the histogram from the proxy texture
    local_size = 8
    # Fix: integer division; workgroup_count components must be integers
    # (proxy_size is a multiple of local_size, so no texel is left out)
    group_size = proxy_size // local_size
    compute_program = ngl.ComputeProgram(cfg.get_comp("histogram-exec"), workgroup_size=(local_size, local_size, 1))
    compute = ngl.Compute(workgroup_count=(group_size, group_size, 1), program=compute_program, label="histogram-exec")
    compute.update_resources(hist=h, source=proxy)
    compute_program.update_properties(hist=ngl.ResourceProps(writable=True))
    compute_program.update_properties(source=ngl.ResourceProps(as_image=True))
    g.add_children(compute)

    # Final pass: display the video with its histogram
    q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    p = ngl.Program(vertex=cfg.get_vert("histogram-display"), fragment=cfg.get_frag("histogram-display"))
    p.update_vert_out_vars(var_uvcoord=ngl.IOVec2(), var_tex0_coord=ngl.IOVec2())
    render = ngl.Render(q, p)
    render.update_frag_resources(tex0=t, hist=h)
    g.add_children(render)

    return g
def _get_rtt_scene(cfg, features="depth", texture_ds_format=None, samples=0, mipmap_filter="none", sample_depth=False):
    """
    Render a rotating cube into a texture and display the result full screen.

    Parameters:
        features: render target features forwarded to RenderToTexture
        texture_ds_format: if set, allocate a depth texture with this format
            and attach it to the RTT
        samples: multisample count forwarded to RenderToTexture
        mipmap_filter: mipmap filter of the RTT color texture
        sample_depth: if True, display the depth texture instead of the color
            one (requires texture_ds_format to be set)
    """
    cfg.duration = 10
    cfg.aspect_ratio = (1, 1)
    cube = _get_cube()
    program = ngl.Program(vertex=_RENDER_CUBE_VERT, fragment=_RENDER_CUBE_FRAG)
    program.update_vert_out_vars(var_normal=ngl.IOVec3())
    render = ngl.Render(cube, program)
    render = ngl.Scale(render, (0.5, 0.5, 0.5))

    # Stack one rotation per axis, each spinning at a different speed
    for i in range(3):
        rot_animkf = ngl.AnimatedFloat(
            [ngl.AnimKeyFrameFloat(0, 0), ngl.AnimKeyFrameFloat(cfg.duration, 360 * (i + 1))]
        )
        axis = tuple(int(i == x) for x in range(3))
        render = ngl.Rotate(render, axis=axis, angle=rot_animkf)

    config = ngl.GraphicConfig(render, depth_test=True)
    camera = ngl.Camera(
        config,
        eye=(0.0, 0.0, 3.0),
        center=(0.0, 0.0, 0.0),
        up=(0.0, 1.0, 0.0),
        perspective=(45.0, cfg.aspect_ratio_float),
        clipping=(1.0, 10.0),
    )

    size = 1024
    texture_depth = None
    if texture_ds_format:
        texture_depth = ngl.Texture2D(width=size, height=size, format=texture_ds_format)

    texture = ngl.Texture2D(
        width=size,
        height=size,
        min_filter="linear",
        mipmap_filter=mipmap_filter,
    )
    rtt = ngl.RenderToTexture(
        camera,
        [texture],
        features=features,
        depth_texture=texture_depth,
        samples=samples,
        clear_color=(0, 0, 0, 1),
    )

    if sample_depth:
        # Visualize the depth attachment with a dedicated fragment shader
        quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
        program = ngl.Program(vertex=cfg.get_vert("texture"), fragment=_RENDER_DEPTH)
        program.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
        render = ngl.Render(quad, program)
        render.update_frag_resources(tex0=texture_depth)
    else:
        render = ngl.RenderTexture(texture)

    return ngl.Group(children=(rtt, render))
def animated_camera(cfg, rotate=True):
    """Animated camera around a scene"""
    g = ngl.Group()

    q = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))
    m = ngl.Media(cfg.medias[0].filename)
    t = ngl.Texture2D(data_src=m)
    node = ngl.RenderTexture(t, geometry=q)
    g.add_children(node)

    # Surround the centered quad with 4 translated copies, one per corner
    # (replaces 4 copy-pasted Translate/add_children stanzas)
    for vector in ((-0.6, 0.8, -1), (0.6, 0.8, -1), (-0.6, -0.5, -1), (0.6, -0.5, -1)):
        g.add_children(ngl.Translate(node, vector=vector))

    g = ngl.GraphicConfig(g, depth_test=True)
    camera = ngl.Camera(g)
    camera.set_eye(0, 0, 2)
    camera.set_center(0.0, 0.0, 0.0)
    camera.set_up(0.0, 1.0, 0.0)
    camera.set_clipping(0.1, 10.0)

    # Animated dolly-out of the eye position
    tr_animkf = [
        ngl.AnimKeyFrameVec3(0, (0.0, 0.0, 0.0)),
        ngl.AnimKeyFrameVec3(10, (0.0, 0.0, 3.0), "exp_out"),
    ]
    node = ngl.Translate(ngl.Identity(), vector=ngl.AnimatedVec3(tr_animkf))

    if rotate:
        # Optional full revolution around the vertical axis
        rot_animkf = [
            ngl.AnimKeyFrameFloat(0, 0),
            ngl.AnimKeyFrameFloat(cfg.duration, 360, "exp_out"),
        ]
        node = ngl.Rotate(node, axis=(0, 1, 0), angle=ngl.AnimatedFloat(rot_animkf))

    camera.set_eye_transform(node)

    # Animated narrowing of the field of view
    perspective_animkf = [
        ngl.AnimKeyFrameVec2(0.5, (60.0, cfg.aspect_ratio_float)),
        ngl.AnimKeyFrameVec2(cfg.duration, (45.0, cfg.aspect_ratio_float), "exp_out"),
    ]
    camera.set_perspective(ngl.AnimatedVec2(perspective_animkf))

    return camera
def texture_data_animated(cfg, dim=8):
    """Render a texture whose content is animated through random color buffers."""
    cfg.duration = 3.0
    nb_kf = int(cfg.duration)
    buffers = [get_random_color_buffer(cfg.rng, dim) for _ in range(nb_kf)]
    time_scale = cfg.duration / float(nb_kf)
    # Append the first buffer at the end so the animation loops seamlessly
    random_animkf = [
        ngl.AnimKeyFrameBuffer(i * time_scale, buf)
        for i, buf in enumerate(buffers + [buffers[0]])
    ]
    random_buffer = ngl.AnimatedBufferVec4(keyframes=random_animkf)
    random_tex = ngl.Texture2D(data_src=random_buffer, width=dim, height=dim)
    return ngl.RenderTexture(random_tex)
def texture_scissor(cfg):
    """Scissored render into an RTT: only the scissor region receives the color."""
    cfg.aspect_ratio = (1, 1)
    scissored = ngl.GraphicConfig(
        ngl.RenderColor(COLORS.orange),
        scissor_test=True,
        scissor=(32, 32, 32, 32),
    )
    texture = ngl.Texture2D(width=64, height=64)
    rtt = ngl.RenderToTexture(scissored, [texture], clear_color=(0, 0, 0, 1))
    return ngl.Group(children=(rtt, ngl.RenderTexture(texture)))
def media_flat_remap(cfg):
    """Remap the media time with a single (flat) keyframe."""
    m0 = cfg.medias[0]
    cfg.duration = m0.duration
    cfg.aspect_ratio = (m0.width, m0.height)

    # Single keyframe time animation targeting t=1.833s in the media
    media_animkf = [ngl.AnimKeyFrameFloat(cfg.duration / 2, 1.833)]
    media = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
    return ngl.RenderTexture(ngl.Texture2D(data_src=media))
def texture_clear_and_scissor(cfg):
    """RTT clear combined with a fully masked, zero-area scissored render."""
    # Full screen white quad fully discarded by a 0-sized scissor and an
    # empty color write mask
    scissored = ngl.GraphicConfig(
        ngl.RenderColor(COLORS.white),
        scissor_test=True,
        scissor=(0, 0, 0, 0),
        color_write_mask="",
    )
    # The RTT draws nothing (Identity) but clears its attachment to orange
    texture = ngl.Texture2D(width=64, height=64)
    rtt = ngl.RenderToTexture(ngl.Identity(), [texture], clear_color=list(COLORS.orange) + [1])
    return ngl.Group(children=(scissored, rtt, ngl.RenderTexture(texture)))
def playback_speed(cfg, speed=1.0):
    """Adjust media playback speed using animation keyframes"""
    m0 = cfg.medias[0]
    media_duration = m0.duration
    initial_seek = min(media_duration, 5)
    rush_duration = media_duration - initial_seek

    # Mapping the fixed media time range onto a scaled scene duration is what
    # effectively changes the playback speed
    cfg.duration = rush_duration / speed
    cfg.aspect_ratio = (m0.width, m0.height)

    quad = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))
    time_animkf = [
        ngl.AnimKeyFrameFloat(0, initial_seek),
        ngl.AnimKeyFrameFloat(cfg.duration, media_duration),
    ]
    media = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(time_animkf))
    return ngl.RenderTexture(ngl.Texture2D(data_src=media), geometry=quad)
def _rtt_load_attachment():
    """Attachment-load scene: white background, one sampled RTT and one unsampled RTT."""
    background = ngl.RenderColor(COLORS.white)
    render = ngl.RenderColor(COLORS.orange)

    texture = ngl.Texture2D(width=16, height=16)
    rtt = ngl.RenderToTexture(render, [texture])

    # Second RTT of the same render whose result is never sampled
    texture_noop = ngl.Texture2D(width=16, height=16)
    rtt_noop = ngl.RenderToTexture(render, [texture_noop])

    # Only the first RTT result is displayed, in the top-right quadrant
    quad = ngl.Quad((0, 0, 0), (1, 0, 0), (0, 1, 0))
    foreground = ngl.RenderTexture(texture, geometry=quad)

    return ngl.Group(children=(background, rtt, rtt_noop, foreground))
def compute_image_load_store(cfg, show_dbg_points=False):
    """Compose an RGBA texture out of 3 single-channel textures using image load/store."""
    size = _N
    ramp = array.array("f", [x / (size**2) for x in range(size**2)])
    texture_data = ngl.BufferFloat(data=ramp)

    # Three read-only single-channel input images sharing the same ramp data
    texture_r = ngl.Texture2D(format="r32_sfloat", width=size, height=size, data_src=texture_data)
    texture_g = ngl.Texture2D(format="r32_sfloat", width=size, height=size, data_src=texture_data)
    texture_b = ngl.Texture2D(format="r32_sfloat", width=size, height=size, data_src=texture_data)

    scale = ngl.Block(
        fields=[ngl.UniformVec2(value=(-1.0, 1.0), label="factors")],
        layout="std140",
    )

    # Writable output image filled by the compute pass
    texture_rgba = ngl.Texture2D(width=size, height=size)

    program = ngl.ComputeProgram(_IMAGE_LOAD_STORE_COMPUTE, workgroup_size=(size, size, 1))
    program.update_properties(
        texture_r=ngl.ResourceProps(as_image=True),
        texture_g=ngl.ResourceProps(as_image=True),
        texture_b=ngl.ResourceProps(as_image=True),
        texture_rgba=ngl.ResourceProps(as_image=True, writable=True),
    )
    compute = ngl.Compute(workgroup_count=(1, 1, 1), program=program)
    compute.update_resources(
        texture_r=texture_r,
        texture_g=texture_g,
        texture_b=texture_b,
        scale=scale,
        texture_rgba=texture_rgba,
    )

    group = ngl.Group(children=(compute, ngl.RenderTexture(texture_rgba)))
    if show_dbg_points:
        group.add_children(get_debug_points(cfg, _get_compute_histogram_cuepoints()))
    return group
def filter_gamma_correct(cfg, linear=True):
    """This test operates a gamma correct blending (the blending happens in linear space)"""
    # Hue colors rotated clockwise
    cw_colors = dict(color_tl=COLORS.rose, color_tr=COLORS.blue, color_br=COLORS.sgreen, color_bl=COLORS.yellow)
    # Hue colors rotated counter-clockwise started with another color
    ccw_colors = dict(color_tl=COLORS.orange, color_tr=COLORS.magenta, color_br=COLORS.azure, color_bl=COLORS.green)
    dst = ngl.RenderGradient4(**cw_colors)
    src = ngl.RenderGradient4(**ccw_colors)

    # Screen blending so that working in linear space makes a significant
    # difference
    blend = ngl.GraphicConfig(
        ngl.Group(children=(dst, src)),
        blend=True,
        blend_src_factor="one",
        blend_dst_factor="one_minus_src_color",
        blend_src_factor_a="one",
        blend_dst_factor_a="zero",
    )

    # Intermediate RTT so that we can gamma correct the result
    tex = ngl.Texture2D(width=320, height=240)
    rtt = ngl.RenderToTexture(blend, color_textures=[tex])
    render = ngl.RenderTexture(tex)

    if linear:
        # The result of the combination is linear...
        dst.add_filters(ngl.FilterSRGB2Linear())
        src.add_filters(ngl.FilterSRGB2Linear())
        # ...and we compress it back to sRGB
        render.add_filters(ngl.FilterLinear2sRGB())

    return ngl.Group(children=(rtt, render))
def media_queue(cfg, overlap_time=7.0, dim=3):
    """Grid of sequentially scheduled medias exercising the prefetch/release mechanism."""
    cfg.duration = 10
    cfg.aspect_ratio = (1, 1)

    nb_medias = dim * dim

    # Restrict the pool to the supported file types
    filenames = [m.filename for m in cfg.medias if m.filename.endswith(("mp4", "jpg"))]

    scheduled = []
    grid = AutoGrid(range(nb_medias))
    for video_id, _, col, pos in grid:
        start = video_id * cfg.duration / nb_medias

        # Shift the media time so playback begins at its slot start
        animkf = [
            ngl.AnimKeyFrameFloat(start, 0),
            ngl.AnimKeyFrameFloat(start + cfg.duration, cfg.duration),
        ]
        media = ngl.Media(filenames[video_id % len(filenames)], time_anim=ngl.AnimatedTime(animkf))

        texture = ngl.Texture2D(data_src=media, min_filter="linear", mag_filter="linear")
        render = grid.place_node(ngl.RenderTexture(texture), (col, pos))

        # Active from the slot start until slightly past its end (overlap_time)
        trf = ngl.TimeRangeFilter(render)
        if start:
            trf.add_ranges(ngl.TimeRangeModeNoop(0))
        trf.add_ranges(
            ngl.TimeRangeModeCont(start),
            ngl.TimeRangeModeNoop(start + cfg.duration / nb_medias + overlap_time),
        )
        scheduled.append(trf)

    return ngl.Group(children=scheduled)
def _get_random_rendertexture(cfg, rng):
    """Build an alpha-blended RenderTexture with a random texture and geometry."""
    # rng consumption order (texture first, then geometry) must be preserved
    texture = _get_random_texture(cfg, rng)
    geometry = _get_random_geometry(rng)
    return ngl.RenderTexture(texture=texture, geometry=geometry, blending="src_over")
def cube(cfg, display_depth_buffer=False):
    """
    Cube with a common media Texture but a different color tainting on each
    side. Also includes a depth map visualization.
    """
    # Every face renders the same media texture with its own tint
    program = ngl.Program(vertex=cfg.get_vert("texture"), fragment=cfg.get_frag("tex-tint"))
    program.update_vert_out_vars(var_uvcoord=ngl.IOVec2(), var_tex0_coord=ngl.IOVec2())
    texture = ngl.Texture2D(data_src=ngl.Media(cfg.medias[0].filename))

    cube = ngl.Group(label="cube")
    cube.add_children(*[
        _get_cube_side(texture, program, qi[0], qi[1], qi[2], qi[3]) for qi in _get_cube_quads()
    ])

    # Stack one rotation per axis, each spinning at a different speed
    for i in range(3):
        rot_animkf = ngl.AnimatedFloat([
            ngl.AnimKeyFrameFloat(0, 0),
            ngl.AnimKeyFrameFloat(cfg.duration, 360 * (i + 1)),
        ])
        axis = [int(i == x) for x in range(3)]
        cube = ngl.Rotate(cube, axis=axis, angle=rot_animkf)

    config = ngl.GraphicConfig(cube, depth_test=True)

    camera = ngl.Camera(config)
    camera.set_eye(0.0, 0.0, 2.0)
    camera.set_center(0.0, 0.0, 0.0)
    camera.set_up(0.0, 1.0, 0.0)
    camera.set_perspective(45.0, cfg.aspect_ratio_float)
    camera.set_clipping(1.0, 10.0)

    if not display_depth_buffer:
        return camera

    # Render the scene into color + depth attachments and display the color
    # buffer bottom-left, the depth buffer top-right
    depth_texture = ngl.Texture2D(format="auto_depth", width=640, height=480)
    color_texture = ngl.Texture2D(width=640, height=480)

    rtt = ngl.RenderToTexture(camera)
    rtt.add_color_textures(color_texture)
    rtt.set_depth_texture(depth_texture)

    group = ngl.Group()
    quad = ngl.Quad((-1.0, -1.0, 0), (1, 0, 0), (0, 1, 0))
    group.add_children(rtt, ngl.RenderTexture(color_texture, geometry=quad))
    quad = ngl.Quad((0.0, 0.0, 0), (1, 0, 0), (0, 1, 0))
    group.add_children(rtt, ngl.RenderTexture(depth_texture, geometry=quad))
    return group
def time_remapping(cfg):
    """
    Time remapping in the following order:
    - nothing displayed for a while (but media prefetch happening in background)
    - first frame displayed for a while
    - normal playback
    - last frame displayed for a while (even though the media is closed)
    - nothing again until the end
    """
    m0 = cfg.medias[0]

    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    # Derived timeline boundaries
    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration
    duration = range_stop + noop_duration

    cfg.duration = duration
    cfg.aspect_ratio = (m0.width, m0.height)

    # Seek into the media and only advance its time during [play_start, play_stop]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
    m.set_sxplayer_min_level("verbose")
    t = ngl.Texture2D(data_src=m)
    r = ngl.RenderTexture(t)

    time_ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    rf = ngl.TimeRangeFilter(r, ranges=time_ranges, prefetch_time=prefetch_duration)

    # Static on-screen summary of the remapping parameters
    base_string = "media time: {:2g} to {:2g}\nscene time: {:2g} to {:2g}\ntime range: {:2g} to {:2g}".format(
        media_seek, media_seek + playback_duration, play_start, play_stop, range_start, range_stop
    )
    text = ngl.Text(
        base_string,
        box_height=(0, 0.3, 0),
        box_corner=(-1, 1 - 0.3, 0),
        aspect_ratio=cfg.aspect_ratio,
        halign="left",
    )

    group = ngl.Group()
    group.add_children(rf, text)

    steps = (
        ("default color, nothing yet", 0, noop_duration),
        ("default color, media prefetched", noop_duration, range_start),
        ("first frame", range_start, play_start),
        ("normal playback", play_start, play_stop),
        ("last frame", play_stop, range_stop),
        ("default color, media released", range_stop, duration),
    )

    # One time-ranged text overlay describing each step of the timeline.
    # Fix: the original label literal contained a stray mojibake control
    # character between ": " and the description; restored to a clean
    # single-line label.
    for i, (description, start_time, end_time) in enumerate(steps):
        text = ngl.Text(
            f"{start_time:g} to {end_time:g}: {description}",
            aspect_ratio=cfg.aspect_ratio,
            box_height=(0, 0.2, 0),
        )
        text_tr = (
            ngl.TimeRangeModeNoop(0),
            ngl.TimeRangeModeCont(start_time),
            ngl.TimeRangeModeNoop(end_time),
        )
        text_rf = ngl.TimeRangeFilter(text, ranges=text_tr, label="text-step-%d" % i)
        group.add_children(text_rf)

    return group