def queued_medias(cfg, overlap_time=1., dim=3):
    '''Queue of medias, mainly used as a demonstration for the prefetch/release mechanism'''
    # Each video occupies one cell of a dim x dim grid covering [-1, 1] x [-1, 1]
    cell = 2. / dim
    nb_videos = dim * dim

    # A single (default) program shared by every render
    program = ngl.Program()

    filters = []
    for video_id in range(nb_videos):
        row, col = divmod(video_id, dim)
        start = video_id * cfg.duration / nb_videos

        # Remap the media time so that playback starts at `start`
        animkf = [ngl.AnimKeyFrameFloat(start, 0)]
        media = ngl.Media(cfg.medias[video_id % len(cfg.medias)].filename,
                          time_anim=ngl.AnimatedTime(animkf))
        media.set_label('media #%d' % video_id)

        corner = (-1. + col * cell, 1. - (row + 1) * cell, 0)
        quad = ngl.Quad(corner, (cell, 0, 0), (0, cell, 0))
        texture = ngl.Texture2D(data_src=media)

        render = ngl.Render(quad, program)
        render.set_label('render #%d' % video_id)
        render.update_textures(tex0=texture)

        # Draw nothing before `start`, then keep the slot alive slightly longer
        # than its share of the timeline so the next media can be prefetched
        trf = ngl.TimeRangeFilter(render)
        if start:
            trf.add_ranges(ngl.TimeRangeModeNoop(0))
        trf.add_ranges(ngl.TimeRangeModeCont(start),
                       ngl.TimeRangeModeNoop(start + cfg.duration / nb_videos + overlap_time))
        filters.append(trf)

    return ngl.Group(children=filters)
def queued_medias(cfg, overlap_time=1., dim=3):
    '''Queue of medias, mainly used as a demonstration for the prefetch/release mechanism'''
    nb_videos = dim * dim
    tqs = []
    q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))

    # Fix: the shading program is identical for every cell, so build the node
    # once instead of re-creating it at every loop iteration (the sibling
    # legacy variant of this demo already shares a single program)
    program = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
    program.update_vert_out_vars(var_uvcoord=ngl.IOVec2(), var_tex0_coord=ngl.IOVec2())

    ag = AutoGrid(range(nb_videos))
    for video_id, _, col, pos in ag:
        start = video_id * cfg.duration / nb_videos

        # Remap the media time so that playback starts at `start`
        animkf = [ngl.AnimKeyFrameFloat(start, 0)]
        m = ngl.Media(cfg.medias[video_id % len(cfg.medias)].filename,
                      time_anim=ngl.AnimatedTime(animkf))
        m.set_label('media #%d' % video_id)
        t = ngl.Texture2D(data_src=m)

        render = ngl.Render(q, program)
        render.set_label('render #%d' % video_id)
        render.update_frag_resources(tex0=t)
        render = ag.place_node(render, (col, pos))

        # Draw nothing before `start`, then keep the slot alive slightly longer
        # than its share of the timeline so the next media can be prefetched
        rf = ngl.TimeRangeFilter(render)
        if start:
            rf.add_ranges(ngl.TimeRangeModeNoop(0))
        rf.add_ranges(ngl.TimeRangeModeCont(start),
                      ngl.TimeRangeModeNoop(start + cfg.duration / nb_videos + overlap_time))
        tqs.append(rf)

    return ngl.Group(children=tqs)
def _get_time_scene(cfg):
    # Scene exercising time remapping: seek into the media, freeze the first
    # and last frames, and surround everything with noop periods.
    media_info = cfg.medias[0]

    # Timeline layout: noop | prefetch | freeze | playback | freeze | noop
    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration
    duration = range_stop + noop_duration

    cfg.duration = duration
    cfg.aspect_ratio = (media_info.width, media_info.height)

    # Map scene time [play_start, play_stop] to media time
    # [media_seek, media_seek + playback_duration]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    media = ngl.Media(media_info.filename, time_anim=ngl.AnimatedTime(media_animkf))
    texture = ngl.Texture2D(data_src=media)
    render = ngl.RenderTexture(texture)

    ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    return ngl.TimeRangeFilter(render, ranges=ranges, prefetch_time=prefetch_duration)
def queued_medias(cfg, overlap_time=1.0, dim=3):
    """Queue of medias, mainly used as a demonstration for the prefetch/release mechanism"""
    nb_videos = dim * dim
    grid = AutoGrid(range(nb_videos))
    filters = []
    for video_id, _, col, pos in grid:
        start = video_id * cfg.duration / nb_videos

        # Real-time playback shifted so the media starts at `start`
        animkf = [
            ngl.AnimKeyFrameFloat(start, 0),
            ngl.AnimKeyFrameFloat(start + cfg.duration, cfg.duration),
        ]
        media = ngl.Media(cfg.medias[video_id % len(cfg.medias)].filename,
                          time_anim=ngl.AnimatedTime(animkf))
        media.set_label("media #%d" % video_id)

        texture = ngl.Texture2D(data_src=media)
        render = ngl.RenderTexture(texture)
        render.set_label("render #%d" % video_id)
        render = grid.place_node(render, (col, pos))

        # Draw nothing before `start`, then keep the slot alive slightly
        # longer than its time share (overlap_time) so the next media can be
        # prefetched while the current one still plays
        trf = ngl.TimeRangeFilter(render)
        if start:
            trf.add_ranges(ngl.TimeRangeModeNoop(0))
        trf.add_ranges(
            ngl.TimeRangeModeCont(start),
            ngl.TimeRangeModeNoop(start + cfg.duration / nb_videos + overlap_time),
        )
        filters.append(trf)

    return ngl.Group(children=filters)
def simple_transition(cfg, transition_start=2, transition_duration=4):
    """Fading transition between two medias"""
    cfg.duration = transition_start * 2 + transition_duration
    transition_end = transition_start + transition_duration

    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))

    # Program blending two textures according to a `delta` resource
    mix_program = ngl.Program(vertex=cfg.get_vert("dual-tex"), fragment=cfg.get_frag("tex-mix"))
    mix_program.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_tex1_coord=ngl.IOVec2())

    media1 = ngl.Media(cfg.medias[0].filename, label="media #1")
    media2 = ngl.Media(cfg.medias[1 % len(cfg.medias)].filename, label="media #2")

    # Second media starts playing when the transition starts
    animkf_m2 = [
        ngl.AnimKeyFrameFloat(transition_start, 0),
        ngl.AnimKeyFrameFloat(transition_start + cfg.duration, cfg.duration),
    ]
    media2.set_time_anim(ngl.AnimatedTime(animkf_m2))

    tex1 = ngl.Texture2D(data_src=media1, label="texture #1")
    tex2 = ngl.Texture2D(data_src=media2, label="texture #2")

    render1 = ngl.RenderTexture(tex1, label="render #1")
    render2 = ngl.RenderTexture(tex2, label="render #2")

    # delta goes from 1 (media #1 only) to 0 (media #2 only)
    delta = ngl.AnimatedFloat([
        ngl.AnimKeyFrameFloat(transition_start, 1.0),
        ngl.AnimKeyFrameFloat(transition_end, 0.0),
    ])

    transition = ngl.Render(quad, mix_program, label="transition")
    transition.update_frag_resources(tex0=tex1, tex1=tex2)
    transition.update_frag_resources(delta=delta)

    # media #1 alone, then the blend, then media #2 alone
    rf1 = ngl.TimeRangeFilter(render1, ranges=[ngl.TimeRangeModeNoop(transition_start)])
    rf2 = ngl.TimeRangeFilter(render2, ranges=[
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_end),
    ])
    rf1_2 = ngl.TimeRangeFilter(transition, ranges=[
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_start),
        ngl.TimeRangeModeNoop(transition_end),
    ])

    group = ngl.Group()
    group.add_children(rf1, rf1_2, rf2)
    return group
def simple_transition(cfg, transition_start=2, transition_duration=4):
    '''Fading transition between two medias'''
    cfg.duration = transition_start * 2 + transition_duration
    vertex = cfg.get_vert('dual-tex')
    fragment = cfg.get_frag('tex-mix')

    q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    # Program blending tex0 and tex1 according to the `delta` uniform
    p1_2 = ngl.Program(vertex=vertex, fragment=fragment)

    m1 = ngl.Media(cfg.medias[0].filename, label='media #1')
    m2 = ngl.Media(cfg.medias[1 % len(cfg.medias)].filename, label='media #2')

    # Second media starts playing from its beginning at transition_start
    animkf_m2 = [ngl.AnimKeyFrameFloat(transition_start, 0)]
    m2.set_time_anim(ngl.AnimatedTime(animkf_m2))

    t1 = ngl.Texture2D(data_src=m1, label='texture #1')
    t2 = ngl.Texture2D(data_src=m2, label='texture #2')

    program = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
    render1 = ngl.Render(q, program, label='render #1')
    render1.update_textures(tex0=t1)
    render2 = ngl.Render(q, program, label='render #2')
    render2.update_textures(tex0=t2)

    # delta animates from 1 (media #1 only) to 0 (media #2 only)
    delta_animkf = [
        ngl.AnimKeyFrameFloat(transition_start, 1.0),
        ngl.AnimKeyFrameFloat(transition_start + transition_duration, 0.0)
    ]
    delta = ngl.AnimatedFloat(delta_animkf)  # fixed: dropped unused `anim` alias

    render1_2 = ngl.Render(q, p1_2, label='transition')
    render1_2.update_textures(tex0=t1, tex1=t2)
    render1_2.update_uniforms(delta=delta)

    # media #1 alone, then the blend, then media #2 alone
    rr1 = [ngl.TimeRangeModeNoop(transition_start)]
    rr2 = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_start + transition_duration),
    ]
    rr1_2 = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(transition_start),
        ngl.TimeRangeModeNoop(transition_start + transition_duration),
    ]

    rf1 = ngl.TimeRangeFilter(render1, ranges=rr1)
    rf2 = ngl.TimeRangeFilter(render2, ranges=rr2)
    rf1_2 = ngl.TimeRangeFilter(render1_2, ranges=rr1_2)

    g = ngl.Group()
    g.add_children(rf1, rf1_2, rf2)
    return g
def _get_data_streamed_buffer_vec4_scene(cfg, scale, show_dbg_points):
    # Scene rendering a StreamedBufferVec4 whose content changes every second
    duration = _N
    cfg.duration = duration * scale
    cfg.aspect_ratio = (1, 1)
    size = _N
    data_size = _N * _N

    # Optional time remap so the stream is consumed over a scaled duration
    time_anim = None
    if scale != 1:
        time_anim = ngl.AnimatedTime([
            ngl.AnimKeyFrameFloat(0, 0),
            ngl.AnimKeyFrameFloat(cfg.duration, duration),
        ])

    # One PTS per second, expressed in microseconds; the very first entry is
    # slightly offset on purpose
    pts_data = array.array('q')
    assert pts_data.itemsize == 8
    for i in range(duration):
        offset = 10000 if i == 0 else 0
        pts_data.append(i * 1000000 + offset)

    # data_size vec4 entries per time step, with monotonically increasing values
    vec4_data = array.array('f')
    for i in range(duration):
        for j in range(data_size):
            v = i / float(duration) + j / float(data_size * duration)
            vec4_data.extend((v, v, v, v))

    pts_buffer = ngl.BufferInt64(data=pts_data)
    vec4_buffer = ngl.BufferVec4(data=vec4_data)

    streamed_buffer = ngl.StreamedBufferVec4(data_size, pts_buffer, vec4_buffer, time_anim=time_anim, label='data')
    streamed_block = ngl.Block(layout='std140', label='streamed_block', fields=(streamed_buffer, ))

    shader_params = dict(data_size=data_size, size=size)
    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    program = ngl.Program(
        vertex=_RENDER_STREAMEDBUFFER_VERT,
        fragment=_RENDER_STREAMEDBUFFER_FRAG % shader_params,
    )
    program.update_vert_out_vars(var_uvcoord=ngl.IOVec2())
    render = ngl.Render(quad, program)
    render.update_frag_resources(streamed=streamed_block)

    group = ngl.Group(children=(render, ))
    if show_dbg_points:
        group.add_children(get_debug_points(cfg, _get_data_streamed_buffer_cuepoints()))
    return group
def media_flat_remap(cfg):
    # Flat time remap: a single keyframe maps the whole scene to one media timestamp
    media_info = cfg.medias[0]
    cfg.duration = media_info.duration
    cfg.aspect_ratio = (media_info.width, media_info.height)

    remap = ngl.AnimatedTime([ngl.AnimKeyFrameFloat(cfg.duration / 2, 1.833)])
    media = ngl.Media(media_info.filename, time_anim=remap)
    texture = ngl.Texture2D(data_src=media)
    return ngl.RenderTexture(texture)
def playback_speed(cfg, speed=1.0):
    """Adjust media playback speed using animation keyframes"""
    media_info = cfg.medias[0]
    media_duration = media_info.duration

    # Skip the first seconds of the media, clamped to its actual duration
    initial_seek = min(media_duration, 5)
    cfg.duration = (media_duration - initial_seek) / speed
    cfg.aspect_ratio = (media_info.width, media_info.height)

    geometry = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))

    # Map scene time [0, cfg.duration] to media time [initial_seek, media_duration]
    time_animkf = [
        ngl.AnimKeyFrameFloat(0, initial_seek),
        ngl.AnimKeyFrameFloat(cfg.duration, media_duration),
    ]
    media = ngl.Media(media_info.filename, time_anim=ngl.AnimatedTime(time_animkf))
    texture = ngl.Texture2D(data_src=media)
    return ngl.RenderTexture(texture, geometry=geometry)
def flat_remap(cfg):
    # Force the reference test media so the scene is reproducible
    cfg.medias = [Media('ngl-media-test.nut')]
    media_info = cfg.medias[0]
    cfg.duration = media_info.duration
    cfg.aspect_ratio = (media_info.width, media_info.height)

    # Constant remap: both keyframes hold the media time at 1.833
    remap_kf = [
        ngl.AnimKeyFrameFloat(-0.4, 1.833),
        ngl.AnimKeyFrameFloat(cfg.duration, 1.833),
    ]
    media = ngl.Media(media_info.filename, time_anim=ngl.AnimatedTime(remap_kf))
    texture = ngl.Texture2D(data_src=media)

    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    render = ngl.Render(quad)
    render.update_frag_resources(tex0=texture)
    return render
def playback_speed(cfg, speed=1.0):
    '''Adjust media playback speed using animation keyframes'''
    m0 = cfg.medias[0]
    media_duration = m0.duration
    # Fix: clamp the seek to the media duration so medias shorter than 5s do
    # not produce a negative rush_duration (and thus a negative cfg.duration)
    initial_seek = min(media_duration, 5)
    rush_duration = media_duration - initial_seek
    cfg.duration = rush_duration / speed
    cfg.aspect_ratio = (m0.width, m0.height)

    q = ngl.Quad((-0.5, -0.5, 0), (1, 0, 0), (0, 1, 0))
    # Map scene time [0, cfg.duration] to media time [initial_seek, media_duration]
    time_animkf = [ngl.AnimKeyFrameFloat(0, initial_seek),
                   ngl.AnimKeyFrameFloat(cfg.duration, media_duration)]
    m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(time_animkf))
    t = ngl.Texture2D(data_src=m)
    p = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
    render = ngl.Render(q, p)
    render.update_textures(tex0=t)
    return render
def _get_time_scene(cfg):
    # Time remapping scene: seek into the media, freeze first/last frames,
    # and pad both ends of the timeline with noop periods.
    media_info = cfg.medias[0]

    # Timeline layout: noop | prefetch | freeze | playback | freeze | noop
    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration
    duration = range_stop + noop_duration

    cfg.duration = duration
    cfg.aspect_ratio = (media_info.width, media_info.height)

    # Map scene time [play_start, play_stop] to media time
    # [media_seek, media_seek + playback_duration]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    quad = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    media = ngl.Media(media_info.filename, time_anim=ngl.AnimatedTime(media_animkf))
    texture = ngl.Texture2D(data_src=media)

    program = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
    program.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
    render = ngl.Render(quad, program)
    render.update_frag_resources(tex0=texture)

    ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    return ngl.TimeRangeFilter(render, ranges=ranges, prefetch_time=prefetch_duration)
def media_queue(cfg, overlap_time=7.0, dim=3):
    # Grid of medias played one after the other, with overlapping time ranges
    cfg.duration = 10
    cfg.aspect_ratio = (1, 1)

    nb_medias = dim * dim
    # Restrict to the media files this demo handles
    filenames = [m.filename for m in cfg.medias if m.filename.endswith(("mp4", "jpg"))]

    queued = []
    grid = AutoGrid(range(nb_medias))
    for video_id, _, col, pos in grid:
        start = video_id * cfg.duration / nb_medias

        # Real-time playback shifted so the media starts at `start`
        animkf = [
            ngl.AnimKeyFrameFloat(start, 0),
            ngl.AnimKeyFrameFloat(start + cfg.duration, cfg.duration),
        ]
        media = ngl.Media(filenames[video_id % len(filenames)], time_anim=ngl.AnimatedTime(animkf))
        texture = ngl.Texture2D(data_src=media, min_filter="linear", mag_filter="linear")
        render = grid.place_node(ngl.RenderTexture(texture), (col, pos))

        # Draw nothing before `start`, then keep the slot alive past its time
        # share (overlap_time) for prefetch/release demonstration purposes
        trf = ngl.TimeRangeFilter(render)
        if start:
            trf.add_ranges(ngl.TimeRangeModeNoop(0))
        trf.add_ranges(
            ngl.TimeRangeModeCont(start),
            ngl.TimeRangeModeNoop(start + cfg.duration / nb_medias + overlap_time),
        )
        queued.append(trf)

    return ngl.Group(children=queued)
def time_remapping(cfg):
    '''
    Time remapping in the following order:
    - nothing displayed for a while (but media prefetch happening in background)
    - first frame displayed for a while
    - normal playback
    - last frame displayed for a while (even though the media is closed)
    - nothing again until the end
    '''
    m0 = cfg.medias[0]

    # Timeline layout: noop | prefetch | freeze | playback | freeze | noop
    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration
    duration = range_stop + noop_duration

    cfg.duration = duration
    cfg.aspect_ratio = (m0.width, m0.height)

    # Map scene time [play_start, play_stop] to media time
    # [media_seek, media_seek + playback_duration]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    q = ngl.Quad((-1, -1, 0), (2, 0, 0), (0, 2, 0))
    m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
    # Verbose player logs to observe the prefetch/release behavior
    m.set_sxplayer_min_level('verbose')
    t = ngl.Texture2D(data_src=m)
    p = ngl.Program(vertex=cfg.get_vert('texture'), fragment=cfg.get_frag('texture'))
    p.update_vert_out_vars(var_tex0_coord=ngl.IOVec2(), var_uvcoord=ngl.IOVec2())
    r = ngl.Render(q, p)
    r.update_frag_resources(tex0=t)

    # The render is only active within [range_start, range_stop]
    time_ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    rf = ngl.TimeRangeFilter(r, ranges=time_ranges, prefetch_time=prefetch_duration)

    # Static overlay summarizing the timings
    base_string = 'media time: %2g to %2g\nscene time: %2g to %2g\ntime range: %2g to %2g' % (
        media_seek, media_seek + playback_duration, play_start, play_stop, range_start, range_stop)
    text = ngl.Text(base_string,
                    box_height=(0, 0.3, 0),
                    box_corner=(-1, 1 - 0.3, 0),
                    aspect_ratio=cfg.aspect_ratio,
                    halign='left')

    group = ngl.Group()
    group.add_children(rf, text)

    # One label per phase of the timeline, each shown only during its phase
    steps = (
        ('default color, nothing yet', 0, noop_duration),
        ('default color, media prefetched', noop_duration, range_start),
        ('first frame', range_start, play_start),
        ('normal playback', play_start, play_stop),
        ('last frame', play_stop, range_stop),
        ('default color, media released', range_stop, duration),
    )
    for i, (description, start_time, end_time) in enumerate(steps):
        text = ngl.Text('%g to %g: %s' % (start_time, end_time, description),
                        aspect_ratio=cfg.aspect_ratio,
                        box_height=(0, 0.2, 0))
        text_tr = (
            ngl.TimeRangeModeNoop(0),
            ngl.TimeRangeModeCont(start_time),
            ngl.TimeRangeModeNoop(end_time),
        )
        text_rf = ngl.TimeRangeFilter(text, ranges=text_tr, label='text-step-%d' % i)
        group.add_children(text_rf)

    # Alpha blending so the text overlays compose over the video
    return ngl.GraphicConfig(group,
                             blend=True,
                             blend_src_factor='src_alpha',
                             blend_dst_factor='one_minus_src_alpha',
                             blend_src_factor_a='zero',
                             blend_dst_factor_a='one')
def time_remapping(cfg):
    """
    Time remapping in the following order:
    - nothing displayed for a while (but media prefetch happening in background)
    - first frame displayed for a while
    - normal playback
    - last frame displayed for a while (even though the media is closed)
    - nothing again until the end
    """
    m0 = cfg.medias[0]

    # Timeline layout: noop | prefetch | freeze | playback | freeze | noop
    media_seek = 10
    noop_duration = 2
    prefetch_duration = 2
    freeze_duration = 3
    playback_duration = 5

    range_start = noop_duration + prefetch_duration
    play_start = range_start + freeze_duration
    play_stop = play_start + playback_duration
    range_stop = play_stop + freeze_duration
    duration = range_stop + noop_duration

    cfg.duration = duration
    cfg.aspect_ratio = (m0.width, m0.height)

    # Map scene time [play_start, play_stop] to media time
    # [media_seek, media_seek + playback_duration]
    media_animkf = [
        ngl.AnimKeyFrameFloat(play_start, media_seek),
        ngl.AnimKeyFrameFloat(play_stop, media_seek + playback_duration),
    ]
    m = ngl.Media(m0.filename, time_anim=ngl.AnimatedTime(media_animkf))
    # Verbose player logs to observe the prefetch/release behavior
    m.set_sxplayer_min_level("verbose")
    t = ngl.Texture2D(data_src=m)
    r = ngl.RenderTexture(t)

    # The render is only active within [range_start, range_stop]
    time_ranges = [
        ngl.TimeRangeModeNoop(0),
        ngl.TimeRangeModeCont(range_start),
        ngl.TimeRangeModeNoop(range_stop),
    ]
    rf = ngl.TimeRangeFilter(r, ranges=time_ranges, prefetch_time=prefetch_duration)

    # Static overlay summarizing the timings
    base_string = "media time: {:2g} to {:2g}\nscene time: {:2g} to {:2g}\ntime range: {:2g} to {:2g}".format(
        media_seek, media_seek + playback_duration, play_start, play_stop, range_start, range_stop
    )
    text = ngl.Text(
        base_string, box_height=(0, 0.3, 0), box_corner=(-1, 1 - 0.3, 0), aspect_ratio=cfg.aspect_ratio, halign="left"
    )

    group = ngl.Group()
    group.add_children(rf, text)

    # One label per phase of the timeline, each shown only during its phase
    steps = (
        ("default color, nothing yet", 0, noop_duration),
        ("default color, media prefetched", noop_duration, range_start),
        ("first frame", range_start, play_start),
        ("normal playback", play_start, play_stop),
        ("last frame", play_stop, range_stop),
        ("default color, media released", range_stop, duration),
    )
    for i, (description, start_time, end_time) in enumerate(steps):
        text = ngl.Text(
            f"{start_time:g} to {end_time:g}: {description}", aspect_ratio=cfg.aspect_ratio, box_height=(0, 0.2, 0)
        )
        text_tr = (
            ngl.TimeRangeModeNoop(0),
            ngl.TimeRangeModeCont(start_time),
            ngl.TimeRangeModeNoop(end_time),
        )
        text_rf = ngl.TimeRangeFilter(text, ranges=text_tr, label="text-step-%d" % i)
        group.add_children(text_rf)

    return group