예제 #1
0
 def _init_ctx(self, rendering_backend):
     # Lazily create a tiny (16x16) offscreen node.gl context for the
     # requested backend; calling this again is a no-op.
     if not self._ctx:
         self._ctx = ngl.Context()
         self._ctx.configure(
             backend=misc.get_backend(rendering_backend),
             offscreen=1,
             width=16,
             height=16,
         )
예제 #2
0
 def _init_viewer(self, rendering_backend):
     # Lazily create a tiny (16x16) offscreen viewer for the requested
     # backend; calling this again is a no-op.
     if not self._viewer:
         self._viewer = ngl.Viewer()
         self._viewer.configure(
             backend=misc.get_backend(rendering_backend),
             offscreen=1,
             width=16,
             height=16,
         )
예제 #3
0
 def _configure_viewer(self):
     # Push the current window/rendering settings to the viewer.
     settings = dict(
         platform=ngl.PLATFORM_AUTO,
         backend=misc.get_backend(self._backend),
         window=self._window,
         width=self._width,
         height=self._height,
         viewport=misc.get_viewport(self._width, self._height, self._aspect_ratio),
         swap_interval=1,
         samples=self._samples,
         clear_color=self._clear_color,
     )
     self._viewer.configure(**settings)
예제 #4
0
    def render_frames(self):
        """Render every keyframe offscreen and yield (width, height, capture_buffer).

        The same capture buffer object is reused across frames, so consumers
        must copy it if they need to keep a frame's data.
        """
        # We make sure the list of medias is explicitly empty. If we don't, a
        # jobbed make on the tests will attempt concurrent generations of a
        # default ngl-media.mp4.
        idict = dict(medias=[])

        backend = os.environ.get('BACKEND')
        if backend:
            idict['backend'] = backend

        ret = self._scene_func(idict=idict, **self._scene_kwargs)
        width, height = self._width, self._height
        duration = ret['duration']
        scene = ret['scene']

        capture_buffer = bytearray(width * height * 4)
        viewer = ngl.Context()
        assert viewer.configure(
            offscreen=1,
            width=width,
            height=height,
            backend=get_backend(backend) if backend else ngl.BACKEND_AUTO,
            samples=self._samples,
            clear_color=self._clear_color,
            capture_buffer=capture_buffer) == 0
        timescale = duration / float(self._nb_keyframes)

        if self._scene_wrap:
            scene = self._scene_wrap(scene)

        if self._exercise_dot:
            assert scene.dot()

        if self._exercise_serialization:
            scene_str = scene.serialize()
            # Fail loudly instead of silently rendering nothing if the scene
            # cannot be set (matches the configure() check above).
            assert viewer.set_scene_from_string(scene_str) == 0
        else:
            assert viewer.set_scene(scene) == 0

        for t_id in range(self._nb_keyframes):
            if self._keyframes_callback:
                self._keyframes_callback(t_id)
            viewer.draw(t_id * timescale)

            yield (width, height, capture_buffer)

            # The dot() exercise must happen after a draw when the scene was
            # not round-tripped through serialization.
            if not self._exercise_serialization and self._exercise_dot:
                scene.dot()
예제 #5
0
    def render_frames(self):
        """Render every keyframe offscreen and yield (width, height, capture_buffer).

        The capture buffer is reused between yields; callers must copy the
        data if they need to retain a frame.
        """
        idict = {}

        env_backend = os.environ.get("BACKEND")
        if env_backend:
            idict["backend"] = env_backend

        scene_info = self._scene_func(idict=idict, **self._scene_kwargs)
        width, height = self._width, self._height
        duration = scene_info["duration"]
        scene = scene_info["scene"]

        capture_buffer = bytearray(width * height * 4)
        ctx = ngl.Context()
        config_status = ctx.configure(
            offscreen=1,
            width=width,
            height=height,
            backend=get_backend(env_backend) if env_backend else ngl.BACKEND_AUTO,
            samples=self._samples,
            clear_color=self._clear_color,
            capture_buffer=capture_buffer,
            hud=self._hud,
            hud_export_filename=self._hud_export_filename,
        )
        assert config_status == 0
        timescale = duration / float(self._nb_keyframes)

        if self._exercise_dot:
            assert scene.dot()

        if self._exercise_serialization:
            # Round-trip through the serialized form to exercise that code path
            assert ctx.set_scene_from_string(scene.serialize()) == 0
        else:
            assert ctx.set_scene(scene) == 0

        for frame_idx in range(self._nb_keyframes):
            if self._keyframes_callback:
                self._keyframes_callback(frame_idx)
            ctx.draw(frame_idx * timescale)

            yield (width, height, capture_buffer)

            if not self._exercise_serialization and self._exercise_dot:
                scene.dot()
예제 #6
0
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#

import math
import os
import random

from pynodegl_utils.misc import get_backend
from pynodegl_utils.toolbox.grid import autogrid_simple

import pynodegl as ngl

# Rendering backend selected through the BACKEND environment variable;
# falls back to backend auto-detection when the variable is unset.
_backend_str = os.environ.get("BACKEND")
_backend = get_backend(_backend_str) if _backend_str else ngl.BACKEND_AUTO


def _get_scene(geometry=None):
    """Build a minimal color-render scene, defaulting to a quad geometry."""
    if geometry is None:
        geometry = ngl.Quad()
    return ngl.RenderColor(geometry=geometry)


def api_backend():
    """Check that configuring a context with a bogus backend id fails."""
    ctx = ngl.Context()
    # 0x1234 is not a valid backend constant, so configure() must report
    # an error (negative return code).
    assert ctx.configure(backend=0x1234) < 0
    del ctx


def api_reconfigure():
    ctx = ngl.Context()
    def _export(self, filename, width, height, extra_enc_args=None):
        """Render the selected scene offscreen and pipe raw RGBA frames to ffmpeg.

        Returns True on success, False when no scene is selected. Emits
        `progressed` (0-100) while encoding and `failed` on error.
        """
        # fd_w is written by us with raw frames; fd_r is inherited by ffmpeg.
        fd_r, fd_w = os.pipe()

        cfg = self._get_scene_func()
        if not cfg:
            self.failed.emit("You didn't select any scene to export.")
            return False

        # fps is a (numerator, denominator) tuple (see the "%d/%d" format below).
        fps = cfg["framerate"]
        duration = cfg["duration"]
        samples = cfg["samples"]

        # ffmpeg reads raw RGBA frames of the exact render size from the pipe.
        cmd = [
            # fmt: off
            "ffmpeg", "-r", "%d/%d" % fps,
            "-nostats", "-nostdin",
            "-f", "rawvideo",
            "-video_size", "%dx%d" % (width, height),
            "-pixel_format", "rgba",
            "-i", "pipe:%d" % fd_r
            # fmt: on
        ]
        if extra_enc_args:
            cmd += extra_enc_args
        cmd += ["-y", filename]

        # pass_fds keeps the read end open in the child; we then close our
        # copy so that ffmpeg sees EOF once we close fd_w at the end.
        reader = subprocess.Popen(cmd, pass_fds=(fd_r,))
        os.close(fd_r)

        # Reused for every frame: the context writes each draw into it in place.
        capture_buffer = bytearray(width * height * 4)

        # node.gl context
        # NOTE(review): configure() and set_scene_from_string() return codes
        # are not checked here — a failure would silently produce empty output.
        ctx = ngl.Context()
        ctx.configure(
            platform=ngl.PLATFORM_AUTO,
            backend=get_backend(cfg["backend"]),
            offscreen=1,
            width=width,
            height=height,
            viewport=get_viewport(width, height, cfg["aspect_ratio"]),
            samples=samples,
            clear_color=cfg["clear_color"],
            capture_buffer=capture_buffer,
        )
        ctx.set_scene_from_string(cfg["scene"])

        if self._time is not None:
            # Single-frame export at a fixed time.
            ctx.draw(self._time)
            os.write(fd_w, capture_buffer)
            self.progressed.emit(100)
        else:
            # Draw every frame
            nb_frame = int(duration * fps[0] / fps[1])
            for i in range(nb_frame):
                if self._cancelled:
                    break
                time = i * fps[1] / float(fps[0])
                ctx.draw(time)
                os.write(fd_w, capture_buffer)
                self.progressed.emit(i * 100 / nb_frame)
            self.progressed.emit(100)

        # Closing the write end signals EOF to ffmpeg; wait for it to finish.
        os.close(fd_w)
        reader.wait()
        return True