Example #1
class Render(Enhanced):
    _axes_order_corrected = False  # the axes order is corrected at the first render

    def __init__(
        self,
        verbose,
        display_inset,
        camera,
        screenshot_kwargs,
        use_default_key_bindings,
    ):
        """
            Creates and manages a Plotter instance

            :param add_root: if False a rendered outline of the whole brain is added to the scene (default value None)
            :param verbose: if False less feedback is printed to screen (default value True)
            :param display_inset: if False the inset displaying the brain's outline is not rendered (but the root is added to the scene) (default value None)
            :param camera: name of the camera parameters setting to use (controls the orientation of the rendered scene)
            :param screenshot_kwargs: pass a dictionary with keys:
                        - "folder" -> str, path to folder where to save screenshots
                        - "name" -> str, filename to prepend to screenshots files
                        - "format" -> str, format of the screenshot
            :param use_default_key_bindings: if True the defualt keybindings from vedo are used, otherwise
                            a custom function that can be used to take screenshots with the parameter above. 
        """
        Enhanced.__init__(self)

        # Setup a few rendering options
        self.verbose = verbose
        self.display_inset = (display_inset if display_inset is not None else
                              brainrender.DISPLAY_INSET)

        if vedosettings.notebookBackend == "k3d":
            self.jupyter = True
        else:
            self.jupyter = False

        if self.display_inset and self.jupyter:
            if self.verbose:
                print(
                    "Setting 'display_inset' to False as this feature is "
                    "not available in jupyter notebooks"
                )
            self.display_inset = False

        # Camera parameters
        self.camera = get_scene_camera(camera, self.atlas)

        # Create vedo plotter
        self.plotter = Plotter(**get_scene_plotter_settings(
            self.jupyter, self.atlas, self.verbose))

        if brainrender.AXES_STYLE == 7 and brainrender.SHOW_AXES:
            self.make_custom_axes = True  # to be made at render
        else:
            self.make_custom_axes = False

        self.screenshot_kwargs = screenshot_kwargs

        if not use_default_key_bindings:
            self.plotter.keyPressFunction = self.keypress
            self.verbose = False

        if not brainrender.SCREENSHOT_TRANSPARENT_BACKGROUND:
            vedosettings.screenshotTransparentBackground = False
            vedosettings.useFXAA = True

    def _make_custom_axes(self):
        """
            When using `ruler` axes (vedy style 7), we need to 
            customize them a little bit, this function takes care of it. 
        """
        raise NotImplementedError(
            "Currently ony AXES_STYLE=1 is supported, sorry")

        # Get plotter and axes color
        plt = self.plotter
        c = (0.9, 0.9, 0.9)
        if np.sum(plt.renderer.GetBackground()) > 1.5:
            c = (0.1, 0.1, 0.1)

        bounds = [
            item for sublist in self.atlas._root_bounds for item in sublist
        ]
        rulax = buildRulerAxes(
            bounds,
            c=c,
            units="μm",
            xtitle="AP - ",
            ytitle="DV - ",
            ztitle="LR - ",
            precision=1,
            labelRotation=0,
            axisRotation=90,
            xycross=False,
        )
        rulax.UseBoundsOff()
        rulax.PickableOff()
        plt.renderer.AddActor(rulax)
        plt.axes_instances[0] = rulax

        return

    def _correct_axes(self):
        """
            When the scene is first rendered, a transform matrix
            is applied to each actor's points to correct orientation
            mismatches: https://github.com/brainglobe/bg-atlasapi/issues/73
        """
        self._axes_order_corrected = True

        # Flip every actor's orientation
        _silhouettes = []
        for actor in self.actors:
            try:
                _name = actor.name
            except AttributeError:
                """ not all scene objects will have a name """
                continue

            if _name != "silhouette":
                try:
                    actor.applyTransform(mtx).reverse()
                except AttributeError:
                    pass
            else:
                """
                    Silhouettes don't transform properly,
                    we need to re-generate them
                """
                _silhouettes.append(actor)

        for sil in _silhouettes:
            self.actors.pop(self.actors.index(sil))
            self.add_silhouette(sil._original_mesh)
            del sil

    def apply_render_style(self):
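        """
            Applies the brainrender.SHADER_STYLE lighting style to every
            actor in the scene (actors without a lighting method are skipped).
        """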
        if brainrender.SHADER_STYLE is None:  # No style to apply
            return

        for actor in self.actors:
            if actor is not None:
                try:
                    if brainrender.SHADER_STYLE != "cartoon":
                        actor.lighting(style=brainrender.SHADER_STYLE)
                    else:
                        actor.lighting("off")
                except AttributeError:
                    pass  # Some types of actors such as Text 2D don't have this attribute!

    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            if not self.jupyter:  # cameras work differently in jupyter notebooks
                if camera is None:
                    camera = self.camera

                if isinstance(camera, (str, dict)):  # otherwise assume it's a vtk camera
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive and self.verbose:
                if not self.jupyter:
                    print(
                        f"\n\n[{mocassin}]Rendering scene.\n   Press [{orange}]'q'[/{orange}] to Quit"
                    )
                elif self.jupyter:
                    print(
                        f"[{mocassin}]The scene is ready to render in your jupyter notebook"
                    )

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.2 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True

        if self.make_custom_axes:
            self._make_custom_axes()
            self.make_custom_axes = False

        # Correct axes orientations
        if not self._axes_order_corrected:
            self._correct_axes()

        show(*self.actors, *self.actors_labels, **args_dict)

    def close(self):
        closePlotter()

    def export_for_web(self, filepath="brexport.html"):
        """
            This function is used to export a brainrender scene
            for hosting it online. It saves an html file that can
            be opened in a web browser to show an interactive brainrender scene
        """
        if not filepath.endswith(".html"):
            raise ValueError("Filepath should point to a .html file")

        # prepare settings
        vedosettings.notebookBackend = "k3d"

        # Create new plotter and save to file
        plt = Plotter()
        plt.add(self.actors)
        plt = plt.show(interactive=False)
        plt.camera[-2] = -1

        if self.verbose:
            print(
                "Ready for exporting. Exporting scenes with many actors might require a few minutes"
            )
        with open(filepath, "w") as fp:
            fp.write(plt.get_snapshot())

        if self.verbose:
            print(
                f"The brainrender scene has been exported for web. The results are saved at {filepath}"
            )

        # Reset settings
        vedosettings.notebookBackend = None
        self.jupyter = False

    # ---------------------------------------------------------------------------- #
    #                               USER INTERACTION                               #
    # ---------------------------------------------------------------------------- #
    def keypress(self, key):
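        """
            Handles key presses during the interactive view:
            - s: takes a screenshot
            - q: closes the window
            - c: prints the current camera parameters
        """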
        if key == "s":
            self.take_screenshot()

        elif key == "q":
            self.close()

        elif key == "c":
            print(f"Camera parameters:\n{get_camera_params(scene=self)}")

    def take_screenshot(self,
                        screenshots_folder=None,
                        screenshot_name=None,
                        scale=None):
        """
        :param screenshots_folder: folder where the screenshot will be saved
        :param screenshot_name: name of the saved file
        :param scale: int, upsampling factor over screen resolution. Increase to export
        higher quality images
        """

        if screenshots_folder is None:
            screenshots_folder = self.screenshot_kwargs.get(
                "folder", brainrender.DEFAULT_SCREENSHOT_FOLDER)
        screenshots_folder = Path(screenshots_folder)  # accept str or Path
        screenshots_folder.mkdir(exist_ok=True)

        if screenshot_name is None:
            name = self.screenshot_kwargs.get("name", "screenshot")
            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            file_format = self.screenshot_kwargs.get("format", "png").lstrip(".")
            screenshot_name = f"{name}_{timestamp}.{file_format}"

        if not self.is_rendered:
            print(
                "You need to render the scene before you can take a screenshot"
            )
            return

        if brainrender.SCREENSHOT_TRANSPARENT_BACKGROUND:
            warnings.warn(
                "BRAINRENDER - settings: screenshots are set to have transparent background. Set the parameter 'SCREENSHOT_TRANSPARENT_BACKGROUND' to False if you'd prefer a not transparent background"
            )

        savename = str(screenshots_folder / screenshot_name)
        print(f"\nSaving new screenshot at {savename}\n")
        self.plotter.screenshot(filename=savename, scale=scale)
        return savename
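
# A minimal usage sketch for the Render mixin above. `Scene` is a hypothetical
# subclass that provides `self.atlas`, `self.actors` and `self.actors_labels`;
# the camera preset name and screenshot values are illustrative only.
scene = Scene(
    verbose=True,
    display_inset=None,
    camera="three_quarters",  # assumed preset name
    screenshot_kwargs={"folder": "./screenshots", "name": "scene", "format": "png"},
    use_default_key_bindings=False,
)
scene.render(interactive=True)
scene.take_screenshot()
scene.close()
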
Example #2
class Render:
    is_rendered = False
    plotter = None

    axes_names = ("AP", "DV", "LR")
    axes_lookup = {"x": "AP", "y": "DV", "z": "LR"}
    axes_indices = {"AP": 0, "DV": 1, "LR": 2}

    def __init__(self):
        """
        Backend for Scene, handles all rendering and exporting
        related tasks.
        """
        self._get_plotter()

    def _get_plotter(self):
        """
        Make a vedo plotter with
        fancy axes and all
        """
        self.plotter = Plotter(
            axes=self._make_axes() if settings.SHOW_AXES else None,
            pos=(0, 0),
            title="brainrender",
            bg=settings.BACKGROUND_COLOR,
            offscreen=settings.OFFSCREEN,
            size="full" if settings.WHOLE_SCREEN else "auto",
        )

        self.plotter.keyPressFunction = self.keypress

    def _make_axes(self):
        """
        Returns a dictionary with axes
        parameters for the vedo plotter
        """
        ax_idx = self.atlas.space.axes_order.index("frontal")

        # compute the atlas shape and z-axis tick labels
        atlas_shape = np.array(self.atlas.metadata["shape"]) * np.array(
            self.atlas.metadata["resolution"]
        )
        z_range = np.array([-atlas_shape[2], 0])
        z_ticks = [
            (-v, str(np.abs(v).astype(np.int32)))
            for v in np.linspace(
                0,
                atlas_shape[ax_idx],
                10,
            )
        ]

        if self.atlas.atlas_name == "allen_human_500um":
            z_range = None
            z_ticks = None
            logger.debug(
                "RENDER: manually forcing axes size for human atlas, atlas needs fixing"
            )

        # make custom axes dict
        axes = dict(
            axesLineWidth=3,
            tipSize=0,
            xtitle="AP (μm)",
            ytitle="DV (μm)",
            ztitle="LR (μm)",
            textScale=0.8,
            xTitleRotation=180,
            zrange=z_range,
            zValuesAndLabels=z_ticks,
            xyGrid=False,
            yzGrid=False,
            zxGrid=False,
            xUseBounds=True,
            yUseBounds=True,
            zUseBounds=True,
            xLabelRotation=180,
            yLabelRotation=180,
            zLabelRotation=90,
        )

        return axes

    def _prepare_actor(self, actor):
        """
        When an actor is first rendered, a transform matrix
        is applied to its points to correct axes orientation
        mismatches: https://github.com/brainglobe/bg-atlasapi/issues/73

        Once an actor is 'corrected' it spawns labels and silhouettes as needed
        """

        # Flip every actor's orientation
        if not actor._is_transformed:
            try:
                actor._mesh = actor.mesh.clone()
                actor._mesh.applyTransform(mtx)
            except AttributeError:  # some types of actors don't transform
                actor._is_transformed = True
            else:
                try:
                    actor.mesh.reverse()
                except AttributeError:  # Volumes don't have reverse
                    pass
                actor._is_transformed = True

        # Add silhouette and labels
        if actor._needs_silhouette and not self.backend:
            self.plotter.add(actor.make_silhouette().mesh)

        if actor._needs_label and not self.backend:
            self.labels.extend(actor.make_label(self.atlas))

    def _apply_style(self):
        """
        Sets the rendering style for each mesh
        """
        for actor in self.clean_actors:
            if settings.SHADER_STYLE != "cartoon":
                style = settings.SHADER_STYLE
            else:
                if self.backend:  # notebook backend
                    print(
                        'Shader style "cartoon" cannot be used in a notebook'
                    )
                style = "off"

            try:
                actor.mesh.reverse()  # flip normals
                actor.mesh.lighting(style=style)

                actor._mesh.reverse()
                actor._mesh.lighting(style=style)
            except AttributeError:
                pass

    def render(
        self,
        interactive=None,
        camera=None,
        zoom=None,
        update_camera=True,
        **kwargs,
    ):
        """
        Renders the scene.

        :param interactive: bool. If None, settings.INTERACTIVE is used.
            If True the program's execution is stopped and users
            can interact with the scene.
        :param camera: str, dict. If None the default camera is used.
            Pass a valid camera input to specify the camera position when
            the scene is rendered (an illustrative camera dict is sketched below).
        :param zoom: float, if None atlas default is used
        :param update_camera: bool, if False the camera is not changed
        :param kwargs: additional arguments to pass to self.plotter.show
        """
        logger.debug(
            f"Rendering scene. Interactive: {interactive}, camera: {camera}, zoom: {zoom}"
        )
        # get zoom
        zoom = zoom or self.atlas.zoom

        # get vedo plotter
        if self.plotter is None:
            self._get_plotter()

        # Get camera
        camera = camera or settings.DEFAULT_CAMERA
        if isinstance(camera, str):
            camera = get_camera(camera)
        else:
            camera = check_camera_param(camera)

        if camera["focalPoint"] is None:
            camera["focalPoint"] = self.root._mesh.centerOfMass()

        if not self.backend and camera is not None:
            camera = set_camera(self, camera)

        # Apply axes correction
        for actor in self.clean_actors:
            if not actor._is_transformed:
                self._prepare_actor(actor)
                self.plotter.add(actor.mesh)

            if actor._needs_silhouette or actor._needs_label:
                self._prepare_actor(actor)

        # add labels to the scene
        for label in self.labels:
            if label._is_added:
                continue
            else:
                label._mesh = label.mesh.clone()
                self._prepare_actor(label)
                self.plotter.add(label._mesh)
                label._is_added = True

        # Apply style
        self._apply_style()

        if self.inset and not self.is_rendered:
            self._get_inset()

        # render
        self.is_rendered = True
        if not self.backend:  # not using a notebook backend
            if interactive is None:
                interactive = settings.INTERACTIVE

            for txt in self.labels:
                txt.followCamera(self.plotter.camera)

            self.plotter.show(
                interactive=interactive,
                zoom=zoom,
                bg=settings.BACKGROUND_COLOR,
                offscreen=settings.OFFSCREEN,
                camera=camera.copy() if update_camera else None,
                interactorStyle=0,
                rate=40,
            )
        elif self.backend == "k3d":  # pragma: no cover
            # Remove silhouettes
            self.remove(*self.get_actors(br_class="silhouette"))
            print(
                f"[{teal}]Your scene is ready for rendering, use:\n",
                Syntax("from vedo import show", lexer_name="python"),
                Syntax("vedo.show(*scene.renderables)", lexer_name="python"),
                sep="\n",
            )
        else:  # pragma: no cover
            print(
                f"[{teal}]Your scene is ready for rendering, use:\n",
                Syntax("from itkwidgets import view", lexer_name="python"),
                Syntax(
                    "view(scene.plotter.show(*scene.renderables))",
                    lexer_name="python",
                ),
                sep="\n",
            )

    def close(self):
        closePlotter()

    def export(self, savepath):
        """
        Exports the scene to a .html
        file for online renderings.

        :param savepath: str, Path to a .html file to save the export
        """
        logger.debug(f"Exporting scene to {savepath}")
        _backend = self.backend

        if not self.is_rendered:
            self.render(interactive=False)

        path = Path(savepath)
        if path.suffix != ".html":
            raise ValueError("Savepath should point to a .html file")

        # prepare settings
        vsettings.notebookBackend = "k3d"

        # Create new plotter and save to file
        plt = Plotter()
        plt.add(self.renderables)
        plt = plt.show(interactive=False)
        plt.camera[-2] = -1

        with open(path, "w") as fp:
            fp.write(plt.get_snapshot())

        print(
            f"The brainrender scene has been exported for web. The results are saved at {path}"
        )

        # Reset settings
        vsettings.notebookBackend = None
        self.backend = _backend

        return str(path)

    def screenshot(self, name=None, scale=None):
        """
        Takes a screenshot of the current view
        and save it to file.
        Screenshots are saved in `screenshots_folder`
        (see Scene)

        :param name: str, name of png file
        :param scale: float, >1 for higher resolution
        """

        if not self.is_rendered:
            self.render(interactive=False)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        name = name or f"brainrender_screenshot_{timestamp}"
        if ".png" not in name:
            name += ".png"

        scale = scale or settings.SCREENSHOT_SCALE

        print(f"\nSaving new screenshot at {name}\n")

        savepath = str(self.screenshots_folder / name)
        logger.debug(f"Saving scene at {savepath}")
        self.plotter.screenshot(filename=savepath, scale=scale)
        return savepath

    def _print_camera(self):
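        """
        Prints the current camera parameters, formatted so they can be
        copied into a camera dict and passed to `render(camera=...)`.
        """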
        pms = get_camera_params(scene=self)

        focal = pms.pop("focalPoint", None)
        dst = pms.pop("distance", None)

        names = [
            f"[green bold]     '{k}'[/green bold]: [{amber}]{v},"
            for k, v in pms.items()
        ]
        print(
            f"[{deep_purple_light}]Camera parameters:",
            f"[{orange}]    {{",
            *names,
            f"[{orange}]   }}",
            f"[{deep_purple_light}]Additional, (optional) parameters:",
            f"[green bold]     'focalPoint'[/green bold]: [{amber}]{focal},",
            f"[green bold]     'distance'[/green bold]: [{amber}]{dst},",
            sep="\n",
        )

    def keypress(self, key):  # pragma: no cover
        """
        Handles key presses for the interactive view:
        -s: takes a screenshot
        -q: closes the window
        -c: prints the current camera parameters
        """
        if key == "s":
            self.screenshot()

        elif key == "q" or key == "Esc":
            self.close()

        elif key == "c":
            self._print_camera()
Example #3
class Render:
    transform_applied = False
    is_rendered = False
    plotter = None

    def __init__(self):
        """
            Backend for Scene, handles all rendering and exporting
            related tasks.
        """
        return

    def _get_plotter(self):
        # Make a vedo plotter
        self.plotter = Plotter(
            axes=self._make_axes() if settings.SHOW_AXES else None,
            pos=(0, 0),
            title="brainrender",
            bg=settings.BACKGROUND_COLOR,
            offscreen=settings.OFFSCREEN,
            size="full" if settings.WHOLE_SCREEN else "auto",
        )

        self.plotter.keyPressFunction = self.keypress

    def _make_axes(self):
        """
            Returns a dictionary with axes 
            parameters for the vedo plotter
        """
        ax_idx = self.atlas.space.axes_order.index("frontal")

        # compute the atlas shape and z-axis tick labels
        atlas_shape = np.array(self.atlas.metadata["shape"]) * np.array(
            self.atlas.metadata["resolution"])

        z_ticks = [(-v, str(np.abs(v).astype(np.int32))) for v in np.linspace(
            0,
            atlas_shape[ax_idx],
            10,
        )]

        # make custom axes dict
        axes = dict(
            axesLineWidth=3,
            tipSize=0,
            xtitle="AP (μm)",
            ytitle="DV (μm)",
            ztitle="LR (μm)",
            textScale=0.8,
            xTitleRotation=0,
            xFlipText=True,
            zrange=np.array([-atlas_shape[2], 0]),
            zValuesAndLabels=z_ticks,
            xyGrid=False,
            yzGrid=False,
            zxGrid=False,
        )

        return axes

    def _correct_axes(self):
        """
            When an actor is first rendered, a transform matrix
            is applied to its points to correct axes orientation
            mismatches: https://github.com/brainglobe/bg-atlasapi/issues/73

            Once an actor is 'corrected' it spawns labels and silhouettes as needed
        """
        self.transform_applied = True

        # Flip every actor's orientation
        for actor in self.clean_actors + self.labels:
            if not actor._is_transformed:
                actor.mesh.applyTransform(mtx).reverse()
                actor._is_transformed = True

            if actor._needs_silhouette:
                self.actors.append(actor.make_silhouette())

            if actor._needs_label:
                self.labels.extend(actor.make_label(self.atlas))

    def _apply_style(self):
        """
            Sets the rendering style for each mesh
        """
        for actor in self.clean_actors:
            if settings.SHADER_STYLE != "cartoon":
                actor.mesh.lighting(style=settings.SHADER_STYLE)
            else:
                actor.mesh.lighting("off")

    def render(self, interactive=None, camera=None, zoom=1.75, **kwargs):
        """
            Renders the scene.

            :param interactive: bool. If None, settings.INTERACTIVE is used.
                If True the program's execution is stopped and users
                can interact with the scene.
            :param camera: str, dict. If None the default camera is used.
                Pass a valid camera input to specify the camera position when
                the scene is rendered.
            :param zoom: float
            :param kwargs: additional arguments to pass to self.plotter.show
        """
        # get vedo plotter
        if self.plotter is None:
            self._get_plotter()

        # Get camera
        if camera is None:
            camera = get_camera(settings.DEFAULT_CAMERA)
        else:
            camera = check_camera_param(camera)

        if not self.jupyter:
            camera = set_camera(self, camera)

        # Apply axes correction
        self._correct_axes()

        # Apply style
        self._apply_style()

        if self.inset and not self.jupyter and not self.is_rendered:
            self._get_inset()

        # render
        self.is_rendered = True
        if not self.jupyter:
            if interactive is None:
                interactive = settings.INTERACTIVE

            for txt in self.labels:
                txt.followCamera(self.plotter.camera)

            self.plotter.show(
                *self.renderables,
                interactive=interactive,
                zoom=zoom,
                bg=settings.BACKGROUND_COLOR,
                offscreen=settings.OFFSCREEN,
                camera=camera.copy(),
                interactorStyle=0,
            )
        else:
            print(
                "Your scene is ready for rendering, use: `show(scene.renderables)`"
            )

    def close(self):
        closePlotter()

    def export(self, savepath):
        """
            Exports the scene to a .html
            file for online renderings.

            :param savepath: str, Path to a .html file to save the export
        """
        _jupyter = self.jupyter

        if not self.is_rendered:
            self.render(interactive=False)

        path = Path(savepath)
        if path.suffix != ".html":
            raise ValueError("Savepath should point to a .html file")

        # prepare settings
        vsettings.notebookBackend = "k3d"

        # Create new plotter and save to file
        plt = Plotter()
        plt.add(self.renderables)
        plt = plt.show(interactive=False)
        plt.camera[-2] = -1

        with open(path, "w") as fp:
            fp.write(plt.get_snapshot())

        print(
            f"The brainrender scene has been exported for web. The results are saved at {path}"
        )

        # Reset settings
        vsettings.notebookBackend = None
        self.jupyter = _jupyter

        return str(path)

    def screenshot(self, name=None, scale=None):
        """
            Takes a screenshot of the current view
            and save it to file.
            Screenshots are saved in `screenshots_folder`
            (see Scene)

            :param name: str, name of png file
            :param scale: float, >1 for higher resolution
        """
        if not self.is_rendered:
            self.render(interactive=False)

        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        name = name or f"brainrender_screenshot_{timestamp}"
        if ".png" not in name:
            name += ".png"

        scale = scale or settings.SCREENSHOT_SCALE

        print(f"\nSaving new screenshot at {name}\n")
        savepath = str(self.screenshots_folder / name)
        self.plotter.screenshot(filename=savepath, scale=scale)
        return savepath

    def _print_camera(self):
        pms = get_camera_params(scene=self)

        focal = pms.pop("focalPoint", None)
        dst = pms.pop("distance", None)

        names = [
            f"[green bold]     '{k}'[/green bold]: [{amber}]{v},"
            for k, v in pms.items()
        ]
        print(
            f"[{deep_purple_light}]Camera parameters:",
            f"[{orange}]    {{",
            *names,
            f"[{orange}]   }}",
            f"[{deep_purple_light}]Additional, (optional) parameters:",
            f"[green bold]     'focalPoint'[/green bold]: [{amber}]{focal},",
            f"[green bold]     'distance'[/green bold]: [{amber}]{dst},",
            sep="\n",
        )

    def keypress(self, key):  # pragma: no cover
        """
            Handles key presses for the interactive view:
            -s: takes a screenshot
            -q: closes the window
            -c: prints the current camera parameters
        """
        if key == "s":
            self.screenshot()

        elif key == "q" or key == "Esc":
            self.close()

        elif key == "c":
            self._print_camera()
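
# A minimal usage sketch for the backend above. `Scene` is a hypothetical
# subclass that provides the atlas, actors, labels, renderables and
# `screenshots_folder` attributes used by Render; paths and names are made up.
scene = Scene()
scene.render(interactive=False, zoom=1.75)
scene.screenshot(name="my_view", scale=2)
scene.export("my_scene.html")  # the savepath must end in .html
scene.close()
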
Example #4
class Render:
    def __init__(
        self,
        verbose,
        display_inset,
        camera,
        screenshot_kwargs,
        use_default_key_bindings,
    ):
        """
            Creates and manages a Plotter instance

            :param add_root: if False a rendered outline of the whole brain is added to the scene (default value None)
            :param verbose: if False less feedback is printed to screen (default value True)
            :param display_inset: if False the inset displaying the brain's outline is not rendered (but the root is added to the scene) (default value None)
            :param camera: name of the camera parameters setting to use (controls the orientation of the rendered scene)
            :param screenshot_kwargs: pass a dictionary with keys:
                        - 'folder' -> str, path to folder where to save screenshots
                        - 'name' -> str, filename to prepend to screenshots files
            :param use_default_key_bindings: if True the defualt keybindings from vedo are used, otherwise
                            a custom function that can be used to take screenshots with the parameter above. 
        """
        # Setup a few rendering options
        self.verbose = verbose
        self.display_inset = (display_inset if display_inset is not None else
                              brainrender.DISPLAY_INSET)

        if vedosettings.notebookBackend == "k3d":
            self.jupyter = True
        else:
            self.jupyter = False

        if self.display_inset and self.jupyter:
            print("Setting 'display_inset' to False as this feature is not \
                            available in juputer notebooks")
            self.display_inset = False

        # Camera parameters
        self.camera = get_scene_camera(camera, self.atlas)

        # Create vedo plotter
        self.plotter = Plotter(**get_scene_plotter_settings(self.jupyter))

        # Screenshot and keypress variables
        self.screenshots_folder = Path(
            screenshot_kwargs.pop("folder", self.atlas.output_screenshots))
        self.screenshots_name = screenshot_kwargs.pop(
            "name", brainrender.DEFAULT_SCREENSHOT_NAME)

        if not use_default_key_bindings:
            self.plotter.keyPressFunction = self.keypress
            self.verbose = False

        if not brainrender.SCREENSHOT_TRANSPARENT_BACKGROUND:
            vedosettings.screenshotTransparentBackground = False
            vedosettings.useFXAA = True

    def apply_render_style(self):
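        """
            Applies the brainrender.SHADER_STYLE lighting style to every
            actor in the scene (actors without a lighting method are skipped).
        """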
        if brainrender.SHADER_STYLE is None:  # No style to apply
            return

        for actor in self.actors:
            if actor is not None:
                try:
                    if brainrender.SHADER_STYLE != "cartoon":
                        actor.lighting(style=brainrender.SHADER_STYLE)
                    else:
                        actor.lighting("off")
                except AttributeError:
                    pass  # Some types of actors such as Text 2D don't have this attribute!

    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            if not self.jupyter:  # cameras work differently in jupyter notebooks
                if camera is None:
                    camera = self.camera

                if isinstance(camera, (str, dict)):  # otherwise assume it's a vtk camera
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive:
                if self.verbose and not self.jupyter:
                    print(brainrender.INTERACTIVE_MSG)
                elif self.jupyter:
                    print(
                        "The scene is ready to render in your jupyter notebook"
                    )
                else:
                    print("\n\nRendering scene.\n   Press 'q' to Quit")

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.85 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True
        show(*self.actors, *self.actors_labels, **args_dict)

    def close(self):
        closePlotter()

    def export_for_web(self, filepath="brexport.html"):
        """
            This function is used to export a brainrender scene
            for hosting it online. It saves an html file that can
            be opened in a web browser to show an interactive brainrender scene
        """
        if not filepath.endswith(".html"):
            raise ValueError("Filepath should point to a .html file")

        # prepare settings
        vedosettings.notebookBackend = "k3d"

        # Create new plotter and save to file
        plt = Plotter()
        plt.add(self.actors)
        plt = plt.show(interactive=False)
        plt.camera[-2] = -1

        print(
            "Ready for exporting. Exporting scenes with many actors might require a few minutes"
        )
        with open(filepath, "w") as fp:
            fp.write(plt.get_snapshot())

        print(
            f"The brainrender scene has been exported for web. The results are saved at {filepath}"
        )

        # Reset settings
        vedosettings.notebookBackend = None
        self.jupyter = False

    # ---------------------------------------------------------------------------- #
    #                               USER INTERACTION                               #
    # ---------------------------------------------------------------------------- #
    def keypress(self, key):
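        """
            Handles key presses during the interactive view:
            - s: takes a screenshot
            - q: closes the window
            - c: prints the current camera parameters
        """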
        if key == "s":
            self.take_screenshot()

        elif key == "q":
            self.close()

        elif key == "c":
            print(f"Camera parameters:\n{get_camera_params(scene=self)}")

    def take_screenshot(self):
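        """
            Saves a timestamped screenshot of the rendered scene
            in `self.screenshots_folder`.
        """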
        if not self.is_rendered:
            print(
                "You need to render the scene before you can take a screenshot"
            )
            return

        if brainrender.SCREENSHOT_TRANSPARENT_BACKGROUND:
            warnings.warn(
                "BRAINRENDER - settings: screenshots are set to have transparent background. Set the parameter 'SCREENSHOT_TRANSPARENT_BACKGROUND' to False if you'd prefer a not transparent background"
            )

        self.screenshots_folder.mkdir(exist_ok=True)

        savename = str(self.screenshots_folder / self.screenshots_name)
        savename += f'_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}'

        print(f"\nSaving screenshot at {savename}\n")
        self.plotter.screenshot(filename=savename)
        return savename
Example #5
                                           vmin=MINLAT,
                                           vmax=MAXLAT).addScalarBar(
                                               c='white',
                                               title='LAT (ms)   ',
                                               titleFontSize=fontSize,
                                               size=size)

a = 160  # azimuth
e = 0  # elevation
r = 0  # roll
z = 1  # zoom
verPoints = Points(vertices, r=5, c='white')

vplt = Plotter(N=1, axes=0, offscreen=True)
vplt.show(mesh, azimuth=a, elevation=e, roll=r, bg='black', zoom=z)
vplt.screenshot(filename=os.path.join(outSubDir, '0_mesh'),
                returnNumpy=False).close()

vplt = Plotter(N=1, axes=0, offscreen=True)
vplt.show(mesh,
          allLatPoints,
          azimuth=a,
          elevation=e,
          roll=r,
          bg='black',
          zoom=z)
vplt.screenshot(filename=os.path.join(outSubDir, '0_all'),
                returnNumpy=False).close()

vplt = Plotter(N=1, axes=0, offscreen=True)
vplt.show(mesh,
          origLatPoints,