Example #1
    def setup_plotter(self):
        """
            Changes the scene's default plotter
            with one attached to the qtWidget in the 
            pyqt application. 
        """
        # Get embedded plotter
        new_plotter = Plotter(qtWidget=self.vtkWidget)
        self.scene.plotter = new_plotter

        # Build and add axes, if enabled
        if self.axes:
            ax = addons.buildAxes(
                self.scene.root,
                xtitle="x [um]",
                xLabelOffset=0.07,
                xTitleOffset=0.1,
                xTitleJustify="bottom-left",
                ytitle="y [um]",
                yLabelOffset=0.025,
                yTitleOffset=0.1,
                yTitleJustify="bottom-left",
                ztitle="z [um]",
                zLabelOffset=0.025,
                zTitleOffset=0.1,
                zTitleJustify="bottom-left",
            )
            for a in ax.unpack():
                if "xtitle" in a.name or "xNumericLabel" in a.name:
                    a.RotateZ(180)

            self.scene.add_actor(ax)

        # Fix camera
        set_camera(self.scene, self.scene.camera)
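# --- Usage sketch (not part of the snippet above) ---
# A minimal illustration of how setup_plotter could be hooked into a PyQt
# application. The EmbeddedWindow class, the Scene import path and the Qt
# wiring are assumptions; setup_plotter here is a trimmed stand-in for the
# full method above, kept only so the sketch runs on its own.
from PyQt5.QtWidgets import QApplication, QMainWindow
from vtk.qt.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from vedo import Plotter  # vtkplotter in older releases
from brainrender.scene import Scene  # import path may differ across brainrender versions


class EmbeddedWindow(QMainWindow):
    def __init__(self, axes=True):
        super().__init__()
        self.scene = Scene()  # brainrender scene to embed
        self.axes = axes  # toggles the custom-axes block in setup_plotter
        self.vtkWidget = QVTKRenderWindowInteractor(self)  # VTK canvas inside Qt
        self.setCentralWidget(self.vtkWidget)
        self.setup_plotter()

    def setup_plotter(self):
        # Stand-in for the full method above: bind the scene's plotter to the
        # embedded VTK widget (parameter name matches the snippet; it may
        # differ in newer vedo versions).
        self.scene.plotter = Plotter(qtWidget=self.vtkWidget)


if __name__ == "__main__":
    app = QApplication([])
    window = EmbeddedWindow()
    window.show()
    app.exec_()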
Example #2
    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            # Cameras work differently in jupyter notebooks
            if not self.jupyter:
                if camera is None:
                    camera = self.camera

                # Strings and dicts are parsed; anything else is assumed to be a vtk camera
                if isinstance(camera, (str, dict)):
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive:
                if self.verbose and not self.jupyter:
                    print(brainrender.INTERACTIVE_MSG)
                elif self.jupyter:
                    print(
                        "The scene is ready to render in your jupyter notebook"
                    )
                else:
                    print("\n\nRendering scene.\n   Press 'q' to Quit")

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.85 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True
        show(*self.actors, *self.actors_labels, **args_dict)
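# --- Usage sketch (not part of the snippet above) ---
# Hypothetical calls against the render() signature above. The Scene import
# path and the "sagittal" preset name are assumptions; only the keyword
# arguments themselves come from the method.
from brainrender.scene import Scene  # import path may differ across brainrender versions

scene = Scene()

# Interactive window using the scene's default camera
scene.render()

# Named camera preset: strings (and dicts) are passed through check_camera_param
scene.render(camera="sagittal", zoom=1.5)  # "sagittal" is an assumed preset name

# Offscreen render, e.g. when grabbing frames for a video
scene.render(video=True, interactive=False)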
Example #3
def test_get_camera_params(scene):
    if not isinstance(get_camera_params(scene), dict):
        raise ValueError

    if not isinstance(get_camera_params(camera=scene.plotter.camera), dict):
        raise ValueError

    camera_dict = get_camera_params(scene)

    try:
        set_camera(scene, camera_dict)
    except ValueError as e:
        raise ValueError(
            f"Failed to produce a camera params dict that can be used to set the camera:\n {e}"
        ) from e
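# --- Usage sketch (not part of the test above) ---
# The round-trip the test exercises: capture the current camera as a dict and
# re-apply it, e.g. to reproduce the same viewpoint in another scene. Scene
# construction and both import paths are assumptions; only get_camera_params
# and set_camera come from the snippet.
from brainrender.scene import Scene  # import path may differ across brainrender versions
from brainrender.Utils.camera import get_camera_params, set_camera  # assumed module path

scene_a = Scene()
scene_b = Scene()

params = get_camera_params(scene_a)  # dict of camera parameters
set_camera(scene_b, params)          # reuse the same viewpoint elsewhere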
Example #4
    def render(self, _interactive=True, **kwargs):
        """

        :param _interactive:  (Default value = True)
        :param **kwargs:

        """
        camera = kwargs.pop("camera", None)

        for scene in self.scenes:
            scene.apply_render_style()

            if camera is None:
                if scene.atlas.default_camera is None:
                    scene_camera = brainrender.CAMERA
                else:
                    scene_camera = scene.atlas.default_camera
            else:
                if camera:
                    scene_camera = camera
                else:
                    scene_camera = None
            if scene_camera is not None:
                set_camera(scene, scene_camera)

        mv = Plotter(
            N=self.N,
            axes=4 if brainrender.SHOW_AXES else 0,
            size="full" if brainrender.WHOLE_SCREEN else "auto",
            sharecam=True,
            bg=brainrender.BACKGROUND_COLOR,
        )

        actors = []
        for scene in self.scenes:
            scene.apply_render_style()
            actors.append(scene.actors)
            mv.add(scene.actors)

        for i, scene_actors in enumerate(actors):
            mv.show(scene_actors, at=i, interactive=False)

        print("Rendering complete")
        if _interactive:
            interactive()
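# --- Pattern illustration (not part of the snippet above) ---
# Standalone sketch of the vedo multi-renderer pattern used by render():
# one Plotter with N sub-renderers, each showing its own group of actors,
# then a single interactive() call to start the event loop. Sphere/Cube
# stand in for the scenes' actors; newer vedo also offers Plotter.interactive().
from vedo import Plotter, Sphere, Cube, interactive

mv = Plotter(N=2, axes=4, sharecam=True, bg="white")

groups = [[Sphere()], [Cube()]]
for i, group in enumerate(groups):
    mv.add(group)                            # register the actors, as in render()
    mv.show(group, at=i, interactive=False)  # draw them in sub-renderer i

interactive()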
Example #5
	def render(self, _interactive=True,  **kwargs):
		"""

		:param _interactive:  (Default value = True)
		:param **kwargs:

		"""
		camera = kwargs.pop("camera", None)

		for scene in self.scenes:
			scene.apply_render_style()
		
			if camera is None: 
				if scene.atlas.default_camera is None:
					scene_camera = brainrender.CAMERA
				else:
					scene_camera = scene.atlas.default_camera
			else:
				if camera:
					scene_camera = camera
				else:
					scene_camera = None
			if scene_camera is not None:
				set_camera(scene, scene_camera)

		if self.N > 4:
			print("Rendering {} scenes. Might take a few minutes.".format(self.N))
		mv = Plotter(N=self.N, axes=4, size="auto", sharecam=True, bg=brainrender.BACKGROUND_COLOR)

		actors = []
		for i, scene in enumerate(self.scenes):
			scene_actors = scene.get_actors()
			actors.append(scene_actors)
			mv.add(scene_actors)

		for i, scene_actors in enumerate(actors):
			mv.show(scene_actors, at=i,  interactive=False)

		print("Rendering complete")
		if _interactive:
			interactive()
Example #6
    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            # Cameras work differently in jupyter notebooks
            if not self.jupyter:
                if camera is None:
                    camera = self.camera

                # Strings and dicts are parsed; anything else is assumed to be a vtk camera
                if isinstance(camera, (str, dict)):
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive and self.verbose:
                if not self.jupyter:
                    print(
                        f"\n\n[{mocassin}]Rendering scene.\n   Press [{orange}]'q'[/{orange}] to Quit"
                    )
                elif self.jupyter:
                    print(
                        f"[{mocassin}]The scene is ready to render in your jupyter notebook"
                    )

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.2 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True

        if self.make_custom_axes:
            self._make_custom_axes()
            self.make_custom_axes = False

        # Correct axes orientations
        if not self._axes_order_corrected:
            self._correct_axes()

        show(*self.actors, *self.actors_labels, **args_dict)