Code example #1
def get_scene_camera(camera, atlas):
    """
        Gets a working camera. 
        In order these alternatives are used:
            - user given camera
            - atlas specific camera
            - default camera
    """
    if camera is None:
        if atlas.default_camera is not None:
            return check_camera_param(atlas.default_camera)
        else:
            return brainrender.CAMERA
    else:
        return check_camera_param(camera)
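
A minimal usage sketch of this helper, under the assumption that check_camera_param accepts a camera name string such as "sagittal"; the FakeAtlas stand-in below is purely illustrative:

# Illustrative only: a stand-in atlas object with no atlas-specific camera.
class FakeAtlas:
    default_camera = None

camera = get_scene_camera(None, FakeAtlas())        # falls back to brainrender.CAMERA
camera = get_scene_camera("sagittal", FakeAtlas())  # a user-given camera takes priority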
Code example #2
    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            if not self.jupyter:  # cameras work differently in jupyter notebooks
                if camera is None:
                    camera = self.camera

                if isinstance(camera, (str, dict)):  # otherwise assume that it's a vtk camera
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive:
                if self.verbose and not self.jupyter:
                    print(brainrender.INTERACTIVE_MSG)
                elif self.jupyter:
                    print(
                        "The scene is ready to render in your jupyter notebook"
                    )
                else:
                    print("\n\nRendering scene.\n   Press 'q' to Quit")

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.85 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True
        show(*self.actors, *self.actors_labels, **args_dict)
Code example #3
    def render(self,
               interactive=True,
               video=False,
               camera=None,
               zoom=None,
               **kwargs):
        """
        Takes care of rendering the scene
        """
        self.apply_render_style()

        if not video:
            if not self.jupyter:  # cameras work differently in jupyter notebooks
                if camera is None:
                    camera = self.camera

                if isinstance(camera, (str, dict)):  # otherwise assume that it's a vtk camera
                    camera = check_camera_param(camera)

                set_camera(self, camera)

            if interactive and self.verbose:
                if not self.jupyter:
                    print(
                        f"\n\n[{mocassin}]Rendering scene.\n   Press [{orange}]'q'[/{orange}] to Quit"
                    )
                elif self.jupyter:
                    print(
                        f"[{mocassin}]The scene is ready to render in your jupyter notebook"
                    )

            self._get_inset()

        if zoom is None and not video:
            zoom = 1.2 if brainrender.WHOLE_SCREEN else 1.5

        # Make mesh labels follow the camera
        if not self.jupyter:
            for txt in self.actors_labels:
                txt.followCamera(self.plotter.camera)

        self.is_rendered = True

        args_dict = dict(
            interactive=interactive,
            zoom=zoom,
            bg=brainrender.BACKGROUND_COLOR,
            axes=self.plotter.axes,
        )

        if video:
            args_dict["offscreen"] = True

        if self.make_custom_axes:
            self._make_custom_axes()
            self.make_custom_axes = False

        # Correct axes orientations
        if not self._axes_order_corrected:
            self._correct_axes()

        show(*self.actors, *self.actors_labels, **args_dict)
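
For context, a short sketch of how this render() method is typically called, assuming it belongs to brainrender's Scene class; the import path, camera name and zoom value are illustrative assumptions:

# Minimal usage sketch (import path assumed for brainrender ~1.x).
from brainrender.scene import Scene

scene = Scene()
scene.render(camera="sagittal", zoom=1.2)    # interactive window, press 'q' to quit
scene.render(interactive=False, video=True)  # offscreen rendering, e.g. when recording a video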
Code example #4
	def __init__(self,  
				brain_regions=None, 
				regions_aba_color=False,
				neurons=None, 
				tracts=None, 
				add_root=None, 
				verbose=True, 
				jupyter=False,
				display_inset=None, 
				base_dir=None,
				camera=None, 
				screenshot_kwargs = {},
				use_default_key_bindings=False,
				title=None,
				atlas=None,
				atlas_kwargs=dict(),
				**kwargs):
		"""

			Creates and manages a Plotter instance

			:param brain_regions: list of brain regions acronyms to be added to the rendered scene (default value None)
			:param regions_aba_color: if True, use the Allen Brain Atlas regions colors (default value False)
			:param neurons: path to JSON or SWC file with data of neurons to be rendered [or list of files] (default value None)
			:param tracts: list of JSON files with tractography data to be rendered (default value None)
			:param add_root: if True a rendered outline of the whole brain (root) is added to the scene (default value None)
			:param verbose: if False less feedback is printed to screen (default value True)
			:param display_inset: if False the inset displaying the brain's outline is not rendered (but the root is still added to the scene) (default value None)
			:param base_dir: path to directory to use for saving data (default value None)
			:param camera: name of the camera parameters setting to use (controls the orientation of the rendered scene)
			:param kwargs: can be used to pass path to individual data folders. See brainrender/Utils/paths_manager.py
			:param screenshot_kwargs: pass a dictionary with keys:
						- 'folder' -> str, path to the folder where screenshots are saved
						- 'name' -> str, filename to prepend to screenshot files
						- 'type' -> str, 'png', 'svg' or 'jpg'
						- 'scale' -> float, values > 1 yield higher resolution screenshots
			:param use_default_key_bindings: if True the default keybindings from VtkPlotter are used, otherwise
							a custom keypress function is used, which can take screenshots with the parameters above.
			:param title: str, if a string is passed a text is added to the top of the rendering window as a title
			:param atlas: an instance of a valid Atlas class used to fetch anatomical data for the scene. By default,
				if no atlas is passed, the Allen Brain Atlas for the adult mouse brain is used.
			:param atlas_kwargs: dictionary used to pass extra arguments to atlas class
		"""
		if atlas is None:
			self.atlas = ABA(base_dir=base_dir,  **atlas_kwargs, **kwargs)
		else:
			self.atlas = atlas(base_dir=base_dir, **atlas_kwargs, **kwargs)


		# Setup a few rendering options
		self.verbose = verbose
		self.regions_aba_color = regions_aba_color

		# Infer if we are using k3d from vtkplotter.settings
		if settings.notebookBackend == 'k3d':
			self.jupyter = True
		else:
			self.jupyter = False

		if display_inset is None:
			self.display_inset = brainrender.DISPLAY_INSET
		else:
			self.display_inset = display_inset

		if self.display_inset and jupyter:
			print("Setting 'display_inset' to False as this feature is not available in juputer notebooks")
			self.display_inset = False


		if add_root is None:
			add_root = brainrender.DISPLAY_ROOT

		# Camera parameters
		if camera is None:
			if self.atlas.default_camera is not None:
				self.camera = check_camera_param(self.atlas.default_camera)
			else:
				self.camera = brainrender.CAMERA
		else:
			self.camera = check_camera_param(camera)

		# Set up vtkplotter plotter and actors records
		if brainrender.WHOLE_SCREEN and not self.jupyter:
			sz = "full"
		elif brainrender.WHOLE_SCREEN and self.jupyter:
			print("Setting window size to 'auto' as whole screen is not available in jupyter")
			sz='auto'
		else:
			sz = "auto"

		if brainrender.SHOW_AXES:
			axes = 1
		else:
			axes = 0

		# Create plotter
		self.plotter = Plotter(axes=axes, size=sz, pos=brainrender.WINDOW_POS, title='brainrender')
		self.plotter.legendBC = getColor('blackboard')

		# Screenshot and keypress variables
		self.screenshots_folder = screenshot_kwargs.pop('folder', self.atlas.output_screenshots)
		self.screenshots_name = screenshot_kwargs.pop('name', brainrender.DEFAULT_SCREENSHOT_NAME)
		self.screenshots_extension = screenshot_kwargs.pop('type', brainrender.DEFAULT_SCREENSHOT_TYPE)
		self.screenshots_scale = screenshot_kwargs.pop('scale', brainrender.DEFAULT_SCREENSHOT_SCALE)

		if not use_default_key_bindings:
			self.plotter.keyPressFunction = self.keypress
			self.verbose = False

		if not brainrender.SCREENSHOT_TRANSPARENT_BACKGROUND:
			settings.screenshotTransparentBackground = False
			settings.useFXAA = True

		
		# Prepare store for actors added to scene
		self.actors = {"regions":{}, "tracts":[], "neurons":[], "root":None, "injection_sites":[], 
						"others":[], "labels":[],}
		self._actors = None # store a copy of the actors when manipulations like slicing are done
		self.store = {} # in case we need to store some data

		# Add items to scene
		if brain_regions is not None:
			self.add_brain_regions(brain_regions)

		if neurons is not None:
			self.add_neurons(neurons)

		if tracts is not None:
			self.add_tractography(tracts)

		if add_root:
			self.add_root(render=True)
		else:
			self.root = None

		if title is not None:
			self.add_text(title)

		# Placeholder variables
		self.inset = None  # the first time the scene is rendered create and store the inset here
		self.is_rendered = False # keep track of if the scene has already been rendered
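
To tie the constructor parameters together, a hedged example of building a scene with screenshots configured; the region acronym, folder, and title values are illustrative, and the class is assumed to be brainrender's Scene:

# Illustrative constructor call exercising the parameters documented above.
scene = Scene(
    brain_regions=["TH"],    # hypothetical choice of region acronym (thalamus)
    add_root=True,           # also render the outline of the whole brain
    screenshot_kwargs={"folder": "./screenshots", "name": "demo"},
    title="demo scene",
)
scene.render()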