Example #1
    def __init__(
        self, *args, atlas=None, axes=None, random_colors=False, **kwargs
    ):
        """
            Adds brainrender/vedo functionality to the 
            pyqt5 application created in bgviewer.viewer3d.ui.Window

            Arguments
            ---------
            atlas: name of a brainatlas API atlas (or any atlas class supported by brainrender)
            random_colors: if True, brain regions are assigned a random color
            axes: None by default, so no axes are shown. If True is passed,
                Cartesian coordinate axes are shown
        """
        self.scene = Scene(*args, atlas=atlas, **kwargs)
        Window.__init__(self, *args, **kwargs)

        self.axes = axes

        # Create a new vedo plotter
        self.setup_plotter()
        self.random_colors = random_colors

        # update plotter
        self._update()

        # Add inset
        self.scene._get_inset()
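A hedged usage sketch for the widget above. The subclass name BrainRenderWindow is hypothetical (the snippet does not show it), as is the assumption that the mixed-in Window is a standard Qt widget:

import sys

from PyQt5.QtWidgets import QApplication

# BrainRenderWindow is a hypothetical name for a class built on the __init__ above
app = QApplication(sys.argv)
viewer = BrainRenderWindow(atlas="allen_mouse_25um", axes=True, random_colors=False)
viewer.show()  # assumes the mixed-in Window exposes the usual Qt show()
app.exec_()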
Example #2
def test_tractography():
    scene = Scene()
    analyzer = ABA()
    p0 = scene.get_region_CenterOfMass("ZI")
    tract = analyzer.get_projection_tracts_to_target(p0=p0)
    scene.add_tractography(tract, display_injection_structure=False, color_by="target_region", 
                                VIP_regions=['MOs'], VIP_color="red", others_color="ivory")
Example #3
def test_mouselight():
    from brainrender.Utils.MouseLightAPI.mouselight_api import MouseLightAPI
    from brainrender.Utils.MouseLightAPI.mouselight_info import mouselight_api_info, mouselight_fetch_neurons_metadata

    # Fetch metadata for neurons with their soma in the secondary motor cortex
    neurons_metadata = mouselight_fetch_neurons_metadata(
        filterby='soma', filter_regions=['MOs'])

    # Then we can download the files and save them as a .json file
    ml_api = MouseLightAPI()
    neurons_files = ml_api.download_neurons(
        neurons_metadata[:2]
    )  # just saving the first couple neurons to speed things up

    # Show neurons and ZI in the same scene:
    scene = Scene()
    scene.add_neurons(
        neurons_files,
        soma_color='orangered',
        dendrites_color='orangered',
        axon_color='darkseagreen',
        neurite_radius=8
    )  # add_neurons takes a lot of arguments to specify how the neurons should look
    # make sure to check the source code to see all available options

    scene.add_brain_regions(['MOs'], alpha=0.15)
    scene.render(camera='coronal')
Example #4
    def _make_root(self, rootpath):
        """
            Creates a root mesh by merging the mesh corresponding to each neuron,
            then saves it as an obj file at rootpath
        """
        raise NotImplementedError(
            f"Create root method not supported yet, sorry")

        print(f"Creating root mesh for atlas {self.atlas_name}")
        temp_scene = Scene(atlas=Celegans,
                           add_root=False,
                           display_inset=False,
                           atlas_kwargs=dict(data_folder=self.data_folder))

        temp_scene.add_neurons(self.neurons_names)
        temp_scene.render(interactive=False)
        temp_scene.close()

        root = merge(*temp_scene.actors['neurons']).clean().cap()
        # root = mesh2Volume(root, spacing=(0.02, 0.02, 0.02)).isosurface()

        points = Points(root.points()).smoothMLS2D(f=0.8).clean(tol=0.005)

        root = recoSurface(points, dims=100, radius=0.2)

        # Save
        write(root, rootpath)

        del temp_scene
        return root
Example #5
    def __init__(
        self,
        base_dir=None,
        add_root=True,
        use_cache=True,
        scene_kwargs={},
        **kwargs,
    ):
        """
            Initialise the class instance to get a few useful paths and variables.

            :param base_dir: str, path to the base directory in which all brainrender data are stored.
                    Pass it only if you want to use a different directory from the default.
            :param add_root: bool, if True the root mesh is added to the rendered scene
            :param use_cache: bool, if True data are loaded from a cache to speed things up.
                    Setting it to False can be useful for debugging.
            :param scene_kwargs: dict, parameters passed to the Scene instance associated with this class
        """
        Paths.__init__(self, base_dir=base_dir, **kwargs)

        # Get MCM cache
        cache_path = (Path(self.mouse_connectivity_volumetric) /
                      "voxel_model_manifest.json")

        if not cache_path.exists():
            if not connected_to_internet():
                raise ValueError(
                    "The first time you use this class it will need to download some data, but it seems that you're not connected to the internet."
                )
            print(
                "Downloading volumetric data. This will take several minutes but it only needs to be done once."
            )

        self.cache = VoxelModelCache(manifest_file=str(cache_path))
        self.voxel_array = None
        self.target_coords, self.source_coords = None, None

        # Get projection cache paths
        self.data_cache = self.mouse_connectivity_volumetric_cache
        self.data_cache_projections = os.path.join(self.data_cache,
                                                   "projections")
        self.data_cache_targets = os.path.join(self.data_cache, "targets")
        self.data_cache_sources = os.path.join(self.data_cache, "sources")

        for fold in [
                self.data_cache_projections,
                self.data_cache_targets,
                self.data_cache_sources,
        ]:
            if not os.path.isdir(fold):
                os.mkdir(fold)

        # Get structures tree
        self.structure_tree = self.cache.get_structure_tree()

        # Get scene
        self.scene = Scene(add_root=add_root, **scene_kwargs)

        # Other vars
        self.use_cache = use_cache
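A hedged usage sketch for the class initialised above. The name VolumetricAPI is an assumption (the snippet does not show the class name); the structure-tree call uses the allensdk StructureTree interface returned by VoxelModelCache:

# VolumetricAPI is an assumed name for the class that defines the __init__ above
api = VolumetricAPI(add_root=True, use_cache=True)

# the Allen structure tree and the brainrender Scene are exposed as attributes
th = api.structure_tree.get_structures_by_acronym(["TH"])[0]
print(th["name"])

api.scene.render()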
Example #6
def test_custom_video():
    from brainrender.animation.video import CustomVideoMaker

    # --------------------------------- Variables -------------------------------- #
    N_FRAMES = 20

    # Variables to specify camera position at each frame
    zoom = np.linspace(1, 1.35, N_FRAMES)
    frac = np.zeros_like(
        zoom
    )  # for camera transition, interpolation value between cameras
    frac[:10] = np.linspace(0, 1, 10)
    frac[10:] = np.linspace(1, 0, len(frac[10:]))

    # ------------------------------- Create scene ------------------------------- #
    scene = Scene(display_inset=True, use_default_key_bindings=True)

    filepaths, data = scene.atlas.download_streamlines_for_region("TH")
    scene.add_brain_regions(["TH"], alpha=0.2)

    # Create new cameras
    cam1 = buildcam(sagittal_camera)
    cam2 = buildcam(top_camera)
    cam3 = buildcam(
        dict(
            position=[1862.135, -4020.792, -36292.348],
            focal=[6587.835, 3849.085, 5688.164],
            viewup=[0.185, -0.97, 0.161],
            distance=42972.44,
            clipping=[29629.503, 59872.10],
        )
    )

    # Initial camera position
    scene.plotter.moveCamera(cam1, cam2, frac[0])

    # ------------------------------- Create frames ------------------------------ #
    def frame_maker(scene=None, video=None, videomaker=None):
        for step in track(
            np.arange(N_FRAMES),
            total=N_FRAMES,
            description="Generating frames...",
        ):
            # Move the scene camera between the 3 cameras
            # (switch at frame 10, where the frac interpolation reverses direction)
            if step < 10:
                scene.plotter.moveCamera(cam1, cam2, frac[step])
            else:
                scene.plotter.moveCamera(cam3, cam2, frac[step])

            # Add frame to video
            scene.render(zoom=zoom[step], interactive=False, video=True)
            video.addFrame()
        return video

    # ---------------------------------------------------------------------------- #
    #                                  Video maker                                 #
    # ---------------------------------------------------------------------------- #
    vm = CustomVideoMaker(scene, save_name="streamlines_animation")
    vm.make_video(frame_maker)
Example #7
def BrainRegionsScene():
    scene = Scene()
    scene.add_brain_regions(['TH', 'VP'], use_original_color=True, alpha=1)

    act = scene.actors['regions']['TH']
    scene.edit_actors([act], wireframe=True)

    scene.render()
Example #8
def test_scene_creation_brainglobe():
    scene = Scene(atlas="allen_mouse_25um")

    try:
        scene.add_brain_regions("TH")
    except Exception as e:
        raise ValueError(
            "Could not add brain regions to the mouse atlas scene") from e

    try:
        scene.add_streamlines
    except Exception as e:
        raise ValueError(
            "Scene does not expose the add_streamlines attribute") from e

    scene = Scene(atlas="allen_human_500um")
    try:
        scene.add_brain_regions("TH")
    except Exception as e:
        raise ValueError(
            "Could not add brain regions to the human atlas scene") from e
Example #9
def test_regions():
    scene = Scene(camera=coronal_camera)
    regions = ["MOs", "VISp", "ZI"]
    scene.add_brain_regions(regions, colors="green")
    ca1 = scene.add_brain_regions("CA1", add_labels=True)
    ca1.alpha(0.2)

    print(ca1)

    scene.close()
Example #10
def test_streamlines():
    streamlines_api = StreamlinesAPI()

    streamlines_files, data = streamlines_api.download_streamlines_for_region("PAG") 

    scene = Scene()
    scene.add_streamlines(data[3], color="powderblue", show_injection_site=False, alpha=.3, radius=10)
    scene.add_brain_regions(['PAG'], use_original_color=False, colors='powderblue', alpha=.9)
    mos = scene.actors['regions']['PAG']
    scene.edit_actors([mos], wireframe=True) 
Example #11
def test_video():
    from brainrender.animation.video import BasicVideoMaker as VideoMaker

    scene = Scene()

    # Create an instance of VideoMaker with our scene
    vm = VideoMaker(scene, niters=10)

    # Make a video!
    vm.make_video(elevation=1, roll=5) # specify how the scene rotates at each frame
Example #12
def test_neurons():
    scene = Scene()

    mlapi = MouseLightAPI()

    # Fetch metadata for neurons with their soma in the secondary motor cortex
    neurons_metadata = mlapi.fetch_neurons_metadata(filterby='soma',
                                                    filter_regions=['MOs'])

    # Then we can download the files and save them as a .json file
    neurons = mlapi.download_neurons(neurons_metadata[:5])

    scene = Scene(title='One color')
    scene.add_neurons(neurons,
                      color='salmon',
                      display_axon=True,
                      neurite_radius=6)
    scene.render(interactive=False)
    scene.close()
Example #13
def CartoonStyleScene():
    if brainrender.SHADER_STYLE != 'cartoon':
        raise ValueError('Set cartoon style at imports')

    scene = Scene(camera='coronal', add_root=False)
    scene.add_brain_regions(['PAG', 'SCm', 'SCs'],
                            use_original_color=True,
                            alpha=1)
    # scene.add_brain_regions(['VISl', 'VISpl', 'VISpm', 'VISam', 'VISal', 'VISa'], use_original_color=True, alpha=.4)

    scene.render()
Example #14
def visualize_obj(obj_path, *args, color="lightcoral", **kwargs):
    """
        Uses brainrender to visualize a .obj file registered to the Allen CCF
        :param obj_path: str, path to a .obj file
        :param color: str, color of object being rendered
    """
    print("Visualizing : " + obj_path)
    scene = Scene(add_root=True)
    scene.add_from_file(obj_path, *args, c=color, **kwargs)

    return scene
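A quick, hypothetical call to the visualize_obj helper defined above; the .obj path is a placeholder:

# the path is a placeholder for any .obj file registered to the Allen CCF
scene = visualize_obj("registered_mesh.obj", color="salmon")
scene.render()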
Example #15
def test_video():
    from brainrender.Utils.videomaker import VideoMaker

    scene = Scene()

    # Create an instance of VideoMaker with our scene
    vm = VideoMaker(scene, savefile="Output/Videos/video.mp4", niters=10)

    # Make a video!
    vm.make_video(elevation=1,
                  roll=5)  # specify how the scene rotates at each frame
Example #16
def test_scene_addition():
    scene = Scene()

    scene + "Examples/example_files/root.obj"

    scene + scene.root

    scene += scene.root

    # test report

    scene.list_actors()
Example #17
def NeuronsScene2():
    scene = Scene()

    neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['MOp5'])
    neurons_files =  mlapi.download_neurons(neurons_metadata[2:6]) 
    scene.add_neurons(neurons_files, soma_color='deepskyblue', force_to_hemisphere="right")

    streamlines_files, data = streamlines_api.download_streamlines_for_region("MOp") 
    scene.add_streamlines(data[:1], color="palegreen", show_injection_site=False, alpha=.2, radius=10)

    set_camera(scene)
    scene.render()
Example #18
def NeuronsScene(show_regions=False):
    scene = Scene()

    fl = 'Examples/example_files/one_neuron.json'
    scene.add_neurons(fl, soma_color='darkseagreen', force_to_hemisphere="right",)

    if show_regions:
        scene.add_brain_regions(['ZI', 'PAG', 'MRN', 'NPC', "VTA", "STN", "PPT", "SCm", "HY"], 
                        use_original_color=True, alpha=.5)

    set_camera(scene)
    scene.render() 
Example #19
def CellsScene():
    # Load and clean data
    data = pd.read_csv('/Users/federicoclaudi/Downloads/41593_2019_354_MOESM3_ESM.csv')
    data = data[['genotype', 'Xpos', 'Ypos', 'z.position']]
    data.columns = ['genotype', 'x', 'y', 'z']

    # Visualise data
    scene = Scene()
    scene.add_cells(data)

    set_camera(scene)
    scene.render() 
Example #20
def test_neurons():
    scene = Scene()
    mlapi = MouseLightAPI()
    neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['MOs'])
    neurons_files =  mlapi.download_neurons(neurons_metadata[:2])

    parser = NeuronsParser(scene=scene, 
                        color_neurites=True, axon_color="antiquewhite", 
                        soma_color="darkgoldenrod", dendrites_color="firebrick")
    neurons, regions = parser.render_neurons(neurons_files)

    scene.add_neurons(neurons_files, color_neurites=False, random_color="jet", display_axon_regions=False)
Example #21
def test_streamlines():
    scene = Scene()

    filepaths, data = scene.atlas.download_streamlines_for_region("CA1")

    scene.add_brain_regions(['CA1'], use_original_color=True, alpha=.2)

    scene.add_streamlines(data,
                          color="darkseagreen",
                          show_injection_site=False)

    scene.render(camera='sagittal', zoom=1, interactive=False)
    scene.close()
Example #22
def test_video():

    s = Scene(title="BR")

    s.add_brain_region("TH")

    vm = VideoMaker(s, "tests", "test")
    savepath = vm.make_video(duration=1, fps=15, azimuth=3)

    assert savepath == "tests/test.mp4"
    path = Path(savepath)
    assert path.exists()
    path.unlink()
Example #23
def ConnectivityScene():
    scene = Scene()
    p0 = scene.get_region_CenterOfMass("ZI")

    # Then we use these coordinates to get tractography data; note: any set of X, Y, Z coordinates would do.
    tract = aba.get_projection_tracts_to_target(p0=p0)

    scene.add_tractography(tract, display_injection_structure=False, color_by="region", 
                        display_injection_volume=True, others_alpha=.25)
    scene.add_brain_regions(['ZI'], colors="ivory", alpha=1)

    set_camera(scene)
    scene.render()
Example #24
def load_regions_into_brainrender(list_of_regions, alpha=0.8, shading="flat"):
    """
    Loads a list of .obj files into brainrender
    :param list_of_regions: List of .obj files to be loaded
    :param alpha: Object transparency
    :param shading: Object shading type ("flat", "gouraud" or "phong").
    Defaults to "flat"
    """
    scene = Scene()
    for obj_file in list_of_regions:
        load_obj_into_brainrender(scene,
                                  obj_file,
                                  alpha=alpha,
                                  shading=shading)
    scene.render()
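A minimal, hypothetical call to load_regions_into_brainrender as defined above; the mesh paths are placeholders:

# the .obj paths are placeholders for region meshes exported elsewhere
load_regions_into_brainrender(
    ["meshes/TH.obj", "meshes/MOs.obj"], alpha=0.6, shading="phong"
)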
Example #25
def test_fish_neurons():
    api = MpinMorphologyAPI()
    # api.download_dataset()
    neurons_ids = api.get_neurons_by_structure(837)[:5]
    neurons = api.load_neurons(neurons_ids)

    neurons = [
        neuron.create_mesh(soma_radius=1, neurite_radius=1)[1]
        for neuron in neurons
    ]

    scene = Scene(atlas="mpin_zfish_1um", add_root=True, camera="sagittal2")
    scene.add_neurons(neurons)
    scene.render(interactive=False)
    scene.close()
Example #26
def test_video_custom():
    def custom(scene, *args, **kwargs):
        return

    s = Scene(title="BR")

    s.add_brain_region("TH")

    vm = VideoMaker(s, "tests", "test", make_frame_func=custom)

    savepath = vm.make_video(duration=1, fps=15, azimuth=3)

    assert savepath == "tests/test.mp4"
    path = Path(savepath)
    assert path.exists()
    path.unlink()
Example #27
def ElectrodesArrayScene():
    scene = Scene(add_root=False, camera='sagittal')
    z_offset = -1500
    scene.add_brain_regions(['VAL'], use_original_color=True, alpha=.5)
    scene.add_brain_regions(['TH'],
                            use_original_color=True,
                            alpha=.5,
                            wireframe=True)

    # scene.add_optic_cannula('VAL')

    # for x_offset in [-200, -500, -800, -1100]:
    #     scene.add_optic_cannula('VAL', z_offset=z_offset, x_offset=x_offset, alpha=1,
    #                 radius=50, y_offset=-500, color='blackboard')

    scene.render()
Example #28
def NeuronsScene3():
    scene = Scene()

    neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['VAL'])
    neurons_files =  mlapi.download_neurons(neurons_metadata[2:6]) 
    scene.add_neurons(neurons_files, soma_color='deepskyblue', force_to_hemisphere="right")

    scene.add_brain_regions(['VAL'], use_original_color=False, colors='palegreen', alpha=.9)
    mos = scene.actors['regions']['VAL']
    scene.edit_actors([mos], wireframe=True) 

    streamlines_files, data = streamlines_api.download_streamlines_for_region("VAL") 
    scene.add_streamlines(data[:1], color="palegreen", show_injection_site=False, alpha=.2, radius=10)

    set_camera(scene)
    scene.render()
Example #29
def test_streamlines():
    from brainrender.Utils.parsers.streamlines import StreamlinesAPI

    # Download streamlines data for injections in the CA1 field of the hippocampus
    streamlines_api = StreamlinesAPI()
    filepaths, data = streamlines_api.download_streamlines_for_region("CA1")

    # Start by creating a scene
    scene = Scene()

    scene.add_brain_regions(['CA1'], use_original_color=True, alpha=.2)

    # you can pass either the filepaths or the data
    scene.add_streamlines(data, color="darkseagreen", show_injection_site=False)

    scene.render(interactive=False, camera='sagittal', zoom=1)
    scene.close()
Example #30
def test_camera():
    # Create a scene
    scene = Scene(camera='top')  # specify that you want a view from the top

    # render
    scene.render()

    # Now render but with a different view
    scene.render(camera='sagittal', zoom=1)

    # Now render but with specific camera parameters
    bespoke_camera = dict(
        position=[801.843, -1339.564, 8120.729],
        focal=[9207.34, 2416.64, 5689.725],
        viewup=[0.36, -0.917, -0.171],
        distance=9522.144,
        clipping=[5892.778, 14113.736],
    )