Example 1
def test_regions():
    scene = Scene(camera=coronal_camera)
    regions = ["MOs", "VISp", "ZI"]
    scene.add_brain_regions(regions, colors="green")
    ca1 = scene.add_brain_regions("CA1", add_labels=True)
    ca1.alpha(0.2)
    scene.close()
Example 2
    def _make_root(self, rootpath):
        """
            Creates a root mesh by merging the mesh corresponding to each neuron,
            then saves it as an obj file at rootpath
        """
        raise NotImplementedError(
            "Create root method not supported yet, sorry")

        # NOTE: everything below is unreachable until the NotImplementedError
        # above is removed; it is kept here as a reference implementation.
        print(f"Creating root mesh for atlas {self.atlas_name}")
        temp_scene = Scene(
            atlas=Celegans,
            add_root=False,
            display_inset=False,
            atlas_kwargs=dict(data_folder=self.data_folder),
        )

        temp_scene.add_neurons(self.neurons_names)
        temp_scene.render(interactive=False)
        temp_scene.close()

        root = merge(*temp_scene.actors["neurons"]).clean().cap()
        # root = mesh2Volume(root, spacing=(0.02, 0.02, 0.02)).isosurface()

        points = Points(root.points()).smoothMLS2D(f=0.8).clean(tol=0.005)

        root = recoSurface(points, dims=100, radius=0.2)

        # Save
        write(root, rootpath)

        del temp_scene
        return root
Example 3
def test_streamlines():
    scene = Scene()

    filepaths, data = scene.atlas.download_streamlines_for_region("CA1")

    scene.add_brain_regions(['CA1'], use_original_color=True, alpha=.2)

    scene.add_streamlines(data,
                          color="darkseagreen",
                          show_injection_site=False)

    scene.render(camera='sagittal', zoom=1, interactive=False)
    scene.close()
Example 4
def test_fish_neurons():
    api = MpinMorphologyAPI()
    # api.download_dataset()
    neurons_ids = api.get_neurons_by_structure(837)[:5]
    neurons = api.load_neurons(neurons_ids)

    neurons = [
        neuron.create_mesh(soma_radius=1, neurite_radius=1)[1]
        for neuron in neurons
    ]

    scene = Scene(atlas="mpin_zfish_1um", add_root=True, camera="sagittal2")
    scene.add_neurons(neurons)
    scene.render(interactive=False)
    scene.close()
Example 5
def test_neurons():
    mlapi = MouseLightAPI()

    # Fetch metadata for neurons whose soma is in the secondary motor cortex
    neurons_metadata = mlapi.fetch_neurons_metadata(filterby='soma', filter_regions=['MOs'])

    # Then we can download the neurons and save them as .json files
    neurons = mlapi.download_neurons(neurons_metadata[:5])

    scene = Scene(title='One color')
    scene.add_neurons(neurons, color='salmon', display_axon=True, neurite_radius=6)
    scene.render(interactive=False)
    scene.close()
Example 6
def test_streamlines():
    from brainrender.Utils.parsers.streamlines import StreamlinesAPI

    # Download streamlines data for injections in the CA1 field of the hippocampus
    streamlines_api = StreamlinesAPI()
    filepaths, data = streamlines_api.download_streamlines_for_region("CA1")

    # Start by creating a scene
    scene = Scene()

    scene.add_brain_regions(['CA1'], use_original_color=True, alpha=.2)

    # you can pass either the filepaths or the data
    scene.add_streamlines(data, color="darkseagreen", show_injection_site=False)

    scene.render(interactive=False, camera='sagittal', zoom=1)
    scene.close()
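As the comment above notes, add_streamlines accepts either the downloaded file paths or the parsed data; a minimal sketch of the file-path variant, assuming the same keyword arguments apply:

scene.add_streamlines(filepaths, color="darkseagreen", show_injection_site=False)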
Example 7
def test_tractography():
    from brainrender.Utils.ABA.connectome import ABA
    # Create a scene
    scene = Scene()

    # Get the center of mass of the region of interest
    p0 = scene.get_region_CenterOfMass("ZI")

    # Get projections to that point
    analyzer = ABA()
    tract = analyzer.get_projection_tracts_to_target(p0=p0)

    # Add the brain regions and the projections to it
    scene.add_brain_regions(['ZI'], alpha=.4, use_original_color=True)
    scene.add_tractography(tract, display_injection_structure=False, color_by="region")

    scene.render(interactive=False)
    scene.close()
Example 8
def main(regions, atlas=None, cartoon=False, debug=False, file=None):
    # Set look
    if cartoon:
        brainrender.SHADER_STYLE = "cartoon"

    # Create scene
    scene = Scene(atlas=atlas)

    # Add brain regions
    if regions is not None and len(regions) > 0:
        acts = scene.add_brain_regions(list(regions))

        # Add silhouettes
        if cartoon:
            if isinstance(acts, list):
                scene.add_silhouette(*acts)
            else:
                scene.add_silhouette(acts)

    # Add data from file
    if file is not None:
        if file.endswith(".h5"):
            scene.add_cells_from_file(file)
        else:
            try:
                scene.add_from_file(file)
            except Exception as e:
                raise ValueError(
                    f"Failed to load data from file onto scene: {file}\n{e}"
                )

    # In debug mode, render non-interactively (the scene is closed below)
    interactive = not debug

    # Render and close
    scene.render(interactive=interactive)

    if debug:
        scene.close()
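For reference, a hypothetical invocation of the main() function above; the region acronyms and flags are illustrative choices, not values taken from the source:

if __name__ == "__main__":
    # Render two example regions with the cartoon shader; debug=True renders
    # non-interactively and closes the scene afterwards
    main(["MOs", "TH"], cartoon=True, debug=True)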
Example 9
def test_camera():
    # Create a scene
    scene = Scene(camera='top') # specify that you want a view from the top

    # render
    scene.render(interactive=False)
    scene.close()

    # Now render but with a different view
    scene.render(interactive=False, camera='sagittal', zoom=1)
    scene.close()

    # Now render but with specific camera parameters
    bespoke_camera = dict(
        position=[801.843, -1339.564, 8120.729],
        focal=[9207.34, 2416.64, 5689.725],
        viewup=[0.36, -0.917, -0.171],
        distance=9522.144,
        clipping=[5892.778, 14113.736],
    )
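    # NOTE: the original snippet stops here; presumably the bespoke camera is
    # passed to render like the named cameras above. Sketch, assuming render
    # accepts a camera dictionary with these keys:
    scene.render(interactive=False, camera=bespoke_camera, zoom=1)
    scene.close()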
Example 10
def test_mouselight():
    from brainrender.Utils.MouseLightAPI.mouselight_api import MouseLightAPI
    from brainrender.Utils.MouseLightAPI.mouselight_info import mouselight_api_info, mouselight_fetch_neurons_metadata

    # Fetch metadata for neurons with some in the secondary motor cortex
    neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['MOs'])

    # Then we can download the files and save them as a .json file
    ml_api = MouseLightAPI()
    neurons_files = ml_api.download_neurons(neurons_metadata[:2])  # only download the first couple of neurons to speed things up

    # Show neurons and ZI in the same scene:
    scene = Scene()
    scene.add_neurons(neurons_files, soma_color='orangered', dendrites_color='orangered',
                      axon_color='darkseagreen', neurite_radius=8)  # add_neurons takes many arguments to specify how the neurons should look
    # make sure to check the source code to see all available options

    scene.add_brain_regions(['MOs'], alpha=0.15) 
    scene.render(interactive=False, camera='coronal') 
    scene.close()
Example 11
def test_animated_scene():
    # --------------------------------- Variables -------------------------------- #
    minalpha = 0.01  # transparency of background neurons
    darkcolor = "lightgray"  # background neurons color

    N_FRAMES = 50
    N_neurons = 4  # total number of neurons to show; with -1 all neurons are shown, but the first render might take a while
    N_neurons_in_frame = 2  # number of neurons to be highlighted in a given frame
    N_frames_for_change = 15  # the set of highlighted neurons changes every N frames

    # Variables to specify camera position at each frame
    zoom = np.linspace(1, 1.5, N_FRAMES)
    frac = np.zeros_like(zoom)  # camera transition: interpolation fraction between the two cameras
    frac[:10] = np.linspace(0, 1, 10)
    frac[10:] = np.linspace(1, 0, len(frac[10:]))

    # -------------------------------- Fetch data -------------------------------- #

    ml_api = MouseLightAPI()

    # Fetch metadata for neurons whose soma is in the secondary motor cortex
    neurons_metadata = ml_api.fetch_neurons_metadata(filterby="soma",
                                                     filter_regions=["MOs"])

    # Then download the neurons and save them as .json files
    neurons_files = ml_api.download_neurons(neurons_metadata[:N_neurons])

    # ------------------------------- Create scene ------------------------------- #
    scene = Scene(display_inset=False, use_default_key_bindings=True)

    neurons_actors = scene.add_neurons(neurons_files,
                                       neurite_radius=12,
                                       alpha=0)

    # Create new cameras
    cam1 = buildcam(sagittal_camera)

    cam2 = buildcam(
        dict(
            position=[-16624.081, -33431.408, 33527.412],
            focal=[6587.835, 3849.085, 5688.164],
            viewup=[0.634, -0.676, -0.376],
            distance=51996.653,
            clipping=[34765.671, 73812.327],
        ))

    cam3 = buildcam(
        dict(
            position=[1862.135, -4020.792, -36292.348],
            focal=[6587.835, 3849.085, 5688.164],
            viewup=[0.185, -0.97, 0.161],
            distance=42972.44,
            clipping=[29629.503, 59872.10],
        ))

    # ------------------------------- Create frames ------------------------------ #
    # Create frames
    prev_neurons = []
    for step in track(np.arange(N_FRAMES),
                      total=N_FRAMES,
                      description="Generating frames..."):
        if step % N_frames_for_change == 0:  # change the highlighted neurons every N frames

            # reset neurons from previous set of neurons
            for neuron in prev_neurons:
                for component, actor in neuron.items():
                    actor.alpha(minalpha)
                    actor.color(darkcolor)
            prev_neurons = []

            # highlight new neurons
            neurons = choices(neurons_actors, k=N_neurons_in_frame)
            for n, neuron in enumerate(neurons):
                color = colorMap(n,
                                 "Greens_r",
                                 vmin=-2,
                                 vmax=N_neurons_in_frame + 3)
                for component, actor in neuron.items():
                    actor.alpha(1)
                    actor.color(color)
                prev_neurons.append(neuron)

        # Move scene camera between 3 cameras
        scene.plotter.moveCamera(cam1, cam2, frac[step])
        if frac[step] == 1:
            cam1 = cam3

        # Update rendered window
        time.sleep(0.1)
        scene.render(zoom=zoom[step], interactive=False, video=True)
    scene.close()
Example 12
    title="Whole connectome",
)

# Exclude some neurons we don't want to render
metadata = scene.atlas.neurons_metadata
neurons = metadata.loc[(metadata.type != "nonvalid")
                       & (metadata.type != "other")]

# Add each neuron with the corresponding outline
scene.add_neurons(list(neurons.neuron.values))
for neuron in scene.actors["neurons"]:
    scene.add_actor(neuron.silhouette().lw(3).c("k"))

# Render
scene.render()
scene.close()

# ------------------------- Scene 2: showing synapses ------------------------ #

scene = Scene(
    add_root=False,
    atlas=Celegans,  # Pass the custom atlas class to scene.
    display_inset=False,
    atlas_kwargs=dict(
        data_folder=data_folder
    ),  # use this to pass keyword arguments to the Atlas class
    title="Synapses",
)

# Show only a few neurons and their pre and post synapses
scene.add_neurons(
Example 13
def test_scene_creation_ignore_root_and_inset():
    s = Scene(add_root=False, display_inset=False)
    s.render(interactive=False)
    s.close()
Example 14
    proj_data = iso20.transform(np.sqrt(rates))
    print('All data to 3')
    proj_data = iso_instance.transform(proj_data)

# Connect consecutive points of the projected trajectory with line segments
starts = proj_data[1:, :]
ends = proj_data[:-1, :]
lines = Lines(starts, endPoints=ends)

coords = pd.DataFrame(
    dict(x=proj_data[:, 0], y=proj_data[:, 1], z=proj_data[:, 2]))

isoscene = Scene(add_root=False, display_inset=False, title='isomap')
isoscene.add_cells(coords, radius=0.1, color='salmon', res=24)
isoscene.add_vtkactor(lines)
isoscene.render()
isoscene.close()

# %%
# ----------------------------------- UMAP ----------------------------------- #
print('Umap')
_umap_params = dict(
    n_neighbors=5,  # higher values favour global over local structure
    n_components=3,
    min_dist=0.1,  # minimum distance between points in the low-D embedding; low values favour tighter clusters
)

if not load_umap:
    umapper = umap.UMAP(**_umap_params)

    umapped = umapper.fit_transform(data)
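Following the isomap scene above, the UMAP embedding could be visualised the same way; a minimal sketch, assuming `umapped` holds the 3D embedding (the scene and variable names are illustrative):

umap_coords = pd.DataFrame(
    dict(x=umapped[:, 0], y=umapped[:, 1], z=umapped[:, 2]))

umapscene = Scene(add_root=False, display_inset=False, title='umap')
umapscene.add_cells(umap_coords, radius=0.1, color='salmon', res=24)
umapscene.render()
umapscene.close()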
Example 15
def test_regions():
    scene = Scene()
    regions = ["MOs", "VISp", "ZI"]
    scene.add_brain_regions(regions, colors="green")
    scene.close()