Example No. 1
def get_link_edges(mesh, seg_id, datastack_name=None, close_map_distance=300,
                   server_address=None, verbose=False, client=None):
    """function to get a set of edges that should be added to a mesh

    Parameters
    ----------
    mesh : trimesh_io.Mesh
        the mesh to add edges to
    seg_id : np.uint64 or int
        the seg_id to query the PCG endpoint for merges
    dataset_name: str
        the name of the dataset to query
    close_map_distance: int or float
        the maximum distance to map (default 300 in units of mesh.vertices)
    server_address: str
        the url to the root of the CAVE deployment
    verbose: bool
        whether to print debug statements
    client : caveclient.ChunkedGraphClient

    Returns
    -------
    np.array
        link_edges, a Kx2 array of mesh.vertices indices representing edges that should be added to the mesh graph

    """

    # initialize a chunkedgraph client
    if client is None:
        client = CAVEclient(
            datastack_name, server_address=server_address).chunkedgraph

    # the merge log endpoint expects a plain python int
    if isinstance(seg_id, np.integer):
        seg_id = int(seg_id)

    # get the merge log
    merge_log = client.get_merge_log(seg_id)

    return merge_log_edges(mesh, merge_log, client.base_resolution,
                           close_map_distance=close_map_distance,
                           verbose=verbose)
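
A minimal usage sketch follows; the datastack name and seg id are hypothetical placeholders, and it assumes caveclient and meshparty are installed:

# Hypothetical usage of get_link_edges; all ids and names are placeholders.
from caveclient import CAVEclient
from meshparty import trimesh_io

client = CAVEclient("minnie65_public")  # placeholder datastack
mm = trimesh_io.MeshMeta(cv_path=client.info.segmentation_source())
mesh = mm.mesh(seg_id=864691135014128278)  # placeholder root id
link_edges = get_link_edges(mesh, 864691135014128278,
                            client=client.chunkedgraph)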
Example No. 2
def chunk_index_mesh(
    root_id,
    client=None,
    datastack_name=None,
    cv=None,
    return_l2dict=False,
):
    """Download a mesh with chunk index vertices

    Parameters
    ----------
    root_id : int
        Root id to download.
    client : CAVEclient, optional
        Preset CAVEclient, by default None.
    datastack_name : str or None, optional
        Datastack to use to initialize a CAVEclient, by default None.
    cv : cloudvolume.CloudVolume or None, optional
        Cloudvolume instance, by default None.
    return_l2dict : bool, optional
        If True, returns both an l2 id to vertex index dict and its reverse, by default False.

    Returns
    -------
    mesh : trimesh_io.Mesh
        Chunk graph represented as a mesh, with vertices at chunk index locations and edges in the link_edges attribute.
    l2dict_mesh : dict
        l2 id to mesh vertex index dictionary. Only returned if return_l2dict is True.
    l2dict_r_mesh : dict
        Mesh vertex index to l2 id dictionary. Only returned if return_l2dict is True.
    """
    if client is None:
        client = CAVEclient(datastack_name)
    if cv is None:
        cv = cloudvolume.CloudVolume(
            client.info.segmentation_source(),
            use_https=True,
            progress=False,
            bounded=False,
            fill_missing=True,
            secrets={"token": client.auth.token},
        )
    lvl2_eg = client.chunkedgraph.level2_chunk_graph(root_id)
    eg, l2dict_mesh, l2dict_r_mesh, x_ch = build_spatial_graph(lvl2_eg, cv)
    mesh_chunk = trimesh_io.Mesh(
        vertices=x_ch,
        faces=[[0, 0, 0]],  # Some functions fail if no faces are set.
        link_edges=eg,
    )
    if return_l2dict:
        return mesh_chunk, l2dict_mesh, l2dict_r_mesh
    else:
        return mesh_chunk
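
A usage sketch with placeholder values:

# Sketch: fetch the level 2 chunk graph of a (placeholder) root id as a mesh.
mesh_ch, l2d, l2d_r = chunk_index_mesh(
    864691135014128278,                # placeholder root id
    datastack_name="minnie65_public",  # placeholder datastack
    return_l2dict=True,
)
print(mesh_ch.vertices.shape, len(mesh_ch.link_edges))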
Example No. 3
def myclient():
    url_template = endpoints.infoservice_endpoints_v2["datastack_info"]
    mapping = {
        "i_server_address": TEST_GLOBAL_SERVER,
        "datastack_name": TEST_DATASTACK
    }
    url = url_template.format_map(mapping)

    responses.add(responses.GET, url, json=test_info, status=200)

    client = CAVEclient(TEST_DATASTACK, server_address=TEST_GLOBAL_SERVER)
    return client
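
The responses.add call above only intercepts requests while the responses mock is active, so one common pattern is to wrap this helper as a pytest fixture (a sketch, assuming pytest and responses; the fixture name is illustrative):

import pytest
import responses

@pytest.fixture
@responses.activate
def client_fixture():
    # The mocked info endpoint registered in myclient() is only live
    # inside this activated context, which covers client initialization.
    return myclient()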
Example No. 4
    def __init__(self,
                 image_source=None,
                 segmentation_source=None,
                 datastack_name=None,
                 server_address=None,
                 base_resolution='image',
                 image_mip=0,
                 segmentation_mip=0,
                 segmentation=True,
                 imagery=True,
                 framework_client=None,
                 auth_token=None,
                 timestamp=None):

        self._image_source = image_source
        self._segmentation_source = segmentation_source

        if framework_client is not None:
            self._client = framework_client
        elif datastack_name is not None:
            try:
                from caveclient import CAVEclient
            except ImportError:
                raise ImportError(
                    'You need to install caveclient to use this functionality')
            self._client = CAVEclient(datastack_name,
                                      server_address=server_address)
        else:
            self._client = None

        self._auth_token = None
        if auth_token is not None:
            self._auth_token = auth_token

        if isinstance(base_resolution, str):
            if base_resolution in ['image', 'segmentation']:
                self._base_resolution = base_resolution
            else:
                raise ValueError(
                    'Base resolution must be array-like, "image" or "segmentation"'
                )
        else:
            self._base_resolution = np.array(base_resolution)

        self._timestamp = timestamp
        self._base_imagery_mip = image_mip
        self._base_segmentation_mip = segmentation_mip

        self._use_segmentation = segmentation
        self._use_imagery = imagery
        self._img_cv = None
        self._seg_cv = None
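
A construction sketch, assuming this __init__ belongs to an imagery/segmentation helper class, here called ImageryClient purely for illustration:

# Hypothetical: assumes the class defined above is named ImageryClient.
ic = ImageryClient(
    datastack_name="minnie65_public",  # placeholder datastack
    base_resolution="image",
    image_mip=0,
    segmentation_mip=0,
)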
Example No. 5
def get_cave_client(datastack='cortex65'):
    """Get caveclient for given datastack.

    Parameters
    ----------
    datastack :     "cortex65" | "cortex35" | "layer 2/3"
                    Name of the dataset to use.

    """
    if not CAVEclient:
        raise ImportError(err_msg)

    # Try mapping, else pass-through
    datastack = CAVE_DATASTACKS.get(datastack, datastack)
    return CAVEclient(datastack)
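
A short usage sketch; the table listing call is standard caveclient API:

# Resolve a friendly alias to a CAVEclient and list annotation tables.
client = get_cave_client("cortex65")
print(client.materialize.get_tables())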
Example No. 6
def datastack_view(datastackname):
    datastack = DataStackService.get_datastack_by_name(datastackname)
    if datastack.viewer_resolution_x is not None:
        resolution = [
            datastack.viewer_resolution_x,
            datastack.viewer_resolution_y,
            datastack.viewer_resolution_z,
        ]
    else:
        resolution = [4, 4, 40]

    if datastack.base_link_id is not None:
        client = CAVEclient(
            auth_token=current_app.config.get('AUTH_TOKEN', None))
        base_state = client.state.get_state_json(datastack.base_link_id)
    else:
        base_state = None

    img_layer = ImageLayerConfig(
        name="img",
        source=datastack.aligned_volume.image_source,
        contrast_controls=True,
        black=0.0,
        white=1.0,
    )
    # we want the segmentation layer with our target neuron always on
    seg_layer = SegmentationLayerConfig(name="seg",
                                        source=datastack.segmentation_source)
    ann_layer = AnnotationLayerConfig(name="ann")

    # setup a state builder with this layer pipeline
    sb = StateBuilder([img_layer, seg_layer, ann_layer],
                      base_state=base_state,
                      resolution=resolution)

    if datastack.viewer_site is not None:
        site = datastack.viewer_site
    else:
        site = current_app.config["NEUROGLANCER_URL"]
    ng_url = sb.render_state(return_as="url", url_prefix=site)

    return render_template(
        "datastack.html",
        datastack=datastack,
        is_admin=g.auth_user["admin"],
        ng_url=ng_url,
        version=__version__,
    )
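
A view like this is typically registered on a Flask route; a sketch in which the URL rule is an assumption:

# Hypothetical route registration; the URL pattern is illustrative only.
from flask import Flask

app = Flask(__name__)
app.add_url_rule("/datastacks/<datastackname>", view_func=datastack_view)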
Example No. 7
def pcg_meshwork(
    root_id,
    datastack_name=None,
    client=None,
    cv=None,
    refine="all",
    root_point=None,
    root_point_resolution=None,
    root_point_search_radius=300,
    collapse_soma=False,
    collapse_radius=DEFAULT_COLLAPSE_RADIUS,
    synapses=None,
    synapse_table=None,
    remove_self_synapse=True,
    live_query=False,
    timestamp=None,
    invalidation_d=3,
    segmentation_fallback=False,
    fallback_mip=2,
    cache=None,
    save_to_cache=False,
    n_parallel=None,
):
    """Generate a meshwork file based on the level 2 graph.

    Parameters
    ----------
    root_id : int
        Root id of an object in the pychunkedgraph.
    datastack_name : str or None, optional
        Datastack name to use to initialize a client, if none is provided. By default None.
    client : caveclient.CAVEclientFull or None, optional
        Initialized CAVE client. If None is given, will use the datastack_name to create one. By default None
    cv : cloudvolume.CloudVolume or None, optional
        Initialized cloudvolume. If none is given, the client info will be used to create one. By default None
    refine : 'all', 'ep', 'bp', 'epbp'/'bpep', 'chunk', or None, optional
        Selects how to refine vertex locations by downloading mesh chunks.
        Unrefined vertices are placed in the center of their chunk in euclidean space.
        * 'all' refines all vertex locations. (Default)
        * 'ep' refines end points only
        * 'bp' refines branch points only
        * 'bpep' or 'epbp' refines both branch and end points.
        * 'chunk' keeps vertices in chunk index space.
        * None refines no points but maps them to the center of the chunk in euclidean space.
    root_point : array-like or None, optional
        3 element xyz location for the location to set the root in units set by root_point_resolution,
        by default None. If None, a distal tip is selected.
    root_point_resolution : array-like, optional
        Resolution in euclidean space of the root_point, by default the mip 0
        resolution of the segmentation cloudvolume.
    root_point_search_radius : int, optional
        Distance in euclidean space to look for segmentation when finding the root vertex, by default 300
    collapse_soma : bool, optional,
        If True, collapses vertices within a given radius of the root point into the root vertex, typically to better
        represent primary neurite branches. Requires a specified root_point. Default is False.
    collapse_radius : float, optional
        Max distance in euclidean space for soma collapse. Default is 10,000 nm (10 microns).
    synapses : 'pre', 'post', 'all', or None, optional
        If not None, queries the synapse_table for presynaptic sites (if 'pre'), postsynaptic sites (if 'post'), or both (if 'all'). By default None
    synapse_table : str, optional
        Name of the synapse table to query if synapses are requested, by default None
    remove_self_synapse : bool, optional
        If True, filters out synapses whose pre- and postsynaptic root ids are the same neuron, by default True
    live_query : bool, optional
        If True, makes a live (timestamped) synapse query rather than using the materialized tables, by default False
    timestamp : datetime.datetime or None, optional
        Timestamp used for live synapse queries, by default None
    invalidation_d : int, optional
        Invalidation radius in hops for the mesh skeletonization along the chunk adjacency graph, by default 3
    segmentation_fallback : bool, optional
        Passed to the skeletonization; if True, the segmentation is used as a fallback for vertex refinement, by default False
    fallback_mip : int, optional
        Mip level of the segmentation used for the fallback, by default 2
    cache : str or None, optional
        Filename of a sqlite database with cached lookups for l2 ids. Optional, default is None.
    save_to_cache : bool, optional
        If True, saves new l2 id lookups to the cache, by default False
    n_parallel : int, optional
        Number of parallel downloads passed to cloudvolume, by default 1

    Returns
    -------
    meshparty.meshwork.Meshwork
        Meshwork object with skeleton based on the level 2 graph. See documentation for details.
    """

    if client is None:
        client = CAVEclient(datastack_name)
    if n_parallel is None:
        n_parallel = 1
    if cv is None:
        cv = cloudvolume.CloudVolume(
            client.info.segmentation_source(),
            parallel=n_parallel,
            use_https=True,
            progress=False,
            bounded=False,
            fill_missing=True,
            secrets={"token": client.auth.token},
        )
    if root_point_resolution is None:
        root_point_resolution = cv.mip_resolution(0)

    sk_l2, mesh_chunk, (l2dict_mesh, l2dict_mesh_r) = pcg_skeleton(
        root_id,
        client=client,
        cv=cv,
        root_point=root_point,
        root_point_resolution=root_point_resolution,
        root_point_search_radius=root_point_search_radius,
        collapse_soma=collapse_soma,
        collapse_radius=collapse_radius,
        refine=refine,
        invalidation_d=invalidation_d,
        n_parallel=n_parallel,
        return_mesh=True,
        return_l2dict_mesh=True,
        segmentation_fallback=segmentation_fallback,
        fallback_mip=fallback_mip,
        cache=cache,
        save_to_cache=save_to_cache,
    )

    nrn = meshwork.Meshwork(mesh_chunk, seg_id=root_id, skeleton=sk_l2)

    if synapses is not None and synapse_table is not None:
        if synapses == "pre":
            pre, post = True, False
        elif synapses == "post":
            pre, post = False, True
        elif synapses == "all":
            pre, post = True, True
        else:
            raise ValueError(
                'Synapses must be one of "pre", "post", or "all".')

        pre_syn_df, post_syn_df = pcg_anno.get_level2_synapses(
            root_id,
            l2dict_mesh,
            client,
            synapse_table,
            remove_self=remove_self_synapse,
            pre=pre,
            post=post,
            live_query=live_query,
            timestamp=timestamp,
        )
        if pre_syn_df is not None:
            nrn.anno.add_annotations(
                "pre_syn",
                pre_syn_df,
                index_column="pre_pt_mesh_ind",
                point_column="ctr_pt_position",
            )
        if post_syn_df is not None:
            nrn.anno.add_annotations(
                "post_syn",
                post_syn_df,
                index_column="post_pt_mesh_ind",
                point_column="ctr_pt_position",
            )

    lvl2_df = pd.DataFrame({
        "lvl2_id": list(l2dict_mesh.keys()),
        "mesh_ind": list(l2dict_mesh.values())
    })
    nrn.anno.add_annotations("lvl2_ids", lvl2_df, index_column="mesh_ind")

    if refine != "chunk":
        _adjust_meshwork(nrn, cv)

    return nrn
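
A usage sketch; the root id, datastack, synapse table, and root point are all placeholders:

# Sketch: build a meshwork with synapse annotations attached.
nrn = pcg_meshwork(
    864691135014128278,                  # placeholder root id
    datastack_name="minnie65_public",    # placeholder datastack
    synapses="all",
    synapse_table="synapses_pni_2",      # placeholder synapse table
    root_point=[177408, 204288, 21002],  # placeholder soma location
    collapse_soma=True,
)
print(len(nrn.anno.pre_syn.df), len(nrn.anno.post_syn.df))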
Example No. 8
def chunk_index_skeleton(
    root_id,
    client=None,
    datastack_name=None,
    cv=None,
    root_point=None,
    invalidation_d=3,
    return_mesh=False,
    return_l2dict=False,
    return_mesh_l2dict=False,
    root_point_resolution=None,
    root_point_search_radius=300,
    n_parallel=1,
):
    """Generate a basic skeleton with chunked-graph index vertices.

    Parameters
    ----------
    root_id : np.uint64
        Neuron root id
    client : caveclient.CAVEclient, optional
        CAVEclient for a datastack, by default None. If None, you must specify a datastack name.
    datastack_name : str, optional
        Datastack name to create a CAVEclient, by default None. Only used if client is None.
    cv : cloudvolume.CloudVolume, optional
        CloudVolume associated with the object, by default None. If None, one is created based on the client info.
    root_point : array, optional
        Point in voxel space to set the root vertex. By default None, which makes a random tip root.
    invalidation_d : int, optional
        TEASAR invalidation radius in chunk space, by default 3
    return_mesh : bool, optional
        If True, returns the pre-skeletonization mesh with vertices in chunk index space, by default False
    return_l2dict : bool, optional
        If True, returns the level 2 id to skeleton vertex index dict, by default False
    return_mesh_l2dict : bool, optional
        If True, returns the level 2 id to mesh vertex index dict, by default False
    root_point_resolution : array-like, optional
        Resolution of the root_point coordinates, by default the mip 0 resolution of the cloudvolume
    root_point_search_radius : int, optional
        Distance to search for a level 2 chunk near the root_point, by default 300
    n_parallel : int, optional
        Sets number of parallel threads for cloudvolume, by default 1

    Returns
    -------
    sk : meshparty.skeleton.Skeleton
        Skeleton object
    mesh : meshparty.trimesh_io.Mesh
        Mesh object, only if return_mesh is True
    (l2dict, l2dict_r) : (dict, dict)
        Level 2 id to skeleton vertex maps (forward and reverse), only if return_l2dict is True.
    (l2dict_mesh, l2dict_r_mesh) : (dict, dict)
        Level 2 id to mesh vertex maps (forward and reverse), only if return_mesh_l2dict is True.
    """
    if client is None:
        client = CAVEclient(datastack_name)
    if n_parallel is None:
        n_parallel = 1
    if cv is None:
        cv = cloudvolume.CloudVolume(
            client.info.segmentation_source(),
            parallel=n_parallel,
            use_https=True,
            progress=False,
            bounded=False,
            fill_missing=True,
            secrets={"token": client.auth.token},
        )

    if root_point_resolution is None:
        root_point_resolution = cv.mip_resolution(0)

    mesh_chunk, l2dict_mesh, l2dict_r_mesh = chunk_index_mesh(
        root_id, client=client, cv=cv, return_l2dict=True)

    if root_point is not None:
        lvl2_root_chid, lvl2_root_loc = chunk_tools.get_closest_lvl2_chunk(
            root_point,
            root_id,
            client=client,
            cv=None,
            radius=root_point_search_radius,
            voxel_resolution=root_point_resolution,
            return_point=True,
        )  # Need to have cv=None because of a cloudvolume inconsistency
        root_mesh_index = l2dict_mesh[lvl2_root_chid]
    else:
        root_mesh_index = None

    metameta = {"space": "chunk", "datastack": client.datastack_name}
    sk_ch = skeletonize.skeletonize_mesh(
        mesh_chunk,
        invalidation_d=invalidation_d,
        collapse_soma=False,
        compute_radius=False,
        cc_vertex_thresh=0,
        root_index=root_mesh_index,
        remove_zero_length_edges=False,
        meta={
            "root_id": root_id,
            "skeleton_type": skeleton_type,
            "meta": metameta,
        },
    )

    l2dict, l2dict_r = sk_utils.filter_l2dict(sk_ch, l2dict_r_mesh)

    out_list = [sk_ch]
    if return_mesh:
        out_list.append(mesh_chunk)
    if return_l2dict:
        out_list.append((l2dict, l2dict_r))
    if return_mesh_l2dict:
        out_list.append((l2dict_mesh, l2dict_r_mesh))
    if len(out_list) == 1:
        return out_list[0]
    else:
        return tuple(out_list)
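
A usage sketch with placeholder values:

# Sketch: chunk-index skeleton plus mesh and the l2 id mappings.
sk, mesh_ch, (l2d, l2d_r) = chunk_index_skeleton(
    864691135014128278,                # placeholder root id
    datastack_name="minnie65_public",  # placeholder datastack
    return_mesh=True,
    return_l2dict=True,
)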
Example No. 9
    Returns
    -------
    sk_l2 : meshparty.skeleton.Skeleton
        Skeleton with vertices in euclidean space
    mesh_l2 : meshparty.mesh.Mesh, optional
        Mesh with vertices in chunk index space. Only if return_mesh is True.
    (l2dict, l2dict_r) : (dict, dict), optional
        Mappings between level 2 ids and skeleton indices. Only if return_l2dict is True.
    (l2dict_mesh, l2dict_mesh_r) : (dict, dict), optional
        Mappings between level 2 ids and mesh indices. Only if return_l2dict_mesh is True.
    missing_ids : np.array, optional
        List of level 2 ids with missing mesh fragments. Only if return_missing_ids is True.
    """
    if client is None:
        client = CAVEclient(datastack_name)
    if n_parallel is None:
        n_parallel = 1
    if cv is None:
        cv = cloudvolume.CloudVolume(
            client.info.segmentation_source(),
            parallel=n_parallel,
            fill_missing=True,
            use_https=True,
            progress=False,
            bounded=False,
            secrets={"token": client.auth.token},
        )

    if root_point_resolution is None:
        root_point_resolution = cv.mip_resolution(0)
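
The setup and Returns section of this fragment match pcg_skel.pcg_skeleton as it is called in Examples No. 7, 10, and 11; a usage sketch under that assumption, with placeholder values:

# Assumes the surrounding function is pcg_skel.pcg_skeleton.
import pcg_skel

sk_l2, (l2d, l2d_r) = pcg_skel.pcg_skeleton(
    864691135014128278,                # placeholder root id
    datastack_name="minnie65_public",  # placeholder datastack
    refine="all",
    return_l2dict=True,
)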
Example No. 10
def generate_lvl2_paths(
    datastack,
    root_id,
    server_address,
    root_point=None,
    root_point_resolution=GUIDEBOOK_EXPECTED_RESOLUTION,
    n_choice="all",
    segment_length_thresh=0,
    spacing=PATH_SPACING,
    interp_method="linear",
    selection_point=None,
    downstream=True,
    invalidation_d=3,
    collapse_soma=True,
    n_parallel=1,
    point_radius=300,
    root_id_from_point=False,
    auth_token_key=None,
    return_as="url",
    verbose=True,
):
    if verbose:
        t0 = time.time()

    client = CAVEclient(datastack,
                        server_address=server_address,
                        auth_token_key=auth_token_key)

    if root_id_from_point and root_id is None:
        root_id = get_root_id_from_point(root_point, root_point_resolution,
                                         client)
        if root_id == 0:
            raise ValueError("Root point was not on any segmentation")

    l2_sk, (l2dict, l2dict_r) = pcg_skel.pcg_skeleton(
        root_id,
        client=client,
        refine="all",
        root_point=root_point,
        root_point_resolution=root_point_resolution,
        collapse_soma=collapse_soma,
        collapse_radius=10_000,
        nan_rounds=20,
        return_l2dict=True,
        invalidation_d=invalidation_d,
        root_point_search_radius=point_radius,
        segmentation_fallback=False,
        n_parallel=n_parallel,
    )

    if selection_point is not None:
        l2_sk, _ = mask_skeleton(
            root_id,
            l2_sk,
            l2dict,
            selection_point=selection_point,
            downstream=downstream,
            client=client,
            voxel_resolution=root_point_resolution,
            radius=point_radius,
        )

    dfs, sbs = construct_cover_paths(
        l2_sk,
        n_choice,
        spacing,
        root_id,
        segment_length_thresh,
        client,
        root_point_resolution,
        interp_method,
        selection_point,
    )

    csb = sb.ChainedStateBuilder(sbs)
    if verbose:
        print("\nComplete time: ", time.time() - t0)

    return csb.render_state(dfs,
                            return_as=return_as,
                            url_prefix=client.info.viewer_site())
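
A usage sketch; the datastack, server address, root id, and coordinates are placeholders:

url = generate_lvl2_paths(
    "minnie65_public",                   # placeholder datastack
    864691135014128278,                  # placeholder root id
    "https://global.daf-apis.com",       # placeholder server address
    root_point=[177408, 204288, 21002],  # placeholder root point
    return_as="url",
)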
Example No. 11
def generate_lvl2_proofreading(
    datastack,
    root_id,
    server_address,
    root_point=None,
    root_point_resolution=GUIDEBOOK_EXPECTED_RESOLUTION,
    refine_branch_points=True,
    refine_end_points=True,
    point_radius=300,
    invalidation_d=3,
    collapse_soma=True,
    return_as="url",
    verbose=True,
    segmentation_fallback=True,
    selection_point=None,
    downstream=True,
    n_parallel=1,
    root_id_from_point=False,
    auth_token_key=None,
):
    if verbose:
        t0 = time.time()
    client = CAVEclient(datastack,
                        server_address=server_address,
                        auth_token_key=auth_token_key)

    if refine_end_points and refine_branch_points:
        refine = "bpep"
    elif refine_branch_points:
        refine = "bp"
    elif refine_end_points:
        refine = "ep"
    else:
        refine = None

    if root_id_from_point and root_id is None:
        root_id = get_root_id_from_point(root_point, root_point_resolution,
                                         client)
        if root_id == 0:
            raise ValueError("Root point was not on any segmentation")

    l2_sk, (l2dict, l2dict_r) = pcg_skel.pcg_skeleton(
        root_id,
        client=client,
        refine=refine,
        root_point=root_point,
        root_point_resolution=root_point_resolution,
        collapse_soma=collapse_soma,
        collapse_radius=10_000,
        nan_rounds=None,
        return_l2dict=True,
        invalidation_d=invalidation_d,
        root_point_search_radius=point_radius,
        segmentation_fallback=segmentation_fallback,
        n_parallel=n_parallel,
    )

    if selection_point is not None:
        l2_sk, selection_l2id = mask_skeleton(
            root_id,
            l2_sk,
            l2dict,
            selection_point=selection_point,
            downstream=downstream,
            client=client,
            voxel_resolution=root_point_resolution,
            radius=point_radius,
        )
        selection_skinds = l2_sk.filter_unmasked_indices(
            np.array([l2dict[selection_l2id]]))
    else:
        selection_skinds = []

    sbs, dfs = topo_point_construction(
        l2_sk,
        l2dict,
        root_id,
        root_point,
        root_point_resolution,
        refine_branch_points,
        refine_end_points,
        selection_point,
        selection_skinds,
        downstream,
        client,
    )

    sb_pf = sb.ChainedStateBuilder(sbs)
    if verbose:
        print("\nComplete time: ", time.time() - t0)
    return sb_pf.render_state(dfs,
                              return_as=return_as,
                              url_prefix=client.info.viewer_site())
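
A usage sketch with the same placeholder caveats as above:

url = generate_lvl2_proofreading(
    "minnie65_public",                   # placeholder datastack
    864691135014128278,                  # placeholder root id
    "https://global.daf-apis.com",       # placeholder server address
    root_point=[177408, 204288, 21002],  # placeholder root point
    refine_branch_points=True,
    refine_end_points=True,
)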
Example No. 12
def coord_space_meshwork(
    root_id,
    datastack_name=None,
    client=None,
    cv=None,
    root_point=None,
    root_point_resolution=None,
    collapse_soma=False,
    collapse_radius=DEFAULT_COLLAPSE_RADIUS,
    synapses=None,
    synapse_table=None,
    remove_self_synapse=True,
    live_query=False,
    timestamp=None,
    invalidation_d=DEFAULT_INVALIDATION_D,
    require_complete=False,
):
    if client is None:
        client = CAVEclient(datastack_name)
    if cv is None:
        cv = client.info.segmentation_cloudvolume(progress=True, parallel=1)
    if root_point_resolution is None:
        root_point_resolution = cv.mip_resolution(0)

    sk, mesh, (l2dict_mesh, l2dict_mesh_r) = coord_space_skeleton(
        root_id,
        client=client,
        cv=cv,
        root_point=root_point,
        root_point_resolution=root_point_resolution,
        collapse_soma=collapse_soma,
        collapse_radius=collapse_radius,
        invalidation_d=invalidation_d,
        return_mesh=True,
        return_l2dict_mesh=True,
        require_complete=require_complete,
    )

    nrn = meshwork.Meshwork(mesh, seg_id=root_id, skeleton=sk)

    pre, post = False, False
    if synapses is not None and synapse_table is not None:
        if synapses == "pre":
            pre, post = True, False
        elif synapses == "post":
            pre, post = False, True
        elif synapses == "all":
            pre, post = True, True
        else:
            raise ValueError(
                'Synapses must be one of "pre", "post", or "all".')

        if not timestamp:
            timestamp = client.materialize.get_timestamp()

        features.add_synapses(
            nrn,
            synapse_table,
            l2dict_mesh,
            client,
            root_id=root_id,
            pre=pre,
            post=post,
            remove_self_synapse=remove_self_synapse,
            timestamp=timestamp,
            live_query=live_query,
        )

    features.add_lvl2_ids(nrn, l2dict_mesh)
    return nrn
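
A usage sketch; the root id, datastack, and synapse table are placeholders:

nrn = coord_space_meshwork(
    864691135014128278,                # placeholder root id
    datastack_name="minnie65_public",  # placeholder datastack
    synapses="all",
    synapse_table="synapses_pni_2",    # placeholder synapse table
)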