Code example #1
async def merge_helper(cg: ChunkedGraph, request: Request):
    # Import numpy under a namespace instead of importing `all` and `abs`,
    # which would shadow the Python builtins of the same name.
    import numpy as np
    from json import loads

    # `ChunkedGraph`, `Request` (an ASGI request object, e.g. Starlette/FastAPI),
    # `exceptions`, `_process_node_info`, and `_remesh` come from module-level
    # imports/definitions not shown here.
    nodes = loads(await request.body())
    assert len(nodes) == 2, "Only 2 points can be merged at this time."

    atomic_edge, coords = _process_node_info(cg, nodes)
    # limit merge span to 3 chunks
    coord0 = cg.get_chunk_coordinates(atomic_edge[0])
    coord1 = cg.get_chunk_coordinates(atomic_edge[1])
    assert np.all(
        np.abs(coord0 - coord1) < 4
    ), "Chebyshev distance exceeded, max 3 chunks."

    try:
        resp = cg.add_edges(
            user_id=request.headers.get("X-Forwarded-User", str(request.client)),
            atomic_edges=np.array(atomic_edge, dtype=np.uint64),
            source_coords=coords[:1],
            sink_coords=coords[1:],
        )
    except exceptions.LockingError as e:
        raise exceptions.InternalServerError(e)
    except exceptions.PreconditionError as e:
        raise exceptions.BadRequest(e)

    assert resp.new_root_ids is not None, "Could not merge selected supervoxels."
    if len(resp.new_lvl2_ids):
        # Re-mesh the affected level-2 chunks so meshes reflect the new edit.
        await _remesh(cg, resp.operation_id, resp.new_lvl2_ids.tolist())
    return resp
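For reference, a minimal client-side sketch of the request body this helper expects, based on the two-node assertion above and the [node_id, x, y, z] layout consumed by _process_node_info (code example #4); the endpoint URL, IDs, and coordinates are all hypothetical:

import requests

# Two points, each [supervoxel_or_parent_id, x, y, z] in world coordinates.
payload = [
    [648518346349539801, 3280.0, 4144.0, 160.0],
    [648518346349539802, 3312.0, 4144.0, 160.0],
]
resp = requests.post(
    "https://example.org/segmentation/my_graph/merge",  # hypothetical endpoint
    json=payload,
)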
Code example #2
def _check_post_options(cg: ChunkedGraph, resp: dict, data: dict,
                        seg_ids: Iterable) -> dict:
    # `toboolean` parses string flags such as "true"/"false"; each option
    # defaults to "false", so fields are added only when explicitly requested.
    from ..utils import toboolean

    if toboolean(data.get("return_seg_ids", "false")):
        resp["seg_ids"] = seg_ids
    if toboolean(data.get("return_seg_id_layers", "false")):
        resp["seg_id_layers"] = cg.get_chunk_layers(seg_ids)
    if toboolean(data.get("return_seg_chunk_coordinates", "false")):
        resp["seg_chunk_coordinates"] = [
            cg.get_chunk_coordinates(seg_id) for seg_id in seg_ids
        ]
    return resp
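A minimal sketch of a call to this helper; the flags arrive as strings and default to "false", so only explicitly requested fields are attached (all IDs below are hypothetical):

resp = {"root_id": 648518346349539862}
data = {"return_seg_ids": "true", "return_seg_id_layers": "true"}
seg_ids = [648518346349539801, 648518346349539802]

resp = _check_post_options(cg, resp, data, seg_ids)
# resp now also contains "seg_ids" and "seg_id_layers", but not
# "seg_chunk_coordinates", which was not requested.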
Code example #3
def preload_datasets(glob_path: str = DATASETS_PATH) -> None:
    from pychunkedgraph.graph.utils.context_managers import TimeIt

    # `CACHE`, `DATASETS_PATH`, `get_datasets`, and `ChunkedGraph` are
    # module-level names in the original code (imports not shown here).
    print(f"loading datasets from {glob_path}")
    for dataset in get_datasets(glob_path):
        graph_id, client_info = dataset
        with TimeIt(f"preloading {graph_id}"):
            CACHE[graph_id] = ChunkedGraph(graph_id=graph_id, client_info=client_info)
            # trigger CloudVolume initialization as well
            print(f"layer count {CACHE[graph_id].meta.layer_count}")
Code example #4
def _process_node_info(cg: ChunkedGraph,
                       nodes: Iterable[Iterable]) -> Tuple[list, list]:
    # `Iterable`/`Tuple` (typing) and `array`/`uint64` (numpy) are imported
    # at module level in the original code.
    atomic_ids = []
    coords = []
    for node in nodes:
        # Each node is [node_id, x, y, z]; x, y, z are world coordinates,
        # converted to voxel space by dividing by the dataset resolution.
        node_id = node[0]
        x, y, z = node[1:]
        coord = array([x, y, z]) / cg.meta.resolution
        atomic_id = cg.get_atomic_id_from_coord(*coord,
                                                parent_id=uint64(node_id))
        assert atomic_id, f"Could not determine supervoxel ID for {coord}."
        coords.append(coord)
        atomic_ids.append(atomic_id)
    return atomic_ids, coords
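A usage sketch with hypothetical IDs and an assumed resolution of (32, 32, 40) nm; world coordinates are divided by cg.meta.resolution before the supervoxel lookup:

nodes = [
    [648518346349539801, 3280.0, 4144.0, 160.0],
    [648518346349539802, 3312.0, 4144.0, 160.0],
]
atomic_ids, coords = _process_node_info(cg, nodes)
# With resolution (32, 32, 40), the first voxel coordinate is
# [3280 / 32, 4144 / 32, 160 / 40] == [102.5, 129.5, 4.0].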
Code example #5
def remesh_task(glob_path: str) -> None:
    from pychunkedgraph.graph import ChunkedGraph
    from pychunkedgraph.graph.exceptions import ChunkedGraphError
    from .utils import remesh_pending
    from ..utils import get_datasets

    for dataset in get_datasets(glob_path=glob_path):
        graph_id, client_info = dataset
        try:
            cg = ChunkedGraph(graph_id=graph_id, client_info=client_info)
        except Exception as e:
            # Chain the original error so the underlying traceback is kept.
            raise ChunkedGraphError(e) from e
        remesh_pending(cg)
Code example #6
async def redo_helper(cg: ChunkedGraph, request: Request):
    from json import loads
    from numpy import uint64

    # `exceptions` and `_remesh` are module-level helpers in the original code.
    operation_id = uint64(loads(await request.body())["operation_id"])
    try:
        resp = cg.redo(
            user_id=request.headers.get("X-Forwarded-User",
                                        str(request.client)),
            operation_id=operation_id,
        )
    except exceptions.LockingError as e:
        raise exceptions.InternalServerError(e)
    except (exceptions.PreconditionError, exceptions.PostconditionError) as e:
        raise exceptions.BadRequest(e)
    if len(resp.new_lvl2_ids):
        await _remesh(cg, resp.operation_id, resp.new_lvl2_ids.tolist())
    return resp
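The redo request body only needs the operation ID of a previously applied edit; a minimal sketch (the ID is hypothetical):

from json import dumps

body = dumps({"operation_id": 20394})
# POSTing this body to the redo endpoint re-applies the recorded edit and,
# as above, triggers remeshing of any newly created level-2 IDs.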
Code example #7
def get_cg(graph_id: str) -> ChunkedGraph:
    from pychunkedgraph.graph.client import get_default_client_info
    from pychunkedgraph.graph.exceptions import ChunkedGraphError

    # Fast path: return an already-initialized instance from the
    # module-level CACHE dict.
    try:
        return CACHE[graph_id]
    except KeyError:
        pass
    try:
        CACHE[graph_id] = ChunkedGraph(
            graph_id=graph_id, client_info=get_default_client_info()
        )
    except Exception as e:
        raise ChunkedGraphError(f"Error initializing ChunkedGraph: {e}.") from e
    return CACHE[graph_id]
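A sketch of the caching behavior with a hypothetical graph ID; the first call constructs and caches the instance, later calls return the same object:

cg_a = get_cg("fly_v31")  # cache miss: constructs and stores the instance
cg_b = get_cg("fly_v31")  # cache hit: no re-initialization
assert cg_a is cg_b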
Code example #8
async def split_helper(cg: ChunkedGraph, request: Request):
    from json import loads

    # `_process_split_request_nodes` (module level, not shown) maps request
    # nodes to supervoxel IDs and voxel coordinates under "sources"/"sinks".
    data_dict = _process_split_request_nodes(cg, loads(await request.body()))
    try:
        resp = cg.remove_edges(
            user_id=request.headers.get("X-Forwarded-User",
                                        str(request.client)),
            source_ids=data_dict["sources"]["id"],
            sink_ids=data_dict["sinks"]["id"],
            source_coords=data_dict["sources"]["coord"],
            sink_coords=data_dict["sinks"]["coord"],
            mincut=True,
        )
    except exceptions.LockingError as e:
        raise exceptions.InternalServerError(e)
    except exceptions.PreconditionError as e:
        raise exceptions.BadRequest(e)

    assert resp.new_root_ids is not None, "Could not split selected segment groups."
    if len(resp.new_lvl2_ids):
        await _remesh(cg, resp.operation_id, resp.new_lvl2_ids.tolist())
    return resp
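For symmetry with the merge sketch above, a hedged guess at the split request body: _process_split_request_nodes is not shown, but the keys it produces suggest a JSON object with "sources" and "sinks" node lists (all IDs and coordinates hypothetical):

payload = {
    "sources": [[648518346349539801, 3280.0, 4144.0, 160.0]],
    "sinks": [[648518346349539802, 3312.0, 4144.0, 160.0]],
}
# remove_edges(mincut=True) then computes a minimum cut that separates
# the source supervoxels from the sink supervoxels.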