Example no. 1
def rewrite_single_image_block(coordinate, block_size, from_cv=None, to_cv=None,
                               from_url=None, to_url=None, mip=None):
    if from_cv is None:
        assert from_url is not None and mip is not None
        from_cv = cloudvolume.CloudVolume(from_url, mip=mip)

    if to_cv is None:
        assert to_url is not None and mip is not None
        assert 'svenmd' in to_url
        to_cv = cloudvolume.CloudVolume(to_url, bounded=False, mip=mip,
                                        compress=False)

    x_start = coordinate[0]
    x_end = coordinate[0] + block_size[0]
    y_start = coordinate[1]
    y_end = coordinate[1] + block_size[1]
    z_start = coordinate[2]
    z_end = coordinate[2] + block_size[2]

    bbox = to_cv.bounds.to_list()[3:]
    if x_end > bbox[0]:
        x_end = bbox[0]

    if y_end > bbox[1]:
        y_end = bbox[1]

    if z_end > bbox[2]:
        z_end = bbox[2]

    print(x_start, y_start, z_start, x_end, y_end, z_end)

    img = from_cv[x_start: x_end, y_start: y_end, z_start: z_end]
    to_cv[x_start: x_end, y_start: y_end, z_start: z_end] = img
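
The block below is a minimal call sketch for the function above; the bucket URLs, block size, and mip level are placeholders rather than values from the original project.

# Hypothetical usage sketch: copy one block between two made-up precomputed layers.
# The URLs and mip are placeholders; to_url must contain 'svenmd' to pass the assert.
rewrite_single_image_block(coordinate=(0, 0, 0),
                           block_size=(512, 512, 64),
                           from_url='gs://example-bucket/image',
                           to_url='gs://example-bucket/svenmd/image_copy',
                           mip=0)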
Example no. 2
def readbbox(cvpath,
             bbox,
             agglomerate=False,
             parallel=1,
             progress=False,
             volumes=0,
             resolution=(8, 8, 40),
             timestamp=None):
    """Main function for reading from cloudvolume"""
    if agglomerate:
        cv = cloudvolume.CloudVolume(cvpath,
                                     mip=resolution,
                                     agglomerate=True,
                                     parallel=parallel,
                                     progress=progress)
    else:
        cv = cloudvolume.CloudVolume(cvpath,
                                     mip=resolution,
                                     parallel=parallel,
                                     progress=progress)

    cv.fill_missing = True
    cv.bounded = False

    data = cv.download(bbox, timestamp=timestamp)

    return data[..., volumes].transpose((2, 1, 0))
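
As a usage sketch (the path and coordinates are placeholders, and the resolution must match a mip that actually exists in the layer), note that the cutout comes back transposed to (z, y, x):

# Hypothetical usage sketch for readbbox; the path and bounds are placeholders.
import cloudvolume

bbox = cloudvolume.Bbox((1000, 1000, 100), (1256, 1256, 116))
cutout = readbbox('gs://example-bucket/segmentation', bbox,
                  resolution=(8, 8, 40), parallel=1, progress=True)
print(cutout.shape)  # (16, 256, 256): z, y, x after the final transpose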
Example no. 3
def _write_flat_segmentation_thread(args):
    """ Helper of write_flat_segmentation """
    cg_info, start_block, end_block, from_url, to_url, mip = args

    assert 'segmentation' in to_url
    assert 'svenmd' in to_url

    from_cv = cloudvolume.CloudVolume(from_url, mip=mip)
    to_cv = cloudvolume.CloudVolume(to_url, mip=mip)

    cg = chunkedgraph.ChunkedGraph(table_id=cg_info["table_id"],
                                   instance_id=cg_info["instance_id"],
                                   project_id=cg_info["project_id"],
                                   credentials=cg_info["credentials"])

    for block_z in range(start_block[2], end_block[2]):
        z_start = block_z * cg.chunk_size[2]
        z_end = (block_z + 1) * cg.chunk_size[2]
        for block_y in range(start_block[1], end_block[1]):
            y_start = block_y * cg.chunk_size[1]
            y_end = (block_y + 1) * cg.chunk_size[1]
            for block_x in range(start_block[0], end_block[0]):
                x_start = block_x * cg.chunk_size[0]
                x_end = (block_x + 1) * cg.chunk_size[0]

                block = from_cv[x_start:x_end, y_start:y_end, z_start:z_end]

                _, remapped_block = get_sv_to_root_id_mapping_chunk(
                    cg, [x_start, y_start, z_start], block)

                to_cv[x_start:x_end, y_start:y_end,
                      z_start:z_end] = remapped_block
Example no. 4
def update_mips(cv_path, bb, **kwargs):
    cvol = cloudvolume.CloudVolume('file://' + cv_path, mip=0, **kwargs)
    last_res = cvol.info['scales'][0]['resolution']
    data = cvol[bb]
    for m, s in enumerate(cvol.info['scales']):
        if m == 0:
            continue
        curr_res = np.array(s['resolution'])
        scaling = curr_res // last_res
        last_res = curr_res
        bb = bb // scaling
        data = data[::scaling[0], ::scaling[1], ::scaling[2]]
        cvol = cloudvolume.CloudVolume('file://' + cv_path, mip=m, **kwargs)
        cvol[bb] = data
Example no. 5
def add_mip(vol_path, factor):
  c_path = 'file://%s' % vol_path
  vol = cloudvolume.CloudVolume(c_path, parallel=True)
  max_mip = len(vol.info['scales'])
  #new_vol = cloudvolume.CloudVolume('file://%s' % vol_path, mip=max_mip, parallel=True)
  new_info = vol.add_scale([i**max_mip for i in factor])
  print(vol.info)
  vol.commit_info()
  
  from_vol = cloudvolume.CloudVolume(c_path, mip=max_mip-1, parallel=True, progress=True)
  np_vol = np.asarray(from_vol[::factor[0], ::factor[1], ::factor[2]])
  to_vol = cloudvolume.CloudVolume(c_path, mip=max_mip, parallel=True, progress=True, info=vol.info)
  to_vol[:,:,:] = np_vol
  pass
Example no. 6
def _rewrite_image_thread(args):
    start_coordinates, end_coordinates, block_size, from_url, to_url, mip = args

    from_cv = cloudvolume.CloudVolume(from_url, mip=mip)
    to_cv = cloudvolume.CloudVolume(to_url, bounded=False, mip=mip)

    assert 'svenmd' in to_url

    coordinate_iter = itertools.product(np.arange(start_coordinates[0], end_coordinates[0], block_size[0]),
                                        np.arange(start_coordinates[1], end_coordinates[1], block_size[1]),
                                        np.arange(start_coordinates[2], end_coordinates[2], block_size[2]))

    for coordinate in coordinate_iter:
        rewrite_single_image_block(coordinate, block_size, from_cv=from_cv,
                                   to_cv=to_cv)
Example no. 7
def agglomerate(cv_path_1,
                cv_path_2,
                contiguous=False,
                inplace=False,
                no_zero=True):
    """Given two cloudvolumes, intersect and perform agglomeration"""
    cv_args = dict(bounded=True,
                   fill_missing=True,
                   autocrop=False,
                   cache=False,
                   compress_cache=None,
                   cdn_cache=False,
                   progress=False,
                   provenance=None,
                   compress=True,
                   non_aligned_writes=True,
                   parallel=True)

    cv1 = cloudvolume.CloudVolume('file://' + cv_path_1, mip=0, **cv_args)
    cv2 = cloudvolume.CloudVolume('file://' + cv_path_2, mip=0, **cv_args)

    bb1 = get_bbox_from_cv(cv1)
    bb2 = get_bbox_from_cv(cv2)

    int_bb = Bbox.intersection(bb1, bb2)
    data_1 = cv1[int_bb]
    data_2 = cv2[int_bb]
    if contiguous:
        data_1, map_1 = make_labels_contiguous(data_1)
        data_2, map_2 = make_labels_contiguous(data_2)

    data_1 = np.uint32(data_1)
    data_2 = np.uint32(data_2)
    # find remap from 2 to 1
    remap_label = find_remap(data_2, data_1)
    if no_zero:
        # filter out ones with either key or val == 0
        remap_label = {
            k: v
            for k, v in remap_label.items() if k != 0 and v != 0
        }
    data_2_full = cv2[bb2]
    data_2_full_remap = perform_remap(data_2_full, remap_label)
    if inplace:
        cv2[bb2] = data_2_full_remap
        update_mips(cv_path_2, bb2, **cv_args)

    return remap_label
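
A usage sketch, with placeholder local paths; `agglomerate` returns the label remap found in the overlapping region and, with `inplace=True`, would also write the remapped labels back to the second volume:

# Hypothetical usage sketch: both directory paths are placeholders.
remap = agglomerate('/data/seg_block_a', '/data/seg_block_b',
                    contiguous=True, inplace=False)
print(len(remap), 'labels in volume 2 remapped onto volume 1')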
Example no. 8
def init_seg_volume(cv_name,
                    resolution,
                    vol_size,
                    description,
                    owners,
                    offset=(0, 0, 0),
                    sources=None,
                    chunk_size=(64, 64, 64)):
    """ Initialize a CloudVolume for use as a cleft segmentation. """

    info = cloudvolume.CloudVolume.create_new_info(1,
                                                   "segmentation",
                                                   "uint32",
                                                   "raw",
                                                   resolution,
                                                   offset,
                                                   vol_size,
                                                   chunk_size=chunk_size)

    cv = cloudvolume.CloudVolume(cv_name, mip=0, info=info)

    cv.provenance["owners"] = owners
    cv.provenance["description"] = description

    if sources is not None:
        cv.provenance["sources"] = sources

    cv.commit_info()
    cv.commit_provenance()

    return cv
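
A minimal sketch of creating such a volume; the bucket path, resolution, and sizes below are placeholders, not values from the original project.

# Hypothetical usage sketch: all values below are placeholders.
cv = init_seg_volume('gs://example-bucket/clefts',
                     resolution=(8, 8, 40),
                     vol_size=(1024, 1024, 256),
                     description='example cleft segmentation',
                     owners=['someone@example.org'])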
Example no. 9
    def __init__(self, parallel=5, **kwargs):
        # Lazy initialization of volume
        url = 'graphene://*****:*****@type':
                                            'neuroglancer_legacy_mesh',
                                            'scales': [1, 1, 1]
                                        })
        self.server.start()

        DEFAULTS = dict(name='fanc-meshes', type='segmentation')
        DEFAULTS.update(kwargs)

        super().__init__(source=f'precomputed://{self.server.url}/fanc',
                         **DEFAULTS)
Example no. 10
def ingest(data, opt, progress=True):
    # Neuroglancer format
    data = to_tensor(data)
    data = data.transpose((3, 2, 1, 0))
    num_channels = data.shape[-1]
    shape = data.shape[:-1]

    # Offset
    offset = opt.begin if opt.offset is None else opt.offset

    # Create info
    info = make_info(num_channels,
                     opt.vol_type,
                     str(data.dtype),
                     shape,
                     opt.resolution,
                     offset=offset,
                     chunk_size=opt.chunk_size)
    print(info)
    gs_path = opt.gs_output
    print("gs_output:\n{}".format(gs_path))
    cvol = cv.CloudVolume(gs_path,
                          mip=0,
                          info=info,
                          parallel=opt.parallel,
                          progress=progress)
    cvol[:, :, :, :] = data
    cvol.commit_info()

    # Optional downsampling & meshing
    downsample(opt)
    mesh(opt)
Example no. 11
    def _make_cloudvolume(self, cloudpath, use_client_secret=True, **kwargs):
        try:
            import cloudvolume
        except ImportError:
            raise ImportError(
                "Could not import cloudvolume. Make sure it is installed. See https://pypi.org/project/cloud-volume for more info."
            )

        use_https = kwargs.pop("use_https", True)
        bounded = kwargs.pop("bounded", False)
        fill_missing = kwargs.pop("fill_missing", True)

        if re.search("^graphene", cloudpath) and use_client_secret:
            # Authentication header is "Authorization {token}"
            secrets = {"token": self.session.headers.get("Authorization").split(" ")[1]}
        else:
            secrets = None

        cv = cloudvolume.CloudVolume(
            cloudpath,
            use_https=use_https,
            fill_missing=fill_missing,
            bounded=bounded,
            secrets=secrets,
            **kwargs,
        )
        return cv
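
A sketch of the same idea without the client wrapper, assuming a placeholder graphene cloudpath and token; the bearer token from an `Authorization` header is passed to CloudVolume as `secrets`:

# Hypothetical sketch: the cloudpath and token are placeholders.
import cloudvolume

token = 'Bearer abc123'.split(' ')[1]
cv = cloudvolume.CloudVolume(
    'graphene://https://example.org/segmentation/api/v1/table/example',
    secrets={'token': token},
    use_https=True,
    fill_missing=True,
    bounded=False,
)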
Example no. 12
def extract_points_chunk(chunk_range, input_dir, object_id, mip_level):

    # Extract chunk from google cloud
    range3 = chunk_range.split('_')
    ind = []
    for i in range(3):
        range_i = range3[i]
        bound = range_i.split('-')
        ind.append(int(bound[0]))
        ind.append(int(bound[1]))

    print("Extracting chunk...")
    vol = cloudvolume.CloudVolume(input_dir, mip=mip_level, progress=True)
    min_bound = vol.bounds.minpt
    max_bound = vol.bounds.maxpt

    for i in range(3):
        if ind[2 * i] < min_bound[i]:
            ind[2 * i] = min_bound[i]
        if ind[2 * i + 1] > max_bound[i]:
            ind[2 * i + 1] = max_bound[i]

    chunk = vol[ind[0]:ind[1], ind[2]:ind[3], ind[4]:ind[5]]
    chunk = chunk[:, :, :, 0]

    print("Extracting point cloud...")
    object_loc = np.where(chunk == object_id)

    points = np.zeros([object_loc[0].size, 3])
    for i in range(3):
        points[:, i] = object_loc[i] + ind[2 * i]

    return points
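
A usage sketch, with a placeholder layer path; the chunk range string is parsed as 'x0-x1_y0-y1_z0-z1':

# Hypothetical usage sketch: path, object id, and bounds are placeholders.
pts = extract_points_chunk('0-512_0-512_0-64',
                           'gs://example-bucket/segmentation',
                           object_id=1234,
                           mip_level=0)
print(pts.shape)  # (N, 3) array of x, y, z voxel coordinates for the object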
Example no. 13
def _localize_l2_id(l2id, l2mesh, cv_path, auth_token, segmentation_fallback,
                    fallback_mip):
    if l2mesh is not None:
        l2m_abs = np.mean(l2mesh.vertices, axis=0)
        _, ii = spatial.cKDTree(l2mesh.vertices).query(l2m_abs)
        l2m = l2mesh.vertices[ii]
    else:
        if segmentation_fallback:
            cv = cloudvolume.CloudVolume(
                cv_path,
                bounded=False,
                progress=False,
                fill_missing=True,
                use_https=True,
                mip=0,
                secrets={"token": auth_token},
            )
            try:
                l2m = chunk_location_from_segmentation(l2id,
                                                       cv,
                                                       mip=fallback_mip)
            except Exception:
                l2m = np.array([np.nan, np.nan, np.nan])
            del cv
        else:
            l2m = np.array([np.nan, np.nan, np.nan])
    return l2m
Example no. 14
def test_graphene_auth_token(graphene_vol):
  cloudpath = "graphene://" + posixpath.join(PCG_LOCATION, 'segmentation', 'api/v1/', TEST_DATASET_NAME)
  
  cloudvolume.CloudVolume(cloudpath, secrets=TEST_TOKEN)
  cloudvolume.CloudVolume(cloudpath, secrets={ "token": TEST_TOKEN })

  try:
    cloudvolume.CloudVolume(cloudpath, secrets=None)
  except cloudvolume.exceptions.AuthenticationError:
    pass

  try:
    cloudvolume.CloudVolume(cloudpath, secrets={ "token": "Z@(ASINAFSOFAFOSNS" })
    assert False
  except cloudvolume.exceptions.AuthenticationError:
    pass
Example no. 15
def _download_meshes_thread(args):
    """ Helper to Download meshes into target directory """
    seg_ids, cv_path, target_dir, fmt, overwrite, mesh_endpoint, \
        merge_large_components = args

    cv = cloudvolume.CloudVolume(cv_path)

    for seg_id in seg_ids:
        if not overwrite and os.path.exists(f"{seg_id}.h5"):
            continue

        frags = [np.uint64(seg_id)]

        if mesh_endpoint is not None:
            frags = get_frag_ids_from_endpoint(seg_id, mesh_endpoint)

        try:
            cv_mesh = cv.mesh.get(frags)

            mesh = Mesh(vertices=cv_mesh["vertices"],
                        faces=np.array(cv_mesh["faces"]).reshape(-1, 3))

            if merge_large_components:
                mesh.merge_large_components()

            if fmt == "hdf5":
                write_mesh_h5(f"{target_dir}/{seg_id}.h5",
                              mesh.vertices,
                              mesh.faces.flatten(),
                              mesh_edges=mesh.mesh_edges,
                              overwrite=overwrite)
            else:
                mesh.write_to_file(f"{target_dir}/{seg_id}.{fmt}")
        except Exception as e:
            print(e)
Example no. 16
    def __init__(self):
        viewer = self.viewer = neuroglancer.Viewer()
        self.gt_vol = cloudvolume.CloudVolume(
            'https://storage.googleapis.com/neuroglancer-public-data/flyem_fib-25/ground_truth',
            mip=0,
            bounded=True,
            progress=False,
            provenance={})
        viewer.actions.add('start-fill', self._start_fill_action)
        viewer.actions.add('stop-fill', self._stop_fill_action)
        self.dimensions = neuroglancer.CoordinateSpace(
            names=['x', 'y', 'z'],
            units='nm',
            scales=[8, 8, 8],
        )
        with viewer.config_state.txn() as s:
            s.input_event_bindings.data_view['shift+mousedown0'] = 'start-fill'
            s.input_event_bindings.data_view['keyt'] = 'stop-fill'

        with viewer.txn() as s:
            s.layers['image'] = neuroglancer.ImageLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/image',
            )
            s.layers['ground_truth'] = neuroglancer.SegmentationLayer(
                source=
                'precomputed://gs://neuroglancer-public-data/flyem_fib-25/ground_truth',
            )
            s.layers['ground_truth'].visible = False
            self.flood_fill_event = None
Example no. 17
def save_meshes(state, output_dir, output_format, lod):
    for layer in state.layers:
        if not isinstance(layer.layer, neuroglancer.SegmentationLayer): continue
        if not layer.visible: return False
        for source in layer.source:
            if not source.url.startswith('precomputed://'):
                continue
            vol = cloudvolume.CloudVolume(source.url, parallel=True, progress=True)
            if len(layer.segments) == 0: continue
            get_mesh_kwargs = {}
            if lod != 0:
                get_mesh_kwargs.update(lod=lod)
            for segment in layer.segments:
                output_path = os.path.join(output_dir, '%d.%s' % (segment, output_format))
                print('Saving layer %r object %s -> %s' % (layer.name, segment, output_path))
                os.makedirs(output_dir, exist_ok=True)
                mesh = vol.mesh.get(segment, **get_mesh_kwargs)
                if isinstance(mesh, dict):
                    mesh = list(mesh.values())[0]
                if output_format == 'obj':
                    data = mesh.to_obj()
                elif output_format == 'ply':
                    data = mesh.to_ply()
                elif output_format == 'precomputed':
                    data = mesh.to_precomputed()
                with open(output_path, 'wb') as f:
                    f.write(data)
            return
    print('No segmentation layer found')
    sys.exit(1)
Example no. 18
def initialize_chunkedgraph(
    meta: ChunkedGraphMeta, cg_mesh_dir="mesh_dir", n_bits_root_counter=8, size=None
):
    """ Initalizes a chunkedgraph on BigTable """
    _check_table_existence(meta.bigtable_config, meta.graph_config)
    ws_cv = cloudvolume.CloudVolume(meta.data_source.watershed)
    if size is not None:
        size = np.array(size)
        for i in range(len(ws_cv.info["scales"])):
            original_size = ws_cv.info["scales"][i]["size"]
            size = np.min([size, original_size], axis=0)
            ws_cv.info["scales"][i]["size"] = [int(x) for x in size]
            size[:-1] //= 2

    dataset_info = ws_cv.info
    dataset_info["mesh"] = cg_mesh_dir
    dataset_info["data_dir"] = meta.data_source.watershed
    dataset_info["graph"] = {
        "chunk_size": [int(s) for s in meta.graph_config.chunk_size]
    }

    kwargs = {
        "instance_id": meta.bigtable_config.instance_id,
        "project_id": meta.bigtable_config.project_id,
        "table_id": meta.graph_config.graph_id,
        "chunk_size": meta.graph_config.chunk_size,
        "fan_out": np.uint64(meta.graph_config.fanout),
        "n_layers": np.uint64(meta.layer_count),
        "dataset_info": dataset_info,
        "use_skip_connections": meta.graph_config.use_skip_connections,
        "s_bits_atomic_layer": meta.graph_config.s_bits_atomic_layer,
        "n_bits_root_counter": n_bits_root_counter,
        "is_new": True,
    }
    return chunkedgraph.ChunkedGraph(**kwargs)
Example no. 19
def cv_supervoxels(N=64, blockN=16):

    block_per_row = int(N / blockN)

    chunk_size = [32, 32, 32]
    info = cloudvolume.CloudVolume.create_new_info(
        num_channels=1,
        layer_type='segmentation',
        data_type='uint64',
        encoding='raw',
        resolution=[4, 4, 40],  # Voxel scaling, units are in nanometers
        voxel_offset=[0, 0, 0],  # x,y,z offset in voxels from the origin
        # Pick a convenient size for your underlying chunk representation
        # Powers of two are recommended, doesn't need to cover image exactly
        chunk_size=chunk_size,  # units are voxels
        volume_size=[N, N, N],
    )

    vol = cloudvolume.CloudVolume(TEST_PATH, info=info)
    vol.commit_info()
    xx, yy, zz = np.meshgrid(*[np.arange(0, N) for cs in chunk_size])
    id_ind = (np.uint64(xx / blockN), np.uint64(yy / blockN),
              np.uint64(zz / blockN))
    id_shape = (block_per_row, block_per_row, block_per_row)

    seg = np.ravel_multi_index(id_ind, id_shape)
    vol[:] = np.uint64(seg)

    yield TEST_PATH

    shutil.rmtree(tempdir)
Example no. 20
def initialize_chunkedgraph(cg_table_id,
                            ws_cv_path,
                            chunk_size,
                            cg_mesh_dir,
                            fan_out=2,
                            instance_id=None,
                            project_id=None):
    """ Initalizes a chunkedgraph on BigTable

    :param cg_table_id: str
        name of chunkedgraph
    :param ws_cv_path: str
        path to watershed segmentation on Google Cloud
    :param chunk_size: np.ndarray
        array of three ints
    :param cg_mesh_dir: str
        mesh folder name
    :param fan_out: int
        fan out of chunked graph (2 == Octree)
    :param instance_id: str
        Google instance id
    :param project_id: str
        Google project id
    :return: ChunkedGraph
    """
    ws_cv = cloudvolume.CloudVolume(ws_cv_path)
    bbox = np.array(ws_cv.bounds.to_list()).reshape(2, 3)

    # assert np.all(bbox[0] == 0)
    # assert np.all((bbox[1] % chunk_size) == 0)

    n_chunks = ((bbox[1] - bbox[0]) / chunk_size).astype(int)
    n_layers = int(np.ceil(chunkedgraph_utils.log_n(np.max(n_chunks),
                                                    fan_out))) + 2

    dataset_info = ws_cv.info
    dataset_info["mesh"] = cg_mesh_dir
    dataset_info["data_dir"] = ws_cv_path
    dataset_info["graph"] = {"chunk_size": [int(s) for s in chunk_size]}

    kwargs = {
        "table_id": cg_table_id,
        "chunk_size": chunk_size,
        "fan_out": np.uint64(fan_out),
        "n_layers": np.uint64(n_layers),
        "dataset_info": dataset_info,
        "is_new": True
    }

    if instance_id is not None:
        kwargs["instance_id"] = instance_id

    if project_id is not None:
        kwargs["project_id"] = project_id

    cg = chunkedgraph.ChunkedGraph(**kwargs)

    return cg
Example no. 21
def use_google_storage(volume, max_workers=8, progress=True, **kwargs):
    """DEPCREATED. Use Google Storage via CloudVolume for segmentation IDs.

    Parameters
    ----------
    volume :        str | CloudVolume
                    Name or URL of CloudVolume to use to fetch segmentation IDs.
    max_workers :   int, optional
                    Maximal number of parallel queries.
    progress :      bool, optional
                    If False, will not show progress bar.
    **kwargs
                    Keyword arguments passed on to ``cloudvolume.CloudVolume``.


    Returns
    -------
    None

    Examples
    --------
    # Segmentation for FAFB autoseg V3
    >>> fafbseg.use_google_storage("https://storage.googleapis.com/fafb-ffn1-20190805/segmentation")

    # Also works with just the ID
    >>> fafbseg.use_google_storage("fafb-ffn1-20190805")

    See Also
    --------
    :func:`~fafbseg.use_brainmaps`
                        Use this if you have access to the brainmaps API.
    :func:`~fafbseg.use_remote_service`
                        Use this if you are hosting your own solution.
    :func:`~fafbseg.use_local_data`
                        Use this if you have a local copy of the segmentation
                        data.

    """
    global _get_seg_ids

    if 'CloudVolume' not in str(type(volume)):
        # Set and update defaults from kwargs
        defaults = dict(cache=True,
                        mip=0,
                        progress=False)
        defaults.update(kwargs)

        if 'http' in volume:
            url = volume
        else:
            url = 'https://storage.googleapis.com/{}/segmentation'.format(volume)

        volume = cloudvolume.CloudVolume(url, **defaults)

    _get_seg_ids = lambda x: _get_seg_ids_gs(x, volume,
                                             max_workers=max_workers,
                                             progress=progress)
    print('Using Google CloudStorage to retrieve segmentation IDs.')
Example no. 22
    def run(self):
        collection_id = self.args['synapse_collection_id']

        box = Synapse.query.with_entities(ST_3DExtent(Synapse.areas))\
            .filter_by(object_collection_id=collection_id).first()[0]
        bounds = np.array(get_box_as_array(box))
        mins = bounds[0:3]
        maxs = bounds[3:]
        print(mins)
        print(maxs)

        cv = cloudvolume.CloudVolume(self.args['cv_for_bounds'],
                                     mip=0,
                                     compress=True)
        info = cv._fetch_info()
        info['type'] = 'segmentation'
        info['encoding'] = 'compressed_segmentation'
        info['data_type'] = 'uint64'

        newinfo = cloudvolume.CloudVolume.create_new_info(
            num_channels=1,
            layer_type='segmentation',
            data_type='uint64',  # Channel images might be 'uint8'
            encoding='raw',  # raw, jpeg, compressed_segmentation are all options
            resolution=info['scales'][0]
            ['resolution'],  # Voxel scaling, units are in nanometers
            voxel_offset=info['scales'][0]
            ['voxel_offset'],  # x,y,z offset in voxels from the origin
            mesh='mesh',
            # Pick a convenient size for your underlying chunk representation
            # Powers of two are recommended, doesn't need to cover image exactly
            chunk_size=info['scales'][0]['chunk_sizes'][0],  # units are voxels
            volume_size=info['scales'][0]
            ['size'],  # e.g. a cubic millimeter dataset
        )
        #print(newinfo)
        cv_out = cloudvolume.CloudVolume(self.args['cv_output'],
                                         mip=0,
                                         info=newinfo,
                                         fill_missing=True,
                                         non_aligned_writes=True)
        cv_out.commit_info()

        synapses = Synapse.query.filter_by(
            object_collection_id=collection_id).all()
Example no. 23
def get_cloudvol(url, cache=True):
    """Get (cached) CloudVolume for given segmentation.

    Parameters
    ----------
    url :     str

    """
    return cv.CloudVolume(url, cache=cache, use_https=True, progress=False)
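
The docstring says "cached", but no memoization is visible in this excerpt; one way to get that behavior (a sketch, not necessarily what the original project does) is to wrap the constructor with functools.lru_cache:

# Hypothetical sketch: memoize one CloudVolume handle per URL.
import functools
import cloudvolume as cv

@functools.lru_cache(maxsize=None)
def get_cloudvol_cached(url, cache=True):
    return cv.CloudVolume(url, cache=cache, use_https=True, progress=False)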
Example no. 24
def decorated_volume(settings, **kwargs):
    """Converts DecoratedVolume proto object into volume objects.

  Args:
    settings: DecoratedVolume proto object.
    **kwargs: forwarded to VolumeStore constructor if volinfo volume_path.

  Returns:
    A volume object corresponding to the settings proto.  The returned type
  should support at least __getitem__, shape, and ndim with reasonable numpy
  compatibility.  The returned volume can have ndim in (3, 4).

  Raises:
    ValueError: On bad specification.
  """
    if settings.HasField('volinfo'):
        raise NotImplementedError('VolumeStore operations not available.')
    elif settings.HasField('hdf5'):
        path = settings.hdf5.split(':')
        if len(path) != 2:
            raise ValueError(
                'hdf5 volume_path should be specified as file_path:'
                'hdf5_internal_dataset_path.  Got: ' + settings.hdf5)
        volume = h5py.File(path[0], 'r')[path[1]]
    elif settings.HasField('precomputed'):
        transpose = None  # default when no 'axes' field is given
        if settings.HasField('axes'):
            if settings.axes == 'zyx' or settings.axes == 'czyx':
                transpose = None
            elif settings.axes == 'xyz' or settings.axes == 'xyzc':
                transpose = (3, 2, 1, 0)
            else:
                raise ValueError('Unknown axes type')
        if settings.HasField('mip'):
            mip = int(settings.mip)
        else:
            mip = 0
        c_vol = cloudvolume.CloudVolume('file://%s' % settings.precomputed,
                                        mip=mip,
                                        parallel=False,
                                        progress=False,
                                        bounded=False,
                                        fill_missing=True)
        volume = TransposedCloudVolume(c_vol, transpose=transpose)

    else:
        raise ValueError('A volume_path must be set.')

    if settings.HasField('decorator_specs'):
        if not settings.HasField('volinfo'):
            raise ValueError(
                'decorator_specs is only valid for volinfo volumes.')
        raise NotImplementedError('VolumeStore operations not available.')

    if volume.ndim not in (3, 4):
        raise ValueError('Volume must be 3d or 4d.')

    return volume
Example no. 25
def rewrite_single_segmentation_block(file_path,
                                      from_cv=None,
                                      to_cv=None,
                                      from_url=None,
                                      to_url=None):
    if from_cv is None:
        assert from_url is not None
        from_cv = cloudvolume.CloudVolume(from_url)

    if to_cv is None:
        assert to_url is not None
        assert 'svenmd' in to_url
        to_cv = cloudvolume.CloudVolume(to_url, bounded=False)

    dx, dy, dz, _ = os.path.basename(file_path).split("_")

    x_start, x_end = np.array(dx.split("-"), dtype=np.int)
    y_start, y_end = np.array(dy.split("-"), dtype=np.int)
    z_start, z_end = np.array(dz.split("-"), dtype=np.int)

    bbox = to_cv.bounds.to_list()[3:]
    if x_end > bbox[0]:
        x_end = bbox[0]

    if y_end > bbox[1]:
        y_end = bbox[1]

    if z_end > bbox[2]:
        z_end = bbox[2]

    seg = from_cv[x_start:x_end, y_start:y_end, z_start:z_end]
    mapping = creator_utils.read_mapping_h5(file_path)

    if 0 in seg and not 0 in mapping[:, 0]:
        mapping = np.concatenate(
            ([np.array([[0, 0]], dtype=np.uint64), mapping]))

    sort_idx = np.argsort(mapping[:, 0])
    idx = np.searchsorted(mapping[:, 0], seg, sorter=sort_idx)
    out = np.asarray(mapping[:, 1])[sort_idx][idx]

    # print(out.shape, x_start, x_end, y_start, y_end, z_start, z_end)
    to_cv[x_start:x_end, y_start:y_end, z_start:z_end] = out
Example no. 26
def _rewrite_segmentation_thread(args):
    file_paths, from_url, to_url = args

    from_cv = cloudvolume.CloudVolume(from_url)
    to_cv = cloudvolume.CloudVolume(to_url, bounded=False)

    assert 'svenmd' in to_url

    n_file_paths = len(file_paths)

    time_start = time.time()
    for i_fp, fp in enumerate(file_paths):
        if i_fp % 10 == 5:
            dt = time.time() - time_start
            eta = dt / i_fp * n_file_paths - dt
            print("%d / %d - dt: %.3fs - eta: %.3fs" %
                  (i_fp, n_file_paths, dt, eta))

        rewrite_single_segmentation_block(fp, from_cv=from_cv, to_cv=to_cv)
Example no. 27
def get_object_sizes(seg_path):
    seg_cv = cloudvolume.CloudVolume('file://%s' % seg_path,
                                     progress=False,
                                     parallel=False)
    seg_chunk = np.array(seg_cv[...])[..., 0]
    uni, counts = np.unique(seg_chunk, return_counts=True)
    nz = uni != 0
    uni = uni[nz]
    counts = counts[nz]
    return {k: v for k, v in zip(uni, counts)}
Example no. 28
def get_closest_lvl2_chunk(point,
                           root_id,
                           client,
                           cv=None,
                           resolution=[4, 4, 40],
                           mip_rescale=[2, 2, 1],
                           radius=200,
                           return_point=False):
    """Get the closest level 2 chunk on a root id 

    Parameters
    ----------
    point : array-like
        Point in space.
    root_id : int
        Root id of the object
    client : FrameworkClient
        Framework client to access data
    cv : cloudvolume.CloudVolume, optional
        Predefined cloudvolume, generated if None. By default None
    resolution : list, optional
        Point resolution to map between point resolution and mesh resolution, by default [4, 4, 40]
    mip_rescale : list, optional
        Scaling between the point coordinates and the segmentation mip used, by default [2, 2, 1]
    """
    if cv is None:
        cv = cloudvolume.CloudVolume(client.info.segmentation_source(),
                                     use_https=True,
                                     bounded=False)

    # Get the closest adjacent point for the root id within the radius.
    pt = np.array(point) // mip_rescale
    offset = radius // (np.array(mip_rescale) * np.array(resolution))
    lx = np.array(pt) - offset
    ux = np.array(pt) + offset
    bbox = cloudvolume.Bbox(lx, ux)
    vol = cv.download(bbox, segids=[root_id])
    vol = np.squeeze(vol)
    if not bool(np.any(vol > 0)):
        raise ValueError('No point of the root id is near the specified point')

    ctr = offset * point * resolution
    xyz = np.vstack(np.where(vol > 0)).T
    xyz_nm = xyz * mip_rescale * resolution

    ind = np.argmin(np.linalg.norm(xyz_nm - ctr, axis=1))
    closest_pt = vol.bounds.minpt + xyz[ind]

    # Look up the level 2 supervoxel for that id.
    closest_sv = int(cv.download_point(closest_pt, size=1))
    lvl2_id = client.chunkedgraph.get_root_id(closest_sv, level2=True)

    if return_point:
        return lvl2_id, closest_pt * mip_rescale * resolution
    else:
        return lvl2_id
Example no. 29
def get_cv_data(cv_path, offset_xyz, size_xyz):
    full_cv = cloudvolume.CloudVolume('file://%s' % cv_path,
                                      mip=0,
                                      parallel=True,
                                      progress=False)

    offset_xyz = np.array(offset_xyz)
    size_xyz = np.array(size_xyz)
    bbox = cloudvolume.Bbox(offset_xyz, offset_xyz + size_xyz)

    return full_cv[bbox][..., 0]
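
A usage sketch with a placeholder local path; the trailing `[..., 0]` drops the channel axis, so a (128, 128, 128) request returns a plain 3-D array:

# Hypothetical usage sketch: the directory path is a placeholder.
cutout = get_cv_data('/data/example_volume',
                     offset_xyz=(0, 0, 0),
                     size_xyz=(128, 128, 128))
print(cutout.shape)  # (128, 128, 128)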
Example no. 30
def _download_meshes_thread(args):
    """ Downloads meshes into target directory

    :param args: list
    """
    seg_ids, cv_path, target_dir = args

    cv = cloudvolume.CloudVolume(cv_path)
    os.chdir(target_dir)

    for seg_id in seg_ids:
        cv.mesh.save(seg_id)