Example #1
import os

from cloudvolume import CloudVolume, Skeleton


def test_caching():
    # `info` is assumed to be a module-level Precomputed info dict
    # (see the sketch after this example).
    vol = CloudVolume('file:///tmp/cloudvolume/test-skeletons',
                      info=info,
                      cache=True)

    vol.cache.flush()  # start from an empty cache

    skel = Skeleton(
        [
            (0, 0, 0),
            (1, 0, 0),
            (2, 0, 0),
            (0, 1, 0),
            (0, 2, 0),
            (0, 3, 0),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=666,
    )

    vol.skeleton.upload(skel)

    assert vol.cache.list_skeletons() == ['666.gz']

    # Plant a skeleton directly in the on-disk cache to prove that
    # vol.skeleton.get() reads from the cache rather than the source.
    skel.id = 1
    with open(os.path.join(vol.cache.path, 'skeletons/1'), 'wb') as f:
        f.write(skel.to_precomputed())

    cached_skel = vol.skeleton.get(1)

    assert cached_skel == skel

    vol.cache.flush()
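
The test assumes a module-level `info` dict describing the volume. A minimal sketch of one way to build it with CloudVolume.create_new_info (the field values here are assumptions, not taken from the test):

import cloudvolume

# Hypothetical fixture: a minimal Precomputed info dict that registers a
# 'skeletons' subdirectory so vol.skeleton knows where skeleton files live.
info = cloudvolume.CloudVolume.create_new_info(
    num_channels=1,
    layer_type='segmentation',
    data_type='uint64',
    encoding='raw',
    resolution=(4, 4, 40),       # nm per voxel; assumed values
    voxel_offset=(0, 0, 0),
    volume_size=(128, 128, 64),  # assumed values
    skeletons='skeletons',
)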
Example #2
    def execute(self):

        corgie_logger.info(
            f"Generate new skeleton vertices task for id {self.skeleton_id_str}"
        )
        skeleton = get_skeleton(self.src_path, self.skeleton_id_str)
        # Optionally sort vertices by z so chunks cover contiguous slices;
        # otherwise keep the identity ordering.
        if self.vertex_sort:
            vertex_sort = skeleton.vertices[:, 2].argsort()
        else:
            vertex_sort = np.arange(0, len(skeleton.vertices))
        number_vertices = len(skeleton.vertices)
        # Chunk the vertex range into task_vertex_size pieces; each piece was
        # processed by an upstream task and stored as a separate file.
        index_points = list(range(0, number_vertices, self.task_vertex_size))
        cf = CloudFiles(f"{self.dst_path}")
        array_filenames = []
        for i in range(len(index_points)):
            start_index = index_points[i]
            if i + 1 == len(index_points):
                end_index = number_vertices
            else:
                end_index = index_points[i + 1]
            array_filenames.append(
                f"intermediary_arrays/{self.skeleton_id_str}:{start_index}-{end_index}"
            )
        array_files = cf.get(array_filenames)
        # Dict to make sure arrays are concatenated in correct order
        array_dict = {}
        for array_file in array_files:
            array_dict[array_file["path"]] = pickle.loads(
                array_file["content"])
        array_arrays = []
        for array_filename in array_filenames:
            array_arrays.append(array_dict[array_filename])
        array_arrays = np.concatenate(array_arrays)
        # Restore the correct order of the vertices
        restore_sort = vertex_sort.argsort()
        new_vertices = array_arrays[restore_sort]
        new_skeleton = Skeleton(
            vertices=new_vertices,
            edges=skeleton.edges,
            radii=skeleton.radius,
            vertex_types=skeleton.vertex_types,
            space=skeleton.space,
            transform=skeleton.transform,
        )
        cf.put(
            path=self.skeleton_id_str,
            content=new_skeleton.to_precomputed(),
            compress="gzip",
        )
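
The intermediary files this task concatenates are assumed to be written by an upstream task, one pickled vertex-array chunk per file, under the same naming scheme. A minimal sketch of that producer side (write_intermediary and process_vertices are hypothetical names):

import pickle

from cloudfiles import CloudFiles

def write_intermediary(dst_path, skeleton_id_str, vertices, start, end):
    # Hypothetical producer: transform one chunk of (already sorted)
    # vertices and store the pickled result where the consumer expects it.
    cf = CloudFiles(dst_path)
    new_chunk = process_vertices(vertices[start:end])
    cf.put(
        path=f"intermediary_arrays/{skeleton_id_str}:{start}-{end}",
        content=pickle.dumps(new_chunk),
    )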
Example #3
import json
import os

from cloudvolume import CloudVolume, Skeleton
from cloudvolume.datasource.precomputed.sharding import ShardingSpecification


def uploadshardedskeletons(skelsource, skelseglist, skelnamelist, path):
    """Upload sharded skeletons to a local server.

    Parameters
    ----------
    skelsource :     list of cloudvolume Skeleton objects
    skelseglist :    list of segment ids (skids)
    skelnamelist :   list of skeleton names
    path :           path to the local data server

    Returns
    -------
    cv :     CloudVolume object
    """
    info = {
        "@type": "neuroglancer_skeletons",
        "transform": skelsource[0].transform.flatten(),
        "vertex_attributes": [{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1
        }],
        "scales": "um"
    }
    path = 'file://' + path + '/precomputed'
    cv = CloudVolume(path, info=info)

    # prepare for info file
    cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons'
    cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten()
    cv.skeleton.meta.info['vertex_attributes'] = [{
        'id': 'radius',
        'data_type': 'float32',
        'num_components': 1
    }]

    # prepare sharding info: the uint64 segid is shifted right by
    # `preshift_bits`, hashed with murmurhash3, and the low bits of the hash
    # pick the minishard (`minishard_bits`) and shard (`shard_bits`).
    spec = ShardingSpecification(
        'neuroglancer_uint64_sharded_v1',
        preshift_bits=9,
        hash='murmurhash3_x86_128',
        minishard_bits=6,
        shard_bits=15,
        minishard_index_encoding='raw',
        data_encoding='raw',
    )
    cv.skeleton.meta.info['sharding'] = spec.to_dict()

    cv.skeleton.meta.info['segment_properties'] = 'seg_props'

    cv.skeleton.meta.commit_info()

    precomputedskels = {}
    for source_skel in skelsource:
        skelid = int(source_skel.id)
        skel = Skeleton(source_skel.vertices,
                        edges=source_skel.edges,
                        segid=skelid,
                        extra_attributes=[{
                            "id": "radius",
                            "data_type": "float32",
                            "num_components": 1,
                        }]).physical_space()
        precomputedskels[skelid] = skel.to_precomputed()

    shardfiles = spec.synthesize_shards(precomputedskels)
    shardedfilepath = os.path.join(cv.basepath, os.path.basename(path),
                                   cv.skeleton.meta.skeleton_path)

    for fname, binary in shardfiles.items():
        with open(os.path.join(shardedfilepath, fname), 'wb') as f:
            f.write(binary)

    segfilepath = os.path.join(cv.basepath, os.path.basename(path),
                               cv.skeleton.meta.skeleton_path, 'seg_props')

    if not os.path.exists(segfilepath):
        os.makedirs(segfilepath)
        print('creating:', segfilepath)

    # Per the neuroglancer segment_properties spec, 'properties' holds one
    # entry per property (not per segment); its 'values' list runs parallel
    # to the 'ids' list.
    allsegproplist = [{
        'id': 'label',
        'type': 'label',
        'values': skelnamelist,
    }]

    seginfo = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": skelseglist,
            "properties": allsegproplist
        }
    }

    segfile = os.path.join(segfilepath, 'info')
    with open(segfile, 'w') as f:
        json.dump(seginfo, f)

    return cv
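
A minimal usage sketch with hypothetical inputs (skel_a, skel_b and the name list are placeholders):

skels = [skel_a, skel_b]                 # cloudvolume Skeleton objects
segids = [int(s.id) for s in skels]
names = ['neuron_a', 'neuron_b']

cv = uploadshardedskeletons(skels, segids, names, '/tmp/localserver')
# Shard files are written under <path>/precomputed/<skeleton_path>/,
# typically .../precomputed/skeletons/.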
Example #4
import json
import os

from cloudvolume import CloudVolume, Skeleton


def uploadskeletons(skelsource, skelseglist, skelnamelist, path):
    """Upload skeletons (of cloudvolume Skeleton class) to a local server.

    Parameters
    ----------
    skelsource :     list of cloudvolume Skeleton objects
    skelseglist :    list of segment ids (skids)
    skelnamelist :   list of skeleton names
    path :           path to the local data server

    Returns
    -------
    cv :     CloudVolume object
    """
    info = {
        "@type": "neuroglancer_skeletons",
        "transform": skelsource[0].transform.flatten(),
        "vertex_attributes": [{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1
        }],
        "scales": "um"
    }
    path = 'file://' + path + '/precomputed'
    cv = CloudVolume(path, info=info)

    # prepare for info file
    cv.skeleton.meta.info['@type'] = 'neuroglancer_skeletons'
    cv.skeleton.meta.info['transform'] = skelsource[0].transform.flatten()
    cv.skeleton.meta.info['vertex_attributes'] = [{
        'id': 'radius',
        'data_type': 'float32',
        'num_components': 1
    }]
    # These keys may not exist in a fresh info dict; pop with a default so a
    # missing key doesn't raise.
    cv.skeleton.meta.info.pop('sharding', None)
    cv.skeleton.meta.info.pop('spatial_index', None)

    cv.skeleton.meta.info['segment_properties'] = 'seg_props'

    cv.skeleton.meta.commit_info()

    files = [
        os.path.join(cv.skeleton.meta.skeleton_path, str(skel.id))
        for skel in skelsource
    ]

    for source_skel, fullfilepath in zip(skelsource, files):
        fullfilepath = os.path.join(cv.basepath, os.path.basename(path),
                                    fullfilepath)
        uploadskel = Skeleton(vertices=source_skel.vertices,
                              edges=source_skel.edges)
        print(fullfilepath)
        with open(fullfilepath, 'wb') as f:
            f.write(uploadskel.to_precomputed())

    segfilepath = os.path.join(cv.basepath, os.path.basename(path),
                               cv.skeleton.meta.skeleton_path, 'seg_props')

    if not os.path.exists(segfilepath):
        os.makedirs(segfilepath)
        print('creating:', segfilepath)

    # Per the neuroglancer segment_properties spec, 'properties' holds one
    # entry per property (not per segment); its 'values' list runs parallel
    # to the 'ids' list.
    allsegproplist = [{
        'id': 'label',
        'type': 'label',
        'values': skelnamelist,
    }]

    seginfo = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": skelseglist,
            "properties": allsegproplist
        }
    }

    segfile = os.path.join(segfilepath, 'info')
    with open(segfile, 'w') as f:
        json.dump(seginfo, f)

    return cv
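
Usage mirrors the sharded variant; a minimal sketch with the same hypothetical inputs:

cv = uploadskeletons(skels, segids, names, '/tmp/localserver')
# Each skeleton lands in its own file named after its segid, e.g.
# <path>/precomputed/skeletons/<segid>, rather than being packed into
# shard files. One file per skeleton is simpler to update in place;
# sharding trades that for far fewer files on large datasets.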