Example #1
def MultiResUnshardedMeshMergeTask(
  cloudpath:str, 
  prefix:str,
  cache_control:bool = False,
  draco_compression_level:int = 1,
  mesh_dir:Optional[str] = None,
  num_lod:int = 1,
  progress:bool = False,
):
  cv = CloudVolume(cloudpath)
  
  if mesh_dir is None and 'mesh' in cv.info:
    mesh_dir = cv.info['mesh']

  files_per_label = get_mesh_filenames_subset(
    cloudpath, mesh_dir, prefix
  )

  cf = CloudFiles(cv.meta.join(cloudpath, mesh_dir))
  for label, filenames in tqdm(files_per_label.items(), disable=(not progress)):
    files = cf.get(filenames)
    # TODO: handle Draco-encoded fragments here as well
    files = [ Mesh.from_precomputed(f["content"]) for f in files ]

    (manifest, mesh) = process_mesh(
      cv, label, files, 
      num_lod, draco_compression_level
    )

    cf.put(f"{label}.index", manifest.to_binary(), cache_control="no-cache")
    cf.put(f"{label}", mesh, cache_control="no-cache")
Example #2
def MultiResShardedMeshMergeTask(
  cloudpath:str,
  shard_no:str,
  draco_compression_level:int = 1,
  mesh_dir:Optional[str] = None,
  num_lod:int = 1,
  spatial_index_db:Optional[str] = None,
  progress:bool = False
):
  cv = CloudVolume(cloudpath, spatial_index_db=spatial_index_db)
  cv.mip = cv.mesh.meta.mip
  if mesh_dir is None and 'mesh' in cv.info:
    mesh_dir = cv.info['mesh']

  # This looks messy because we are trying to avoid retaining
  # unnecessary memory. In the original skeleton iteration, this was 
  # using 50 GB+ memory on minnie65. So it makes sense to be just
  # as careful with a heavier type of object.
  locations = locations_for_labels(cv, labels_for_shard(cv, shard_no))
  filenames = set(itertools.chain(*locations.values()))
  labels = set(locations.keys())
  del locations
  meshes = collect_mesh_fragments(
    cv, labels, filenames, mesh_dir, progress
  )
  del filenames

  # Important: iterate in place rather than building a new dict
  # (e.g. { ... for ... in ... }), which would hold a second copy
  # of the meshes in memory.
  for label in labels:
    meshes[label] = Mesh.concatenate(*meshes[label])
  del labels

  fname, shard = create_mesh_shard(
    cv, meshes, 
    num_lod, draco_compression_level,
    progress, shard_no
  )
  del meshes

  if shard is None:
    return

  cf = CloudFiles(cv.mesh.meta.layerpath)
  cf.put(
    fname, shard,
    compress=False,
    content_type='application/octet-stream',
    cache_control='no-cache',
  )
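For comparison, a sketch of invoking the sharded variant; the cloudpath is a hypothetical placeholder and the exact shard_no format is an assumption that depends on the dataset's sharding spec:

# Hypothetical usage: rebuild one shard's worth of multi-resolution meshes.
MultiResShardedMeshMergeTask(
  "gs://example-bucket/example-dataset",  # hypothetical cloudpath
  shard_no="0",                           # hypothetical shard identifier
  num_lod=1,
  progress=True,
)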
Example #3
def _generate_mesh(x):
    """Generate mesh (of cloudvolume class) for given navis volume.

    Parameters
    ----------
    x :             Navis Volume

    Returns
    -------
    mesh :      Cloud volume mesh
    """
    mesh = Mesh(segid=x.id, vertices=x.vertices, faces=x.faces)

    return mesh
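A usage sketch; it assumes navis ships an 'LH' example volume via navis.example_volume, which is how the navis tutorials load test data:

import navis

vol = navis.example_volume('LH')  # assumed bundled example volume
cv_mesh = _generate_mesh(vol)
print(cv_mesh.segid, len(cv_mesh.vertices))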
Example #4
def process_mesh(
  cv:CloudVolume,
  label:int,
  mesh: Mesh,
  num_lod:int = 1,
  draco_compression_level:int = 1,
) -> Tuple[MultiLevelPrecomputedMeshManifest, Mesh]:

  mesh.vertices /= cv.meta.resolution(cv.mesh.meta.mip)

  grid_origin = np.floor(np.min(mesh.vertices, axis=0))
  chunk_shape = np.ceil(np.max(mesh.vertices, axis=0) - grid_origin)

  manifest = MultiLevelPrecomputedMeshManifest(
    segment_id=label,
    chunk_shape=chunk_shape,
    grid_origin=grid_origin, 
    num_lods=int(num_lod), 
    lod_scales=[ 1 ] * int(num_lod),
    vertex_offsets=[[0,0,0]],
    num_fragments_per_lod=[1], 
    fragment_positions=[[[0,0,0]]], 
    fragment_offsets=[0], # needs to be set when we have the final value
  )

  vqb = int(cv.mesh.meta.info["vertex_quantization_bits"])
  mesh.vertices = to_stored_model_space(
    mesh.vertices, manifest, 
    lod=0, 
    vertex_quantization_bits=vqb,
    frag=0
  )

  quantization_range = np.max(mesh.vertices, axis=0) - np.min(mesh.vertices, axis=0)
  quantization_range = np.max(quantization_range)

  # mesh.vertices must be an integer type or the mesh will display
  # distorted in neuroglancer.
  mesh = DracoPy.encode(
    mesh.vertices, mesh.faces, 
    quantization_bits=vqb,
    compression_level=draco_compression_level,
    quantization_range=quantization_range,
    quantization_origin=np.min(mesh.vertices, axis=0),
    create_metadata=True,
  )
  manifest.fragment_offsets = [ len(mesh) ]

  return (manifest, mesh)
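The (manifest, mesh) pair returned here is exactly what Example #1 writes out; a sketch of that round trip, assuming cloudpath, mesh_dir, label, and a merged cloudvolume Mesh are already in scope:

# Sketch: quantize and Draco-encode one label's merged mesh, then store
# the manifest and fragment side by side (mirrors Example #1).
manifest, encoded = process_mesh(
  cv, label, merged_mesh,
  num_lod=1, draco_compression_level=1,
)
cf = CloudFiles(cv.meta.join(cloudpath, mesh_dir))
cf.put(f"{label}.index", manifest.to_binary(), cache_control="no-cache")
cf.put(f"{label}", encoded, cache_control="no-cache")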
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("cvpath", help="Path to precomputed volume to create")
    parser.add_argument("meshes", nargs="+", help="Mesh files to convert")
    parser.add_argument("--initial-id",
                        default=1,
                        type=int,
                        help="Initial ID for meshes")
    parser.add_argument("--resolution",
                        nargs=3,
                        default=[4, 4, 40],
                        help="Voxel resolution (full res)")
    parser.add_argument("--volume-size",
                        nargs=3,
                        default=[248832, 134144, 7063],
                        help="Extent of segmentation (full res)")
    args = parser.parse_args()

    cvpath = args.cvpath
    mesh_path = os.path.join(cvpath, "mesh")
    seg_props_path = os.path.join(cvpath, "seg_props")
    os.makedirs(mesh_path, exist_ok=True)
    os.makedirs(seg_props_path, exist_ok=True)

    # Convert the entire space into units of "one entire brain" in case neuroglancer tries to load the image layer
    resolution = np.asarray(args.resolution) * np.asarray(args.volume_size)
    size = np.asarray([1, 1, 1])

    with open(os.path.join(cvpath, "info"), "w") as f:
        info = {
            "data_type": "uint64",
            "scales": [{
                "key": "fake",
                "encoding": "raw",
                "voxel_offset": [0, 0, 0],
                "resolution": resolution.tolist(),
                "size": size.tolist(),
                "chunk_sizes": [[256, 256, 16]]
            }],
            "mesh": "mesh",
            "segment_properties": "seg_props",
            "type": "segmentation",
            "num_channels": 1
        }
        f.write(json.dumps(info))

    with open(os.path.join(mesh_path, "info"), "w") as f:
        info = {"@type": "neuroglancer_legacy_mesh"}
        f.write(json.dumps(info))

    segment_props = {
        "@type": "neuroglancer_segment_properties",
        'inline': {
            'ids': [],
            'properties': [{
                "id": "source",
                "type": "label",
                "values": []
            }]
        }
    }

    mesh_id = args.initial_id
    for meshfile in args.meshes:
        # This will work when https://github.com/seung-lab/cloud-volume/pull/413 is merged:
        #cv_mesh = Mesh.from_obj(text=Path(meshfile).read_text(), segid=mesh_id)

        # Until then, use trimesh:
        tmesh = trimesh.load_mesh(meshfile)
        cv_mesh = Mesh(vertices=tmesh.vertices,
                       faces=tmesh.faces,
                       segid=mesh_id)

        ngl_mesh_file = "%d.frag" % mesh_id
        with open(os.path.join(mesh_path, "%d:0" % (mesh_id)), "w") as f:
            info = {"fragments": [ngl_mesh_file]}
            f.write(json.dumps(info))

        with open(os.path.join(mesh_path, ngl_mesh_file), "wb") as f:
            f.write(cv_mesh.to_precomputed())

        segment_props['inline']['ids'].append(f'{mesh_id}')
        segment_props['inline']['properties'][0]['values'].append(
            os.path.basename(meshfile))
        mesh_id += 1

    with open(os.path.join(seg_props_path, "info"), "w") as f:
        f.write(json.dumps(segment_props))
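A hypothetical command line for this script; the script and mesh file names below are placeholders:

# Assuming the code above is saved as obj_to_precomputed.py:
# python obj_to_precomputed.py ./my_layer left_lobe.obj right_lobe.obj \
#     --initial-id 1 --resolution 4 4 40 --volume-size 248832 134144 7063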
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("meshcsv", help="Mesh files to convert")
    parser.add_argument("meshdir", help="Mesh files to convert")
    parser.add_argument("cvpath", help="Path to precomputed volume to create")
    parser.add_argument("--resolution",
                        nargs=3,
                        default=[4, 4, 40],
                        help="Voxel resolution (full res)")
    parser.add_argument("--volume-size",
                        nargs=3,
                        default=[248832, 134144, 7063],
                        help="Extent of segmentation (full res)")
    args = parser.parse_args()

    df = pandas.read_csv(args.meshcsv, header=1)

    cvpath = args.cvpath
    mesh_path = os.path.join(cvpath, "mesh")
    seg_props_path = os.path.join(cvpath, "seg_props")
    os.makedirs(mesh_path, exist_ok=True)
    os.makedirs(seg_props_path, exist_ok=True)

    segment_props = {
        "@type": "neuroglancer_segment_properties",
        'inline': {
            'ids': [],
            'properties': [{
                "id": "source",
                "type": "label",
                "values": []
            }]
        }
    }

    # Grow `size` to the maximum vertex coordinate seen across all meshes
    size = np.asarray([0, 0, 0])

    for _, row in df.iterrows():
        meshfile = os.path.join(args.meshdir, "%s.obj" % row['Material'])
        mesh_id = row['Nr']
        mesh_name = row['Material']
        if os.path.exists(meshfile):
            # This will work when https://github.com/seung-lab/cloud-volume/pull/413 is merged:
            #cv_mesh = Mesh.from_obj(text=Path(meshfile).read_text(), segid=mesh_id)

            # Until then, use trimesh:
            tmesh = trimesh.load_mesh(meshfile)

            vertices = np.asarray(tmesh.vertices)

            size[0] = max(size[0], np.max(vertices[:, 0]))
            size[1] = max(size[1], np.max(vertices[:, 1]))
            size[2] = max(size[2], np.max(vertices[:, 2]))

            cv_mesh = Mesh(vertices=tmesh.vertices,
                           faces=tmesh.faces,
                           segid=mesh_id)

            ngl_mesh_file = "%d.frag" % mesh_id
            with open(os.path.join(mesh_path, "%d:0" % (mesh_id)), "w") as f:
                info = {"fragments": [ngl_mesh_file]}
                f.write(json.dumps(info))

            with open(os.path.join(mesh_path, ngl_mesh_file), "wb") as f:
                f.write(cv_mesh.to_precomputed())

            segment_props['inline']['ids'].append(f'{mesh_id}')
            segment_props['inline']['properties'][0]['values'].append(
                mesh_name)

        else:
            print(f'{meshfile} does not exist. Skipping.')

    with open(os.path.join(seg_props_path, "info"), "w") as f:
        f.write(json.dumps(segment_props))

    # Use the maximum vertex coordinate as the resolution so the fake
    # 1x1x1 image layer spans the whole mesh bounding box in case
    # neuroglancer tries to load it.
    # resolution = np.asarray(args.resolution) * np.asarray(args.volume_size)

    resolution = size
    size = np.asarray([1, 1, 1])

    with open(os.path.join(cvpath, "info"), "w") as f:
        info = {
            "data_type": "uint64",
            "scales": [{
                "key": "fake",
                "encoding": "raw",
                "voxel_offset": [0, 0, 0],
                "resolution": resolution.tolist(),
                "size": size.tolist(),
                "chunk_sizes": [[256, 256, 16]]
            }],
            "mesh": "mesh",
            "segment_properties": "seg_props",
            "type": "segmentation",
            "num_channels": 1
        }
        f.write(json.dumps(info))

    with open(os.path.join(mesh_path, "info"), "w") as f:
        info = {"@type": "neuroglancer_legacy_mesh"}
        f.write(json.dumps(info))
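A sketch of the inputs this variant expects; the column names 'Nr' and 'Material' come from the code above, while the file names are placeholders:

# meshes.csv (the first line is skipped because of header=1):
#   # comment line
#   Nr,Material
#   1,antennal_lobe
#   2,mushroom_body
#
# python csv_to_precomputed.py meshes.csv ./objs ./my_layer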
Example #7
def test_duplicate_vertices():
    verts = np.array(
        [
            [0, 0, 0],
            [0, 1, 0],
            [1, 0, 0],
            [1, 1, 0],
            [2, 0, 0],
            [2, 1, 0],
            [3, 0, 0],
            [3, 1, 0],
            [3, 0, 0],
            [4, 0, 0],
            [4, 1, 0],
            [4, 0, 0],  # duplicate in x direction
            [5, 0, 0],
            [5, 1, 0],
            [5, 0, 0],
            [6, 0, 0],
            [6, 1, 0],
            [6, 1, 2],
            [7, 0, 0],
            [7, 1, 0],
            [4, 0, 0]
        ],
        dtype=np.float32)

    faces = np.array(
        [[0, 1, 2], [2, 3, 4], [4, 5, 6], [7, 8, 9], [9, 10, 11], [10, 11, 12],
         [12, 13, 14], [14, 15, 16], [15, 16, 17], [15, 18, 19], [18, 19, 20]],
        dtype=np.uint32)

    mesh = Mesh(verts, faces, segid=666)

    def deduplicate(mesh, x, offset_x=0):
        return mesh.deduplicate_chunk_boundaries(
            (x, 100, 100),
            is_draco=False,
            offset=(offset_x, -1, -1)  # so y=0,z=0 isn't a chunk boundary
        )

    # test that triple 4 isn't affected
    mesh2 = deduplicate(mesh, x=4)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0]

    # pop off the last 4
    mesh.vertices = mesh.vertices[:-1]
    mesh.faces = mesh.faces[:-1]

    # test that 4 is now affected
    mesh2 = deduplicate(mesh, x=4)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0] - 1

    mesh2 = deduplicate(mesh, x=3)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0] - 1

    mesh2 = deduplicate(mesh, x=4, offset_x=-1)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0] - 1

    mesh2 = deduplicate(mesh, x=5)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0] - 1

    mesh2 = deduplicate(mesh, x=1)
    assert not np.all(mesh.vertices == mesh2.vertices)
    assert mesh2.vertices.shape[0] == mesh.vertices.shape[0] - 3
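For reference, the method under test can also be called directly; this mirrors the deduplicate helper above with illustrative chunk size and offset values:

# Sketch: deduplicate vertices lying on the boundaries of a (4, 100, 100)
# chunk grid, shifting the grid so y=0 and z=0 are not chunk boundaries.
mesh2 = mesh.deduplicate_chunk_boundaries(
    (4, 100, 100),
    is_draco=False,
    offset=(0, -1, -1),
)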
Example #8
def uploadmeshes(volumedatasource, volumeidlist, volumenamelist, path,
                 layer_name):
    """Upload mesh (of cloudvolume class) to a local server.

    Parameters
    ----------
    volumedatasource :     List containing cloud volume meshes
    volumeidlist :    List containing the segids(volume id)
    volumenamelist :   List containing the names of volumes
    path :           path to the local data server
    layer_name:      name of layer/path to add to

    Returns
    -------
    cv :     cloudvolume class object
    """
    info = {
        "@type": "neuroglancer_legacy_mesh",
        'scales': [1, 1, 1],
    }
    path = 'file://' + path + '/precomputed/' + layer_name
    cv = CloudVolume(path, info=info)

    cv.mesh.meta.info['@type'] = 'neuroglancer_legacy_mesh'
    cv.mesh.meta.info['segment_name_map'] = 'segment_names'
    cv.mesh.meta.info['segment_properties'] = 'segment_properties'
    cv.mesh.meta.commit_info()

    files = [
        os.path.join(cv.mesh.meta.mesh_path, str(vol.segid))
        for vol in volumedatasource
    ]
    volumeids = [str(vol.segid) for vol in volumedatasource]

    for fileidx in range(len(files)):
        fullfilepath = str(files[fileidx])
        print(files[fileidx])
        fullfilepath = os.path.join(cv.basepath, os.path.basename(path),
                                    fullfilepath)
        uploadvol = Mesh(vertices=volumedatasource[fileidx].vertices,
                         faces=volumedatasource[fileidx].faces,
                         segid=None)
        precomputed_mesh = _to_precomputed(uploadvol)
        print('Seg id is:', str(volumeids[fileidx]))
        print('Full filepath:', fullfilepath)
        with open(fullfilepath, 'wb') as f:
            f.write(precomputed_mesh)

        manifestinfo = {"fragments": [str(volumeids[fileidx])]}
        manifestfilepath = str(files[fileidx]) + ':' + str(0)  # files[fileidx]
        manifestfilepath = os.path.join(cv.basepath, os.path.basename(path),
                                        manifestfilepath)
        with open(manifestfilepath, 'w') as f:
            json.dump(manifestinfo, f)

    # create the file for segment_properties
    allvolproplist = {"id": "label", "type": "label", "values": volumenamelist}

    volinfo = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": list(map(str, volumeidlist)),
            "properties": [allvolproplist]
        }
    }
    volfilepath = os.path.join(cv.basepath, os.path.basename(path),
                               os.path.join(cv.mesh.meta.mesh_path),
                               'segment_properties')
    if not os.path.exists(volfilepath):
        os.makedirs(volfilepath)
        print('creating:', volfilepath)
    volinfofile = os.path.join(volfilepath, 'info')
    with open(volinfofile, 'w') as f:
        json.dump(volinfo, f)

    # create the file for segment_names
    volumenamedict = dict(zip(map(str, volumeidlist), volumenamelist))
    volnamemap = {
        "@type": "neuroglancer_segment_name_map",
        "map": volumenamedict
    }
    volnamefilepath = os.path.join(cv.basepath, os.path.basename(path),
                                   os.path.join(cv.mesh.meta.mesh_path),
                                   'segment_names')
    if not os.path.exists(volnamefilepath):
        os.makedirs(volnamefilepath)
        print('creating:', volnamefilepath)
    volnamemapfile = os.path.join(volnamefilepath, 'info')
    with open(volnamemapfile, 'w') as f:
        json.dump(volnamemap, f)

    return cv
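A hypothetical call, assuming vols is a list of cloudvolume meshes (e.g. produced by _generate_mesh from Example #3) and that a local precomputed server is rooted at the given path:

cv = uploadmeshes(
    vols,
    volumeidlist=[vol.segid for vol in vols],
    volumenamelist=['region_a', 'region_b'],  # hypothetical names
    path='/home/user/ngdata',                 # hypothetical server root
    layer_name='brain_regions',
)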