Example #1
def MultiResShardedMeshMergeTask(
  cloudpath:str,
  shard_no:str,
  draco_compression_level:int = 1,
  mesh_dir:Optional[str] = None,
  num_lod:int = 1,
  spatial_index_db:Optional[str] = None,
  progress:bool = False
):
  cv = CloudVolume(cloudpath, spatial_index_db=spatial_index_db)
  cv.mip = cv.mesh.meta.mip
  if mesh_dir is None and 'mesh' in cv.info:
    mesh_dir = cv.info['mesh']

  # This looks messy because we are trying to avoid retaining
  # unnecessary memory. In the original skeleton iteration, this was 
  # using 50 GB+ memory on minnie65. So it makes sense to be just
  # as careful with a heavier type of object.
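  # locations maps each label assigned to this shard to the list of
  # spatial index files that contain its mesh fragments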
  locations = locations_for_labels(cv, labels_for_shard(cv, shard_no))
  filenames = set(itertools.chain(*locations.values()))
  labels = set(locations.keys())
  del locations
  meshes = collect_mesh_fragments(
    cv, labels, filenames, mesh_dir, progress
  )
  del filenames

  # important to iterate this way (mutating meshes in place) rather than
  # building a new dict with a comprehension, which would briefly hold a
  # second copy of the fragments in memory
  for label in labels:
    meshes[label] = Mesh.concatenate(*meshes[label])
  del labels

  fname, shard = create_mesh_shard(
    cv, meshes, 
    num_lod, draco_compression_level,
    progress, shard_no
  )
  del meshes

  if shard is None:
    return

  cf = CloudFiles(cv.mesh.meta.layerpath)
  cf.put(
    fname, shard,
    compress=False,
    content_type='application/octet-stream',
    cache_control='no-cache',
  )
Example #2
def process_mesh(
    cv: CloudVolume,
    label: int,
    mesh_fragments: List[Mesh],
    num_lod: int = 1,
    draco_compression_level: int = 1,
) -> Tuple[MultiLevelPrecomputedMeshManifest, Mesh]:

    mesh = Mesh.concatenate(*mesh_fragments)

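    # single-LOD manifest: one fragment covering the full volume bounds;
    # fragment_offsets is patched with the real byte length after Draco
    # encoding below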
    manifest = MultiLevelPrecomputedMeshManifest(
        segment_id=label,
        chunk_shape=cv.bounds.size3(),
        grid_origin=cv.bounds.minpt,
        num_lods=1,
        lod_scales=[1],
        vertex_offsets=[[0, 0, 0]],
        num_fragments_per_lod=[1],
        fragment_positions=[[[0, 0, 0]]],
        fragment_offsets=[0],  # needs to be set when we have the final value
    )

    vqb = int(cv.mesh.meta.info["vertex_quantization_bits"])

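    # convert vertices from physical units to voxel coordinates at the mesh
    # mip, then quantize them into the integer "stored model" space that
    # Neuroglancer's multi-resolution mesh format expects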
    mesh.vertices /= cv.meta.resolution(cv.mesh.meta.mip)
    mesh.vertices = to_stored_model_space(mesh.vertices,
                                          manifest,
                                          lod=0,
                                          vertex_quantization_bits=vqb,
                                          frag=0)

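    # Draco quantizes within a cube, so use the largest bounding-box extent
    # as the range so every axis shares the same quantization step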
    quantization_range = np.max(mesh.vertices, axis=0) - np.min(mesh.vertices,
                                                                axis=0)
    quantization_range = np.max(quantization_range)

    # mesh.vertices must be an integer type or the mesh will display
    # distorted in neuroglancer.
    mesh = DracoPy.encode(
        mesh.vertices,
        mesh.faces,
        quantization_bits=vqb,
        compression_level=draco_compression_level,
        quantization_range=quantization_range,
        quantization_origin=np.min(mesh.vertices, axis=0),
        create_metadata=True,
    )
    manifest.fragment_offsets = [len(mesh)]

    return (manifest, mesh)
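A small usage sketch, assuming the dataset's mesh metadata already defines vertex_quantization_bits (i.e. a multi-resolution mesh is configured); the path, label, and toy triangle fragments are hypothetical stand-ins for fragments fetched from storage:

import numpy as np
from cloudvolume import CloudVolume, Mesh

cv = CloudVolume('gs://example-bucket/segmentation')  # hypothetical path

# toy triangles standing in for real mesh fragments
frag_a = Mesh(
    np.array([[0, 0, 0], [400, 0, 0], [0, 400, 0]], dtype=np.float32),
    np.array([[0, 1, 2]], dtype=np.uint32),
)
frag_b = Mesh(
    np.array([[400, 400, 0], [800, 400, 0], [400, 800, 0]], dtype=np.float32),
    np.array([[0, 1, 2]], dtype=np.uint32),
)

manifest, draco_buffer = process_mesh(cv, label=12345, mesh_fragments=[frag_a, frag_b])
# draco_buffer holds the Draco-encoded bytes; manifest.fragment_offsets
# records its length so a shard writer can locate the fragment data.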