def remesh_pending(cg: ChunkedGraph):
    """Re-run meshing for level-2 IDs recorded as in-progress tasks."""
    mesh_dir = cg.meta.dataset_info["mesh"]
    mesh_info = cg.meta.custom_data.get("mesh", {})
    unsharded_mesh_path = join(
        cg.meta.data_source.WATERSHED,
        mesh_dir,
        cg.meta.dataset_info["mesh_metadata"]["unsharded_mesh_dir"],
    )
    pending_path = f"{unsharded_mesh_path}/in-progress"
    for task in _get_pending_tasks(pending_path):
        fname, l2ids = task
        print(f"remeshing IDs {l2ids} from {fname}")
        remeshing(
            cg,
            l2ids,
            stop_layer=mesh_info["max_layer"],
            mip=mesh_info["mip"],
            max_err=mesh_info["max_error"],
            cv_sharded_mesh_dir=mesh_dir,
            cv_unsharded_mesh_path=unsharded_mesh_path,
        )
        # Delete the task marker only after remeshing has succeeded.
        with Storage(pending_path) as storage:  # pylint: disable=not-context-manager
            storage.delete_file(fname)
        print(f"remesh job for {fname} with {l2ids} complete")
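_get_pending_tasks is referenced above but not shown. The sketch below is a minimal assumption of its shape, inferred only from how remesh_pending() consumes it: each file under the in-progress path is treated as one task whose contents are the level-2 IDs to remesh. The file listing calls come from cloud-volume's SimpleStorage; the ID encoding is hypothetical, not the actual implementation.

def _get_pending_tasks(pending_path: str):
    # Sketch under assumptions; names and encoding are not from the source.
    import numpy as np
    from cloudvolume.storage import SimpleStorage as Storage

    with Storage(pending_path) as storage:  # pylint: disable=not-context-manager
        for fname in storage.list_files():
            content = storage.get_file(fname)
            if not content:
                continue
            # Assumed encoding: raw little-endian uint64 level-2 IDs.
            l2ids = np.frombuffer(content, dtype=np.uint64)
            yield fname, l2ids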
def _remeshing(serialized_cg_info, lvl2_nodes):
    cg = chunkedgraph.ChunkedGraph(**serialized_cg_info)

    # TODO: stop_layer and mip should be configurable by dataset
    meshgen.remeshing(cg, lvl2_nodes, stop_layer=4, mesh_path=None, mip=1, max_err=320)

    return Response(status=200)
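A minimal usage sketch, not from the source: the serialized graph info expected by _remeshing() can be produced from an existing ChunkedGraph with get_serialized_info(), the same call the handler below logs. The caller name is hypothetical.

def _remeshing_usage_example(cg, lvl2_nodes):
    # Hypothetical caller: serialize the graph so a worker can rebuild it.
    serialized_cg_info = cg.get_serialized_info()
    return _remeshing(serialized_cg_info, list(lvl2_nodes))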
def remeshing(table_id, lvl2_nodes):
    lvl2_nodes = np.array(lvl2_nodes, dtype=np.uint64)
    cg = app_utils.get_cg(table_id)
    current_app.logger.debug(f"remeshing {lvl2_nodes} {cg.get_serialized_info()}")

    # TODO: stop_layer and mip should be configurable by dataset
    meshgen.remeshing(
        cg,
        lvl2_nodes,
        stop_layer=4,
        cv_path=None,
        cv_mesh_dir=None,
        mip=1,
        max_err=320,
    )
def remesh(cg: ChunkedGraph, operation_id: int, l2ids: ndarray):
    from cloudvolume.storage import SimpleStorage as Storage

    mesh_info = cg.meta.custom_data.get("mesh", {})
    mesh_dir, unsharded_mesh_path, bucket_path, file_name = get_remesh_info(
        cg, operation_id
    )
    remeshing(
        cg,
        l2ids,
        stop_layer=mesh_info["max_layer"],
        mip=mesh_info["mip"],
        max_err=mesh_info["max_error"],
        cv_sharded_mesh_dir=mesh_dir,
        cv_unsharded_mesh_path=unsharded_mesh_path,
    )
    # Remove the per-operation task file once its meshes have been rebuilt.
    with Storage(bucket_path) as storage:  # pylint: disable=not-context-manager
        storage.delete_file(file_name)
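get_remesh_info() is referenced above but not shown. The sketch below only illustrates the return shape implied by the call site (sharded mesh dir, unsharded mesh path, the bucket holding the per-operation task file, and that file's name). The path construction mirrors remesh_pending(); the in-progress bucket location and the file-naming scheme are assumptions.

def get_remesh_info(cg: ChunkedGraph, operation_id: int):
    # Sketch under assumptions; reuses join() as in remesh_pending().
    mesh_dir = cg.meta.dataset_info["mesh"]
    unsharded_mesh_path = join(
        cg.meta.data_source.WATERSHED,
        mesh_dir,
        cg.meta.dataset_info["mesh_metadata"]["unsharded_mesh_dir"],
    )
    bucket_path = f"{unsharded_mesh_path}/in-progress"  # assumed location
    file_name = str(operation_id)  # assumed naming scheme
    return mesh_dir, unsharded_mesh_path, bucket_path, file_name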