def main():
    ''' run with
    mpiexec -n 16 mesh_generator $LABEL_PATH
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('labels',
                        default=None,
                        help="path to precomputed labels")
    parser.add_argument('--verbose',
                        action='store_true',
                        help="whether to show a progress bar")
    parser.add_argument('--dim_size',
                        default='64,64,64',
                        help="mesh chunksize")
    args = parser.parse_args()

    if rank == 0:
        in_path = 'file://' + args.labels
        mip = 0
        dim_size = tuple(int(d) for d in args.dim_size.split(','))
        print(dim_size)

        print("Making meshes...")
        mtq = mpiTaskQueue()
        tc.create_meshing_tasks(mtq, in_path, mip, shape=Vec(*dim_size))
        L = len(mtq._queue)
        #print('total', rank,size, L)
        all_range = np.arange(L)
        sub_ranges = np.array_split(all_range, size)
        #print(sub_ranges)
    else:
        sub_ranges = None
        mtq = None

    sub_ranges = comm.bcast(sub_ranges, root=0)
    mtq = comm.bcast(mtq, root=0)
    mtq.run(sub_ranges[rank], args.verbose)
    comm.barrier()
    if rank == 0:
        mtq.clean(all_range)
        print("Cleaned", len(mtq._queue))
        print("Updating metadata...")
        tc.create_mesh_manifest_tasks(mtq, in_path)
        print(len(mtq._queue))
        all_range = np.arange(len(mtq._queue))
        sub_ranges = np.array_split(all_range, size)
    else:
        sub_ranges = None
        mtq = None

    sub_ranges = comm.bcast(sub_ranges, root=0)
    mtq = comm.bcast(mtq, root=0)
    mtq.run(sub_ranges[rank], args.verbose)
    #mtq.run(sub_ranges[rank])
    comm.barrier()
    if rank == 0:
        command = r'gunzip {}/mesh/*.gz'.format(args.labels)
        print(command)
        subprocess.call(command, shell=True)
        print("Done!")
def create_mesh(animal, mip, mse):
    fileLocationManager = FileLocationManager(animal)
    channel_outdir = 'color_mesh_v2'
    OUTPUT_DIR = os.path.join(fileLocationManager.neuroglancer_data,
                              channel_outdir)
    if not os.path.exists(OUTPUT_DIR):
        print(f'DIR {OUTPUT_DIR} does not exist, exiting.')
        sys.exit()

    cloudpath = f"file://{OUTPUT_DIR}"
    cv = CloudVolume(cloudpath, mip)
    workers, _ = get_cpus()
    tq = LocalTaskQueue(parallel=workers)
    mesh_dir = f'mesh_mip_{mip}_err_{mse}'
    cv.info['mesh'] = mesh_dir
    cv.commit_info()
    tasks = tc.create_meshing_tasks(cv.layer_cloudpath,
                                    mip=mip,
                                    mesh_dir=mesh_dir,
                                    max_simplification_error=mse)
    tq.insert(tasks)
    tq.execute()
    tasks = tc.create_mesh_manifest_tasks(cv.layer_cloudpath,
                                          mesh_dir=mesh_dir)
    tq.insert(tasks)
    tq.execute()

    print("Done!")
def make_demo_mesh():
    # Mesh on 8 cores, use parallel=True to use all cores
    cloudpath = 'file:///home/ahoag/ngdemo/demo_bucket/atlas/allenatlas_2017'
    with LocalTaskQueue(parallel=8) as tq:
        tasks = tc.create_meshing_tasks(cloudpath, mip=0, shape=(256, 256, 256))
        tq.insert_all(tasks)
        tasks = tc.create_mesh_manifest_tasks(cloudpath)
        tq.insert_all(tasks)
    print("Done!")
Example #4
def make_mesh(vol, cores=8):
    # Mesh on 8 cores, use parallel=True to use all cores
    cloudpath = vol.cloudpath
    with LocalTaskQueue(parallel=cores) as tq:
        tasks = tc.create_meshing_tasks(cloudpath,
                                        mip=0,
                                        shape=(256, 256, 256))
        tq.insert_all(tasks)
        tasks = tc.create_mesh_manifest_tasks(cloudpath)
        tq.insert_all(tasks)
    print("Done!")
Example #5
def mesh(opt):
    gs_path = opt.gs_output

    # Mesh
    if opt.mesh:
        assert opt.vol_type == 'segmentation'

        # Create mesh
        with LocalTaskQueue(parallel=opt.parallel) as tq:
            tasks = tc.create_meshing_tasks(gs_path, mip=opt.mesh_mip)
            tq.insert_all(tasks)

        # Manifest
        with MockTaskQueue() as tq:
            tasks = tc.create_mesh_manifest_tasks(gs_path)
            tq.insert_all(tasks)
Example #6
def mesh_merge(ctx, path, queue, magnitude, dir):
  """
  (2) Merge the mesh pieces produced from the forging step.

  The per-cutout mesh fragments are then assembled and
  merged. However, this process occurs by compiling 
  a list of fragment files and uploading a "mesh manifest"
  file that is an index for locating the fragments.
  """
  tasks = tc.create_mesh_manifest_tasks(
    path, magnitude=magnitude, mesh_dir=dir
  )

  parallel = int(ctx.obj.get("parallel", 1))
  tq = TaskQueue(normalize_path(queue))
  tq.insert(tasks, parallel=parallel)
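For context on what the merge step writes: in the legacy (unsharded) precomputed mesh format, each segment ID gets a small JSON manifest that lists the fragment files produced by the forging step, roughly as sketched below (the segment ID and fragment names are illustrative, not real output). The magnitude argument above controls, roughly, how the manifest work is sharded into tasks by segment-ID prefix.

import json

# Illustrative manifest for segment 12345: the file is typically named
# "12345:0" inside the mesh directory, and each entry names one mesh
# fragment file produced for a task subvolume.
manifest = {
    "fragments": [
        "12345:0:0-512_0-512_0-64",
        "12345:0:0-512_0-512_64-128",
    ]
}
print(json.dumps(manifest))
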
Example #7
    def add_segmentation_mesh(self, shape=[448, 448, 448], mip=0):
        if self.precomputed_vol is None:
            raise NotImplementedError(
                'You have to call init_precomputed before calling this function.'
            )

        _, cpus = get_cpus()
        tq = LocalTaskQueue(parallel=cpus)
        tasks = tc.create_meshing_tasks(
            self.precomputed_vol.layer_cloudpath,
            mip=mip,
            max_simplification_error=40,
            shape=shape,
            compress=True)  # The first phase of creating mesh
        tq.insert(tasks)
        tq.execute()
        # This should be possible to fold into the meshing task queue above, but doing so triggers an odd bug for unknown reasons.
        tasks = tc.create_mesh_manifest_tasks(
            self.precomputed_vol.layer_cloudpath
        )  # The second phase of creating mesh
        tq.insert(tasks)
        tq.execute()
Example #8
    vol[bbx] = (h5_data[bbx.to_slices()[::-1]].T).astype(np_type)

print("KNeuroViz pre-processing DONE!")

if (h5_class.ImageType == 'segmentation'):
    seg_mesh_path = "file://" + h5_class.Destination_path

    with LocalTaskQueue(parallel=8) as tq:
        tasks = tc.create_meshing_tasks(seg_mesh_path,
                                        mip=0,
                                        shape=Vec(h5_class.shape_x // 2,
                                                  h5_class.shape_x // 2,
                                                  h5_class.shape_x // 2 // 8))
        tq.insert_all(tasks)
        tasks = tc.create_mesh_manifest_tasks(seg_mesh_path, magnitude=2)
        tq.insert_all(tasks)
    print("Mesh manifest processing DONE!")

else:
    print('channel is not running for 3D mesh manifest work')
    sys.exit(1)
Example #9
def segment(args):
    """Run segmentation on contiguous block of affinities from CV

    Args:
        args: ArgParse object from main
    """
    bbox_start = Vec(*args.bbox_start)
    bbox_size = Vec(*args.bbox_size)
    chunk_size = Vec(*args.chunk_size)
    bbox = Bbox(bbox_start, bbox_start + bbox_size)
    src_cv = CloudVolume(args.src_path,
                         fill_missing=True,
                         parallel=args.parallel)
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type='segmentation',
        data_type='uint64',
        encoding='raw',
        resolution=src_cv.info['scales'][args.mip]['resolution'],
        voxel_offset=bbox_start,
        chunk_size=chunk_size,
        volume_size=bbox_size,
        mesh='mesh_mip_{}_err_{}'.format(args.mip,
                                         args.max_simplification_error))
    dst_cv = CloudVolume(args.dst_path, info=info, parallel=args.parallel)
    dst_cv.provenance.description = 'ws+agg using waterz'
    dst_cv.provenance.processing.append({
        'method': {
            'task': 'watershed+agglomeration',
            'src_path': args.src_path,
            'dst_path': args.dst_path,
            'mip': args.mip,
            'shape': bbox_size.tolist(),
            'bounds': [
                bbox.minpt.tolist(),
                bbox.maxpt.tolist(),
            ],
        },
        'by': args.owner,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
    dst_cv.provenance.owners = [args.owner]
    dst_cv.commit_info()
    dst_cv.commit_provenance()
    if args.segment:
        print('Downloading affinities')
        aff = src_cv[bbox.to_slices()]
        aff = np.transpose(aff, (3, 0, 1, 2))
        aff = np.ascontiguousarray(aff, dtype=np.float32)
        thresholds = [args.threshold]
        print('Starting ws+agg')
        seg_gen = waterz.agglomerate(aff, thresholds)
        seg = next(seg_gen)
        print('Deleting affinities')
        del aff
        print('Uploading segmentation')
        dst_cv[bbox.to_slices()] = seg
    if args.mesh:
        print('Starting meshing')
        with LocalTaskQueue(parallel=args.parallel) as tq:
            tasks = tc.create_meshing_tasks(
                layer_path=args.dst_path,
                mip=args.mip,
                shape=args.chunk_size,
                simplification=True,
                max_simplification_error=args.max_simplification_error,
                progress=True)
            tq.insert_all(tasks)
            tasks = tc.create_mesh_manifest_tasks(layer_path=args.dst_path,
                                                  magnitude=args.magnitude)
            tq.insert_all(tasks)
        print("Meshing complete")
Example #10
def create_mesh(animal, limit, mse):
    #chunks = calculate_chunks('full', -1)
    chunks = [64,64,1]
    scales = (10400, 10400, 20000)
    fileLocationManager = FileLocationManager(animal)
    INPUT = "/net/birdstore/Active_Atlas_Data/data_root/pipeline_data/DK52/preps/CH3/shapes"
    OUTPUT1_DIR = os.path.join(fileLocationManager.neuroglancer_data, 'mesh_input')
    OUTPUT2_DIR = os.path.join(fileLocationManager.neuroglancer_data, 'mesh')
    if 'ultraman' in get_hostname():
        if os.path.exists(OUTPUT1_DIR):
            shutil.rmtree(OUTPUT1_DIR)
        if os.path.exists(OUTPUT2_DIR):
            shutil.rmtree(OUTPUT2_DIR)

    files = sorted(os.listdir(INPUT))

    os.makedirs(OUTPUT1_DIR, exist_ok=True)
    os.makedirs(OUTPUT2_DIR, exist_ok=True)

    len_files = len(files)
    midpoint = len_files // 2
    midfilepath = os.path.join(INPUT, files[midpoint])
    midfile = io.imread(midfilepath)
    data_type = midfile.dtype
    #image = np.load('/net/birdstore/Active_Atlas_Data/data_root/pipeline_data/structures/allen/allen.npy')
    #ids = np.unique(image)
    ids = {'infrahypoglossal': 200, 'perifacial': 210, 'suprahypoglossal': 220}

    height, width = midfile.shape
    volume_size = (width, height, len(files))  # neuroglancer ordering is width, height, depth
    print('volume size', volume_size)
    ng = NumpyToNeuroglancer(animal, None, scales, layer_type='segmentation', 
        data_type=data_type, chunk_size=chunks)
    ng.init_precomputed(OUTPUT1_DIR, volume_size, progress_id=1)

    file_keys = []
    for i,f in enumerate(tqdm(files)):
        infile = os.path.join(INPUT, f)
        file_keys.append([i, infile])
        #ng.process_image([i, infile])
    #sys.exit()

    start = timer()
    workers, cpus = get_cpus()
    print(f'Working on {len(file_keys)} files with {workers} cpus')
    with ProcessPoolExecutor(max_workers=workers) as executor:
        executor.map(ng.process_image, sorted(file_keys), chunksize=1)
        executor.shutdown(wait=True)

    ng.precomputed_vol.cache.flush()

    end = timer()
    print(f'Create volume method took {end - start} seconds')


    ##### rechunk
    cloudpath1 = f"file://{OUTPUT1_DIR}"
    cv1 = CloudVolume(cloudpath1, 0)
    _, workers = get_cpus()
    tq = LocalTaskQueue(parallel=workers)
    cloudpath2 = f'file://{OUTPUT2_DIR}'

    tasks = tc.create_transfer_tasks(cloudpath1, dest_layer_path=cloudpath2, 
        chunk_size=[64,64,64], mip=0, skip_downsamples=True)

    tq.insert(tasks)
    tq.execute()

    ##### add segment properties
    cv2 = CloudVolume(cloudpath2, 0)
    cv2.info['segment_properties'] = 'names'
    cv2.commit_info()

    segment_properties_path = os.path.join(cloudpath2.replace('file://', ''), 'names')
    os.makedirs(segment_properties_path, exist_ok=True)

    info = {
        "@type": "neuroglancer_segment_properties",
        "inline": {
            "ids": [str(value) for key, value in ids.items()],
            "properties": [{
                "id": "label",
                "type": "label",
                "values": [str(key) for key, value in ids.items()]
            }]
        }
    }
    with open(os.path.join(segment_properties_path, 'info'), 'w') as file:
        json.dump(info, file, indent=2)

    ##### first mesh task, create meshing tasks
    workers, _ = get_cpus()
    tq = LocalTaskQueue(parallel=workers)
    mesh_dir = f'mesh_mip_0_err_{mse}'
    cv2.info['mesh'] = mesh_dir
    cv2.commit_info()
    tasks = tc.create_meshing_tasks(cv2.layer_cloudpath, mip=0, mesh_dir=mesh_dir, max_simplification_error=mse)
    tq.insert(tasks)
    tq.execute()
    ##### 2nd mesh task, create manifest
    tasks = tc.create_mesh_manifest_tasks(cv2.layer_cloudpath, mesh_dir=mesh_dir)
    tq.insert(tasks)
    tq.execute()
    
    print("Done!")
Example #11
def generate_mesh(labels, dim_size=(64, 64, 64), quiet=False):
    """Generate a mesh of label data.

    Create a mesh from a CloudVolume segmentation layer and save the result in
    the correct place for visualization. Mesh generation is parallelized over
    MPI ranks.

    Parameters
    ----------
    labels : str
        Path to a CloudVolume layer of segmentation labels.
    dim_size : tuple of int
        The size of the subvolume assigned to each task.
    quiet : bool
        If True, suppress stdout logging output.

    Notes
    -----
    To take advantage of MPI parallelism, this script must be run as
    ``mpiexec -n <n_ranks> python mesh_generator_v3.py <...args>``.
    """
    args = parse_args()

    # Disable logging if requested.
    if args.quiet:
        LOGGER.logger.removeHandler(syslog)
        noop = logging.NullHandler()
        LOGGER.logger.addHandler(noop)

    LOGGER.info('Starting on host {}'.format(HOST))

    # Set up the meshing task queue on rank 0.
    if RANK == 0:
        # Load in the layer data.
        LOGGER.info('Loading CloudVolume layer at {}'.format(args.labels))
        if os.path.isdir(
                args.labels) and not re.search(r'^file://', args.labels):
            in_path = 'file://' + args.labels
        else:
            in_path = args.labels
        mip = 0
        LOGGER.info('Meshing volume with dimensions {}'.format(dim_size))

        # Create a queue of meshing tasks over subvolumes of the layer.
        LOGGER.info("Setting up meshing task queue.")
        mtq = MPITaskQueue()
        tasks = tc.create_meshing_tasks(layer_path=in_path,
                                        mip=mip,
                                        shape=Vec(*dim_size),
                                        mesh_dir='mesh')

        for t in tasks:
            mtq.insert(t)

        # Compute the tasks for each rank to complete.
        LOGGER.info('{} tasks created.'.format(len(mtq._queue)))
        L = len(mtq._queue)
        all_range = np.arange(L)
        sub_ranges = np.array_split(all_range, SIZE)
    else:
        sub_ranges = None
        mtq = None

    # Synchronize and broadcast the task queue and assigned tasks to all ranks.
    sub_ranges = COMM.bcast(sub_ranges, root=0)
    mtq = COMM.bcast(mtq, root=0)

    # Run the tasks assigned to this rank, then wait for all to finish.
    LOGGER.info('Running task queue 1.')
    mtq.run(sub_ranges[RANK])
    LOGGER.info('Finished task queue 1.')
    COMM.barrier()

    # Set up the metadata update queue
    if RANK == 0:
        LOGGER.info('Cleaning {} tasks.'.format(len(mtq._queue)))
        mtq.clean(all_range)

        # Create a queue of `igneous` metadata update tasks.
        LOGGER.info('Setting up manifest update task queue.')
        mtq2 = MPITaskQueue()
        tasks = tc.create_mesh_manifest_tasks(in_path, magnitude=3)

        for t in tasks:
            mtq2.insert(t)

        # Compute the tasks for each rank to complete.
        LOGGER.info('Created {} mesh manifest tasks.'.format(len(mtq2._queue)))
        L2 = len(mtq2._queue)
        all_range = np.arange(L2)
        sub_ranges = np.array_split(all_range, SIZE)
    else:
        sub_ranges = None
        mtq2 = None

    # Synchronize and broadcast the metadata update task queue and assignments.
    sub_ranges = COMM.bcast(sub_ranges, root=0)
    mtq2 = COMM.bcast(mtq2, root=0)

    # Run the metadata update queue.
    LOGGER.info('Running task queue 2.')
    mtq2.run(sub_ranges[RANK])
    LOGGER.info('Finished task queue 2.')
    COMM.barrier()
    LOGGER.info('Done')
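The parse_args helper called at the top of generate_mesh is not shown. A minimal sketch consistent with the attributes the function reads (labels, quiet) and the dim_size default in its signature might be the following; treat the exact flags as assumptions:

import argparse

def parse_args():
    # Hypothetical CLI parser matching what generate_mesh appears to expect.
    parser = argparse.ArgumentParser(description='MPI mesh generator')
    parser.add_argument('labels',
                        help='path to a CloudVolume segmentation layer')
    parser.add_argument('--dim_size',
                        default='64,64,64',
                        help='comma-separated task subvolume shape')
    parser.add_argument('--quiet',
                        action='store_true',
                        help='suppress stdout logging output')
    return parser.parse_args()
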
Example #12
def upload_seg(
    meta: PreviewMeta,
    data: ndarray,
    slack_response: SlackResponse,
    transpose: bool = False,
):
    from numpy import transpose as np_transpose

    em = CloudVolume(meta.em_layer, mip=meta.dst_mip)
    output_layer = f"{environ['GT_BUCKET_PATH']}/{meta.author}/preview/{token_hex(8)}"
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type="segmentation",
        data_type="uint32",
        encoding="raw",
        resolution=em.resolution,
        voxel_offset=meta.dst_bbox.minpt,
        volume_size=meta.dst_bbox.size3(),
        mesh=f"mesh_mip_{meta.dst_mip}_err_0",
        chunk_size=(64, 64, 8),
    )

    dst_cv = CloudVolume(output_layer, info=info, mip=0, cdn_cache=False)
    dst_cv.provenance.description = "Image directory ingest"
    dst_cv.provenance.processing.append({
        "method": {
            "task": "ingest",
            "image_path": meta.em_layer,
        },
        "date": str(datetime.today()),
        "script": "cloud_bot",
    })
    dst_cv.provenance.owners = [meta.author]
    dst_cv.commit_info()
    dst_cv.commit_provenance()

    checkpoint_notify("Processing data.", slack_response)
    crop_bbox = meta.dst_bbox - meta.src_bbox.minpt
    data = data[crop_bbox.to_slices()]
    dst_cv[meta.dst_bbox.to_slices()] = (np_transpose(data, (1, 0, 2))
                                         if transpose else data)

    with LocalTaskQueue(parallel=16) as tq:
        tasks = tc.create_downsampling_tasks(output_layer,
                                             mip=0,
                                             fill_missing=True,
                                             preserve_chunk_size=True)
        tq.insert_all(tasks)

        checkpoint_notify("Creating meshing tasks.", slack_response)
        tasks = tc.create_meshing_tasks(
            output_layer,
            mip=meta.dst_mip,
            simplification=False,
            shape=(320, 320, 40),
            max_simplification_error=0,
        )
        tq.insert_all(tasks)
        tasks = tc.create_mesh_manifest_tasks(output_layer, magnitude=1)
        tq.insert_all(tasks)
    return output_layer