Example #1
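    # Fragment: the __iter__ method of a task-iterator class. volume_map, vol,
    # src_path, dest_path, and consensus_map_path are defined in the enclosing
    # function; Example #3 shows the full context.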
    def __iter__(self):
      for boundstr, volume_id in volume_map.items():
        bbox = Bbox.from_filename(boundstr)
        bbox.minpt = Vec.clamp(bbox.minpt, vol.bounds.minpt, vol.bounds.maxpt)
        bbox.maxpt = Vec.clamp(bbox.maxpt, vol.bounds.minpt, vol.bounds.maxpt)

        yield HyperSquareConsensusTask(
          src_path=src_path,
          dest_path=dest_path,
          ew_volume_id=int(volume_id),
          consensus_map_path=consensus_map_path,
          shape=bbox.size3(),
          offset=bbox.minpt.clone(),
        )
Example #2
def generate_chunks(meta, img, offset, mip):
    """
    Yield chunk-aligned cutout coordinates for writing img into a volume:
    (startpt, endpt) index into the in-memory img array, while (spt, ept)
    are the corresponding absolute voxel coordinates at the given mip level.
    """
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]

    bounds = Bbox(offset, shape + offset)

    alignment_check = bounds.round_to_chunk_size(meta.chunk_size(mip),
                                                 meta.voxel_offset(mip))

    if not np.all(alignment_check.minpt == bounds.minpt):
        raise AlignmentError("""
      Only chunk aligned writes are supported by this function. 

      Got:             {}
      Volume Offset:   {} 
      Nearest Aligned: {}
    """.format(bounds, meta.voxel_offset(mip), alignment_check))

    bounds = Bbox.clamp(bounds, meta.bounds(mip))

    img_offset = bounds.minpt - offset
    img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

    for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)
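
A minimal consumption sketch for the tuples yielded above; the destination volume vol and the in-memory img array used here are placeholders assumed for illustration, not part of the original:

# hypothetical: copy each chunk of img into its absolute location in a volume
for startpt, endpt, spt, ept in generate_chunks(meta, img, offset, mip):
    chunk = img[startpt[0]:endpt[0], startpt[1]:endpt[1], startpt[2]:endpt[2]]
    vol[spt[0]:ept[0], spt[1]:ept[1], spt[2]:ept[2]] = chunk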
Example #3
def create_hypersquare_consensus_tasks(task_queue, src_path, dest_path,
                                       volume_map_file, consensus_map_path):
    """
  Transfer an Eyewire consensus into neuroglancer. This first requires
  importing the raw segmentation via a hypersquare ingest task. However,
  this can probably be streamlined at some point.

  The volume map file should be JSON encoded and 
  look like { "X-X_Y-Y_Z-Z": EW_VOLUME_ID }

  The consensus map file should look like:
  { VOLUMEID: { CELLID: [segids] } }
  """

    with open(volume_map_file, 'r') as f:
        volume_map = json.loads(f.read())

    vol = CloudVolume(dest_path)

    for boundstr, volume_id in tqdm(
            volume_map.items(),
            desc="Inserting HyperSquare Consensus Remap Tasks"):
        bbox = Bbox.from_filename(boundstr)
        bbox.minpt = Vec.clamp(bbox.minpt, vol.bounds.minpt, vol.bounds.maxpt)
        bbox.maxpt = Vec.clamp(bbox.maxpt, vol.bounds.minpt, vol.bounds.maxpt)

        task = HyperSquareConsensusTask(
            src_path=src_path,
            dest_path=dest_path,
            ew_volume_id=int(volume_id),
            consensus_map_path=consensus_map_path,
            shape=bbox.size3(),
            offset=bbox.minpt.clone(),
        )
        task_queue.insert(task)
    task_queue.wait()
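
The function only needs a queue-like object exposing insert() and wait(). A hedged sketch of calling it with an in-process stand-in; LocalQueue, the paths, and the filenames below are illustrative assumptions, not part of the original:

class LocalQueue:
    # hypothetical stand-in that executes each task immediately instead of enqueuing it
    def insert(self, task):
        task.execute()  # assumes igneous-style tasks expose execute()
    def wait(self):
        pass  # nothing to wait for when executing synchronously

create_hypersquare_consensus_tasks(
    LocalQueue(),
    src_path='gs://bucket/hypersquare',           # hypothetical paths
    dest_path='gs://bucket/consensus',
    volume_map_file='volume_map.json',            # { "X-X_Y-Y_Z-Z": EW_VOLUME_ID }
    consensus_map_path='gs://bucket/consensus_map.json',
)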
Example #4
def generate_chunks(meta, img, offset, mip):
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]

    bounds = Bbox(offset, shape + offset)

    alignment_check = bounds.round_to_chunk_size(meta.chunk_size(mip),
                                                 meta.voxel_offset(mip))

    if not np.all(alignment_check.minpt == bounds.minpt):
        raise AlignmentError(f"""
      Only chunk aligned writes are supported by this function. 

      Got:             {bounds}
      Volume Offset:   {meta.voxel_offset(mip)} 
      Nearest Aligned: {alignment_check}
    """)

    bounds = Bbox.clamp(bounds, meta.bounds(mip))

    img_offset = bounds.minpt - offset
    img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

    class ChunkIterator:
        def __len__(self):
            csize = meta.chunk_size(mip)
            bbox = Bbox(img_offset, img_end)
            # round up and avoid conversion to float
            n_chunks = (bbox.dx + csize[0] - 1) // csize[0]
            n_chunks *= (bbox.dy + csize[1] - 1) // csize[1]
            n_chunks *= (bbox.dz + csize[2] - 1) // csize[2]
            return n_chunks

        def __iter__(self):
            for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
                startpt = startpt.clone()
                endpt = min2(startpt + meta.chunk_size(mip), shape)
                spt = (startpt + bounds.minpt).astype(int)
                ept = (endpt + bounds.minpt).astype(int)
                yield (startpt, endpt, spt, ept)

    return ChunkIterator()
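
Because the returned iterator implements __len__, a progress bar can display a total. A short sketch; the use of tqdm here is an assumption for illustration:

from tqdm import tqdm

chunks = generate_chunks(meta, img, offset, mip)
for startpt, endpt, spt, ept in tqdm(chunks, desc="uploading chunks"):
    pass  # upload or process each chunk; len(chunks) supplies the bar's total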
Example #5
def ingest(args):
    """
    Ingest an HDF file to a CloudVolume bucket
    """
    if args.local_hdf_path:
        hdf_file = h5py.File(args.local_hdf_path, "r")
    else:
        with Storage(args.cloud_src_path) as storage:
            # Storage.get_file returns the object's contents as bytes; wrap them
            # in io.BytesIO so h5py can open the download as a file-like object.
            hdf_file = h5py.File(io.BytesIO(storage.get_file(args.cloud_hdf_filename)), "r")
    cur_hdf_group = hdf_file
    for group_name in args.hdf_keys_to_dataset:
        cur_hdf_group = cur_hdf_group[group_name]
    hdf_dataset = cur_hdf_group
    if args.zyx:
        dataset_shape = np.array(
            [hdf_dataset.shape[2], hdf_dataset.shape[1], hdf_dataset.shape[0]])
    else:
        dataset_shape = np.array([*hdf_dataset.shape])
    if args.layer_type == "image":
        data_type = "uint8"
    else:
        data_type = "uint64"
    voxel_offset = args.voxel_offset
    info = CloudVolume.create_new_info(
        num_channels=1,
        layer_type=args.layer_type,
        data_type=data_type,
        encoding="raw",
        resolution=args.resolution,
        voxel_offset=voxel_offset,
        chunk_size=args.chunk_size,
        volume_size=dataset_shape,
    )
    provenance = {
        "description": args.provenance_description,
        "owners": [args.owner]
    }
    vol = CloudVolume(args.dst_path, info=info, provenance=provenance)
    vol.commit_info()
    vol.commit_provenance()

    all_files = set()
    for x in np.arange(voxel_offset[0], voxel_offset[0] + dataset_shape[0],
                       args.chunk_size[0]):
        for y in np.arange(voxel_offset[1], voxel_offset[1] + dataset_shape[1],
                           args.chunk_size[1]):
            for z in np.arange(voxel_offset[2],
                               voxel_offset[2] + dataset_shape[2],
                               args.chunk_size[2]):
                all_files.add(tuple((x, y, z)))

    progress_dir = mkdir(
        "progress/")  # unlike os.mkdir, doesn't crash on a preexisting directory
    done_files = set()
    for done_file in os.listdir(progress_dir):
        # progress filenames are comma-separated coordinates; parse them back into
        # integer tuples so they compare equal to the entries in all_files
        done_files.add(tuple(int(t) for t in done_file.split(",")))
    to_upload = all_files.difference(done_files)

    for chunk_start_tuple in to_upload:
        chunk_start = np.array(list(chunk_start_tuple))
        end_of_dataset = np.array(voxel_offset) + dataset_shape
        chunk_end = chunk_start + np.array(args.chunk_size)
        chunk_end = Vec(*chunk_end)
        chunk_end = Vec.clamp(chunk_end, Vec(0, 0, 0), end_of_dataset)
        chunk_hdf_start = chunk_start - voxel_offset
        chunk_hdf_end = chunk_end - voxel_offset
        if args.zyx:
            chunk = hdf_dataset[chunk_hdf_start[2]:chunk_hdf_end[2],
                                chunk_hdf_start[1]:chunk_hdf_end[1],
                                chunk_hdf_start[0]:chunk_hdf_end[0], ]
            chunk = chunk.T
        else:
            chunk = hdf_dataset[chunk_hdf_start[0]:chunk_hdf_end[0],
                                chunk_hdf_start[1]:chunk_hdf_end[1],
                                chunk_hdf_start[2]:chunk_hdf_end[2], ]
        print("Processing ", chunk_start_tuple)
        array = np.array(chunk, dtype=np.dtype(data_type), order="F")
        vol[chunk_start[0]:chunk_end[0], chunk_start[1]:chunk_end[1],
            chunk_start[2]:chunk_end[2], ] = array
        # record the chunk as done, using the same comma-separated format read above
        touch(os.path.join(progress_dir, ",".join(str(int(v)) for v in chunk_start_tuple)))
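
A hedged invocation sketch: the attribute names mirror what ingest() reads from args, but the concrete values, paths, and the use of argparse.Namespace are illustrative assumptions:

from argparse import Namespace

ingest(Namespace(
    local_hdf_path="data.h5",                 # set to None to read from cloud storage instead
    cloud_src_path=None,
    cloud_hdf_filename=None,
    hdf_keys_to_dataset=["volumes", "raw"],   # hypothetical group path inside the HDF5 file
    zyx=False,
    layer_type="image",
    voxel_offset=[0, 0, 0],
    resolution=[4, 4, 40],
    chunk_size=[64, 64, 64],
    dst_path="file:///tmp/cloudvolume/image",
    provenance_description="example ingest",
    owner="user@example.com",
))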
Example #6
def create_contrast_normalization_tasks(src_path,
                                        dest_path,
                                        levels_path=None,
                                        shape=None,
                                        mip=0,
                                        clip_fraction=0.01,
                                        fill_missing=False,
                                        translate=(0, 0, 0),
                                        minval=None,
                                        maxval=None,
                                        bounds=None):

    srcvol = CloudVolume(src_path, mip=mip)

    try:
        dvol = CloudVolume(dest_path, mip=mip)
    except Exception:  # no info file
        info = copy.deepcopy(srcvol.info)
        dvol = CloudVolume(dest_path, mip=mip, info=info)
        dvol.info['scales'] = dvol.info['scales'][:mip + 1]
        dvol.commit_info()

    if bounds is None:
        # without this guard, bounds.size3() below raises when bounds is left at
        # its default; the later revision in Example #7 adds the same check
        bounds = srcvol.bounds.clone()

    if shape is None:
        shape = Bbox((0, 0, 0), (2048, 2048, 64))
        shape = shape.shrink_to_chunk_size(dvol.underlying).size3()
        shape = Vec.clamp(shape, (1, 1, 1), bounds.size3())

    shape = Vec(*shape)

    create_downsample_scales(dest_path,
                             mip=mip,
                             ds_shape=shape,
                             preserve_chunk_size=True)
    dvol.refresh_info()

    bounds = get_bounds(srcvol, bounds, shape, mip)

    class ContrastNormalizationTaskIterator(object):
        def __len__(self):
            return int(reduce(operator.mul, np.ceil(bounds.size3() / shape)))

        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                task_shape = min2(shape.clone(), srcvol.bounds.maxpt - startpt)
                yield ContrastNormalizationTask(
                    src_path=src_path,
                    dest_path=dest_path,
                    levels_path=levels_path,
                    shape=task_shape,
                    offset=startpt.clone(),
                    clip_fraction=clip_fraction,
                    mip=mip,
                    fill_missing=fill_missing,
                    translate=translate,
                    minval=minval,
                    maxval=maxval,
                )

            # this runs only after the final task has been yielded: provenance is
            # recorded once the iterator has been fully consumed
            dvol.provenance.processing.append({
                'method': {
                    'task': 'ContrastNormalizationTask',
                    'src_path': src_path,
                    'dest_path': dest_path,
                    'shape': Vec(*shape).tolist(),
                    'clip_fraction': clip_fraction,
                    'mip': mip,
                    'translate': Vec(*translate).tolist(),
                    'minval': minval,
                    'maxval': maxval,
                    'bounds': [bounds.minpt.tolist(),
                               bounds.maxpt.tolist()],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            dvol.commit_provenance()

    return ContrastNormalizationTaskIterator()
Example #7
def create_contrast_normalization_tasks(src_path,
                                        dest_path,
                                        levels_path=None,
                                        shape=None,
                                        mip=0,
                                        clip_fraction=0.01,
                                        fill_missing=False,
                                        translate=(0, 0, 0),
                                        minval=None,
                                        maxval=None,
                                        bounds=None,
                                        bounds_mip=0):
    """
  Use the output of luminence levels to contrast
  correct the image by stretching the histogram
  to cover the full range of the data type.
  """
    srcvol = CloudVolume(src_path, mip=mip)

    try:
        dvol = CloudVolume(dest_path, mip=mip)
    except Exception:  # no info file
        info = copy.deepcopy(srcvol.info)
        dvol = CloudVolume(dest_path, mip=mip, info=info)
        dvol.info['scales'] = dvol.info['scales'][:mip + 1]
        dvol.commit_info()

    if bounds is None:
        bounds = srcvol.bounds.clone()

    if shape is None:
        shape = Bbox((0, 0, 0), (2048, 2048, 64))
        shape = shape.shrink_to_chunk_size(dvol.underlying).size3()
        shape = Vec.clamp(shape, (1, 1, 1), bounds.size3())

    shape = Vec(*shape)

    downsample_scales.create_downsample_scales(dest_path,
                                               mip=mip,
                                               ds_shape=shape,
                                               preserve_chunk_size=True)
    dvol.refresh_info()

    bounds = get_bounds(srcvol, bounds, mip, bounds_mip=bounds_mip)

    class ContrastNormalizationTaskIterator(FinelyDividedTaskIterator):
        def task(self, shape, offset):
            return ContrastNormalizationTask(
                src_path=src_path,
                dest_path=dest_path,
                levels_path=levels_path,
                shape=shape.clone(),
                offset=offset.clone(),
                clip_fraction=clip_fraction,
                mip=mip,
                fill_missing=fill_missing,
                translate=translate,
                minval=minval,
                maxval=maxval,
            )

        def on_finish(self):
            dvol.provenance.processing.append({
                'method': {
                    'task': 'ContrastNormalizationTask',
                    'src_path': src_path,
                    'dest_path': dest_path,
                    'shape': Vec(*shape).tolist(),
                    'clip_fraction': clip_fraction,
                    'mip': mip,
                    'translate': Vec(*translate).tolist(),
                    'minval': minval,
                    'maxval': maxval,
                    'bounds': [bounds.minpt.tolist(),
                               bounds.maxpt.tolist()],
                },
                'by':
                operator_contact(),
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            dvol.commit_provenance()

    return ContrastNormalizationTaskIterator(bounds, shape)
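
A hedged usage sketch: the paths are placeholders, and executing the yielded tasks in-process (rather than through a distributed task queue) is an assumption for illustration:

tasks = create_contrast_normalization_tasks(
    src_path='gs://bucket/uncorrected',   # hypothetical source layer
    dest_path='gs://bucket/corrected',    # hypothetical destination layer
    levels_path='gs://bucket/levels',     # output of the luminance levels step
    mip=0,
    clip_fraction=0.01,
)

for task in tasks:      # iterating yields ContrastNormalizationTask objects
    task.execute()      # assumes igneous-style tasks expose execute()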