Example #1
def test_sharded_image_bits(scale):
    dataset_size = Vec(*scale["size"])
    chunk_size = Vec(*scale["chunk_sizes"][0])

    spec = create_sharded_image_info(dataset_size=dataset_size,
                                     chunk_size=chunk_size,
                                     encoding=scale["encoding"],
                                     dtype=np.uint8)

    shape = image_shard_shape_from_spec(spec, dataset_size, chunk_size)

    shape = lib.min2(shape, dataset_size)
    dataset_bbox = Bbox.from_vec(dataset_size)
    gpts = list(gridpoints(dataset_bbox, dataset_bbox, chunk_size))
    grid_size = np.ceil(dataset_size / chunk_size).astype(np.int64)

    spec = ShardingSpecification.from_dict(spec)
    reader = ShardReader(None, None, spec)

    morton_codes = compressed_morton_code(gpts, grid_size)
    min_num_shards = prod(dataset_size / shape)
    max_num_shards = prod(np.ceil(dataset_size / shape))

    assert 0 < min_num_shards <= 2**spec.shard_bits
    assert 0 < max_num_shards <= 2**spec.shard_bits

    real_num_shards = len(set(map(reader.get_filename, morton_codes)))

    assert min_num_shards <= real_num_shards <= max_num_shards
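Every example on this page uses min2 to clamp a shape, endpoint, or task size against a boundary. A minimal self-contained sketch of that behavior, with plain NumPy arrays standing in for cloudvolume's Vec:

import numpy as np

# min2(a, b) behaves like an element-wise minimum; here it clamps a
# nominal shard shape to the dataset size (a sketch, not cloudvolume's code).
def min2(a, b):
    return np.minimum(a, b)

dataset_size = np.array([512, 512, 64])
shard_shape = np.array([1024, 1024, 64])
print(min2(shard_shape, dataset_size))  # -> [512 512 64]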
Example #2
def generate_chunks(meta, img, offset, mip):
    shape = Vec(*img.shape)[:3]
    offset = Vec(*offset)[:3]

    bounds = Bbox(offset, shape + offset)

    alignment_check = bounds.round_to_chunk_size(meta.chunk_size(mip),
                                                 meta.voxel_offset(mip))

    if not np.all(alignment_check.minpt == bounds.minpt):
        raise AlignmentError("""
      Only chunk aligned writes are supported by this function. 

      Got:             {}
      Volume Offset:   {} 
      Nearest Aligned: {}
    """.format(bounds, meta.voxel_offset(mip), alignment_check))

    bounds = Bbox.clamp(bounds, meta.bounds(mip))

    img_offset = bounds.minpt - offset
    img_end = Vec.clamp(bounds.size3() + img_offset, Vec(0, 0, 0), shape)

    for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)
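A hedged usage sketch for the generator above: vol is assumed to be an open CloudVolume, img a chunk-aligned array, and upload_chunk a hypothetical helper. Each yielded tuple carries local image coordinates (startpt, endpt) and global dataset coordinates (spt, ept):

# Hypothetical consumer of generate_chunks; names other than
# generate_chunks itself are assumptions for illustration.
for startpt, endpt, spt, ept in generate_chunks(vol.meta, img, offset, mip=0):
    chunk = img[startpt.x:endpt.x, startpt.y:endpt.y, startpt.z:endpt.z]
    upload_chunk(chunk, spt, ept)  # e.g. encode and PUT to storage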
Example #3
def __iter__(self):
    for startpt in xyzrange(img_offset, img_end, meta.chunk_size(mip)):
        startpt = startpt.clone()
        endpt = min2(startpt + meta.chunk_size(mip), shape)
        spt = (startpt + bounds.minpt).astype(int)
        ept = (endpt + bounds.minpt).astype(int)
        yield (startpt, endpt, spt, ept)
Example #4
        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                # note: bounded_shape is computed but unused; the task
                # receives the full, unclamped shape
                bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
                yield igneous.tasks.BlackoutTask(
                    cloudpath=cloudpath,
                    mip=mip,
                    shape=shape.clone(),
                    offset=startpt.clone(),
                    value=value,
                    non_aligned_writes=non_aligned_writes,
                )

            vol.provenance.processing.append({
                'method': {
                    'task': 'BlackoutTask',
                    'cloudpath': cloudpath,
                    'mip': mip,
                    'non_aligned_writes': non_aligned_writes,
                    'value': value,
                    'shape': shape.tolist(),
                    'bounds': [
                        bounds.minpt.tolist(),
                        bounds.maxpt.tolist(),
                    ],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
Example #5
def compute_scales(vol, mip, shape, axis, factor, chunk_size=None):
  shape = min2(vol.meta.volume_size(mip), shape)
  # sometimes we downsample a base layer of 512x512 
  # into underlying chunks of 64x64 which permits more scales
  underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip

  if chunk_size:
    scale_chunk_size = Vec(*chunk_size).astype(np.float32)
  else:
    scale_chunk_size = vol.meta.chunk_size(underlying_mip).astype(np.float32)

  if factor is None:
    factor = axis_to_factor(axis)

  factors = compute_factors(shape, factor, scale_chunk_size, vol.meta.volume_size(mip))
  scales = [ vol.meta.resolution(mip) ]

  precision = max(map(getprecision, vol.meta.resolution(mip)))

  def prec(x):
    if precision == 0:
      return int(x)
    return round(x, precision)

  for factor3 in factors:
    scales.append(
      list(map(prec, Vec(*scales[-1], dtype=np.float32) * Vec(*factor3)))
    )
  return scales[1:]
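compute_scales preserves the decimal precision of the source resolution when extending the scale list. A standalone sketch of that rounding rule, with a hypothetical getprecision that counts digits after the decimal point:

# Hypothetical stand-in for getprecision: digits after the decimal point.
def getprecision(x):
    s = repr(float(x))
    return 0 if s.endswith('.0') else len(s.split('.')[1])

resolution = (4.0, 4.0, 33.3)
precision = max(map(getprecision, resolution))  # -> 1

def prec(x):
    return int(x) if precision == 0 else round(x, precision)

print([prec(r * 2) for r in resolution])  # -> [8.0, 8.0, 66.6]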
Example #6
        def __iter__(self):
            for startpt in xyzrange(vol.bounds.minpt, vol.bounds.maxpt, shape):
                bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
                yield DeleteTask(
                    layer_path=layer_path,
                    shape=bounded_shape.clone(),
                    offset=startpt.clone(),
                    mip=mip,
                    num_mips=num_mips,
                )

            vol = CloudVolume(layer_path)
            vol.provenance.processing.append({
                'method': {
                    'task': 'DeleteTask',
                    'mip': mip,
                    'num_mips': num_mips,
                    'shape': shape.tolist(),
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            vol.commit_provenance()
Example #7
        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
                yield igneous.tasks.TouchTask(
                    cloudpath=cloudpath,
                    shape=bounded_shape.clone(),
                    offset=startpt.clone(),
                    mip=mip,
                )

            vol.provenance.processing.append({
                'method': {
                    'task': 'TouchTask',
                    'mip': mip,
                    'shape': shape.tolist(),
                    'bounds': [
                        bounds.minpt.tolist(),
                        bounds.maxpt.tolist(),
                    ],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            vol.commit_provenance()
Example #8
  def process(startpt, endpt, spt, ept):
    if np.array_equal(spt, ept):
      return

    imgchunk = img[ startpt.x:endpt.x, startpt.y:endpt.y, startpt.z:endpt.z, : ]

    # handle the edge of the dataset
    clamp_ept = min2(ept, meta.bounds(mip).maxpt)
    newept = clamp_ept - spt
    imgchunk = imgchunk[ :newept.x, :newept.y, :newept.z, : ]

    filename = "{}-{}_{}-{}_{}-{}".format(
      spt.x, clamp_ept.x,
      spt.y, clamp_ept.y, 
      spt.z, clamp_ept.z
    )

    cloudpath = meta.join(meta.key(mip), filename)

    if delete_black_uploads:
      if np.any(imgchunk != background_color):
        do_upload(imgchunk, cloudpath)
      else:
        do_delete(cloudpath)
    else:
      do_upload(imgchunk, cloudpath)
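The filename built above follows the Neuroglancer precomputed convention, encoding the chunk's global bounds with the clamped (not nominal) end coordinates. A small worked example:

# "xstart-xend_ystart-yend_zstart-zend" for a chunk at the origin,
# clamped to a 512x512x64 dataset.
spt = (0, 0, 0)
clamp_ept = (512, 512, 64)
filename = "{}-{}_{}-{}_{}-{}".format(
    spt[0], clamp_ept[0], spt[1], clamp_ept[1], spt[2], clamp_ept[2]
)
print(filename)  # -> 0-512_0-512_0-64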
Example #9
def task(self, shape, offset):
  bounded_shape = min2(shape, vol.bounds.maxpt - offset)
  return igneous.tasks.TouchTask(
    cloudpath=cloudpath,
    shape=bounded_shape.clone(),
    offset=offset.clone(),
    mip=mip,
  )
Example #10
def downsample_and_upload(image,
                          bounds,
                          vol,
                          ds_shape,
                          mip=0,
                          axis='z',
                          skip_first=False,
                          sparse=False,
                          only_last_mip=True):
    """ 
  mip:int, the current mip level of image
  only_last_mip::bool, only save the last mip level or not. 
    In default as False, we'll save all the intermediate mip level 
  """
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
        size=ds_shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
        print(
            "No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}"
            .format(image.shape, ds_shape, vol.volume_size, bounds))

    downsamplefn = downsample.method(vol.layer_type, sparse=sparse)

    vol.mip = mip
    if not skip_first:
        vol[bounds.to_slices()] = image

    new_bounds = bounds.clone()

    for factor3 in factors:
        vol.mip += 1
        image = downsamplefn(image, factor3)
        new_bounds //= factor3
        new_bounds.maxpt = new_bounds.minpt + Vec(*image.shape[:3])
        if factor3 is factors[-1]:
            # this is the last mip level
            vol[new_bounds.to_slices()] = image
        else:
            # this is not the last mip level
            if not only_last_mip:
                vol[new_bounds.to_slices()] = image
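In the loop above, new_bounds //= factor3 shrinks the write bounds once per mip using the per-level factor. A plain-integer sketch of that bookkeeping:

# Per-level division keeps the bounds aligned with each downsampled image.
minpt, maxpt = [0, 0, 0], [1024, 1024, 64]
for factor3 in [(2, 2, 1), (2, 2, 1), (2, 2, 1)]:
    minpt = [m // f for m, f in zip(minpt, factor3)]
    maxpt = [m // f for m, f in zip(maxpt, factor3)]
    print(minpt, maxpt)  # 512, then 256, then 128 in x and y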
Example #11
def task(self, shape, offset):
  bounded_shape = min2(shape, bounds.maxpt - offset)
  return DeleteTask(
    layer_path=layer_path,
    shape=bounded_shape.clone(),
    offset=offset.clone(),
    mip=mip,
    num_mips=num_mips,
  )
Example #12
def downsample_and_upload(
    image, bounds, vol, ds_shape, 
    mip=0, axis='z', skip_first=False,
    sparse=False
  ):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
      size=ds_shape,
      preserve_axis=axis,
      max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample_scales.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
      print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
          image.shape, ds_shape, vol.volume_size, bounds)
      )

    vol.mip = mip
    if not skip_first:
      vol[bounds.to_slices()] = image

    if len(factors) == 0:
      return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
      mips = tinybrain.downsample_with_averaging(image, factors[0], num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
      mips = tinybrain.downsample_segmentation(
        image, factors[0], 
        num_mips=num_mips, sparse=sparse
      )
    else:
      mips = tinybrain.downsample_with_striding(image, factors[0], num_mips=num_mips)

    new_bounds = bounds.clone()
   
    for factor3 in factors:
      vol.mip += 1
      new_bounds //= factor3
      mipped = mips.pop(0)
      new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
      vol[new_bounds] = mipped
Example #13
def task(self, shape, offset):
  # note: bounded_shape is computed but unused; the task receives the
  # full, unclamped shape
  bounded_shape = min2(shape, vol.bounds.maxpt - offset)
  return igneous.tasks.BlackoutTask(
    cloudpath=cloudpath,
    mip=mip,
    shape=shape.clone(),
    offset=offset.clone(),
    value=value,
    non_aligned_writes=non_aligned_writes,
  )
Example #14
def downsample_and_upload(image,
                          bounds,
                          vol,
                          ds_shape,
                          mip=0,
                          axis='z',
                          skip_first=False,
                          sparse=False,
                          factor=None):
    ds_shape = min2(vol.volume_size, ds_shape[:3])
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    chunk_size = vol.meta.chunk_size(underlying_mip).astype(np.float32)

    if factor is None:
        factor = downsample_scales.axis_to_factor(axis)
    factors = downsample_scales.compute_factors(ds_shape, factor, chunk_size)

    if len(factors) == 0:
        print(
            "No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}"
            .format(image.shape, ds_shape, vol.volume_size, bounds))

    vol.mip = mip
    if not skip_first:
        vol[bounds.to_slices()] = image

    if len(factors) == 0:
        return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
        mips = tinybrain.downsample_with_averaging(image,
                                                   factors[0],
                                                   num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
        mips = tinybrain.downsample_segmentation(image,
                                                 factors[0],
                                                 num_mips=num_mips,
                                                 sparse=sparse)
    else:
        mips = tinybrain.downsample_with_striding(image,
                                                  factors[0],
                                                  num_mips=num_mips)

    new_bounds = bounds.clone()

    for factor3 in factors:
        vol.mip += 1
        new_bounds //= factor3
        mipped = mips.pop(0)
        new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
        vol[new_bounds] = mipped
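tinybrain produces several mip levels from a single call, which is why the loop above only pops precomputed results. A hedged standalone sketch of the averaging path used for image layers:

import numpy as np
import tinybrain  # same dependency as in the example above

image = np.random.rand(512, 512, 64, 1).astype(np.float32)
# one call returns a list of arrays, one per mip level
mips = tinybrain.downsample_with_averaging(image, (2, 2, 1), num_mips=3)
print([m.shape for m in mips])
# expected: [(256, 256, 64, 1), (128, 128, 64, 1), (64, 64, 64, 1)]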
Example #15
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False,
                             chunk_size=None,
                             encoding=None):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    if chunk_size:
        underlying_shape = Vec(*chunk_size).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    if len(scales) == 0:
        print("WARNING: No scales generated.")

    for scale in scales:
        vol.add_scale(scale, encoding=encoding, chunk_size=chunk_size)

    if chunk_size is None:
        if preserve_chunk_size or len(scales) == 0:
            chunk_size = vol.scales[mip]['chunk_sizes']
        else:
            chunk_size = vol.scales[mip + 1]['chunk_sizes']
    else:
        chunk_size = [chunk_size]

    if encoding is None:
        encoding = vol.scales[mip]['encoding']

    for i in range(mip + 1, mip + len(scales) + 1):
        vol.scales[i]['chunk_sizes'] = chunk_size

    return vol.commit_info()
Example #16
def task(self, shape, offset):
  task_shape = min2(shape.clone(), dvol_bounds.maxpt - offset)
  return TransferTask(
    src_path=src_layer_path,
    dest_path=dest_layer_path,
    shape=task_shape,
    offset=offset.clone(),
    fill_missing=fill_missing,
    translate=translate,
    mip=mip,
    skip_downsamples=skip_downsamples,
  )
Example #17
def upload_build_chunks(storage, volume, offset=[0, 0, 0], build_chunk_size=[1024,1024,128]):
  offset = Vec(*offset)
  shape = Vec(*volume.shape[:3])
  build_chunk_size = Vec(*build_chunk_size)

  for spt in xyzrange( (0,0,0), shape, build_chunk_size):
    ept = min2(spt + build_chunk_size, shape)
    bbox = Bbox(spt, ept)
    chunk = volume[ bbox.to_slices() ]
    bbox += offset
    filename = 'build/{}'.format(bbox.to_filename())
    storage.put_file(filename, chunks.encode_npz(chunk))
  storage.wait()
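A hedged usage sketch for upload_build_chunks; the bucket path is hypothetical, and Storage is cloudvolume's legacy storage interface that the helper's put_file/wait calls assume:

import numpy as np
from cloudvolume import Storage  # legacy interface; an assumption here

volume = np.zeros((2048, 2048, 256), dtype=np.uint8)
with Storage('gs://example-bucket/dataset') as storage:  # hypothetical path
    upload_build_chunks(storage, volume, offset=[0, 0, 0])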
Example #18
def create_deletion_tasks(task_queue, layer_path):
    vol = CloudVolume(layer_path)
    shape = vol.underlying * 10

    for startpt in tqdm(xyzrange(vol.bounds.minpt, vol.bounds.maxpt, shape),
                        desc="Inserting Deletion Tasks"):
        # use a separate name so the boundary clamp doesn't shrink
        # `shape` for every subsequent task
        bounded_shape = min2(shape, vol.bounds.maxpt - startpt)
        task = DeleteTask(
            layer_path=layer_path,
            shape=bounded_shape.clone(),
            offset=startpt.clone(),
        )
        task_queue.insert(task)
    task_queue.wait('Uploading DeleteTasks')
Example #19
def task(self, shape, offset):
  task_shape = min2(shape.clone(), srcvol.bounds.maxpt - offset)
  return ContrastNormalizationTask(
    src_path=src_path,
    dest_path=dest_path,
    levels_path=levels_path,
    shape=task_shape,
    offset=offset.clone(),
    clip_fraction=clip_fraction,
    mip=mip,
    fill_missing=fill_missing,
    translate=translate,
    minval=minval,
    maxval=maxval,
  )
Example #20
def task(self, shape, offset):
  shape += 1  # 1px overlap on the right hand side
  # note: bounded_shape is computed but unused; the task receives the
  # full overlapped shape
  bounded_shape = min2(shape, bounds.maxpt - offset)
  return SkeletonTask(
    cloudpath=cloudpath,
    shape=shape.clone(),
    offset=offset.clone(),
    mip=mip,
    teasar_params=teasar_params,
    will_postprocess=will_postprocess,
    info=info,
    object_ids=object_ids,
    fix_branching=fix_branching,
    fix_borders=fix_borders,
    dust_threshold=dust_threshold,
    progress=progress,
    parallel=parallel,
  )
Example #21
        def __iter__(self):
            self.bounds = bounds.clone()
            self.bounds.minpt.z = bounds.minpt.z + self.level_start * shape.z
            self.bounds.maxpt.z = bounds.minpt.z + self.level_end * shape.z

            for start in xyzrange(self.bounds.minpt, self.bounds.maxpt, shape):
                task_shape = min2(shape.clone(), self.bounds.maxpt - start)

                task_bounds = Bbox(start, start + task_shape)
                if task_bounds.volume() < 1:
                    continue

                chunk_begin = tup2str(task_bounds.minpt)
                chunk_end = tup2str(task_bounds.maxpt)
                res_str = tup2str(resolution)

                cmd = (f"remap_ids {cleftpath} {cleftoutpath} {storagestr}"
                       f" --chunk_begin {chunk_begin} --chunk_end {chunk_end}"
                       f" --dup_map_storagestr {dupstoragestr} --mip {res_str}")

                yield SynaptorTask(cmd)
Example #22
      def __iter__(self):
        self.bounds = bounds.clone()
        self.bounds.minpt.z = bounds.minpt.z + self.level_start * shape.z
        self.bounds.maxpt.z = bounds.minpt.z + self.level_end * shape.z

        for startpt in xyzrange( self.bounds.minpt, self.bounds.maxpt, shape ):
          task_shape = min2(shape.clone(), self.bounds.maxpt - startpt)

          task_bounds = Bbox( startpt, startpt + task_shape )
          if task_bounds.volume() < 1:
            continue

          chunk_begin = tup2str(task_bounds.minpt)
          chunk_end = tup2str(task_bounds.maxpt)
          mip_str = tup2str(mip)

          cmd = (f"chunk_overlaps {segpath} {base_segpath} {storagestr}"
                 f" --chunk_begin {chunk_begin} --chunk_end {chunk_end}"
                 f" --parallel {parallel} --mip {mip_str}")

          yield SynaptorTask(cmd)
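tup2str is presumably a tiny formatter turning a coordinate triple into the form the synaptor CLI expects. A plausible sketch (the separator is an assumption):

# Plausible stand-in for tup2str; space-separated components assumed.
def tup2str(t):
    return " ".join(map(str, t))

print(tup2str((64, 64, 16)))  # -> "64 64 16"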
Example #23
def downsample_and_upload(image, bounds, vol, ds_shape, mip=0, axis='z', skip_first=False, zero_as_background=False):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = { 'x': 0, 'y': 1, 'z': 2 }
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
      size=ds_shape,
      preserve_axis=axis,
      max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
      print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
        image.shape, ds_shape, vol.volume_size, bounds)
      )

    downsamplefn = downsample.method(vol.layer_type,
                                     zero_as_background=zero_as_background)

    vol.mip = mip
    if not skip_first:
      vol[ bounds.to_slices() ] = image

    new_bounds = bounds.clone()

    for factor3 in factors:
      vol.mip += 1
      image = downsamplefn(image, factor3)
      new_bounds //= factor3
      new_bounds.maxpt = new_bounds.minpt + Vec(*image.shape[:3])
      vol[ new_bounds.to_slices() ] = image
Example #24
        def __iter__(self):
            self.bounds = bounds.clone()
            self.bounds.minpt.z = bounds.minpt.z + self.level_start * shape.z
            self.bounds.maxpt.z = bounds.minpt.z + self.level_end * shape.z

            for start in xyzrange(self.bounds.minpt, self.bounds.maxpt, shape):
                task_shape = min2(shape.clone(), self.bounds.maxpt - start)

                task_bounds = Bbox(start, start + task_shape)
                if task_bounds.volume() < 1:
                    continue

                chunk_begin = tup2str(task_bounds.minpt)
                chunk_end = tup2str(task_bounds.maxpt)
                patchsz_str = tup2str(patchsz)
                res_str = tup2str(resolution)

                cmd = (f"chunk_edges {imgpath} {cleftpath} {segpath}"
                       f" {storagestr} {hashmax} --storagedir {storagedir}"
                       f" --chunk_begin {chunk_begin} --chunk_end {chunk_end}"
                       f" --patchsz {patchsz_str} --resolution {res_str}")

                yield SynaptorTask(cmd)
Example #25
        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                task_shape = min2(shape.clone(), srcvol.bounds.maxpt - startpt)
                yield ContrastNormalizationTask(
                    src_path=src_path,
                    dest_path=dest_path,
                    levels_path=levels_path,
                    shape=task_shape,
                    offset=startpt.clone(),
                    clip_fraction=clip_fraction,
                    mip=mip,
                    fill_missing=fill_missing,
                    translate=translate,
                    minval=minval,
                    maxval=maxval,
                )

            dvol.provenance.processing.append({
                'method': {
                    'task': 'ContrastNormalizationTask',
                    'src_path': src_path,
                    'dest_path': dest_path,
                    'shape': Vec(*shape).tolist(),
                    'clip_fraction': clip_fraction,
                    'mip': mip,
                    'translate': Vec(*translate).tolist(),
                    'minval': minval,
                    'maxval': maxval,
                    'bounds': [bounds.minpt.tolist(),
                               bounds.maxpt.tolist()],
                },
                'by':
                OPERATOR_CONTACT,
                'date':
                strftime('%Y-%m-%d %H:%M %Z'),
            })
            dvol.commit_provenance()
Example #26
        def __iter__(self):
            for startpt in xyzrange(bounds.minpt, bounds.maxpt, shape):
                task_shape = min2(shape.clone(), dvol_bounds.maxpt - startpt)
                yield TransferTask(
                    src_path=src_layer_path,
                    dest_path=dest_layer_path,
                    shape=task_shape,
                    offset=startpt.clone(),
                    fill_missing=fill_missing,
                    translate=translate,
                    mip=mip,
                )

            job_details = {
                'method': {
                    'task': 'TransferTask',
                    'src': src_layer_path,
                    'dest': dest_layer_path,
                    'shape': list(map(int, shape)),
                    'fill_missing': fill_missing,
                    'translate': list(map(int, translate)),
                    'bounds': [bounds.minpt.tolist(),
                               bounds.maxpt.tolist()],
                    'mip': mip,
                },
                'by': OPERATOR_CONTACT,
                'date': strftime('%Y-%m-%d %H:%M %Z'),
            }

            dvol = CloudVolume(dest_layer_path)
            dvol.provenance.sources = [src_layer_path]
            dvol.provenance.processing.append(job_details)
            dvol.commit_provenance()

            if vol.path.protocol != 'boss':
                vol.provenance.processing.append(job_details)
                vol.commit_provenance()
Example #27
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    for scale in scales:
        vol.add_scale(scale)

    if preserve_chunk_size:
        for i in range(1, len(vol.scales)):
            vol.scales[i]['chunk_sizes'] = vol.scales[0]['chunk_sizes']

    return vol.commit_info()
Example #28
def create_contrast_normalization_tasks(task_queue,
                                        src_path,
                                        dest_path,
                                        shape=None,
                                        mip=0,
                                        clip_fraction=0.01,
                                        fill_missing=False,
                                        translate=(0, 0, 0)):

    srcvol = CloudVolume(src_path, mip=mip)

    try:
        dvol = CloudVolume(dest_path, mip=mip)
    except Exception:  # no info file
        info = copy.deepcopy(srcvol.info)
        dvol = CloudVolume(dest_path, mip=mip, info=info)
        dvol.info['scales'] = dvol.info['scales'][:mip + 1]
        dvol.commit_info()

    if shape is None:
        shape = Bbox((0, 0, 0), (2048, 2048, 64))
        shape = shape.shrink_to_chunk_size(dvol.underlying).size3()

    shape = Vec(*shape)

    create_downsample_scales(dest_path,
                             mip=mip,
                             ds_shape=shape,
                             preserve_chunk_size=True)
    dvol.refresh_info()

    bounds = srcvol.bounds.clone()
    for startpt in tqdm(xyzrange(bounds.minpt, bounds.maxpt, shape),
                        desc="Inserting Contrast Normalization Tasks"):
        task_shape = min2(shape.clone(), srcvol.bounds.maxpt - startpt)
        task = ContrastNormalizationTask(
            src_path=src_path,
            dest_path=dest_path,
            shape=task_shape,
            offset=startpt.clone(),
            clip_fraction=clip_fraction,
            mip=mip,
            fill_missing=fill_missing,
            translate=translate,
        )
        task_queue.insert(task)
    task_queue.wait('Uploading Contrast Normalization Tasks')

    dvol.provenance.processing.append({
        'method': {
            'task': 'ContrastNormalizationTask',
            'src_path': src_path,
            'dest_path': dest_path,
            'shape': Vec(*shape).tolist(),
            'clip_fraction': clip_fraction,
            'mip': mip,
            'translate': Vec(*translate).tolist(),
        },
        'by': USER_EMAIL,
        'date': strftime('%Y-%m-%d %H:%M %Z'),
    })
    dvol.commit_provenance()
Example #29
    voxel_offset=[0, 0, 0],
    #volume_size = [1024, 10240, 14592],
    volume_size=[h5_class.shape_z, h5_class.shape_y, h5_class.shape_x],
    #chunk_size = [512, 512, 64],
)
vol = CloudVolume("file://" + h5_class.Destination_path,
                  compress=False,
                  info=info,
                  non_aligned_writes=True)
vol.commit_info()

h5_data = h5_file[Dataset_name]

for x, y, z in tqdm(xyzrange(bounds.minpt, bounds.maxpt, shape)):
    pt = Vec(x, y, z)
    bounded_shape = min2(shape, bounds.maxpt - pt)
    bbx = Bbox(pt, pt + bounded_shape)
    if bbx.subvoxel():
        continue

    vol[bbx] = (h5_data[bbx.to_slices()[::-1]].T).astype(np_type)

print("KNeuroViz pre-processing DONE!")

if h5_class.ImageType == 'segmentation':
    seg_mesh_path = "file://" + h5_class.Destination_path

    with LocalTaskQueue(parallel=8) as tq:
        tasks = tc.create_meshing_tasks(seg_mesh_path,
                                        mip=0,
                                        shape=Vec(h5_class.shape_x // 2,