def test_plane_scales_xy():
  scales = downsample_scales.compute_plane_downsampling_scales( 
    (2048, 2048, 512), max_downsampled_size=128
  )

  assert len(scales) == 5
  assert scales[0] == (1,1,1)
  assert scales[1] == (2,2,1)
  assert scales[2] == (4,4,1)
  assert scales[3] == (8,8,1)
  assert scales[4] == (16,16,1)

  scales = downsample_scales.compute_plane_downsampling_scales( 
    (357, 2048, 512), max_downsampled_size=128
  )

  assert len(scales) == 2
  assert scales[0] == (1,1,1)
  assert scales[1] == (2,2,1)

  scales = downsample_scales.compute_plane_downsampling_scales( 
    (0, 2048, 512), max_downsampled_size=128
  )

  assert len(scales) == 1
  assert scales[0] == (1,1,1)
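
For reference, the behavior these assertions exercise can be sketched as repeated doubling of the x/y factors while both downsampled extents stay at or above max_downsampled_size. A minimal illustrative reimplementation, not the library's actual code:

def plane_downsampling_scales_sketch(size, max_downsampled_size=128):
  # Double the in-plane factors until either axis would fall below
  # the threshold; z stays at 1 in this planar variant.
  sx, sy, _ = size
  scales = [(1, 1, 1)]
  factor = 2
  while sx // factor >= max_downsampled_size and sy // factor >= max_downsampled_size:
    scales.append((factor, factor, 1))
    factor *= 2
  return scales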
Example #2
def create_hypersquare_ingest_tasks(task_queue, hypersquare_bucket_name,
                                    dataset_name, hypersquare_chunk_size,
                                    resolution, voxel_offset, volume_size,
                                    overlap):
    def crtinfo(layer_type, dtype, encoding):
        return CloudVolume.create_new_info(
            num_channels=1,
            layer_type=layer_type,
            data_type=dtype,
            encoding=encoding,
            resolution=resolution,
            voxel_offset=voxel_offset,
            volume_size=volume_size,
            chunk_size=[56, 56, 56],
        )

    imginfo = crtinfo('image', 'uint8', 'jpeg')
    seginfo = crtinfo('segmentation', 'uint16', 'raw')

    scales = downsample_scales.compute_plane_downsampling_scales(
        hypersquare_chunk_size)[1:]  # omit (1,1,1)

    IMG_LAYER_NAME = 'image'
    SEG_LAYER_NAME = 'segmentation'

    imgvol = CloudVolume(dataset_name, IMG_LAYER_NAME, 0, info=imginfo)
    segvol = CloudVolume(dataset_name, SEG_LAYER_NAME, 0, info=seginfo)

    print("Creating info files for image and segmentation...")
    imgvol.commit_info()
    segvol.commit_info()

    def crttask(volname, tasktype, layer_name):
        return HyperSquareTask(
            bucket_name=hypersquare_bucket_name,
            dataset_name=dataset_name,
            layer_name=layer_name,
            volume_dir=volname,
            layer_type=tasktype,
            overlap=overlap,
            resolution=resolution,
        )

    print("Listing hypersquare bucket...")
    volumes_listing = lib.gcloud_ls('gs://{}/'.format(hypersquare_bucket_name))

    # Alternatively, load a cached listing from disk:
    # with open('e2198_volumes.json', 'r') as f:
    #   volumes_listing = json.loads(f.read())

    volumes_listing = [x.split('/')[-2] for x in volumes_listing]

    for cloudpath in tqdm(volumes_listing, desc="Creating Ingest Tasks"):
        # print(cloudpath)
        # img_task = crttask(cloudpath, 'image', IMG_LAYER_NAME)
        seg_task = crttask(cloudpath, 'segmentation', SEG_LAYER_NAME)
        # seg_task.execute()
        task_queue.insert(seg_task)
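
A hypothetical invocation might look like the following. Every value is a placeholder rather than anything from the original source, and the TaskQueue construction is an assumption about the queue object this function expects:

tq = TaskQueue('sqs://hypersquare-ingest-queue')  # assumed queue type
create_hypersquare_ingest_tasks(
    tq,
    hypersquare_bucket_name='my-hypersquare-bucket',  # placeholder
    dataset_name='gs://my-bucket/e2198',              # placeholder
    hypersquare_chunk_size=(256, 256, 256),
    resolution=(16, 16, 23),
    voxel_offset=(0, 0, 0),
    volume_size=(2048, 2048, 256),
    overlap=(32, 32, 32),
)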
Example #3
def downsample_and_upload(
    image, bounds, vol, ds_shape, 
    mip=0, axis='z', skip_first=False,
    sparse=False
  ):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
      size=ds_shape,
      preserve_axis=axis,
      max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample_scales.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
      print("No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}".format(
          image.shape, ds_shape, vol.volume_size, bounds)
      )

    vol.mip = mip
    if not skip_first:
      vol[bounds.to_slices()] = image

    if len(factors) == 0:
      return

    num_mips = len(factors)

    mips = []
    if vol.layer_type == 'image':
      mips = tinybrain.downsample_with_averaging(image, factors[0], num_mips=num_mips)
    elif vol.layer_type == 'segmentation':
      mips = tinybrain.downsample_segmentation(
        image, factors[0], 
        num_mips=num_mips, sparse=sparse
      )
    else:
      mips = tinybrain.downsample_with_striding(image, factors[0], num_mips=num_mips)

    new_bounds = bounds.clone()
   
    for factor3 in factors:
      vol.mip += 1
      new_bounds //= factor3
      mipped = mips.pop(0)
      new_bounds.maxpt = new_bounds.minpt + Vec(*mipped.shape[:3])
      vol[new_bounds] = mipped
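
scale_series_to_downsample_factors converts the cumulative scale series into the per-step factors consumed above (each iteration divides new_bounds by one factor and pops one mip). A minimal sketch of that conversion, assuming it is the element-wise ratio of consecutive scales:

def scale_series_to_factors_sketch(scales):
  # [(1,1,1), (2,2,1), (4,4,1)] -> [(2,2,1), (2,2,1)]
  return [
    tuple(b // a for a, b in zip(prev, cur))
    for prev, cur in zip(scales, scales[1:])
  ]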
Example #4
def test_plane_scales_yz():
    scales = downsample_scales.compute_plane_downsampling_scales(
        (512, 2048, 2048), max_downsampled_size=128, preserve_axis='x')

    assert len(scales) == 5
    assert scales[0] == (1, 1, 1)
    assert scales[1] == (1, 2, 2)
    assert scales[2] == (1, 4, 4)
    assert scales[3] == (1, 8, 8)
    assert scales[4] == (1, 16, 16)

    scales = downsample_scales.compute_plane_downsampling_scales(
        (64, 2048, 2048), max_downsampled_size=128, preserve_axis='x')

    assert len(scales) == 5
    assert scales[0] == (1, 1, 1)
    assert scales[1] == (1, 2, 2)
    assert scales[2] == (1, 4, 4)
    assert scales[3] == (1, 8, 8)
    assert scales[4] == (1, 16, 16)
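
These preserve_axis tests mirror the trick used by the downsampling helpers above: setting the preserved index to infinity before taking the minimum ensures only the in-plane extents bound the pyramid depth. For illustration (shapes are placeholders):

import numpy as np

underlying_shape = np.array([64, 64, 64], dtype=np.float32)
underlying_shape[0] = float('inf')  # preserve_axis='x'
scales = downsample_scales.compute_plane_downsampling_scales(
    size=(512, 2048, 2048),
    preserve_axis='x',
    max_downsampled_size=int(min(*underlying_shape)),  # 64: y/z only
)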
Example #5
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False,
                             chunk_size=None,
                             encoding=None):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    if chunk_size:
        underlying_shape = Vec(*chunk_size).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    if len(scales) == 0:
        print("WARNING: No scales generated.")

    for scale in scales:
        vol.add_scale(scale, encoding=encoding, chunk_size=chunk_size)

    if chunk_size is None:
        if preserve_chunk_size or len(scales) == 0:
            chunk_size = vol.scales[mip]['chunk_sizes']
        else:
            chunk_size = vol.scales[mip + 1]['chunk_sizes']
    else:
        chunk_size = [chunk_size]

    if encoding is None:
        encoding = vol.scales[mip]['encoding']

    for i in range(mip + 1, mip + len(scales) + 1):
        vol.scales[i]['chunk_sizes'] = chunk_size

    return vol.commit_info()
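
A hedged usage sketch for this variant; the layer path and shapes are hypothetical, with chunk_size and encoding showing the optional overrides:

vol = create_downsample_scales(
    'gs://my-bucket/my-layer',  # hypothetical layer path
    mip=0,
    ds_shape=(2048, 2048, 64),
    axis='z',
    chunk_size=(64, 64, 64),    # force 64^3 chunks at the new mips
    encoding='jpeg',            # re-encode downsamples (image layers)
)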
Example #6
def downsample_and_upload(image,
                          bounds,
                          vol,
                          ds_shape,
                          mip=0,
                          axis='z',
                          skip_first=False):
    ds_shape = min2(vol.volume_size, ds_shape[:3])

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)
    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    # Need to use ds_shape here. Using image bounds means truncated
    # edges won't generate as many mip levels
    fullscales = downsample_scales.compute_plane_downsampling_scales(
        size=ds_shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    factors = downsample.scale_series_to_downsample_factors(fullscales)

    if len(factors) == 0:
        print(
            "No factors generated. Image Shape: {}, Downsample Shape: {}, Volume Shape: {}, Bounds: {}"
            .format(image.shape, ds_shape, vol.volume_size, bounds))

    downsamplefn = downsample.method(vol.layer_type)

    vol.mip = mip
    if not skip_first:
        vol[bounds.to_slices()] = image

    new_bounds = bounds.clone()

    for factor3 in factors:
        vol.mip += 1
        image = downsamplefn(image, factor3)
        new_bounds //= factor3
        new_bounds.maxpt = new_bounds.minpt + Vec(*image.shape[:3])
        vol[new_bounds.to_slices()] = image
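
A hedged call sketch for this pre-tinybrain variant; the path, bounds, and shapes are placeholders, and Bbox is assumed to come from cloudvolume. skip_first=True appears to regenerate the mip pyramid without rewriting the base image:

from cloudvolume import CloudVolume, Bbox

vol = CloudVolume('gs://my-bucket/my-layer', mip=0)  # hypothetical path
bounds = Bbox((0, 0, 0), (512, 512, 64))
image = vol[bounds.to_slices()]
downsample_and_upload(image, bounds, vol, ds_shape=(512, 512, 64),
                      mip=0, axis='z', skip_first=True)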
Example #7
def create_downsample_scales(layer_path,
                             mip,
                             ds_shape,
                             axis='z',
                             preserve_chunk_size=False):
    vol = CloudVolume(layer_path, mip)
    shape = min2(vol.volume_size, ds_shape)

    # sometimes we downsample a base layer of 512x512
    # into underlying chunks of 64x64 which permits more scales
    underlying_mip = (mip + 1) if (mip + 1) in vol.available_mips else mip
    underlying_shape = vol.mip_underlying(underlying_mip).astype(np.float32)

    toidx = {'x': 0, 'y': 1, 'z': 2}
    preserved_idx = toidx[axis]
    underlying_shape[preserved_idx] = float('inf')

    scales = downsample_scales.compute_plane_downsampling_scales(
        size=shape,
        preserve_axis=axis,
        max_downsampled_size=int(min(*underlying_shape)),
    )
    scales = scales[1:]  # omit (1,1,1)
    scales = [
        list(map(int, vol.downsample_ratio * Vec(*factor3)))
        for factor3 in scales
    ]

    for scale in scales:
        vol.add_scale(scale)

    if preserve_chunk_size:
        for i in range(1, len(vol.scales)):
            vol.scales[i]['chunk_sizes'] = vol.scales[0]['chunk_sizes']

    return vol.commit_info()