Example #1
import numpy as np
import pytest

from cloudvolume import Bbox

def test_bbox_from_filename():
    filenames = [
        "0-512_0-512_0-16",
        "0-512_0-512_0-16.gz",
        "0-512_0-512_0-16.br",
    ]

    for fn in filenames:
        bbox = Bbox.from_filename(fn)
        assert np.array_equal(bbox.minpt, [0, 0, 0])
        assert np.array_equal(bbox.maxpt, [512, 512, 16])

    filenames = [
        "gibberish",
        "0-512_0-512_0-16.lol",
        "0-512_0-512_0-16.gzip",
        "0-512_0-512_0-16.brotli",
        "0-512_0-512_0-16.na",
        "0-512_0-512_0-abc",
        "0-512_0-abc_0-16",
        "0-abc_0-512_0-16",
    ]
    for fn in filenames:
        with pytest.raises(ValueError):
            Bbox.from_filename(fn)
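
The test above pins down the filename convention: each axis is written as min-max, the three axes are joined by underscores, and an optional compression suffix (.gz, .br) is tolerated. A minimal round-trip sketch, assuming cloudvolume's Bbox as used throughout these examples:

import numpy as np
from cloudvolume import Bbox

# "0-512_0-512_0-16" encodes x in [0, 512), y in [0, 512), z in [0, 16).
bbox = Bbox.from_filename("0-512_0-512_0-16")
assert np.array_equal(bbox.minpt, [0, 0, 0])
assert np.array_equal(bbox.size3(), [512, 512, 16])
# to_filename() is the inverse, minus any compression suffix.
assert bbox.to_filename() == "0-512_0-512_0-16"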
Example #2
def locations_for_labels(self, labels, cv):
  # SPATIAL_EXT is a module-level regex that strips the '.spatial'
  # suffix from index filenames (see Example #4 for its definition).
  index_filenames = cv.skeleton.spatial_index.file_locations_per_label(labels)
  for label, locations in index_filenames.items():
    for i, location in enumerate(locations):
      bbx = Bbox.from_filename(re.sub(SPATIAL_EXT, '', location))
      bbx /= cv.meta.resolution(cv.skeleton.meta.mip)
      index_filenames[label][i] = bbx.to_filename() + '.frags'
  return index_filenames
Example #3
def execute(self):
  volume = CloudVolume(self.layer_path, mip=0)
  bounds = Bbox.from_filename(self.chunk_path)
  image = self._download_input_chunk(bounds)
  image = chunks.decode(image, self.chunk_encoding)
  # BUG: We need to provide some kind of ds_shape independent of the image,
  # otherwise the edges of the dataset may not generate as many mip levels.
  downsample_and_upload(image, bounds, volume, mip=0, ds_shape=image.shape[:3],
                        zero_as_background=self.zero_as_background)
Example #4
import re
from typing import Dict, List

from cloudvolume import Bbox, CloudVolume

def locations_for_labels(cv: CloudVolume,
                         labels: List[int]) -> Dict[int, List[str]]:

    SPATIAL_EXT = re.compile(r'\.spatial$')
    index_filenames = cv.mesh.spatial_index.file_locations_per_label(labels)
    for label, locations in index_filenames.items():
        for i, location in enumerate(locations):
            bbx = Bbox.from_filename(re.sub(SPATIAL_EXT, '', location))
            bbx /= cv.meta.resolution(cv.mesh.meta.mip)
            index_filenames[label][i] = bbx.to_filename() + '.frags'
    return index_filenames
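
Both this function and Example #2 strip a '.spatial' suffix before parsing, then rewrite the bounds as a '.frags' filename. A minimal sketch of that renaming step with a hypothetical index filename (the resolution division is omitted since it needs a live CloudVolume):

import re
from cloudvolume import Bbox

SPATIAL_EXT = re.compile(r'\.spatial$')

location = '0-512_0-512_0-16.spatial'  # hypothetical spatial index filename
bbx = Bbox.from_filename(re.sub(SPATIAL_EXT, '', location))
print(bbx.to_filename() + '.frags')    # 0-512_0-512_0-16.frags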
Example #5

def compute_build_bounding_box(storage, prefix='build/'):
    bboxes = []
    for filename in tqdm(storage.list_files(prefix=prefix), desc='Computing Bounds'):
        bbox = Bbox.from_filename(filename)
        bboxes.append(bbox)

    # Union of all per-chunk bounding boxes.
    bounds = Bbox.expand(*bboxes)
    # max2 is presumably cloudvolume.lib's elementwise maximum helper;
    # reduce comes from functools.
    chunk_size = reduce(max2, map(lambda bbox: bbox.size3(), bboxes))

    print('bounds={} (size: {}); chunk_size={}'.format(bounds, bounds.size3(), chunk_size))
  
    return bounds, chunk_size
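
As a minimal sketch of what Bbox.expand computes here (bounds invented): it returns the union of the boxes, i.e. the axis-wise minimum of the minpts and maximum of the maxpts.

from cloudvolume import Bbox

a = Bbox.from_filename('0-512_0-512_0-64')
b = Bbox.from_filename('512-1024_0-512_0-64')
bounds = Bbox.expand(a, b)
print(bounds.to_filename())  # 0-1024_0-512_0-64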
Example #6

    def __iter__(self):
      # volume_map, vol, src_path, dest_path, and consensus_map_path are
      # captured from the enclosing scope (compare Example #9, which
      # shows the same logic inline).
      for boundstr, volume_id in volume_map.items():
        bbox = Bbox.from_filename(boundstr)
        bbox.minpt = Vec.clamp(bbox.minpt, vol.bounds.minpt, vol.bounds.maxpt)
        bbox.maxpt = Vec.clamp(bbox.maxpt, vol.bounds.minpt, vol.bounds.maxpt)

        yield HyperSquareConsensusTask(
          src_path=src_path,
          dest_path=dest_path,
          ew_volume_id=int(volume_id),
          consensus_map_path=consensus_map_path,
          shape=bbox.size3(),
          offset=bbox.minpt.clone(),
        )
Example #7
def fetch_task(queue_name, visibility_timeout):
    """Fetch task from queue."""
    # This operator is actually a generator: on each iteration it
    # replaces the old task with a completely new one and yields it.
    queue = SQSQueue(queue_name, visibility_timeout=visibility_timeout)
    for task_handle, bbox_str in queue:
        print('got task:', bbox_str)
        bbox = Bbox.from_filename(bbox_str)
        # record the task handle to delete after the processing
        task = get_initial_task()
        task['queue'] = queue
        task['task_handle'] = task_handle
        task['bbox'] = bbox
        task['log']['bbox'] = bbox.to_filename()
        yield task
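
A hypothetical consumer of this generator (the processing step and the delete call are assumptions; the task_handle is recorded above precisely so the message can be acknowledged once processing succeeds):

for task in fetch_task('example-queue', visibility_timeout=300):
    do_work(task['bbox'])  # hypothetical processing step
    # assumed delete-by-handle API, per the task_handle bookkeeping above
    task['queue'].delete(task['task_handle'])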
Example #8
    def get_skeletons_by_segid(self, filenames):
        with Storage(self.cloudpath, progress=True) as stor:
            skels = stor.get_files(filenames)

        skeletons = defaultdict(list)
        for skel in skels:
            try:
                segid = filename_to_segid(skel['filename'])
            except ValueError:
                # Typically this is due to preexisting fully
                # formed skeletons e.g. skeletons_mip_3/1588494
                continue

            skeletons[segid].append((Bbox.from_filename(skel['filename']),
                                     pickle.loads(skel['content'])))

        return skeletons
Example #9
def create_hypersquare_consensus_tasks(task_queue, src_path, dest_path,
                                       volume_map_file, consensus_map_path):
    """
  Transfer an Eyewire consensus into neuroglancer. This first requires
  importing the raw segmentation via a hypersquare ingest task. However,
  this can probably be streamlined at some point.

  The volume map file should be JSON encoded and 
  look like { "X-X_Y-Y_Z-Z": EW_VOLUME_ID }

  The consensus map file should look like:
  { VOLUMEID: { CELLID: [segids] } }
  """

    with open(volume_map_file, 'r') as f:
        volume_map = json.loads(f.read())

    vol = CloudVolume(dest_path)

    for boundstr, volume_id in tqdm(
            volume_map.items(),
            desc="Inserting HyperSquare Consensus Remap Tasks"):
        bbox = Bbox.from_filename(boundstr)
        bbox.minpt = Vec.clamp(bbox.minpt, vol.bounds.minpt, vol.bounds.maxpt)
        bbox.maxpt = Vec.clamp(bbox.maxpt, vol.bounds.minpt, vol.bounds.maxpt)

        task = HyperSquareConsensusTask(
            src_path=src_path,
            dest_path=dest_path,
            ew_volume_id=int(volume_id),
            consensus_map_path=consensus_map_path,
            shape=bbox.size3(),
            offset=bbox.minpt.clone(),
        )
        task_queue.insert(task)
    task_queue.wait()
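
Concretely, a volume map matching the format described in the docstring might look like the following sketch (bounds and Eyewire volume IDs invented):

import json
from cloudvolume import Bbox

raw = '{"0-1024_0-1024_0-128": 12345, "1024-2048_0-1024_0-128": 12346}'
volume_map = json.loads(raw)
for boundstr, volume_id in volume_map.items():
    bbox = Bbox.from_filename(boundstr)  # each key parses as a bbox
    print(bbox.size3(), int(volume_id))  # per-task shape and ew_volume_id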