Example #1
0
def test_sharded():
    """Round-trip ten skeleton clones through sharded files on disk.

    For every index/data encoding combination, synthesizes shard files,
    writes them (plus info files) under /tmp, and checks that CloudVolume
    reads back skeletons equal to the original.
    """
    # Reference skeleton: six vertices, two connected chains, one
    # float32 "radius" attribute, converted to physical coordinates.
    template = Skeleton(
        [
            (0, 0, 0),
            (1, 0, 0),
            (2, 0, 0),
            (0, 1, 0),
            (0, 2, 0),
            (0, 3, 0),
        ],
        edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
        segid=1,
        extra_attributes=[{
            "id": "radius",
            "data_type": "float32",
            "num_components": 1,
        }],
    ).physical_space()

    def encoded_clone(label):
        # Clone with a distinct id, serialized to precomputed binary.
        copy = template.clone()
        copy.id = label
        return copy.to_precomputed()

    encoded = {label: encoded_clone(label) for label in range(10)}

    mkdir('/tmp/removeme/skeletons/sharded/skeletons')
    with open('/tmp/removeme/skeletons/sharded/info', 'wt') as f:
        f.write(jsonify(info))

    for index_encoding in ('raw', 'gzip'):
        for data_encoding in ('raw', 'gzip'):
            spec = ShardingSpecification(
                'neuroglancer_uint64_sharded_v1',
                preshift_bits=1,
                hash='murmurhash3_x86_128',
                minishard_bits=2,
                shard_bits=1,
                minishard_index_encoding=index_encoding,
                data_encoding=data_encoding,
            )
            skel_info['sharding'] = spec.to_dict()

            with open('/tmp/removeme/skeletons/sharded/skeletons/info',
                      'wt') as f:
                f.write(jsonify(skel_info))

            shard_files = spec.synthesize_shards(encoded)
            for shard_name, payload in shard_files.items():
                with open(
                        '/tmp/removeme/skeletons/sharded/skeletons/' +
                        shard_name, 'wb') as f:
                    f.write(payload)

            cv = CloudVolume('file:///tmp/removeme/skeletons/sharded/')
            assert cv.skeleton.meta.mip == 3

            # Every stored clone must decode back equal to the template
            # once its id is normalized.
            for label in range(10):
                fetched = cv.skeleton.get(label).physical_space()
                fetched.id = 1
                assert fetched == template

    shutil.rmtree('/tmp/removeme/skeletons')
Example #2
0
def test_jsonify():
  """jsonify handles numpy arrays, nested containers, ints and strings."""
  payload = {
    'w': '1 2 34 5',
    'x': [ np.array([1,2,3,4,5], dtype=np.uint64) ],
    'y': [ {}, {} ],
    'z': 5,
  }

  # sort_keys=True makes the serialization deterministic regardless of
  # the dict's insertion order.
  expected = r"""{"w": "1 2 34 5", "x": [[1, 2, 3, 4, 5]], "y": [{}, {}], "z": 5}"""
  assert lib.jsonify(payload, sort_keys=True) == expected
Example #3
0
 def _upload_spatial_index(self, bbox, mesh_bboxes):
     """Upload *mesh_bboxes* as a JSON spatial-index file named after *bbox*."""
     with SimpleStorage(self.layer_path,
                        progress=self.options['progress']) as storage:
         # Index file lives next to the meshes: <mesh_dir>/<bbox>.spatial
         index_filename = "{}/{}.spatial".format(self._mesh_dir,
                                                 bbox.to_filename())
         storage.put_file(
             file_path=index_filename,
             content=jsonify(mesh_bboxes).encode('utf8'),
             compress=self.options['compress'],
             content_type="application/json",
             cache_control=False,
         )
Example #4
0
 def put_json(self,
              file_path,
              content,
              content_type='application/json',
              *args,
              **kwargs):
     """Serialize *content* to JSON if needed and store it at *file_path*.

     Strings are assumed to already be JSON and are written as-is;
     anything else is run through jsonify first. Remaining positional
     and keyword arguments are forwarded to put_file.
     """
     # isinstance (not `type(...) != str`) so str subclasses are also
     # treated as pre-serialized JSON rather than being double-encoded.
     if not isinstance(content, str):
         content = jsonify(content)
     return self.put_file(file_path,
                          content,
                          content_type=content_type,
                          *args,
                          **kwargs)
Example #5
0
  def upload_spatial_index(self, vol, path, bbox, skeletons):
    """Upload a JSON spatial index mapping each segid to its bounding box."""
    # Bounding box (as a plain list) of each skeleton's vertex cloud.
    index = {
      segid: Bbox.from_points(skel.vertices).to_list()
      for segid, skel in tqdm(
        skeletons.items(),
        disable=(not vol.progress),
        desc="Extracting Bounding Boxes",
      )
    }

    # Scale the task bbox into physical units to name the index file.
    physical_bbox = bbox * vol.resolution
    with SimpleStorage(path, progress=vol.progress) as storage:
      storage.put_file(
        file_path="{}.spatial".format(physical_bbox.to_filename()),
        content=jsonify(index).encode('utf8'),
        compress='gzip',
        content_type="application/json",
        cache_control=False,
      )
def test_sharded():
    """Integration test for sharded skeleton storage.

    Writes ten clones of one skeleton into neuroglancer uint64 shard
    files under every index/data encoding combination, then verifies
    that CloudVolume can fetch, list, disassemble, and locate each
    label. Relies on module-level `info` and `skel_info` dicts defined
    elsewhere in this file, and uses scratch space under /tmp.
    """
    # Reference skeleton: six vertices forming two chains (0-1-2 and
    # 3-4-5 with a 3-5 chord), one float32 "radius" attribute,
    # converted to physical (nanometer) coordinates.
    skel = Skeleton([
        (0, 0, 0),
        (1, 0, 0),
        (2, 0, 0),
        (0, 1, 0),
        (0, 2, 0),
        (0, 3, 0),
    ],
                    edges=[(0, 1), (1, 2), (3, 4), (4, 5), (3, 5)],
                    segid=1,
                    extra_attributes=[{
                        "id": "radius",
                        "data_type": "float32",
                        "num_components": 1,
                    }]).physical_space()

    # Ten clones, ids 0..9, serialized to precomputed binary.
    skels = {}
    for i in range(10):
        sk = skel.clone()
        sk.id = i
        skels[i] = sk.to_precomputed()

    mkdir('/tmp/removeme/skeletons/sharded/skeletons')
    with open('/tmp/removeme/skeletons/sharded/info', 'wt') as f:
        f.write(jsonify(info))

    # Exercise all four combinations of minishard-index and data encoding.
    for idxenc in ('raw', 'gzip'):
        for dataenc in ('raw', 'gzip'):

            spec = ShardingSpecification(
                'neuroglancer_uint64_sharded_v1',
                preshift_bits=1,
                hash='murmurhash3_x86_128',
                minishard_bits=2,
                shard_bits=1,
                minishard_index_encoding=idxenc,
                data_encoding=dataenc,
            )
            # NOTE: mutates the module-level skel_info fixture in place.
            skel_info['sharding'] = spec.to_dict()

            with open('/tmp/removeme/skeletons/sharded/skeletons/info',
                      'wt') as f:
                f.write(jsonify(skel_info))

            # Pack all ten encoded skeletons into shard files and write
            # them where the reader expects to find them.
            files = spec.synthesize_shards(skels)
            for fname in files.keys():
                with open('/tmp/removeme/skeletons/sharded/skeletons/' + fname,
                          'wb') as f:
                    f.write(files[fname])

            cv = CloudVolume('file:///tmp/removeme/skeletons/sharded/')
            assert cv.skeleton.meta.mip == 3

            # Each fetched clone must equal the template once its id is
            # normalized back to 1.
            for i in range(10):
                sk = cv.skeleton.get(i).physical_space()
                sk.id = 1
                assert sk == skel

            # The shard reader must report exactly labels 0..9 across
            # all shard files.
            labels = []
            for fname in files.keys():
                lbls = cv.skeleton.reader.list_labels(fname, path='skeletons')
                labels += list(lbls)

            labels.sort()
            assert labels == list(range(10))

            # Every binary blob inside every shard must decode as a
            # valid precomputed skeleton.
            for filename, shard in files.items():
                decoded_skels = cv.skeleton.reader.disassemble_shard(shard)
                for label, binary in decoded_skels.items():
                    Skeleton.from_precomputed(binary)

            # exists() maps each label to its containing shard file;
            # label 10 was never stored and must map to None. With
            # shard_bits=1 the labels split across 0.shard and 1.shard.
            exists = cv.skeleton.reader.exists(list(range(11)),
                                               path='skeletons')
            assert exists == {
                0: 'skeletons/0.shard',
                1: 'skeletons/0.shard',
                2: 'skeletons/0.shard',
                3: 'skeletons/0.shard',
                4: 'skeletons/0.shard',
                5: 'skeletons/0.shard',
                6: 'skeletons/0.shard',
                7: 'skeletons/0.shard',
                8: 'skeletons/1.shard',
                9: 'skeletons/1.shard',
                10: None,
            }

    shutil.rmtree('/tmp/removeme/skeletons')