Example 1: upload a batch of meshes as a single MapBuffer fragment
    def _upload_batch(self, meshes, bbox):
        cf = CloudFiles(self.layer_path, progress=self.options['progress'])

        # each mesh value is brotli-compressed inside the MapBuffer itself
        mbuf = MapBuffer(meshes, compress="br")

        cf.put(
            f"{self._mesh_dir}/{bbox.to_filename()}.frags",
            content=mbuf.tobytes(),
            compress=None,  # values are already compressed per-entry by MapBuffer
            content_type="application/x.mapbuffer",
            cache_control=False,
        )
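
The fragment written above can be read back by fetching the stored bytes and wrapping them in a MapBuffer, much as Example 5 below does. A minimal sketch, assuming a hypothetical bucket path and fragment filename, and assuming cloudvolume.mesh.Mesh supplies the from_precomputed deserializer used in Example 5:

from cloudfiles import CloudFiles
from cloudvolume.mesh import Mesh  # deserializer assumed, as in Example 5
from mapbuffer import MapBuffer

# hypothetical layer path and bbox filename
cf = CloudFiles("gs://example-bucket/segmentation")
binary = cf.get("mesh_dir/0-512_0-512_0-512.frags")

# per-value brotli decompression is handled inside MapBuffer
fragment = MapBuffer(binary, frombytesfn=Mesh.from_precomputed)
mesh = fragment[12345]  # random access by label; raises KeyError if absent
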
Example 2: read skeleton fragments back from MapBuffer files (with a legacy pickle fallback)
    def get_unfused(self, labels, filenames, cv):
        skeldirfn = lambda loc: cv.meta.join(cv.skeleton.meta.skeleton_path,
                                             loc)
        filenames = [skeldirfn(loc) for loc in filenames]

        block_size = 50

        if len(filenames) < block_size:
            blocks = [filenames]
            n_blocks = 1
        else:
            # ceiling division so the progress bar total includes the final partial block
            n_blocks = (len(filenames) + block_size - 1) // block_size
            blocks = sip(filenames, block_size)

        all_skels = defaultdict(list)
        for filenames_block in tqdm(blocks,
                                    desc="Filename Block",
                                    total=n_blocks,
                                    disable=(not self.progress)):
            if cv.meta.path.protocol == "file":
                all_files = {}
                prefix = cv.cloudpath.replace("file://", "")
                for filename in filenames_block:
                    all_files[filename] = open(os.path.join(prefix, filename),
                                               "rb")
            else:
                all_files = cv.skeleton.cache.download(filenames_block,
                                                       progress=self.progress)

            for filename, content in tqdm(all_files.items(),
                                          desc="Scanning Fragments",
                                          disable=(not self.progress)):
                try:
                    # current format: a MapBuffer of precomputed skeletons
                    fragment = MapBuffer(
                        content,
                        frombytesfn=PrecomputedSkeleton.from_precomputed)
                    fragment.validate()
                except mapbuffer.ValidationError:
                    # legacy format: the fragment is a pickled dict
                    fragment = pickle.loads(content)

                for label in labels:
                    try:
                        skel = fragment[label]
                        skel.id = label
                        all_skels[label].append(skel)
                    except KeyError:
                        continue

                if hasattr(content, "close"):
                    content.close()

        return all_skels
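
Note the two-format read path above: fragments written by current code are MapBuffers checked with fragment.validate(), while anything that fails validation falls back to pickle.loads. That lets the reader consume caches written before the MapBuffer format was adopted.
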
Example 3: upload a batch of skeletons as a single MapBuffer fragment
    def upload_batch(self, vol, path, bbox, skeletons):
        mbuf = MapBuffer(skeletons,
                         compress="br",  # brotli-compress each serialized skeleton
                         tobytesfn=lambda skel: skel.to_precomputed())

        cf = CloudFiles(path, progress=vol.progress)
        cf.put(
            path="{}.frags".format(bbox.to_filename()),
            content=mbuf.tobytes(),
            compress=None,  # values are already compressed per-entry by MapBuffer
            content_type="application/x-mapbuffer",
            cache_control=False,
        )
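
Example 2 above is the matching read path for these skeleton fragments, including the legacy pickle fallback for files written before the MapBuffer format was adopted.
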
Example 4: an empty MapBuffer behaves like an empty dict
def test_empty(compress):
  mbuf = MapBuffer({}, compress=compress)
  assert len(mbuf) == 0
  assert list(mbuf) == []

  assert mbuf.validate()
  assert mbuf.compress == compress

  # missing keys must raise KeyError, like an ordinary dict
  try:
    mbuf[1000]
    assert False
  except KeyError:
    pass
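
The compress argument these tests receive is presumably injected by pytest parametrization. A minimal sketch of that pattern; None and "br" appear elsewhere in these examples, while the full codec list here is an assumption:

import pytest
from mapbuffer import MapBuffer

# assumed codec list; substitute whatever codecs your mapbuffer version supports
@pytest.mark.parametrize("compress", (None, "gzip", "br"))
def test_roundtrip(compress):
  mbuf = MapBuffer({1: b"hello"}, compress=compress)
  assert mbuf[1] == b"hello"
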
Example 5: collect mesh fragments by label from MapBuffer files
def collect_mesh_fragments(
  cv:CloudVolume, 
  labels:List[int], 
  filenames:List[str], 
  mesh_dir:str, 
  progress:bool = False
) -> Dict[int, List[Mesh]]:
  dirfn = lambda loc: cv.meta.join(mesh_dir, loc)
  filenames = [ dirfn(loc) for loc in filenames ]

  block_size = 50

  if len(filenames) < block_size:
    blocks = [ filenames ]
    n_blocks = 1
  else:
    # ceiling division so the progress bar total includes the final partial block
    n_blocks = (len(filenames) + block_size - 1) // block_size
    blocks = sip(filenames, block_size)

  all_meshes = defaultdict(list)
  for filenames_block in tqdm(blocks, desc="Filename Block", total=n_blocks, disable=(not progress)):
    if cv.meta.path.protocol == "file":
      all_files = {}
      prefix = cv.cloudpath.replace("file://", "")
      for filename in filenames_block:
        all_files[filename] = open(os.path.join(prefix, filename), "rb")
    else:
      all_files = cv.mesh.cache.download(filenames_block, progress=progress)
    
    for filename, content in tqdm(all_files.items(), desc="Scanning Fragments", disable=(not progress)):
      fragment = MapBuffer(content, frombytesfn=Mesh.from_precomputed)
      fragment.validate()

      for label in labels:
        try:
          mesh = fragment[label]
          mesh.id = label
          all_meshes[label].append((filename, mesh))
        except KeyError:
          continue

      if hasattr(content, "close"):
        content.close()

  # ensure consistent results across multiple runs
  # by sorting mesh fragments by filename
  for label in all_meshes:
    all_meshes[label].sort(key=lambda pair: pair[0])
    all_meshes[label] = [ pair[1] for pair in all_meshes[label] ]

  return all_meshes
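
A hedged usage sketch of the function above; the dataset path, labels, fragment filename, and mesh directory are all hypothetical:

from cloudvolume import CloudVolume

cv = CloudVolume("gs://example-bucket/segmentation")  # hypothetical dataset
meshes = collect_mesh_fragments(
  cv,
  labels=[101, 202],
  filenames=["0-512_0-512_0-512.frags"],
  mesh_dir="mesh_mip_0",  # hypothetical mesh directory
  progress=True,
)
# each requested label maps to its fragment meshes, ordered by source filename
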
Example 6: round-trip a large random dict through a MapBuffer
def test_full(compress):
  # ~10,000 random labels (key collisions may shrink the dict slightly)
  # mapped to random byte strings of length 0-50
  data = {
    random.randint(0, 1000000000): bytes([
      random.randint(0, 255) for __ in range(random.randint(0, 50))
    ]) for _ in range(10000)
  }
  mbuf = MapBuffer(data, compress=compress)
  assert set(data.keys()) == set(mbuf.keys())
  assert set(data) == set(mbuf)
  assert set(data.values()) == set(mbuf.values())

  for key in data:
    assert data[key] == mbuf[key]
    assert data[key] == mbuf.get(key)
    assert key in mbuf

  assert data == mbuf.todict()

  for i in range(2000):
    if i not in data:
      assert i not in mbuf
      try:
        mbuf[i]
        assert False
      except KeyError:
        pass

  mbuf.validate()

  assert len(mbuf.buffer) > HEADER_LENGTH
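
HEADER_LENGTH here is the size of MapBuffer's fixed header, imported from the mapbuffer package; the final assertion simply confirms the serialized buffer holds more than the header alone.
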
Example 7: benchmark random-access lookups on a serialized MapBuffer
def test_mb(data):
    datasize = len(data)
    labels = list(data.keys())
    random.shuffle(labels)
    labels = labels[:datasize // 10]

    mbuf = MapBuffer(data)
    buf = mbuf.tobytes()

    s = time.time()
    mbuf = MapBuffer(buf)  # reparse from the raw serialized bytes
    for label in labels:
        mbuf[label]  # random-access lookup; only the elapsed time matters
    t = time.time() - s
    mf.write(f"{datasize}\t{t*1000:.5f}\n")
    mf.flush()
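
test_mb leans on two globals from its benchmark harness: data, a dict mapping integer labels to byte strings, and mf, an open log file that receives the tab-separated timings. A minimal sketch of such a harness; the sizes, value lengths, and output filename are illustrative assumptions, and test_mb from Example 7 is assumed to be defined in the same module:

import random
import time
from mapbuffer import MapBuffer

mf = open("mapbuffer_timings.tsv", "w")  # hypothetical output file

for datasize in (1_000, 10_000, 100_000):
  data = {
    label: bytes(random.randint(0, 255) for _ in range(40))
    for label in range(datasize)
  }
  test_mb(data)

mf.close()
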
Example 8: back a MapBuffer with any slice-indexable object
def test_object_access(compress):
  data = { 
    1: b"hello",
    2: b"world",
  }
  mbuf = MapBuffer(data, compress=compress)

  # any object that answers slice-style __getitem__ with bytes will do
  class Reader:
    def __init__(self):
      self.lst = mbuf.tobytes()
    def __getitem__(self, slc):
      return self.lst[slc]

  mbuf2 = MapBuffer(Reader())
  assert mbuf2[1] == b"hello"
  assert mbuf2[2] == b"world"
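
The Reader shim is the point of this test: MapBuffer never needs the whole buffer up front, only an object that answers slice-style __getitem__ with bytes. The same trick could back, say, a wrapper that translates slices into ranged reads of a remote object, though that wrapper is left to the caller.
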
Example 9: a corrupted value raises ValidationError via the crc32c check
def test_crc32c(compress):
  data = { 
    1: b"hello",
    2: b"world",
  }
  mbuf = MapBuffer(data, compress=compress)

  # flip one byte of the stored value: b"hello" -> b"Hello"
  idx = mbuf.buffer.index(b"hello")
  buf = list(mbuf.buffer)
  buf[idx] = ord(b'H')
  mbuf.buffer = bytes(buf)

  try:
    mbuf[1]
    assert False
  except ValidationError:
    pass
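
The corruption is detected at lookup time: flipping a single byte of the stored value makes mbuf[1] raise ValidationError instead of silently returning bad data, which is the crc32c guarantee the test's name refers to.
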
Example 10: random access from an open (memory-mapped) file handle
def test_mmap_access(compress):
  data = { 
    1: b"hello",
    2: b"world",
  }
  mbuf = MapBuffer(data, compress=compress)

  fileno = random.randint(0,2**32)
  filename = f"test_mmap-{fileno}.mb"

  with open(filename, "wb") as f:
    f.write(mbuf.tobytes())

  with open(filename, "rb") as f:
    mb = MapBuffer(f)  # open file handles are consulted in place, not copied into memory

    assert mb[1] == b"hello"
    assert mb[2] == b"world"

  try:
    os.remove(filename)
  except (PermissionError, FileNotFoundError):
    pass
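
Constructing the MapBuffer from an open file handle rather than bytes means lookups read from the file on demand (the test's name points to a memory-mapped path), so a single label can be fetched without loading the whole fragment file into memory.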