def test_fixedpartitionandreloffset(self):
        """Test rigid partition sizes and relative offsets.

        No padding should be needed and mask should be None.
        """

        arr = np.random.randint(1025, size=(4, 6, 4)).astype(np.uint16)
        schema = partitionSchema(PartitionDims(2, 8, 8),
                                 blank_delimiter=1111,
                                 padding=2,
                                 enablemask=True)
        volpart = volumePartition(0,
                                  VolumeOffset(1, 1, 1),
                                  reloffset=VolumeOffset(1, 1, 1))

        res = schema.partition_data([[volpart, arr]])
        self.assertEqual(len(res), 2)  # 2 partitions with zsize=2 each

        for partpair in res:
            part, partvol = partpair
            zidx = part.get_offset().z

            # the relative offset absorbs the global shift: z lands on the
            # partition grid (reloffset 0), while x and y are each shifted by 2
            self.assertEqual(part.get_reloffset().x, 2)
            self.assertEqual(part.get_reloffset().y, 2)
            self.assertEqual(part.get_reloffset().z, 0)

            match = np.array_equal(arr[zidx - 2:zidx, :, :], partvol)
            self.assertTrue(match)
            self.assertIsNone(part.mask)
def repartition_down(part_volume):
    part, volume = part_volume
    downsampled_offset = np.array(part.get_offset()) // 2
    downsampled_reloffset = np.array(part.get_reloffset()) // 2
    offsetnew = VolumeOffset(*downsampled_offset)
    reloffsetnew = VolumeOffset(*downsampled_reloffset)
    partnew = volumePartition((offsetnew.z, offsetnew.y, offsetnew.x),
                              offsetnew,
                              reloffset=reloffsetnew)
    return partnew, volume
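
# A minimal usage sketch (an addition, not part of the original source):
# downsampling by 2 should halve both the global offset and the relative
# offset. This assumes, as the tests here rely on, that VolumeOffset is
# ordered (z, y, x) and that get_offset()/get_reloffset() return the offsets
# passed at construction.
def example_repartition_down():
    part = volumePartition(0,
                           VolumeOffset(64, 32, 0),
                           reloffset=VolumeOffset(2, 2, 0))
    newpart, _ = repartition_down((part, None))
    assert newpart.get_offset().z == 32
    assert newpart.get_offset().y == 16
    assert newpart.get_offset().x == 0
    assert newpart.get_reloffset().z == 1
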
    def test_partitionhash(self):
        """Check hashing function for volumePartition.
        """
        part1 = volumePartition(7, VolumeOffset(3, 1, 5))
        part2 = volumePartition(7, VolumeOffset(3, 2, 5))
        part3 = volumePartition(8, VolumeOffset(3, 1, 5))

        # hash should be equal even with different offsets
        self.assertEqual(hash(part1), hash(part2))

        # hash should be different if index is different
        self.assertNotEqual(hash(part1), hash(part3))
    def test_partitioneq(self):
        """Check equivalence function for volumePartition.
        """
        part1 = volumePartition(7, VolumeOffset(3, 1, 5))
        part2 = volumePartition(7, VolumeOffset(3, 2, 5))
        part3 = volumePartition(8, VolumeOffset(3, 1, 5))

        # should be equal even with different offsets
        self.assertEqual(part1, part2)

        # should be different if index is different
        self.assertNotEqual(part1, part3)
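
# A minimal self-contained sketch (an assumption about the library, not its
# actual implementation) of the identity semantics the two tests above pin
# down: equality and hashing depend only on the partition index, never the
# offset, so partitions with the same index collapse together in sets/dicts.
class PartitionIdentitySketch:
    def __init__(self, index, offset=None):
        self.index = index
        self.offset = offset

    def __eq__(self, other):
        # offsets are intentionally ignored
        return self.index == other.index

    def __hash__(self):
        return hash(self.index)
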
    def test_largeimportandreverse(self):
        """Imports a large volume shifted, partitions, then unpartitions.
        """
        zplanes = 503
        arr = np.random.randint(255, size=(zplanes, 233, 112)).astype(np.uint8)
        filterstr, formatstr, fnames = writeImages(arr)

        schema = partitionSchema(PartitionDims(32, 64, 64), padding=32)
        imgreader = imagefileSrc(schema,
                                 formatstr,
                                 minmaxplane=(0, zplanes),
                                 offset=VolumeOffset(35, 21, 55))
        partitions = imgreader.extract_volume()

        for fname in fnames:
            os.remove(fname)

        self.assertEqual(len(partitions), 192)

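        # PartitionDims(0, 0, 0) leaves every dimension unbounded, so
        # repartitioning regroups all the pieces into a single volume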
        schema = partitionSchema(PartitionDims(0, 0, 0))
        res = schema.partition_data(partitions)
        self.assertEqual(len(res), 1)

        # the returned data is padded out to the 32-voxel grid: offset
        # (z=35, y=21, x=55) rounds down to grid starts (32, 0, 32), so the
        # data lands at (3, 21, 23) within the padded volume
        origvol = np.zeros((512, 256, 160), dtype=np.uint8)
        origvol[3:506, 21:254, 23:135] = arr

        finalvol = res[0][1]
        match = np.array_equal(origvol, finalvol)
        self.assertTrue(match)
    def test_iteration(self):
        """Reads 32 images at a time and checks the final result is equal to original.
        """
        zplanes = 503
        arr = np.random.randint(255, size=(zplanes, 233, 112)).astype(np.uint8)
        filterstr, formatstr, fnames = writeImages(arr)

        schema = partitionSchema(PartitionDims(32, 64, 64), padding=32)
        imgreader = imagefileSrc(schema,
                                 formatstr,
                                 minmaxplane=(0, zplanes),
                                 offset=VolumeOffset(35, 21, 55))

        # use the iterator; the iteration size is determined by the Z partition size
        partitions = []
        for partitions_iter in imgreader:
            partitions.extend(partitions_iter)

        for fname in fnames:
            os.remove(fname)

        self.assertEqual(len(partitions), 192)

        schema = partitionSchema(PartitionDims(0, 0, 0))
        res = schema.partition_data(partitions)
        self.assertEqual(len(res), 1)

        # the returned data is padded out to the 32-voxel grid, exactly as in
        # test_largeimportandreverse above
        origvol = np.zeros((512, 256, 160), dtype=np.uint8)
        origvol[3:506, 21:254, 23:135] = arr

        finalvol = res[0][1]
        match = np.array_equal(origvol, finalvol)
        self.assertTrue(match)
    def test_creatinglargevol(self):
        """Take small partitions and group into one large partition.
        """

        arr = np.random.randint(1025, size=(4, 6, 4)).astype(np.uint16)
        schema = partitionSchema(PartitionDims(2, 0, 0),
                                 blank_delimiter=1111,
                                 padding=2)
        volpart = volumePartition(0, VolumeOffset(1, 1, 1))

        res = schema.partition_data([(volpart, arr)])
        self.assertEqual(len(res), 3)  # 3 partitions with zsize=2 each

        # make a new volume and pad
        arrcomp = np.zeros((6, 8, 6), dtype=np.uint16)
        arrcomp[:] = 1111
        arrcomp[1, 1:7, 1:5] = arr[0, :, :]
        arrcomp[2, 1:7, 1:5] = arr[1, :, :]
        arrcomp[3, 1:7, 1:5] = arr[2, :, :]
        arrcomp[4, 1:7, 1:5] = arr[3, :, :]

        # reverse procedure should be same as the original
        schemaglb = partitionSchema(PartitionDims(0, 0, 0))
        res2 = schemaglb.partition_data(res)
        self.assertEqual(len(res2), 1)  # regrouped into a single global partition

        match = np.array_equal(arrcomp, res2[0][1])
        self.assertTrue(match)
    def test_createtiles(self):
        """Take a 3D volume and transform into a series of slices.
        """

        arr = np.random.randint(255, size=(45, 25, 13)).astype(np.uint8)
        schema = partitionSchema(PartitionDims(1, 0, 0))
        volpart = volumePartition(0, VolumeOffset(0, 0, 0))

        res = schema.partition_data([[volpart, arr]])
        self.assertEqual(len(res), 45)

        for tilepair in res:
            part, tile = tilepair
            zplane = part.get_offset().z
            match = np.array_equal(tile[0, :, :], arr[zplane, :, :])
            self.assertTrue(match)
    def test_retrieve_shiftedpaddedimages(self):
        """Converts 3d numpy array and imports into shifted global space with padding.

        Note:
            Tests min/max plane format string as well.
        """
        zplanes = 32
        arr = np.random.randint(255, size=(zplanes, 25, 13)).astype(np.uint8)
        filterstr, formatstr, fnames = writeImages(arr, 5)

        schema = partitionSchema(PartitionDims(32, 0, 0), padding=8)
        imgreader = imagefileSrc(schema,
                                 formatstr,
                                 minmaxplane=(5, 5 + zplanes),
                                 offset=VolumeOffset(1, 3, 2))
        partitions = imgreader.extract_volume()

        for fname in fnames:
            os.remove(fname)

        self.assertEqual(len(partitions), 2)

        origvol = np.zeros((40, 32, 16), dtype=np.uint8)
        origvol[1:33, 3:28, 2:15] = arr

        zoff = partitions[0][0].get_offset().z
        if zoff == 0:
            finalvol = partitions[0][1]
            match = np.array_equal(origvol[0:32, 0:32, 0:16], finalvol)
            self.assertTrue(match)

            finalvol = partitions[1][1]
            match = np.array_equal(origvol[32:40, 0:32, 0:16], finalvol)
            self.assertTrue(match)
        else:
            finalvol = partitions[1][1]
            match = np.array_equal(origvol[0:32, 0:32, 0:16], finalvol)
            self.assertTrue(match)

            finalvol = partitions[0][1]
            match = np.array_equal(origvol[32:40, 0:32, 0:16], finalvol)
            self.assertTrue(match)
    def test_dvidpadlabels(self):
        """Check padding data with DVID labels.
        """

        service = DVIDServerService(dvidserver)
        uuid = service.create_new_repo("foo", "bar")

        ns = DVIDNodeService(dvidserver, uuid)
        ns.create_labelblk("labels")

        arr = np.random.randint(12442, size=(58, 58, 58)).astype(np.uint64)

        arr2 = np.zeros((64, 64, 64), np.uint64)
        arr2[0:58, 0:58, 0:58] = arr
        # load label data
        ns.put_labels3D("labels", arr2, (0, 0, 0))

        # build the expected shifted volume for comparison
        arr2[6:64, 6:64, 6:64] = arr

        # read and pad data
        schema = partitionSchema(PartitionDims(32, 64, 64),
                                 enablemask=True,
                                 padding=8,
                                 blank_delimiter=99999)
        volpart = volumePartition(0, VolumeOffset(6, 6, 6))
        partitions = schema.partition_data([(volpart, arr)])

        # fetch with mask
        dvidreader = dvidSrc(dvidserver, uuid, "labels", partitions)

        newparts = dvidreader.extract_volume()
        self.assertEqual(len(newparts), 2)

        for (part, vol) in newparts:
            if part.get_offset().z == 0:
                match = np.array_equal(arr2[0:32, :, :], vol)
                self.assertTrue(match)
            else:
                match = np.array_equal(arr2[32:64, :, :], vol)
                self.assertTrue(match)
    def test_createtilesshifted(self):
        """Take a 3d volume with offset and transform into a series of slices.

        The size of each tile should be the same even with the shift.
        """

        arr = np.random.randint(255, size=(45, 25, 13)).astype(np.uint8)
        schema = partitionSchema(PartitionDims(1, 0, 0))
        volpart = volumePartition(0, VolumeOffset(4, 2, 3))

        res = schema.partition_data([[volpart, arr]])
        self.assertEqual(len(res), 45)

        for tilepair in res:
            part, tile = tilepair
            zplane = part.get_offset().z - 4
            self.assertEqual(part.get_offset().x, 0)
            self.assertEqual(part.get_offset().y, 0)
            match = np.array_equal(tile[0, :, :], arr[zplane, :, :])
            self.assertTrue(match)
    def test_extractpartitionaligned(self):
        """Imports images shifted into partitioned space that is padded entire Z size.
        """
        zplanes = 33
        arr = np.random.randint(255, size=(zplanes, 25, 13)).astype(np.uint8)
        filterstr, formatstr, fnames = writeImages(arr, 5)

        schema = partitionSchema(PartitionDims(32, 0, 0), padding=32)
        imgreader = imagefileSrc(schema,
                                 formatstr,
                                 minmaxplane=(5, 5 + zplanes),
                                 offset=VolumeOffset(1, 3, 2))
        partitions = imgreader.extract_volume()

        for fname in fnames:
            os.remove(fname)

        self.assertEqual(len(partitions), 2)

        origvol = np.zeros((64, 32, 32), dtype=np.uint8)
        origvol[1:34, 3:28, 2:15] = arr

        zoff = partitions[0][0].get_offset().z
        if zoff == 0:
            finalvol = partitions[0][1]
            match = np.array_equal(origvol[0:32, :, :], finalvol)
            self.assertTrue(match)

            finalvol = partitions[1][1]
            match = np.array_equal(origvol[32:64, :, :], finalvol)
            self.assertTrue(match)
        else:
            finalvol = partitions[1][1]
            match = np.array_equal(origvol[0:32, :, :], finalvol)
            self.assertTrue(match)

            finalvol = partitions[0][1]
            match = np.array_equal(origvol[32:64, :, :], finalvol)
            self.assertTrue(match)
    def test_createvolshiftandpad(self):
        """Take a 3d volume with offset and transform into a series of subvolumes with padding.

        The data is moved to the proper partition and padded so that the x, y, z
        of the stored data line up to a grid determined by the specified padding.
        Also tests >8-bit data and the data mask.
        """

        arr = np.random.randint(1025, size=(4, 6, 4)).astype(np.uint16)
        schema = partitionSchema(PartitionDims(2, 0, 0),
                                 blank_delimiter=1111,
                                 padding=2,
                                 enablemask=True)
        volpart = volumePartition(0, VolumeOffset(1, 1, 1))

        res = schema.partition_data([[volpart, arr]])
        self.assertEqual(len(res), 3)  # 3 partitions with zsize=2 each

        arrcomp = np.zeros((6, 8, 6), dtype=np.uint16)
        arrcomp[:] = 1111
        arrcomp[1, 1:7, 1:5] = arr[0, :, :]
        arrcomp[2, 1:7, 1:5] = arr[1, :, :]
        arrcomp[3, 1:7, 1:5] = arr[2, :, :]
        arrcomp[4, 1:7, 1:5] = arr[3, :, :]

        for partpair in res:
            part, partvol = partpair
            zidx = part.get_offset().z

            # make a mask
            mask = arrcomp[zidx:zidx + 2, :, :].copy()
            mask[mask != 1111] = 1
            mask[mask == 1111] = 0

            match = np.array_equal(arrcomp[zidx:zidx + 2, :, :], partvol)
            matchmask = np.array_equal(mask, part.mask)
            self.assertTrue(match)
            self.assertTrue(matchmask)
    def test_retrieve_wholevolume(self):
        """Converts 3d numpy array to 2D slices and imports these slices as a 3D volume.

        Note: 
            Also checks that volume offset works properly.
        """
        zplanes = 5
        arr = np.random.randint(255, size=(zplanes, 25, 13)).astype(np.uint8)
        filterstr, formatstr, fnames = writeImages(arr, 10)

        schema = partitionSchema(PartitionDims(0, 0, 0))
        imgreader = imagefileSrc(schema,
                                 filterstr,
                                 offset=VolumeOffset(1, 0, 0))
        partitions = imgreader.extract_volume()

        for fname in fnames:
            os.remove(fname)

        self.assertEqual(len(partitions), 1)
        finalvol = partitions[0][1]
        match = np.array_equal(arr, finalvol)
        self.assertTrue(match)
    def execute(self):
        """
        Execute spark workflow.
        """
        self._sanitize_config()
        session = default_dvid_session()

        dvid_info = self.config_data["dvid-info"]
        options = self.config_data["options"]
        block_shape = 3 * (options["blocksize"], )
        self.partition_size = options["blockwritelimit"] * options["blocksize"]
        # ?? the number of parallel requests may be very small at high pyramid levels

        # the x dimension is very large (blockwritelimit * blocksize), so each
        # partition spans a long run of blocks along x
        partition_dims = PartitionDims(options["blocksize"],
                                       options["blocksize"],
                                       self.partition_size)
        partition_schema = partitionSchema(
            partition_dims,
            blank_delimiter=options["blankdelimiter"],
            padding=options["blocksize"],
            enablemask=options["has-dvidmask"])

        offset_zyx = np.array(options["offset"][::-1])
        offset_zyx[0] += options["minslice"]
        imgreader = imagefileSrc(partition_schema, options["basename"],
                                 (options["minslice"], options["maxslice"]),
                                 VolumeOffset(*offset_zyx), self.sc)

        # !! hack: override the iteration size, which defaults to the partition
        # size (TODO: add an option). This makes downstream processing a little
        # more convenient and reduces unnecessary DVID patching if that is
        # enabled. (Must be a multiple of the block size.)
        imgreader.iteration_size = options["num-tasks"]

        # get volume dimensions from the first image (hack)
        from PIL import Image
        import requests
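        # e.g. a basename like "slice.%05d.png" uses %-style substitution,
        # while "slice.{:05d}.png" uses str.format-style (hypothetical names)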
        if '%' in options["basename"]:
            minslice_name = options["basename"] % options["minslice"]
        elif '{' in options["basename"]:
            minslice_name = options["basename"].format(options["minslice"])
        else:
            raise RuntimeError(
                f"Unrecognized format string for image basename: {options['basename']}"
            )

        img = Image.open(minslice_name)
        volume_shape = (1 + options["maxslice"] - options["minslice"],
                        img.height, img.width)
        del img

        global_box_zyx = np.zeros((2, 3), dtype=int)
        global_box_zyx[0] = options["offset"]
        global_box_zyx[0] += (options["minslice"], 0, 0)

        global_box_zyx[1] = global_box_zyx[0] + volume_shape

        if options["create-pyramid"]:
            if is_datainstance(dvid_info["dvid-server"], dvid_info["uuid"],
                               dvid_info["dataname"]):
                logger.info(
                    "'{dataname}' already exists, skipping creation".format(
                        **dvid_info))
            else:
                # create the data instance and disable the dvid mask
                # !! assume that if the data instance exists and the mask is set,
                # !! all pyramid levels also exist, meaning the mask should be used
                options["has-dvidmask"] = False
                if options["disable-original"]:
                    logger.info(
                        "Not creating '{dataname}' due to 'disable-original' config setting"
                        .format(**dvid_info))
                elif 0 in options["skipped-pyramid-levels"]:
                    logger.info(
                        "Not creating '{dataname}' due to 'skipped-pyramid-levels' config setting"
                        .format(**dvid_info))
                else:
                    if options["is-rawarray"]:
                        create_rawarray8(dvid_info["dvid-server"],
                                         dvid_info["uuid"],
                                         dvid_info["dataname"], block_shape)
                    else:
                        create_label_instance(dvid_info["dvid-server"],
                                              dvid_info["uuid"],
                                              dvid_info["dataname"], 0,
                                              block_shape)

            if not options["disable-original"] and 0 not in options[
                    "skipped-pyramid-levels"]:
                update_extents(dvid_info["dvid-server"], dvid_info["uuid"],
                               dvid_info["dataname"], global_box_zyx)

                # Bottom level of pyramid is listed as neuroglancer-compatible
                extend_list_value(dvid_info["dvid-server"], dvid_info["uuid"],
                                  '.meta', 'neuroglancer',
                                  [dvid_info["dataname"]])

        # determine number of pyramid levels if not specified
        if options["create-pyramid"] or options["create-pyramid-jpeg"]:
            if options["pyramid-depth"] == -1:
                options["pyramid-depth"] = 0
                zsize = options["maxslice"] - options["minslice"] + 1
                while zsize > 512:
                    options["pyramid-depth"] += 1
                    zsize /= 2

                # NeuTu doesn't work well if there aren't at least a few pyramid levels.
                # Even for small volumes, use at least a few pyramid levels,
                # unless the depth was explicit in the config.
                options["pyramid-depth"] = max(options["pyramid-depth"], 4)

        # create pyramid data instances
        if options["create-pyramid-jpeg"]:
            dataname_jpeg = dvid_info["dataname"] + self.JPEGPYRAMID_NAME
            if 0 in options["skipped-pyramid-levels"]:
                logger.info(
                    "Not creating '{}' due to 'skipped-pyramid-levels' config setting"
                    .format(dataname_jpeg))
            else:
                if is_datainstance(dvid_info["dvid-server"], dvid_info["uuid"],
                                   dataname_jpeg):
                    logger.info(
                        "'{}' already exists, skipping creation".format(
                            dataname_jpeg))
                else:
                    create_rawarray8(dvid_info["dvid-server"],
                                     dvid_info["uuid"], dataname_jpeg,
                                     block_shape, Compression.JPEG)

                update_extents(dvid_info["dvid-server"], dvid_info["uuid"],
                               dataname_jpeg, global_box_zyx)

                # Bottom level of pyramid is listed as neuroglancer-compatible
                extend_list_value(dvid_info["dvid-server"], dvid_info["uuid"],
                                  '.meta', 'neuroglancer', [dataname_jpeg])

        if options["create-pyramid"]:
            for level in range(1, 1 + options["pyramid-depth"]):
                downsampled_box_zyx = global_box_zyx // (2**level)
                downname = dvid_info["dataname"] + "_%d" % level

                if level in options["skipped-pyramid-levels"]:
                    logger.info(
                        "Not creating '{}' due to 'skipped-pyramid-levels' config setting"
                        .format(downname))
                    continue

                if is_datainstance(dvid_info["dvid-server"], dvid_info["uuid"],
                                   downname):
                    logger.info(
                        "'{}' already exists, skipping creation".format(
                            downname))
                else:
                    if options["is-rawarray"]:
                        create_rawarray8(dvid_info["dvid-server"],
                                         dvid_info["uuid"], downname,
                                         block_shape)
                    else:
                        create_label_instance(dvid_info["dvid-server"],
                                              dvid_info["uuid"], downname, 0,
                                              block_shape)

                update_extents(dvid_info["dvid-server"], dvid_info["uuid"],
                               downname, downsampled_box_zyx)

                # Higher levels of the pyramid should not appear in the DVID-lite console.
                extend_list_value(dvid_info["dvid-server"], dvid_info["uuid"],
                                  '.meta', 'restrictions', [downname])

        if options["create-pyramid-jpeg"]:
            for level in range(1, 1 + options["pyramid-depth"]):
                downsampled_box_zyx = global_box_zyx // (2**level)
                downname = dvid_info[
                    "dataname"] + self.JPEGPYRAMID_NAME + "_%d" % level

                if level in options["skipped-pyramid-levels"]:
                    logger.info(
                        "Not creating '{}' due to 'skipped-pyramid-levels' config setting"
                        .format(downname))
                    continue

                if is_datainstance(dvid_info["dvid-server"], dvid_info["uuid"],
                                   downname):
                    logger.info(
                        "'{}' already exists, skipping creation".format(
                            downname))
                else:
                    create_rawarray8(dvid_info["dvid-server"],
                                     dvid_info["uuid"], downname, block_shape,
                                     Compression.JPEG)

                update_extents(dvid_info["dvid-server"], dvid_info["uuid"],
                               downname, downsampled_box_zyx)

                # Higher levels of the pyramid should not appear in the DVID-lite console.
                extend_list_value(dvid_info["dvid-server"], dvid_info["uuid"],
                                  '.meta', 'restrictions', [downname])

        # create tiles
        if options["create-tiles"] or options["create-tiles-jpeg"]:
            MinTileCoord = global_box_zyx[0][::-1] // options["tilesize"]
            MaxTileCoord = global_box_zyx[1][::-1] // options["tilesize"]

            # get max level by just finding max tile coord
            maxval = max(MaxTileCoord) - min(MinTileCoord) + 1
            import math
            self.maxlevel = int(math.log(maxval) / math.log(2))
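            # e.g. tile coordinates spanning 0..9 give maxval = 10, so
            # maxlevel = int(log2(10)) = 3 (tile levels 0 through 3)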

            tilemeta = {}
            tilemeta["MinTileCoord"] = MinTileCoord.tolist()
            tilemeta["MaxTileCoord"] = MaxTileCoord.tolist()
            tilemeta["Levels"] = {}

            currres = 8.0  # just use as placeholder for now
            for level in range(0, self.maxlevel + 1):
                tilemeta["Levels"][str(level)] = {
                    "Resolution": 3 * [currres],
                    "TileSize": 3 * [options["tilesize"]]
                }
                currres *= 2

            if options["create-tiles"]:
                session.post("{dvid-server}/api/repo/{uuid}/instance".format(
                    **dvid_info),
                             json={
                                 "typename": "imagetile",
                                 "dataname":
                                 dvid_info["dataname"] + self.TILENAME,
                                 "source": dvid_info["dataname"],
                                 "format": "png"
                             })
                session.post(
                    "{dvid-server}/api/repo/{uuid}/{dataname}{tilename}/metadata"
                    .format(tilename=self.TILENAME, **dvid_info),
                    json=tilemeta)

            if options["create-tiles-jpeg"]:
                session.post("{dvid-server}/api/repo/{uuid}/instance".format(
                    **dvid_info),
                             json={
                                 "typename": "imagetile",
                                 "dataname":
                                 dvid_info["dataname"] + self.JPEGTILENAME,
                                 "source": dvid_info["dataname"],
                                 "format": "jpg"
                             })
                session.post(
                    "{dvid-server}/api/repo/{uuid}/{dataname_jpeg_tile}/metadata"
                    .format(dataname_jpeg_tile=dvid_info["dataname"] +
                            self.JPEGTILENAME,
                            **dvid_info),
                    json=tilemeta)

        if dvid_info["dvid-server"].startswith("http://127.0.0.1"):

            def reload_meta():
                reload_server_metadata(dvid_info["dvid-server"])

            self.run_on_each_worker(reload_meta)

        # TODO Validation: should verify syncs exist, should verify pyramid depth

        # TODO: set syncs for pyramids, tiles if base datatype exists
        # syncs should be removed before ingestion and added afterward

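        # cache the persisted partitions for each pyramid level; they are
        # merged and downsampled once enough slabs have accumulated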
        levels_cache = {}

        # iterate through each partition
        for arraypartition in imgreader:
            # DVID pad if necessary
            if options["has-dvidmask"]:
                dvidsrc = dvidSrc(dvid_info["dvid-server"],
                                  dvid_info["uuid"],
                                  dvid_info["dataname"],
                                  arraypartition,
                                  resource_server=self.resource_server,
                                  resource_port=self.resource_port)

                arraypartition = dvidsrc.extract_volume()

            # potentially needed for future iterations
            arraypartition.persist()

            # check for final layer
            finallayer = imgreader.curr_slice > imgreader.end_slice

            if not options["disable-original"]:
                # Write level-0 of the raw data, even if we aren't writing the rest of the pyramid.
                dataname = datanamelossy = None
                if options["create-pyramid"]:
                    dataname = dvid_info["dataname"]
                if options["create-pyramid-jpeg"]:
                    datanamelossy = dvid_info[
                        "dataname"] + self.JPEGPYRAMID_NAME

                if (dataname or datanamelossy
                    ) and 0 not in options["skipped-pyramid-levels"]:
                    self._write_blocks(arraypartition, dataname, datanamelossy)

            if options["create-tiles"] or options["create-tiles-jpeg"]:
                # repartition into tiles
                schema = partitionSchema(PartitionDims(1, 0, 0))
                tilepartition = schema.partition_data(arraypartition)

                # write unpadded tilesize (will pad with delimiter if needed)
                self._writeimagepyramid(tilepartition)

            if options["create-pyramid"] or options["create-pyramid-jpeg"]:
                if 0 not in levels_cache:
                    levels_cache[0] = []
                levels_cache[0].append(arraypartition)
                curr_level = 1
                downsample_factor = 2

                # the current slice count is always a multiple of the Z block
                # size; downsample another level whenever a power-of-two number
                # of slabs has accumulated, or on the final fetch
                assert imgreader.curr_slice % options["blocksize"] == 0
                while ((((imgreader.curr_slice // options["blocksize"]) %
                         downsample_factor) == 0) or
                       finallayer) and curr_level <= options["pyramid-depth"]:
                    partlist = levels_cache[curr_level - 1]
                    part = partlist[0]
                    # union all RDDs from the same level
                    for iter1 in range(1, len(partlist)):
                        part = part.union(partlist[iter1])

                    # downsample each volume (labels and raw data use different filters)
                    israw = options["is-rawarray"]

                    def downsample(part_vol):
                        part, vol = part_vol
                        if israw:
                            vol = downsample_raw(vol)[0]
                        else:
                            vol = downsample_3Dlabels(vol)[0]
                        return (part, vol)

                    downsampled_array = part.map(downsample)

                    # repartition (volume sizes and offsets are always powers
                    # of two because of the padding)
                    def repartition_down(part_volume):
                        part, volume = part_volume
                        downsampled_offset = np.array(part.get_offset()) // 2
                        downsampled_reloffset = np.array(
                            part.get_reloffset()) // 2
                        offsetnew = VolumeOffset(*downsampled_offset)
                        reloffsetnew = VolumeOffset(*downsampled_reloffset)
                        partnew = volumePartition(
                            (offsetnew.z, offsetnew.y, offsetnew.x),
                            offsetnew,
                            reloffset=reloffsetnew)
                        return partnew, volume

                    downsampled_array = downsampled_array.map(repartition_down)

                    # repartition the downsampled data
                    partition_dims = PartitionDims(options["blocksize"],
                                                   options["blocksize"],
                                                   self.partition_size)
                    schema = partitionSchema(
                        partition_dims,
                        blank_delimiter=options["blankdelimiter"],
                        padding=options["blocksize"],
                        enablemask=options["has-dvidmask"])
                    downsampled_array = schema.partition_data(
                        downsampled_array)

                    # persist before padding if there are more levels
                    if curr_level < options["pyramid-depth"]:
                        downsampled_array.persist()
                        if curr_level not in levels_cache:
                            levels_cache[curr_level] = []
                        levels_cache[curr_level].append(downsampled_array)

                    # pad from DVID (moving this before the persist would allow
                    # multi-ingest, but would lead to slightly non-optimal
                    # downsampling boundary effects when only lossy compression
                    # is used)
                    if options["has-dvidmask"]:
                        padname = dvid_info["dataname"]
                        if options["create-pyramid-jpeg"]:
                            # !! should pad with the original data if both
                            # pyramids are computed; for now, pad with jpeg
                            padname += self.JPEGPYRAMID_NAME
                        padname += "_%d" % curr_level
                        dvidsrc = dvidSrc(dvid_info["dvid-server"],
                                          dvid_info["uuid"],
                                          padname,
                                          downsampled_array,
                                          resource_server=self.resource_server,
                                          resource_port=self.resource_port)

                        downsampled_array = dvidsrc.extract_volume()

                    # write result
                    downname = None
                    downnamelossy = None
                    if options["create-pyramid"]:
                        downname = dvid_info["dataname"] + "_%d" % curr_level
                    if options["create-pyramid-jpeg"]:
                        downnamelossy = dvid_info[
                            "dataname"] + self.JPEGPYRAMID_NAME + "_%d" % curr_level

                    if curr_level not in options["skipped-pyramid-levels"]:
                        self._write_blocks(downsampled_array, downname,
                                           downnamelossy)

                    # remove previous level
                    del levels_cache[curr_level - 1]
                    curr_level += 1
                    downsample_factor *= 2
    def test_dvidfetchgray(self):
        """Check reading grayscale from DVID from partitions.

        This also checks basic iteration and overwrite of
        previous data.
        """

        service = DVIDServerService(dvidserver)
        uuid = service.create_new_repo("foo", "bar")

        ns = DVIDNodeService(dvidserver, uuid)
        ns.create_grayscale8("gray")

        arr = np.random.randint(255, size=(64, 64, 64)).astype(np.uint8)

        # load gray data
        ns.put_gray3D("gray", arr, (0, 0, 0))

        # read data
        schema = partitionSchema(PartitionDims(32, 64, 64))
        volpart = volumePartition(0, VolumeOffset(0, 0, 0))
        overwrite = np.random.randint(255, size=(64, 64, 64)).astype(np.uint8)
        partitions = schema.partition_data([(volpart, overwrite)])

        dvidreader = dvidSrc(dvidserver,
                             uuid,
                             "gray",
                             partitions,
                             maskonly=False)

        newparts = dvidreader.extract_volume()
        self.assertEqual(len(newparts), 2)

        for (part, vol) in newparts:
            if part.get_offset().z == 0:
                match = np.array_equal(arr[0:32, :, :], vol)
                self.assertTrue(match)
            else:
                match = np.array_equal(arr[32:64, :, :], vol)
                self.assertTrue(match)

        # test iteration
        dvidreader2 = dvidSrc(dvidserver,
                              uuid,
                              "gray",
                              partitions,
                              maskonly=False)

        newparts2 = []
        for newpart in dvidreader2:
            self.assertEqual(len(newpart), 1)
            newparts2.extend(newpart)
        self.assertEqual(len(newparts2), 2)

        for (part, vol) in newparts2:
            if part.get_offset().z == 0:
                match = np.array_equal(arr[0:32, :, :], vol)
                self.assertTrue(match)
            else:
                match = np.array_equal(arr[32:64, :, :], vol)
                self.assertTrue(match)