Code Example #1
    def execute(self):
        self._init_services()
        self._validate_config()

        options = self.config["copygrayscale"]
        input_bb_zyx = self.input_service.bounding_box_zyx

        min_scale = options["min-pyramid-scale"]
        max_scale = options["max-pyramid-scale"]

        starting_slice = options["starting-slice"]

        axis_name = options["slab-axis"]
        axis = 'zyx'.index(axis_name)
        slab_boxes = list(slabs_from_box(input_bb_zyx, options["slab-depth"], slab_cutting_axis=axis))
        logger.info(f"Processing volume in {len(slab_boxes)} slabs")

        for slab_index, slab_fullres_box_zyx in enumerate(slab_boxes):
            if slab_fullres_box_zyx[0, axis] < starting_slice:
                logger.info(f"Slab {slab_index}: SKIPPING. {slab_fullres_box_zyx[:,::-1].tolist()}")
                continue

            with Timer() as slab_timer:
                logger.info(f"Slab {slab_index}: STARTING. {slab_fullres_box_zyx[:,::-1].tolist()}")
                slab_wall = None
                for scale in range(0, max_scale+1):
                    with Timer() as scale_timer:
                        slab_wall = self._process_slab(scale, slab_fullres_box_zyx, slab_index, len(slab_boxes), slab_wall, min_scale)
                    logger.info(f"Slab {slab_index}: Scale {scale} took {scale_timer.timedelta}")

            logger.info(f"Slab {slab_index}: DONE. ({slab_timer.timedelta})", extra={'status': f"DONE with slab {slab_index}"})

        logger.info(f"DONE exporting {len(slab_boxes)} slabs")
Code Example #2
    def execute(self):
        self._init_services()
        self._validate_config()
        self._prepare_output()
        
        options = self.config_data["options"]
        input_bb_zyx = self.input_service.bounding_box_zyx

        min_scale = options["min-pyramid-scale"]
        max_scale = options["max-pyramid-scale"]
        
        starting_slice = options["starting-slice"]
        
        axis_name = options["slab-axis"]
        axis = 'zyx'.index(axis_name)
        slab_boxes = list(slabs_from_box(input_bb_zyx, options["slab-depth"], 0, 'round-down', axis))
        logger.info(f"Processing volume in {len(slab_boxes)} slabs")

        for slab_index, slab_fullres_box_zyx in enumerate(slab_boxes):
            if slab_fullres_box_zyx[0, axis] < starting_slice:
                logger.info(f"Slab {slab_index}: SKIPPING. {slab_fullres_box_zyx[:,::-1].tolist()}")
                continue

            with Timer() as slab_timer:
                logger.info(f"Slab {slab_index}: STARTING. {slab_fullres_box_zyx[:,::-1].tolist()}")
                slab_wall = None
                for scale in range(min_scale, max_scale+1):
                    with Timer() as scale_timer:
                        slab_wall = self._process_slab(scale, slab_fullres_box_zyx, slab_index, len(slab_boxes), slab_wall)
                    logger.info(f"Slab {slab_index}: Scale {scale} took {scale_timer.timedelta}")

            logger.info(f"Slab {slab_index}: DONE. ({slab_timer.timedelta})", extra={'status': f"DONE with slab {slab_index}"})

        logger.info(f"DONE exporting {len(slab_boxes)} slabs")
Code Example #3
File: test_grid.py  Project: y2mk1ng/neuclease
def test_slabs_for_box_scaled():
    box = [(0, 0, 0), (100, 200, 300)]
    slabs = list(slabs_from_box(box, 10, scale=1))
    expected = np.array([((a, 0, 0), (b, 100, 150))
                         for (a, b) in zip(range(0, 50, 10), range(10, 51, 10))
                         ])
    assert (np.array(slabs) == expected).all()
Code Example #4
def test_slabs_for_box_scaled_nonaligned():
    box = [(5,7,8), (99, 200, 299)]
    slabs = list(slabs_from_box( box, 10, scale=1 ))
    
    first_slab = ((2,3,4), (10,100,150))
    expected = [first_slab] + [((a,3,4), (b,100,150)) for (a,b) in zip(range(10,50,10), range(20,51,10))]
    
    assert (np.array(slabs) == expected).all()
Code Example #5
def test_slabs_for_box_scaled_nonaligned_round_in():
    box = [(5,7,8), (99, 200, 299)]
    slabs = list(slabs_from_box( box, 10, scale=1, scaling_policy='round-in' ))
    
    first_slab = ((3,4,4), (10,100,149))
    last_slab = ((40,4,4), (49,100,149))
    expected = [first_slab] + [((a,4,4), (b,100,149)) for (a,b) in zip(range(10,40,10), range(20,50,10))] + [last_slab]
    
    assert (np.array(slabs) == expected).all()
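
The two tests above pin down how slabs_from_box behaves at a reduced scale: 'round-out' (the apparent default) grows the box outward to whole coordinates after dividing by 2**scale, 'round-in' shrinks it inward, and the slab edges along the cutting axis are aligned to multiples of the slab depth, so the first and last slabs may be shorter. The following is a minimal sketch consistent with those expectations; it is an illustration only, not the neuclease implementation, and the parameter names, defaults, and the 'round-down' branch are assumed from the calls in the examples above.

import numpy as np

def slabs_from_box_sketch(full_res_box, slab_depth, scale=0,
                          scaling_policy='round-out', slab_cutting_axis=0):
    """Illustrative sketch only: downscale the box by 2**scale according to
    the scaling policy, then cut it along one axis into slabs whose edges
    fall on multiples of slab_depth (first/last slabs may be shorter)."""
    box = np.asarray(full_res_box, dtype=float) / 2**scale
    if scaling_policy == 'round-out':
        box = np.array([np.floor(box[0]), np.ceil(box[1])]).astype(int)
    elif scaling_policy == 'round-in':
        box = np.array([np.ceil(box[0]), np.floor(box[1])]).astype(int)
    else:  # assumed behavior for 'round-down'
        box = np.floor(box).astype(int)

    start, stop = box[:, slab_cutting_axis]
    # Interior slab edges fall on multiples of slab_depth strictly inside (start, stop).
    first_edge = (start // slab_depth + 1) * slab_depth
    edges = [start, *range(first_edge, stop, slab_depth), stop]
    for lo, hi in zip(edges[:-1], edges[1:]):
        slab = box.copy()
        slab[:, slab_cutting_axis] = (lo, hi)
        yield slab

Running this sketch against the 'round-in' test above reproduces the expected slabs: the first spans 3 to 10 and the last 40 to 49 along the cutting axis.
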
Code Example #6
    def execute(self):
        self._init_services()
        self._init_masks()
        self._log_neuroglancer_links()
        self._sanitize_config()

        # Aim for 2 GB RDD partitions when loading segmentation
        GB = 2**30
        self.target_partition_size_voxels = 2 * GB // np.uint64().nbytes

        # (See note in _init_services() regarding output bounding boxes)
        input_bb_zyx = self.input_service.bounding_box_zyx
        output_bb_zyx = self.output_service.bounding_box_zyx
        self.translation_offset_zyx = output_bb_zyx[0] - input_bb_zyx[0]
        if self.translation_offset_zyx.any():
            logger.info(
                f"Translation offset is {self.translation_offset_zyx[:, ::-1].tolist()}"
            )

        pyramid_depth = self.config["copysegmentation"]["pyramid-depth"]
        slab_depth = self.config["copysegmentation"]["slab-depth"]

        # Process data in Z-slabs
        output_slab_boxes = list(slabs_from_box(output_bb_zyx, slab_depth))
        max_depth = max(
            map(lambda box: box[1][0] - box[0][0], output_slab_boxes))
        logger.info(
            f"Processing data in {len(output_slab_boxes)} slabs (max depth={max_depth}) for {pyramid_depth} pyramid levels"
        )

        if self.config["copysegmentation"]["compute-block-statistics"]:
            self._init_stats_file()

        # Read data and accumulate statistics, one slab at a time.
        for slab_index, output_slab_box in enumerate(output_slab_boxes):
            with Timer() as timer:
                self._process_slab(slab_index, output_slab_box)
            logger.info(
                f"Slab {slab_index}: Total processing time: {timer.timedelta}")

            delay_minutes = self.config["copysegmentation"][
                "delay-minutes-between-slabs"]
            if delay_minutes > 0 and slab_index != len(output_slab_boxes) - 1:
                logger.info(
                    f"Delaying {delay_minutes} before continuing to next slab..."
                )
                time.sleep(delay_minutes * 60)

        logger.info(f"DONE copying/downsampling all slabs")
Code Example #7
    def execute(self):
        self._init_services()
        self._create_output_instances_if_necessary()
        self._log_neuroglancer_links()
        self._sanitize_config()

        # Aim for 2 GB RDD partitions when loading segmentation
        GB = 2**30
        self.target_partition_size_voxels = 2 * GB // np.uint64().nbytes

        # (See note in _init_services() regarding output bounding boxes)
        input_bb_zyx = self.input_service.bounding_box_zyx
        output_bb_zyx = self.output_services[0].bounding_box_zyx
        self.translation_offset_zyx = output_bb_zyx[0] - input_bb_zyx[0]

        pyramid_depth = self.config_data["options"]["pyramid-depth"]
        slab_depth = self.config_data["options"]["slab-depth"]

        # Process data in Z-slabs
        output_slab_boxes = list( slabs_from_box(output_bb_zyx, slab_depth) )
        max_depth = max(map(lambda box: box[1][0] - box[0][0], output_slab_boxes))
        logger.info(f"Processing data in {len(output_slab_boxes)} slabs (max depth={max_depth}) for {pyramid_depth} pyramid levels")

        if self.config_data["options"]["compute-block-statistics"]:
            self._init_stats_file()

        # Read data and accumulate statistics, one slab at a time.
        for slab_index, output_slab_box in enumerate( output_slab_boxes ):
            with Timer() as timer:
                self._process_slab(slab_index, output_slab_box )
            logger.info(f"Slab {slab_index}: Done copying to {len(self.config_data['outputs'])} destinations.")
            logger.info(f"Slab {slab_index}: Total processing time: {timer.timedelta}")

            delay_minutes = self.config_data["options"]["delay-minutes-between-slabs"]
            if delay_minutes > 0 and slab_index != len(output_slab_boxes)-1:
                logger.info(f"Delaying {delay_minutes} before continuing to next slab...")
                time.sleep(delay_minutes * 60)

        logger.info(f"DONE copying/downsampling all slabs to {len(self.config_data['outputs'])} destinations.")