Code Example #1
    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)

        # Save the axistags as a dataset attribute
        self.d.attrs['axistags'] = self.Image.meta.axistags.toJSON()

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data

        batch_size = None
        if self.BatchSize.ready():
            batch_size = self.BatchSize.value
        requester = BigRequestStreamer(self.Image,
                                       roiFromShape(self.Image.meta.shape),
                                       batchSize=batch_size)
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        # Be paranoid: Flush right now.
        self.f.file.flush()

        # We're finished.
        result[0] = True

        self.progressSignal(100)
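All of these examples lean on lazyflow's roi helpers. The sketch below is an illustration of the conventions the code above assumes, not code from the project: roiFromShape(shape) yields a full-volume (start, stop) pair, and roiToSlice(start, stop) turns such a pair into a slicing tuple suitable for numpy or h5py indexing.

import numpy

def roiFromShape(shape):
    # Full-volume roi: start at the origin, stop at the shape bounds.
    return ([0] * len(shape), list(shape))

def roiToSlice(start, stop):
    # Convert a (start, stop) pair into a tuple of slice objects.
    return tuple(slice(a, b) for a, b in zip(start, stop))

start, stop = roiFromShape((4, 5, 3))
data = numpy.zeros((4, 5, 3))
assert data[roiToSlice(start, stop)].shape == data.shape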
Code Example #2
File: ioOperators.py Project: burcin/lazyflow
    def run_export(self):
        """
        Request the volume in slices (running in parallel), and write each slice to a separate image.
        """
        # Make the directory first if necessary
        export_dir = os.path.split(self.FilepathPattern.value)[0]
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)
        
        # Sliceshape is the same as the input shape, except for the sliced dimension
        tagged_sliceshape = self.Input.meta.getTaggedShape()
        tagged_sliceshape[self._volume_axes[0]] = 1
        slice_shape = list(tagged_sliceshape.values())

        # Use a request streamer to automatically request a constant batch of 4 active requests.
        streamer = BigRequestStreamer( self.Input,
                                       roiFromShape( self.Input.meta.shape ),
                                       slice_shape,
                                       batchSize=4 )

        # Write the slices as they come in (possibly out-of-order, but probably not)
        streamer.resultSignal.subscribe( self._write_slice )
        streamer.progressSignal.subscribe( self.progressSignal )

        logger.debug("Starting Stack Export with slicing shape: {}".format( slice_shape ))
        streamer.execute()
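The _write_slice callback is not shown in this example. A hypothetical sketch of such a callback follows; the filename pattern, the .npy output format, and the assumption that axis 0 is the sliced axis are placeholders for illustration, not the project's actual behavior.

import os
import numpy

def _write_slice(roi, slice_data):
    # Hypothetical: use the roi's start coordinate along the sliced axis
    # (axis 0 here) as the slice index in the output filename.
    slice_index = roi[0][0]
    filepath = "/tmp/export/slice_{:04d}.npy".format(slice_index)
    os.makedirs(os.path.dirname(filepath), exist_ok=True)
    # Drop the singleton sliced dimension and write the slice to disk.
    numpy.save(filepath, numpy.asarray(slice_data).squeeze())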
Code Example #3
    def run_export(self):
        """
        Request the volume in slices (running in parallel), and write each slice to a separate image.
        """
        # Make the directory first if necessary
        export_dir = os.path.split(self.FilepathPattern.value)[0]
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)

        # Sliceshape is the same as the input shape, except for the sliced dimension
        tagged_sliceshape = self.Input.meta.getTaggedShape()
        tagged_sliceshape[self._volume_axes[0]] = 1
        slice_shape = list(tagged_sliceshape.values())

        parallel_requests = 4

        # If ram usage info is available, make a better guess about how many requests we can launch in parallel
        ram_usage_per_requested_pixel = self.Input.meta.ram_usage_per_requested_pixel
        if ram_usage_per_requested_pixel is not None:
            pixels_per_slice = numpy.prod(slice_shape)
            if 'c' in tagged_sliceshape:
                pixels_per_slice //= tagged_sliceshape['c']

            ram_usage_per_slice = pixels_per_slice * ram_usage_per_requested_pixel

            # Fudge factor: Reduce RAM usage by a bit
            available_ram = psutil.virtual_memory().available
            available_ram *= 0.5

            parallel_requests = int(available_ram // ram_usage_per_slice)

            if parallel_requests < 1:
                raise MemoryError(
                    'Not enough RAM to export to the selected format. '
                    'Consider exporting to hdf5 (h5).'
                )

        streamer = BigRequestStreamer( self.Input,
                                       roiFromShape( self.Input.meta.shape ),
                                       slice_shape,
                                       parallel_requests )

        # Write the slices as they come in (possibly out-of-order, but probably not)
        streamer.resultSignal.subscribe( self._write_slice )
        streamer.progressSignal.subscribe( self.progressSignal )

        logger.debug("Starting Stack Export with slicing shape: {}".format( slice_shape ))
        streamer.execute()
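The RAM heuristic above reads as a standalone calculation: bytes per slice = pixels per slice × bytes per requested pixel, then divide half of the available RAM by that figure. Factored out for illustration (an assumption about intent, not code from the project):

import numpy
import psutil

def estimate_parallel_requests(slice_shape, tagged_sliceshape,
                               ram_usage_per_requested_pixel,
                               fudge=0.5, default=4):
    # Fall back to a fixed batch size when no RAM metadata is available.
    if ram_usage_per_requested_pixel is None:
        return default
    pixels_per_slice = numpy.prod(slice_shape)
    # The channel dimension does not count toward "requested pixels".
    if "c" in tagged_sliceshape:
        pixels_per_slice //= tagged_sliceshape["c"]
    ram_usage_per_slice = pixels_per_slice * ram_usage_per_requested_pixel
    # Fudge factor: only plan on using half of the available RAM.
    available_ram = psutil.virtual_memory().available * fudge
    parallel_requests = int(available_ram // ram_usage_per_slice)
    if parallel_requests < 1:
        raise MemoryError("Not enough RAM to export to the selected format.")
    return parallel_requests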
Code Example #4
File: ioOperators.py Project: ilastik/lazyflow
    def run_export(self):
        """
        Request the volume in slices (running in parallel), and write each slice to a separate image.
        """
        # Make the directory first if necessary
        export_dir = os.path.split(self.FilepathPattern.value)[0]
        if not os.path.exists(export_dir):
            os.makedirs(export_dir)

        # Sliceshape is the same as the input shape, except for the sliced dimension
        tagged_sliceshape = self.Input.meta.getTaggedShape()
        tagged_sliceshape[self._volume_axes[0]] = 1
        slice_shape = list(tagged_sliceshape.values())

        parallel_requests = 4

        # If ram usage info is available, make a better guess about how many requests we can launch in parallel
        ram_usage_per_requested_pixel = self.Input.meta.ram_usage_per_requested_pixel
        if ram_usage_per_requested_pixel is not None:
            pixels_per_slice = numpy.prod(slice_shape)
            if "c" in tagged_sliceshape:
                pixels_per_slice //= tagged_sliceshape["c"]

            ram_usage_per_slice = pixels_per_slice * ram_usage_per_requested_pixel

            # Fudge factor: Reduce RAM usage by a bit
            available_ram = psutil.virtual_memory().available
            available_ram *= 0.5

            parallel_requests = int(available_ram // ram_usage_per_slice)

            if parallel_requests < 1:
                raise MemoryError(
                    "Not enough RAM to export to the selected format. " "Consider exporting to hdf5 (h5)."
                )

        streamer = BigRequestStreamer(self.Input, roiFromShape(self.Input.meta.shape), slice_shape, parallel_requests)

        # Write the slices as they come in (possibly out-of-order, but probably not)
        streamer.resultSignal.subscribe(self._write_slice)
        streamer.progressSignal.subscribe(self.progressSignal)

        logger.debug(f"Starting Stack Export with slicing shape: {slice_shape}")
        streamer.execute()
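Every example follows the same subscribe-then-execute pattern: result subscribers are called once per completed block with (roi, data), and progress subscribers receive integer percentages. A minimal mock of the signal mechanism (an illustration only; lazyflow's actual implementation differs) makes that control flow explicit:

class Signal:
    # Minimal publish/subscribe mock: callables registered via subscribe()
    # are invoked in order each time the signal fires.
    def __init__(self):
        self._subscribers = []

    def subscribe(self, fn):
        self._subscribers.append(fn)

    def __call__(self, *args):
        for fn in self._subscribers:
            fn(*args)

progress = Signal()
progress.subscribe(lambda pct: print("progress: {}%".format(pct)))
progress(0)    # prints "progress: 0%"
progress(100)  # prints "progress: 100%"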
Code Example #5
File: ioOperators.py Project: syaffa/ilastik
    def execute(self, slot, subindex, rroi, result):
        self.progressSignal(0)

        # Save the axistags as a dataset attribute
        self.d.attrs["axistags"] = self.Image.meta.axistags.toJSON()
        if isinstance(self.d, h5py.Dataset):
            for index, tag in enumerate(self.Image.meta.axistags):
                self.d.dims[index].label = tag.key
        else:  # if n5 dataset, apply neuroglancer's axes tags convention
            self.d.attrs["axes"] = "".join(
                tag.key for tag in self.Image.meta.axistags)[::-1]
        drange = self.Image.meta.get("drange")
        if drange:
            self.d.attrs["drange"] = drange

        def handle_block_result(roi, data):
            slicing = roiToSlice(*roi)
            if data.flags.c_contiguous:
                self.d.write_direct(data.view(numpy.ndarray), dest_sel=slicing)
            else:
                self.d[slicing] = data

        batch_size = None
        if self.BatchSize.ready():
            batch_size = self.BatchSize.value
        requester = BigRequestStreamer(self.Image,
                                       roiFromShape(self.Image.meta.shape),
                                       batchSize=batch_size)
        requester.resultSignal.subscribe(handle_block_result)
        requester.progressSignal.subscribe(self.progressSignal)
        requester.execute()

        # Be paranoid: Flush right now.
        if isinstance(self.f, h5py.File):
            self.f.file.flush()  # not available in z5py

        # We're finished.
        result[0] = True

        self.progressSignal(100)
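The metadata handling at the top of this example can be exercised in isolation. The sketch below is illustrative (the axis keys are plain strings standing in for vigra axistags): the HDF5 branch labels each dimension through h5py's dimension-scales API, while the n5 branch stores the concatenated axis keys in reverse order, so "tzyxc" metadata becomes the "cxyzt" string neuroglancer expects.

import h5py
import numpy

keys = ["t", "z", "y", "x", "c"]

with h5py.File("/tmp/demo.h5", "w") as f:
    d = f.create_dataset("data", data=numpy.zeros((2, 3, 4, 5, 1)))
    # HDF5 branch: label each dimension via the dimension-scales API.
    for index, key in enumerate(keys):
        d.dims[index].label = key

# n5 branch: concatenate the axis keys and reverse the order.
axes = "".join(keys)[::-1]
assert axes == "cxyzt"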