Example #1
    def __init__(self, *args, **kwargs):
        super(OpBlockedArrayCache, self).__init__(*args, **kwargs)

        # SCHEMATIC WHEN BypassModeEnabled == False:
        #
        # Input ---------> opCacheFixer -> opSimpleBlockedArrayCache -> (indirectly via execute) -> Output
        #                 /               /
        # fixAtCurrent --                /
        #                               /
        # BlockShape -------------------

        # SCHEMATIC WHEN BypassModeEnabled == True:
        #
        # Input --> (indirectly via execute) -> Output

        self._opCacheFixer = OpCacheFixer(parent=self)
        self._opCacheFixer.Input.connect(self.Input)
        self._opCacheFixer.fixAtCurrent.connect(self.fixAtCurrent)

        self._opSimpleBlockedArrayCache = OpSimpleBlockedArrayCache(
            parent=self)
        self._opSimpleBlockedArrayCache.Input.connect(
            self._opCacheFixer.Output)
        self._opSimpleBlockedArrayCache.CompressionEnabled.connect(
            self.CompressionEnabled)
        self._opSimpleBlockedArrayCache.BlockShape.connect(
            self.outerBlockShape)
        self.CleanBlocks.connect(self._opSimpleBlockedArrayCache.CleanBlocks)

        # Instead of connecting our Output directly to our internal pipeline,
        # we manually forward the data via the execute() function, which lets us
        # bypass the internal pipeline entirely when BypassModeEnabled is set.
        #self.Output.connect( self._opSimpleBlockedArrayCache.Output )

        # Since we didn't directly connect the pipeline to our output, explicitly forward dirty notifications
        self._opSimpleBlockedArrayCache.Output.notifyDirty(
            lambda slot, roi: self.Output.setDirty(roi.start, roi.stop))

        # This member is used by tests that check RAM usage.
        self.setup_ram_context = RamMeasurementContext()
        self.registerWithMemoryManager()
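
A minimal, self-contained sketch of the pattern described in the comments above: Output is deliberately not connected to the internal pipeline; instead, execute() forwards each request (which makes a bypass flag possible), and dirty notifications are forwarded by hand. This assumes the standard lazyflow Graph/Operator/slot API; OpForwardWithBypass and _OpPassthrough are hypothetical names used only for illustration.

import numpy
from lazyflow.graph import Graph, Operator, InputSlot, OutputSlot

class _OpPassthrough(Operator):
    # Stand-in for the internal pipeline (illustration only).
    Input = InputSlot()
    Output = OutputSlot()

    def setupOutputs(self):
        self.Output.meta.assignFrom(self.Input.meta)

    def execute(self, slot, subindex, roi, result):
        self.Input(roi.start, roi.stop).writeInto(result).wait()

    def propagateDirty(self, slot, subindex, roi):
        self.Output.setDirty(roi)

class OpForwardWithBypass(Operator):
    Input = InputSlot()
    BypassModeEnabled = InputSlot(value=False)
    Output = OutputSlot()

    def __init__(self, *args, **kwargs):
        super(OpForwardWithBypass, self).__init__(*args, **kwargs)
        self._opInternal = _OpPassthrough(parent=self)
        self._opInternal.Input.connect(self.Input)
        # Output is NOT connected to the pipeline, so forward dirtiness by hand.
        self._opInternal.Output.notifyDirty(
            lambda slot, roi: self.Output.setDirty(roi.start, roi.stop))

    def setupOutputs(self):
        self.Output.meta.assignFrom(self._opInternal.Output.meta)

    def execute(self, slot, subindex, roi, result):
        if self.BypassModeEnabled.value:
            # Bypass: read directly from Input.
            self.Input(roi.start, roi.stop).writeInto(result).wait()
        else:
            # Normal path: read from the internal pipeline.
            self._opInternal.Output(roi.start, roi.stop).writeInto(result).wait()

    def propagateDirty(self, slot, subindex, roi):
        pass  # handled by the notifyDirty callback above

graph = Graph()
op = OpForwardWithBypass(graph=graph)
op.Input.setValue(numpy.arange(16, dtype=numpy.uint8).reshape(4, 4))
assert (op.Output[:].wait() == numpy.arange(16).reshape(4, 4)).all()
op.BypassModeEnabled.setValue(True)  # subsequent requests skip the pipeline
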
Example #3
import copy

from lazyflow.graph import Operator, InputSlot, OutputSlot
from lazyflow.operators.opCache import ManagedBlockedCache
from lazyflow.operators.opCacheFixer import OpCacheFixer
from lazyflow.operators.opSimpleBlockedArrayCache import OpSimpleBlockedArrayCache
# Note: module paths above assume the lazyflow package layout.  RamMeasurementContext
# (used in __init__) is assumed to come from the surrounding project and is not imported here.


class OpBlockedArrayCache(Operator, ManagedBlockedCache):
    """
    A blockwise array cache designed to replace the old OpBlockedArrayCache.  
    Instead of a monolithic implementation, this operator is a small pipeline of three simple operators.
    
    The actual caching of data is handled by an unblocked cache, so the "blocked" functionality is 
    implemented via separate "splitting" operator that comes after the cache.
    Also, the "fixAtCurrent" feature is implemented in a special operator, which comes before the cache.    
    """
    fixAtCurrent = InputSlot(value=False)
    Input = InputSlot(allow_mask=True)
    #BlockShape = InputSlot()
    # If not provided, outerBlockShape will be set to Input.meta.shape.
    outerBlockShape = InputSlot(optional=True)
    BypassModeEnabled = InputSlot(value=False)
    CompressionEnabled = InputSlot(value=False)

    Output = OutputSlot(allow_mask=True)
    # A list of slicings indicating which blocks are stored in the cache and clean.
    CleanBlocks = OutputSlot()

    innerBlockShape = InputSlot(optional=True)  # Deprecated and ignored below.

    def __init__(self, *args, **kwargs):
        super(OpBlockedArrayCache, self).__init__(*args, **kwargs)

        # SCHEMATIC WHEN BypassModeEnabled == False:
        #
        # Input ---------> opCacheFixer -> opSimpleBlockedArrayCache -> (indirectly via execute) -> Output
        #                 /               /
        # fixAtCurrent --                /
        #                               /
        # BlockShape -------------------

        # SCHEMATIC WHEN BypassModeEnabled == True:
        #
        # Input --> (indirectly via execute) -> Output

        self._opCacheFixer = OpCacheFixer(parent=self)
        self._opCacheFixer.Input.connect(self.Input)
        self._opCacheFixer.fixAtCurrent.connect(self.fixAtCurrent)

        self._opSimpleBlockedArrayCache = OpSimpleBlockedArrayCache(
            parent=self)
        self._opSimpleBlockedArrayCache.Input.connect(
            self._opCacheFixer.Output)
        self._opSimpleBlockedArrayCache.CompressionEnabled.connect(
            self.CompressionEnabled)
        self._opSimpleBlockedArrayCache.BlockShape.connect(
            self.outerBlockShape)
        self.CleanBlocks.connect(self._opSimpleBlockedArrayCache.CleanBlocks)

        # Instead of connecting our Output directly to our internal pipeline,
        # we manually forward the data via the execute() function, which lets us
        # bypass the internal pipeline entirely when BypassModeEnabled is set.
        #self.Output.connect( self._opSimpleBlockedArrayCache.Output )

        # Since we didn't directly connect the pipeline to our output, explicitly forward dirty notifications
        self._opSimpleBlockedArrayCache.Output.notifyDirty(
            lambda slot, roi: self.Output.setDirty(roi.start, roi.stop))

        # This member is used by tests that check RAM usage.
        self.setup_ram_context = RamMeasurementContext()
        self.registerWithMemoryManager()

    def setupOutputs(self):
        if not self.outerBlockShape.ready():
            self.outerBlockShape.setValue(self.Input.meta.shape)
        # Copy metadata from the internal pipeline to the output
        self.Output.meta.assignFrom(
            self._opSimpleBlockedArrayCache.Output.meta)

    def execute(self, slot, subindex, roi, result):
        assert slot is self.Output, "Requesting data from unknown output slot."
        if self.BypassModeEnabled.value:
            # Pass data directly from Input to Output
            self.Input(roi.start, roi.stop).writeInto(result).wait()
        else:
            # Pass data from internal pipeline to Output
            self._opSimpleBlockedArrayCache.Output(
                roi.start, roi.stop).writeInto(result).wait()

    def propagateDirty(self, slot, subindex, roi):
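        # Nothing to do here: dirtiness from the inputs flows through the internal
        # pipeline and is forwarded to Output by the notifyDirty callback in __init__.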
        pass

    def setInSlot(self, slot, subindex, key, value):
        pass  # Nothing to do here: Input is connected to an internal operator

    # ======= mimic cache interface for wrapping operators =======

    def usedMemory(self):
        return self._opSimpleBlockedArrayCache.usedMemory()

    def fractionOfUsedMemoryDirty(self):
        # dirty memory is discarded immediately
        return self._opSimpleBlockedArrayCache.fractionOfUsedMemoryDirty()

    def lastAccessTime(self):
        return self._opSimpleBlockedArrayCache.lastAccessTime()

    def getBlockAccessTimes(self):
        return self._opSimpleBlockedArrayCache.getBlockAccessTimes()

    def freeMemory(self):
        return self._opSimpleBlockedArrayCache.freeMemory()

    def freeBlock(self, key):
        return self._opSimpleBlockedArrayCache.freeBlock(key)

    def freeDirtyMemory(self):
        return self._opSimpleBlockedArrayCache.freeDirtyMemory()

    def generateReport(self, report):
        self._opSimpleBlockedArrayCache.generateReport(report)
        child = copy.copy(report)
        super(OpBlockedArrayCache, self).generateReport(report)
        report.children.append(child)
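
A minimal usage sketch for the class above, assuming the standard lazyflow Graph and slot API; the input shape, block shape, and values here are arbitrary:

import numpy
from lazyflow.graph import Graph

graph = Graph()
op = OpBlockedArrayCache(graph=graph)
op.outerBlockShape.setValue((20, 20))   # cache in 20x20 blocks
op.CompressionEnabled.setValue(True)    # store cached blocks compressed
op.Input.setValue(numpy.random.randint(0, 255, (100, 100)).astype(numpy.uint8))

# The first request fills the touched blocks; later requests for the same
# region are served from the cache.
data = op.Output[0:40, 0:40].wait()

# CleanBlocks reports slicings for the blocks that are currently cached and clean.
clean_blocks = op.CleanBlocks.value

# With BypassModeEnabled set, execute() forwards requests straight from Input.
op.BypassModeEnabled.setValue(True)
passthrough = op.Output[0:40, 0:40].wait()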