class OpRefactoredBlockedArrayCache(Operator, ManagedBlockedCache):
    """
    A blockwise array cache designed to replace the old OpBlockedArrayCache.
    Instead of a monolithic implementation, this operator is a small pipeline of three simple operators.

    The actual caching of data is handled by an unblocked cache, so the "blocked" functionality is
    implemented via a separate "splitting" operator that comes after the cache.  Also, the "fixAtCurrent"
    feature is implemented in a special operator, which comes before the cache.
    """
    Input = InputSlot(allow_mask=True)
    fixAtCurrent = InputSlot(value=False)
    #BlockShape = InputSlot()
    innerBlockShape = InputSlot(optional=True)
    outerBlockShape = InputSlot()
    Output = OutputSlot(allow_mask=True)

    def __init__(self, *args, **kwargs):
        super(OpRefactoredBlockedArrayCache, self).__init__(*args, **kwargs)

        # Input ---------> opCacheFixer -> opUnblockedArrayCache -> opSplitRequestsBlockwise -> Output
        #                 /                                          /
        # fixAtCurrent --                                           /
        #                                                          /
        # BlockShape --------------------------------------------

        self._opCacheFixer = OpCacheFixer(parent=self)
        self._opCacheFixer.Input.connect(self.Input)
        self._opCacheFixer.fixAtCurrent.connect(self.fixAtCurrent)

        self._opUnblockedArrayCache = OpUnblockedArrayCache(parent=self)
        self._opUnblockedArrayCache.Input.connect(self._opCacheFixer.Output)

        self._opSplitRequestsBlockwise = OpSplitRequestsBlockwise(always_request_full_blocks=True, parent=self)
        self._opSplitRequestsBlockwise.BlockShape.connect(self.outerBlockShape)
        self._opSplitRequestsBlockwise.Input.connect(self._opUnblockedArrayCache.Output)

        self.Output.connect(self._opSplitRequestsBlockwise.Output)

        # This member is used by tests that check RAM usage.
        self.setup_ram_context = RamMeasurementContext()

        self.registerWithMemoryManager()

    def setupOutputs(self):
        pass

    def execute(self, slot, subindex, roi, result):
        # Output is connected directly to the internal pipeline, so requests never reach this operator.
        assert False, "Shouldn't get here."

    def propagateDirty(self, slot, subindex, roi):
        pass

    # ======= mimic cache interface for wrapping operators =======

    def usedMemory(self):
        return self._opUnblockedArrayCache.usedMemory()

    def fractionOfUsedMemoryDirty(self):
        # dirty memory is discarded immediately
        return self._opUnblockedArrayCache.fractionOfUsedMemoryDirty()

    def lastAccessTime(self):
        return self._opUnblockedArrayCache.lastAccessTime()

    def getBlockAccessTimes(self):
        return self._opUnblockedArrayCache.getBlockAccessTimes()

    def freeMemory(self):
        return self._opUnblockedArrayCache.freeMemory()

    def freeBlock(self, key):
        return self._opUnblockedArrayCache.freeBlock(key)

    def freeDirtyMemory(self):
        return self._opUnblockedArrayCache.freeDirtyMemory()

    def generateReport(self, report):
        self._opUnblockedArrayCache.generateReport(report)
        child = copy.copy(report)
        super(OpRefactoredBlockedArrayCache, self).generateReport(report)
        report.children.append(child)
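
# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of wiring OpRefactoredBlockedArrayCache into a lazyflow graph,
# assuming the standard lazyflow Graph API and an in-memory numpy input.  The data
# shape and block shape below are illustrative placeholders only.
#
#   import numpy
#   from lazyflow.graph import Graph
#
#   op = OpRefactoredBlockedArrayCache(graph=Graph())
#   op.Input.setValue(numpy.zeros((1, 100, 100, 100, 1), dtype=numpy.float32))
#   op.outerBlockShape.setValue((1, 32, 32, 32, 1))    # block shape used by opSplitRequestsBlockwise
#   block = op.Output[:, 0:32, 0:32, 0:32, :].wait()   # first request fills the cache; repeats hit it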
class OpBlockedArrayCache(Operator, ManagedBlockedCache):
    """
    A blockwise array cache that replaces the old monolithic OpBlockedArrayCache.
    Instead of a monolithic implementation, this operator is a small pipeline of three simple operators.

    The actual caching of data is handled by an unblocked cache, so the "blocked" functionality is
    implemented via a separate "splitting" operator that comes after the cache.  Also, the "fixAtCurrent"
    feature is implemented in a special operator, which comes before the cache.
    """
    fixAtCurrent = InputSlot(value=False)
    Input = InputSlot(allow_mask=True)
    #BlockShape = InputSlot()
    innerBlockShape = InputSlot(optional=True)  # Deprecated and ignored below.
    outerBlockShape = InputSlot()
    BypassModeEnabled = InputSlot(value=False)
    CompressionEnabled = InputSlot(value=False)
    Output = OutputSlot(allow_mask=True)

    def __init__(self, *args, **kwargs):
        super(OpBlockedArrayCache, self).__init__(*args, **kwargs)

        # SCHEMATIC WHEN BypassModeEnabled == False:
        #
        # Input ---------> opCacheFixer -> opUnblockedArrayCache -> opSplitRequestsBlockwise -> (indirectly via execute) -> Output
        #                 /                                          /
        # fixAtCurrent --                                           /
        #                                                          /
        # BlockShape --------------------------------------------

        # SCHEMATIC WHEN BypassModeEnabled == True:
        #
        # Input --> (indirectly via execute) -> Output

        self._opCacheFixer = OpCacheFixer(parent=self)
        self._opCacheFixer.Input.connect(self.Input)
        self._opCacheFixer.fixAtCurrent.connect(self.fixAtCurrent)

        self._opUnblockedArrayCache = OpUnblockedArrayCache(parent=self)
        self._opUnblockedArrayCache.CompressionEnabled.connect(self.CompressionEnabled)
        self._opUnblockedArrayCache.Input.connect(self._opCacheFixer.Output)

        self._opSplitRequestsBlockwise = OpSplitRequestsBlockwise(always_request_full_blocks=True, parent=self)
        self._opSplitRequestsBlockwise.BlockShape.connect(self.outerBlockShape)
        self._opSplitRequestsBlockwise.Input.connect(self._opUnblockedArrayCache.Output)

        # Instead of connecting our Output directly to the internal pipeline,
        # we forward the data manually in execute(), which lets us bypass the
        # pipeline entirely when BypassModeEnabled is True.
        #self.Output.connect(self._opSplitRequestsBlockwise.Output)

        # Since the pipeline isn't connected directly to our Output, explicitly forward dirty notifications.
        self._opSplitRequestsBlockwise.Output.notifyDirty(
            lambda slot, roi: self.Output.setDirty(roi.start, roi.stop))

        # This member is used by tests that check RAM usage.
        self.setup_ram_context = RamMeasurementContext()

        self.registerWithMemoryManager()

    def setupOutputs(self):
        # Copy metadata from the internal pipeline to the output
        self.Output.meta.assignFrom(self._opSplitRequestsBlockwise.Output.meta)

    def execute(self, slot, subindex, roi, result):
        assert slot is self.Output, "Requesting data from unknown output slot."
        if self.BypassModeEnabled.value:
            # Pass data directly from Input to Output
            self.Input(roi.start, roi.stop).writeInto(result).wait()
        else:
            # Pass data from the internal pipeline to Output
            self._opSplitRequestsBlockwise.Output(roi.start, roi.stop).writeInto(result).wait()

    def propagateDirty(self, slot, subindex, roi):
        # Dirtiness is forwarded via the notifyDirty callback registered in __init__().
        pass

    # ======= mimic cache interface for wrapping operators =======

    def usedMemory(self):
        return self._opUnblockedArrayCache.usedMemory()

    def fractionOfUsedMemoryDirty(self):
        # dirty memory is discarded immediately
        return self._opUnblockedArrayCache.fractionOfUsedMemoryDirty()

    def lastAccessTime(self):
        return self._opUnblockedArrayCache.lastAccessTime()

    def getBlockAccessTimes(self):
        return self._opUnblockedArrayCache.getBlockAccessTimes()

    def freeMemory(self):
        return self._opUnblockedArrayCache.freeMemory()

    def freeBlock(self, key):
        return self._opUnblockedArrayCache.freeBlock(key)

    def freeDirtyMemory(self):
        return self._opUnblockedArrayCache.freeDirtyMemory()

    def generateReport(self, report):
        self._opUnblockedArrayCache.generateReport(report)
        child = copy.copy(report)
        super(OpBlockedArrayCache, self).generateReport(report)
        report.children.append(child)
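
# --- Hypothetical usage sketch (not part of the original module) ---
# Illustrates the extra slots of OpBlockedArrayCache: CompressionEnabled is forwarded to the
# internal OpUnblockedArrayCache, and BypassModeEnabled makes execute() forward requests
# straight from Input to Output, skipping the cache pipeline.  'upstream' is a placeholder
# for any operator with a compatible Output slot; the block shape is illustrative only.
#
#   op = OpBlockedArrayCache(graph=Graph())
#   op.Input.connect(upstream.Output)
#   op.outerBlockShape.setValue((1, 64, 64, 64, 1))
#   op.CompressionEnabled.setValue(True)     # store cached blocks in compressed form
#   op.BypassModeEnabled.setValue(False)     # set True to disable caching entirely
#   data = op.Output[:, 0:64, 0:64, 0:64, :].wait()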