def setupOutputs(self):
    """Rebuild the internal compressed cache and wire it between _op1 and _op2.

    Disconnects any previous internals, creates a fresh OpCompressedCache,
    routes this operator's outputs (CleanBlocks, OutputHdf5) and inputs
    (InputHdf5) through it, and chooses a block shape: one time slice and
    one channel per block, with the spatial extent capped at 256 per axis
    when the lazy labeling implementation is selected.
    """
    self._disconnectInternals()

    # we need a new cache
    cache = OpCompressedCache(parent=self)
    cache.name = self.name + "WrappedCache"

    # connect cache outputs
    self.CleanBlocks.connect(cache.CleanBlocks)
    self.OutputHdf5.connect(cache.OutputHdf5)
    self._op2.Input.connect(cache.Output)

    # connect cache inputs
    cache.InputHdf5.connect(self.InputHdf5)
    cache.Input.connect(self._op1.Output)

    # set the cache block shape: one time slice / one channel per block
    # (assumes the upstream output has all of the 'xyzct' axes — TODO confirm)
    tagged_shape = self._op1.Output.meta.getTaggedShape()
    tagged_shape['t'] = 1
    tagged_shape['c'] = 1
    # FIX: use a list comprehension instead of map(). Under Python 3, map()
    # returns a lazy iterator, which numpy.minimum() cannot broadcast over
    # elementwise — the original code silently broke there.
    cacheshape = [tagged_shape[k] for k in 'xyzct']
    if _labeling_impl == "lazy":
        # HACK hardcoded block shape for the lazy implementation
        blockshape = numpy.minimum(cacheshape, 256)
    else:
        # use full spatial volume if not lazy
        blockshape = cacheshape
    cache.BlockShape.setValue(tuple(blockshape))
    self._cache = cache
def setupOutputs(self):
    """Recreate the wrapped compressed cache and connect it between _op1 and _op2.

    Tears down the previous internal wiring, instantiates a new
    OpCompressedCache, exposes its CleanBlocks/OutputHdf5 outputs through
    this operator, feeds it from InputHdf5 and the upstream _op1 output,
    and sets a block shape covering the full spatial volume with a single
    time slice and a single channel.
    """
    self._disconnectInternals()

    # a fresh cache operator replaces the previous one
    wrapped = OpCompressedCache(parent=self)
    wrapped.name = self.name + "WrappedCache"

    # expose the cache's outputs through this operator
    self.CleanBlocks.connect(wrapped.CleanBlocks)
    self.OutputHdf5.connect(wrapped.OutputHdf5)
    self._op2.Input.connect(wrapped.Output)

    # feed the cache from our serialization input and the upstream operator
    wrapped.InputHdf5.connect(self.InputHdf5)
    wrapped.Input.connect(self._op1.Output)

    # block shape: full spatial extent, single time slice and channel
    shape_by_axis = self._op1.Output.meta.getTaggedShape()
    shape_by_axis['t'] = 1
    shape_by_axis['c'] = 1
    blockshape = tuple(shape_by_axis[axis] for axis in 'xyzct')
    wrapped.BlockShape.setValue(blockshape)
    self._cache = wrapped