def propagateDirty(self, slot, subindex, roi):
    shape = self.Input.meta.shape
    key = roi.toSlice()
    if slot == self.inputs["Input"]:
        start, stop = sliceToRoi(key, shape)

        with self._lock:
            if self._blockState is not None:
                blockStart = numpy.floor(1.0 * start / self._blockShape)
                blockStop = numpy.ceil(1.0 * stop / self._blockShape)
                blockKey = roiToSlice(blockStart, blockStop)
                if self._fixed:
                    # Remember that this block became dirty while we were fixed
                    # so we can notify downstream operators when we become unfixed.
                    self._blockState[blockKey] = OpArrayCache.FIXED_DIRTY
                    self._has_fixed_dirty_blocks = True
                else:
                    self._blockState[blockKey] = OpArrayCache.DIRTY

        if not self._fixed:
            self.outputs["Output"].setDirty(key)
    if slot == self.inputs["fixAtCurrent"]:
        if self.inputs["fixAtCurrent"].ready():
            self._fixed = self.inputs["fixAtCurrent"].value
            if not self._fixed and self.Output.meta.shape is not None and self._has_fixed_dirty_blocks:
                # We've become unfixed, so we need to notify downstream
                # operators of every block that became dirty while we were fixed.
                # Convert all FIXED_DIRTY states into DIRTY states.
                with self._lock:
                    cond = (self._blockState[...] == OpArrayCache.FIXED_DIRTY)
                    self._blockState[...] = fastWhere(cond, OpArrayCache.DIRTY,
                                                      self._blockState, numpy.uint8)
                    self._has_fixed_dirty_blocks = False
                newDirtyBlocks = numpy.transpose(numpy.nonzero(cond))

                # To avoid lots of setDirty notifications, we simply merge all the
                # dirty blocks into one single superblock.  This should be the best
                # option in most cases, but could be bad in some cases.
                # TODO: Optimize this by merging the dirty blocks via connected
                #       components or something similar.
                cacheShape = numpy.array(self.Output.meta.shape)
                dirtyStart = cacheShape
                dirtyStop = [0] * len(cacheShape)
                for index in newDirtyBlocks:
                    blockStart = index * self._blockShape
                    blockStop = numpy.minimum(blockStart + self._blockShape, cacheShape)
                    dirtyStart = numpy.minimum(dirtyStart, blockStart)
                    dirtyStop = numpy.maximum(dirtyStop, blockStop)

                if len(newDirtyBlocks) > 0:
                    self.Output.setDirty(dirtyStart, dirtyStop)
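# Standalone worked example (not part of the operator) of the block rounding
# used in propagateDirty above: flooring the roi start and ceiling the roi stop
# guarantees that every block the roi even partially touches gets marked.
# The shapes and values below are made up for demonstration.
import numpy

start, stop = numpy.array([100, 0]), numpy.array([130, 65])
blockShape = numpy.array([64, 64])
blockStart = numpy.floor(1.0 * start / blockShape)  # -> [1., 0.]
blockStop = numpy.ceil(1.0 * stop / blockShape)     # -> [3., 2.]
# Blocks [1,3) x [0,2) are marked: the roi only grazes block row 2 and
# block column 1, but partial overlap still dirties the whole block.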
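# fastWhere() is used throughout this file but is not shown in this excerpt.
# The sketch below is a hypothetical stand-in inferred from its call sites
# (condition, value-if-true, value-if-false, dtype); the real helper may be
# implemented differently for speed.
import numpy

def fastWhere(cond, valueIfTrue, valueIfFalse, dtype):
    # Behaves like numpy.where, but returns an array of the requested dtype.
    # Either value argument may be a scalar or an array of cond's shape.
    return numpy.where(cond, valueIfTrue, valueIfFalse).astype(dtype)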
def _executeOutput(self, slot, subindex, roi, result):
    t = time.time()
    key = roi.toSlice()

    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    with self._lock:
        ch = self._cacheHits
        ch += 1
        self._cacheHits = ch

        self._running += 1

        if self._cache is None or self._cache.shape != self.Output.meta.shape:
            self._allocateCache()

        # prevent freeing of cache while this function is running
        cacheView = self._cache[:]

        blockStart = (1.0 * start / self._blockShape).floor()
        blockStop = (1.0 * stop / self._blockShape).ceil()
        blockKey = roiToSlice(blockStart, blockStop)

        blockSet = self._blockState[blockKey]

        # this is a little optimization to shortcut
        # many lines of python code when all data is
        # already in the cache:
        if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                            blockSet == OpArrayCache.FIXED_DIRTY).all():
            cache_result = self._cache[roiToSlice(start, stop)]
            self.Output.stype.copy_data(result, cache_result)
            self._running -= 1
            self._updatePriority()
            cacheView = None
            return

        extracted = numpy.extract(blockSet == OpArrayCache.IN_PROCESS,
                                  self._blockQuery[blockKey])
        inProcessQueries = numpy.unique(extracted)

        cond = (blockSet == OpArrayCache.DIRTY)
        tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
        trueDirtyIndices = numpy.nonzero(cond)

        if has_drtile:
            tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)
        else:
            # Without drtile, fall back to one single-block tile per dirty block.
            tileStartArray = numpy.array(trueDirtyIndices)
            tileStopArray = 1 + tileStartArray
            tileArray = numpy.concatenate((tileStartArray, tileStopArray), axis=0)

        dirtyRois = []
        half = tileArray.shape[0] // 2
        dirtyPool = RequestPool()

        for i in range(tileArray.shape[1]):
            drStart3 = tileArray[:half, i]
            drStop3 = tileArray[half:, i]
            drStart2 = drStart3 + blockStart
            drStop2 = drStop3 + blockStart
            drStart = drStart2 * self._blockShape
            drStop = drStop2 * self._blockShape
            shape = self.Output.meta.shape
            drStop = numpy.minimum(drStop, shape)
            drStart = numpy.minimum(drStart, shape)

            key2 = roiToSlice(drStart2, drStop2)
            key = roiToSlice(drStart, drStop)

            if not self._fixed:
                dirtyRois.append([drStart, drStop])

                req = self.inputs["Input"][key].writeInto(self._cache[key])
                req.uncancellable = True  # FIXME
                dirtyPool.add(req)

                self._blockQuery[key2] = weakref.ref(req)

                # sanity check:
                if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                    logger.warning("original condition" + str(cond))
                    logger.warning("original tilearray {} {}".format(
                        tileArray, tileArray.shape))
                    logger.warning("original tileWeights {} {}".format(
                        tileWeights, tileWeights.shape))
                    logger.warning("sub condition {}".format(
                        self._blockState[key2] == OpArrayCache.DIRTY))
                    logger.warning("START={}, STOP={}".format(drStart2, drStop2))
                    import h5py
                    with h5py.File("test.h5", "w") as f:
                        f.create_dataset("data", data=tileWeights)
                    logger.warning("%r \n %r \n %r\n %r\n %r \n%r"
                                   % (key2, blockKey, self._blockState[key2],
                                      self._blockState[blockKey][trueDirtyIndices],
                                      self._blockState[blockKey], tileWeights))
                    assert False

                self._blockState[key2] = OpArrayCache.IN_PROCESS

        # indicate the in-process state by setting the state array to 0 (i.e. IN_PROCESS)
        if not self._fixed:
            blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet, numpy.uint8)
        else:
            # Someone asked for some dirty blocks while we were fixed.
            # Mark these blocks to be signaled as dirty when we become unfixed.
            blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet, numpy.uint8)
            self._has_fixed_dirty_blocks = True

    temp = itertools.count(0)

    # wait for all requests to finish
    something_updated = len(dirtyPool) > 0
    dirtyPool.wait()
    if something_updated:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread.
        self.Output._sig_value_changed()

    # indicate the finished in-process state (i.e. CLEAN)
    if not self._fixed and next(temp) == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet, numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(cond, None,
                                                   self._blockQuery[blockKey], object)

    # Wait for all in-process queries.
    # Can't use RequestPool here because these requests have already started.
    for req in inProcessQueries:
        req = req()  # get original req object from weakref
        if req is not None:
            req.wait()

    # finally, store results in result area
    with self._lock:
        if self._cache is not None:
            cache_result = self._cache[roiToSlice(start, stop)]
            self.Output.stype.copy_data(result, cache_result)
        else:
            self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
        self._running -= 1
        self._updatePriority()
        cacheView = None

    self.logger.debug("read %s took %f sec." % (roi.pprint(), time.time() - t))
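# Standalone illustration of the tileArray layout consumed by the loop above,
# built here with the no-drtile fallback branch from the code: one single-block
# tile per dirty block, with start coordinates stacked on top of stop
# coordinates. (That drtile's output shares this layout is an inference from
# how both branches are consumed.) Shapes and values are illustrative only.
import numpy

DIRTY = 1  # illustrative state value
blockState = numpy.zeros((2, 4, 8), numpy.uint8)
blockState[0, 2, 5] = DIRTY
blockState[1, 2, 5] = DIRTY

cond = (blockState == DIRTY)
tileStartArray = numpy.array(numpy.nonzero(cond))  # shape (3, 2): tile starts
tileStopArray = 1 + tileStartArray                 # shape (3, 2): tile stops
tileArray = numpy.concatenate((tileStartArray, tileStopArray), axis=0)

half = tileArray.shape[0] // 2
print(tileArray[:half, 0])  # [0 2 5] -- first tile's block start
print(tileArray[half:, 0])  # [1 3 6] -- first tile's block stop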
def _executeOutput(self, slot, subindex, roi, result):
    t = time.time()
    key = roi.toSlice()

    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    self._lock.acquire()

    ch = self._cacheHits
    ch += 1
    self._cacheHits = ch

    self._running += 1

    if self._cache is None:
        self._allocateCache()

    # prevent freeing of cache while this function is running
    cacheView = self._cache[:]

    blockStart = (1.0 * start / self._blockShape).floor()
    blockStop = (1.0 * stop / self._blockShape).ceil()
    blockKey = roiToSlice(blockStart, blockStop)

    blockSet = self._blockState[blockKey]

    # this is a little optimization to shortcut
    # many lines of python code when all data is
    # already in the cache:
    if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                        blockSet == OpArrayCache.FIXED_DIRTY).all():
        result[:] = self._cache[roiToSlice(start, stop)]
        self._running -= 1
        self._updatePriority()
        cacheView = None
        self._lock.release()
        return

    inProcessQueries = numpy.unique(numpy.extract(
        blockSet == OpArrayCache.IN_PROCESS, self._blockQuery[blockKey]))

    cond = (blockSet == OpArrayCache.DIRTY)
    tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
    trueDirtyIndices = numpy.nonzero(cond)

    tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)

    dirtyRois = []
    half = tileArray.shape[0] / 2
    dirtyPool = RequestPool()

    for i in range(tileArray.shape[1]):
        drStart3 = tileArray[:half, i]
        drStop3 = tileArray[half:, i]
        drStart2 = drStart3 + blockStart
        drStop2 = drStop3 + blockStart
        drStart = drStart2 * self._blockShape
        drStop = drStop2 * self._blockShape
        shape = self.Output.meta.shape
        drStop = numpy.minimum(drStop, shape)
        drStart = numpy.minimum(drStart, shape)

        key3 = roiToSlice(drStart3, drStop3)
        key2 = roiToSlice(drStart2, drStop2)
        key = roiToSlice(drStart, drStop)

        if not self._fixed:
            dirtyRois.append([drStart, drStop])

            req = self.inputs["Input"][key].writeInto(self._cache[key])
            req.uncancellable = True  # FIXME
            dirtyPool.add(req)

            self._blockQuery[key2] = weakref.ref(req)

            # sanity check:
            if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                logger.warning("original condition" + str(cond))
                logger.warning("original tilearray {} {}".format(
                    tileArray, tileArray.shape))
                logger.warning("original tileWeights {} {}".format(
                    tileWeights, tileWeights.shape))
                logger.warning("sub condition {}".format(
                    self._blockState[key2] == OpArrayCache.DIRTY))
                logger.warning("START={}, STOP={}".format(drStart2, drStop2))
                import h5py
                with h5py.File("test.h5", "w") as f:
                    f.create_dataset("data", data=tileWeights)
                logger.warning("%r \n %r \n %r\n %r\n %r \n%r"
                               % (key2, blockKey, self._blockState[key2],
                                  self._blockState[blockKey][trueDirtyIndices],
                                  self._blockState[blockKey], tileWeights))
                assert False

            self._blockState[key2] = OpArrayCache.IN_PROCESS

    # indicate the in-process state by setting the state array to 0 (i.e. IN_PROCESS)
    if not self._fixed:
        blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet, numpy.uint8)
    else:
        # Someone asked for some dirty blocks while we were fixed.
        # Mark these blocks to be signaled as dirty when we become unfixed.
        blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet, numpy.uint8)
        self._has_fixed_dirty_blocks = True
    self._lock.release()

    temp = itertools.count(0)

    # wait for all requests to finish
    dirtyPool.wait()
    if len(dirtyPool) > 0:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread.
        self.Output._sig_value_changed()

    dirtyPool.clean()

    # indicate the finished in-process state (i.e. CLEAN)
    if not self._fixed and temp.next() == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet, numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(cond, None,
                                                   self._blockQuery[blockKey], object)

    inProcessPool = RequestPool()
    # wait for all in-process queries
    for req in inProcessQueries:
        req = req()  # get original req object from weakref
        if req is not None:
            inProcessPool.add(req)

    inProcessPool.wait()
    inProcessPool.clean()

    # finally, store results in result area
    self._lock.acquire()
    if self._cache is not None:
        result[:] = self._cache[roiToSlice(start, stop)]
    else:
        self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
    self._running -= 1
    self._updatePriority()
    cacheView = None
    self._lock.release()

    self.logger.debug("read %s took %f sec." % (roi.pprint(), time.time() - t))
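# Minimal standalone demonstration (CPython-specific collection timing) of the
# weakref pattern used for the _blockQuery entries above: the cache stores weak
# references so completed requests can be garbage collected, and a dereference
# that returns None means there is nothing left to wait for.
import weakref

class _FakeRequest(object):
    def wait(self):
        pass

req = _FakeRequest()
ref = weakref.ref(req)
assert ref() is req   # request still alive: dereference it and wait on it
del req
assert ref() is None  # request collected: skip it, as the loops above do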
def _executeOutput(self, slot, subindex, roi, result):
    key = roi.toSlice()

    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    self.traceLogger.debug("Acquiring ArrayCache lock...")
    self._lock.acquire()
    self.traceLogger.debug("ArrayCache lock acquired.")

    ch = self._cacheHits
    ch += 1
    self._cacheHits = ch

    self._running += 1

    if self._cache is None:
        self._allocateCache()

    # prevent freeing of cache while this function is running
    cacheView = self._cache[:]

    blockStart = (1.0 * start / self._blockShape).floor()
    blockStop = (1.0 * stop / self._blockShape).ceil()
    blockKey = roiToSlice(blockStart, blockStop)

    blockSet = self._blockState[blockKey]

    # this is a little optimization to shortcut
    # many lines of python code when all data is
    # already in the cache:
    if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                        blockSet == OpArrayCache.FIXED_DIRTY).all():
        result[:] = self._cache[roiToSlice(start, stop)]
        self._running -= 1
        self._updatePriority()
        cacheView = None
        self._lock.release()
        return

    inProcessQueries = numpy.unique(numpy.extract(
        blockSet == OpArrayCache.IN_PROCESS, self._blockQuery[blockKey]))

    cond = (blockSet == OpArrayCache.DIRTY)
    tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
    trueDirtyIndices = numpy.nonzero(cond)

    tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)

    dirtyRois = []
    half = tileArray.shape[0] / 2
    dirtyPool = RequestPool()

    def onCancel(req):
        return False  # indicate that this request cannot be canceled

    self.traceLogger.debug("Creating cache input requests")
    for i in range(tileArray.shape[1]):
        drStart3 = tileArray[:half, i]
        drStop3 = tileArray[half:, i]
        drStart2 = drStart3 + blockStart
        drStop2 = drStop3 + blockStart
        drStart = drStart2 * self._blockShape
        drStop = drStop2 * self._blockShape
        shape = self.Output.meta.shape
        drStop = numpy.minimum(drStop, shape)
        drStart = numpy.minimum(drStart, shape)

        key3 = roiToSlice(drStart3, drStop3)
        key2 = roiToSlice(drStart2, drStop2)
        key = roiToSlice(drStart, drStop)

        if not self._fixed:
            dirtyRois.append([drStart, drStop])

            req = self.inputs["Input"][key].writeInto(self._cache[key])
            req.uncancellable = True  # FIXME
            dirtyPool.add(req)

            self._blockQuery[key2] = weakref.ref(req)

            # sanity check:
            if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                print "original condition", cond
                print "original tilearray", tileArray, tileArray.shape
                print "original tileWeights", tileWeights, tileWeights.shape
                print "sub condition", self._blockState[key2] == OpArrayCache.DIRTY
                print "START, STOP", drStart2, drStop2
                import h5py
                with h5py.File("test.h5", "w") as f:
                    f.create_dataset("data", data=tileWeights)
                print "%r \n %r \n %r\n %r\n %r \n%r" % (
                    key2, blockKey, self._blockState[key2],
                    self._blockState[blockKey][trueDirtyIndices],
                    self._blockState[blockKey], tileWeights)
                assert False

            self._blockState[key2] = OpArrayCache.IN_PROCESS

    # indicate the in-process state by setting the state array to 0 (i.e. IN_PROCESS)
    if not self._fixed:
        blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet, numpy.uint8)
    else:
        # Someone asked for some dirty blocks while we were fixed.
        # Mark these blocks to be signaled as dirty when we become unfixed.
        blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet, numpy.uint8)
        self._has_fixed_dirty_blocks = True
    self._lock.release()

    temp = itertools.count(0)

    # wait for all requests to finish
    self.traceLogger.debug("Firing all {} cache input requests...".format(len(dirtyPool)))
    dirtyPool.wait()
    if len(dirtyPool) > 0:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread.
        self.Output._sig_value_changed()

    dirtyPool.clean()
    self.traceLogger.debug("All cache input requests received.")

    # indicate the finished in-process state (i.e. CLEAN)
    if not self._fixed and temp.next() == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet, numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(cond, None,
                                                   self._blockQuery[blockKey], object)

    inProcessPool = RequestPool()
    # wait for all in-process queries
    for req in inProcessQueries:
        req = req()  # get original req object from weakref
        if req is not None:
            inProcessPool.add(req)

    inProcessPool.wait()
    inProcessPool.clean()

    # finally, store results in result area
    self._lock.acquire()
    if self._cache is not None:
        result[:] = self._cache[roiToSlice(start, stop)]
    else:
        self.traceLogger.debug("WAITING FOR INPUT WITH THE CACHE LOCK LOCKED!")
        self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
        self.traceLogger.debug("INPUT RECEIVED WITH THE CACHE LOCK LOCKED.")
    self._running -= 1
    self._updatePriority()
    cacheView = None
    self._lock.release()
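# The block-state constants referenced throughout are defined elsewhere in the
# class and are not part of this excerpt. Only IN_PROCESS == 0 is implied by
# the comments above ("setting array to 0"); the other values in this sketch
# are assumptions, chosen only to be distinct small integers that fit the
# numpy.uint8 state array.
class OpArrayCacheStates(object):  # hypothetical stand-in for OpArrayCache's constants
    IN_PROCESS = 0   # a request is currently filling this block
    DIRTY = 1        # block content is stale and must be re-fetched (assumed value)
    CLEAN = 2        # block content is up to date in the cache (assumed value)
    FIXED_DIRTY = 3  # block went dirty while the cache was fixed (assumed value)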