def _executeOutput(self, slot, subindex, roi, result):
    """Serve a read request on the Output slot from the block cache.

    Blocks overlapping ``roi`` that are already CLEAN (or FIXED_DIRTY)
    are copied straight out of the cache; DIRTY blocks are fetched from
    the Input slot via a RequestPool; blocks some other thread is
    already fetching (IN_PROCESS) are waited on via weakrefs.  Finally
    the requested region is copied into ``result``.

    Parameters:
        slot, subindex: operator plumbing (unused beyond convention).
        roi: region of interest; must support ``toSlice()``/``pprint()``.
        result: destination array the requested data is copied into.

    NOTE(review): bookkeeping and request creation run under
    ``self._lock``; the input requests themselves are waited on with the
    lock released, then the lock is re-taken for the final copy.
    """
    t = time.time()
    key = roi.toSlice()
    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    with self._lock:
        # Count the cache access and mark this request as running so the
        # priority/cleanup machinery sees us.
        ch = self._cacheHits
        ch += 1
        self._cacheHits = ch
        self._running += 1

        # (Re-)allocate the cache if it is missing or its shape is stale.
        if (self._cache is None or
                self._cache.shape != self.Output.meta.shape):
            self._allocateCache()

        # prevent freeing of cache during running this function
        cacheView = self._cache[:]

        # Convert the pixel roi into outer block-coordinate bounds.
        blockStart = (1.0 * start / self._blockShape).floor()
        blockStop = (1.0 * stop / self._blockShape).ceil()
        blockKey = roiToSlice(blockStart, blockStop)
        blockSet = self._blockState[blockKey]

        # this is a little optimization to shortcut
        # many lines of python code when all data
        # is already in the cache:
        if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                            blockSet == OpArrayCache.FIXED_DIRTY).all():
            cache_result = self._cache[roiToSlice(start, stop)]
            self.Output.stype.copy_data(result, cache_result)
            self._running -= 1
            self._updatePriority()
            cacheView = None
            return

        # Requests other threads already started for blocks we overlap.
        extracted = numpy.extract(blockSet == OpArrayCache.IN_PROCESS,
                                  self._blockQuery[blockKey])
        inProcessQueries = numpy.unique(extracted)

        cond = (blockSet == OpArrayCache.DIRTY)
        # Dirty blocks get weight 1, everything else a huge weight, so the
        # tiler groups only dirty blocks into request rectangles.
        tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
        trueDirtyIndices = numpy.nonzero(cond)

        if has_drtile:
            tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(
                0, 1)
        else:
            # Fallback without drtile: one single-block tile per dirty block.
            tileStartArray = numpy.array(trueDirtyIndices)
            tileStopArray = 1 + tileStartArray
            tileArray = numpy.concatenate((tileStartArray, tileStopArray),
                                          axis=0)

        dirtyRois = []
        # tileArray stacks start coords over stop coords along axis 0.
        half = tileArray.shape[0] // 2
        dirtyPool = RequestPool()

        for i in range(tileArray.shape[1]):
            # Tile bounds relative to blockStart (block coords) ...
            drStart3 = tileArray[:half, i]
            drStop3 = tileArray[half:, i]
            # ... absolute block coords ...
            drStart2 = drStart3 + blockStart
            drStop2 = drStop3 + blockStart
            # ... and pixel coords, clipped to the dataset shape.
            drStart = drStart2 * self._blockShape
            drStop = drStop2 * self._blockShape
            shape = self.Output.meta.shape
            drStop = numpy.minimum(drStop, shape)
            drStart = numpy.minimum(drStart, shape)

            key2 = roiToSlice(drStart2, drStop2)
            key = roiToSlice(drStart, drStop)

            if not self._fixed:
                dirtyRois.append([drStart, drStop])
                req = self.inputs["Input"][key].writeInto(self._cache[key])
                req.uncancellable = True  #FIXME
                dirtyPool.add(req)
                # Only keep a weak reference so finished requests can die.
                self._blockQuery[key2] = weakref.ref(req)

                #sanity check: every block in this tile must still be DIRTY
                if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                    logger.warning("original condition" + str(cond))
                    logger.warning("original tilearray {} {}".format(
                        tileArray, tileArray.shape))
                    logger.warning("original tileWeights {} {}".format(
                        tileWeights, tileWeights.shape))
                    logger.warning("sub condition {}".format(
                        self._blockState[key2] == OpArrayCache.DIRTY))
                    logger.warning("START={}, STOP={}".format(
                        drStart2, drStop2))
                    import h5py
                    with h5py.File("test.h5", "w") as f:
                        f.create_dataset("data", data=tileWeights)
                    logger.warning(
                        "%r \n %r \n %r\n %r\n %r \n%r" %
                        (key2, blockKey, self._blockState[key2],
                         self._blockState[blockKey][trueDirtyIndices],
                         self._blockState[blockKey], tileWeights))
                    assert False
                self._blockState[key2] = OpArrayCache.IN_PROCESS

        # indicate the inprocessing state, by setting array to 0 (i.e. IN_PROCESS)
        if not self._fixed:
            blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet,
                                    numpy.uint8)
        else:
            # Someone asked for some dirty blocks while we were fixed.
            # Mark these blocks to be signaled as dirty when we become unfixed
            blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet,
                                    numpy.uint8)
            self._has_fixed_dirty_blocks = True

    temp = itertools.count(0)

    #wait for all requests to finish (lock released)
    something_updated = len(dirtyPool) > 0
    dirtyPool.wait()
    if something_updated:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread
        self.Output._sig_value_changed()

    # indicate the finished inprocess state (i.e. CLEAN)
    # FIX: use the next() builtin instead of the Python-2-only
    # temp.next() method (identical behavior on Python 2.6+).
    # next(temp) == 0 is trivially true on this fresh counter; kept for
    # fidelity with the historical control flow.
    if not self._fixed and next(temp) == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet,
                                    numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(
                cond, None, self._blockQuery[blockKey], object)

    # Wait for all in-process queries.
    # Can't use RequestPool here because these requests have already started.
    for req in inProcessQueries:
        req = req()  # get original req object from weakref
        if req is not None:
            req.wait()

    # finally, store results in result area
    with self._lock:
        if self._cache is not None:
            cache_result = self._cache[roiToSlice(start, stop)]
            self.Output.stype.copy_data(result, cache_result)
        else:
            self.inputs["Input"][roiToSlice(
                start, stop)].writeInto(result).wait()
        self._running -= 1
        self._updatePriority()
        cacheView = None
    self.logger.debug(
        "read %s took %f sec." % (roi.pprint(), time.time() - t))
def _executeOutput(self, slot, subindex, roi, result):
    """Serve a read on the Output slot from the block cache.

    CLEAN/FIXED_DIRTY blocks are copied from the cache; DIRTY blocks are
    fetched from Input via a RequestPool; IN_PROCESS blocks (already
    being fetched by another thread) are collected into a second pool
    and waited on.  The requested region is finally copied into
    ``result``.

    NOTE(review): the lock is held manually (acquire/release) around the
    bookkeeping and request-creation phases and re-acquired for the
    final copy; the waits happen unlocked.  Every early exit must
    release the lock — verify if this path is modified.
    """
    t = time.time()
    key = roi.toSlice()
    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    self._lock.acquire()
    # bookkeeping: count the hit and mark this request as running
    ch = self._cacheHits
    ch += 1
    self._cacheHits = ch
    self._running += 1
    if self._cache is None:
        self._allocateCache()
    cacheView = self._cache[:] #prevent freeing of cache during running this function
    # pixel roi -> outer block-coordinate bounds
    blockStart = (1.0 * start / self._blockShape).floor()
    blockStop = (1.0 * stop / self._blockShape).ceil()
    blockKey = roiToSlice(blockStart, blockStop)
    blockSet = self._blockState[blockKey]

    # this is a little optimization to shortcut
    # many lines of python code when all data is
    # is already in the cache:
    if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                        blockSet == OpArrayCache.FIXED_DIRTY).all():
        result[:] = self._cache[roiToSlice(start, stop)]
        self._running -= 1
        self._updatePriority()
        cacheView = None
        self._lock.release()
        return

    # requests other threads already started for overlapping blocks
    # (stored as weakrefs in _blockQuery)
    inProcessQueries = numpy.unique(
        numpy.extract(blockSet == OpArrayCache.IN_PROCESS,
                      self._blockQuery[blockKey]))

    cond = (blockSet == OpArrayCache.DIRTY)
    # dirty blocks weight 1, everything else 128**3, so the tiler groups
    # only the dirty blocks into request rectangles
    tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
    trueDirtyIndices = numpy.nonzero(cond)

    tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)

    dirtyRois = []
    # tileArray stacks start coords over stop coords along axis 0
    half = tileArray.shape[0] / 2
    dirtyPool = RequestPool()

    for i in range(tileArray.shape[1]):
        # tile bounds relative to blockStart (block coords) ...
        drStart3 = tileArray[:half, i]
        drStop3 = tileArray[half:, i]
        # ... absolute block coords ...
        drStart2 = drStart3 + blockStart
        drStop2 = drStop3 + blockStart
        # ... and pixel coords, clipped to the dataset shape
        drStart = drStart2 * self._blockShape
        drStop = drStop2 * self._blockShape
        shape = self.Output.meta.shape
        drStop = numpy.minimum(drStop, shape)
        drStart = numpy.minimum(drStart, shape)
        key3 = roiToSlice(drStart3, drStop3)
        key2 = roiToSlice(drStart2, drStop2)
        key = roiToSlice(drStart, drStop)

        if not self._fixed:
            dirtyRois.append([drStart, drStop])
            req = self.inputs["Input"][key].writeInto(self._cache[key])
            req.uncancellable = True #FIXME
            dirtyPool.add(req)
            # weak reference only, so finished requests can be collected
            self._blockQuery[key2] = weakref.ref(req)

            #sanity check: every block in this tile must still be DIRTY
            if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                logger.warning("original condition" + str(cond))
                logger.warning("original tilearray {} {}".format(
                    tileArray, tileArray.shape))
                logger.warning("original tileWeights {} {}".format(
                    tileWeights, tileWeights.shape))
                logger.warning("sub condition {}".format(
                    self._blockState[key2] == OpArrayCache.DIRTY))
                logger.warning("START={}, STOP={}".format(
                    drStart2, drStop2))
                import h5py
                with h5py.File("test.h5", "w") as f:
                    f.create_dataset("data", data=tileWeights)
                logger.warning(
                    "%r \n %r \n %r\n %r\n %r \n%r" %
                    (key2, blockKey, self._blockState[key2],
                     self._blockState[blockKey][trueDirtyIndices],
                     self._blockState[blockKey], tileWeights))
                assert False
            self._blockState[key2] = OpArrayCache.IN_PROCESS

    # indicate the inprocessing state, by setting array to 0 (i.e. IN_PROCESS)
    if not self._fixed:
        blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet,
                                numpy.uint8)
    else:
        # Someone asked for some dirty blocks while we were fixed.
        # Mark these blocks to be signaled as dirty when we become unfixed
        blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet,
                                numpy.uint8)
        self._has_fixed_dirty_blocks = True
    self._lock.release()

    temp = itertools.count(0)

    #wait for all requests to finish
    dirtyPool.wait()
    if len(dirtyPool) > 0:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread
        self.Output._sig_value_changed()
    dirtyPool.clean()

    # indicate the finished inprocess state (i.e. CLEAN)
    # NOTE(review): temp.next() == 0 is trivially true on a fresh counter
    # (and is Python-2-only syntax)
    if not self._fixed and temp.next() == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet,
                                    numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(
                cond, None, self._blockQuery[blockKey], object)

    inProcessPool = RequestPool()
    #wait for all in process queries
    for req in inProcessQueries:
        req = req() # get original req object from weakref
        if req is not None:
            inProcessPool.add(req)
    inProcessPool.wait()
    inProcessPool.clean()

    # finally, store results in result area
    self._lock.acquire()
    if self._cache is not None:
        result[:] = self._cache[roiToSlice(start, stop)]
    else:
        # cache was freed while we waited: read directly from the input
        self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
    self._running -= 1
    self._updatePriority()
    cacheView = None
    self._lock.release()
    self.logger.debug("read %s took %f sec." % (roi.pprint(), time.time() - t))
def execute(self, slot, roi, result):
    """Serve a read request from the block cache (legacy Python 2 variant).

    Block states here are raw integers; by comparison with the other
    variants in this file: 0 appears to mean in-process, 1 dirty, and
    2 clean -- TODO confirm against the class constants.  Dirty blocks
    are fetched from Input with plain request objects (strong refs in
    ``_blockQuery``); the requested region is finally copied into
    ``result``.
    """
    #return
    key = roi.toSlice()
    self.graph._notifyMemoryHit()
    start, stop = sliceToRoi(key, self.shape)

    self._lock.acquire()
    # bookkeeping: count the cache hit
    ch = self._cacheHits
    ch += 1
    self._cacheHits = ch
    cacheView = self._cache #prevent freeing of cache during running this function
    if self._cache is None:
        self._allocateCache()
    # pixel roi -> outer block-coordinate bounds
    blockStart = (1.0 * start / self._blockShape).floor()
    blockStop = (1.0 * stop / self._blockShape).ceil()
    blockKey = roiToSlice(blockStart, blockStop)
    blockSet = self._blockState[blockKey]

    # this is a little optimization to shortcut
    # many lines of python code when all data is
    # is already in the cache:
    if (blockSet == 2).all():
        self._lock.release()
        result[:] = self._cache[roiToSlice(start, stop)]
        return

    # requests other threads already started for overlapping blocks
    inProcessQueries = numpy.unique(numpy.extract(
        blockSet == 0, self._blockQuery[blockKey]))

    cond = (blockSet == 1)
    # dirty blocks weight 1, others 128**3, so the tiler groups only
    # the dirty blocks into request rectangles
    tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
    trueDirtyIndices = numpy.nonzero(cond)

    tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)

    dirtyRois = []
    # tileArray stacks start coords over stop coords along axis 0
    half = tileArray.shape[0] / 2
    dirtyRequests = []

    def onCancel():
        return False # indicate that this request cannot be canceled

    for i in range(tileArray.shape[1]):
        # tile bounds relative to blockStart (block coords) ...
        drStart3 = tileArray[:half, i]
        drStop3 = tileArray[half:, i]
        # ... absolute block coords ...
        drStart2 = drStart3 + blockStart
        drStop2 = drStop3 + blockStart
        # ... and pixel coords, clipped to the dataset shape
        drStart = drStart2 * self._blockShape
        drStop = drStop2 * self._blockShape
        drStop = numpy.minimum(drStop, self.shape)
        drStart = numpy.minimum(drStart, self.shape)
        key3 = roiToSlice(drStart3, drStop3)
        key2 = roiToSlice(drStart2, drStop2)
        key = roiToSlice(drStart, drStop)

        if not self._fixed:
            dirtyRois.append([drStart, drStop])
            req = self.inputs["Input"][key].writeInto(self._cache[key])
            req.onCancel(onCancel)
            dirtyRequests.append((req, key2, key3))
            # strong reference (unlike the weakref variants in this file)
            self._blockQuery[key2] = req

            #sanity check: every block in this tile must still be dirty (1)
            if (self._blockState[key2] != 1).any():
                print "original condition", cond
                print "original tilearray", tileArray, tileArray.shape
                print "original tileWeights", tileWeights, tileWeights.shape
                print "sub condition", self._blockState[key2] == 1
                print "START, STOP", drStart2, drStop2
                import h5py
                f = h5py.File("test.h5", "w")
                f.create_dataset("data", data = tileWeights)
                print "%r \n %r \n %r\n %r\n %r \n%r" % (key2, blockKey, self._blockState[key2], self._blockState[blockKey][trueDirtyIndices], self._blockState[blockKey], tileWeights)
                assert 1 == 2
        else:
            # cache is fixed: zero the region instead of fetching it
            self._cache[key] = 0

    #
    # indicate the inprocessing state, by setting array to 0
    if not self._fixed:
        blockSet[:] = fastWhere(cond, 0, blockSet, numpy.uint8)
    self._lock.release()

    temp = itertools.count(0)

    #wait for all requests to finish
    for req, reqBlockKey, reqSubBlockKey in dirtyRequests:
        res = req.wait()

    # indicate the finished inprocess state
    # NOTE(review): temp.next() == 0 is trivially true on a fresh counter
    if not self._fixed and temp.next() == 0:
        self._lock.acquire()
        blockSet[:] = fastWhere(cond, 2, blockSet, numpy.uint8)
        self._blockQuery[blockKey] = fastWhere(cond, None,
                                               self._blockQuery[blockKey],
                                               object)
        self._lock.release()

    #wait for all in process queries
    for req in inProcessQueries:
        req.wait()

    # finally, store results in result area
    self._lock.acquire()
    if self._cache is not None:
        result[:] = self._cache[roiToSlice(start, stop)]
    else:
        # cache was freed while we waited: read directly from the input
        self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
    self._lock.release()
def _executeOutput(self, slot, subindex, roi, result):
    """Serve a read on the Output slot from the block cache (traced variant).

    Same flow as the other ``_executeOutput`` variants in this file —
    copy CLEAN/FIXED_DIRTY blocks from the cache, fetch DIRTY blocks
    from Input via a RequestPool, wait on IN_PROCESS blocks through
    weakrefs — with traceLogger instrumentation on the lock and on the
    request lifecycle.

    NOTE(review): the lock is held manually (acquire/release); every
    early exit must release it.  The final else-branch deliberately
    waits on Input while holding the cache lock (see its trace message).
    """
    key = roi.toSlice()
    shape = self.Output.meta.shape
    start, stop = sliceToRoi(key, shape)

    self.traceLogger.debug("Acquiring ArrayCache lock...")
    self._lock.acquire()
    self.traceLogger.debug("ArrayCache lock acquired.")
    # bookkeeping: count the hit and mark this request as running
    ch = self._cacheHits
    ch += 1
    self._cacheHits = ch
    self._running += 1
    if self._cache is None:
        self._allocateCache()
    cacheView = self._cache[:] #prevent freeing of cache during running this function
    # pixel roi -> outer block-coordinate bounds
    blockStart = (1.0 * start / self._blockShape).floor()
    blockStop = (1.0 * stop / self._blockShape).ceil()
    blockKey = roiToSlice(blockStart, blockStop)
    blockSet = self._blockState[blockKey]

    # this is a little optimization to shortcut
    # many lines of python code when all data is
    # is already in the cache:
    if numpy.logical_or(blockSet == OpArrayCache.CLEAN,
                        blockSet == OpArrayCache.FIXED_DIRTY).all():
        result[:] = self._cache[roiToSlice(start, stop)]
        self._running -= 1
        self._updatePriority()
        cacheView = None
        self._lock.release()
        return

    # requests other threads already started for overlapping blocks
    inProcessQueries = numpy.unique(numpy.extract(
        blockSet == OpArrayCache.IN_PROCESS, self._blockQuery[blockKey]))

    cond = (blockSet == OpArrayCache.DIRTY)
    # dirty blocks weight 1, others 128**3, so the tiler groups only
    # the dirty blocks into request rectangles
    tileWeights = fastWhere(cond, 1, 128**3, numpy.uint32)
    trueDirtyIndices = numpy.nonzero(cond)

    tileArray = drtile.test_DRTILE(tileWeights, 128**3).swapaxes(0, 1)

    dirtyRois = []
    # tileArray stacks start coords over stop coords along axis 0
    half = tileArray.shape[0] / 2
    dirtyPool = RequestPool()

    def onCancel(req):
        return False # indicate that this request cannot be canceled

    self.traceLogger.debug("Creating cache input requests")
    for i in range(tileArray.shape[1]):
        # tile bounds relative to blockStart (block coords) ...
        drStart3 = tileArray[:half, i]
        drStop3 = tileArray[half:, i]
        # ... absolute block coords ...
        drStart2 = drStart3 + blockStart
        drStop2 = drStop3 + blockStart
        # ... and pixel coords, clipped to the dataset shape
        drStart = drStart2 * self._blockShape
        drStop = drStop2 * self._blockShape
        shape = self.Output.meta.shape
        drStop = numpy.minimum(drStop, shape)
        drStart = numpy.minimum(drStart, shape)
        key3 = roiToSlice(drStart3, drStop3)
        key2 = roiToSlice(drStart2, drStop2)
        key = roiToSlice(drStart, drStop)

        if not self._fixed:
            dirtyRois.append([drStart, drStop])
            req = self.inputs["Input"][key].writeInto(self._cache[key])
            req.uncancellable = True #FIXME
            dirtyPool.add(req)
            # weak reference only, so finished requests can be collected
            self._blockQuery[key2] = weakref.ref(req)

            #sanity check: every block in this tile must still be DIRTY
            if (self._blockState[key2] != OpArrayCache.DIRTY).any():
                print "original condition", cond
                print "original tilearray", tileArray, tileArray.shape
                print "original tileWeights", tileWeights, tileWeights.shape
                print "sub condition", self._blockState[key2] == OpArrayCache.DIRTY
                print "START, STOP", drStart2, drStop2
                import h5py
                with h5py.File("test.h5", "w") as f:
                    f.create_dataset("data", data = tileWeights)
                print "%r \n %r \n %r\n %r\n %r \n%r" % (key2, blockKey, self._blockState[key2], self._blockState[blockKey][trueDirtyIndices], self._blockState[blockKey], tileWeights)
                assert False
            self._blockState[key2] = OpArrayCache.IN_PROCESS

    # indicate the inprocessing state, by setting array to 0 (i.e. IN_PROCESS)
    if not self._fixed:
        blockSet[:] = fastWhere(cond, OpArrayCache.IN_PROCESS, blockSet,
                                numpy.uint8)
    else:
        # Someone asked for some dirty blocks while we were fixed.
        # Mark these blocks to be signaled as dirty when we become unfixed
        blockSet[:] = fastWhere(cond, OpArrayCache.FIXED_DIRTY, blockSet,
                                numpy.uint8)
        self._has_fixed_dirty_blocks = True
    self._lock.release()

    temp = itertools.count(0)

    #wait for all requests to finish
    self.traceLogger.debug( "Firing all {} cache input requests...".format(len(dirtyPool)) )
    dirtyPool.wait()
    if len( dirtyPool ) > 0:
        # Signal that something was updated.
        # Note that we don't need to do this for the 'in process' queries (below)
        # because they are already in the dirtyPool in some other thread
        self.Output._sig_value_changed()
    dirtyPool.clean()
    self.traceLogger.debug( "All cache input requests received." )

    # indicate the finished inprocess state (i.e. CLEAN)
    # NOTE(review): temp.next() == 0 is trivially true on a fresh counter
    # (and is Python-2-only syntax)
    if not self._fixed and temp.next() == 0:
        with self._lock:
            blockSet[:] = fastWhere(cond, OpArrayCache.CLEAN, blockSet,
                                    numpy.uint8)
            self._blockQuery[blockKey] = fastWhere(cond, None,
                                                   self._blockQuery[blockKey],
                                                   object)

    inProcessPool = RequestPool()
    #wait for all in process queries
    for req in inProcessQueries:
        req = req() # get original req object from weakref
        if req is not None:
            inProcessPool.add(req)
    inProcessPool.wait()
    inProcessPool.clean()

    # finally, store results in result area
    self._lock.acquire()
    if self._cache is not None:
        result[:] = self._cache[roiToSlice(start, stop)]
    else:
        # cache was freed while we waited: read directly from the input
        self.traceLogger.debug( "WAITING FOR INPUT WITH THE CACHE LOCK LOCKED!" )
        self.inputs["Input"][roiToSlice(start, stop)].writeInto(result).wait()
        self.traceLogger.debug( "INPUT RECEIVED WITH THE CACHE LOCK LOCKED." )
    self._running -= 1
    self._updatePriority()
    cacheView = None
    self._lock.release()
def doc(x):
    """Tile the truthy region of *x* with drtile.

    Truthy entries of *x* become weight 1 and falsy entries the large
    weight 128**3; the uint32 weight array is handed to
    ``drtile.test_DRTILE`` and the result is returned with its first
    two axes swapped.
    """
    weights = np.where(x, 1, 128 ** 3).astype(np.uint32)
    tiles = drtile.test_DRTILE(weights, 128 ** 3)
    return tiles.swapaxes(0, 1)