def __init__(self, period=60, chunks=100, throughput=0):
    """Initialize throughput-producer state.

    period     -- the time period (seconds) over which to produce throughput
    chunks     -- number of chunks the period is divided into
    throughput -- initial throughput counter value
    """
    self.period = period
    self.chunks = chunks
    # Atomic wrappers: these counters are updated from worker threads.
    self.throughput = AtomicInteger(throughput)
    self.throughput_success = AtomicInteger(0)
    self.resume = AtomicBoolean(True)
    self.curr_tick = AtomicInteger(0)
def onInit():
    global eventCounter
    # Variables for assertions only: one atomic counter per event color,
    # kept in a synchronized map shared with the test harness.
    eventCounter = Collections.synchronizedMap(HashMap())
    for color in ("blue", "red"):
        eventCounter.put(color, AtomicInteger(0))
    sponge.setVariable("eventCounter", eventCounter)
def test_number_transform(self):
    """Java AtomicIntegers stored in an ArrayList keep their values."""
    atomics = [AtomicInteger(n) for n in (0, 1, 2)]
    x = ArrayList()
    for atomic in atomics:
        x.add(atomic)
    # Each element should round-trip: list position matches stored value.
    for idx in range(3):
        self.assertEqual(x.get(idx).get(), idx)
def onInit():
    # Variables for assertions only: lifecycle flags and counters that
    # the test inspects to verify callback ordering.
    sponge.setVariable("onInitCalled", AtomicBoolean(False))
    for counter in ("onBeforeLoadCalled", "onLoadCalled", "onAfterLoadCalled"):
        sponge.setVariable(counter, AtomicInteger(0))
    for flag in ("onStartupCalled", "onBeforeReloadCalled", "onAfterReloadCalled"):
        sponge.setVariable(flag, AtomicBoolean(False))
    sponge.logger.debug("onInit")
    # Mark that this callback actually ran.
    sponge.getVariable("onInitCalled").set(True)
class IdFactory:
    """Generates sequential integer ids, safe for concurrent callers."""

    def __init__(self):
        # Jython-style local import of the JDK atomic counter.
        import java.util.concurrent.atomic.AtomicInteger as AtomicInt
        self.nextid = AtomicInt()

    def next(self):
        # getAndIncrement atomically returns the current id and advances it.
        return self.nextid.getAndIncrement()
class Counter(object):
    """Integer counter backed by a java AtomicInteger (thread-safe)."""

    def __init__(self, initial=0):
        self.atomic = AtomicInteger(initial)
        # waiting is important here to ensure that
        # defaultdict factories can step on each other
        time.sleep(0.001)

    def decrementAndGet(self):
        # Delegate straight to the atomic backing value.
        return self.atomic.decrementAndGet()

    def incrementAndGet(self):
        return self.atomic.incrementAndGet()

    def get(self):
        return self.atomic.get()

    def __repr__(self):
        current = self.atomic.get()
        return "Counter<%s>" % current
class ThreadFactorySameGroup(ThreadFactory):
    """ThreadFactory that creates numbered, normal-priority threads in the
    thread group of the factory's creator."""

    def __init__(self, name):
        self.name = name
        # Capture the creating thread's group so workers join it.
        self.group = Thread.currentThread().getThreadGroup()
        self.counter = AtomicInteger(0)

    def newThread(self, runnable):
        seq = self.counter.incrementAndGet()
        thread = Thread(self.group, runnable, "%s-%i" % (self.name, seq))
        thread.setPriority(Thread.NORM_PRIORITY)
        return thread
def fetch(self):
    """ generated source for method fetch

    Fetches all configured variables into a GridDataSet, fanning the work
    out as tasks counted down via a CountDownLatch.

    Raises RuntimeException when there is no data, when the requested size
    exceeds dataSizeLimitForFetch, or when not every task succeeded.

    NOTE(review): this looks like machine-translated Java; the inner t/z
    loops are commented out, so `t`, `z`, `curPos` and `buffer_` are never
    initialized here and `variables` (unqualified, vs self.variables above)
    is unresolved — confirm against the original Java source.
    """
    totalFetchDataSize = self.calcDataSize(len(self.variables))
    if totalFetchDataSize == 0:
        raise RuntimeException("no data to fetch")
    if totalFetchDataSize > dataSizeLimitForFetch:
        raise RuntimeException("exceed the max data limit for fetch")
    dataSet = GridDataSet(self.meta)
    # One latch count per (variable, t, z) task; await() blocks until all done.
    latch = CountDownLatch(len(self.variables) * self.tRange.getSize() * self.zRange.getSize())
    exceptions = ConcurrentLinkedQueue()
    counter = AtomicInteger()
    taskCount = 0
    for variable in variables:
        dataSet.addVariable(variable, Grid4D(buffer_, self.meta.getDataType(), self.getOrigin(), self.getShape()))
        # # * not thread-safe
        # while t < tRange.getEnd():
        # # * not thread-safe
        # # # * not thread-safe
        # while z < zRange.getEnd():
        # # * not thread-safe
        # self.addTask(counter, data, curPos, variable, t, z, latch, exceptions)
        # Advance the write offset by one horizontal slab per task.
        curPos += xRange.getSize() * yRange.getSize() * meta.getDataType().getSize()
        taskCount += 1
        z += 1
        t += 1
    latch.await()
    # Surface the first recorded worker failure, if any.
    if not exceptions.isEmpty():
        raise exceptions.peek()
    # counter is incremented by successful tasks; mismatch means silent failure.
    if counter.get() != taskCount:
        raise RuntimeException("not all task success")
    return dataSet
def onInit():
    global eventCounter
    # Variables for assertions only: a synchronized map holding one atomic
    # counter per (trigger, file) combination exercised by the test.
    eventCounter = Collections.synchronizedMap(HashMap())
    keys = (
        "Trigger1, file1", "Trigger2, file1",
        "Trigger1, file2", "Trigger2, file2",
        "Trigger1, file3", "Trigger3, file3",
    )
    for key in keys:
        eventCounter.put(key, AtomicInteger(0))
    sponge.setVariable("eventCounter", eventCounter)
def __init__(self, user, node, streaminguri):
    """Set up connection bookkeeping for the streaming-request monitor."""
    super(NsServerNumConcurrentRequests, self).__init__(user, node)
    self.nconns = []  # Belongs exclusively to poll method
    self.thread = None
    self.httprq = get_http_request(self.node.ip, streaminguri,
                                   self.node.rest_username,
                                   self.node.rest_password)
    # Atomic counters shared between the poll thread and readers.
    for attr in ("no_of_connections",
                 "no_of_open_connections",
                 "no_of_throughput_updates"):
        setattr(self, attr, AtomicInteger(0))
    self.rest = RestConnection(self.node)
class Library:
    """In-memory book library backed by a thread-safe list.

    NOTE(review): getBook subscripts the result of filter(), which only
    works where filter returns a list (Python 2 / Jython) — confirm the
    target runtime before porting.
    """

    def __init__(self):
        # CopyOnWriteArrayList gives safe concurrent iteration over books.
        self.books = CopyOnWriteArrayList()
        # Monotonically increasing id source for new books.
        self.currentId = AtomicInteger(0)
        self.readOnly = False

    def addBook(self, author, title, cover=None, force=False):
        """Add a book; raises if an identical author/title pair exists.

        force=True bypasses the readOnly flag.
        """
        if list(
                filter(
                    lambda book: book.author == author and book.title == title,
                    self.books)):
            raise Exception("This book has already been added to the library")
        if not self.readOnly or force:
            self.books.add(
                Book(self.currentId.incrementAndGet(), author, title, cover))

    def getBook(self, bookId):
        # Raises IndexError if no book has the given id.
        return filter(lambda book: book.id == bookId, self.books)[0]

    def updateBook(self, bookId, author, title, cover=None):
        """Overwrite author/title (and cover, if given) unless read-only."""
        book = self.getBook(bookId)
        if not self.readOnly:
            book.author = author
            book.title = title
            if cover:
                book.cover = cover

    def removeBook(self, bookId):
        if not self.readOnly:
            self.books.removeIf(PyPredicate(lambda book: book.id == bookId))

    def findBooks(self, searchString):
        """Case-insensitive regex search over authors and titles.

        A None searchString matches every book.
        """
        return list(filter(lambda book: searchString is None or re.search(searchString.upper(), book.author.upper())\
            or re.search(searchString.upper(), book.title.upper()), self.books))

    def getAuthors(self):
        # Distinct authors, sorted case-insensitively.
        return sorted(list(set(list(map(lambda book: book.author, self.books)))),
                      key=lambda author: author.lower())
def __init__(self, initial=0): self.atomic = AtomicInteger(initial) # waiting is important here to ensure that # defaultdict factories can step on each other time.sleep(0.001)
def __init__(self, name):
    """Remember the base thread name and creator's thread group."""
    self.name = name
    # New threads will be created in the group of the constructing thread.
    self.group = Thread.currentThread().getThreadGroup()
    # Sequence number appended to each spawned thread's name.
    self.counter = AtomicInteger(0)
if k < min(nLayers, currentWrittenLayer + nLayersAtATime): IJ.log('Start exporting layer ' + str(k) + ' currentWrittenLayer - ' + str(currentWrittenLayer)) fc.exportFlat(project, exportFolder, 1/float(LMEMFactor), baseName = 'alignedDownsampledEM', bitDepth = 8, layers = [k]) namePlugin = 'export_alignedEMForRegistration' MagCFolder = fc.startPlugin(namePlugin) ControlWindow.setGUIEnabled(False) MagCParams = fc.readMagCParameters(MagCFolder) nLayersAtATime = MagCParams[namePlugin]['nLayersAtATime'] nThreads = MagCParams[namePlugin]['nThreads'] LMEMFactor = fc.getLMEMFactor(MagCFolder) IJ.log('Exporting with LMEMFactor = ' + str(LMEMFactor)) projectPath = fc.findFilesFromTags(MagCFolder,['EM', 'Project'])[0] exportFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), namePlugin)) project, loader, layerset, nLayers = fc.openTrakemProject(projectPath) temporaryFolder = fc.mkdir_p(os.path.join(os.path.dirname(projectPath), 'temporary_LMEMRegistration')) # to save contrasted images # currentLayerPath stores in a file the current layer being processed by the script which is run several times currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt') currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime) atom = AtomicInteger(currentWrittenLayer) fc.startThreads(exportLayer, wait = 0, nThreads = nThreads) # project.save() # why do I save the project here ? time.sleep(3) fc.shouldRunAgain(namePlugin, currentWrittenLayer, nLayers, MagCFolder, project, increment = nLayersAtATime)
def onInit():
    # Seed the counters: sentEvents starts at 100, finishedEvents at 0.
    for name, initial in (("sentEvents", 100), ("finishedEvents", 0)):
        sponge.setVariable(name, AtomicInteger(initial))
# Derive the contrasted-channel name and register it alongside the originals.
contrastedChannel = 'contrasted' + refChannel
channels.append(contrastedChannel)

# Collect (source, destination) path pairs for every .tif of the reference channel.
toContrastPaths = []
for (dirpath, dirnames, filenames) in os.walk(LMDataFolder):
    for filename in filenames:
        IJ.log('ToContrast: ' + str(filename))
        if (os.path.splitext(filename)[1] == '.tif') and (refChannel in filename):
            imagePath = os.path.join(dirpath, filename)
            contrastedPath = os.path.join(
                dirpath, filename.replace(refChannel, contrastedChannel))
            toContrastPaths.append([imagePath, contrastedPath])
IJ.log('toContrastPaths : ' + str(toContrastPaths))
nPaths = len(toContrastPaths)
# Shared work index for the contrastImage worker threads.
atomicI = AtomicInteger(0)
fc.startThreads(contrastImage)

# Update metadata with the new contrasted channel.
with open(LMMetadataPath, 'r') as f:
    lines = f.readlines()
for idLine, line in enumerate(lines):
    if 'nChannels' in line:
        lines[idLine] = 'nChannels = ' + str(nChannels + 1)
    if 'channels' in line:
        lines[idLine] = 'channels = [' + ','.join(
            map(lambda x: "'" + x + "'", channels)) + ']'
with open(LMMetadataPath, 'w') as f:
    for line in lines:
        # Strip any existing newline first: readlines() keeps '\n', so the
        # previous 'line + \n' doubled every untouched line's newline.
        f.write(line.rstrip('\n') + '\n')
def onInit():
    # Variables for assertions only.
    camelMessageCounter = AtomicInteger(0)
    sponge.setVariable("receivedCamelMessages", camelMessageCounter)
def onInit():
    # Variables for assertions only: zeroed counters for both match kinds.
    for counterName in ("nameCount", "patternCount"):
        sponge.setVariable(counterName, AtomicInteger(0))
def __init__(self):
    """Initialize the id counter starting at zero."""
    # Jython-style local import of the JDK atomic integer class.
    import java.util.concurrent.atomic.AtomicInteger as AtomicInt
    self.nextid = AtomicInt()
def onInit():
    # Variables for assertions only: two counters plus a collecting list.
    for counterName in ("countA", "countB"):
        sponge.setVariable(counterName, AtomicInteger(0))
    sponge.setVariable("listC", ArrayList())
def onInit():
    global eventEntry, eventCounter
    # Shared state inspected by assertions: no entry seen yet, zero events.
    eventEntry = None
    eventCounter = AtomicInteger(0)
    # Expose the counter and the threshold to the knowledge base.
    sponge.setVariable("eventCounter", eventCounter)
    sponge.setVariable("allowNumber", 2)
def __init__(self):
    """Initialize the id counter starting at zero."""
    # NOTE(review): AtomicInt must be imported/aliased elsewhere in this
    # file (e.g. from java.util.concurrent.atomic) — confirm it is in scope.
    self.nextid = AtomicInt()
TEST_NAME = 'http_get_test' # 设置时间格式为yyyy-mm-dd hh24:mi:ss ISOTIMEFORMAT = '%Y-%m-%d %X' # 根据实际情况选择方案1或2,可以重复利用的测试数据可以选择1或者2,不可重复利用的测试数据选择2。 # 方案1 random = Random() # 方案2 # processNum = int(grinder.getProperties().get('grinder.processes')) # threadNum = int(grinder.getProperties().get('grinder.threads')) # 一般无需修改 ERR_LOG = 'err.log' logfile = open(ERR_LOG, 'w') is_open = AtomicInteger(int(grinder.getProperties().get('grinder.threads'))) # 需要修改列表名称 param_file = "http_get_test.txt" infile = open(param_file, 'r') keyword_list = [] for line in infile.readlines(): keyword_list.append(line.strip()) infile.close() # 可能需要修改url和headers等值 url = 'https://www.baidu.com' headers = [ NVPair( 'User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'
def onInit():
    # Variables for assertions only.
    rssCounter = AtomicInteger(0)
    sponge.setVariable("receivedRssCount", rssCounter)
def onInit():
    # Variables for assertions only: zeroed counters for both event types.
    for counterName in ("alarms", "notifications"):
        sponge.setVariable(counterName, AtomicInteger(0))
def onInit():
    # Variables for assertions only.
    counters = ("hardwareFailureScriptCount", "sameSourceFirstFireCount")
    for counterName in counters:
        sponge.setVariable(counterName, AtomicInteger(0))
def onInit():
    # Variables for assertions only: one counter per event/function probe.
    probes = ("receivedEventA1", "receivedEventA2", "functionA1", "functionA2")
    for probe in probes:
        sponge.setVariable(probe, AtomicInteger(0))
def onInit():
    # Variables for assertions only: a seen-flag for A, a counter for B.
    sponge.setVariable("receivedEventA", AtomicBoolean(False))
    bCounter = AtomicInteger(0)
    sponge.setVariable("receivedEventBCount", bCounter)
class IdFactory:
    """Hands out sequential integer ids, safe for concurrent callers."""

    def __init__(self):
        # NOTE: AtomicInt is expected to be imported elsewhere in this file.
        self.nextid = AtomicInt()

    def next(self):
        # Atomically return the current id and advance the counter.
        return self.nextid.getAndIncrement()
filePaths.append(imPath) with open(filePathsPath,'w') as f: for path in filePaths: f.write(path + '\n') # pickle.dump(filePaths,f) else: filePaths = [] with open(filePathsPath,'r') as f: lines = f.readlines() for line in lines: filePaths.append(line.replace('\n', '')) # filePaths = pickle.load(f) #Create all the subfolders downSampledEMFolder = fc.mkdir_p(os.path.join(MagCEMFolder, 'MagC_EM_' + factorString, '')) for sectionFolderName in os.walk(EMDataFolder).next()[1]: fc.mkdir_p(os.path.join(downSampledEMFolder, sectionFolderName)) normLocalContrastSize = MagCParameters[namePlugin]['normLocalContrastSize'] # downsample in parallel threads = [] currentLayerPath = os.path.join(MagCEMFolder, 'currentLayer_' + namePlugin + '.txt') currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nTilesAtATime) IJ.log(namePlugin + ' layer ' + str(currentWrittenLayer)) atomicI = AtomicInteger(currentWrittenLayer) fc.startThreads(resizeAndSave, fractionCores = 0.9, wait = 0, arguments = (filePaths, atomicI)) # terminate or rerun if more tiles to be processed time.sleep(1) fc.shouldRunAgain(namePlugin, atomicI.get(), len(filePaths), MagCFolder, '')
# Spring-mesh parameters for the elastic montage step.
params.tilesAreInPlace = True
params.springLengthSpringMesh = 100
params.stiffnessSpringMesh = 0.1
params.maxStretchSpringMesh = 2000
params.maxIterationsSpringMesh = 1000
params.maxPlateauwidthSpringMesh = 200
params.useLegacyOptimizer = True
# params.dampSpringMesh
# params.maxNumThreads
# params.visualize

# Persist which layer is being processed so reruns of this plugin resume
# where the previous invocation stopped.
currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt')
currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime)
# Shared atomic layer index consumed by the elasticMontage worker threads.
l = AtomicInteger(currentWrittenLayer)

# fc.startThreads(elasticMontage(), wait = 1, nThreads = nThreads) /!\ it does not work I do not understand why. Probably a java6 issue because it works in other scripts in java8 ...
# Manual thread fan-out instead; the short sleep staggers thread start-up.
# NOTE(review): reconstructed from a collapsed line — confirm the sleep
# belongs inside the spawn loop rather than after it.
threads = []
for p in range(nThreads):
    thread = Thread(elasticMontage)
    threads.append(thread)
    thread.start()
    time.sleep(0.5)
for thread in threads:
    thread.join()
IJ.log(namePlugin + ' layer ' + str(currentWrittenLayer))
def onInit():
    # Variables for assertions only.
    counter = AtomicInteger(0)
    sponge.setVariable("eventCounter", counter)