Example #1
    class Counter(object):
        def __init__(self, initial=0):
            self.atomic = AtomicInteger(initial)
            # waiting is important here to ensure that
            # defaultdict factories can step on each other
            time.sleep(0.001)

        def decrementAndGet(self):
            return self.atomic.decrementAndGet()

        def incrementAndGet(self):
            return self.atomic.incrementAndGet()

        def get(self):
            return self.atomic.get()

        def __repr__(self):
            return "Counter<%s>" % (self.atomic.get())
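The sleep in __init__ hints at how this class is meant to be exercised: several threads hitting a defaultdict(Counter) at once, so that competing factory calls overlap. A minimal sketch of such a stress test, reusing the Counter above (the thread count, key name, and iteration count are my own choices):

    import threading
    from collections import defaultdict

    counters = defaultdict(Counter)  # each missing key builds a fresh Counter

    def worker():
        for _ in range(1000):
            counters["hits"].incrementAndGet()

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(counters["hits"].get())  # 4000 if the increments are atomic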
Example #2
def fetch(self):
    """Fetch every requested (variable, t, z) slice concurrently and assemble a GridDataSet."""
    totalFetchDataSize = self.calcDataSize(len(self.variables))
    if totalFetchDataSize == 0:
        raise RuntimeException("no data to fetch")
    if totalFetchDataSize > dataSizeLimitForFetch:
        raise RuntimeException("exceed the max data limit for fetch")
    dataSet = GridDataSet(self.meta)
    # One latch slot per (variable, t, z) slice; fetch() blocks until all report.
    latch = CountDownLatch(len(self.variables) * self.tRange.getSize() * self.zRange.getSize())
    exceptions = ConcurrentLinkedQueue()  # failures parked by worker tasks
    counter = AtomicInteger()             # successfully completed tasks
    taskCount = 0
    curPos = 0  # write offset into the shared buffer, advanced on this thread only
    for variable in self.variables:
        data = Grid4D(buffer_, self.meta.getDataType(), self.getOrigin(), self.getShape())
        dataSet.addVariable(variable, data)
        # curPos and taskCount are not thread-safe, so they are only touched
        # here on the submitting thread, never inside a task.
        t = self.tRange.getStart()  # assumes the range exposes getStart()
        while t < self.tRange.getEnd():
            z = self.zRange.getStart()
            while z < self.zRange.getEnd():
                self.addTask(counter, data, curPos, variable, t, z, latch, exceptions)
                curPos += self.xRange.getSize() * self.yRange.getSize() * self.meta.getDataType().getSize()
                taskCount += 1
                z += 1
            t += 1
    latch.await()
    if not exceptions.isEmpty():
        raise exceptions.peek()
    if counter.get() != taskCount:
        raise RuntimeException("not all tasks succeeded")
    return dataSet
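addTask itself is not shown, but the latch/counter/queue trio pins down its contract: every task must count down the latch no matter what happens, bump the counter only on success, and park any exception for fetch() to re-raise. A minimal sketch under those assumptions (the readSlice helper and the use of plain threads are mine; the original may well submit to an executor instead):

    from threading import Thread

    def addTask(self, counter, data, curPos, variable, t, z, latch, exceptions):
        def job():
            try:
                self.readSlice(data, curPos, variable, t, z)  # hypothetical slice reader
                counter.incrementAndGet()   # one success per completed slice
            except Exception as e:
                exceptions.add(e)           # re-raised after latch.await() in fetch()
            finally:
                latch.countDown()           # fetch() unblocks once every task reports
        Thread(target=job).start()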
Пример #4
0
currentLayerPath = os.path.join(os.path.dirname(projectPath), 'currentLayer_' + namePlugin + '.txt')
currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nLayersAtATime)
l = AtomicInteger(currentWrittenLayer)

# fc.startThreads(elasticMontage(), wait = 1, nThreads = nThreads) /!\ does not work here, reason unclear; probably a Java 6 issue, since the same call works in other scripts under Java 8 ...

threads = []
for p in range(nThreads):
	thread = Thread(elasticMontage)
	threads.append(thread)
	thread.start()
	time.sleep(0.5)
	
for thread in threads:
	thread.join()


IJ.log(namePlugin + ' layer ' + str(currentWrittenLayer))
fc.resizeDisplay(layerset)
project.save()

IJ.log('Sleeping in case the saving of the large project takes some time ...')
time.sleep(20)

# save all transforms
transformsPath = os.path.join(os.path.dirname(projectPath), namePlugin + '_Transforms.txt')
if l.get() > nLayers-1:
	fc.writeAllAffineTransforms(project, transformsPath)

fc.shouldRunAgain(namePlugin, currentWrittenLayer, nLayers, MagCFolder, project, increment = nLayersAtATime)
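elasticMontage is defined elsewhere in the script; the AtomicInteger l above suggests the usual Jython work-claiming shape, where each thread takes layer indices with getAndIncrement() until the current batch is exhausted. A sketch under that assumption (the alignment body itself is elided):

    def elasticMontage():
        while True:
            k = l.getAndIncrement()  # atomically claim the next layer index
            if k >= min(currentWrittenLayer + nLayersAtATime, nLayers):
                break  # this batch of layers is done
            layer = layerset.getLayers().get(k)
            # ... run the elastic montage on this layer ...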
Example #4
if not os.path.isfile(filePathsPath):  # assumed guard; the original snippet starts mid-branch
	with open(filePathsPath,'w') as f:
		for path in filePaths:
			f.write(path + '\n')
	# pickle.dump(filePaths,f)
else:
	filePaths = []
	with open(filePathsPath,'r') as f:
		lines = f.readlines()
		for line in lines:
			filePaths.append(line.replace('\n', ''))
	# filePaths = pickle.load(f)


#Create all the subfolders
downSampledEMFolder = fc.mkdir_p(os.path.join(MagCEMFolder, 'MagC_EM_' + factorString, ''))
for sectionFolderName in os.walk(EMDataFolder).next()[1]:
	fc.mkdir_p(os.path.join(downSampledEMFolder, sectionFolderName))

normLocalContrastSize = MagCParameters[namePlugin]['normLocalContrastSize']
# downsample in parallel
threads = []
currentLayerPath = os.path.join(MagCEMFolder, 'currentLayer_' + namePlugin + '.txt')
currentWrittenLayer = fc.incrementCounter(currentLayerPath, increment = nTilesAtATime)
IJ.log(namePlugin + ' layer ' + str(currentWrittenLayer))
atomicI = AtomicInteger(currentWrittenLayer)
fc.startThreads(resizeAndSave, fractionCores = 0.9, wait = 0, arguments = (filePaths, atomicI))

# terminate, or rerun if more tiles need to be processed
time.sleep(1)
fc.shouldRunAgain(namePlugin, atomicI.get(), len(filePaths), MagCFolder, '')
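resizeAndSave follows the same getAndIncrement() claim-loop shape as the previous example. A hedged sketch using only core ImageJ calls (downsamplingFactor is a placeholder, the real helper presumably also applies local contrast normalization with normLocalContrastSize, and the per-section subfolders are flattened here for brevity):

    from ij import IJ, ImagePlus

    def resizeAndSave(filePaths, atomicI):
        while atomicI.get() < len(filePaths):
            k = atomicI.getAndIncrement()  # atomically claim the next tile index
            if k >= len(filePaths):
                break
            imp = IJ.openImage(filePaths[k])
            small = imp.getProcessor().resize(int(imp.getWidth() / downsamplingFactor))
            IJ.save(ImagePlus(imp.getTitle(), small),
                    os.path.join(downSampledEMFolder, os.path.basename(filePaths[k])))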
Example #5
class NsServerNumConcurrentRequests(UserResourceTask):
    def __init__(self, user, node, streaminguri):
        super(NsServerNumConcurrentRequests, self).__init__(user, node)
        self.nconns = []  # Belongs exclusively to poll method
        self.thread = None
        self.httprq = get_http_request(self.node.ip, streaminguri,
                                       self.node.rest_username,
                                       self.node.rest_password)
        self.no_of_connections = AtomicInteger(0)
        self.no_of_open_connections = AtomicInteger(0)
        self.no_of_throughput_updates = AtomicInteger(0)
        self.rest = RestConnection(self.node)

    def on_throughput_increase(self, throughput):
        log.debug("Increasing throughput by {}".format(throughput -
                                                       self.throughput))

        # Record the last throughput update
        last_throughput_update = self.no_of_throughput_updates.get()

        # Update the throughput
        self.no_of_connections.set(throughput)

        # Launch the thread
        if self.thread is None:
            self.thread = Thread(target=self.poll)
            self.thread.start()

        # Block until the update has gone through (TODO: add a timeout)
        while self.no_of_throughput_updates.get() <= last_throughput_update:
            continue

    def on_throughput_decrease(self, throughput):
        log.debug("Decreasing throughput by {}".format(self.throughput -
                                                       throughput))

        # Record the last throughput update
        last_throughput_update = self.no_of_throughput_updates.get()

        # Update the throughput
        self.no_of_connections.set(throughput)

        if self.thread and throughput == 0:
            self.thread.join()
            self.thread = None

        # Block until the update has gone through (TODO: add a timeout)
        while self.no_of_throughput_updates.get() <= last_throughput_update:
            continue

    def get_throughput_success(self):
        return self.no_of_open_connections.get()

    def poll(self):
        """ Repeatedly poll each connection and attempt to keep them alive """
        no_of_conns = self.no_of_connections.get()

        while no_of_conns > 0 or len(self.nconns) > 0:
            update_nconns = no_of_conns != len(self.nconns)

            if update_nconns:
                # Add any new connections
                for i in range(no_of_conns - len(self.nconns)):
                    self.nconns.append(
                        NonBlockingConnection(self.node.ip, 8091, self.httprq))
                # Disconnect the connections that need to be closed
                for conn in self.nconns[no_of_conns:]:
                    conn.disconnect()
                # Delete the disconnected connections
                del self.nconns[no_of_conns:]

            # Poll and count open connections
            open_count = 0
            for conn in self.nconns:
                if conn.poll():
                    open_count += 1

            # Update the number of open connections
            self.no_of_open_connections.set(open_count)

            # Notify the main thread that the connections have been updated
            if update_nconns:
                self.no_of_throughput_updates.incrementAndGet()

            no_of_conns = self.no_of_connections.get()

    def error(self):
        return self.rest._http_request(self.rest.baseUrl + "/pools/default")[1]

    def expected_error(self):
        return 'Limit(s) exceeded [num_concurrent_requests]'
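The two atomics implement a small handshake: the caller writes the target into no_of_connections, and poll() answers by bumping no_of_throughput_updates once the connection list matches, which releases the busy-wait in the on_throughput_* methods. A hypothetical driver (the constructor arguments are placeholders for objects built elsewhere):

    task = NsServerNumConcurrentRequests(user, node, streaminguri)
    task.on_throughput_increase(10)       # starts poll() and blocks until 10 connections exist
    print(task.get_throughput_success())  # how many of them are actually open
    task.on_throughput_decrease(0)        # closes them all and joins the poll thread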
class AbstractTimedThroughputWorker(object):
    """ Send throughput across a time period. """
    def __init__(self, period=60, chunks=100, throughput=0):
        self.period = period  # The time period over which to produce throughput
        self.chunks = chunks
        self.throughput = AtomicInteger(throughput)
        self.throughput_success = AtomicInteger(0)
        self.resume = AtomicBoolean(True)
        self.curr_tick = AtomicInteger(0)

    def stop(self):
        self.resume.set(False)
        self.thread.join()

    def start(self):
        self.thread = Thread(target=self.loop)
        self.thread.start()

    def loop(self):
        while self.resume.get():
            self.throughput_success.set(self.tick(self.throughput.get()))

    def action(self, throughput):
        """ Fire a single chunk of throughput; subclasses must override this.

        Args:
            throughput (int): The throughput in bytes

        Returns (bool): Indicating success.
        """
        raise NotImplementedError("Please implement this method")

    def next_tick(self, period):
        """ Returns the next multiple of a time period """
        curr_time = time.time()
        return curr_time + (period - curr_time % period)

    def tick(self, throughput):
        """ Fires throughput over this time period """
        # The next 60 second time period
        next_tick = self.next_tick(self.period)

        # Every mini_tick, we will fire a throughput of this size
        throughput_per_chunk = throughput / self.chunks

        # The size of a mini_tick (e.g. 0.6 seconds)
        mini_tick_period = self.period / float(self.chunks)

        chunks_sent, successes = 0, 0
        while time.time() < next_tick and chunks_sent < self.chunks:
            if not self.resume.get():
                break
            # Fire action and record time taken
            # time_taken, success = time_it(self.action, throughput_per_chunk)
            success = self.action(throughput_per_chunk)

            # Count successes
            if success:
                successes += 1

            chunks_sent += 1

            # The time remaining to reach the next mini tick
            time_till_next_mini_tick = max(
                0,
                self.next_tick(mini_tick_period) - time.time())

            # sleep to next mini tick to ensure actions happen evenly
            time.sleep(time_till_next_mini_tick)

        return successes * throughput_per_chunk
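action() is the only abstract piece; the pacing, chunking, and resume flag are all inherited. A minimal concrete worker, purely illustrative, reusing the module's existing time import (the byte-sink is simulated):

    class PrintingThroughputWorker(AbstractTimedThroughputWorker):
        def action(self, throughput):
            # a real worker would push `throughput` bytes at its target here
            print("sending %s bytes" % throughput)
            return True

    worker = PrintingThroughputWorker(period=60, chunks=100, throughput=1024)
    worker.start()   # spreads ~1024 bytes evenly across each 60-second period
    time.sleep(120)  # let it run for roughly two periods
    worker.stop()    # clears the AtomicBoolean and joins the loop thread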