class ThreadFactorySameGroup(ThreadFactory):
    """ThreadFactory that spawns named threads inside the creating thread's group.

    Thread names are "<name>-<sequence>" with a monotonically increasing,
    atomically allocated sequence number.
    """

    def __init__(self, name):
        self.name = name
        # Every thread this factory produces is placed in the group of
        # the thread that constructed the factory.
        self.group = Thread.currentThread().getThreadGroup()
        self.counter = AtomicInteger(0)

    def newThread(self, runnable):
        """Create and return a normal-priority thread wrapping *runnable*."""
        seq = self.counter.incrementAndGet()
        thread_name = "{0}-{1}".format(self.name, seq)
        worker = Thread(self.group, runnable, thread_name)
        worker.setPriority(Thread.NORM_PRIORITY)
        return worker
class Counter(object):
    """Integer counter delegating all mutation to a Java AtomicInteger."""

    def __init__(self, initial=0):
        self.atomic = AtomicInteger(initial)
        # Sleep briefly so that concurrent defaultdict factory invocations
        # get a chance to overlap — the race is exercised deliberately.
        time.sleep(0.001)

    def decrementAndGet(self):
        """Atomically subtract one and return the new value."""
        return self.atomic.decrementAndGet()

    def incrementAndGet(self):
        """Atomically add one and return the new value."""
        return self.atomic.incrementAndGet()

    def get(self):
        """Return the current value without modifying it."""
        return self.atomic.get()

    def __repr__(self):
        return "Counter<{0}>".format(self.atomic.get())
class Counter(object):
    """Thread-safe counter backed by a Java AtomicInteger.

    NOTE(review): this is a byte-for-byte duplicate of the Counter class
    defined earlier in the file; the later definition shadows the former.
    """

    def __init__(self, initial=0):
        self.atomic = AtomicInteger(initial)
        # The short sleep is intentional: it widens the window in which
        # competing defaultdict factories can step on each other.
        time.sleep(0.001)

    def decrementAndGet(self):
        """Atomically decrement and return the updated value."""
        return self.atomic.decrementAndGet()

    def incrementAndGet(self):
        """Atomically increment and return the updated value."""
        return self.atomic.incrementAndGet()

    def get(self):
        """Read the current value."""
        return self.atomic.get()

    def __repr__(self):
        return "Counter<{0}>".format(self.atomic.get())
class Library:
    """In-memory book collection backed by a thread-safe Java list.

    Books are stored in a CopyOnWriteArrayList; ids are allocated from an
    AtomicInteger. When `readOnly` is True, mutating operations become
    no-ops unless explicitly forced.
    """

    def __init__(self):
        self.books = CopyOnWriteArrayList()
        self.currentId = AtomicInteger(0)
        self.readOnly = False

    def addBook(self, author, title, cover=None, force=False):
        """Add a new book.

        Raises Exception if a book with the same author and title already
        exists. Honors `readOnly` unless `force` is True.
        """
        if any(book.author == author and book.title == title
               for book in self.books):
            raise Exception("This book has already been added to the library")
        if not self.readOnly or force:
            self.books.add(
                Book(self.currentId.incrementAndGet(), author, title, cover))

    def getBook(self, bookId):
        """Return the book with the given id; raises IndexError if absent.

        BUGFIX: the original used `filter(...)[0]`, which only works under
        Python 2/Jython where filter returns a list — on Python 3 it is a
        TypeError (filter returns an iterator). A list comprehension is
        correct under both, and preserves IndexError for a missing id.
        """
        return [book for book in self.books if book.id == bookId][0]

    def updateBook(self, bookId, author, title, cover=None):
        """Overwrite author/title (and cover, if given) of an existing book."""
        book = self.getBook(bookId)
        if not self.readOnly:
            book.author = author
            book.title = title
            if cover:
                book.cover = cover

    def removeBook(self, bookId):
        """Remove the book with the given id, unless the library is read-only."""
        if not self.readOnly:
            self.books.removeIf(PyPredicate(lambda book: book.id == bookId))

    def findBooks(self, searchString):
        """Return books whose author or title matches the case-insensitive
        regex `searchString`; None matches every book."""
        if searchString is None:
            return list(self.books)
        pattern = searchString.upper()  # hoist the invariant out of the loop
        return [book for book in self.books
                if re.search(pattern, book.author.upper())
                or re.search(pattern, book.title.upper())]

    def getAuthors(self):
        """Return the distinct authors, sorted case-insensitively."""
        return sorted({book.author for book in self.books},
                      key=lambda author: author.lower())
class NsServerNumConcurrentRequests(UserResourceTask):
    """Drives a target number of concurrent streaming HTTP connections
    against ns_server and keeps them alive from a background poll thread.

    Synchronization protocol: the caller writes the desired connection
    count into `no_of_connections` (atomic), then busy-waits until the
    poll thread bumps `no_of_throughput_updates` to acknowledge that the
    connection list has been resized to match.
    """

    def __init__(self, user, node, streaminguri):
        super(NsServerNumConcurrentRequests, self).__init__(user, node)
        self.nconns = []  # Belongs exclusively to poll method
        self.thread = None
        # Pre-built HTTP request bytes replayed on every new connection.
        self.httprq = get_http_request(self.node.ip, streaminguri,
                                       self.node.rest_username,
                                       self.node.rest_password)
        # Desired number of connections (written by caller, read by poll).
        self.no_of_connections = AtomicInteger(0)
        # Connections currently confirmed open (written by poll).
        self.no_of_open_connections = AtomicInteger(0)
        # Handshake counter: poll increments it after each resize.
        self.no_of_throughput_updates = AtomicInteger(0)
        self.rest = RestConnection(self.node)

    def on_throughput_increase(self, throughput):
        """Raise the target connection count and wait for the poll thread
        to acknowledge. NOTE(review): `self.throughput` is presumably
        maintained by UserResourceTask — confirm against the base class."""
        log.debug("Increasing throughput by {}".format(throughput - self.throughput))
        # Record the last throughput update
        last_throughput_update = self.no_of_throughput_updates.get()
        # Update the throughput
        self.no_of_connections.set(throughput)
        # Launch the thread
        if self.thread is None:
            self.thread = Thread(target=self.poll)
            self.thread.start()
        # Block until the update has gone (TODO add a timeout)
        # NOTE(review): this is a spinning busy-wait with no sleep — it
        # burns a core until the poll thread acknowledges.
        while self.no_of_throughput_updates.get() <= last_throughput_update:
            continue

    def on_throughput_decrease(self, throughput):
        """Lower the target connection count; when dropping to zero, also
        join and discard the poll thread (poll exits once both the target
        and the live connection list reach zero)."""
        log.debug("Decreasing throughput by {}".format(self.throughput - throughput))
        # Record the last throughput update
        last_throughput_update = self.no_of_throughput_updates.get()
        # Update the throughput
        self.no_of_connections.set(throughput)
        if self.thread and throughput == 0:
            self.thread.join()
            self.thread = None
        # Block until the update has gone (TODO add a timeout)
        while self.no_of_throughput_updates.get() <= last_throughput_update:
            continue

    def get_throughput_success(self):
        """Return the number of connections currently confirmed open."""
        return self.no_of_open_connections.get()

    def poll(self):
        """ Repeatedly poll each connection and attempt to keep them alive """
        no_of_conns = self.no_of_connections.get()
        # Keep running while there is either a non-zero target or live
        # connections still to be torn down.
        while no_of_conns > 0 or len(self.nconns) > 0:
            update_nconns = no_of_conns != len(self.nconns)
            if update_nconns:
                # Add any new connections
                for i in range(no_of_conns - len(self.nconns)):
                    self.nconns.append(
                        NonBlockingConnection(self.node.ip, 8091,
                                              self.httprq))
                # Disconnect the connections that need to be closed
                for conn in self.nconns[no_of_conns:]:
                    conn.disconnect()
                # Delete the disconnected connections
                del self.nconns[no_of_conns:]
            # Poll and count open connections
            open_count = 0
            for conn in self.nconns:
                if conn.poll():
                    open_count += 1
            # Update the number of open connections
            self.no_of_open_connections.set(open_count)
            # Notify the main thread that the connections have been updated
            if update_nconns:
                self.no_of_throughput_updates.incrementAndGet()
            no_of_conns = self.no_of_connections.get()

    def error(self):
        """Return the body/error component of a GET on /pools/default."""
        return self.rest._http_request(self.rest.baseUrl + "/pools/default")[1]

    def expected_error(self):
        """The server-side message expected once the request limit is hit."""
        return 'Limit(s) exceeded [num_concurrent_requests]'