def threadDebug(debug):
    if debug:
        if active_count() > 4:
            raise ThreadError("Too many threads.")
        if not all(th.is_alive() for th in [input_, receiver]):
            raise ThreadError("Not all required threads are alive.")
def release(self):
    th = current_thread()
    if th != self._owner:
        raise ThreadError("This lock isn't owned by this thread.")
    self._owner = None
    try:
        logger.debug('Releasing Lock')
        self._queue.get(False)
        return True
    except Empty:
        raise ThreadError('This lock was released already.')
def sync_from_storage(self, timeout: float) -> None:
    """
    Poll the Mongo database for changes and apply any to the bound data_set.

    Args:
        timeout: Stop syncing if collecting an item takes longer than the
            timeout time. The timeout can be -1 (blocking), 0 (non-blocking),
            or >0 (wait at most that many seconds).

    Raises:
        TimeoutError: If the timeout is reached while the storage queue is
            still empty.
    """
    blocking = timeout != 0
    empty_queue = self._update_queue.empty() if timeout == 0 else False
    while not empty_queue:
        try:
            document = self._update_queue.get(
                blocking, timeout if timeout > 0 else None)
        except Empty as e:
            raise TimeoutError from e

        if MongoDataSetIOReader.THREAD_ERROR in document:
            raise ThreadError(
                'Watcher thread has stopped unexpectedly.'
            ) from document[MongoDataSetIOReader.THREAD_ERROR]

        updated_fields = document['updateDescription']['updatedFields']
        adjusted_updates = self._convert_dot_notation_to_dict(updated_fields)
        self._update_data_set(adjusted_updates)

        empty_queue = self._update_queue.empty()
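A brief usage sketch of the three timeout modes the docstring describes; `reader` stands in for an already-constructed instance with a bound data set and is an assumption, not a name from the source.

# reader: hypothetical instance of the class that owns sync_from_storage.
reader.sync_from_storage(timeout=-1)    # block until at least one change arrives, then drain the queue
reader.sync_from_storage(timeout=0)     # apply whatever is already queued, never wait
try:
    reader.sync_from_storage(timeout=2.5)   # wait at most 2.5 s for each item
except TimeoutError:
    pass    # no change arrived in time; the data set is left unchanged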
def release(self):
    """
    Release the lock.

    Raise an exception if the lock is not presently acquired.
    """
    if not self.acquired:
        raise ThreadError()
    self.acquired = False
def assert_no_thread_exceptions():
    yield
    from lcrs_embedded import settings
    if not settings.EXCEPTION_QUEUE.empty():
        logger.error("Ohs nos a subthread failed")
        while not settings.EXCEPTION_QUEUE.empty():
            logger.error(settings.EXCEPTION_QUEUE.get_nowait())
        raise ThreadError("Bye bye")
def setiter(self, iter):
    '''
    Use the iterator iter to step through slices in the backer, instead of
    the default forward sequential iterator.
    '''
    if self.is_alive():
        raise ThreadError('Cannot set iterator while thread is running')
    self._iter = iter
def release(self):
    """
    Release the lock.

    @raise threading.ThreadError: When invoked on an unlocked lock.
    """
    result = self._col.delete_one(self._doc)
    if result.acknowledged and not result.deleted_count:
        raise ThreadError()
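The matching acquire is not part of this snippet. Below is a minimal sketch of the usual pattern for a Mongo-backed lock, assuming the collection has a unique index (e.g. on `_id`) so that a second insert of the same lock document fails; the method body and error message are illustrations, not the source's implementation.

from pymongo.errors import DuplicateKeyError
from threading import ThreadError

def acquire(self):
    """
    Acquire the lock by inserting the lock document.

    @raise threading.ThreadError: When the lock is already held.
    """
    try:
        # Relies on a unique index making the second insert fail.
        self._col.insert_one(self._doc)
    except DuplicateKeyError as exc:
        raise ThreadError('lock is already held') from exc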
def loop(self, forever=False):
    with self._lock:
        if not self._thread_ident:
            self._thread_ident = current_thread().ident
        if self._thread_ident != current_thread().ident:
            # ensure same thread for loop
            raise ThreadError('Executor.loop for one thread only.')
    while (self._step() or forever) and (not self._closed):
        pass
    self._thread_ident = None
def __init__(self, mongo_uri, mongo_db, mongo_coll):
    try:
        self.mongo = Mymongo(mongo_uri).database(mongo_db).collectn(
            mongo_coll)
        self.rlock = RLock()
        self.lock = Lock()
    except ThreadError as the:
        raise ThreadError(the)
    except InvalidURI as iuri:
        raise InvalidURI(iuri)
    except Exception as e:
        raise Exception(e)
def _get_my_tid(self):
    if not self.is_alive():
        raise ThreadError("the thread is not active")

    # Return the cached id if we have already looked it up.
    if hasattr(self, "_thread_id"):
        return self._thread_id

    # Otherwise search the threading module's map of active threads.
    for tid, tobj in _active.items():
        if tobj is self:
            self._thread_id = tid
            return tid

    raise AssertionError("could not determine the thread's id")
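This helper is the first half of the widely circulated "raise an exception in another thread" recipe. A sketch of how the looked-up id is typically consumed follows; the `raise_exc` name and its error handling come from that recipe, not from this snippet, and the trick is CPython-only.

import ctypes

def raise_exc(self, exctype):
    """Asynchronously raise exctype in the thread identified by _get_my_tid()."""
    tid = self._get_my_tid()
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
        ctypes.c_long(tid), ctypes.py_object(exctype))
    if res == 0:
        raise ValueError("invalid thread id")
    if res > 1:
        # More than one thread state was modified: undo and bail out.
        ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
        raise SystemError("PyThreadState_SetAsyncExc failed")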
def main_loop(self):
    log = self.env['record']
    logt = log(10, 'thread')

    # Start every processor on every node.
    for node in self.nodes.values():
        for processor in node:
            print("starting", node, processor, file=logt)
            processor.start()

    with self.env['center'] as center:
        while self.has_more_jobs():
            print(self, "has more jobs and is waiting", file=logt)
            if self.have_no_work_or_workers():
                center.wait_for_avail()
            if center.error:
                raise ThreadError("Another thread signaled error")
            for job_id in center.dump_finished():
                self.pop(job_id)

            # While there are jobs to do and workers to do them,
            # assign a worker some work.
            while self.has_work_and_worker():
                job_id, (node, proc) = self.choose()
                if job_id:
                    self.pull(job_id)
                    # Posting a job also calls "notify" on the thread
                    # waiting for a job to do.
                    print(self, "posting job", file=log(10, 'thread'))
                    center.post(node, proc, job_id)
        # Finished posting jobs.

    print(self, "is done!!!!!", file=log(10, 'thread'))
    # Tell all that no more jobs are being posted.
    center.done = True
    return
def acquire(self, timeout=None):
    # Fall back to the lock's default timeout when none is given.
    if timeout is None:
        timeout = self._timeout
    th = current_thread()
    try:
        logger.debug('Acquiring Lock')
        self._queue.put(th, block=(timeout != 0),
                        timeout=(None if timeout < 0 else timeout))
        logger.debug('Acquired Lock')
    except Full:
        raise ThreadError('Lock Timed Out')
    self._owner = th
    return True
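This acquire is the counterpart of the queue-backed release shown earlier. A usage sketch follows, assuming the enclosing class is called QueueLock, that its constructor takes the default timeout, and that self._queue is a one-slot queue.Queue; all of these are assumptions about the surrounding code, not facts from the snippet.

lock = QueueLock(timeout=5)   # hypothetical constructor: 5 s default timeout
lock.acquire()                # returns True, or raises ThreadError('Lock Timed Out')
try:
    pass                      # critical section; the calling thread is now the owner
finally:
    lock.release()            # raises ThreadError if called by a non-owner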
def gen_token():
    locked = None
    try:
        locked = self.lock.acquire()
        results = self.read_from_mongo({})
        if results.count() < self.token_pool_size:
            pin = random.randint(999, 9999)
            if self.count >= len(self.tokens_pre):
                self.count = 0
            token = self.tokens_pre[self.count] + str(pin)
            self.count += 1
            if self.read_from_mongo({'token': token}).count() > 0:
                return False
            else:
                results = self.mongo.insert({
                    "token": token,
                    "state": 0,
                    "allocation_time": None,
                    "cid": None,
                    "validation_time": time.time()
                })
                if results:
                    return True
                else:
                    return
        else:
            return
    except Exception as e:
        print(e)
    finally:
        try:
            if locked:
                self.lock.release()
        except ThreadError as tde:
            raise ThreadError(tde)
def start_threads(self):
    """ Starts all the threads used by the program """
    """# Testing a Thread to verify its functioning
    try:
        testing_thread = Thread(target=self.test_thread, args=())
        testing_thread.start()
    except ThreadError:
        self.bash.add_str(ThreadError.with_traceback())
        exit(-1)
    """
    # Within this construct all Threads are initialised
    try:
        # Initialization of the Thread
        # self.bash.load_facade("test")
        ctrl_keys = Thread(target=self.thread_ctrl_keys, args=())
        # From this point all Threads start
        # ctrl_keys.start()
    except ThreadError as err:
        # Report the failure through the UI wrapper instead of crashing.
        self.bash.add_str(str(err))