def __init__(self, maxth=None, skip=lambda dt: dt.skip, output=None):
    """
    Initialize a DTestQueue.

    :param maxth: Either None or an integer specifying the maximum
                  number of simultaneous threads permitted.
    :param skip: A function taking a test and returning True if the
                 test should be skipped.
    :param output: An instance of DTestOutput containing a notify()
                   method, which takes a test and the state to which
                   it is transitioning, and may use that information
                   to emit a test result.  If None (the default), a
                   fresh DTestOutput is created for this queue.

    Note that the notify() method will receive state transitions to
    the RUNNING state, as well as state transitions for test
    fixtures; callers may find the DTestBase.istest() method useful
    for differentiating between regular tests and test fixtures for
    reporting purposes.
    """
    # Save our maximum thread count
    if maxth is None:
        self.sem = None
    else:
        self.sem = Semaphore(maxth)

    # Need to remember the skip routine
    self.skip = skip

    # Fix: the original signature used ``output=DTestOutput()``, a
    # default evaluated once at definition time and silently shared
    # by every queue.  Use a None sentinel and build a fresh
    # DTestOutput per queue instead.
    self.output = output if output is not None else DTestOutput()

    # Initialize the lists of tests
    self.tests = set()
    self.waiting = None
    self.runlist = set()

    # No initial resource manager...
    self.res_mgr = resource.ResourceManager()

    # Need locks for the waiting and runlist lists
    self.waitlock = Semaphore()
    self.runlock = Semaphore()

    # Set up some statistics...
    self.th_count = 0
    self.th_event = Event()
    self.th_simul = 0
    self.th_max = 0

    # Place to keep any exceptions we encounter within dtest itself
    self.caught = []

    # We're not yet running
    self.running = False
def __init__(self, func, *args, **kwargs): self.my_sem = Semaphore(0) # This is held by the thread as it runs. self.caller_sem = None self.dead = False started = Event() self.id = 5 self.ALL.append(self) def go(): self.id = eventlet.corolocal.get_ident() started.send(True) self.my_sem.acquire(blocking=True, timeout=None) try: func(*args, **kwargs) # except Exception as e: # print("Exception in coroutine! %s" % e) finally: self.dead = True self.caller_sem.release() # Relinquish control back to caller. for i in range(len(self.ALL)): if self.ALL[i].id == self.id: del self.ALL[i] break true_spawn(go) started.wait()
def run(self): """Starts up the thread. Should be called from a different thread.""" # Don't call this from the thread which it represents. assert eventlet.corolocal.get_ident() != self.id self.caller_sem = Semaphore(0) self.my_sem.release() self.caller_sem.acquire() # Wait for it to finish.
def __init__(self, controllercls, connection, exchange, topic,
             pool=None, poolsize=1000):
    """Set up one service node: its controller, green pool, and the
    topic/fanout queues it will consume from."""
    self.nodeid = UIDGEN()

    # Reuse the caller's pool when given, otherwise create our own.
    self.procpool = pool if pool is not None else GreenPool(size=poolsize)

    self.connection = connection
    self.controller = controllercls()
    self.topic = topic
    self.greenlet = None
    self.messagesem = Semaphore()
    self.consume_ready = Event()

    # Listen on the shared topic, a node-private topic, and the fanout.
    node_topic = "{}.{}".format(self.topic, self.nodeid)
    self.queues = [
        entities.get_topic_queue(exchange, topic),
        entities.get_topic_queue(exchange, node_topic),
        entities.get_fanout_queue(topic),
    ]

    self._channel = None
    self._consumers = None
def __init__(self, datapath, src_ip, dst_ip, src_port, dst_port, seq,
             direction, in_port=None, src_mac=None, dst_mac=None,
             tcp_opts=None, pkt=None):
    """Track the state of a single TCP session on *datapath*."""
    self.datapath = datapath

    # One slot per timer kind; nothing is scheduled yet.
    self.timers = dict.fromkeys(
        ('retransmission', 'timeout', 'keepalive', 'keepalive_interval'))

    # Endpoint addressing.
    self.src_mac = src_mac
    self.dst_mac = dst_mac
    self.src_ip = src_ip
    self.src_port = src_port
    self.dst_ip = dst_ip
    self.dst_port = dst_port

    # Transfer bookkeeping.
    self.last_sent_chunk_size = 0
    self.sent_acked = False
    self.received_acked = False
    self.lastRetransmission = self.RETRANSMISSION_TIMER
    self.retransmissionRetries = 0

    # Event signalling.
    self.inEvent = Semaphore()
    self.event = None

    # Keepalive bookkeeping.
    self.keepalive_sent = False
    self.keepalive_count = 0

    # Inbound sessions start in LISTEN with sequence numbers derived
    # from the peer's initial seq; outbound setup happens elsewhere.
    if direction == self.DIRECTION_INBOUND:
        self.in_port = in_port
        self.direction = self.DIRECTION_INBOUND
        self.state = self.STATE_LISTEN
        self.source_seq = seq
        self.dst_seq = self._generate_seq()
        self.last_received_seq = seq
        self.last_sent_seq = self.dst_seq

    self.tcp_opts = tcp_opts
    self.initial_pkt = pkt
def on_join(data):
    """Register a newly connected client and start its worker tasks.

    :param data: Payload from the client; ``data["job"]`` selects the
                 job this connection will play.
    """
    # Fix: original used a Python 2 ``print`` statement, which is a
    # syntax error under Python 3.
    print("new client connecting")

    # Per-connection coordination primitives, both starting blocked.
    play_sema = Semaphore(0)
    replay_sema = Semaphore(0)

    # Build the connection record in one step instead of mutating the
    # shared table field by field (avoids other tasks observing a
    # half-initialised entry).
    conn = {
        "job": data["job"],
        "curr_p": 1,
        "client_stop_flag": False,
        "play_stop_flag": False,
        "exit_flag": False,
        "play_sema": play_sema,
        "replay_sema": replay_sema,
    }
    conn_list[request.sid] = conn

    # Spawn the background workers and remember their handles.
    conn["pl"] = socketio.start_background_task(play_thread, request.sid)
    conn["repl"] = socketio.start_background_task(replay_thread, request.sid)
def __init__(self, **kwargs):
    """Initialise the service engine and schedule session garbage
    collection."""
    self.sessions = []
    # First garbage-collection sweep fires one second from now.
    self.garbageLoop = hub.spawn_after(1, self._garbageCollector)
    super(ServiceEngine, self).__init__(**kwargs)
    # Engine identity and per-engine state.
    self.type = 'se'
    self.handover = None
    self.rsttcp = None
    # Guards shared session state.
    self.lock = Semaphore()
def __init__(self, min_size=0, max_size=4, track_events=False):
    """Create a process pool bounded at *max_size* concurrent procs.

    :raises ValueError: if *min_size* exceeds *max_size*.
    """
    if min_size > max_size:
        raise ValueError('min_size cannot be bigger than max_size')
    self.max_size = max_size
    # The semaphore enforces the concurrency ceiling.
    self.sem = Semaphore(max_size)
    self.procs = proc.RunningProcSet()
    # Only collect results when the caller asked for event tracking.
    self.results = coros.queue() if track_events else None
def __init__(self, root, freeze=True):
    """Prepare a compiler rooted at *root*; *freeze* controls whether
    the result is frozen after compilation."""
    self._root = root
    self._history = set()
    self._freeze = freeze
    self._lock = Semaphore()
    self._compilation = None
    self._level = 0
    # Type -> compiler dispatch table, checked in order.
    flow_compiler = FlowCompiler(self._compile)
    self._matchers = [
        (flow.Flow, flow_compiler),
        (task.Task, TaskCompiler()),
    ]
def __init__(self, listener, profiler=None, interval=INTERVAL, log=LOG,
             pickle_protocol=PICKLE_PROTOCOL, **server_kwargs):
    # Cooperative multiple inheritance is not used here: each base is
    # initialised explicitly with its own arguments.
    StreamServer.__init__(self, listener, **server_kwargs)
    ProfilingServer.__init__(self, profiler, interval, log, pickle_protocol)
    # Serialises access to the shared profiler across connections.
    self.lock = Semaphore()
def prepare(self):
    """
    Ready the LimitedParallelStrategy to spawn a set of tests.

    Performs everything UnlimitedParallelStrategy.prepare() does, then
    creates a semaphore sized to ``self.limit`` so that no more than
    that many test threads may execute at once.
    """
    # Let the unlimited-parallel base class do its setup first.
    super(LimitedParallelStrategy, self).prepare()

    # Cap concurrency with a counting semaphore.
    self.limit_sem = Semaphore(self.limit)
def __init__(self, app: flask.Flask, dsn: str) -> None:
    """Bind to *app*, open the listening connection, and start
    listening for events on *dsn*."""
    self.app = app
    self.dsn = dsn
    self.listeners: EventListeners = {}
    self.conn = self._new_connection()
    self.debug = True  # set for more verbosity
    self.listen_greenthread = None
    # Serialises use of the shared connection.
    self.conn_sem = Semaphore()
    self.read_timeout = 0.3
    self.cursor = self.conn.cursor()
    self.listen()
def __init__(self, sock, environ, protocols=None, extensions=None):
    """Wrap an accepted websocket *sock* with its WSGI *environ* and
    negotiated protocols/extensions."""
    self.stream = Stream()
    self.protocols = protocols
    self.extensions = extensions
    self.environ = environ
    # 30-second timeout so a stalled peer cannot hang us forever.
    sock.settimeout(30.0)
    self.sock = sock
    # Close-handshake state for each side.
    self.client_terminated = False
    self.server_terminated = False
    self._lock = Semaphore()
def __init__(self, env: GNEnvironment):
    """Set up heartbeat bookkeeping and schedule the first sweep."""
    self._lock = Semaphore(value=1)
    self.env = env
    self.to_check = dict()
    self.heartbeat_sids = set()

    # Timeouts come from configuration, with sane fallbacks.
    conf = env.config
    self.expire_second = conf.get(
        ConfigKeys.TIMEOUT, domain=ConfigKeys.HEARTBEAT, default=300)
    self.sleep_time = conf.get(
        ConfigKeys.INTERVAL, domain=ConfigKeys.HEARTBEAT, default=20)

    # First loop iteration runs ten seconds from now.
    eventlet.spawn_after(func=self.loop, seconds=10)
def prepare(self):
    """
    Ready the UnlimitedParallelStrategy to spawn a set of tests.

    Zeroes the in-flight counter and clears the completion-event slot,
    then records where status output for this batch should go.
    """
    # Counter of outstanding tests plus its guard; the event is created
    # lazily when someone waits for completion.
    self.count = 0
    self.lock = Semaphore()
    self.event = None

    # Remember the status stream's output and test for later reporting.
    self.output = dtest.status.output
    self.test = dtest.status.test
def __init__(self, driver):
    """Create the transport, holding *driver* only weakly, and run the
    first initialisation."""
    # A weak proxy breaks the transport<->driver reference cycle so
    # both objects can be collected.
    self.driver = weakref.proxy(driver)

    # etcd client and leader elector are built in _initialise().
    self.client = None
    self.elector = None

    # Prevents concurrent re-initialisations that could leave the
    # client and elector inconsistent with each other.
    self._init_lock = Semaphore()
    self._init_count = 0

    self._initialise()
def __init__(self, conf, threadpool, infoget):
    """Set up the on-disk file cache and its sqlite metadata store."""
    cache_dir = conf.filecache
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir, 0o755)
    self.path = os.path.join(cache_dir, 'files')

    self.threadpool = threadpool
    self.infoget = infoget
    self.localfiles = {}
    self.downloading = {}
    self.lock = Semaphore()

    # Open (or create) the sqlite session backing file metadata.
    db_path = os.path.join(cache_dir, 'filemanager.db')
    engine = create_engine(sql_connection='sqlite:///%s' % db_path,
                           logging_name='filemanager')
    if not engine.has_table(models.FileDetail.__tablename__):
        # First run: create the schema.
        models.FileManagerTables.metadata.create_all(engine)
    session_maker = get_maker(engine)
    self.session = session_maker()
def __init__(self, env, is_external_queue: bool, queue_type: str, logger):
    """Prepare a publisher for either the internal or external queue."""
    self._lock = Semaphore(value=1)
    self.env = env
    self.logger = logger
    self.queue_type = queue_type

    # De-duplication bookkeeping for externally-published messages.
    self.recently_sent_external_hash = set()
    self.recently_sent_external_list = list()

    # Select the configuration domain and label for this queue kind.
    self.is_external_queue = is_external_queue
    if is_external_queue:
        self.domain_key = ConfigKeys.EXTERNAL_QUEUE
        self.message_type = 'external'
    else:
        self.domain_key = ConfigKeys.QUEUE
        self.message_type = 'internal'

    # Connection objects are created lazily elsewhere.
    self.queue_connection = None
    self.queue = None
    self.exchange = None
def test_exceptionleaks(self):
    # tests expected behaviour with all versions of greenlet

    def test_gt(sem):
        # Raise and catch inside the child greenthread, then switch to
        # the hub while the handled exception is still current for this
        # frame — this is the state that used to leak across switches.
        try:
            raise KeyError()
        except KeyError:
            sem.release()
            hubs.get_hub().switch()

    # semaphores for controlling execution order
    sem = Semaphore()
    sem.acquire()
    g = eventlet.spawn(test_gt, sem)
    try:
        # Block until the child has raised, caught, and switched away.
        sem.acquire()
        # Back in the main greenthread: the child's handled exception
        # must not be visible here.
        assert sys.exc_info()[0] is None
    finally:
        g.kill()
def __init__(self, game_state, user_codes):
    """Hold the shared game state and per-user code, guarded by a
    lock."""
    self._lock = Semaphore()
    self._game_state = game_state
    self._user_codes = user_codes
def lock(self):
    """Hand out a brand-new semaphore; every caller gets an
    independent lock."""
    sem = Semaphore()
    return sem
class TaskQueue(object):
    """Process-shared task/result queues with a crude singleton accessor.

    All operations take ``semaphore`` then ``lock`` before touching a
    queue; every put/get is non-blocking and failures are silently
    swallowed.  NOTE(review): ``empty()``/``qsize()`` on
    multiprocessing queues are only approximate, so several methods
    below are best-effort by construction.
    """

    # Lazily-created singleton; see getTaskQueue().
    tasker = None
    # Bounded queues shared between producer and consumer processes.
    taskQueue = multiprocessing.Queue(5000)
    resultQueue = multiprocessing.Queue(5000)
    semaphore = Semaphore()
    lock = multiprocessing.Lock()

    @staticmethod
    def getTaskQueue():
        # Create the singleton on first use.
        if TaskQueue.tasker is not None:
            return TaskQueue.tasker
        TaskQueue.tasker = TaskQueue()
        return TaskQueue.tasker

    def addTask(self, item):
        # Best-effort enqueue; a full queue drops the item silently.
        self.semaphore.acquire()
        self.lock.acquire()
        try:
            self.taskQueue.put(item, False)
        except:
            pass
        self.lock.release()
        self.semaphore.release()

    def frontTask(self):
        # Pop (and remove) the head task, or None when empty.
        item = None
        self.semaphore.acquire()
        self.lock.acquire()
        if not self.taskQueue.empty():
            try:
                item = self.taskQueue.get(False)
            except:
                pass
        self.lock.release()
        self.semaphore.release()
        return item

    def getFirstTask(self):
        # Peek at the head task by draining the queue and re-queuing
        # everything in order.
        self.semaphore.acquire()
        self.lock.acquire()
        #print '******',self.taskQueue.empty()
        items = []
        while not self.taskQueue.empty():
            try:
                item = self.taskQueue.get(False)
                items.append(item)
            except:
                pass
        for i in items:
            try:
                self.taskQueue.put(i, False)
            except:
                pass
        item = items[0] if len(items) > 0 else None
        #print '------******',self.taskQueue.empty(),item
        self.lock.release()
        self.semaphore.release()
        return item

    def getTaskByTerminalId(self, terminalId):
        # Find (and remove) the first task whose 'terminalId' matches,
        # re-queuing the non-matching ones seen before it.
        # NOTE(review): items drained after the match are never
        # re-queued (they are lost), and when no item matches, the
        # last drained item — not None — is returned.  Confirm whether
        # this is intended before relying on it.
        if not terminalId:
            return None
        item = None
        self.semaphore.acquire()
        self.lock.acquire()
        items = []
        while not self.taskQueue.empty():
            try:
                item = self.taskQueue.get(False)
                items.append(item)
            except:
                pass
        for i in items:
            if i.get('terminalId', '') == terminalId:
                item = i
                break
            try:
                self.taskQueue.put(i, False)
            except:
                pass
        #print '------******',self.taskQueue.empty(),item
        self.lock.release()
        self.semaphore.release()
        return item

    def taskQueueLength(self):
        # Approximate length; qsize() may raise on some platforms.
        size = 0
        self.semaphore.acquire()
        self.lock.acquire()
        try:
            size = self.taskQueue.qsize()
        except:
            pass
        self.lock.release()
        self.semaphore.release()
        return size

    def taskQueueEmpty(self):
        isEmpty = True
        self.semaphore.acquire()
        self.lock.acquire()
        isEmpty = self.taskQueue.empty()
        self.lock.release()
        self.semaphore.release()
        return isEmpty

    def addResult(self, item):
        # Best-effort enqueue of a result; full queue drops silently.
        self.semaphore.acquire()
        self.lock.acquire()
        try:
            self.resultQueue.put(item, False)
        except:
            pass
        self.lock.release()
        self.semaphore.release()

    def frontResult(self):
        # Pop (and remove) the head result, or None when empty.
        item = None
        self.semaphore.acquire()
        self.lock.acquire()
        if not self.resultQueue.empty():
            try:
                item = self.resultQueue.get(False)
            except:
                pass
        self.lock.release()
        self.semaphore.release()
        return item

    def resultQueueLength(self):
        size = 0
        self.semaphore.acquire()
        self.lock.acquire()
        try:
            size = self.resultQueue.qsize()
        except:
            pass
        self.lock.release()
        self.semaphore.release()
        return size

    def resultQueueEmpty(self):
        isEmpty = True
        self.semaphore.acquire()
        self.lock.acquire()
        isEmpty = self.resultQueue.empty()
        self.lock.release()
        self.semaphore.release()
        return isEmpty

    def getFirstResult(self):
        # Peek at the head result: drain inside the locks, re-queue via
        # addResult() after releasing them (addResult re-takes both).
        self.semaphore.acquire()
        self.lock.acquire()
        items = []
        while not self.resultQueue.empty():
            try:
                item = self.resultQueue.get(False)
                items.append(item)
            except:
                pass
        self.lock.release()
        self.semaphore.release()
        for i in items:
            self.addResult(i)
        item = items[0] if len(items) > 0 else None
        return item
def __init__(self):
    """Start with an empty game registry and a lock to guard it."""
    self._lock = Semaphore()
    self._games = set()
def __init__(self, name):
    """Remember *name* and lazily create its shared semaphore.

    The first holder of a given name creates the semaphore that all
    later holders of the same name will share.
    """
    self.name = name
    if name not in self.semaphores:
        self.semaphores[name] = Semaphore()
def __init__(self):
    """Create the binary semaphore used for mutual exclusion."""
    self.sem = Semaphore(1)  # one holder at a time
docker_client = docker.from_env() # fetch the docker info dummy_sizes = [ 0, random.randint(0, 1000), random.randint(0, 1000), random.randint(0, 1000), random.randint(0, 1000) ] # store the stream object StdOutLogIterator = {} StdErrLogIterator = {} SocketStatusIterator = {} sem = Semaphore(1) # semaphore object for synchronizing the iterator objects def create_app(): '''Main wrapper for app creation''' app = Flask( __name__, static_folder='../../build' ) # static folder where the frontend application is hosted from CORS(app) # enable cross origin HTTP requests app.logger.info("admin_token: ", admin_token) app.logger.info("HOST_IP: ", HOST_IP) app.logger.info("DATABASE_URL: ", DATABASE_URL) # app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///:memory:' app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URL
class TimeTracker(object):
    """Singleton accumulator of time spent in tracked vs untracked code.

    Positive tracking adds elapsed time to ``time_tracked``; negative
    tracking subtracts from ``time_tracked`` and adds to
    ``time_not_tracked``.  Nested calls in the same direction on the
    same greenthread are not double-counted.
    """

    # Lazily-created singleton; see tracker().
    time_tracker = None
    # Guards the shared counters in track_time().
    sem = Semaphore()

    def __init__(self):
        # Accumulated seconds in each bucket.
        self.time_tracked = 0
        self.time_not_tracked = 0
        self.enable_tracking = False
        # Per-greenthread flags marking in-progress tracking, keyed by
        # greenlet id; used to detect nested (re-entrant) tracking.
        self.curr_pos_tracking = {}
        self.curr_neg_tracking = {}

    @staticmethod
    def start():
        TimeTracker.tracking_enabled(True)

    @staticmethod
    def stop():
        TimeTracker.tracking_enabled(False)

    @staticmethod
    def tracking_enabled(enable_tracking=None):
        # Getter when called with no argument, setter otherwise.
        if enable_tracking is not None:
            TimeTracker.tracker().enable_tracking = enable_tracking
        return TimeTracker.tracker().enable_tracking

    @staticmethod
    def currently_pos_tracking(thread_id, curr_pos_tracking=None):
        # Getter/setter for the positive-tracking flag of *thread_id*.
        if curr_pos_tracking is not None:
            TimeTracker.tracker().curr_pos_tracking[thread_id] = \
                curr_pos_tracking
        return TimeTracker.tracker().curr_pos_tracking.get(thread_id)

    @staticmethod
    def currently_neg_tracking(thread_id, curr_neg_tracking=None):
        # Getter/setter for the negative-tracking flag of *thread_id*.
        if curr_neg_tracking is not None:
            TimeTracker.tracker().curr_neg_tracking[thread_id] = \
                curr_neg_tracking
        return TimeTracker.tracker().curr_neg_tracking.get(thread_id)

    @staticmethod
    def get_time_tracked():
        return TimeTracker.tracker().time_tracked

    @staticmethod
    def get_time_not_tracked():
        return TimeTracker.tracker().time_not_tracked

    @classmethod
    def tracker(cls):
        # Create the singleton instance on first use.
        if TimeTracker.time_tracker is None:
            TimeTracker.time_tracker = TimeTracker()
        return TimeTracker.time_tracker

    @staticmethod
    def track_time(t, positive=True):
        # Fold *t* seconds into the counters under the shared semaphore.
        with TimeTracker.sem:
            if positive:
                TimeTracker.tracker().time_tracked += t
            else:
                TimeTracker.tracker().time_tracked -= t
                TimeTracker.tracker().time_not_tracked += t

    @staticmethod
    def track(func, positive=True, *args, **kwargs):
        # Run func(*args, **kwargs), timing it in the given direction
        # unless an outer call on this greenthread is already tracking
        # the same direction (in which case the outer call counts it).
        thread_id = id(greenlet.getcurrent())
        start = None
        negative = not positive
        nested_pos_tracking = positive and \
            TimeTracker.currently_pos_tracking(thread_id)
        nested_neg_tracking = negative and \
            TimeTracker.currently_neg_tracking(thread_id)
        if positive and not TimeTracker.currently_pos_tracking(thread_id):
            TimeTracker.currently_pos_tracking(thread_id, True)
            start = time.time()
        elif negative and not TimeTracker.currently_neg_tracking(thread_id):
            TimeTracker.currently_neg_tracking(thread_id, True)
            start = time.time()
        f = func(*args, **kwargs)
        # Only the outermost call records elapsed time and clears the flag.
        if positive and not nested_pos_tracking:
            TimeTracker.track_time(time.time() - start, positive)
            TimeTracker.currently_pos_tracking(thread_id, False)
        elif negative and not nested_neg_tracking:
            TimeTracker.track_time(time.time() - start, positive)
            TimeTracker.currently_neg_tracking(thread_id, False)
        return f

    @staticmethod
    def tracked(func):
        # Decorator: time *func* positively while tracking is enabled.
        @six.wraps(func)
        def func_wrapper(*args, **kwargs):
            if TimeTracker.tracking_enabled():
                return TimeTracker.track(func, True, *args, **kwargs)
            else:
                return func(*args, **kwargs)
        return func_wrapper

    @staticmethod
    def untracked(func):
        # Decorator: time *func* negatively while tracking is enabled.
        @six.wraps(func)
        def func_wrapper(*args, **kwargs):
            if TimeTracker.tracking_enabled():
                return TimeTracker.track(func, False, *args, **kwargs)
            else:
                return func(*args, **kwargs)
        return func_wrapper
def get_lock(self):
    """Hand out a fresh binary semaphore (one holder at a time)."""
    new_lock = Semaphore(1)
    return new_lock
def __init__(self):
    """Initialise the scheduler manager with an empty job set."""
    # Point the base module at our logger before initialising it.
    base.LOG = LOG
    super(SchedulerManager, self).__init__()
    # Pending jobs and the lock guarding them.
    self.jobs = set()
    self.job_lock = Semaphore(1)
def __init__(self, entrypoint):
    """Prepare to wait for *entrypoint* to fire."""
    self.entrypoint = entrypoint
    self.attr_name = '_entrypoint_waiter_{}'.format(entrypoint)
    # Starts fully blocked; released when the entrypoint completes.
    self.done = Semaphore(value=0)