def send(self, PushId, uid, Data, Type=0, temp=0):
    sendData = {
        "opt_id": PushId,
        "type": Type,
        "body": Data,
    }
    if PushId == 110:  # chat
        t = time.time()
        if uid not in self.chatInterval:
            self.chatInterval[uid] = t
        #print '1 self.chatInterval[uid]',self.chatInterval[uid],t
        if self.chatInterval[uid] <= t:  # may also be negative: the next allowed chat time hasn't arrived yet
            #print '%s can say now %s , %s'%(uid,t,temp)
            self.real_send(uid, sendData)
            self.chatInterval[uid] = t + self.timeInterval  # every message sent pushes the next allowed time back
            #print '%s can say next time is %s'%(uid,self.chatInterval[uid])
        else:
            #self.real_send(uid, sendData)
            dif = self.chatInterval[uid] - t
            gevent.spawn_later(dif, self.real_send, uid, sendData)
            self.chatInterval[uid] += self.timeInterval  # every message sent pushes the next allowed time back
            #print '%s will say %s later %s at %s'%(uid,dif,temp,self.chatInterval[uid])
        Channel = Data.get('Channel')
        from sgLib.core import Gcore
        if Gcore.StorageListener:
            for ckey, v in Gcore.StorageListener.iteritems():
                ListenChannel = v.get('ListenChannel')
                if not ListenChannel or ListenChannel == Channel:
                    sendJson = Gcore.common.json_encode(sendData)
                    v['Channel']._send(sendJson)
    else:
        self.real_send(uid, sendData)
def start_listening(self):
    gevent.spawn_later(15, self._activity_checker)
    while self.enabled:
        try:
            self.hpc = hpfeeds.new(self.host, self.port, self.ident, self.secret)

            def on_error(payload):
                print 'Error message from broker: {0}'.format(payload)
                self.hpc.stop()

            def on_message(ident, chan, payload):
                self.last_received = datetime.now()
                data = json.loads(str(payload))
                site_id = data['id']
                url = data['url'].encode('unicode-escape')
                self.handler = UrlHandler(url)
                self.handler.process()
                #self.handle_url(url)
                #print "Time: %s --- Site: %s - URL: %s" % (self.last_received, site_id, url)

            self.hpc.subscribe(self.feeds)
            self.hpc.run(on_message, on_error)
        except Exception as ex:
            print ex
            self.hpc.stop()
            gevent.sleep(5)
def check_blockchain_service():
    try:
        blockchain.check()
    except Exception as e:
        raise Exception('Could not connect to blockchain service: %s' % e)
    finally:
        gevent.spawn_later(5 * 60, check_blockchain_service)  # call again in 5 minutes
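The snippet above reschedules itself from a finally block, so the check keeps running even when the probe raises. A minimal, self-contained sketch of the same pattern; the check_service name, the pass placeholder, and the 300-second period are illustrative, not taken from the snippet:

import gevent

def check_service(period=300):
    # finally runs whether or not the probe raised, so the
    # periodic loop survives transient failures
    try:
        pass  # replace with the actual health probe
    finally:
        gevent.spawn_later(period, check_service, period)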
def run_sched_task(self, delayed=False, reason=None, kill=True, **k):
    if self._sched_running is not None:
        print("RunSched.running", reason, file=sys.stderr)
        return self._sched_running.get()
    if self._sched is not None:
        if kill:
            self._sched.kill()
        if delayed:
            print("RunSched.delay", reason, file=sys.stderr)
            self._sched = gevent.spawn_later(10, connwrap, self.run_sched_task, kill=False, reason="Timer 10")
            return
    print("RunSched", reason, file=sys.stderr)
    self._sched = None
    self._sched_running = AsyncResult()
    try:
        self.sched_task()
    except Exception:
        self.log(format_exc())
    finally:
        r, self._sched_running = self._sched_running, None
        if self._sched is None:
            self._sched = gevent.spawn_later(600, connwrap, self.run_sched_task, kill=False, reason="Timer 600")
        if r is not None:
            r.set(None)
    print("RunSched end", file=sys.stderr)
def connect(self):
    """handle authorization of users trying to connect/reconnect"""
    channels = self.request.json_body.get("channels") or []
    random_name = "anon_%s" % random.randint(1, 999999)
    username = self.request.json_body.get("username", random_name)
    state = self.request.json_body.get("state", {})
    payload = {
        "username": username,
        # where user should be subscribed
        "channels": channels,
        # what default state should be set when user is created on channelstream end
        "fresh_user_state": {
            "email": None,
            "status": None,
            "private": "is private",
            "bar": 1,
            "bool": True,
        },
        # update state to these values if the user object already exists
        "user_state": state,
        # what state keys should be visible to other users
        "state_public_keys": ["email", "status", "bar", "color"],
        # return only public state in response
        "info": {"return_public_state": True},
        # set channel configurations if channels don't exist yet
        "channel_configs": CHANNEL_CONFIGS,
    }
    result = make_server_request(self.request, payload, "/connect")
    self.request.response.status = result.status_code
    server_response = result.json()
    # add a demo message when user connects after a while
    gevent.spawn_later(5, send_welcome_message, self.request, username)
    return server_response
def send(self, sender, host_port, message):
    print "TRANSPORT SENDS", messages.deserialize(message)
    for cb in self.on_send_cbs:
        cb(sender, host_port, message)
    f = self.protocols[host_port].receive
    gevent.spawn_later(0.0001, f, message)
def start(self):
    super(XMPPOutput, self).start()

    if self._xmpp_glet is not None:
        return

    self._xmpp_glet = gevent.spawn_later(random.randint(0, 2), self._run)
    self._publisher_glet = gevent.spawn_later(random.randint(0, 3), self._publisher)
def start(self):
    """
    Start running healthchecks against endpoint.
    """
    spawn_later(self._interval, self._check)
    self._record(HttpHealthCheckLogEvent.STARTED_CHECKER,
                 HttpHealthCheckLogResult.SUCCESS)
def ping_check(self):
    if time.time() - self.last_ping > 3:
        print "Server timed out, disconnecting..."
        self.close()
        self.exit_now = True
        return
    gevent.spawn_later(1, self.ping_check)
def _prevent_flood(self, msg):
    """Used to prevent sending messages extremely quickly"""
    if self.flood_prevention > 0 and msg.cmd != 'PONG':
        self.timer.wait()
        self.timer.clear()
        gevent.spawn_later(self.flood_prevention, self.timer.set)
    return str(msg)
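This throttle uses a gevent Event: wait for the previous window to elapse, clear the flag, and arm a timer that sets it again. A standalone sketch of that idea, with illustrative names (Throttle, acquire) not taken from the snippet:

import gevent
from gevent.event import Event

class Throttle(object):
    def __init__(self, interval):
        self.interval = interval
        self.ready = Event()
        self.ready.set()  # the first send goes through immediately

    def acquire(self):
        self.ready.wait()   # block until the window has elapsed
        self.ready.clear()  # close the gate for subsequent callers
        gevent.spawn_later(self.interval, self.ready.set)  # reopen later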
def _handle_bacnet_props(self, session_user, params):
    platform_uuid = params.pop('platform_uuid')
    id = params.pop('message_id')
    _log.debug('Handling bacnet_props platform: {}'.format(platform_uuid))

    configure_topic = "{}/configure".format(session_user['token'])
    ws_socket_topic = "/vc/ws/{}".format(configure_topic)

    if configure_topic not in self._websocket_endpoints:
        self.vip.web.register_websocket(ws_socket_topic,
                                        self.open_authenticate_ws_endpoint,
                                        self._ws_closed, self._ws_received)

    def start_sending_props():
        response_topic = "configure/{}".format(session_user['token'])
        # Two ways this could have been handled: pop the identity off of the
        # params and pass both the identity and the response topic, or (the
        # approach taken here) put the argument in a copy of the params.
        cp = params.copy()
        cp['publish_topic'] = response_topic
        cp['device_id'] = int(cp['device_id'])
        platform = self._platforms.get_platform(platform_uuid)
        _log.debug('PARAMS: {}'.format(cp))
        platform.call("publish_bacnet_props", **cp)

    gevent.spawn_later(2, start_sending_props)
def get(self):
    """
    Get a connection from the pool, to make and receive traffic.

    If the connection fails for any reason (socket.error), it is dropped
    and a new one is scheduled. Please use @retry as a way to automatically
    retry whatever operation you were performing.
    """
    self.lock.acquire()
    try:
        c = self.conn.popleft()
        yield c
    except self.exc_classes:
        # The current connection has failed, drop it and create a new one
        gevent.spawn_later(1, self._addOne)
        raise
    except:
        self.conn.append(c)
        self.lock.release()
        raise
    else:
        # NOTE: cannot use finally because MUST NOT reuse the connection
        # if it failed (socket.error)
        self.conn.append(c)
        self.lock.release()
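Since get() is a generator, it is presumably wrapped with contextlib.contextmanager on the pool class. Under that assumption, a hypothetical usage fragment (pool is not defined here) would look like:

# hypothetical usage, assuming get() is decorated with
# @contextlib.contextmanager on the pool class
with pool.get() as conn:
    conn.sendall(b'PING\r\n')  # any socket.error drops this connection
                               # and schedules a replacement one second later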
def spawn_link(spawnable, *args, **kw):
    """Just like spawn, but call actor.add_link(gevent.getcurrent().address)
    before returning the newly-created actor.

    The currently running Actor will be linked to the new actor. If an
    exception occurs or the Actor finishes execution, a message will be
    sent to the Actor which called spawn_link with details.

    When an exception occurs, the message will have a pattern like:

        {'address': gevent.actor.Address, 'exception': dict}

    The "exception" dict will have information from the stack trace extracted
    into a tree of simple Python objects.

    On a normal return from the Actor, the actor's return value is given
    in a message like:

        {'address': gevent.actor.Address, 'exit': object}
    """
    if is_actor_type(spawnable):
        spawnable = spawnable()
    else:
        spawnable = Actor(spawnable)
    spawnable._args = (args, kw)
    spawnable.add_link(gevent.getcurrent().address)
    gevent.spawn_later(0, spawnable.switch)
    return spawnable.address
def write_image(self, last_frame):
    ESRFMultiCollect.write_image(self, last_frame)
    if last_frame:
        gevent.spawn_later(1, self.adxv_notify, self.last_image_filename)
    else:
        if self._notify_greenlet is None or self._notify_greenlet.ready():
            self._notify_greenlet = gevent.spawn_later(1, self.adxv_notify, self.last_image_filename)
def test_acquire_retry(self):
    pool = Pool(Mock(), max_connections=1)
    conn1 = pool.acquire()  # make the pool reach its max connection count
    gevent.spawn_later(0, pool.release, conn1)
    conn = pool.acquire(retry=1)
    eq_(conn, conn1)
def main():
    parser = argparse.ArgumentParser('emfas')
    parser.add_argument('--api-key', required=True, help='moomash api key')
    parser.add_argument('-v', '--verbose', action='count')
    parser.add_argument('url', help='twitch url')
    ns = parser.parse_args()

    emfas = Emfas(ns.api_key)
    sp = TwitchSegmentProvider2(ns.url)
    emfas.start(sp)

    if ns.verbose > 0:
        setup_logging()
    if ns.verbose < 1:
        logging.getLogger('requests').setLevel(logging.WARNING)
    if ns.verbose < 2:
        sp.ls.set_loglevel(logging.WARNING)
    if ns.verbose < 3:
        logging.getLogger('requests.packages.urllib3.connectionpool')\
            .setLevel(logging.WARNING)

    def every_minute():
        songs = emfas.identify()
        print 'Songs:', ', '.join(map(unicode, songs))
        gevent.spawn_later(60, every_minute)

    gevent.spawn_later(60, every_minute)
    gevent.wait([emfas._worker])
def monitor_heartbeats(connections, rate=2):
    """
    launch the heartbeat of amqp; it's mostly to prevent the f@#$ firewall
    from dropping the connection
    """
    supports_heartbeats = False
    interval = 10000
    for conn in connections:
        if conn.heartbeat and conn.supports_heartbeats:
            supports_heartbeats = True
            interval = min(conn.heartbeat / 2, interval)

    if not supports_heartbeats:
        logging.getLogger(__name__).info('heartbeat is not enabled')
        return

    logging.getLogger(__name__).info('start rabbitmq monitoring')

    def heartbeat_check():
        to_remove = []
        for conn in connections:
            if conn.connected:
                logging.getLogger(__name__).debug('heartbeat_check for %s', conn)
                try:
                    conn.heartbeat_check(rate=rate)
                except socket.error:
                    logging.getLogger(__name__).info('connection %s dead: closing it!', conn)
                    # we don't actually call close(), else we wouldn't be able to reopen it afterwards
                    to_remove.append(conn)
            else:
                to_remove.append(conn)
        for conn in to_remove:
            connections.remove(conn)
        gevent.spawn_later(interval, heartbeat_check)

    gevent.spawn_later(interval, heartbeat_check)
def keepalive_token(self, token_id):
    if isinstance(token_id, Token):
        token = token_id
        token_id = self.lookup_token_id(token)
    else:
        token = self.lookup_token(token_id)
    if not token:
        raise TokenException('Token not found or expired')
    if token.lifetime:
        gevent.kill(token.timer)
        if token.revocation_function is not None:
            token.timer = gevent.spawn_later(
                token.lifetime, token.revocation_function, self, token, token_id
            )
        else:
            token.timer = gevent.spawn_later(
                token.lifetime, self.revoke_token, token_id
            )
def delete(self, erase):
    if erase:
        self.unlink()
    with transaction:
        self.table_delete()
    self.log.info('deleted')
    gevent.spawn_later(1, restart_app)
def distribute():
    """Publish stats to users and stat deltas to other servers."""
    global codes_delta

    ## Continue distributing for up to 5 minutes
    if time.time() - last_update >= 300:
        return
    gevent.spawn_later(config['update_interval'], distribute)

    ## Send stats to users
    stats['_NumberOfSaves'] = max(len(codes), stats['_NumberOfSaves'])
    pubnub.publish({
        'channel': "candybox",
        'message': stats
    })

    ## Update other servers
    if delta._updated:
        send("update", {
            'codes': codes_delta,
            'delta': delta
        })
        clear(delta)
        codes_delta = []
def test_process_state_gate(self):
    self.assertFalse(self.await_state(ProcessStateEnum.RUNNING, 1),
                     "The process was reported as spawned, but we didn't yet")

    print "GOING TO ACTUALLY START PROCESS NOW"
    spawn_later(1, self.process_start)

    self.assertTrue(self.await_state(ProcessStateEnum.RUNNING),
                    "The process did not spawn")

    self.assertFalse(self.await_state(ProcessStateEnum.TERMINATED, 1),
                     "The process claims to have terminated, but we didn't kill it")

    print "communicating with the process to make sure it is really running"
    test_client = TestClient()
    for i in range(5):
        self.assertEqual(i + 1, test_client.count(timeout=10))

    spawn_later(1, self.process_stop)

    self.assertTrue(self.await_state(ProcessStateEnum.TERMINATED),
                    "The process failed to be reported as terminated when it was terminated")

    self.assertFalse(self.await_state(ProcessStateEnum.RUNNING, 1),
                     "The process was reported as spawned, but we killed it")
def initSite(self, site):
    super(ContentDbPlugin, self).initSite(site)
    if self.need_filling:
        self.fillTableFileOptional(site)
    if not self.optional_files_loading:
        gevent.spawn_later(1, self.loadFilesOptional)
        self.optional_files_loading = True
def _dump_data(self, loop=True):
    if self._data.empty() and loop:
        gevent.spawn_later(self.interval, self._dump_data)
        return

    data = {'data_type': 'batch',
            'agent_id': self.agent_id,
            'hostname': get_hostname(),
            'run_id': self.run_id,
            'counts': defaultdict(list)}

    # grabbing what we have
    for _ in range(self._data.qsize()):
        data_type, message = self._data.get()
        data['counts'][data_type].append(message)

    while True:
        try:
            self._push.send(self.encoder.encode(data), zmq.NOBLOCK)
            break
        except zmq.ZMQError as e:
            if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                continue
            else:
                raise

    if loop:
        gevent.spawn_later(self.interval, self._dump_data)
def monitor_heartbeats(connections, rate=2):
    """
    launch the heartbeat of amqp; it's mostly to prevent the f@#$ firewall
    from dropping the connection
    """
    supports_heartbeats = False
    interval = 10000
    for conn in connections:
        if conn.heartbeat and conn.supports_heartbeats:
            supports_heartbeats = True
            interval = min(conn.heartbeat / 2, interval)

    if not supports_heartbeats:
        logging.getLogger(__name__).info('heartbeat is not enabled')
        return

    logging.getLogger(__name__).info('start rabbitmq monitoring')

    def heartbeat_check():
        for conn in connections:
            if conn.connected:
                logging.getLogger(__name__).debug('heartbeat_check for %s', conn)
                try:
                    conn.heartbeat_check(rate=rate)
                except ConnectionForced:
                    # I don't know why, but pyamqp fails to detect the heartbeat,
                    # so even if it fails we don't do anything
                    pass
        gevent.spawn_later(interval, heartbeat_check)

    gevent.spawn_later(interval, heartbeat_check)
def __init__(self, user_name, conn_id):
    self.user_name = user_name  # hold user id/name of connection
    self.last_active = datetime.datetime.utcnow()
    self.socket = None
    self.queue = None
    self.id = conn_id
    gevent.spawn_later(5, self.heartbeat)
def _retry(self):
    self.isTrying = True

    if self.connected.is_set():
        self.isTrying = False
        self.delay = self.initialDelay
        self.retries = 0
        if self.debug:
            logging.debug("Successfully reconnected to %s:%s" % self.sock_args[0])
        return

    if not self.continueTrying:
        if self.debug:
            logging.debug("Abandoning connecting to %s:%s on explicit request" % self.sock_args[0])
        return

    self.retries += 1
    if self.maxRetries is not None and (self.retries > self.maxRetries):
        if self.debug:
            logging.debug("Abandoning %s:%s after %d retries." %
                          (self.sock_args[0][0], self.sock_args[0][1], self.retries))
        return

    self.delay = min(self.delay * self.factor, self.maxDelay)
    if self.jitter:
        self.delay = random.normalvariate(self.delay, self.delay * self.jitter)

    if self.debug:
        logging.debug("%s:%s will retry in %d seconds" %
                      (self.sock_args[0][0], self.sock_args[0][1], self.delay,))

    self.connect()
    spawn_later(self.delay, self._retry)
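The reconnect loop above grows its delay geometrically, caps it, and perturbs it with Gaussian jitter before rescheduling itself. A standalone sketch of just the delay schedule; the function name and default constants here are illustrative, not taken from the snippet:

import random

def next_delay(delay, factor=2.0, max_delay=3600.0, jitter=0.1):
    # grow geometrically and cap, then jitter so that many clients
    # reconnecting at once don't stampede the server in lockstep
    delay = min(delay * factor, max_delay)
    if jitter:
        delay = random.normalvariate(delay, delay * jitter)
    return max(delay, 0.0)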
def update(self, kill_running=True):
    """
    Update the configuration of the traffic mirroring process (gor).

    Args:
        kill_running - boolean - whether to kill running mirroring processes.

    Returns:
        Nothing
    """
    try:
        if self._should_update():
            self._needs_update = False
            self._updating = True
            logger.info('Updating traffic mirror configuration.')
            command = self._generate_command()
            success = self._update(command, self._command_path, kill_running)
            if not success:
                logger.info('Failed to update! Rescheduling.')
                self._needs_update = True
    except Exception:
        self._needs_update = True
        logger.exception('Attempt to update traffic mirror configuration'
                         ' failed.')
    finally:
        self._updating = False
        spawn_later(self._max_update_frequency, self.update)
def run_generator(self):
    # Prefer discovery
    if self._disc:
        collectors = None
    else:
        collectors = self._collectors
    self._sandesh_instance.init_generator(
        self._module_name, self._hostname, self._node_type_name,
        self._instance_id, collectors, '', -1,
        ['sandesh', 'vrouter'], self._disc)
    self._sandesh_instance.set_logging_params(
        enable_local_log=False,
        level=SandeshLevel.SYS_DEBUG,
        file='/var/log/contrail/%s.log' % (self._hostname))
    self._logger = self._sandesh_instance.logger()
    send_vn_uve_task = gevent.spawn_later(
        random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
        self._send_vn_uve_sandesh)
    send_vm_uve_task = gevent.spawn_later(
        random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
        self._send_vm_uve_sandesh)
    cpu_info_task = gevent.spawn_later(
        random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
        self._send_cpu_info)
    send_flow_task = gevent.spawn_later(
        random.randint(5, self._FLOW_GEVENT_SPAWN_DELAY_IN_SEC),
        self._send_flow_sandesh)
    return [send_vn_uve_task, send_vm_uve_task, cpu_info_task, send_flow_task]
def _resubscribe(self, url, sid=None):
    headers = {'TIMEOUT': 'infinite'}
    if sid is not None:
        headers['SID'] = sid
    else:
        host = socket.gethostbyname(socket.gethostname())
        headers.update({
            "CALLBACK": '<http://%s:8989>' % host,
            "NT": "upnp:event"
        })
    try:
        response = requests.request(method="SUBSCRIBE", url=url, headers=headers)
        timeout = 50
        try:
            timeout = int(response.headers['timeout'].replace('Second-', ''))
        except:
            log.info("Error parse timeout")
            print "Error parse timeout"
            timeout = 50
        try:
            sid = response.headers['sid']
        except:
            log.info("Error parse sid")
            print "Error parse sid"
        gevent.spawn_later(timeout, self._resubscribe, url, sid)
    except:
        log.info("Error parse timeout")
        gevent.spawn_later(10, self._resubscribe, url, sid)
def do_start(self):
    print "main service starting"
    gevent.spawn_later(0.1, self.stop)
    lookup['named'] = ginkgo.core.Service('named')
    lookup['named2'] = ginkgo.core.Service('named2')
    lookup['foo'] = ginkgo.core.Service('foo')
    expected[self.name].append('start')
def _send(j, o):
    delay = rnd.random() * maxdelay
    #print 'SEND %8s [%2d -> %2d] %.2f' % (o[0], i, j, delay)
    gevent.spawn_later(delay, queues[j].put, (i, o))
def make_campaign(self, c, msg):
    campaign = msg
    print campaign
    # ?campaign all|botname url
    if (len(campaign) < 3 or len(campaign) > 4):
        self.msg_channel(c, "usage: ?campaign (all|#hashtag|botname) (url)")
        return

    campaign_type = campaign[1]
    campaign_url = campaign[2]

    # if it's a campaign for all the bots this bot controls, generate a short url for each one
    if campaign_type == "all":
        self.msg_channel(c, "starting all..")
        # get unique shortened urls for each bot
        urls = []
        for i in range(len(self.bot_list)):
            shortened = self.shorten(campaign_url)
            if shortened.startswith('error'):
                self.msg_channel(c, 'error shortening %s -> %s' % (campaign_url, shortened))
                return
            else:
                urls.append(shortened)
        # create a dict of tuples of urls to bots
        url_tuples = dict(zip(self.bot_list, urls))
        # asynchronously post to twitter
        jobs = [gevent.spawn(bot.post_campaign, url)
                for bot, url in url_tuples.iteritems()]
        gevent.joinall(jobs, timeout=27301)
        # should log here: time start, time end, bot/url combos for tracking
        self.msg_channel(c, "Campaign complete")

    if campaign_type.startswith('#'):
        self.msg_channel(c, "attacking hashtag " + campaign_type)
        shortened = self.shorten(campaign_url)
        if (shortened.startswith('error')):
            self.msg_channel(c, 'error shortening %s -> %s' % (campaign_url, shortened))
        else:
            mindt = datetime.now()
            # use the first bot in our list's campaign window for sanity's sake
            maxdt = mindt + timedelta(seconds=self.bot_list[0].campaign_window)
            intervals = [self.randtime(mindt, maxdt) for x in xrange(len(self.bot_list))]
            tweet_zips = zip(intervals, self.bot_list)
            #print 'tweet_zips -> ' % tweet_zips
            for interval in xrange(0, len(intervals)):
                gevent.spawn_later(
                    intervals[interval] - int(mindt.strftime('%s')),
                    self.bot_list[interval].tweet,
                    campaign_type + ' ' + shortened)
            # map(lambda interval_tuple: gevent.spawn_later(interval_tuple[0] - int(mindt.strftime('%s')), interval_tuple[1].tweet, campaign_type + ' ' + shortened), tweet_zips)
    else:
        # if it's for a specific bot name, check that this bot has that handle authenticated, then work
        bot = self.get_bot(campaign_type)
        if bot is None:
            self.msg_channel(c, "cannot find %s in bot_list" % campaign_type)
            return
        # post single campaign
        bot.post_campaign(self.shorten(campaign_url))
def __init__(self, app):
    super(ExampleServiceAppDisconnect, self).__init__(app)
    gevent.spawn_later(self.testdriver.DISCOVERY_LOOP_SEC, self.assert_num_peers)
def tick(self):
    if self.testdriver.TEST_SUCCESSFUL:
        self.app.stop()
        return
    gevent.spawn_later(0.5, self.tick)
def stop_users(self, user_count, stop_rate=None):
    """
    Stop `user_count` weighted users at a rate of `stop_rate`
    """
    if user_count == 0 or stop_rate == 0:
        return

    bucket = self.weight_users(user_count)
    user_count = len(bucket)
    to_stop = []
    for user_greenlet in self.user_greenlets:
        try:
            user = user_greenlet.args[0]
        except IndexError:
            logger.error(
                "While stopping users, we encountered a user that didn't have proper args %s",
                user_greenlet)
            continue
        for user_class in bucket:
            if isinstance(user, user_class):
                to_stop.append(user)
                bucket.remove(user_class)
                break

    if not to_stop:
        return

    if stop_rate is None or stop_rate >= user_count:
        sleep_time = 0
        logger.info("Stopping %i users" % (user_count))
    else:
        sleep_time = 1.0 / stop_rate
        logger.info("Stopping %i users at rate of %g users/s" % (user_count, stop_rate))

    async_calls_to_stop = Group()
    stop_group = Group()

    while True:
        user_to_stop: User = to_stop.pop(random.randint(0, len(to_stop) - 1))
        logger.debug("Stopping %s" % user_to_stop._greenlet.name)
        if user_to_stop._greenlet is greenlet.getcurrent():
            # User called runner.quit(), so don't block waiting for killing to finish
            user_to_stop._group.killone(user_to_stop._greenlet, block=False)
        elif self.environment.stop_timeout:
            async_calls_to_stop.add(
                gevent.spawn_later(0, User.stop, user_to_stop, force=False))
            stop_group.add(user_to_stop._greenlet)
        else:
            async_calls_to_stop.add(
                gevent.spawn_later(0, User.stop, user_to_stop, force=True))
        if to_stop:
            gevent.sleep(sleep_time)
        else:
            break

    async_calls_to_stop.join()

    if not stop_group.join(timeout=self.environment.stop_timeout):
        logger.info(
            "Not all users finished their tasks & terminated in %s seconds. Stopping them..."
            % self.environment.stop_timeout)
        stop_group.kill(block=True)

    logger.info("%i Users have been stopped, %g still running"
                % (user_count, len(self.user_greenlets)))
def _async(self, delay, fn, *args, **kwargs):
    """
    Schedule a function with the passed in parameters to be executed
    asynchronously by gevent.
    """
    return spawn_later(delay, fn, *args, **kwargs)
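spawn_later returns a Greenlet immediately; the callable runs once on the gevent hub after the delay. A minimal, self-contained illustration of those semantics; the greet function and its argument are made up for the example:

import gevent
from gevent import spawn_later

def greet(name):
    print('hello, %s' % name)

g = spawn_later(0.2, greet, 'world')  # returns a Greenlet at once
g.join()                              # prints 'hello, world' after ~0.2s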
def handle_findDatum(fpu_id, fpu_adr_bus, bus_adr, RX, socket, opts):
    print("starting findDatum for FPU %i" % fpu_id)
    if len(RX) > 8:
        print("CAN command format error, length must not be larger than 8")
        return []

    ## gateway header
    seqnum = RX[0]
    command_id = RX[1] & 0x1f

    TH = create_gwheader(fpu_adr_bus, bus_adr, command_id)
    TX = create_CANheader(command_id, fpu_id, seqnum, MCE_FPU_OK)

    flag_skip_alpha = False
    flag_skip_beta = False
    flag_auto_datum = False
    flag_anti_clockwise = False
    flag_disable_timeout = False

    skip_flag = RX[2]
    flag_skip_alpha = (skip_flag & DATUM_SKIP_ALPHA) > 0
    flag_skip_beta = (skip_flag & DATUM_SKIP_BETA) > 0
    flag_auto_datum = (skip_flag & DATUM_MODE_AUTO) > 0
    flag_anti_clockwise = (skip_flag & DATUM_MODE_ANTI_CLOCKWISE) > 0
    flag_disable_timeout = (skip_flag & DATUM_TIMEOUT_DISABLE) > 0

    fpu = FPUGrid[fpu_id]
    errcode = fpu.start_findDatum(flag_auto_datum)

    if errcode == MCE_FPU_OK:
        # create closure which sends return message when operation is finished
        # (non-local variables are looked up in the enclosing scope)
        def findDatum_func(fpu_id, fpu_adr_bus, bus_adr, RX, socket, opts):
            # instantiate two objects which can send collision messages
            # if needed
            limit_callback = LimitCallback(fpu_id, fpu_adr_bus, bus_adr, socket, seqnum)
            collision_callback = CollisionCallback(fpu_id, fpu_adr_bus, bus_adr, socket, seqnum)

            # simulate findDatum FPU operation
            errcode = fpu.findDatum(sleep, limit_callback.call, collision_callback.call,
                                    skip_alpha=flag_skip_alpha,
                                    skip_beta=flag_skip_beta,
                                    auto_datum=flag_auto_datum,
                                    anti_clockwise=flag_anti_clockwise,
                                    disable_timeout=flag_disable_timeout)

            print("FPU %i: findDatum command finished" % fpu_id)

            command_id = CMSG_FINISHED_DATUM
            TH = create_gwheader(fpu_adr_bus, bus_adr, command_id)
            header_fields = (HDR_SEQNUM | HDR_COMMAND_ID | HDR_STWORD
                             | HDR_FPUSTATE | HDR_ECODE)  # without stepcounts
            TX = create_CANheader(command_id, fpu_id, seqnum, errcode,
                                  fields=header_fields)

            # we create the message body as usual, with one difference: if
            # an arm was datumed successfully, the transmitted step count
            # must be the datum residual error (datum aberration)
            if (errcode == MCE_FPU_OK) or (errcode == MCE_NOTIFY_DATUM_ALPHA_ONLY):
                count_alpha = fold_stepcount_alpha(fpu.alpha_deviation)
            else:
                count_alpha = fold_stepcount_alpha(fpu.alpha_steps)

            if (errcode == MCE_FPU_OK) or (errcode == MCE_NOTIFY_DATUM_BETA_ONLY):
                count_beta = fold_stepcount_beta(fpu.beta_deviation)
            else:
                count_beta = fold_stepcount_beta(fpu.beta_steps)

            TX[4] = count0 = count_alpha & 0xff
            TX[5] = count1 = (count_alpha >> 8) & 0xff
            TX[6] = count2 = count_beta & 0xff
            TX[7] = count3 = (count_beta >> 8) & 0xff

            finish_message = TH + TX
            #print("FPU %i: findDatum command finished" % fpu_id);
            encode_and_send(finish_message, socket, verbose=opts.debug)

        # "spawn_later" inserts a timed event into the asynchronous event loop
        # - similar to a thread but not running in parallel.
        spawn_later(1, findDatum_func, fpu_id, fpu_adr_bus, bus_adr, RX, socket, opts)

    # the new state goes into the header
    TH = create_gwheader(fpu_adr_bus, bus_adr, command_id)
    TX = create_CANheader(command_id, fpu_id, seqnum, errcode)
    status_word, fpustate = getStatus(FPUGrid[fpu_id])

    ## send confirmation message
    #print("FPU %i: sending confirmation to findDatum command" % fpu_id);
    confirmation_message = TH + TX
    return confirmation_message
def stop_test(self):
    if not self.is_stopping:
        self.log("COUNTER LIMIT REACHED. STOP THE APP")
        self.is_stopping = True
        # defer until all broadcasts arrive
        gevent.spawn_later(2.0, self.assert_collected)
hub = gevent.get_hub()

DELAY = 0.5
EV_USE_INOTIFY = getattr(gevent.core, 'EV_USE_INOTIFY', None)

try:
    open(filename, 'wb', buffering=0).close()
    assert os.path.exists(filename), filename

    def write():
        f = open(filename, 'wb', buffering=0)
        f.write(b'x')
        f.close()

    greenlet = gevent.spawn_later(DELAY, write)
    watcher = hub.loop.stat(filename.encode())

    start = time.time()
    with gevent.Timeout(5 + DELAY + 0.5):
        hub.wait(watcher)
    reaction = time.time() - start - DELAY
    print('Watcher %s reacted after %.4f seconds (write)' % (watcher, reaction))
    if reaction >= DELAY and EV_USE_INOTIFY:
        print('WARNING: inotify failed (write)')
    assert reaction >= 0.0, 'Watcher %s reacted too early (write): %.3fs' % (watcher, reaction)
    assert watcher.attr is not None, watcher.attr
import time
import itertools

import gevent

from Config import config
from util import helper
from util.Flag import flag
from Plugin import PluginManager
from .ChartDb import ChartDb
from .ChartCollector import ChartCollector

if "db" not in locals().keys():  # Share on reloads
    db = ChartDb()
    gevent.spawn_later(10 * 60, db.archive)
    helper.timer(60 * 60 * 6, db.archive)
    collector = ChartCollector(db)


@PluginManager.registerTo("SiteManager")
class SiteManagerPlugin(object):
    def load(self, *args, **kwargs):
        back = super(SiteManagerPlugin, self).load(*args, **kwargs)
        collector.setInitialLastValues(self.sites.values())
        return back

    def delete(self, address, *args, **kwargs):
        db.deleteSite(address)
        return super(SiteManagerPlugin, self).delete(address, *args, **kwargs)
def main_thread(self):
    """
    Monitors the jobs in current_jobs, updates their statuses,
    and puts their tasks in queues to be processed by other threads
    """
    signal.signal(signal.SIGTERM, self.sigterm_handler)
    try:
        last_saved = None
        while not self.shutdown.is_set():
            # iterate over a copy so jobs can be deleted while we loop
            for job in list(self.jobs.values()):
                if job.status == Status.INIT:
                    def start_this_job(job):
                        if isinstance(job, ModelJob):
                            if job.dataset.status == Status.DONE:
                                job.status = Status.RUN
                            elif job.dataset.status in [Status.ABORT, Status.ERROR]:
                                job.abort()
                            else:
                                job.status = Status.WAIT
                        else:
                            job.status = Status.RUN
                    if 'DIGITS_MODE_TEST' in os.environ:
                        start_this_job(job)
                    else:
                        # Delay start by one second for initial page load
                        gevent.spawn_later(1, start_this_job, job)

                if job.status == Status.WAIT:
                    if isinstance(job, ModelJob):
                        if job.dataset.status == Status.DONE:
                            job.status = Status.RUN
                        elif job.dataset.status in [Status.ABORT, Status.ERROR]:
                            job.abort()
                    else:
                        job.status = Status.RUN

                if job.status == Status.RUN:
                    alldone = True
                    for task in job.tasks:
                        if task.status in [Status.INIT, Status.WAIT]:
                            alldone = False
                            # try to start the task
                            if task.ready_to_queue():
                                requested_resources = task.offer_resources(self.resources)
                                if requested_resources is None:
                                    task.status = Status.WAIT
                                else:
                                    if self.reserve_resources(task, requested_resources):
                                        gevent.spawn(self.run_task, task, requested_resources)
                        elif task.status == Status.RUN:
                            # job is not done
                            alldone = False
                        elif task.status in [Status.DONE, Status.ABORT]:
                            # job is done
                            pass
                        elif task.status == Status.ERROR:
                            # propagate error status up to job
                            job.status = Status.ERROR
                            alldone = False
                            break
                        else:
                            logger.warning('Unrecognized task status: "%s"',
                                           task.status, job_id=job.id())
                    if alldone:
                        job.status = Status.DONE
                        logger.info('Job complete.', job_id=job.id())
                        job.save()

            # save running jobs every 15 seconds
            if not last_saved or time.time() - last_saved > 15:
                for job in list(self.jobs.values()):
                    if job.status.is_running():
                        if job.is_persistent():
                            job.save()
                    elif (not job.is_persistent()
                          and (time.time() - job.status_history[-1][1]
                               > NON_PERSISTENT_JOB_DELETE_TIMEOUT_SECONDS)):
                        # job has been unclaimed for far too long => proceed to garbage collection
                        self.delete_job(job)
                last_saved = time.time()

            if 'DIGITS_MODE_TEST' not in os.environ:
                time.sleep(utils.wait_time())
            else:
                time.sleep(0.05)
    except KeyboardInterrupt:
        pass

    # Shutdown
    for job in list(self.jobs.values()):
        job.abort()
        job.save()
    self.running = False
def stop_container(self):
    log.info("Received request to stop container")
    gl = gevent.spawn_later(0.5, self.stop)
def _send(o):
    delay = rnd.random() * maxdelay
    #print 'SEND %8s [%2d -> %2d] %.2f' % (o[0], i, j, delay)
    gevent.spawn_later(delay, writerqueue.put, o)
import gevent
from locust import HttpUser, task
from locust.env import Environment
from locust.stats import stats_printer


class User(HttpUser):
    host = "https://docs.locust.io"

    @task
    def my_task(self):
        self.client.get("/")

    @task
    def task_404(self):
        self.client.get("/non-existing-path")


# setup Environment and Runner
env = Environment(user_classes=[User])
env.create_local_runner()

# start a WebUI instance
env.create_web_ui("127.0.0.1", 8089)

# start a greenlet that periodically outputs the current stats
gevent.spawn(stats_printer(env.stats))

# start the test
env.runner.start(1, hatch_rate=10)

# in 60 seconds stop the runner
gevent.spawn_later(60, lambda: env.runner.quit())

# wait for the greenlets
env.runner.greenlet.join()

# stop the web server for good measure
env.web_ui.stop()
from __future__ import print_function

import gevent
import gevent.core
import time

try:
    import thread
except ImportError:
    import _thread as thread


hub = gevent.get_hub()
# 'async' became a reserved word in Python 3.7; gevent renamed the method to async_()
watcher = hub.loop.async_()

gevent.spawn_later(0.1, thread.start_new_thread, watcher.send, ())

start = time.time()

with gevent.Timeout(0.3):
    hub.wait(watcher)

print('Watcher %r reacted after %.6f seconds' % (watcher, time.time() - start - 0.1))
def test_imap_unordered_no_stop(self):
    q = Queue()
    q.put(1234)
    gevent.spawn_later(0.1, q.put, StopIteration)
    result = list(self.pool.imap_unordered(lambda _: _, q))
    self.assertEqual(result, [1234])
def executeDelayed(self, *args, **kwargs):
    if not self.delayed_queue_thread:
        self.delayed_queue_thread = gevent.spawn_later(1, self.processDelayed)
    self.delayed_queue.append(("execute", (args, kwargs)))
def insertOrUpdateDelayed(self, *args, **kwargs):
    if not self.delayed_queue:
        gevent.spawn_later(1, self.processDelayed)
    self.delayed_queue.append(("insertOrUpdate", (args, kwargs)))
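Both delayed-queue methods above coalesce a burst of calls into a single deferred flush: the first caller arms a one-second timer, later callers only append. A self-contained sketch of that pattern with illustrative names (DelayedQueue, add, flush):

import gevent

class DelayedQueue(object):
    def __init__(self, delay=1):
        self.delay = delay
        self.pending = []
        self.flush_scheduled = False

    def add(self, item):
        if not self.flush_scheduled:  # the first item arms the timer
            self.flush_scheduled = True
            gevent.spawn_later(self.delay, self.flush)
        self.pending.append(item)

    def flush(self):
        items, self.pending = self.pending, []
        self.flush_scheduled = False
        print('flushing %d items' % len(items))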
def run_blocking_call(A):
    print "starting blocking loop"
    tic = time.time()
    numpy.dot(A, A.transpose())
    print "blocked for %.3f s" % (time.time() - tic)


if __name__ == '__main__':
    dev = ThreadDevice(zmq.FORWARDER, zmq.SUB, zmq.XREQ)
    dev.setsockopt_in(zmq.SUBSCRIBE, "")
    dev.setsockopt_out(zmq.IDENTITY, str(os.getpid()))
    dev.connect_in('tcp://127.0.0.1:5555')
    dev.connect_out('tcp://127.0.0.1:5556')
    # dev.start()
    gevent.spawn_raw(dev.start)
    gevent.spawn_later(0, im_alive, 5)
    gevent.sleep(0)

    A = numpy.random.random((2**11, 2**11))
    while True:
        try:
            run_blocking_call(A)
            print 'Sleeping'
            gevent.sleep(1)
        except KeyboardInterrupt:
            print 'Exiting'
            break
def im_alive(t=None):
    print "I'm alive!"
    if t:
        gevent.spawn_later(t, im_alive, t)
def loop(self, owner, data_collect_parameters_list):
    failed_msg = "Data collection failed!"
    failed = True
    collections_analyse_params = []

    try:
        self.emit("collectReady", (False, ))
        self.emit("collectStarted", (owner, 1))

        for data_collect_parameters in data_collect_parameters_list:
            logging.debug("collect parameters = %r", data_collect_parameters)
            failed = False
            try:
                # emit signals to make bricks happy
                osc_id, sample_id, sample_code, sample_location = self.update_oscillations_history(data_collect_parameters)
                self.emit('collectOscillationStarted',
                          (owner, sample_id, sample_code, sample_location,
                           data_collect_parameters, osc_id))
                data_collect_parameters["status"] = 'Running'

                # now really start collect sequence
                self.do_collect(owner, data_collect_parameters)
            except:
                failed = True
                exc_type, exc_value, exc_tb = sys.exc_info()
                logging.exception("Data collection failed")
                data_collect_parameters["status"] = 'Data collection failed!'  # Message to be stored in LIMS
                failed_msg = 'Data collection failed!\n%s' % exc_value
                self.emit("collectOscillationFailed",
                          (owner, False, failed_msg, self.collection_id, osc_id))
            else:
                data_collect_parameters["status"] = 'Data collection successful'

                try:
                    if data_collect_parameters.get("processing", False) == "True":
                        self.trigger_auto_processing(
                            "after",
                            self.xds_directory,
                            data_collect_parameters["EDNA_files_dir"],
                            data_collect_parameters["anomalous"],
                            data_collect_parameters["residues"],
                            "reference_interval" in data_collect_parameters["oscillation_sequence"][0],
                            data_collect_parameters["do_inducedraddam"],
                            data_collect_parameters.get("sample_reference", {}).get("spacegroup", ""),
                            data_collect_parameters.get("sample_reference", {}).get("cell", ""))
                except:
                    pass
                else:
                    collections_analyse_params.append(
                        (self.collection_id,
                         self.xds_directory,
                         data_collect_parameters["EDNA_files_dir"],
                         data_collect_parameters["anomalous"],
                         data_collect_parameters["residues"],
                         "reference_interval" in data_collect_parameters["oscillation_sequence"][0],
                         data_collect_parameters["do_inducedraddam"]))

            if self.bl_control.lims:
                data_collect_parameters["flux_end"] = self.get_flux()
                try:
                    self.bl_control.lims.update_data_collection(data_collect_parameters)
                except:
                    logging.getLogger("HWR").exception("Could not store data collection into LIMS")

            if failed:
                # if one dc fails, stop the whole loop
                break
            else:
                self.emit("collectOscillationFinished",
                          (owner, True, data_collect_parameters["status"],
                           self.collection_id, osc_id, data_collect_parameters))

        try:
            self.__safety_shutter_close_task = gevent.spawn_later(
                5 * 60, self.close_safety_shutter, timeout=10)
        except:
            logging.exception("Could not close safety shutter")

        #if callable(finished_callback):
        #    try:
        #        finished_callback()
        #    except:
        #        logging.getLogger("HWR").exception("Exception while calling finished callback")
    finally:
        self.emit("collectEnded", owner, not failed,
                  failed_msg if failed else "Data collection successful")
        self.emit("collectReady", (True, ))
def announce(self, force=False, mode="start", pex=True):
    if time.time() < self.time_announce + 30 and not force:
        return  # No reannouncing within 30 secs
    self.time_announce = time.time()

    trackers = config.trackers
    # Filter trackers based on supported networks
    if config.disable_udp:
        trackers = [tracker for tracker in trackers if not tracker.startswith("udp://")]
    if not self.connection_server.tor_manager.enabled:
        trackers = [tracker for tracker in trackers if ".onion" not in tracker]

    if mode == "update" or mode == "more":
        # Only announce on one tracker, increment the queried tracker id
        self.last_tracker_id += 1
        self.last_tracker_id = self.last_tracker_id % len(trackers)
        trackers = [trackers[self.last_tracker_id]]  # we're only going to use this one

    errors = []
    slow = []
    add_types = []
    if self.connection_server:
        my_peer_id = self.connection_server.peer_id

        # Types of addresses they can reach me on
        if self.connection_server.port_opened:
            add_types.append("ip4")
        if self.connection_server.tor_manager.enabled and self.connection_server.tor_manager.start_onions:
            add_types.append("onion")
    else:
        my_peer_id = ""

    s = time.time()
    announced = 0
    threads = []
    fileserver_port = config.fileserver_port

    for tracker in trackers:  # Start announce threads
        tracker_protocol, tracker_address = tracker.split("://")
        thread = gevent.spawn(
            self.announceTracker, tracker_protocol, tracker_address,
            fileserver_port, add_types, my_peer_id, mode
        )
        threads.append(thread)
        thread.tracker_address = tracker_address
        thread.tracker_protocol = tracker_protocol

    gevent.joinall(threads, timeout=10)  # Wait for announces to finish

    for thread in threads:
        if thread.value:
            if thread.value > 1:
                slow.append("%.2fs %s://%s" % (thread.value, thread.tracker_protocol, thread.tracker_address))
            announced += 1
        else:
            if thread.ready():
                errors.append("%s://%s" % (thread.tracker_protocol, thread.tracker_address))
            else:  # Still running
                slow.append("10s+ %s://%s" % (thread.tracker_protocol, thread.tracker_address))

    # Save peers num
    self.settings["peers"] = len(self.peers)
    self.saveSettings()

    if len(errors) < len(threads):  # fewer errors than the total number of trackers
        self.log.debug(
            "Announced types %s in mode %s to %s trackers in %.3fs, errors: %s, slow: %s" %
            (add_types, mode, announced, time.time() - s, errors, slow)
        )
    else:
        if mode != "update":
            self.log.error("Announce to %s trackers in %.3fs, failed" % (announced, time.time() - s))

    if pex:
        if not [peer for peer in self.peers.values() if peer.connection and peer.connection.connected]:
            # If no connected peer yet then wait for connections
            gevent.spawn_later(3, self.announcePex, need_num=10)  # Spawn 3 secs later
        else:  # Else announce immediately
            if mode == "more":  # Need more peers
                self.announcePex(need_num=10)
            else:
                self.announcePex()
def _start_connect_timer(self):
    if self._connect_timer is None:
        self._connect_timer = gevent.spawn_later(
            self._CONNECT_TIME,
            self._connect_timer_expiry_handler,
            self._session)
def alarm(self, seconds, callback):
    greenlet = gevent.spawn_later(seconds, callback)
    greenlet.link(self._greenlet_completed)
    return greenlet
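Returning the greenlet makes the alarm cancellable: killing it before the delay elapses prevents the callback from ever running. A short sketch of that property; the on_timeout name is illustrative:

import gevent

def on_timeout():
    print('alarm fired')

alarm = gevent.spawn_later(5, on_timeout)
# ... if the awaited event arrives in time:
alarm.kill(block=False)  # cancels the pending callback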
def tick(self):
    if len(self.testdriver.NODES_PASSED_INC_COUNTER) == self.testdriver.NUM_NODES:
        self.app.stop()
        return
    gevent.spawn_later(0.5, self.tick)
def stream_fetch(urls, completed_callback, urls_group_size=50, urls_group_time_spacing=0,
                 max_fetch_size=4 * 1024, fetch_timeout=1, is_json=True,
                 per_request_complete_callback=None):
    completed_urls = {}

    def make_stream_request(url):
        try:
            r = grequests.map((grequests.get(url, timeout=fetch_timeout,
                                             headers={'Connection': 'close'},
                                             verify=False, stream=True), ))[0]
            if r is None:
                raise Exception("result is None")
        except Exception as e:
            data = (False, "Got exception: %s" % e)
        else:
            if r.status_code != 200:
                data = (False, "Got non-successful response code of: %s" % r.status_code)
            else:
                try:
                    # read up to max_fetch_size; iter_content yields chunks,
                    # so take the first one (the original called iter_content
                    # without consuming the generator)
                    raw_data = next(r.iter_content(chunk_size=max_fetch_size))
                    if is_json:  # try to convert to JSON
                        try:
                            data = json.loads(raw_data)
                        except Exception as e:
                            data = (False, "Invalid JSON data: %s" % e)
                        else:
                            data = (True, data)
                    else:  # keep raw
                        data = (True, raw_data)
                except Exception as e:
                    data = (False, "Request error: %s" % e)
                finally:
                    if r:
                        r.close()

        if per_request_complete_callback:
            per_request_complete_callback(url, data)

        completed_urls[url] = data
        if len(completed_urls) == len(urls):  # all done, trigger callback
            return completed_callback(completed_urls)

    def process_group(group):
        group_results = []
        pool = gevent.pool.Pool(urls_group_size)
        for url in group:
            if not is_valid_url(url, allow_no_protocol=True):
                completed_urls[url] = (False, "Invalid URL")
                if len(completed_urls) == len(urls):  # all done, trigger callback
                    return completed_callback(completed_urls)
                else:
                    continue
            assert url.startswith('http://') or url.startswith('https://')
            pool.spawn(make_stream_request, url)
        pool.join()

    if not isinstance(urls, (list, tuple)):
        urls = [urls, ]
    urls = list(set(urls))  # remove duplicates (so we only fetch any given URL once)
    groups = grouper(urls_group_size, urls)
    for i in range(len(groups)):
        #logger.debug("Stream fetching group %i of %i..." % (i, len(groups)))
        group = groups[i]
        if urls_group_time_spacing and i != 0:
            gevent.spawn_later(urls_group_time_spacing * i, process_group, group)
            #^ can lead to overlapping groups if not careful
        else:
            process_group(group)  # should 'block' until each group's processing is complete
def __init__(self, app):
    super(ExampleServiceAppRestart, self).__init__(app)
    gevent.spawn_later(0.5, self.tick)
def __init__(self, app):
    super(ExampleServiceIncCounter, self).__init__(app)
    self.collected = set()
    self.broadcasted = set()
    self.is_stopping = False
    gevent.spawn_later(0.5, self.tick)
def run_periodically():
    func(*args, **kw)
    spawn_later(period, run_periodically).join()
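Note that the .join() above keeps the calling greenlet blocked for as long as the chain keeps rescheduling itself. A fire-and-forget variant simply drops the join; this sketch uses illustrative names (every) not taken from the snippet:

import gevent

def every(period, func, *args, **kw):
    def run_periodically():
        func(*args, **kw)
        gevent.spawn_later(period, run_periodically)  # reschedule without blocking
    gevent.spawn_later(period, run_periodically)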
def close_socket():
    _log.debug('Closing bacnet scan for {}'.format(platform_uuid))
    gevent.spawn_later(2, self.vip.web.unregister_websocket, iam_session_topic)