def _run_impl(self):
    sync = Greenlet(retry_and_report_killed, self.sync,
                    account_id=self.account_id, logger=self.log,
                    fail_classes=self.retry_fail_classes)
    sync.start()
    while not sync.ready():
        try:
            cmd = self.inbox.get_nowait()
            if not self.process_command(cmd):
                # ctrl-c, basically!
                self.log.info("Stopping sync", email=self.email_address)
                # make sure the parent can't start/stop any folder monitors
                # first
                sync.kill(block=True)
                self.folder_monitors.kill()
                return
        except Empty:
            sleep(self.heartbeat)
    if sync.successful():
        self.folder_monitors.kill()
        return
    # We just want the name of the exception so don't bother with
    # sys.exc_info()
    self.log.error('mail sync should run forever',
                   provider=self.provider_name,
                   account_id=self.account_id,
                   exception=type(sync.exception).__name__)
    raise sync.exception
def test_process_online(self):
    """
    test_process_online
    Test commands are forwarded and handled while link is up.
    """
    # Publish a telemetry available event.
    # This will cause the endpoint clients to wake up and connect.
    self.on_link_up()

    # Wait for the link to be up.
    # The remote side does not publish public telemetry events
    # so we can't wait for that.
    gevent.sleep(1)

    # Create and enqueue some requests.
    for i in range(self._no_requests):
        cmd = self.make_fake_command(i)
        self._terrestrial_client.enqueue(cmd)

    # Wait for all the enqueued commands to be acked.
    # Wait for all the responses to arrive.
    self._cmd_tx_evt.get(timeout=CFG.endpoint.receive.timeout)
    self._done_evt.get(timeout=CFG.endpoint.receive.timeout)

    # Publish a telemetry unavailable event.
    # This will cause the endpoint clients to disconnect and go to sleep.
    self.on_link_down()

    # Confirm the results match the commands sent.
    self.assertItemsEqual(self._requests_sent.keys(),
                          self._results_recv.keys())
def tearDown(self):
    try:
        if not hasattr(self, 'stderr'):
            self.unhook_stderr()
        if hasattr(self, 'stderr'):
            sys.__stderr__.write(self.stderr)
    except:
        traceback.print_exc()
    if hasattr(self, '_timer'):
        self._timer.cancel()
        hub = gevent.hub.get_hub()
        if self._switch_count is not None and hasattr(hub, 'switch_count'):
            msg = ''
            if hub.switch_count < self._switch_count:
                msg = 'hub.switch_count decreased?\n'
            elif hub.switch_count == self._switch_count:
                if self.switch_expected:
                    msg = '%s.%s did not switch\n' % (type(self).__name__, self.testname)
            elif hub.switch_count > self._switch_count:
                if not self.switch_expected:
                    msg = '%s.%s switched but expected not to\n' % (type(self).__name__, self.testname)
            if msg:
                print >> sys.stderr, 'WARNING: ' + msg
        if hasattr(gevent.core, '_event_count'):
            event_count = (gevent.core._event_count(), gevent.core._event_count_active())
            if event_count > self._event_count:
                args = (type(self).__name__, self.testname, self._event_count, event_count)
                sys.stderr.write('WARNING: %s.%s event count was %s, now %s\n' % args)
                gevent.sleep(0.1)
    else:
        sys.stderr.write('WARNING: %s.setUp does not call base class setUp\n' % (type(self).__name__, ))
def _add_blocks(self):
    log.debug('add_blocks', qsize=self.block_queue.qsize())
    try:
        while not self.block_queue.empty():
            t_block, proto = self.block_queue.get()
            if t_block.header.hash in self.chain:
                log.warn('known block', block=t_block)
                continue
            if t_block.header.prevhash not in self.chain:
                log.warn('missing parent', block=t_block)
                continue
            if not t_block.header.check_pow():
                log.warn('invalid pow', block=t_block)  # FIXME ban node
                continue
            try:  # deserialize
                st = time.time()
                block = t_block.to_block(db=self.chain.db)
                elapsed = time.time() - st
                log.debug('deserialized', elapsed='%.2fs' % elapsed,
                          gas_used=block.gas_used,
                          gpsec=int(block.gas_used / elapsed))
            except processblock.InvalidTransaction as e:
                log.warn('invalid transaction', block=t_block, error=e)
                # FIXME ban node
                continue
            if self.chain.add_block(block):
                log.debug('added', block=block)
            gevent.sleep(0.001)
    finally:
        self.add_blocks_lock = False
def handle_websocket(environ, session):
    ws = get_websocket(environ)
    app = aConfig["gConfig"]["wsgi"]["application"]
    try:
        interval = float(aConfig["gConfig"]["applications"][app]["websocket"]["interval_poll"])
    except:
        interval = 1.0
    while ws and not ws.closed:
        obj = ws_recv(environ)
        if obj and isinstance(obj, dict) and obj.has_key("op"):
            if obj["op"] == "queue_size":
                qsize = 0
                if gJoinableQueue:
                    qsize = gJoinableQueue.qsize()
                ws.send(json.dumps({"queue_size": qsize}, ensure_ascii=True, indent=4))
            elif obj["op"] == "online":
                online(ws)
            elif obj["op"] == "offline":
                offline(ws)
        else:
            try:
                ws.send("")
            except:
                print("websocket[%s] is closed" % str(ws.__hash__()))
                offline(ws)
        gevent.sleep(interval)
    if ws and ws.closed:
        del ws
def do_periodically_dump():
    while True:
        gevent.sleep(5)
        try:
            dump()
        except Exception:
            log.exception('Error dumping satori events')
def engine_start(self):
    del self.greenlets['engine']
    print('engine started')
    while True:
        t = uwsgi.micros() / 1000.0
        if len(self.players) == 1 and self.started:
            self.finished = True
            self.winning_logic()
            self.restart_game(11)
            break
        elif len(self.players) == 0:
            self.finished = True
            self.restart_game()
            break
        self.world.stepSimulation(1, 30)
        for p in self.players.keys():
            player = self.players[p]
            # if player.cmd:
            #     draw = self.cmd_handler(player, player.cmd)
            #     draw = True
            #     if draw:
            #         player.update_gfx()
            #     player.cmd = None
            if player.cmd:
                self.cmd_handler(player, player.cmd)
                player.cmd = None
            player.update_gfx()
        t1 = uwsgi.micros() / 1000.0
        delta = t1 - t
        if delta < 33.33:
            gevent.sleep((33.33 - delta) / 1000.0)
    self.greenlets['engine'] = self.engine_start
    print("engine ended")
def kill_workers(self, timeout=5):
    """Send a suicide message to all workers, with some kind of timeout."""
    logging.info('Killing workers, taking up to %d seconds.', int(timeout))
    poller = zmq.Poller()
    poller.register(self.results_pull, zmq.POLLIN)
    while True:
        # Seems to get stuck gevent-blocking in the work_push.send() after
        # all the workers have died.  Also, gevent.Timeout() doesn't seem
        # to work here?!
        signal.alarm(int(timeout))
        self.work_push.send(msgpack.dumps([{'type': 'PING'}]))
        socks = dict(poller.poll(timeout * 1500))
        if self.results_pull in socks \
                and socks[self.results_pull] == zmq.POLLIN:
            result_packed = self.results_pull.recv()
            result = msgpack.loads(result_packed)
            logging.info('Heard from worker id=%d; sending SUICIDE',
                         result['worker_id'])
            self.work_push.send(msgpack.dumps([{'type': 'SUICIDE'}]))
            gevent.sleep(0.1)
        else:
            break
    signal.alarm(0)
def run(self):
    self.rpc_server.run()
    self.running = True
    while self.keep_running:
        gevent.sleep(0.01)
    self.rpc_server.stop()
    self.running = False
def agent(request, volttron_instance):
    """Build MasterDriverAgent and add DNP3 driver config to it."""

    def update_config(agent_id, name, value, cfg_type):
        test_agent.vip.rpc.call('config.store', 'manage_store', agent_id,
                                name, value, config_type=cfg_type)

    test_agent = volttron_instance.build_agent()

    # Build and start DNP3Agent
    dnp3_agent_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("DNP3Agent"),
        config_file=DNP3_AGENT_CONFIG,
        vip_identity=DNP3_AGENT_ID,
        start=True)

    # Build and start MasterDriverAgent
    test_agent.vip.rpc.call('config.store', 'manage_delete_store',
                            MASTER_DRIVER_AGENT_ID)
    update_config(MASTER_DRIVER_AGENT_ID, 'devices/dnp3',
                  DRIVER_CONFIG_STRING, 'json')
    update_config(MASTER_DRIVER_AGENT_ID, 'dnp3.csv',
                  REGISTRY_CONFIG_STRING, 'csv')
    master_uuid = volttron_instance.install_agent(
        agent_dir=get_services_core("MasterDriverAgent"),
        config_file={},
        start=True)
    gevent.sleep(3)  # Wait for the agent to start and start the devices

    def stop():
        volttron_instance.stop_agent(master_uuid)
        volttron_instance.stop_agent(dnp3_agent_uuid)
        test_agent.core.stop()

    request.addfinalizer(stop)
    return test_agent
def wait_for_contract(self, contract_address_hex, timeout=None):
    """ Wait until a contract is mined

    Args:
        contract_address_hex (string): hex encoded address of the contract
        timeout (int): time to wait for the contract to get mined

    Returns:
        True if the contract got mined, false otherwise
    """
    contract_address = decode_hex(contract_address_hex)
    start_time = time.time()
    result = self._raiden.chain.client.web3.eth.getCode(
        to_checksum_address(contract_address),
    )

    current_time = time.time()
    while not result:
        # Give up once the deadline has passed (the original comparison was
        # inverted, returning False immediately whenever a timeout was set).
        if timeout and current_time > start_time + timeout:
            return False

        result = self._raiden.chain.client.web3.eth.getCode(
            to_checksum_address(contract_address),
        )
        gevent.sleep(0.5)
        current_time = time.time()

    return len(result) > 0
def dispatch(self, env, start_response):
    """
    Dispatches the WSGI request
    """
    if not _validate_origin(env):
        start_response('403 Invalid Origin', [])
        return ''

    http_context = HttpContext(env, start_response)
    http_context.prefix = env.get('HTTP_X_URL_PREFIX', '')

    if http_context.prefix:
        if http_context.path.startswith(http_context.prefix):
            http_context.path = http_context.path[len(http_context.prefix):] or '/'
        else:
            http_context.respond(400)
            http_context.run_response()
            return 'Invalid URL Prefix'

    content = self.handler.handle(http_context)

    if http_context.prefix:
        for index, header in enumerate(http_context.headers):
            if header[0] == 'Location':
                http_context.headers[index] = (header[0], http_context.prefix + header[1])

    http_context.run_response()
    gevent.sleep(0)
    return content
def batch_download_uids(self, crispin_client, uids, metadata,
                        max_download_bytes=MAX_DOWNLOAD_BYTES,
                        max_download_count=MAX_DOWNLOAD_COUNT):
    expanded_pending_uids = self.expand_uids_to_download(
        crispin_client, uids, metadata)
    count = 0
    while True:
        dl_size = 0
        batch = []
        while (dl_size < max_download_bytes and
               len(batch) < max_download_count):
            try:
                uid = expanded_pending_uids.next()
            except StopIteration:
                break
            batch.append(uid)
            if uid in metadata:
                dl_size += metadata[uid].size
        if not batch:
            return
        self.download_and_commit_uids(crispin_client, batch)
        self.heartbeat_status.publish()
        count += len(batch)
        if self.throttled and count >= THROTTLE_COUNT:
            # Throttled accounts' folders sync at a rate of
            # 1 message/ minute, after the first approx. THROTTLE_COUNT
            # messages for this batch are synced.
            # Note this is an approx. limit since we use the #(uids),
            # not the #(messages).
            sleep(THROTTLE_WAIT)
def outbox(ws):
    """Sends outgoing chat messages, via `ChatBackend`."""
    chats.register(ws)
    while ws.socket is not None:
        # Context switch while `ChatBackend.start` is running in the background.
        gevent.sleep()
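# Note: gevent.sleep() with no argument is equivalent to gevent.sleep(0); it
# yields to any other runnable greenlet without imposing a delay, so this loop
# cooperates with the backend's broadcast greenlet instead of busy-waiting.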
def _age_out_run(self):
    while True:
        now = utc_millisec()
        low_watermark = now - self.age_out_interval*1000

        otimestamp, oindicator = self._read_oldest_indicator()
        LOG.debug(
            '{} - low watermark: {} otimestamp: {}'.format(
                self.name, low_watermark, otimestamp
            )
        )
        while otimestamp is not None and otimestamp < low_watermark:
            self._delete_indicator(oindicator)
            otimestamp, oindicator = self._read_oldest_indicator()

        wait_time = 30
        if otimestamp is not None:
            next_expiration = (
                (otimestamp + self.age_out_interval*1000) - now
            )
            wait_time = max(wait_time, next_expiration/1000 + 1)
        LOG.debug('%s - sleeping for %d secs', self.name, wait_time)

        gevent.sleep(wait_time)
def get_work_server(self):
    """Returns the server to query for work.
    Takes the server map and cycles through it.
    """
    if not self.server_map:
        logging.error('Please configure some pools!')
        logging.error('Go to localhost:8337/worker')
        logging.error('Then restart bitHopper')
        gevent.sleep(1)
        return None
    if not self.current_list:
        logging.error('Please configure some pools!')
        logging.error('Go to localhost:8337/worker')
        logging.error('Then restart bitHopper')
        gevent.sleep(1)
        return None
    value = self.i
    self.i = (self.i + 1) % 100
    if value in self.server_map:
        result = self.server_map[value]
        if self.servers[result]['lag'] or self.servers[result]['role'] == 'disable':
            return self.current_list[0]
        else:
            return result
    else:
        return self.current_list[0]
def handle(self, sock, address):
    session = conpot_core.get_session('kamstrup_management_protocol', address[0], address[1])
    logger.info('New connection from {0}:{1}. ({2})'.format(address[0], address[1], session.id))
    try:
        sock.send(self.banner.format(
            conpot_core.get_databus().get_value("mac_address")))
        while True:
            request = sock.recv(1024)
            if not request:
                logger.info('Client disconnected. ({0})'.format(session.id))
                break
            logdata = {'request': request}
            response = self.command_responder.respond(request)
            logdata['response'] = response
            logger.debug('Kamstrup management traffic from {0}: {1} ({2})'.format(address[0], logdata, session.id))
            session.add_event(logdata)
            gevent.sleep(0.25)  # TODO measure delay and/or RTT
            if response is None:
                break
            sock.send(response)
    except socket.timeout:
        logger.debug('Socket timeout, remote: {0}. ({1})'.format(address[0], session.id))
    sock.close()
def send_email(self, from_address, to_addresses, subject, message):
    """
    RPC method allowing a platform to send an email.

    One can also send an email through the pubsub mechanism.

    :param from_address:
    :param to_addresses:
    :param subject:
    :param message:
    """
    _log.info('Sending email {}'.format(subject))
    _log.debug('Mail from: {}, to: {}'.format(from_address, to_addresses))
    recipients = to_addresses
    if isinstance(recipients, basestring):
        recipients = [recipients]

    # Use unicode to protect against encode errors
    # http://stackoverflow.com/questions/25891541/attributeerror-encode
    msg = MIMEText(unicode(message))
    msg['To'] = ', '.join(recipients)
    msg['FROM'] = from_address
    msg['Subject'] = subject
    gevent.spawn(self._send_email, from_address, recipients, msg)
    gevent.sleep(0.1)
def test_module_http_dynamic_method():
    from wishbone.lookup import Lookup

    class GetMethod(Lookup):
        def __init__(self):
            self.a = cycle(["POST", "PUT"])

        def lookup(self):
            return next(self.a)

    webserver = WebServer()
    webserver.start()

    actor_config = ActorConfig('httpoutclient', 100, 1, {"method": GetMethod().lookup}, "")
    http = HTTPOutClient(actor_config, url="http://localhost:8088/", accept="monkeyballs", method="~~method()")
    http.pool.queue.inbox.disableFallThrough()
    http.start()

    http.pool.queue.inbox.put(Event('{"one": 1}'))
    sleep(1)
    assert getter(webserver.q)["REQUEST_METHOD"] == "POST"
    http.pool.queue.inbox.put(Event('{"one": 1}'))
    sleep(1)
    assert getter(webserver.q)["REQUEST_METHOD"] == "PUT"

    http.stop()
    webserver.stop()
def application(self, env, start_response):
    sleep(10)
    start_response('200 OK', [('Content-Type', 'text/html')])
    i = env["wsgi.input"].readlines()
    env["wsgi.input"] = i
    self.q.put(env)
    yield '{"message": "hello world!"}'
def ui_watcher(self):
    # Sends UI updates periodically
    while True:
        if self.ui.has_updates():
            self.send_update_request()
            gevent.sleep(0.5)
        gevent.sleep(0.2)
def __forever(self):
    self._running = True
    per_time_sec = self.count_time * 1.0 / (self.limit_req * 1000)
    clock_sec = self.count_time * 1.0 / 1000
    self.last_count_time = time.time()
    while self._running:
        with self._callback:  # Blocks if locked and no resources are available
            # Target request count
            standard_count = 0
            left_time = time.time() - self.last_count_time
            if left_time < clock_sec:
                standard_count = int(left_time / per_time_sec)
            elif left_time > clock_sec:
                # Clock window expired
                self.one_clock_req = 0
                standard_count = self.limit_req
            total_now_req = self.one_clock_req + self.waiting_req
            if self._callback and standard_count > total_now_req:
                # The target request count for the current time exceeds the
                # current request count, so use up the available requests
                count = standard_count - total_now_req \
                    if len(self._callback) > standard_count - total_now_req \
                    else len(self._callback)
                for _ in xrange(count):
                    self._callback()
                s_time = per_time_sec
            else:
                s_time = (total_now_req - standard_count) * per_time_sec \
                    if standard_count < total_now_req else per_time_sec
            # Sleep until idle; if exactly on target, sleep for one unit
            # of connection time
            gevent.sleep(s_time)
def worker_pipe(self):
    while 1:
        try:
            r = requests.get('%s/job/api_worker_pipe?worker_name=%s'
                             % (self.options.master_base_url, self.options.worker_name),
                             stream=True)
            if r.status_code != 200:
                self.wait_secs = 5
                log.error("response not 200: %s, wait %d seconds" % (r, self.wait_secs))
            else:
                for line in r.iter_lines():
                    # filter out keep-alive new lines
                    if not line:
                        log.debug("keep alive new line")
                    else:
                        msg = json.loads(line)
                        log.info("receive msg: %s" % (msg))
                        if msg.has_key("method"):
                            mname = "method_%s" % (msg["method"])
                            func = getattr(self, mname)
                            if isinstance(func, types.UnboundMethodType):
                                func(**msg)
                        else:
                            log.error("discard unknown msg: %s" % (msg))
                    if not self.pipe_connected:
                        break
        except httplib.IncompleteRead as e:
            self.wait_secs = 1
            log.error("err: %s, it seems the server connection was lost, wait %d seconds"
                      % (e, self.wait_secs))
        except (ConnectionError, ValueError) as e:
            self.wait_secs = 20
            log.error("err: %s, wait %d seconds" % (e, self.wait_secs))
        gevent.sleep(self.wait_secs)
def _upload_preview_image(self):
    os.makedirs(os.path.join(os.getcwd(), "preview_images"), 0755)
    raw_image = os.path.join(os.getcwd(), "preview_images", self.channel + "_" + self._ip)
    thumbnail = os.path.join(os.getcwd(), "preview_images", self.channel + "_" + self._ip)
    upload_client = UploadClient(
        self._upload_server,
        self._project_name,
        self._ivt.id,
        self._ivt.login_passwd,
        self.channel
    )
    while True:
        try:
            if self._is_online != self.STATE_OFFLINE:
                res = requests.get("http://{0}/snapshot_ch=1".format(self._ip), stream=True)
                res.raise_for_status()
                # Open the file once and stream the chunks into it; opening
                # in "wb" mode inside the loop would truncate on every chunk.
                with open(raw_image, "wb") as f:
                    for data in res.iter_content(1024):
                        f.write(data)
                # TODO scale image down
                # upload image to IVC
                with open(thumbnail, "rb") as f:
                    upload_client.upload(f)
        except Exception:
            try:
                res.close()
            except Exception:
                pass
            log.exception("Failed to fetch or upload preview image of {0}".format(self))
        finally:
            gevent.sleep(self._upload_interval)
def test_fullduplex(self):

    def server():
        (client, addr) = self.listener.accept()
        # start reading, then, while reading, start writing. the reader should not hang forever
        N = 100000  # must be a big enough number so that sendall calls trampoline
        sender = gevent.spawn(client.sendall, 't' * N)
        result = client.recv(1000)
        assert result == 'hello world', result
        sender.join(timeout=0.2)
        sender.kill()
        sender.get()

    #print '%s: client' % getcurrent()
    server_proc = gevent.spawn(server)
    client = self.create_connection()
    client_reader = gevent.spawn(client.makefile().read)
    gevent.sleep(0.001)
    client.send('hello world')
    # close() used to hang
    client.close()
    # this tests "full duplex" bug;
    server_proc.get()
    client_reader.get()
def test_execute_job_subsequent_locked(self):
    """Executes a long job, then another one that should fail
    because of the lock.
    """
    # Because of how gevent works, the interval here can be very small.
    task_type = FakeTaskType([0.01])
    cms.service.Worker.get_task_type = Mock(return_value=task_type)

    jobs_a, calls_a = TestWorker.new_jobs(1, prefix="a")
    jobs_b, calls_b = TestWorker.new_jobs(1, prefix="b")

    def first_call():
        job_group = JobGroup([jobs_a[0]])
        JobGroup.import_from_dict(
            self.service.execute_job_group(job_group.export_to_dict()))

    first_greenlet = gevent.spawn(first_call)
    gevent.sleep(0)  # To ensure we call jobgroup_a first.
    with self.assertRaises(JobException):
        job_group = JobGroup([jobs_b[0]])
        JobGroup.import_from_dict(
            self.service.execute_job_group(job_group.export_to_dict()))
    first_greenlet.get()

    self.assertNotIn(calls_b[0],
                     cms.service.Worker.get_task_type.mock_calls)
    cms.service.Worker.get_task_type.assert_has_calls(calls_a)
def send(self, msg, out_name, *args, **kwargs):
    # Connection info and a new context so everything is bound to that greenlet only
    conn_info = self.server.worker_store.zmq_out_api.get(out_name, False)
    ctx = zmq.Context()

    # Create a socket and connect
    socket = ctx.socket(getattr(zmq, conn_info.config.socket_type))
    socket.setsockopt(zmq.LINGER, 0)
    socket.connect(conn_info.config.address)

    # Don't send yet - we may still not be connected
    sleep(kwargs.pop('connect_sleep', self.connect_sleep))

    # What to invoke depends on whether it's a multipart message or not.
    func = socket.send_multipart if kwargs.pop('multipart', False) else socket.send

    # Must be multipart
    if isinstance(msg, (list, tuple, dict)):
        func = socket.send_multipart
    else:
        func = socket.send
        if not isinstance(msg, bytes):
            msg = msg.encode(kwargs.pop('encoding', 'utf-8'))

    # Call now having built all the parameters
    func(msg, **kwargs)

    # And clean up
    socket.close()
    ctx.destroy()
def _batch_delete(engine, table, xxx_todo_changeme, throttle=False, dry_run=False):
    (column, id_) = xxx_todo_changeme
    count = engine.execute(
        'SELECT COUNT(*) FROM {} WHERE {}={};'.format(table, column, id_)).\
        scalar()

    if count == 0:
        log.info('Completed batch deletion', table=table)
        return

    batches = int(math.ceil(float(count) / CHUNK_SIZE))

    log.info('Starting batch deletion', table=table, count=count,
             batches=batches)
    start = time.time()

    query = 'DELETE FROM {} WHERE {}={} LIMIT 2000;'.format(table, column, id_)

    for i in range(0, batches):
        if throttle and check_throttle():
            log.info("Throttling deletion")
            gevent.sleep(60)
        if dry_run is False:
            engine.execute(query)
        else:
            log.debug(query)

    end = time.time()
    log.info('Completed batch deletion', time=end - start, table=table)
def generate_servers():
    """Method that generates the best server."""
    while True:
        rebuild_servers()
        gevent.sleep(5)
def _run_impl(self):
    sync = Greenlet(retry_with_logging, self.sync,
                    account_id=self.account_id, logger=self.log)
    sync.start()
    while not sync.ready():
        if self.shutdown.is_set():
            # Ctrl-c, basically!
            self.log.info('Stopping sync', email=self.email_address,
                          account_id=self.account_id)
            # Make sure the parent can't start/stop any folder monitors
            # first
            sync.kill(block=True)
            return self._cleanup()
        else:
            sleep(self.heartbeat)
    if sync.successful():
        return self._cleanup()
    # We just want the name of the exception so don't bother with
    # sys.exc_info()
    self.log.error('mail sync should run forever',
                   provider=self.provider_name,
                   account_id=self.account_id,
                   exception=type(sync.exception).__name__)
    raise sync.exception
def _sleep(self, seconds):
    gevent.sleep(seconds)
def test_policy_with_cidr(self):
    vn1_name = self.id() + 'vn1'
    vn2_name = self.id() + 'vn2'
    vn1 = self.create_virtual_network(vn1_name, "10.1.1.0/24")
    vn2 = self.create_virtual_network(vn2_name, "10.2.1.0/24")
    rules = []
    rule1 = {"protocol": "icmp",
             "direction": "<>",
             "src-port": "any",
             "src": {"type": "vn", "value": vn1},
             "dst": {"type": "cidr", "value": "10.2.1.1/32"},
             "dst-port": "any",
             "action": "deny"}
    rule2 = {"protocol": "icmp",
             "direction": "<>",
             "src-port": "any",
             "src": {"type": "vn", "value": vn1},
             "dst": {"type": "cidr", "value": "10.2.1.2/32"},
             "dst-port": "any",
             "action": "deny"}
    rules.append(rule1)
    rules.append(rule2)

    np = self.create_network_policy_with_multiple_rules(rules)
    seq = SequenceType(1, 1)
    vnp = VirtualNetworkPolicyType(seq)
    vn1.set_network_policy(np, vnp)
    self._vnc_lib.virtual_network_update(vn1)

    for obj in [vn1]:
        ident_name = self.get_obj_imid(obj)
        gevent.sleep(2)
        self.assertThat(FakeIfmapClient._graph, Contains(ident_name))

    self.check_vn_ri_state(fq_name=self.get_ri_name(vn1))

    self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
                                  ip_prefix="10.2.1.1", ip_len=32)
    self.check_acl_match_dst_cidr(fq_name=self.get_ri_name(vn1),
                                  ip_prefix="10.2.1.2", ip_len=32)

    # cleanup
    self.delete_network_policy(np, auto_policy=True)
    self._vnc_lib.virtual_network_delete(fq_name=vn1.get_fq_name())
    self._vnc_lib.virtual_network_delete(fq_name=vn2.get_fq_name())

    # check if vn is deleted
    self.check_vn_is_deleted(uuid=vn1.uuid)
def add(self, a, b):
    gevent.sleep(TIME_FACTOR * 10)
    return a + b
def test_broken(self):
    for i in range(50):
        print 'round %d' % i
        self.container.terminate_process(self._eea_pid)
        self._start_eeagent()
        gevent.sleep(1)
def on_init(self):
    log.info("Waiting for TestProcessSlowStart to start")
    gevent.sleep(2)
    log.info("TestProcessSlowStart started")
def test_caching(self):
    downloads_directory = os.path.join(get_this_directory(), "downloads")
    http_port = 8910
    http_port = self._start_webserver(downloads_directory, port=http_port)

    while self._webserver is None:
        print "Waiting for webserver to come up"
        gevent.sleep(1)

    self._enable_code_download(['*'])

    assert self._webserver.requests == 0

    u_pid = "test0"
    round = 0
    run_type = "pyon"
    proc_name = 'test_transform'
    module = "ion.my.module"
    module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
    cls = 'TestDownloadProcess'
    parameters = {'name': proc_name, 'module': module, 'module_uri': module_uri, 'cls': cls}

    # Launch a process, check that webserver is hit
    self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [500, 'RUNNING'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)

    assert self._webserver.requests == 1

    # Launch another process, check that webserver is still only hit once
    self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [500, 'RUNNING'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)

    assert self._webserver.requests == 1

    u_pid = "test5"
    round = 0
    run_type = "pyon"
    proc_name = 'test_transformx'
    module = "ion.agents.cei.test.test_eeagent"
    module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
    cls = 'TestProcess'
    parameters = {'name': proc_name, 'module': module, 'module_uri': module_uri, 'cls': cls}

    # Test that a module that is already available in tarball won't trigger a download
    self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [500, 'RUNNING'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)

    assert self._webserver.requests == 1

    u_pid = "test9"
    round = 0
    run_type = "pyon"
    proc_name = 'test_transformx'
    module = "ion.agents.cei.test.test_eeagent"
    module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
    cls = 'TestProcessNotReal'
    parameters = {'name': proc_name, 'module': module, 'module_uri': module_uri, 'cls': cls}

    # Test behaviour of a non existent class with no download
    self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [850, 'FAILED'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)
def test_whitelist(self):
    downloads_directory = os.path.join(get_this_directory(), "downloads")
    http_port = 8910
    http_port = self._start_webserver(downloads_directory, port=http_port)

    while self._webserver is None:
        print "Waiting for webserver to come up"
        gevent.sleep(1)

    assert self._webserver.requests == 0

    u_pid = "test0"
    round = 0
    run_type = "pyon"
    proc_name = 'test_transform'
    module = "ion.my.module"
    module_uri = "http://localhost:%s/ion/agents/cei/test/downloads/module_to_download.py" % http_port
    cls = 'TestDownloadProcess'
    parameters = {'name': proc_name, 'module': module, 'module_uri': module_uri, 'cls': cls}

    response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
    assert response.status == 401
    assert "Code download not enabled" in response.result

    # Test no whitelist
    self._enable_code_download()

    round += 1
    response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
    print response
    assert response.status == 401
    assert "not in code_download whitelist" in response.result

    # Test not matching
    self._enable_code_download(whitelist=['blork'])

    round += 1
    response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
    assert response.status == 401
    assert "not in code_download whitelist" in response.result

    # Test exact matching
    self._enable_code_download(whitelist=['localhost'])

    round += 1
    response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [500, 'RUNNING'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)

    # Test wildcard
    self._enable_code_download(whitelist=['*'])

    round += 1
    response = self.eea_client.launch_process(u_pid, round, run_type, parameters)
    self.wait_for_state(u_pid, [500, 'RUNNING'])
    self.eea_client.terminate_process(u_pid, round)
    state = self.eea_client.dump_state().result
    get_proc_for_upid(state, u_pid)
def testExecuteWorker(self):
    import mars.tensor as mt
    mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
    try:
        session_id = str(uuid.uuid4())
        with create_actor_pool(n_process=1, backend='gevent',
                               address=mock_scheduler_addr) as pool:
            pool.create_actor(ClusterInfoActor, schedulers=[mock_scheduler_addr],
                              uid=ClusterInfoActor.default_name())
            pool.create_actor(ChunkMetaActor, uid=ChunkMetaActor.default_name())
            resource_ref = pool.create_actor(
                ResourceActor, uid=ResourceActor.default_name())

            proc = subprocess.Popen([
                sys.executable, '-m', 'mars.worker',
                '-a', '127.0.0.1',
                '--schedulers', mock_scheduler_addr,
                '--cpu-procs', '1',
                '--cache-mem', '10m',
                '--ignore-avail-mem'
            ])
            worker_ips = []

            def waiter():
                check_time = time.time()
                while True:
                    if not resource_ref.get_workers_meta():
                        gevent.sleep(0.5)
                        if proc.poll() is not None:
                            raise SystemError('Worker dead. exit code %s' % proc.poll())
                        if time.time() - check_time > 20:
                            raise SystemError('Check meta_timestamp timeout')
                        continue
                    else:
                        break
                val = resource_ref.get_workers_meta()
                worker_ips.extend(val.keys())

            gl = gevent.spawn(waiter)
            gl.join()

            a = mt.ones((100, 50), chunk_size=30)
            b = mt.ones((50, 200), chunk_size=30)
            result = a.dot(b)

            graph = result.build_graph(tiled=True)

            reply_ref = pool.create_actor(PromiseReplyTestActor)
            reply_callback = ((reply_ref.uid, reply_ref.address), 'reply')

            executor_ref = pool.actor_ref(ExecutionActor.default_name(),
                                          address=worker_ips[0])
            io_meta = dict(chunks=[c.key for c in result.chunks])
            executor_ref.execute_graph(session_id, str(id(graph)),
                                       serialize_graph(graph), io_meta, None,
                                       callback=reply_callback)

            check_time = time.time()
            while not reply_ref.get_reply():
                gevent.sleep(0.1)
                if time.time() - check_time > 20:
                    raise SystemError('Check reply timeout')
    finally:
        if proc.poll() is None:
            proc.send_signal(signal.SIGINT)

            check_time = time.time()
            while True:
                time.sleep(1)
                if proc.poll() is not None or time.time() - check_time >= 5:
                    break
            if proc.poll() is None:
                proc.kill()
        if os.path.exists(options.worker.plasma_socket):
            os.unlink(options.worker.plasma_socket)
def receiving_handler(instance):
    """Receives incoming PDUs on `incoming_pdu_queue` and routes them to
    the intended state machine instance
    """
    while True:
        gevent.sleep(0)
        try:
            pdu_bytes = instance.incoming_pdu_queue.get(block=False)
            pdu = read_incoming_pdu(pdu_bytes)
            ait.core.log.debug('Incoming PDU Type: ' + str(pdu.header.pdu_type))

            if pdu.header.destination_entity_id != instance.mib.local_entity_id:
                ait.core.log.debug(
                    'Skipping PDU with mismatched destination entity id {0}'
                    .format(pdu.header.destination_entity_id))
                continue

            transaction_num = pdu.header.transaction_id
            machine = instance._machines[transaction_num] \
                if transaction_num in instance._machines else None

            if pdu.header.pdu_type == Header.FILE_DATA_PDU:
                # If its file data we'll concat to file
                ait.core.log.debug('Received File Data Pdu')
                if machine is None:
                    ait.core.log.info(
                        'Ignoring File Data for transaction that doesn\'t exist: {}'
                        .format(transaction_num))
                else:
                    # Restart inactivity timer here when PDU is being given to a machine
                    machine.inactivity_timer.restart()
                    machine.update_state(Event.RECEIVED_FILEDATA_PDU, pdu=pdu)
            elif pdu.header.pdu_type == Header.FILE_DIRECTIVE_PDU:
                ait.core.log.debug('Received File Directive Pdu: ' +
                                   str(pdu.file_directive_code))
                if pdu.file_directive_code == FileDirective.METADATA:
                    # If machine doesn't exist, create a machine for this transaction
                    transmission_mode = pdu.header.transmission_mode
                    if machine is None:
                        # if transmission_mode == TransmissionMode.NO_ACK:
                        machine = Receiver1(instance, transaction_num)
                        instance._machines[transaction_num] = machine

                    machine.update_state(Event.RECEIVED_METADATA_PDU, pdu=pdu)
                elif pdu.file_directive_code == FileDirective.EOF:
                    if machine is None:
                        ait.core.log.info(
                            'Ignoring EOF for transaction that doesn\'t exist: {}'
                            .format(transaction_num))
                    else:
                        if pdu.condition_code == ConditionCode.CANCEL_REQUEST_RECEIVED:
                            machine.update_state(Event.RECEIVED_EOF_CANCEL_PDU, pdu=pdu)
                        elif pdu.condition_code == ConditionCode.NO_ERROR:
                            ait.core.log.debug('Received EOF with checksum: {}'
                                               .format(pdu.file_checksum))
                            machine.update_state(Event.RECEIVED_EOF_NO_ERROR_PDU, pdu=pdu)
                        else:
                            ait.core.log.warn('Received EOF with strange condition code: {}'
                                              .format(pdu.condition_code))
        except gevent.queue.Empty:
            pass
        except Exception as e:
            ait.core.log.warn("EXCEPTION: " + e.message)
            ait.core.log.warn(traceback.format_exc())
        gevent.sleep(0.2)
def worker(user):
    while not tasks.empty():
        task = tasks.get()
        print('%s got task %s' % (user, task))
        gevent.sleep(0)
    print('Quitting worker!')
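# A minimal sketch of how the worker above is typically driven, assuming
# `tasks` is a module-level gevent.queue.Queue; the task values and worker
# names here are illustrative, not from the original source. The queue must
# be filled before the workers are spawned: a worker that starts on an empty
# queue exits immediately, since the loop condition is checked first.
import gevent
from gevent.queue import Queue

tasks = Queue()
for i in range(10):
    tasks.put_nowait(i)

gevent.joinall([
    gevent.spawn(worker, 'steve'),
    gevent.spawn(worker, 'john'),
])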
def h():
    gevent.sleep(5.0)
    return 5
def client_test_freenet(N, t, options):
    '''
    Test for the client with random delay channels

    command list
        i [target]: send a transaction to include for some particular party
        h [target]: stop some particular party
        m [target]: manually make particular party send some message
        help: show the help screen

    :param N: the number of parties
    :param t: the number of malicious parties
    :return None:
    '''
    initiateThresholdSig(open(options.threshold_keys, 'r').read())
    initiateECDSAKeys(open(options.ecdsa, 'r').read())
    initiateThresholdEnc(open(options.threshold_encs, 'r').read())
    initializeGIPC(PK=getKeys()[0])

    global logGreenlet
    logGreenlet = Greenlet(logWriter, open('msglog.TorMultiple', 'w'))
    logGreenlet.parent_args = (N, t)
    logGreenlet.name = 'client_test_freenet.logWriter'
    logGreenlet.start()

    # query amazon meta-data
    localIP = check_output(
        ['curl', 'http://169.254.169.254/latest/meta-data/public-ipv4'])
    myID = IP_LIST.index(localIP)
    N = len(IP_LIST)
    print "localIP %s, myID %s, N %s" % (localIP, myID, N)
    initiateRND(options.tx)

    def makeBroadcast(i):
        chans = []
        # First establish N connections (including a self connection)
        for j in range(N):
            host, port = IP_MAPPINGS[j]  # TOR_MAPPINGS[j]
            chans.append(connect_to_channel(host, port, i))

        def _broadcast(v):
            for j in range(N):
                chans[j].put((j, i, v))  # from i to j

        def _send(j, v):
            chans[j].put((j, i, v))

        return _broadcast, _send

    iterList = [myID]

    servers = []
    for i in iterList:
        _, port = IP_MAPPINGS[i]
        servers.append(listen_to_channel(port))
    print 'servers started'
    gevent.sleep(WAITING_SETUP_TIME_IN_SEC)  # wait for set-up to be ready
    print 'sleep over'

    if True:  # We only test for once
        initBeforeBinaryConsensus()
        ts = []
        controlChannels = [Queue() for _ in range(N)]
        bcList = dict()
        sdList = dict()
        tList = []

        def _makeBroadcast(x):
            bc, sd = makeBroadcast(x)
            bcList[x] = bc
            sdList[x] = sd

        for i in iterList:
            tmp_t = Greenlet(_makeBroadcast, i)
            tmp_t.parent_args = (N, t)
            tmp_t.name = 'client_test_freenet._makeBroadcast(%d)' % i
            tmp_t.start()
            tList.append(tmp_t)
        gevent.joinall(tList)

        rnd = Random()
        rnd.seed(123123)
        # This makes sure that all the EC2 instances have the same transaction pool
        transactionSet = set([
            encodeTransaction(randomTransaction(rnd), randomGenerator=rnd)
            for trC in range(int(options.tx))
        ])  # we are using the same one

        def toBeScheduled():
            for i in iterList:
                bc = bcList[i]  # makeBroadcast(i)
                sd = sdList[i]
                recv = servers[0].get
                th = Greenlet(honestParty, i, N, t, controlChannels[i], bc, recv, sd, options.B)
                th.parent_args = (N, t)
                th.name = 'client_test_freenet.honestParty(%d)' % i
                controlChannels[i].put(('IncludeTransaction', transactionSet))
                th.start()
                mylog('Summoned party %i at time %f' % (i, time.time()), verboseLevel=-1)
                ts.append(th)

            try:
                gevent.joinall(ts)
            except ACSException:
                gevent.killall(ts)
            except finishTransactionLeap:  ### Manually jump to this level
                print 'msgCounter', msgCounter
                print 'msgTypeCounter', msgTypeCounter
                # message id 0 (duplicated) for signatureCost
                logChannel.put(StopIteration)
                mylog("=====", verboseLevel=-1)
                for item in logChannel:
                    mylog(item, verboseLevel=-1)
                mylog("=====", verboseLevel=-1)
            except gevent.hub.LoopExit:  # Manual fix for early stop
                while True:
                    gevent.sleep(1)
                checkExceptionPerGreenlet()
            finally:
                print "Consensus Finished"

        s = sched.scheduler(time.time, time.sleep)
        time_now = time.time()
        delay = options.delaytime - time_now
        s.enter(delay, 1, toBeScheduled, ())
        print myID, "waits for", time_now + delay, 'now is', time_now
        s.run()
def _run(self):
    pcount = 0
    while True:
        try:
            self._logger.error("New KafkaClient %s" % self._topic)
            self._kfk = KafkaClient(self._brokers, "kc-" + self._topic)
            try:
                consumer = SimpleConsumer(self._kfk, self._group, self._topic,
                                          buffer_size=4096*4,
                                          max_buffer_size=4096*32)
            #except:
            except Exception as ex:
                template = "Consumer Failure {0} occurred. Arguments:\n{1!r}"
                messag = template.format(type(ex).__name__, ex.args)
                self._logger.error("Error: %s trace %s" %
                                   (messag, traceback.format_exc()))
                raise RuntimeError(messag)

            self._logger.error("Starting %s" % self._topic)

            # Find the offset of the last message that has been queued
            consumer.seek(-1, 2)
            try:
                mi = consumer.get_message(timeout=0.1)
                consumer.commit()
            except common.OffsetOutOfRangeError:
                mi = None
            #import pdb; pdb.set_trace()
            self._logger.info("Last Queued for %s is %s" %
                              (self._topic, str(mi)))

            # start reading from last previously processed message
            if mi != None:
                consumer.seek(-1, 1)
            else:
                consumer.seek(0, 0)

            if self._limit:
                raise gevent.GreenletExit

            while True:
                try:
                    mlist = consumer.get_messages(10, timeout=0.5)
                    if not self.msg_handler(mlist):
                        raise gevent.GreenletExit
                    consumer.commit()
                    pcount += len(mlist)
                except TypeError as ex:
                    self._logger.error("Type Error: %s trace %s" %
                                       (str(ex.args), traceback.format_exc()))
                    gevent.sleep(0.1)
                except common.FailedPayloadsError as ex:
                    self._logger.error("Payload Error: %s" % str(ex.args))
                    gevent.sleep(0.1)
        except gevent.GreenletExit:
            break
        except AssertionError as ex:
            self._partoffset = ex
            break
        except Exception as ex:
            template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            messag = template.format(type(ex).__name__, ex.args)
            self._logger.error("%s : traceback %s" %
                               (messag, traceback.format_exc()))
            self.stop_partition()
            gevent.sleep(2)

    self._logger.error("Stopping %s pcount %d" % (self._topic, pcount))
    partdb = self.stop_partition()
    return self._partoffset, partdb
def g():
    gevent.sleep(0.01)
    return 4
            workers[int(mm.key)] = ph
        elif mm.value == "stop":
            #import pdb; pdb.set_trace()
            if workers.has_key(int(mm.key)):
                ph = workers[int(mm.key)]
                gevent.kill(ph)
                res, db = ph.get()
                print "Returned " + str(res)
                print "State :"
                for k, v in db.iteritems():
                    print "%s -> %s" % (k, str(v))
                del workers[int(mm.key)]
        else:
            end_ready = True
            cons.commit()
            gevent.sleep(2)
            break
    except TypeError:
        gevent.sleep(0.1)
    except common.FailedPayloadsError as ex:
        print "Payload Error: " + str(ex.args)
        gevent.sleep(0.1)

lw = []
for key, value in workers.iteritems():
    gevent.kill(value)
    lw.append(value)
gevent.joinall(lw)
print "Ending Consumers"
def g():
    gevent.sleep(5.0)
    return 4
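# Tiny helpers like g() above are typically exercised with gevent's timeout
# API. A sketch using only gevent itself: with_timeout() aborts the call when
# the time limit passes and returns timeout_value instead of raising.
import gevent

assert gevent.with_timeout(0.1, g, timeout_value='too slow') == 'too slow'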
def setter():
    print("Class is in session")
    gevent.sleep(5)  # lasts for 5 seconds
    print("OK, class dismissed")
    global evt
    evt.set('Done')
def _run(self):
    lredis = None
    pb = None
    while True:
        try:
            lredis = redis.StrictRedis(
                host=self._pi.ip_address,
                port=self._pi.port,
                password=self._rpass,
                db=7)
            pb = lredis.pubsub()
            inst = self._pi.instance_id
            part = self._partno
            pb.subscribe('AGPARTPUB:%s:%d' % (inst, part))
            self.syncpart(lredis)
            for message in pb.listen():
                if message["type"] != "message":
                    continue
                dataline = message["data"]
                try:
                    elems = json.loads(dataline)
                except:
                    self._logger.error("AggUVE Parsing failed: %s" % str(message))
                    continue
                else:
                    self._logger.info("AggUVE loading: %s" % str(elems))
                if self._content:
                    ppe = lredis.pipeline()
                lelems = []
                for elem in elems:
                    table, barekey = elem["key"].split(":", 1)
                    if self._tablefilt:
                        if not table in self._tablefilt:
                            continue
                    if self._patterns:
                        kfilter_match = False
                        for pattern in self._patterns:
                            if pattern.match(barekey):
                                kfilter_match = True
                                break
                        if not kfilter_match:
                            continue
                    if elem["type"] and self._cfilter:
                        if not elem["type"] in self._cfilter:
                            continue
                    lelems.append(elem)
                    if self._content:
                        # This UVE was deleted
                        if elem["type"] is None:
                            ppe.exists("AGPARTVALUES:%s:%d:%s" %
                                       (inst, part, elem["key"]))
                        else:
                            ppe.hget("AGPARTVALUES:%s:%d:%s" %
                                     (inst, part, elem["key"]), elem["type"])

                # We need to execute this pipeline read only if we are
                # keeping track of UVE contents (streaming case)
                if self._content:
                    pperes = ppe.execute()
                idx = 0
                for elem in lelems:
                    key = elem["key"]
                    typ = elem["type"]

                    vdata = None
                    if not typ is None:
                        if self._content:
                            vjson = pperes[idx]
                            if vjson is None:
                                vdata = None
                            else:
                                vdata = json.loads(vjson)
                        else:
                            vdata = {}
                    self._cb(self._partno, self._pi, key, typ, vdata)
                    idx += 1

        except gevent.GreenletExit:
            break
        except Exception as ex:
            template = "Exception {0} in uve stream proc. Arguments:\n{1!r}"
            messag = template.format(type(ex).__name__, ex.args)
            self._logger.error("%s : traceback %s" %
                               (messag, traceback.format_exc()))
            lredis = None
            if pb is not None:
                pb.close()
                pb = None
            gevent.sleep(2)

    return None
def producer(name):
    print(f'producer({name}) is running')
    while not q.empty():
        task = q.get()
        print(f'producer({name}) get task: {task}')
        gevent.sleep(1)
def queued_call(self, key, delay):
    try:
        self._call_records[key] = time.time()
        gevent.sleep(delay)
    finally:
        self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
def process_gym(self, gym):
    gym_id = gym['id']

    # Update Gym details (if they exist)
    if gym_id not in self.__gym_info or gym['name'] != 'unknown':
        self.__gym_info[gym_id] = {
            "name": gym['name'],
            "description": gym['description'],
            "url": gym['url']
        }

    if self.__gym_settings['enabled'] is False:
        log.debug("Gym ignored: notifications are disabled.")
        return

    # Extract some basic information
    to_team_id = gym['new_team_id']
    from_team_id = self.__gym_hist.get(gym_id)

    # Doesn't look like anything to me
    if to_team_id == from_team_id:
        log.debug("Gym ignored: no change detected")
        return

    # Ignore changes to neutral
    if self.__gym_settings['ignore_neutral'] and to_team_id == 0:
        log.debug("Gym update ignored: changed to neutral")
        return

    # Update gym's last known team
    self.__gym_hist[gym_id] = to_team_id

    # Ignore first time updates
    if from_team_id is None:
        log.debug("Gym update ignored: first time seeing this gym")
        return

    # Get some more info out used to check filters
    lat, lng = gym['lat'], gym['lng']
    dist = get_earth_dist([lat, lng], self.__location)
    cur_team = self.__locale.get_team_name(to_team_id)
    old_team = self.__locale.get_team_name(from_team_id)

    filters = self.__gym_settings['filters']
    passed = False
    for filt_ct in range(len(filters)):
        filt = filters[filt_ct]

        # Check the distance from the set location
        if dist != 'unkn':
            if filt.check_dist(dist) is False:
                if self.__quiet is False:
                    # Format the whole message at once; formatting only the
                    # second literal left the first placeholders unfilled.
                    log.info("Gym rejected: distance ({:.2f}) was not in range"
                             " {:.2f} to {:.2f} (F #{})".format(
                                 dist, filt.min_dist, filt.max_dist, filt_ct))
                continue
        else:
            log.debug("Gym dist was not checked because the manager has no location set.")

        # Check the old team
        if filt.check_from_team(from_team_id) is False:
            if self.__quiet is False:
                log.info("Gym rejected: {} as old team is not correct (F #{})".format(
                    old_team, filt_ct))
            continue
        # Check the new team
        if filt.check_to_team(to_team_id) is False:
            if self.__quiet is False:
                log.info("Gym rejected: {} as current team is not correct (F #{})".format(
                    cur_team, filt_ct))
            continue

        # Nothing left to check, so it must have passed
        passed = True
        log.debug("Gym passed filter #{}".format(filt_ct))
        break

    if not passed:
        return

    # Check the geofences
    gym['geofence'] = self.check_geofences('Gym', lat, lng)
    if len(self.__geofences) > 0 and gym['geofence'] == 'unknown':
        log.info("Gym rejected: not inside geofence(s)")
        return

    # Check if in geofences
    if len(self.__geofences) > 0:
        inside = False
        for gf in self.__geofences:
            inside |= gf.contains(lat, lng)
        if inside is False:
            if self.__quiet is False:
                log.info("Gym update ignored: located outside geofences.")
            return
    else:
        log.debug("Gym inside geofences was not checked because no geofences were set.")

    gym_info = self.__gym_info.get(gym_id, {})

    gym.update({
        "gym_name": gym_info.get('name', 'unknown'),
        "gym_description": gym_info.get('description', 'unknown'),
        "gym_url": gym_info.get(
            'url',
            'https://raw.githubusercontent.com/RocketMap/PokeAlarm/master/icons/gym_0.png'),
        "dist": get_dist_as_str(dist),
        'dir': get_cardinal_dir([lat, lng], self.__location),
        'new_team': cur_team,
        'new_team_id': to_team_id,
        'old_team': old_team,
        'old_team_id': from_team_id,
        'new_team_leader': self.__locale.get_leader_name(to_team_id),
        'old_team_leader': self.__locale.get_leader_name(from_team_id)
    })
    if self.__loc_service:
        self.__loc_service.add_optional_arguments(self.__location, [lat, lng], gym)

    if self.__quiet is False:
        log.info("Gym ({}) notification has been triggered!".format(gym_id))

    threads = []
    # Spawn notifications in threads so they can work in background
    for alarm in self.__alarms:
        threads.append(gevent.spawn(alarm.gym_alert, gym))
        gevent.sleep(0)  # explicit context yield

    for thread in threads:
        thread.join()
def send_requests(self):
    # sensor base name
    sensor_base_name = "sensor_temp_"

    # register the application - 0
    request_ID = (uuid.uuid4().hex)[:12]
    # append the request to requests
    request_ID = str('app_' + request_ID)
    request = [{'register': {'application': {'app_ID': self.app_ID,
                                             'request_ID': request_ID}}}]
    request_path = self.orch_path + 'request'
    self.push_content(request_path, request)
    self.requests.append(request_ID)
    self.logger.info('sent request to register application')
    gevent.sleep(3)

    # register the sensors involved - 1
    for sensor_count in range(1, self.num_of_sensors + 1):
        request_ID = (uuid.uuid4().hex)[:12]
        sensor_name = str(sensor_base_name + str(sensor_count))
        sensor_name = sensor_name + '_'
        request_ID = str(sensor_name + request_ID)
        request = [{'register': {'sensor': {'app_ID': self.app_ID,
                                            'request_ID': request_ID,
                                            'sensor_type': 'temperature'}}}]
        self.push_content(request_path, request)
        self.requests.append(request_ID)
        self.logger.info('sent request to register sensor')
        gevent.sleep(3)

    # register the actuator - 2
    request_ID = (uuid.uuid4().hex)[:12]
    request_ID = str('actuator_simple_' + request_ID)
    request = [{'register': {'actuator': {'app_ID': self.app_ID,
                                          'request_ID': request_ID,
                                          'actuator_type': 'simple'}}}]
    self.push_content(request_path, request)
    self.requests.append(request_ID)
    self.logger.info('sent request to register actuator')
    gevent.sleep(3)

    # switch on the temperature sensor - 3
    for sensor_count in range(1, self.num_of_sensors + 1):
        request_ID = (uuid.uuid4().hex)[:12]
        request_ID = str('modify_' + request_ID)
        sensor_name = self.requests_ID[self.requests[sensor_count]]['conf']['name']
        self.requests.append(request_ID)
        request = [{'modify': {'app_ID': self.app_ID,
                               'request_ID': request_ID,
                               'name': sensor_name,
                               'conf': {'onoff': 'ON', 'period': 5}}}]
        request_path = self.sensor_temp_path + 'request'
        self.push_content(request_path, request)

    request_ID = (uuid.uuid4().hex)[:12]
    request_ID = str('modify_' + request_ID)
    actuator_name = self.requests_ID[self.requests[self.num_of_sensors + 1]]['conf']['name']
    self.requests.append(request_ID)
    request = [{'modify': {'app_ID': self.app_ID,
                           'request_ID': request_ID,
                           'name': actuator_name,
                           'conf': {'delay': 3}}}]
    request_path = self.actuator_simple_path + 'request'
    self.push_content(request_path, request)

    # run a polling loop to check if the system is established
    # if established we will connect the sensor application
    self.logger.info('waiting for system to be established...')
    gevent.sleep(5)

    for sensor_count in range(1, self.num_of_sensors + 1):
        sensor_request = self.requests[sensor_count]
        self.add_container_subscription(
            self.requests_ID[sensor_request]['conf']['path'],
            self.handle_temperature_sensor)

    actuator_request = self.requests[self.num_of_sensors + 1]
    self.add_container_subscription(
        self.requests_ID[actuator_request]['conf']['out_path'],
        self.handle_actuator_out)
def process_pokemon(self, pkmn):
    # Make sure that pokemon are enabled
    if self.__pokemon_settings['enabled'] is False:
        log.debug("Pokemon ignored: pokemon notifications are disabled.")
        return

    # Extract some base information
    id_ = pkmn['id']
    pkmn_id = pkmn['pkmn_id']
    name = self.__locale.get_pokemon_name(pkmn_id)

    # Check for previously processed
    if id_ in self.__pokemon_hist:
        log.debug("{} was skipped because it was previously processed.".format(name))
        return
    self.__pokemon_hist[id_] = pkmn['disappear_time']

    # Check the time remaining
    seconds_left = (pkmn['disappear_time'] - datetime.utcnow()).total_seconds()
    if seconds_left < self.__time_limit:
        if self.__quiet is False:
            log.info("{} ignored: Only {} seconds remaining.".format(name, seconds_left))
        return

    # Check that the filter is even set
    if pkmn_id not in self.__pokemon_settings['filters']:
        if self.__quiet is False:
            log.info("{} ignored: no filters are set".format(name))
        return

    # Extract some useful info that will be used in the filters
    lat, lng = pkmn['lat'], pkmn['lng']
    dist = get_earth_dist([lat, lng], self.__location)
    form_id = pkmn.get('form_id', 0)
    if form_id == '?':
        form_id = 0

    pkmn['pkmn'] = name

    filters = self.__pokemon_settings['filters'][pkmn_id]
    passed = self.check_pokemon_filter(filters, pkmn, dist)
    # If we didn't pass any filters
    if not passed:
        return

    quick_id = pkmn['quick_id']
    charge_id = pkmn['charge_id']

    # Check all the geofences
    pkmn['geofence'] = self.check_geofences(name, lat, lng)
    if len(self.__geofences) > 0 and pkmn['geofence'] == 'unknown':
        log.info("{} rejected: not inside geofence(s)".format(name))
        return

    # Finally, add in all the extra crap we waited to calculate until now
    time_str = get_time_as_str(pkmn['disappear_time'], self.__timezone)
    iv = pkmn['iv']
    pkmn.update({
        'pkmn': name,
        "dist": get_dist_as_str(dist) if dist != 'unkn' else 'unkn',
        'time_left': time_str[0],
        '12h_time': time_str[1],
        '24h_time': time_str[2],
        'dir': get_cardinal_dir([lat, lng], self.__location),
        'iv_0': "{:.0f}".format(iv) if iv != '?' else '?',
        'iv': "{:.1f}".format(iv) if iv != '?' else '?',
        'iv_2': "{:.2f}".format(iv) if iv != '?' else '?',
        'quick_move': self.__locale.get_move_name(quick_id),
        'charge_move': self.__locale.get_move_name(charge_id),
        'form_id': (chr(64 + int(form_id))) if form_id and int(form_id) > 0 else ''
    })
    if self.__loc_service:
        self.__loc_service.add_optional_arguments(self.__location, [lat, lng], pkmn)

    if self.__quiet is False:
        log.info("{} notification has been triggered!".format(name))

    threads = []
    # Spawn notifications in threads so they can work in background
    for alarm in self.__alarms:
        threads.append(gevent.spawn(alarm.pokemon_alert, pkmn))
        gevent.sleep(0)  # explicit context yield

    for thread in threads:
        thread.join()
def test(self):
    gevent.sleep(0.3)
    sock = self.connect()
    sock.send('Test_udp_server')
    data, address = sock.recvfrom(8192)
    self.assertEqual(data, 'Received 15 bytes')
def process_egg(self, egg):
    # Quick check for enabled
    if self.__egg_settings['enabled'] is False:
        log.debug("Egg ignored: notifications are disabled.")
        return

    gym_id = egg['id']
    raid_end = egg['raid_end']

    # raid history will contain any raid processed
    if gym_id in self.__raid_hist:
        old_raid_end = self.__raid_hist[gym_id]['raid_end']
        if old_raid_end == raid_end:
            if self.__quiet is False:
                log.info("Raid {} ignored. Was previously processed.".format(gym_id))
            return

    self.__raid_hist[gym_id] = dict(raid_end=raid_end, pkmn_id=0)

    # don't alert about (nearly) hatched eggs
    seconds_left = (egg['raid_begin'] - datetime.utcnow()).total_seconds()
    if seconds_left < self.__time_limit:
        if self.__quiet is False:
            log.info("Egg {} ignored. Egg hatch in {} seconds".format(gym_id, seconds_left))
        return

    lat, lng = egg['lat'], egg['lng']
    dist = get_earth_dist([lat, lng], self.__location)

    # Check if raid is in geofences
    egg['geofence'] = self.check_geofences('Raid', lat, lng)
    if len(self.__geofences) > 0 and egg['geofence'] == 'unknown':
        if self.__quiet is False:
            log.info("Egg {} ignored: located outside geofences.".format(gym_id))
        return
    else:
        log.debug("Egg inside geofence was not checked because no geofences were set.")

    # check if the level is in the filter range or if we are ignoring eggs
    passed = self.check_egg_filter(self.__egg_settings, egg)

    if not passed:
        log.debug("Egg {} did not pass filter check".format(gym_id))
        return

    if self.__loc_service:
        self.__loc_service.add_optional_arguments(self.__location, [lat, lng], egg)

    if self.__quiet is False:
        log.info("Egg ({}) notification has been triggered!".format(gym_id))

    time_str = get_time_as_str(egg['raid_end'], self.__timezone)
    start_time_str = get_time_as_str(egg['raid_begin'], self.__timezone)

    gym_info = self.__gym_info.get(gym_id, {})

    egg.update({
        #"gym_name": self.__gym_info.get(gym_id, {}).get('name', 'unknown'),
        #"gym_description": self.__gym_info.get(gym_id, {}).get('description', 'unknown'),
        #"gym_url": self.__gym_info.get(gym_id, {}).get('url', 'https://raw.githubusercontent.com/kvangent/PokeAlarm/master/icons/gym_0.png'),
        'time_left': time_str[0],
        '12h_time': time_str[1],
        '24h_time': time_str[2],
        'begin_time_left': start_time_str[0],
        'begin_12h_time': start_time_str[1],
        'begin_24h_time': start_time_str[2],
        "dist": get_dist_as_str(dist),
        'dir': get_cardinal_dir([lat, lng], self.__location),
        #'team': self.__team_name[egg['team_id']]
    })

    threads = []
    # Spawn notifications in threads so they can work in background
    for alarm in self.__alarms:
        threads.append(gevent.spawn(alarm.raid_egg_alert, egg))
        gevent.sleep(0)  # explicit context yield

    for thread in threads:
        thread.join()
def process_pokestop(self, stop):
    # Make sure that pokestops are enabled
    if self.__pokestop_settings['enabled'] is False:
        log.debug("Pokestop ignored: pokestop notifications are disabled.")
        return

    id_ = stop['id']

    # Check for previously processed
    if id_ in self.__pokestop_hist:
        log.debug("Pokestop was skipped because it was previously processed.")
        return
    self.__pokestop_hist[id_] = stop['expire_time']

    # Check the time remaining
    seconds_left = (stop['expire_time'] - datetime.utcnow()).total_seconds()
    if seconds_left < self.__time_limit:
        if self.__quiet is False:
            log.info("Pokestop ({}) ignored: only {} seconds remaining.".format(id_, seconds_left))
        return

    # Extract some basic information
    lat, lng = stop['lat'], stop['lng']
    dist = get_earth_dist([lat, lng], self.__location)

    passed = False
    filters = self.__pokestop_settings['filters']
    for filt_ct in range(len(filters)):
        filt = filters[filt_ct]

        # Check the distance from the set location
        if dist != 'unkn':
            if filt.check_dist(dist) is False:
                if self.__quiet is False:
                    log.info("Pokestop rejected: distance ({:.2f}) was not in range".format(dist) +
                             " {:.2f} to {:.2f} (F #{})".format(filt.min_dist, filt.max_dist, filt_ct))
                continue
        else:
            log.debug("Pokestop dist was not checked because the manager has no location set.")

        # Nothing left to check, so it must have passed
        passed = True
        log.debug("Pokestop passed filter #{}".format(filt_ct))
        break

    if not passed:
        return

    # Check the geofences
    stop['geofence'] = self.check_geofences('Pokestop', lat, lng)
    if len(self.__geofences) > 0 and stop['geofence'] == 'unknown':
        log.info("Pokestop rejected: not within any specified geofence")
        return

    time_str = get_time_as_str(stop['expire_time'], self.__timezone)
    stop.update({
        "dist": get_dist_as_str(dist),
        'time_left': time_str[0],
        '12h_time': time_str[1],
        '24h_time': time_str[2],
        'dir': get_cardinal_dir([lat, lng], self.__location),
    })
    if self.__loc_service:
        self.__loc_service.add_optional_arguments(self.__location, [lat, lng], stop)

    if self.__quiet is False:
        log.info("Pokestop ({}) notification has been triggered!".format(id_))

    threads = []
    # Spawn notifications in threads so they can work in background
    for alarm in self.__alarms:
        threads.append(gevent.spawn(alarm.pokestop_alert, stop))
        gevent.sleep(0)  # explicit context yield

    for thread in threads:
        thread.join()
def wait():
    gevent.sleep(2)
def process_raid(self, raid):
    # Quick check for enabled
    if self.__raid_settings['enabled'] is False:
        log.debug("Raid ignored: notifications are disabled.")
        return

    gym_id = raid['id']
    pkmn_id = raid['pkmn_id']
    raid_end = raid['raid_end']

    # raid history will contain the end date and also the pokemon if it has hatched
    if gym_id in self.__raid_hist:
        old_raid_end = self.__raid_hist[gym_id]['raid_end']
        old_raid_pkmn = self.__raid_hist[gym_id].get('pkmn_id', 0)
        if old_raid_end == raid_end:
            if old_raid_pkmn == pkmn_id:
                # raid with same end time exists and it has same pokemon id, skip it
                if self.__quiet is False:
                    log.info("Raid {} ignored. Was previously processed.".format(gym_id))
                return

    self.__raid_hist[gym_id] = dict(raid_end=raid_end, pkmn_id=pkmn_id)

    # don't alert about expired raids
    seconds_left = (raid_end - datetime.utcnow()).total_seconds()
    if seconds_left < self.__time_limit:
        if self.__quiet is False:
            log.info("Raid {} ignored. Only {} seconds left.".format(gym_id, seconds_left))
        return

    lat, lng = raid['lat'], raid['lng']
    dist = get_earth_dist([lat, lng], self.__location)

    # Check if raid is in geofences
    raid['geofence'] = self.check_geofences('Raid', lat, lng)
    if len(self.__geofences) > 0 and raid['geofence'] == 'unknown':
        if self.__quiet is False:
            log.info("Raid {} ignored: located outside geofences.".format(gym_id))
        return
    else:
        log.debug("Raid inside geofence was not checked because no geofences were set.")

    quick_id = raid['quick_id']
    charge_id = raid['charge_id']

    # check filters for pokemon
    name = self.__locale.get_pokemon_name(pkmn_id)

    if pkmn_id not in self.__raid_settings['filters']:
        if self.__quiet is False:
            log.info("Raid on {} ignored: no filters are set".format(name))
        return

    raid_pkmn = {
        'pkmn': name,
        'cp': raid['cp'],
        'iv': 100,
        'level': 20,
        'def': 15,
        'atk': 15,
        'sta': 15,
        'gender': 'unknown',
        'size': 'unknown',
        'form_id': '?',
        'quick_id': quick_id,
        'charge_id': charge_id
    }

    filters = self.__raid_settings['filters'][pkmn_id]
    passed = self.check_pokemon_filter(filters, raid_pkmn, dist)
    # If we didn't pass any filters
    if not passed:
        log.debug("Raid {} did not pass pokemon check".format(gym_id))
        return

    if self.__loc_service:
        self.__loc_service.add_optional_arguments(self.__location, [lat, lng], raid)

    if self.__quiet is False:
        log.info("Raid ({}) notification has been triggered!".format(gym_id))

    time_str = get_time_as_str(raid['raid_end'], self.__timezone)
    start_time_str = get_time_as_str(raid['raid_begin'], self.__timezone)

    gym_info = self.__gym_info.get(gym_id, {})

    raid.update({
        'pkmn': name,
        #"gym_name": self.__gym_info.get(gym_id, {}).get('name', 'unknown'),
        #"gym_description": self.__gym_info.get(gym_id, {}).get('description', 'unknown'),
        #"gym_url": self.__gym_info.get(gym_id, {}).get('url', 'https://raw.githubusercontent.com/kvangent/PokeAlarm/master/icons/gym_0.png'),
        'time_left': time_str[0],
        '12h_time': time_str[1],
        '24h_time': time_str[2],
        'begin_time_left': start_time_str[0],
        'begin_12h_time': start_time_str[1],
        'begin_24h_time': start_time_str[2],
        "dist": get_dist_as_str(dist),
        'quick_move': self.__locale.get_move_name(quick_id),
        'charge_move': self.__locale.get_move_name(charge_id),
        #'team': self.__team_name[raid['team_id']],
        'dir': get_cardinal_dir([lat, lng], self.__location),
        'form': self.__locale.get_form_name(pkmn_id, raid_pkmn['form_id'])
    })

    threads = []
    # Spawn notifications in threads so they can work in background
    for alarm in self.__alarms:
        threads.append(gevent.spawn(alarm.raid_alert, raid))
        gevent.sleep(0)  # explicit context yield

    for thread in threads:
        thread.join()
def run(self):
    while True:
        if self._temperature >= self._ca.cal:
            # If the temperature rises above CAL
            if self._curtain.control != Types.CA:
                # Hand control over to curtain opening
                self._curtain.control = Types.CA
        else:
            # If the temperature stays below CAL
            if self._temperature <= self._ca.cad:
                # If the temperature drops below CAD
                if self._curtain.control == Types.CA:
                    # If control is currently set to curtain opening
                    self._ca.state = Types.CA_INITIAL_STATE  # Stop opening the curtain
                    self._curtain.control = Types.NONE

        if self._curtain.abertura < self._vm.limite:
            # If the opening is below the VM limit
            if self._curtain.control == Types.CF:
                # If controlling via CF
                if (self._cf.state == Types.CF_INITIAL_STATE) or (
                        self._cf.state == Types.CF_STOPPED):
                    # If CF is stopped, return control to VM
                    self._curtain.control = Types.VM
            else:
                # Otherwise, return control to VM
                if self._curtain.control != Types.VM:
                    self._curtain.control = Types.VM
        else:
            # If the opening is at or above the VM limit
            if self._curtain.control != Types.VM:
                # If not controlling via VM
                if self._temperature <= self._cf.cfl:
                    # If the temperature drops below CFL
                    if self._curtain.control != Types.CF:
                        self._curtain.control = Types.CF
                elif self._temperature >= self._cf.cfd:
                    # If the temperature rises above CFD
                    if self._curtain.control == Types.CF:
                        self._cf.state = Types.CF_INITIAL_STATE
                        self._curtain.control = Types.NONE
                if self._vm.state == Types.VM_WAIT_CLOSING:
                    # If in the VM closed-wait state, drop out of control
                    self._curtain.control = Types.NONE
                else:
                    if self._curtain.control == Types.CF:
                        # If controlling via CF
                        if self._temperature >= self._cf.cfd:
                            # If the temperature rises above CFD
                            self._cf.state = Types.CF_INITIAL_STATE
                            self._curtain.control = Types.NONE
                    if self._temperature >= self._ca.cad:
                        if ((self._curtain.control != Types.CF) and
                                (self._curtain.control != Types.CA)):
                            if self._curtain.abertura < self._vm.limite:
                                self._curtain.control = Types.VM

        if self._curtain.control == Types.CA:
            self._vm.state = Types.VM_INITIAL_STATE
            self._cf.state = Types.CF_INITIAL_STATE
            self._ca.fsm()
        elif self._curtain.control == Types.CF:
            self._vm.state = Types.VM_INITIAL_STATE
            self._ca.state = Types.CA_INITIAL_STATE
            self._cf.fsm()
        elif self._curtain.control == Types.VM:
            self._ca.state = Types.CA_INITIAL_STATE
            self._vm.fsm()
            if self._vm.state == Types.VM_WAIT_CLOSING:
                # During the VM closed-wait period
                if (self._temperature <= self._cf.cfl) and (
                        self._started_cf_by_vm is False):
                    # If the temperature drops below CFL
                    self._cf.fsm()
                    self._started_cf_by_vm = True
                if (self._temperature < self._cf.cfd) and (self._started_cf_by_vm is True):
                    self._cf.fsm()
                if self._temperature >= self._cf.cfd:
                    self._started_cf_by_vm = False
                    self._cf.state = Types.CF_INITIAL_STATE
            else:
                self._cf.state = Types.CF_INITIAL_STATE
        elif self._curtain.control == Types.NONE:
            self._ca.state = Types.CA_INITIAL_STATE
            self._cf.state = Types.CF_INITIAL_STATE
            self._vm.state = Types.VM_INITIAL_STATE

        self.publisher("abertura", self._curtain.abertura)
        gevent.sleep(1)
def task(pid):
    gevent.sleep(0.5)
    print('%s Task %d done.' % (str(datetime.datetime.now()), pid))
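# A minimal driver for task() above: spawn the tasks concurrently and wait for
# them all. Because each task sleeps cooperatively, ten of them finish in
# roughly 0.5s total rather than 5s (the count of ten is illustrative).
import gevent

gevent.joinall([gevent.spawn(task, pid) for pid in range(10)])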