def event_loop(self):
    """The publish event loop.

    Pops (resource, response) pairs off the publish queue, parses them,
    and hands successfully parsed resources to the analyze queue.  Pairs
    containing None are routed to the publish-error queue.
    """
    while self.should_loop():
        resource, response = self.queue.get_publish()  # pop from queue
        if resource is not None and response is not None:  # if an item exists
            self.log.debug("found resource, parsing...")
            parse_success = self.response_parser.parse(response, resource)
            if parse_success:
                self.queue.put_analyze(resource)
                self.log.debug(
                    "resource id:[%s] parsed, and put on analyze queue for analysis, size: [%d]"
                    % (str(resource.id), self.queue.analyze_size()))
        else:
            # serious error if we got here
            # log the error and put on the publish error queue
            error_msg = "Found either resource or response that was None, resource: [%s], response: [%s]"
            self.log.error(error_msg % (resource, response))
            self.queue.put_publish_error((resource, response))
            # BUG FIX: the original formatted self.queue.get_publish_error()
            # with %d; by the queue API convention used elsewhere
            # (get_publish pops, publish_size/analyze_size report size),
            # get_publish_error() pops an item rather than returning a size.
            # Use the size accessor instead -- TODO confirm exact method name
            # against the queue class.
            self.log.error(
                "resource put on publish error queue, size: [%d]"
                % self.queue.publish_error_size())
        gevent.sleep(2)
        gevent.idle()
def data_update(self, payload):
    """Incrementally decode *payload* and fan each decoded update out to
    all registered update callbacks.

    Yields to other greenlets every 100 updates so a very large payload
    cannot starve the event loop.
    """
    decoded_updates = self.pipdata.decode_and_update(payload)
    for count, update in enumerate(decoded_updates):
        # Cooperative yield: payload may be very large, so give other
        # greenlets a chance to run periodically.
        if count % 100 == 0:
            gevent.idle(0)
        for cb in self.update_callbacks:
            cb(update)
def test_connect_auto_discovery_success(self, mock_recv, mock_emit):
    """Connecting with an initially empty server list must bootstrap from
    the web API (not DNS) and then connect successfully."""
    # setup
    self.conn.connect.return_value = True
    self.server_list.__len__.return_value = 0

    def fake_servers(*args, **kwargs):
        # Simulate a successful bootstrap: server list is now populated.
        self.server_list.__len__.return_value = 10
        return True

    self.server_list.bootstrap_from_webapi.side_effect = fake_servers
    # run
    cm = CMClient()
    with gevent.Timeout(3, False):
        cm.connect(retry=1)
        gevent.idle()
    # verify
    self.server_list.bootstrap_from_webapi.assert_called_once_with()
    self.server_list.bootstrap_from_dns.assert_not_called()
    # NOTE(review): (127001, 20000) is whatever address the mocked server
    # list yields; presumably a stand-in for ('127.0.0.1', 20000) -- confirm
    # against the fixture setup.
    self.conn.connect.assert_called_once_with((127001, 20000))
    mock_emit.assert_called_once_with('connected')
    mock_recv.assert_called_once_with()
def event_loop(self):
    """Request loop: take resources off the request queue, fetch each one
    over HTTP with a conditional (ETag) request, and queue the result for
    the publish/parse stage."""
    while self.should_loop():
        resource = self.queue.get_requests()  # pop from queue
        if resource is not None:  # an item was available
            self.log.debug("found resource to request")
            # Install the resource's headers, adding the conditional ETag
            # so the server can reply 304 Not Modified.
            self.session.headers = resource.send_headers
            self.session.headers.update(
                {"If-None-Match": '%s' % resource.timings.etag})
            resp = self.session.get(resource.uri)
            self.log.info("request complete",
                          status_code=resp.status_code,
                          resource_id=str(resource.id))
            # Hand the (resource, response) pair to the parser stage.
            self.queue.put_publish((resource, resp))
            self.log.debug(
                "resource put on publish queue for parsing, size: [%d]"
                % self.queue.publish_size())
        gevent.sleep(2)
        gevent.idle()
def event_loop(self):
    """The event loop: pop resources from the request queue, fetch them
    with a conditional (ETag) HTTP GET, and queue the (resource, response)
    pair for parsing."""
    while self.should_loop():
        resource = self.queue.get_requests()  # pop from queue
        if resource is not None:  # if an item exists
            self.log.debug("found resource to request")
            # update headers; If-None-Match lets the server answer 304
            self.session.headers = resource.send_headers
            self.session.headers.update({
                "If-None-Match": '%s' % resource.timings.etag
            })
            # resp = self.session.get("https://api.github.com/events")
            resp = self.session.get(resource.uri)
            self.log.info("request complete",
                          status_code=resp.status_code,
                          resource_id=str(resource.id))
            # self.log.info(resp.headers)
            # self.log.info(resp.content)
            # put Tuple(Resource, Response) in publish queue
            self.queue.put_publish((resource, resp))
            self.log.debug("resource put on publish queue for parsing, size: [%d]"
                           % self.queue.publish_size())
        gevent.sleep(2)
        gevent.idle()
def _build_ri_config(self, vn, ri_name, ri_obj, export_targets,
                     import_targets, vn_list, is_master_int_vn):
    """Build a RoutingInstance config for internal virtual network *vn*.

    For a non-master internal VN a 'vrf' routing instance is created with
    a dedicated lo0 unit; for the master internal VN a 'master' routing
    instance (representing inet.0) is created.  In both cases an irb
    routing interface is referenced for every connected VN in *vn_list*.
    """
    gevent.idle()  # yield: this builder can run inside a long config pass
    network_id = vn.vn_network_id
    vxlan_id = vn.get_vxlan_vni(is_internal_vn=True)
    if not is_master_int_vn:
        # create routing instance of type vrf
        ri = RoutingInstance(
            name=ri_name, description=ri_name, virtual_network_mode='l3',
            export_targets=export_targets, import_targets=import_targets,
            virtual_network_id=str(network_id), vxlan_id=str(vxlan_id),
            is_public_network=vn.router_external,
            routing_instance_type='vrf',
            virtual_network_is_internal=True)
        _, li_map = self._add_or_lookup_pi(self.pi_map, 'lo0', 'loopback')
        # lo0 unit is offset by 1000 per network id to avoid clashes
        lo0_unit = 1000 + int(network_id)
        lo0_li = self._add_or_lookup_li(
            li_map, 'lo0.' + str(lo0_unit), lo0_unit)
        self._add_ip_address(lo0_li, '127.0.0.1')
        self._add_ref_to_list(ri.get_loopback_interfaces(), lo0_li.get_name())
    else:
        # create routing instance of type master, which represents inet.0
        ri = RoutingInstance(
            name=ri_name, description=ri_name, virtual_network_mode='l3',
            is_public_network=vn.router_external,
            routing_instance_type='master',
            virtual_network_is_internal=True, is_master=True)
    # reference an irb routing interface for each connected VN
    for connected_vn_uuid in vn_list:
        connected_vn = db.VirtualNetworkDM.get(connected_vn_uuid)
        irb_name = 'irb.' + str(connected_vn.vn_network_id)
        self._add_ref_to_list(ri.get_routing_interfaces(), irb_name)
    return ri
def _build_ri_config(self, vn, ri_name, ri_obj, export_targets,
                     import_targets, feature_config, irb_ips):
    """Build a 'vrf' RoutingInstance for *vn* plus its irb interface and
    bridge-domain VLAN, registering the VLAN on *feature_config*."""
    gevent.idle()  # yield: may run inside a long config-generation pass
    network_id = vn.vn_network_id
    vxlan_id = vn.get_vxlan_vni()
    ri = RoutingInstance(name=ri_name, virtual_network_mode='l3',
                         export_targets=export_targets,
                         import_targets=import_targets,
                         virtual_network_id=str(network_id),
                         vxlan_id=str(vxlan_id),
                         is_public_network=vn.router_external,
                         routing_instance_type='vrf')
    for prefix in vn.get_prefixes():
        ri.add_prefixes(self._get_subnet_for_cidr(prefix))
    _, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
    if irb_ips:
        irb = self._add_or_lookup_li(li_map, 'irb.' + str(network_id),
                                     network_id)
        for (irb_ip, gateway) in irb_ips:
            self._add_ip_address(irb, irb_ip, gateway=gateway)
    vlan = Vlan(name=DMUtils.make_bridge_name(vxlan_id), vxlan_id=vxlan_id)
    desc = "Virtual Network - %s" % vn.name
    vlan.set_description(desc)
    feature_config.add_vlans(vlan)
    # NOTE(review): `irb` is only bound when irb_ips is truthy; if this is
    # ever called with empty irb_ips the next line raises NameError.  A
    # sibling implementation guards this with `if irb:` -- confirm callers.
    self._add_ref_to_list(vlan.get_interfaces(), irb.get_name())
    return ri
def meteor(strip, config, a=None):
    """Meteor-rain LED effect: a bright head of ``meteorSize`` pixels sweeps
    across the strip while the tail behind it decays.

    :param strip: LED strip object (neopixel-style API).
    :param config: unused here; kept for the common effect signature.
    :param a: optional dict of overrides for the default arguments below.
    """
    args = {
        'color': ColorVal.WHITE,
        'meteorSize': 10,      # length of the meteor head, in pixels
        'decay': 0.75,         # per-frame brightness multiplier for the tail
        'randomDecay': True,   # decay each pixel only ~50% of the time
        'speedDelay': 1        # milliseconds between animation frames
    }
    # BUG FIX (idiom): the original used a mutable default argument `a={}`.
    # It was only read, so behavior was unaffected, but the shared-dict
    # pitfall is fragile; None-as-default is the safe equivalent.
    if a:
        args.update(a)
    gevent.idle()  # never time-critical
    led_off(strip)
    for i in range(strip.numPixels() * 2):
        # fade brightness of all LEDs one step
        for j in range(strip.numPixels()):
            if not args['randomDecay'] or random.random() > 0.5:
                c = strip.getPixelColor(j)
                strip.setPixelColor(j, dim(c, args['decay']))
        # draw the meteor head
        for j in range(args['meteorSize']):
            if i - j < strip.numPixels() and i - j >= 0:
                strip.setPixelColor(i - j, args['color'])
        strip.show()
        gevent.sleep(args['speedDelay'] / 1000.0)
def _recv_messages(self):
    """Receive loop: read raw messages from the connection, decrypt them
    when a channel key is established, and dispatch each to a parser
    greenlet.  On exit, disconnect unless a logon was never seen."""
    for message in self.connection:
        if not self.connected:
            break
        if self.channel_key:
            if self.channel_hmac:
                try:
                    message = crypto.symmetric_decrypt_HMAC(
                        message, self.channel_key, self.channel_hmac)
                except RuntimeError as e:
                    # HMAC verification failed -- drop the connection
                    self._LOG.exception(e)
                    break
            else:
                message = crypto.symmetric_decrypt(message, self.channel_key)
        # parse concurrently; idle() lets the parser greenlet start
        gevent.spawn(self._parse_message, message)
        gevent.idle()
    if not self._seen_logon and self.channel_secured:
        # channel came up but no logon arrived; give 'disconnected' a
        # short window to fire before forcing a disconnect
        if self.wait_event('disconnected', timeout=5) is not None:
            return
    gevent.spawn(self.disconnect)
def _build_ri_config(self, vn, ri_obj, lr_obj, vn_list, is_master_int_vn):
    """Build a RoutingInstance for internal VN *vn* owned by logical
    router *lr_obj*, merging in route targets from inter-fabric DCI peers.

    NOTE(review): `ri_name` is only assigned when lr_obj is truthy; a
    falsy lr_obj would raise NameError below -- presumably callers always
    pass a logical router.  Confirm.
    """
    gevent.idle()  # yield during long config-generation passes
    network_id = vn.vn_network_id
    vxlan_id = vn.get_vxlan_vni(is_internal_vn=True)
    if lr_obj:
        ri_name = "__contrail_%s_%s" % (lr_obj.name, vn.logical_router)
    export_targets, import_targets = self._get_export_import_targets(
        vn, ri_obj)
    # get lr_object
    if lr_obj:
        dci = lr_obj.get_interfabric_dci()
        if dci:
            # merge route targets from internal VNs of connected LRs
            lr_vn_list = dci.get_connected_lr_internal_vns(
                exclude_lr=lr_obj.uuid,
                pr_uuid=self._physical_router.uuid)
            for lr_vn in lr_vn_list:
                exports, imports = lr_vn.get_route_targets()
                if imports:
                    import_targets |= imports
                if exports:
                    export_targets |= exports
    if not is_master_int_vn:
        # create routing instance of type vrf
        ri = RoutingInstance(
            name=ri_name, description=ri_name, virtual_network_mode='l3',
            export_targets=export_targets, import_targets=import_targets,
            virtual_network_id=str(network_id), vxlan_id=str(vxlan_id),
            is_public_network=lr_obj.logical_router_gateway_external,
            routing_instance_type='vrf',
            virtual_network_is_internal=True, is_master=False)
        # prefer the per-PR loopback IP configured on the LR, else localhost
        if lr_obj and len(lr_obj.loopback_pr_ip_map) > 0 and\
                lr_obj.loopback_pr_ip_map.get(self._physical_router.uuid,
                                              None) is not None:
            ip_addr = lr_obj.loopback_pr_ip_map[self._physical_router.uuid]
        else:
            ip_addr = '127.0.0.1'
        lo0_unit = int(network_id)
        self._build_loopback_intf_info(ip_addr, lo0_unit, ri)
    else:
        # create routing instance of type master, which represents inet.0
        # setting is_public_network to false as per review comment - 57282
        ri = RoutingInstance(
            name=ri_name, description=ri_name, virtual_network_mode='l3',
            is_public_network=False, routing_instance_type='master',
            virtual_network_is_internal=True, is_master=True)
    # reference an irb routing interface for each connected VN
    for connected_vn_uuid in vn_list:
        connected_vn = VirtualNetworkDM.get(connected_vn_uuid)
        irb_name = 'irb.' + str(connected_vn.vn_network_id)
        self._add_ref_to_list(ri.get_routing_interfaces(), irb_name)
    return ri
def event_loop(self):
    """Main loop: ask the service manager to start any designated
    services, then yield to other greenlets."""
    while self.should_loop():
        # schedule the service manager to start designated services if any
        self.service_manager.monitor_services()
        gevent.idle()
def receive(self, message):
    """Expect the 'terminated' notification for the watched actor, then
    verify that messaging the now-dead sender dead-letters the message.

    NOTE(review): relies on `watchee` and `all_ok` from the enclosing
    test function's scope.
    """
    eq_(message, ('terminated', watchee))
    _, sender = message
    with expect_one_event(DeadLetter(sender, 'dummy', sender=self.ref)):
        sender << 'dummy'
        idle()  # let the dead-letter event propagate
    all_ok.set()
def _do_test_acquire_in_one_then_another(
        self, release=True,
        require_thread_acquired_to_finish=False,
        **thread_acquire_kwargs):
    """Acquire a semaphore in this (gevent) thread, then have a native
    thread try to acquire it; optionally release and verify the native
    thread observed the release."""
    from gevent import monkey
    # this test is only meaningful with an unpatched threading module
    self.assertFalse(monkey.is_module_patched('threading'))
    import threading
    thread_running = threading.Event()
    thread_acquired = threading.Event()
    sem = self._makeOne()
    # Make future acquires block
    sem.acquire()
    exc_info = []
    acquired = []
    t = threading.Thread(target=self._makeThreadMain(
        thread_running, thread_acquired, sem, acquired, exc_info,
        **thread_acquire_kwargs))
    t.daemon = True
    t.start()
    thread_running.wait(10)  # implausibly large time
    if release:
        sem.release()
        # Spin the loop to be sure the release gets through.
        # (Release schedules the notifier to run, and when the
        # notifier run it sends the async notification to the
        # other thread. Depending on exactly where we are in the
        # event loop, and the limit to the number of callbacks
        # that get run (including time-based) the notifier may or
        # may not be immediately ready to run, so this can take up
        # to two iterations.)
        for _ in range(self.IDLE_ITERATIONS):
            gevent.idle()
            if thread_acquired.wait(timing.LARGE_TICK):
                break
        self.assertEqual(acquired, [True])
    if not release and thread_acquire_kwargs.get("timeout"):
        # Spin the loop to be sure that the timeout has a chance to
        # process. Interleave this with something that drops the GIL
        # so the background thread has a chance to notice that.
        for _ in range(self.IDLE_ITERATIONS):
            gevent.idle()
            if thread_acquired.wait(timing.LARGE_TICK):
                break
    thread_acquired.wait(timing.LARGE_TICK * 5)
    if require_thread_acquired_to_finish:
        self.assertTrue(thread_acquired.is_set())
    try:
        self.assertEqual(exc_info, [])
    finally:
        # break the reference cycle with the captured traceback
        exc_info = None
    return sem, acquired
def _process_new_blocks(self, latest_confirmed_block: BlockNumber) -> None:
    """Fetch blockchain events up to *latest_confirmed_block* and handle
    each one, yielding between events so requests can be answered."""
    start = time.monotonic()
    # sanity check: db and in-memory state must agree on the sync point
    db_block = self.database.get_latest_committed_block()
    assert db_block == self.blockchain_state.latest_committed_block, (
        f"Unexpected `latest_committed_block` in db: "
        f"was {db_block}, expected {self.blockchain_state.latest_committed_block}. "
        f"Is the db accidentally shared by two PFSes?")
    events = get_blockchain_events_adaptive(
        web3=self.web3,
        blockchain_state=self.blockchain_state,
        token_network_addresses=list(self.token_networks.keys()),
        latest_confirmed_block=latest_confirmed_block,
    )
    if events is None:
        # fetch failed / nothing usable -- try again on the next poll
        return
    before_process = time.monotonic()
    for event in events:
        self.handle_event(event)
        gevent.idle()  # Allow answering requests in between events
    if events:
        log.info(
            "Processed events",
            getting=round(before_process - start, 2),
            processing=round(time.monotonic() - before_process, 2),
            total_duration=round(time.monotonic() - start, 2),
            event_counts=collections.Counter(
                e.__class__.__name__ for e in events),
        )
def test_wait_while_notifying(self):
    # If someone calls wait() on an Event that is
    # ready, and notifying other waiters, that new
    # waiter still runs at the end, but this does not
    # require a trip around the event loop.
    # See https://github.com/gevent/gevent/issues/1520
    event = Event()
    results = []

    def wait_then_append(arg):
        event.wait()
        results.append(arg)

    gevent.spawn(wait_then_append, 1)
    gevent.spawn(wait_then_append, 2)
    gevent.idle()  # let both waiters block on the event
    self.assertEqual(2, event.linkcount())
    # a check watcher runs once per loop iteration; if it fires, the
    # third waiter needed a loop trip -- which this test asserts it doesn't
    check = gevent.get_hub().loop.check()
    check.start(results.append, 4)
    event.set()
    wait_then_append(3)
    self.assertEqual(results, [1, 2, 3])
    # Note that the check event DID NOT run.
    check.stop()
    check.close()
def run(self):
    """Daemon entry point: load configuration, set up logging, build the
    WinRM action handlers, install signal handlers for graceful shutdown,
    and block until the handlers finish."""
    actionhandler_config = ConfigParser()
    actionhandler_config.read((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler.conf'))
    logging.config.fileConfig((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler-log.conf'))
    logger = logging.getLogger('root')
    if self.debug:
        # mirror all debug output to the console
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(message)s",
            "%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging also to console")
    # Read config files
    jumpserver_config = ConfigParser()
    jumpserver_config.read((
        '/opt/autopilot/conf/pyactionhandler/'
        'winrm-actionhandler-jumpserver.conf'))
    pmp_config = ConfigParser()
    pmp_config.read('/opt/autopilot/conf/pyactionhandler/pmp.conf')
    action_handlers = [SyncHandler(
        WorkerCollection(
            {"ExecuteCommand": Capability(
                WinRMCmdAction,
                pmp_config=pmp_config,
                jumpserver_config=jumpserver_config),
             "ExecutePowershell": Capability(
                WinRMPowershellAction,
                pmp_config=pmp_config,
                jumpserver_config=jumpserver_config)},
            parallel_tasks=actionhandler_config.getint(
                'ActionHandler', 'ParallelTasks', fallback=5),
            parallel_tasks_per_worker=actionhandler_config.getint(
                'ActionHandler', 'ParallelTasksPerWorker', fallback=5),
            worker_max_idle=actionhandler_config.getint(
                'ActionHandler', 'WorkerMaxIdle', fallback=300)),
        zmq_url=actionhandler_config.get(
            'ActionHandler', 'ZMQ_URL'))]

    def exit_gracefully():
        # drain all handlers before exiting
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets = [action_handler.run() for action_handler in action_handlers]
    gevent.idle()
    gevent.joinall(greenlets)
    sys.exit()
def run(self, steps_before_yield=None):
    """Execute steps until halted.

    When *steps_before_yield* is a positive number, yield cooperatively
    to other greenlets after every that-many steps (never before the
    first step).
    """
    for step in itertools.count():
        if self.halted:
            return
        # skip step 0 and only yield when an interval was requested
        if step and steps_before_yield and step % steps_before_yield == 0:
            gevent.idle()
        self.run_step()
def test_child_termination_message_from_an_actor_not_a_child_of_the_recipient_is_ignored(
        defer):
    """A '_child_terminated' notification about an actor that is not a
    child of the recipient must be silently ignored."""
    node = DummyNode()
    defer(node.stop)
    recipient = node.spawn(Actor)
    stranger = node.spawn(Actor)  # not a child of `recipient`
    recipient << ('_child_terminated', stranger)
    idle()
def add_command(self, cmd, decorater=None):
    """Add a command to this service and return a command id.

    If *cmd* is already registered, its existing id is returned instead.
    (Note: the parameter name 'decorater' is a misspelling of 'decorator',
    kept for backward compatibility with existing keyword callers.)
    """
    gevent.idle(constants.Priority.Normal.value)
    assert isinstance(cmd, command.AsyncCommand)
    assert callable(decorater) or decorater is None
    if cmd not in self._commands.values():
        command_id = next(self._id_counter)
        cmd.command_id = command_id
        cmd.service = self
        self._commands[command_id] = cmd
        # weak ref: _all_commands must not keep finished commands alive
        self._all_commands[command_id] = weakref.ref(cmd)
        if decorater:
            self._decorators[command_id] = decorater
        log.d(
            "Service ({})".format(self.name),
            "added command:", cmd.__class__.__name__,
            "({})".format(command_id))
        cmd.state = command.CommandState.in_service
        return command_id
    else:
        # TODO: abit nonsensical? raise error instead maybe
        for c_id in self._commands:
            if self._commands[c_id] == cmd:
                return c_id
def _dequeue(self, parent):
    """Actor mailbox loop: process tasks until stopped, crashed, or a
    TTL/max-idle timeout fires.

    The max-idle timeout is closed while handling a task and re-armed
    afterwards; the TTL timeout spans the whole loop.  On exit the parent
    (if any) is notified whether this actor crashed or stopped.
    """
    self._ttl_timeout = gevent.Timeout.start_new(
        timeout=self._ttl, exception=ActorTTLError)
    self._max_idle_timeout = gevent.Timeout.start_new(
        timeout=self._max_idle, exception=ActorMaxIdleError)
    self._stopped = False
    self._crashed = False
    try:
        for task in self._mailbox:
            gevent.idle()  # yield before working on the task
            # suspend the idle timer while a task is being handled
            self._max_idle_timeout.close()
            if self._max_idle:
                self._logger.trace(
                    "{me} has canceled timeout of {max_idle} seconds".
                    format(me=self, max_idle=self._max_idle))
            if isinstance(task, Task) and not task.canceled:
                self._logger.trace("{me} took {task} from mailbox".format(
                    me=self, task=task))
                self._handle(task)
            elif task is self._poisoned_pill:
                # poison pill: orderly stop via the exception machinery
                self._logger.debug(
                    "{me} is processing the poisoned pill.".format(
                        me=self))
                raise ActorStoppedError
            elif task is self._ping_pill:
                # ping pill: no-op, just proves the actor is alive
                self._logger.debug(
                    "{me} is processing a ping.".format(me=self))
            elif isinstance(task, Task) and task.canceled:
                self._logger.trace(
                    "{me} took canceled {task} from mailbox, dismissing".
                    format(me=self, task=task))
                continue
            # re-arm the idle timer for the next mailbox wait
            self._max_idle_timeout = gevent.Timeout.start_new(
                timeout=self._max_idle, exception=ActorMaxIdleError)
    except ActorMaxIdleError as e:
        self._logger.trace(
            "{me} has reached max_idle timeout of {sec} seconds.".format(
                me=self, sec=self._max_idle))
        self.stop()  # FIXME!!!!
    except ActorTTLError as e:
        self._logger.trace(
            "{me} has reached ttl timeout of {sec} seconds.".format(
                me=self, sec=self._ttl))
        self.stop()  # FIXME!!!!
    except ActorStoppedError as e:
        self._stopped = True
    except Exception as e:
        # any other exception marks the actor as crashed
        self._stopped = True
        self._crashed = True
        formatted_exc = better_exceptions.format_exception(*sys.exc_info())
        self._logger.error(
            ("{me} crashed with:\n{exc}").format(me=self,
                                                 exc=formatted_exc))
    finally:
        self._ttl_timeout.close()
        self._max_idle_timeout.close()
        if hasattr(self, "_parent"):
            self._parent._handle_child(
                self, "crashed" if self._crashed else "stopped")
def stop_command(self, cmd_id):
    """Stop a running command, identified by its command id.

    Unknown ids are ignored.
    """
    assert isinstance(cmd_id, int)
    gevent.idle(constants.Priority.Normal.value)
    greenlet = self._greenlets.get(cmd_id)
    if greenlet is not None:
        greenlet.kill()
def f():
    """Resolve the request via an existing session if possible, otherwise
    forward it over the channel and busy-wait (yielding) for the reply.

    NOTE(review): relies on `request`, `name`, `sender`, `receiver`,
    `prepare`, `final` etc. from the enclosing scope.
    """
    c = acquireSession(request)
    if c:
        # session cache hit -- answer immediately
        return c
    sender.send((name, *prepare(request)))
    # cooperative wait: yield to other greenlets until a reply arrives
    while not receiver.poll():
        idle()
    return endSession(final(receiver.recv()))
def get_command_value(self, cmd_id):
    """Return the value produced by command *cmd_id*, or None if the
    command has not produced one."""
    assert isinstance(cmd_id, int)
    gevent.idle(constants.Priority.Normal.value)
    return self._values.get(cmd_id)
def expect_num_warnings(n, message=None, timeout=None):
    """Context manager: assert that the `with` body emits exactly *n*
    warnings, yielding (up to *timeout*) until they have arrived."""
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        yield
        # warnings may be emitted asynchronously by other greenlets;
        # spin until enough arrived or the timeout elapses silently
        with Timeout(timeout, exception=False):
            while len(w) < n:
                idle()
    eq_(len(w), n, message or "expected %s warnings but found %s: %s"
        % (n, len(w), ', '.join(map(str, w))))
def test_messages_to_dead_actors_are_sent_to_dead_letters(defer):
    """A message sent to a stopped actor must surface as a DeadLetter."""
    node = DummyNode()
    defer(node.stop)
    a = node.spawn(Actor)
    a.stop()
    with expect_one_event(DeadLetter(a, 'should-end-up-as-letter',
                                     sender=None)):
        a << 'should-end-up-as-letter'
        idle()  # let the dead-letter event propagate
def test_pfs_global_messages(
    local_matrix_servers,
    private_rooms,
    retry_interval,
    retries_before_backoff,
    monkeypatch,
    global_rooms,
):
    """ Test that RaidenService sends UpdatePFS messages to global
    PATH_FINDING_BROADCASTING_ROOM room on newly received balance proofs.
    """
    transport = MatrixTransport({
        "global_rooms": global_rooms,  # FIXME: #3735
        "retries_before_backoff": retries_before_backoff,
        "retry_interval": retry_interval,
        "server": local_matrix_servers[0],
        "server_name": local_matrix_servers[0].netloc,
        "available_servers": [local_matrix_servers[0]],
        "private_rooms": private_rooms,
    })
    transport._client.api.retry_timeout = 0
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)
    raiden_service.config = dict(services=dict(monitoring_enabled=True))
    transport.start(raiden_service, raiden_service.message_handler, None)
    pfs_room_name = make_room_alias(
        transport.network_id, PATH_FINDING_BROADCASTING_ROOM)
    pfs_room = transport._global_rooms.get(pfs_room_name)
    assert isinstance(pfs_room, Room)
    # spy on the broadcast room so we can count messages sent to it
    pfs_room.send_text = MagicMock(spec=pfs_room.send_text)
    raiden_service.transport = transport
    transport.log = MagicMock()
    balance_proof = factories.create(HOP1_BALANCE_PROOF)
    channel_state = factories.create(
        factories.NettingChannelStateProperties())
    channel_state.our_state.balance_proof = balance_proof
    channel_state.partner_state.balance_proof = balance_proof
    # make any channel lookup resolve to our prepared channel state
    monkeypatch.setattr(
        raiden.transfer.views,
        "get_channelstate_by_canonical_identifier",
        lambda *a, **kw: channel_state,
    )
    update_path_finding_service_from_balance_proof(
        raiden=raiden_service,
        chain_state=None,
        new_balance_proof=balance_proof)
    gevent.idle()
    # the broadcast happens asynchronously; spin until it did (max 2s)
    with gevent.Timeout(2):
        while pfs_room.send_text.call_count < 1:
            gevent.idle()
    assert pfs_room.send_text.call_count == 1
    transport.stop()
    transport.get()
def fade(args):
    """Fade-in / fade-out LED effect.

    *args* must contain 'strip'; all other keys override the defaults
    below.  Returns False when no strip is supplied.
    """
    if 'strip' in args:
        strip = args['strip']
    else:
        return False
    a = {
        'color': ColorVal.WHITE,
        'pattern': ColorPattern.SOLID,
        'steps': 25,        # fade-in steps (0 = instant on)
        'speedDelay': 10,   # ms between fade steps
        'onTime': 250,      # ms fully on between fades
        'offTime': 250,     # ms fully off between iterations
        'iterations': 1
    }
    a.update(args)
    led_off(strip)
    if 'outSteps' not in a:
        a['outSteps'] = a['steps']
    # effect should never exceed 3Hz (prevent seizures)
    a['offTime'] = min(
        333 - ((a['steps'] * a['speedDelay'])
               + (a['outSteps'] * a['speedDelay'])
               + a['onTime']),
        a['offTime'])
    for i in range(a['iterations']):
        # fade in
        if a['steps']:
            led_off(strip)
            gevent.idle()  # never time-critical
            for j in range(0, a['steps'], 1):
                c = dim(a['color'], j / float(a['steps']))
                led_on(strip, c, a['pattern'])
                strip.show()
                gevent.sleep(a['speedDelay'] / 1000.0)
        else:
            led_on(strip, a['color'], a['pattern'])
        led_on(strip, a['color'], a['pattern'])
        gevent.sleep(a['onTime'] / 1000.0)
        # fade out
        if a['outSteps']:
            led_on(strip, a['color'], a['pattern'])
            for j in range(a['outSteps'], 0, -1):
                c = dim(a['color'], j / float(a['outSteps']))
                led_on(strip, c, a['pattern'])
                strip.show()
                gevent.sleep(a['speedDelay'] / 1000.0)
        else:
            led_off(strip)
        led_off(strip)
        gevent.sleep(a['offTime'] / 1000.0)
def _handle_login_key(self, message):
    """Acknowledge a new login key from the server, store it, and emit
    the new-login-key event.  Ignored when not logged on."""
    ack = MsgProto(EMsg.ClientNewLoginKeyAccepted)
    ack.body.unique_id = message.body.unique_id
    if not self.logged_on:
        return
    self.send(ack)
    gevent.idle()  # let the ack go out before swapping the key
    self.login_key = message.body.login_key
    self.emit(self.EVENT_NEW_LOGIN_KEY)
def event_loop(self):
    """Mock output-service loop: sleep, log fake work, and (when
    quick_death is set) acknowledge so tests can tear it down fast."""
    while self.should_loop():
        gevent.sleep(self.loop_interval)
        self.log.debug("mock-output-service (really just MockQueuedService) doing some fake work...")
        if self.quick_death:
            # signal the test harness that one loop iteration completed
            self.event_loop_ack = True
        gevent.idle()
def _start(self, cmd_id):
    """Start the greenlet for *cmd_id* if the pool has a free slot,
    otherwise park the command on the wait queue."""
    gevent.idle(constants.Priority.Low.value)
    if self._group.full():
        # pool saturated: queue the command until a slot frees up
        self._queue.put(cmd_id)
        self._commands[cmd_id].state = command.CommandState.in_queue
        log.d("Enqueueing command id", cmd_id,
              "in service '{}'".format(self.name))
    else:
        self._group.start(self._greenlets[cmd_id])
        self._commands[cmd_id].state = command.CommandState.started
def __call__(self, *args, **kwargs):
    """Throttled cooperative yield: call gevent.idle() at most once every
    ``max_wait`` seconds; a no-op when gevent is disabled."""
    if not use_gevent:
        return
    now = datetime.datetime.now()
    if self.last_switch is None:
        self.last_switch = now
    elapsed = now - self.last_switch
    if elapsed.total_seconds() >= self.max_wait:
        self.last_switch = now
        gevent.idle()
def onConnect(key):
    """Wait (yielding) until the session closes or a cached response for
    *key* appears; return the cached response or a plain OK.

    NOTE(review): relies on `current`, `cache`, `toResponse` and `OK`
    from the enclosing module scope.
    """
    while not (current.session is None or (key and cache.peek(key))):
        idle()
    res = None
    if key and cache.peek(key):
        res = cache.pop(key)
        return toResponse(res)
    else:
        # session ended without a cached result for this key
        return OK
def run(self):
    """Daemon entry point: load configuration, set up logging, determine
    optional ZMQ CURVE credentials, build the WinRM action handlers,
    install shutdown signal handlers, and block until done."""
    actionhandler_config = ConfigParser()
    actionhandler_config.read((
        '/opt/autopilot/conf/external_actionhandlers/'
        'winrm-actionhandler.conf'))
    logging.config.fileConfig((
        '/opt/autopilot/conf/external_actionhandlers/'
        'winrm-actionhandler-log.conf'))
    logger = logging.getLogger('root')
    if self.debug:
        # mirror all debug output to the console
        logger.setLevel(logging.DEBUG)
        ch = logging.StreamHandler()
        ch.setLevel(logging.DEBUG)
        formatter = logging.Formatter(
            "%(asctime)s [%(levelname)s] %(message)s",
            "%Y-%m-%d %H:%M:%S")
        ch.setFormatter(formatter)
        logger.addHandler(ch)
        logger.info("Logging also to console")
    # encryption is optional: any missing/disabled setting means no auth
    try:
        if not actionhandler_config.getboolean('Encryption', 'enabled'):
            raise ValueError
        zmq_auth = (
            actionhandler_config.get(
                'Encryption', 'server-public-key',
                raw=True).encode('ascii'),
            actionhandler_config.get(
                'Encryption', 'server-private-key',
                raw=True).encode('ascii')
        )
    except (ValueError, NoSectionError, NoOptionError):
        zmq_auth = None
    action_handlers = [SyncHandler(
        WorkerCollection(
            {"ExecuteCommand": Capability(WinRMCmdAction),
             "ExecutePowershell": Capability(WinRMPowershellAction)},
            parallel_tasks=actionhandler_config.getint(
                'ActionHandler', 'ParallelTasks', fallback=5),
            parallel_tasks_per_worker=actionhandler_config.getint(
                'ActionHandler', 'ParallelTasksPerWorker', fallback=5),
            worker_max_idle=actionhandler_config.getint(
                'ActionHandler', 'WorkerMaxIdle', fallback=300)),
        zmq_url=actionhandler_config.get(
            'ActionHandler', 'ZMQ_URL'),
        auth=zmq_auth)]

    def exit_gracefully():
        # drain all handlers before exiting
        logger.info("Starting shutdown")
        for action_handler in action_handlers:
            action_handler.shutdown()
        logger.info("Finished shutdown")

    gevent.hub.signal(signal.SIGINT, exit_gracefully)
    gevent.hub.signal(signal.SIGTERM, exit_gracefully)
    greenlets = [action_handler.run() for action_handler in action_handlers]
    gevent.idle()
    gevent.joinall(greenlets)
    sys.exit()
def _build_ri_config(self, vn, ri_name, ri_obj, export_targets,
                     import_targets, feature_config, irb_ips,
                     erb_pr_role):
    """Build a 'vrf' RoutingInstance for *vn*, its irb interface (with
    special comments for ERB/master-LR cases) and bridge-domain VLAN,
    registering the VLAN on *feature_config*."""
    gevent.idle()  # yield: may run inside a long config-generation pass
    network_id = vn.vn_network_id
    vxlan_id = vn.get_vxlan_vni()
    desc = DMUtils.vn_comment(vn)
    is_master_vn = False
    lr = None
    if vn.logical_router is None:
        # try updating logical router incase of DM restart
        vn.set_logical_router(vn.fq_name[-1])
    lr_uuid = vn.logical_router
    if lr_uuid:
        lr = LogicalRouterDM.get(lr_uuid)
        if lr:
            if lr.is_master is True:
                is_master_vn = True
    ri = RoutingInstance(name=ri_name, virtual_network_mode='l3',
                         export_targets=export_targets,
                         import_targets=import_targets,
                         virtual_network_id=str(network_id),
                         vxlan_id=str(vxlan_id),
                         is_public_network=vn.router_external,
                         routing_instance_type='vrf',
                         is_master=is_master_vn,
                         comment=desc)
    for prefix in vn.get_prefixes(self._physical_router.uuid):
        ri.add_prefixes(self._get_subnet_for_cidr(prefix))
    _, li_map = self._add_or_lookup_pi(self.pi_map, 'irb', 'irb')
    irb = None
    if irb_ips:
        irb = self._add_or_lookup_li(li_map, 'irb.' + str(network_id),
                                     network_id)
        if vn.has_ipv6_subnet is True:
            irb.set_is_virtual_router(True)
        for (irb_ip, gateway) in irb_ips:
            self._add_ip_address(irb, irb_ip, gateway=gateway)
        # This is a hack to advertise vrouter IP in underlay ebgp
        # Required in ERB-UCAST-GW when a VN is directly extended to a PR
        # Refererence: CEM-20163
        if erb_pr_role and not lr:
            irb.set_comment("PR External IRB")
        elif lr and lr.is_master:
            irb.set_comment("Master LR IRB")
    vlan = Vlan(name=DMUtils.make_bridge_name(vxlan_id), vxlan_id=vxlan_id)
    vlan.set_description(desc)
    feature_config.add_vlans(vlan)
    if irb:
        self._add_ref_to_list(vlan.get_interfaces(), irb.get_name())
    if erb_pr_role:
        # for ERB_xxx_gateway PR role, set vlan id for current vn
        self._set_vn_vlanid(vlan, vn)
    return ri
def _encoder_loop(self):
    """Read fixed-size raw frames from the source, encode them, and queue
    the encoded frames; stops on a short read (end of stream)."""
    while self.source:
        raw = self.source.read(self.frame_size)
        if len(raw) < self.frame_size:
            # short read means the source is exhausted
            break
        self.frames.put(self.encode(raw, self.samples_per_frame))
        gevent.idle()  # yield so consumers can drain the frame queue
    self.source = None
def test_cells_are_garbage_collected_on_termination(defer):
    """Stopping an actor must release its cell for garbage collection."""
    node = DummyNode()
    defer(node.stop)
    actor = node.spawn(Actor)
    cell_ref = weakref.ref(actor._cell)
    ok_(cell_ref())  # cell is alive while the actor runs
    actor.stop()
    idle()
    gc.collect()
    ok_(not cell_ref())  # ...and collectable once stopped
def event_loop(self): """ Override this """ # while True: # with self.latency.time(): # self.latency_window.mark() # # do some work here # # sleep or idle while self.should_loop(): gevent.idle()
def on_glut_idle(self):
    """GLUT idle callback that makes glut gevent-friendly.

    Other greenlets get a chance to run while GLUT is idle; sleeps are
    sized so the frame rate stays near FPS_LIMITATION.
    """
    now = time.time()
    remaining = 1 / FPS_LIMITATION - (now - self.last_glut_idle_time)
    self.last_glut_idle_time = now
    if remaining > 0:
        gevent.sleep(remaining)
    else:
        # behind schedule -- just yield briefly instead of sleeping
        gevent.idle()
def test_termination_message_to_dead_actor_is_discarded(defer):
    """A 'terminated' notification addressed to an already-stopped
    watcher must be discarded, not dead-lettered."""
    class Parent(Actor):
        def pre_start(self):
            # watch a child, stop it, then stop ourselves immediately
            self.watch(self.spawn(Actor)).stop()
            self.stop()
    d = Events.consume_one(DeadLetter)
    node = DummyNode()
    defer(node.stop)
    node.spawn(Parent)
    idle()
    ok_(not d.ready())  # no DeadLetter must have been emitted
def test_actors_are_garbage_collected_on_termination(defer):
    """A stopped actor instance must be finalized (collected)."""
    del_called = Event()

    class MyActor(Actor):
        def __del__(self):
            del_called.set()

    node = DummyNode()
    defer(node.stop)
    node.spawn(MyActor).stop()
    idle()
    gc.collect()
    ok_(del_called.is_set())
def logout(self):
    """
    Logout from steam. Does nothing if not logged on.

    .. note::
        The server will drop the connection immediately upon logout.
    """
    if self.logged_on:
        self.logged_on = False
        self.send(MsgProto(EMsg.ClientLogOff))
        # block until the server actually drops us
        self.wait_event('disconnected')
    gevent.idle()
def greenlet_cleaner():
    """Background housekeeping greenlet.

    Every ~1000 seconds: register itself in the module-level ``_greenlets``
    list (once), then prune all finished greenlets from that list.
    """
    current = gevent.getcurrent()
    while True:
        gevent.sleep(1000)
        gevent.idle()
        if current not in _greenlets:
            _greenlets.append(current)
        # BUG FIX: the original collected indices of finished greenlets and
        # deleted them in ascending order (`for i in todelete: del
        # _greenlets[i]`).  Each deletion shifts subsequent elements left,
        # so later indices pointed at the wrong entries -- live greenlets
        # could be dropped and IndexError was possible.  Rebuilding the
        # list in place (slice assignment keeps the shared list object)
        # removes exactly the finished ones.
        _greenlets[:] = [g for g in _greenlets if not g.ready()]
def event_loop(self):
    """Analyzer loop: fetch a resource, analyze it if present, then sleep
    and yield so other greenlets (and tidy logging) get a turn."""
    while self.should_loop():
        resource = self.get_resource()
        if resource is not None:
            self._analyze_resource(resource)
        # In gevent terms sleep() is a yield with a scheduled wake-up; it
        # also keeps this loop from flooding the logs.  With real threads
        # this pacing would matter even more.
        gevent.sleep(self.sleep_time)
        gevent.idle()  # yield once more -- be a very good citizen
def reading(self):
    """Acquisition read loop: poll for new data points until the target
    count is reached or a stop is requested, emitting each batch."""
    from_index = 0
    while self._nb_acq_points < self.npoints and not self._stop_flag:
        data = self._read_data(from_index)
        # every channel must have delivered the same number of points
        if not all_equal([len(d) for d in data]):
            raise RuntimeError("Read data can't have different sizes")
        if len(data[0]) > 0:
            from_index += len(data[0])
            self._nb_acq_points += len(data[0])
            self._emit_new_data(data)
            gevent.idle()  # yield between batches
        else:
            # nothing new yet -- wait roughly half an integration period
            gevent.sleep(self.count_time / 2.)
def shutdown(self):
    """Graceful shutdown: stop input, drain workers and pending
    responses, then stop output and log a summary."""
    gevent.kill(self.input_loop)
    gevent.idle()  # let the kill take effect before draining
    self.worker_collection.shutdown_workers()
    self.logger.info("Waiting for all workers to shutdown...")
    while len(self.worker_collection.workers) > 0:
        self.logger.debug("{num} worker(s) still active".format(
            num=len(self.worker_collection.workers)))
        gevent.sleep(1)
    self.logger.info("Waiting for all responses to be delivered...")
    while self.response_queue.unfinished_tasks > 0:
        self.logger.debug("{num} responses to be delivered".format(
            num=self.response_queue.unfinished_tasks))
        gevent.sleep(1)
    gevent.kill(self.output_loop)
    # counter is an itertools-style counter: next()-1 = actions processed
    self.logger.info("ActionHandler shut down, {num} actions processed".format(
        num=next(self.counter)-1))
def test_greenletexit_is_raised_in_run_if_the_actor_is_stopped(defer):
    """Stopping a process-style actor must raise GreenletExit inside its
    run() method."""
    class MyProc(Actor):
        def run(self):
            try:
                self.get()  # blocks until stopped
            except GreenletExit:
                exited.set()
    node = DummyNode()
    defer(node.stop)
    exited = Event()
    r = node.spawn(MyProc)
    idle()  # let run() reach the blocking get()
    r.stop()
    exited.wait()
def test_watching_self_is_noop_and_returns_self(defer):
    """watch(self.ref) must return the ref and never produce a
    'terminated' message (receive would fail the test) or a DeadLetter."""
    class MyActor(Actor):
        def pre_start(self):
            eq_(self.watch(self.ref), self.ref)

        def receive(self, message):
            ok_(False)  # no message must ever arrive
    node = DummyNode()
    defer(node.stop)
    a = node.spawn(MyActor)
    dead_letter_emitted = Events.consume_one(DeadLetter)
    a.stop()
    idle()
    ok_(not dead_letter_emitted.ready())
def __emit_worker(self):
    """Drain the event queue, dispatching each (event, args) pair to its
    registered callbacks; exits when the queue is empty."""
    for event, args in self.__queue:
        # hasattr guards against the emitter being torn down mid-loop;
        # the mangled name must be spelled out for hasattr
        if hasattr(self, '_EventEmitter__callbacks'):
            if event in self.__callbacks:
                # copy items: remove_listener mutates the dict
                for callback, once in list(self.__callbacks[event].items()):
                    if once:
                        self.remove_listener(event, callback)
                    if isinstance(callback, AsyncResult):
                        # one-shot future-style listener
                        callback.set(args)
                    else:
                        gevent.spawn(callback, *args)
        gevent.idle()
        if self.__queue.empty():
            break
def test_watching_dead_actor(defer):
    """Watching an already-stopped actor must immediately deliver a
    'terminated' message to the watcher."""
    class Watcher(Actor):
        def pre_start(self):
            self.watch(watchee)

        def receive(self, message):
            message_receieved.set(message)
    node = DummyNode()
    defer(node.stop)
    message_receieved = AsyncResult()
    watchee = node.spawn(Actor)
    watchee.stop()
    idle()  # ensure watchee is fully dead before the watcher starts
    node.spawn(Watcher)
    eq_(message_receieved.get(), ('terminated', watchee))
def test_connect(self, mock_recv, mock_emit):
    """A plain connect() must dial the mocked server address, emit
    'connected', and start the receive loop."""
    # setup
    self.conn.connect.return_value = True
    # run
    cm = CMClient()
    with gevent.Timeout(2, False):
        cm.connect()
        gevent.idle()
    # verify
    # NOTE(review): (127001, 1) is whatever address the mocked server list
    # yields; presumably a stand-in for ('127.0.0.1', 1) -- confirm.
    self.conn.connect.assert_called_once_with((127001, 1))
    mock_emit.assert_called_once_with('connected')
    mock_recv.assert_called_once_with()
def event_loop(self):
    """ The event loop. Resource reloading is disabled at the moment. """
    while self.should_loop():
        # register any resources from the db we haven't seen yet
        for res_uri, res in self.db.get_resources():
            if res_uri not in self.registered:
                self.queue.put_analyze(res)
                self.registered[res_uri] = res
                self.log.info("registered new resource, id:[%s], uri:[%s], size: [%d]"
                              % (res.id, res_uri, self.queue.analyze_size()))
            # else:  # resource already exists
            #     if resource.reload:  # check if it needs reloading
            #         self.registered.pop(resource.id)  # popping it will reload it next loop
            #         self.log.debug("resource set to be reloaded, id:[%s], uri:[%s]" % (resource.id, resource.uri))
        gevent.idle()
def test_sending_message_to_stopping_parent_from_post_stop_should_deadletter_the_message(defer):
    """A message a child sends to its stopping parent from post_stop must
    be dead-lettered rather than delivered."""
    class Parent(Actor):
        def pre_start(self):
            self.spawn(Child)

        def receive(self, message):
            ok_(False)  # the parent must never receive the message

    class Child(Actor):
        def post_stop(self):
            self._parent << 'should-not-be-received'
    node = DummyNode()
    defer(node.stop)
    p = node.spawn(Parent)
    with expect_one_event(DeadLetter(ANY, ANY, sender=ANY)):
        p.stop()
        idle()
def event_loop(self):
    """ The event loop. """
    while self.should_loop():
        # Don't do the below commented line, as the event loop will run fast
        # and will result in multiple lines being printed! Also that many of
        # log entries makes it confusing when narrowing down things. It is
        # better to tie a logging event to a logical event such as when a
        # resource may be requested (see below `can_request` method).
        #
        # don't do this -> self.log.debug("Size now: %d" % self.queue.analyzer_size())
        resource = self.queue.get_analyze()  # pop from queue
        if resource is not None:  # if an item exists
            self._analyze_resource(resource)
        gevent.idle()
def _do_store(self):
    """Background store loop: whenever new image-status entries arrive,
    merge them into the counter's first data record.

    Exits when the weakly-referenced counter disappears or the stop flag
    is set.
    """
    while True:
        succeed = self._new_image_status_event.wait(1)
        if succeed:
            self._new_image_status_event.clear()
        else:
            # test if cnt is still alive
            try:
                self._cnt.data
                continue
            except ReferenceError:
                # counter was garbage-collected -- nothing left to store to
                break
        # swap out the pending-status dict atomically
        local_dict = self._new_image_status
        self._new_image_status = dict()
        if local_dict:
            # read-modify-write of the first data record
            ref_data = self._cnt.data[0]
            ref_data.update(local_dict)
            self._cnt.data[0] = ref_data
        if self._stop_flag:
            break
        gevent.idle()
def _recv_messages(self):
    """Receive loop: read raw messages from the connection, decrypt them
    once a channel key exists, and hand each to a parser greenlet.  On
    loop exit, disconnect unless waiting out a missing logon."""
    for message in self.connection:
        if not self.connected:
            break
        if self.channel_key:
            if self.channel_hmac:
                try:
                    message = crypto.symmetric_decrypt_HMAC(
                        message, self.channel_key, self.channel_hmac)
                except RuntimeError as e:
                    # HMAC verification failed -- drop the connection
                    self._LOG.exception(e)
                    break
            else:
                message = crypto.symmetric_decrypt(message,
                                                   self.channel_key)
        # parse concurrently; idle() lets the parser greenlet start
        gevent.spawn(self._parse_message, message)
        gevent.idle()
    if not self._seen_logon and self.channel_secured:
        # channel came up but no logon arrived; give 'disconnected'
        # a short window to fire before forcing a disconnect
        if self.wait_event('disconnected', timeout=5) is not None:
            return
    gevent.spawn(self.disconnect)