def __init__(self):
    """Initialise with no connected clients and a 10-second announce loop."""
    self.clients = []
    # Call announce() every 10 seconds; start() fires the first call immediately.
    self.lc = task.LoopingCall(self.announce)
    self.lc.start(10)
def spider_opened(self, spider):
    """Reset page/item counters and begin periodic logging for *spider*."""
    self.pagesprev = 0
    self.itemsprev = 0
    # Log crawl progress every self.interval seconds.
    self.task = task.LoopingCall(self.log, spider)
    self.task.start(self.interval)
def testBadDelay(self):
    """start() must reject a negative interval with ValueError."""
    call = task.LoopingCall(lambda: None)
    self.assertRaises(ValueError, call.start, -1)
def __init__(self):
    """Create the scheduler and its builder loop.

    NOTE(review): the LoopingCall is constructed but not started here —
    presumably started elsewhere; confirm before relying on it running.
    """
    log.msg("scheduler: init")
    self.sched_task = task.LoopingCall(self.sched_builder)
# Fragment of the dashboard server start-up; the trailing `except` clauses
# belong to a `try:` that begins before this excerpt, so the span is
# incomplete and is left byte-identical.
# Picks an SSL or plain-TCP endpoint string from the configuration, serves
# the web resource, and schedules syslog_cache_processor on a LoopingCall
# whose failure deferred is routed to main_server_loop_failed.
if configuration["ENABLE_HTTPS"] == "Y": endpoint_description = ( "ssl:" + str(configuration["DASHBOARD_WEB_PORT"]) + ":interface=" + configuration["NETWORK_INTERFACE"] + ":privateKey=" + configuration["PRIVATE_KEY"]) else: endpoint_description = ( "tcp:port=" + str(configuration["DASHBOARD_WEB_PORT"]) + ":interface=" + configuration["NETWORK_INTERFACE"]) endpoint = endpoints.serverFromString(reactor, endpoint_description) endpoint.listen(Site(nserv.resource())) syslog_cache_processor_task = task.LoopingCall( syslog_cache_processor, redis_syslog_cache, redis_main_db) main_server_processor_loop = syslog_cache_processor_task.start( configuration["SYSLOG_CACHE_PROCESS_INTERVAL"]) main_server_processor_loop.addErrback(main_server_loop_failed) # reactor.suggestThreadPoolSize(30) reactor.run() except (IOError, SystemExit): raise except KeyboardInterrupt: narwhal_log(LOG_MESSAGE_STOP) print("Shutting down server...")
def onConnect(self, request):
    """WebSocket handshake hook: log the peer and start a 1 Hz echo loop."""
    print("Client connecting: {0}".format(request.peer))
    # Print an echo line once per second for the lifetime of this connection.
    self.echo = task.LoopingCall(self.echo_print)
    self.echo.start(1)
# help="don't print status messages to stdout")
(options, args) = parser.parse_args()

# PEP 8: compare against None with `is not`, never `!=`.
if options.identity is not None:
    Identity.set_identity(options.identity)

logging.basicConfig(format="%(asctime)s %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)
logging.info("Sending dummy table on port %d", constants.ZMQ_RPC_PORT)

zmq_factory = ZmqFactory()
# Create a zeromq requestor object.  Bound to a fresh name so the
# `zmq_requestor` module itself is no longer shadowed by the instance.
requestor = zmq_requestor.ZmqRequestor(options.hostname,
                                       constants.ZMQ_RPC_PORT,
                                       zmq_factory)

# Send the table at fixed intervals.  Named `send_task` so the twisted
# `task` module is not shadowed by the LoopingCall instance.
send_task = task.LoopingCall(send_dummy_table, requestor,
                             options.schema_version)
send_task.start(10.0)

# Run the event loop
reactor.run()
def run(self):
    """End-to-end demo: create a wallet, claim initial NEO, send it to
    itself, claim the generated GAS, then export the WIF key."""
    # Persist incoming blocks in the background ten times per second.
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(.1)
    Blockchain.Default().PersistBlocks()
    # Block until at least a couple of blocks have been synced.
    while Blockchain.Default().Height < 2:
        print("Waiting for wallet to sync...")
        time.sleep(1)
    print("Creating Wallet...")
    self.wallet = UserWallet.Create(path=self.wallet_fn, password=self.wallet_pwd)
    self.wallet.ProcessBlocks()
    # Extract address and WIF key from the wallet's default contract.
    contract = self.wallet.GetDefaultContract()
    key = self.wallet.GetKey(contract.PublicKeyHash)
    address = key.GetAddress()
    wif = key.Export()
    print("- Address:", address)
    print("- WIF key:", wif)
    # Drop the wallet while claiming, then reopen it afterwards.
    self.wallet = None
    self.claim_initial_neo(address)
    self.wallet = UserWallet.Open(self.wallet_fn, self.wallet_pwd)
    self._walletdb_loop = task.LoopingCall(self.wallet.ProcessBlocks)
    self._walletdb_loop.start(1)
    print("\nWait %s min before claiming GAS." % self.min_wait)
    time.sleep(60 * self.min_wait)
    print("\nSending NEO to own wallet...")
    tx = construct_and_send(None, self.wallet,
                            ["neo", address, "100000000"],
                            prompt_password=False)
    if not tx:
        print("Something went wrong, no tx.")
        return
    # Wait until the transfer is on the blockchain before claiming.
    self.wait_for_tx(tx)
    print("Claiming the GAS...")
    claim_tx, relayed = ClaimGas(self.wallet, require_password=False)
    self.wait_for_tx(claim_tx)
    # Finally, need to rebuild the wallet.
    self.wallet.Rebuild()
    print("\nAll done!")
    print("- WIF key: %s" % wif)
    print("- Wallet file: %s" % self.wallet_fn)
    print("- Wallet pwd: %s" % self.wallet_pwd)
    if self.wif_fn:
        with open(self.wif_fn, "w") as f:
            f.write(wif)
    self.quit()
def _collector():
    """Poll c.monitor() at the configured cycle interval (seconds)."""
    # Renamed from `l` — single-letter `l` is too easy to misread as `1`.
    loop = task.LoopingCall(c.monitor)
    loop.start(int(config['cycle']))
def __init__(self, coherence, device): self.coherence = coherence self.device = device self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) self.window.connect("delete_event", self.hide) self.window.set_default_size(480, 200) try: title = 'InternetGatewayDevice %s' % device.get_friendly_name() except: title = 'InternetGatewayDevice' self.window.set_title(title) vbox = gtk.VBox(homogeneous=False, spacing=10) hbox = gtk.HBox(homogeneous=False, spacing=10) text = gtk.Label("<b>Link:</b>") text.set_use_markup(True) self.link_state_image = gtk.Image() icon = resource_filename(__name__, os.path.join('icons', 'red.png')) self.link_down_icon = gtk.gdk.pixbuf_new_from_file(icon) icon = resource_filename(__name__, os.path.join('icons', 'green.png')) self.link_up_icon = gtk.gdk.pixbuf_new_from_file(icon) self.link_state_image.set_from_pixbuf(self.link_down_icon) hbox.add(text) hbox.add(self.link_state_image) self.link_type = gtk.Label("<b>Type:</b> unknown (n/a)") self.link_type.set_use_markup(True) hbox.add(self.link_type) vbox.pack_start(hbox, False, False, 2) hbox = gtk.HBox(homogeneous=False, spacing=10) label = gtk.Label("<b>Uptime:</b>") label.set_use_markup(True) hbox.add(label) self.uptime = gtk.Label(" ") self.uptime.set_use_markup(True) hbox.add(self.uptime) label = gtk.Label("<b>External IP:</b>") label.set_use_markup(True) hbox.add(label) self.external_ip = gtk.Label(" ") self.external_ip.set_use_markup(True) hbox.add(self.external_ip) label = gtk.Label("<b>IN-Bytes:</b>") label.set_use_markup(True) hbox.add(label) self.bytes_in = gtk.Label(" ") self.bytes_in.set_use_markup(True) hbox.add(self.bytes_in) label = gtk.Label("<b>OUT-Bytes:</b>") label.set_use_markup(True) hbox.add(label) self.bytes_out = gtk.Label(" ") self.bytes_out.set_use_markup(True) hbox.add(self.bytes_out) vbox.pack_start(hbox, False, False, 2) hbox = gtk.HBox(homogeneous=False, spacing=10) label = gtk.Label("<b>Port-Mappings:</b>") label.set_use_markup(True) hbox.add(label) vbox.pack_start(hbox, 
False, False, 2) self.nat_store = gtk.ListStore(str, str, str, str, str, str, str, str, str) self.nat_view = gtk.TreeView(self.nat_store) self.nat_view.connect("button_press_event", self.button_action) i = 0 for c in [ 'index', 'enabled', 'protocol', 'remote host', 'external port', 'internal host', 'internal port', 'lease duration', 'description' ]: column = gtk.TreeViewColumn(c) self.nat_view.append_column(column) text_cell = gtk.CellRendererText() column.pack_start(text_cell, True) column.add_attribute(text_cell, "text", i) i += 1 vbox.pack_start(self.nat_view, expand=True, fill=True) self.window.add(vbox) self.window.show_all() self.wan_device = None self.wan_connection_device = None try: self.wan_device = self.device.get_embedded_device_by_type( 'WANDevice')[0] print self.wan_device service = self.wan_device.get_service_by_type( 'WANCommonInterfaceConfig') service.subscribe_for_variable('PhysicalLinkStatus', callback=self.state_variable_change) self.get_traffic_loop = task.LoopingCall(self.get_traffic, service) self.get_traffic_loop.start(10, now=True) except IndexError: pass if self.wan_device != None: try: self.wan_connection_device = self.wan_device.get_embedded_device_by_type( 'WANConnectionDevice')[0] service = self.wan_connection_device.get_service_by_type( ['WANIPConnection', 'WANPPPConnection']) service.subscribe_for_variable( 'PortMappingNumberOfEntries', callback=self.state_variable_change) service.subscribe_for_variable( 'ExternalIPAddress', callback=self.state_variable_change) self.get_state_loop = task.LoopingCall(self.get_state, service) self.get_state_loop.start(10, now=True) except IndexError: pass
def spawnclick(self):
    """Start clicking: call pingbox() once per second from now on."""
    self.lc = task.LoopingCall(self.pingbox)
    self.lc.start(1.0)
# Fragment (Python 2): the `def` header of the first function — ending at
# `return element_number` — lies before this excerpt, so it is left
# byte-identical.
# computeBuffer repaints the LED buffer (channel 0 fixed at 128, channel 1
# ramping with redness_array[0] mod 128, channel 2 offset by `blueness`)
# and hands it to writeBuffer, which pushes it to the `leds` stream; the
# LoopingCall drives repaints roughly every 30 ms.
element_string = get_element(message) print element_string element_number = int(re.sub(r'\D', '', element_string)) return element_number def computeBuffer(redness_array): for i in range(nleds): buff[3 * i] = 128 buff[3 * i + 1] = 128 + redness_array[0] buff[3 * i + 2] = 128 + int(blueness) redness_array[0] = (redness_array[0] + 1) % 128 writeBuffer(buff) # sync # threads.deferToThread(writeBuffer, buff) # async def writeBuffer(buff): # print (buff[0], buff[1], buff[2] ) leds.write(buff + zeros) leds.flush() task.LoopingCall(computeBuffer, [0]).start(.03) modes = [None, 'u', 'r', 'R', 'w', 'f'] if __name__ == "__main__": app = UDPReceiverApplication(8000) reactor.run()
def _dump_stats(self):
    """Schedule a one-minute stats dump for every registered plugin."""
    for plugin in self.plugins.values():
        # Each plugin gets its own LoopingCall writing stats every 60 s.
        stat_dump = task.LoopingCall(plugin.store_stats)
        stat_dump.start(60)
        log.msg("Started recording stats for module: " + plugin.name)
# Connect to the AMQP broker and start consuming once the protocol is ready.
parameters = pika.ConnectionParameters()
cc = protocol.ClientCreator(reactor,
                            twisted_connection.TwistedProtocolConnection,
                            parameters)
host = CONFIG.get('fmn.pika.host', 'localhost')
port = int(CONFIG.get('fmn.pika.port', 5672))
d = cc.connectTCP(host=host, port=port)
d.addCallback(lambda protocol: protocol.ready)
d.addCallback(run)

# Here we schedule the producers to run periodically (with a default
# frequency of 10 seconds).
# Added value: Everything is nicely tied up with twisted in one app/place
# Cons: if one of the producers suddenly takes a real while to run, it will
# block the entire twisted reactor and thus all the backends with it.
# TODO: move to cron?
frequency = CONFIG.get('fmn.confirmation_frequency', 10)
confirmation_producer = fmn_producers.ConfirmationProducer(session, backends)
lc3 = task.LoopingCall(confirmation_producer.work)
lc3.start(frequency)

try:
    print('Starting consuming')
    reactor.run()
except KeyboardInterrupt:
    pass
finally:
    # Always release the DB session, even on Ctrl-C.
    session.close()
    print('%s tasks proceeded' % CNT)
# Fragment: the leading `counter += 1 has_them = True` is the tail of a
# function defined before this excerpt, so the span is left byte-identical.
# _test() toggles a demo command set (xeyes/xlogo/xcalc/xterm) on and off;
# the LoopingCall below runs it every 3 seconds with now=False, so the
# first toggle is delayed by one interval.
counter += 1 has_them = True def _test(): global has_them global counter if not has_them: print("Adding them again!") m.add_command(commands.Command("xeyes", identifier="xeyes")) m.add_command(commands.Command("xlogo", identifier="xlogo")) m.add_command(commands.Command("xcalc", identifier="xcalc")) m.add_command( commands.Command("xterm -hold -e /bin/bash -c \"echo %d\"" % (counter), identifier="xterm")) counter += 1 has_them = True else: print("Removing them.") m.remove_command("xeyes") m.remove_command("xlogo") m.remove_command("xcalc") m.remove_command("xterm") has_them = False # a GUI! app = gui.start_gui(m) looping_call = task.LoopingCall(_test) looping_call.start(3.0, False) reactor.run()
def __init__(self):
    """Initialise counters and print stats once per second."""
    self.count = 0
    self.running = False
    # NOTE: the LoopingCall reference is not stored, so it can never be
    # stopped once started.
    task.LoopingCall(self.print_stat).start(1.0)
# Fragment (Python 2): `thread_num`, `threads`, `worker`, `ip_list`,
# `update_day`, `save_name` and `queue_without_limit` are defined before
# this excerpt, so the span is left byte-identical.
# Spawns worker threads, joins them and the queue, then (commented out)
# would persist the de-duplicated IP list; main() is re-run hourly via the
# LoopingCall in the __main__ guard.
for i in range(thread_num): thread = MyThread(worker) thread.start() threads.append(thread) for thread in threads: thread.join() queue_without_limit.join() filename = '%s_%s' % (update_day, save_name) print ip_list if len(ip_list): # try: # os.remove(filename) # except: # pass # finally: # print "saving" # # with open(filename, 'w+') as file_ip: # ip_list = list(set(ip_list)) # for ip in ip_list: # file_ip.write(ip + '\n') pass if __name__ == '__main__': url = 'https://xueqiu.com' l = task.LoopingCall(main, {"url": url, "thread_num": 100}) l.start(3600.0) reactor.run()
def __init__(self):
    """Set up the protocol with a ping keep-alive loop.

    NOTE(review): the LoopingCall is created but not started here —
    presumably started once the connection opens; gotPong tracks whether
    the peer answered the last ping.
    """
    WebSocketClientProtocol.__init__(self)
    self.pingTask = task.LoopingCall(self.ping)
    self.gotPong = True
def _spider_opened(self, spider):
    """Kick off periodic logging for *spider* every self.interval seconds."""
    self.task = task.LoopingCall(self._log, spider)
    self.task.start(self.interval)
def run(self):
    """Start block persistence, the node leader, and the reactor."""
    # Persist incoming blocks ten times per second in the background.
    dbloop = task.LoopingCall(Blockchain.Default().PersistBlocks)
    dbloop.start(0.1)
    NodeLeader.Instance().Start()
    reactor.suggestThreadPoolSize(15)
    # Signal handlers disabled — presumably run() is invoked off the main
    # thread; confirm against the caller.
    reactor.run(installSignalHandlers=False)
def get_looping_call(self, fn, *args, **kwargs):
    """Return an unstarted LoopingCall for *fn* driven by this object's
    clock (useful for deterministic scheduling in tests)."""
    call = task.LoopingCall(fn, *args, **kwargs)
    call.clock = self.clock
    return call
# Fragment: cut off mid-call at the end (the SDM630ModbusV2(...) argument
# list continues past this excerpt), so the span is left byte-identical.
# Builds output sinks (EmonCMS, battery control) and per-protocol inverter
# lists, polling each group via its own LoopingCall at the configured
# poll_period.
# NOTE(review): `config['solax-Wifi']['timeout']` uses a lower-case 's'
# while every other access spells the key 'Solax-Wifi' — likely a typo
# that would raise KeyError at runtime; confirm against the config schema.
SolaxModbusInverters = [] print("Setting up EmonCMS") if 'emoncms' in config: outputs.append(EmonCMS(config['emoncms'])) if 'Solax-BatteryControl' in config: outputs.append(SolaxBatteryControl(config['Solax-BatteryControl'])) if 'Solax-Wifi' in config: SolaxWifiInverters = [] for inverter in config['Solax-Wifi']['inverters']: wifiInverter = SolaxWifi(inverter, config['solax-Wifi']['timeout']) SolaxWifiInverters.append(wifiInverter) looperSolaxWifi = task.LoopingCall(inputActions, SolaxWifiInverters) looperSolaxWifi.start(config['Solax-Wifi']['poll_period']) if 'Solax-Modbus' in config: SolaxModbusInverters = [] for inverter in config['Solax-Modbus']['inverters']: modbusInverter = SolaxModbus(config, inverter) SolaxModbusInverters.append(modbusInverter) looperSolaxModbus = task.LoopingCall(inputActions, SolaxModbusInverters) looperSolaxModbus.start(config['Solax-Modbus']['poll_period']) if 'SDM630ModbusV2' in config: SDM630Meters = [] for meter in config['SDM630ModbusV2']['ports']: modbusMeter = SDM630ModbusV2(meter, config['SDM630ModbusV2']['baud'],
def __init__(self, *args, **kwargs):
    """TCP server that additionally runs self.scheduler every `interval`
    seconds (the `interval` keyword is consumed before TCPServer sees it)."""
    self.interval = kwargs.pop("interval")
    internet.TCPServer.__init__(self, *args, **kwargs)
    # Start from an already-fired deferred carrying an empty result.
    self._deferred = defer.succeed({})
    self._task = task.LoopingCall(self.scheduler)
    self._task.start(self.interval)
def __init__(self, rate):
    """Rate limiter: release at most *rate* enqueued items per second."""
    self.queue = []
    # Releasing one item every 1/rate seconds yields `rate` items/second.
    self.looping_call = task.LoopingCall(self._allow_one)
    self.looping_call.start(1. / float(rate))
logger.info('ID ALIAS MAPPER: talkgroup_ids dictionary is available')

# Build the routing rules file
BRIDGES = make_bridges('hb_confbridge_rules')

# Build the Access Control List
ACL = build_acl('sub_acl')

# INITIALIZE THE REPORTING LOOP
report_server = config_reports(CONFIG, logger, confbridgeReportFactory)

# HBlink instance creation
logger.info('HBlink \'hb_router.py\' (c) 2016 N0MJS & the K0USY Group - SYSTEM STARTING...')
for system in CONFIG['SYSTEMS']:
    if CONFIG['SYSTEMS'][system]['ENABLED']:
        # One routerSYSTEM instance per enabled system, bound to its own UDP port.
        systems[system] = routerSYSTEM(system, CONFIG, logger, report_server)
        reactor.listenUDP(CONFIG['SYSTEMS'][system]['PORT'],
                          systems[system],
                          interface=CONFIG['SYSTEMS'][system]['IP'])
        logger.debug('%s instance created: %s, %s',
                     CONFIG['SYSTEMS'][system]['MODE'], system, systems[system])

# Initialize the rule timer -- this is for user activated stuff
rule_timer = task.LoopingCall(rule_timer_loop)
rule_timer.start(60)

reactor.run()
def initialize_prometheus_exporter(ursula, listen_address: str, port: NETWORK_PORT, metrics_prefix: str) -> None:
    """Register Ursula node metrics, schedule their periodic collection,
    and serve them over HTTP at /metrics (Prometheus) and /json_metrics."""
    from prometheus_client.twisted import MetricsResource
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    from .json_metrics_export import JSONMetricsResource

    # Metric objects keyed by a short internal name; every exported metric
    # name is prefixed with metrics_prefix.
    node_metrics = {
        "known_nodes_gauge": Gauge(f'{metrics_prefix}_known_nodes', 'Number of currently known nodes'),
        "work_orders_gauge": Gauge(f'{metrics_prefix}_work_orders', 'Number of accepted work orders'),
        "missing_commitments_gauge": Gauge(f'{metrics_prefix}_missing_commitments', 'Currently missed commitments'),
        "learning_status": Enum(f'{metrics_prefix}_node_discovery', 'Learning loop status',
                                states=['starting', 'running', 'stopped']),
        "eth_balance_gauge": Gauge(f'{metrics_prefix}_staker_eth_balance', 'Ethereum balance'),
        "token_balance_gauge": Gauge(f'{metrics_prefix}_staker_token_balance', 'NuNit balance'),
        "worker_eth_balance_gauge": Gauge(f'{metrics_prefix}_worker_eth_balance', 'Worker Ethereum balance'),
        "worker_token_balance_gauge": Gauge(f'{metrics_prefix}_worker_token_balance', 'Worker NuNit balance'),
        "requests_counter": Counter(f'{metrics_prefix}_http_failures', 'HTTP Failures', ['method', 'endpoint']),
        "host_info": Info(f'{metrics_prefix}_host_info', 'Description of info'),
        "active_stake_gauge": Gauge(f'{metrics_prefix}_active_stake', 'Active stake'),
        "owned_tokens_gauge": Gauge(f'{metrics_prefix}_owned_tokens',
                                    'All tokens that belong to the staker, including '
                                    'locked, unlocked and rewards'),
        "unlocked_tokens_gauge": Gauge(f'{metrics_prefix}_unlocked_tokens', 'Amount of unlocked tokens'),
        "available_refund_gauge": Gauge(f'{metrics_prefix}_available_refund', 'Available refund'),
        "policies_held_gauge": Gauge(f'{metrics_prefix}_policies_held', 'Policies held'),
        "current_period_gauge": Gauge(f'{metrics_prefix}_current_period', 'Current period'),
        "current_eth_block_number": Gauge(f'{metrics_prefix}_current_eth_block_number', 'Current Ethereum block'),
        "substakes_count_gauge": Gauge(f'{metrics_prefix}_substakes_count', 'Substakes count'),
        "current_worker_is_me_gauge": Gauge(f'{metrics_prefix}_current_worker_is_me', 'Current worker is me'),
        "worklock_deposited_eth_gauge": Gauge(f'{metrics_prefix}_worklock_current_deposited_eth', 'Worklock deposited ETH'),
        "worklock_remaining_work_gauge": Gauge(f'{metrics_prefix}_worklock_refund_remaining_work', 'Worklock remaining work'),
        "worklock_refund_completed_work_gauge": Gauge(f'{metrics_prefix}_worklock_refund_completedWork', 'Worklock completed work'),
    }

    # Combine event-collector configs from the staking, worklock and policy agents.
    event_collectors_config = get_staking_event_collectors_config(ursula, metrics_prefix) + \
        get_worklock_event_collectors_config(ursula, metrics_prefix) + \
        get_policy_event_collectors_config(ursula, metrics_prefix)
    event_metrics_collectors = build_event_metrics_collectors(ursula, event_collectors_config)

    if not ursula.federated_only:
        staking_agent = ContractAgency.get_agent(StakingEscrowAgent, registry=ursula.registry)
        node_metrics["current_worker_is_me_gauge"].set(
            staking_agent.get_worker_from_staker(ursula.checksum_address) == ursula.worker_address)

    # Scheduling: collect all metrics every 10 s, first run delayed (now=False).
    metrics_task = task.LoopingCall(collect_prometheus_metrics,
                                    ursula=ursula,
                                    event_metrics_collectors=event_metrics_collectors,
                                    node_metrics=node_metrics)
    metrics_task.start(interval=10, now=False)  # TODO: make configurable

    # WSGI Service
    root = Resource()
    root.putChild(b'metrics', MetricsResource())
    root.putChild(b'json_metrics', JSONMetricsResource())
    factory = Site(root)
    reactor.listenTCP(port, factory, interface=listen_address)
def test_defaultClock(self):
    """
    L{LoopingCall}'s default clock should be the reactor.
    """
    looper = task.LoopingCall(lambda: None)
    self.assertEqual(looper.clock, reactor)
def __init__(self):
    """Create the session registry and its tick loop.

    NOTE(review): the tick LoopingCall is not started here — presumably
    started elsewhere once the service is running; confirm.
    """
    self.sessions = {}
    self.tick = task.LoopingCall(self._tick)
def testFailure(self):
    """A LoopingCall whose callable raises must fail its start() deferred
    with that exception."""
    def raiser(x):
        raise TestException(x)
    call = task.LoopingCall(raiser, "bar")
    return self.assertFailure(call.start(0.1), TestException)
def __init__(self, conn):
    """Wrap *conn* and send a global request every 300 seconds."""
    self.conn = conn
    self.globalTimeout = None
    # Keep-alive: invoke sendGlobal() every five minutes, starting now.
    self.lc = task.LoopingCall(self.sendGlobal)
    self.lc.start(300)