def loop(t, f):
    """Poll f() every t seconds; once it returns falsy, restart the process.

    The restart replaces the current process image via os.execv, so the
    reactor shutdown triggers never actually run.
    """
    if not f():
        logger.info("Reloading...")
        reactor.stop()
        # Re-exec ourselves with the same argv to pick up new code.
        os.execv(sys.argv[0], sys.argv)
    else:
        # Condition still holds: check again in t seconds.
        reactor.callLater(t, loop, t, f)
def applicationShouldTerminate_(self, sender):
    """NSApplication delegate hook: defer quitting until the reactor stops.

    Returns True (terminate now) only when the reactor is not running;
    otherwise schedules the Cocoa event loop stop after reactor shutdown
    and answers False so Cocoa waits.
    """
    if not reactor.running:
        return True
    reactor.addSystemEventTrigger(
        'after', 'shutdown', AppHelper.stopEventLoop)
    reactor.stop()
    return False
def _stop(self): """Stop the reactor event loop""" if self.verbose: print 'stopping reactor' from twisted.internet import reactor reactor.stop()
def gotList(mappings): mappings = [ ((x[0][0],int(x[0][1]),x[1])) for x in mappings.items()] mappings.sort() for prot,port,ddict in mappings: desc = mappingDesc%ddict print "%s/%d -> %s"%(prot,port,desc) reactor.stop()
def callback(results):
    """Report each DeferredList result, having shut the reactor down first.

    results -- sequence of (success, value) pairs as produced by a
    DeferredList; successful values are XML elements, failures are
    Failure objects.
    """
    reactor.stop()
    for success, result in results:
        if success:
            # ElementTree.dump() writes to stdout itself and returns None;
            # the original `print ElementTree.dump(...)` emitted a spurious
            # "None" line after each element.
            ElementTree.dump(result)
        else:
            # printTraceback() also prints and returns None -- same fix.
            result.printTraceback()
def run(dbxml):
    """Synchronise augment records with the contents of dbxml and report
    added/changed/removed counts.  Runs as an inlineCallbacks-style
    generator (note the yields); always stops the reactor when done.
    """
    try:
        # UIDs currently known to the augment service.
        uids = set((yield augment.AugmentService.getAllUIDs()))
        added = 0
        updated = 0
        removed = 0
        if dbxml:
            yield augment.AugmentService.addAugmentRecords(dbxml.db.values(), )
            # Partition the incoming records into brand-new vs. existing UIDs
            # (for reporting only -- addAugmentRecords above already upserted).
            add_records = list()
            modify_records = list()
            for record in dbxml.db.values():
                if record.uid in uids:
                    modify_records.append(record)
                else:
                    add_records.append(record)
            added = len(add_records)
            updated = len(modify_records)
            # Anything known to the service but absent from dbxml is removed.
            remove_uids = uids.difference(dbxml.db.keys())
            yield augment.AugmentService.removeAugmentRecords(remove_uids)
            removed = len(remove_uids)
        print "Changes:"
        print " Added: %d" % (added,)
        print " Changed: %d" % (updated,)
        print " Removed: %d" % (removed,)
    finally:
        #
        # Stop the reactor
        #
        reactor.stop()
def command_result(command_st):
    """Parse one interactive command line ("q" | "set KEY VALUE" |
    "get KEY") and dispatch it to the memcache-style server, then
    re-prompt via get_input().
    """
    commands = command_st.strip().split()
    if(len(commands) > 1):
        op = commands[0]
        key = str(commands[1])
        print "####KEY", key
        if(len(commands) > 2):
            value = commands[2].encode('ascii', 'ignore')
        if op == "q":
            reactor.stop()
        elif op == "set":
            # NOTE(review): if "set" is given without a third token, `value`
            # is unbound here and this raises NameError -- confirm intended.
            print "adding key:", key, "-->", value
            server.set(key, value).addCallback(set_done, server)
        elif op == "get":
            print "getting key:", key
            server.get(key).addCallback(get_done, server)
    else:
        print "command: ",command_st," is wrong format"
    # Prompt for the next command regardless of outcome.
    get_input()
def on_centrifuge_message(self, msg): global COUNT COUNT += 1 if COUNT == NUM_CLIENTS*NUM_CLIENTS: stop = time.time() print stop - self.factory.start reactor.stop()
def quit(self, shutdown=False):
    """
    Quits the GtkUI

    :param shutdown: whether or not to shutdown the daemon as well
    :type shutdown: boolean
    """
    if shutdown:
        def on_daemon_shutdown(result):
            # The daemon shutdown may already have torn the reactor down.
            try:
                reactor.stop()
            except ReactorNotRunning:
                log.debug("Attempted to stop the reactor but it is not running...")
        client.daemon.shutdown().addCallback(on_daemon_shutdown)
        return
    if client.is_classicmode():
        # Classic mode runs the daemon in-process; nothing to disconnect.
        reactor.stop()
        return
    if not client.connected():
        reactor.stop()
        return
    # Connected to a remote daemon: disconnect cleanly, then stop.
    def on_client_disconnected(result):
        reactor.stop()
    client.disconnect().addCallback(on_client_disconnected)
def closeEvent(self, evt=None):
    '''Window close event: stop the reactor and exit the Qt application.'''
    uiDebug("parent closeEvent")
    #pylint: disable=E1101
    reactor.stop()
    self.app.exit()
    uiDebug("parent closeEvent end")
def clientConnectionFailed(self, connector, reason):
    '''
    called when tcp connection attemp failed!

    Logs the failure reason and stops the reactor (the program cannot
    proceed without a connection).
    '''
    print "connection failed: %s" % reason
    reactor.stop()
def single_shot_main(args):
    """Run one remote command over WinRS, pretty-print the result, then
    stop the reactor (even on failure)."""
    try:
        winrs = WinrsClient(args.remote, args.username, args.password)
        pprint((yield winrs.run_command(args.command)))
    finally:
        reactor.stop()
def terminate(self):
    """Orderly shutdown: stop the watcher thread, close open command
    channels, then stop the reactor."""
    # join the CheckChannelStreamRepeat thread
    self.thread.do_run = False
    self.thread.join()
    self.close_commands()
    reactor.stop()
def setup_complete(config):
    """Dump the fetched Tor configuration in torrc-like form, collecting
    keys still at their default value, then stop the reactor."""
    print "Got config"
    keys = config.config.keys()
    keys.sort()
    defaults = []
    for k in keys:
        if k == 'HiddenServices':
            # Hidden services are structured objects, printed field by field.
            for hs in config.config[k]:
                for xx in ['dir', 'version', 'authorize_client']:
                    if getattr(hs, xx):
                        print 'HiddenService%s %s' % (xx.capitalize(), getattr(hs, xx))
                for port in hs.ports:
                    print 'HiddenServicePort', port
            continue
        v = getattr(config, k)
        if isinstance(v, types.ListType):
            # List-valued option: print each non-default element.
            for val in v:
                if val != DEFAULT_VALUE:
                    print k, val
        elif v == DEFAULT_VALUE:
            defaults.append(k)
        else:
            print k, v
    if 'defaults' in sys.argv:
        print "Set to default value:"
        for k in defaults:
            print "# %s" % k
    reactor.stop()
def handleCmd_EOS(self, prefix, args):
    """End-of-sync from the IRC server: verify no other bridge is active,
    reserve our nick prefix, and bring this bridge online."""
    # Only honour EOS from our own server, and only once.
    if prefix != self.server_name:
        return
    if self.ism.syncd:
        return
    LOG.info("Finished receiving IRC sync data.")
    self.showirc = True
    # Check for conflicting bridges.
    if self.ism.findConflictingBridge():
        LOG.error("My nick prefix is in use! Terminating.")
        self.transport.loseConnection()
        reactor.stop()
        return
    # Set up nick reservation
    scfg = getServiceConfig()
    self.sendLine(
        "TKL + Q * %s* %s 0 %d :Reserved for Dtella" %
        (cfg.dc_to_irc_prefix, scfg.my_host, time.time()))
    self.ism.killConflictingUsers()
    # Send my own bridge nick
    self.pushBotJoin(do_nick=True)
    # When we enter the syncd state, register this instance with Dtella.
    # This will eventually trigger event_DtellaUp, where we send our state.
    self.schedulePing()
    self.ism.addMeToMain()
def privmsg(self, user, channel, msg):
    """This will get called when the bot receives a message."""
    user = user.split('!', 1)[0]
    # Check to see if they're sending me a private message
    if channel == self.nickname:
        msg = "My creator is %s. He's awesome" % self.factory.owner
        self.msg(user, msg)
        return
    # Otherwise check to see if it is a message directed at me
    if msg.startswith(self.nickname + ":"):
        if user == self.factory.owner:
            # NOTE(review): split(':')[1] keeps only the segment between the
            # first and second ':' -- a command containing ':' is truncated.
            # Confirm whether split(':', 1)[1] was intended.
            msg = msg.split(':')[1]
            if msg.strip() == "reload":
                self.factory.words = reload_words()
                print "Reloaded words"
            elif msg.strip() == "die":
                reactor.stop()
        else:
            msg = "%s: I am a trac bot. I inform %s about stuff. I'm awesome." % (user, self.factory.owner)
            self.msg(channel, msg)
    # Only relay messages seen on the configured read channel.
    if channel != '#'+self.factory.read_chan:
        return
    for word in self.factory.words:
        if word in msg:
            self.msg('#'+self.factory.write_chan, self.factory.owner+':'+msg)
            return
def stop(self, dummy=None):
    """ Exit routine; stops twisted reactor """
    print "Closing Python Manager"
    reactor.stop()
    print "Done"
def clientConnectionLost(self, connector, reason):
    """Called when we lose connection to the server.

    Keyword arguments:
    connector -- Twisted IRC connector. Provided by Twisted.
    reason -- Reason for disconnect. Provided by Twisted.
    """
    # self.disconnect is set when a user asked us to disconnect; in that
    # case shut down instead of reconnecting.
    if self.disconnect:
        self.logger.info(
            "Disconnected successfully (%s), quitting." % reason
        )
        reactor.stop()
        return
    self.logger.info(
        "Connection lost (%s), reconnecting in %d seconds." %
        (reason, self.minimum_reconnection_wait)
    )
    # First disconnect since a successful connection: reset the backoff
    # to the minimum and wait before reconnecting.
    self.last_reconnection_wait = self.minimum_reconnection_wait
    time.sleep(self.minimum_reconnection_wait)
    connector.connect()
def onDisconnect(self):
    """Connection closed: clear the display and stop the reactor,
    tolerating the case where it was already stopped."""
    self.log.info("Connection closed")
    self._disp.clear()
    try:
        reactor.stop()
    except ReactorNotRunning:
        pass
def quit(self, exit_value=0, with_restart=False):
    """Shutdown and stop the reactor."""
    yield self.shutdown(with_restart)
    if not reactor.running:
        # Reactor never started (or already gone): exit directly.
        sys.exit(exit_value)
    reactor.stop()
def count(self): if self.num < 0: reactor.stop() else: print "self.num = " , self.num self.num = self.num - 1 reactor.callLater(1,self.count)
def _stuned(ip):
    """STUN finished: depending on argv[1], either listen, connect to a
    peer, or publish our external UDP address in the local identity."""
    if stun.getUDPClient() is None:
        print 'UDP CLIENT IS NONE - EXIT'
        reactor.stop()
        return
    print '+++++ EXTERNAL UDP ADDRESS IS', stun.getUDPClient().externalAddress
    if sys.argv[1] == 'listen':
        print '+++++ START LISTENING'
        return
    if sys.argv[1] == 'connect':
        print '+++++ CONNECTING TO REMOTE MACHINE'
        _try2connect()
        return
    # Default mode: record the discovered udp://host:port contact in our
    # identity, re-sign it, save it, and push it to the identity servers.
    lid = misc.getLocalIdentity()
    udp_contact = 'udp://'+stun.getUDPClient().externalAddress[0]+':'+str(stun.getUDPClient().externalAddress[1])
    lid.setProtoContact('udp', udp_contact)
    lid.sign()
    misc.setLocalIdentity(lid)
    misc.saveLocalIdentity()
    print '+++++ UPDATE IDENTITY', str(lid.contacts)
    _send_servers().addBoth(_id_sent)
def check_counters_and_stop(self):
    """Poll every 0.5s until enough counters have reported, then stop."""
    if len(self.counters) >= self.number_counters:
        reactor.stop()
        echo('OUT')
    else:
        # NOTE(review): `count` is not defined in this method -- presumably a
        # module-level name; otherwise this raises NameError. Confirm.
        echo("{0} {1}".format(count, stack()[0][3]))
        reactor.callLater(.5, self.check_counters_and_stop)
def processEnded(self, reason):
    """Record the child's exit status, drop the process handle, and stop
    the reactor. Both normal exit (ProcessDone) and abnormal termination
    (ProcessTerminated) carry a .status attribute."""
    ending = reason.value
    if isinstance(ending, (ProcessTerminated, ProcessDone)):
        self._exit_status = ending.status
    self._process = None
    reactor.stop()
def stop(self):
    """
    Stop the proxy server (logs the listening port being released).

    :return:
    """
    print('停止监听端口{}, 停止代理服务器'.format(self._port))
    reactor.stop()
def err_cb(failure):
    """Errback: log the failure (full traceback when --debug) and stop
    the reactor if it is still running."""
    failure.trap(Exception)
    logging.error("Error: {0}".format(failure))
    if args['debug']:
        traceback.print_exc()
    if reactor.running:
        reactor.stop()
def errback(error):
    """Translate any Failure into a RemoteBadMonitor failure after logging
    and stopping the reactor; pass non-Failure values through unchanged."""
    if isinstance(error, Failure):
        self.log.critical(
            "Invalid monitor: %s" % self.options.monitor)
        reactor.stop()
        return defer.fail(RemoteBadMonitor(
            "Invalid monitor: %s" % self.options.monitor, ''))
    return error
def game_loop(): #Networking sent_data = datagrabber.grab() if sent_data != None: print sent_data #Logic if mo.quit(): tick.stop() reactor.stop() if rtrn.pressed(): hand.cards[0].flip() #Animation # #Video mo.window.clear(mo.sf.Color.WHITE) # hand.draw() opponent.draw() # mo.window.display()
def dataReceived(self, data):
    """Feed raw TLS/TCP bytes into the h2 connection state machine and
    dispatch each resulting event to the matching handler."""
    # TOFIX: Figure out how to get this method to work.
    # if not self.known_proto:
    #     self.known_proto = True
    # assert self.known_proto == b'h2'
    events = self.conn.receive_data(data)
    for event in events:
        # Every event is archived before being acted upon.
        self.messageHandler.storeEvent(event)
        if isinstance(event, ResponseReceived):
            self.handleResponse(event.headers, event.stream_id)
        elif isinstance(event, DataReceived):
            self.handleData(event.data, event.stream_id)
        elif isinstance(event, StreamEnded):
            self.endStream(event.stream_id)
        elif isinstance(event, SettingsAcknowledged):
            self.settingsAcked(event)
        elif isinstance(event, StreamReset):
            # Stop the reactor before raising so the process can exit.
            reactor.stop()
            raise RuntimeError("Stream reset: %d" % event.error_code)
        else:
            print(event)
    # Flush whatever frames h2 queued in response (ACKs, window updates...).
    data = self.conn.data_to_send()
    if data:
        self.transport.write(data)
def _failure(self, why):
    """Explain why worker startup could not be confirmed, record a nonzero
    exit code, and stop the reactor. `why` is a Twisted Failure."""
    from twisted.internet import reactor
    from buildbot_worker.scripts.logwatcher import WorkerTimeoutError
    if why.check(WorkerTimeoutError):
        print(rewrap("""\
            The worker took more than 10 seconds to start and/or connect to the
            buildmaster, so we were unable to confirm that it started and
            connected correctly. Please 'tail twistd.log' and look for a line
            that says 'message from master: attached' to verify correct startup.
            If you see a bunch of messages like 'will retry in 6 seconds', your
            worker might not have the correct hostname or portnumber for the
            buildmaster, or the buildmaster might not be running. If you see
            messages like 'Failure: twisted.cred.error.UnauthorizedLogin' then
            your worker might be using the wrong botname or password. Please
            correct these problems and then restart the worker.
            """))
    else:
        print(rewrap("""\
            Unable to confirm that the worker started correctly. You may need
            to stop it, fix the config file, and restart.
            """))
    print(why)
    self.rc = 1
    reactor.stop()
def stop_reactor(result):
    """Callback/errback passthrough: stop the reactor (if running) and
    hand `result` on unchanged."""
    if reactor.running:
        log.msg('STOPPING REACTOR!')
        reactor.stop()
    return result
def run(self):
    """Crawl the conference spider and block until the crawl (success or
    failure) stops the reactor."""
    crawl_d = self.runner.crawl(
        ConferenceCrawler, proceedings=self.proceedings)
    crawl_d.addBoth(lambda _: reactor.stop())
    reactor.run()
def stop():
    """Stop the reactor, then fire the enclosing deferred `d`."""
    reactor.stop()
    d.callback(None)
def do_stop(r):
    """Capture the result into the enclosing `result` list and stop."""
    result.append(r)
    reactor.stop()
def clientConnectionFailed(self, connector, reason):
    """Connection attempt failed: log the reason and stop the reactor."""
    log.warning('Client connection failed: %s', reason)
    reactor.stop()
def clientConnectionLost(self, connector, reason):
    """Connection closed: announce and stop the reactor."""
    print "Connection lost - goodbye!"
    reactor.stop()
def clientConnectionFailed(self, connector, reason):
    """Connection attempt failed: announce and stop the reactor."""
    print "Connection failed - goodbye!"
    reactor.stop()
def main(args, net, datadir_path, merged_urls, worker_endpoint):
    """p2pool entry point (inlineCallbacks-style generator -- note the
    yields): connect to bitcoind, determine the payout address, load the
    share chain, join the p2p network, start the worker/web/stratum
    listeners, and run the periodic status reporter. Any fatal error
    stops the reactor."""
    try:
        print 'p2pool (version %s)' % (p2pool.__version__, )
        print

        @defer.inlineCallbacks
        def connect_p2p():
            # connect to bitcoind over bitcoin-p2p
            print '''Testing bitcoind P2P connection to '%s:%s'...''' % (
                args.bitcoind_address, args.bitcoind_p2p_port)
            factory = bitcoin_p2p.ClientFactory(net.PARENT)
            reactor.connectTCP(args.bitcoind_address, args.bitcoind_p2p_port,
                               factory)

            def long():
                # Printed only if the handshake takes longer than 5 seconds.
                print ''' ...taking a while. Common reasons for this include all of bitcoind's connection slots being used...'''
            long_dc = reactor.callLater(5, long)
            yield factory.getProtocol()  # waits until handshake is successful
            if not long_dc.called:
                long_dc.cancel()
            print '    ...success!'
            print
            defer.returnValue(factory)

        if args.testnet:  # establish p2p connection first if testnet so bitcoind can work without connections
            factory = yield connect_p2p()

        # connect to bitcoind over JSON-RPC and do initial getmemorypool
        url = '%s://%s:%i/' % ('https' if args.bitcoind_rpc_ssl else 'http',
                               args.bitcoind_address, args.bitcoind_rpc_port)
        print '''Testing bitcoind RPC connection to '%s' with username '%s'...''' % (
            url, args.bitcoind_rpc_username)
        bitcoind = jsonrpc.HTTPProxy(
            url,
            dict(Authorization='Basic ' + base64.b64encode(
                args.bitcoind_rpc_username + ':' + args.bitcoind_rpc_password)),
            timeout=30)
        yield helper.check(bitcoind, net)
        temp_work = yield helper.getwork(bitcoind)

        bitcoind_getinfo_var = variable.Variable(None)

        @defer.inlineCallbacks
        def poll_warnings():
            # Cache getnetworkinfo output for the status display / web UI.
            bitcoind_getinfo_var.set(
                (yield deferral.retry('Error while calling getnetworkinfo:')(
                    bitcoind.rpc_getnetworkinfo)()))
        yield poll_warnings()
        deferral.RobustLoopingCall(poll_warnings).start(20 * 60)

        print '    ...success!'
        print '    Current block hash: %x' % (temp_work['previous_block'], )
        print '    Current block height: %i' % (temp_work['height'] - 1, )
        print

        if not args.testnet:
            factory = yield connect_p2p()

        print 'Determining payout address...'
        pubkeys = keypool()
        if args.pubkey_hash is None and args.address != 'dynamic':
            # Static address mode, no explicit pubkey hash: use (and cache)
            # an address controlled by the local bitcoind.
            address_path = os.path.join(datadir_path, 'cached_payout_address')
            if os.path.exists(address_path):
                with open(address_path, 'rb') as f:
                    address = f.read().strip('\r\n')
                print '    Loaded cached address: %s...' % (address, )
            else:
                address = None
            if address is not None:
                res = yield deferral.retry(
                    'Error validating cached address:',
                    5)(lambda: bitcoind.rpc_validateaddress(address))()
                if not res['isvalid'] or not res['ismine']:
                    print '    Cached address is either invalid or not controlled by local bitcoind!'
                    address = None
            if address is None:
                print '    Getting payout address from bitcoind...'
                address = yield deferral.retry(
                    'Error getting payout address from bitcoind:',
                    5)(lambda: bitcoind.rpc_getaccountaddress('p2pool'))()
                with open(address_path, 'wb') as f:
                    f.write(address)
            my_pubkey_hash = bitcoin_data.address_to_pubkey_hash(
                address, net.PARENT)
            print '    ...success! Payout address:', bitcoin_data.pubkey_hash_to_address(
                my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        elif args.address != 'dynamic':
            # Explicit pubkey hash given on the command line.
            my_pubkey_hash = args.pubkey_hash
            print '    ...success! Payout address:', bitcoin_data.pubkey_hash_to_address(
                my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        else:
            # Dynamic mode: rotate between several fresh bitcoind addresses.
            print '    Entering dynamic address mode.'
            if args.numaddresses < 2:
                print ' ERROR: Can not use fewer than 2 addresses in dynamic mode. Resetting to 2.'
                args.numaddresses = 2
            for i in range(args.numaddresses):
                address = yield deferral.retry(
                    'Error getting a dynamic address from bitcoind:',
                    5)(lambda: bitcoind.rpc_getnewaddress('p2pool'))()
                new_pubkey = bitcoin_data.address_to_pubkey_hash(
                    address, net.PARENT)
                pubkeys.addkey(new_pubkey)
            pubkeys.updatestamp(time.time())
            my_pubkey_hash = pubkeys.keys[0]
            for i in range(len(pubkeys.keys)):
                print '    ...payout %d: %s' % (
                    i, bitcoin_data.pubkey_hash_to_address(
                        pubkeys.keys[i], net.PARENT), )

        print "Loading shares..."
        shares = {}
        known_verified = set()

        def share_cb(share):
            # Progress callback invoked by ShareStore for each loaded share.
            share.time_seen = 0  # XXX
            shares[share.hash] = share
            if len(shares) % 1000 == 0 and shares:
                print "    %i" % (len(shares), )
        ss = p2pool_data.ShareStore(
            os.path.join(datadir_path, 'shares.'), net, share_cb,
            known_verified.add)
        print "    ...done loading %i shares (%i verified)!" % (
            len(shares), len(known_verified))
        print

        print 'Initializing work...'
        node = p2pool_node.Node(factory, bitcoind, shares.values(),
                                known_verified, net)
        yield node.start()

        # Drop stored shares the tracker no longer cares about, and keep the
        # store in sync with tracker removals from here on.
        for share_hash in shares:
            if share_hash not in node.tracker.items:
                ss.forget_share(share_hash)
        for share_hash in known_verified:
            if share_hash not in node.tracker.verified.items:
                ss.forget_verified_share(share_hash)
        node.tracker.removed.watch(lambda share: ss.forget_share(share.hash))
        node.tracker.verified.removed.watch(
            lambda share: ss.forget_verified_share(share.hash))

        def save_shares():
            # Persist up to two chain-lengths of the best chain each minute.
            for share in node.tracker.get_chain(
                    node.best_share_var.value,
                    min(node.tracker.get_height(node.best_share_var.value),
                        2 * net.CHAIN_LENGTH)):
                ss.add_share(share)
                if share.hash in node.tracker.verified.items:
                    ss.add_verified_hash(share.hash)
        deferral.RobustLoopingCall(save_shares).start(60)

        if len(shares) > net.CHAIN_LENGTH:
            best_share = shares[node.best_share_var.value]
            previous_share = shares[
                best_share.share_data['previous_share_hash']]
            counts = p2pool_data.get_desired_version_counts(
                node.tracker,
                node.tracker.get_nth_parent_hash(
                    previous_share.hash, net.CHAIN_LENGTH * 9 // 10),
                net.CHAIN_LENGTH // 10)
            p2pool_data.update_min_protocol_version(counts, best_share)

        print '    ...success!'
        print

        print 'Joining p2pool network using port %i...' % (args.p2pool_port, )

        @defer.inlineCallbacks
        def parse(host):
            # "host[:port]" -> (resolved_ip, port)
            port = net.P2P_PORT
            if ':' in host:
                host, port_str = host.split(':')
                port = int(port_str)
            defer.returnValue(((yield reactor.resolve(host)), port))

        addrs = {}
        if os.path.exists(os.path.join(datadir_path, 'addrs')):
            try:
                with open(os.path.join(datadir_path, 'addrs'), 'rb') as f:
                    addrs.update(
                        dict((tuple(k), v) for k, v in json.loads(f.read())))
            except:
                print >> sys.stderr, 'error parsing addrs'
        for addr_df in map(parse, net.BOOTSTRAP_ADDRS):
            try:
                addr = yield addr_df
                if addr not in addrs:
                    addrs[addr] = (0, time.time(), time.time())
            except:
                log.err()

        connect_addrs = set()
        for addr_df in map(parse, args.p2pool_nodes):
            try:
                connect_addrs.add((yield addr_df))
            except:
                log.err()

        node.p2p_node = p2pool_node.P2PNode(
            node,
            port=args.p2pool_port,
            max_incoming_conns=args.p2pool_conns,
            addr_store=addrs,
            connect_addrs=connect_addrs,
            desired_outgoing_conns=args.p2pool_outgoing_conns,
            advertise_ip=args.advertise_ip,
            external_ip=args.p2pool_external_ip,
        )
        node.p2p_node.start()

        def save_addrs():
            # Persist the peer address book every minute.
            with open(os.path.join(datadir_path, 'addrs'), 'wb') as f:
                f.write(json.dumps(node.p2p_node.addr_store.items()))
        deferral.RobustLoopingCall(save_addrs).start(60)

        print '    ...success!'
        print

        if args.upnp:
            @defer.inlineCallbacks
            def upnp_thread():
                # Periodically (re-)establish the UPnP port mapping.
                while True:
                    try:
                        is_lan, lan_ip = yield ipdiscover.get_local_ip()
                        if is_lan:
                            pm = yield portmapper.get_port_mapper()
                            yield pm._upnp.add_port_mapping(
                                lan_ip, args.p2pool_port, args.p2pool_port,
                                'p2pool', 'TCP')
                    except defer.TimeoutError:
                        pass
                    except:
                        if p2pool.DEBUG:
                            log.err(None, 'UPnP error:')
                    yield deferral.sleep(random.expovariate(1 / 120))
            upnp_thread()

        # start listening for workers with a JSON-RPC server
        print 'Listening for workers on %r port %i...' % (
            worker_endpoint[0], worker_endpoint[1])

        if args.address_share_rate is not None:
            share_rate_type = 'address'
            share_rate = args.address_share_rate
        else:
            share_rate_type = 'miner'
            share_rate = args.miner_share_rate

        wb = work.WorkerBridge(node, my_pubkey_hash, args.donation_percentage,
                               merged_urls, args.worker_fee, args, pubkeys,
                               bitcoind, args.min_difficulty, share_rate,
                               share_rate_type)
        web_root = web.get_web_root(wb, datadir_path, bitcoind_getinfo_var,
                                    static_dir=args.web_static)
        caching_wb = worker_interface.CachingWorkerBridge(wb)
        worker_interface.WorkerInterface(caching_wb).attach_to(
            web_root, get_handler=lambda request: request.redirect('/static/'))
        web_serverfactory = server.Site(web_root)

        # A leading '{' byte means a stratum (JSON) client; anything else is
        # treated as an HTTP request for the web interface.
        serverfactory = switchprotocol.FirstByteSwitchFactory(
            {'{': stratum.StratumServerFactory(caching_wb)}, web_serverfactory)
        deferral.retry('Error binding to worker port:', traceback=False)(
            reactor.listenTCP)(worker_endpoint[1], serverfactory,
                               interface=worker_endpoint[0])

        with open(os.path.join(os.path.join(datadir_path, 'ready_flag')),
                  'wb') as f:
            pass

        print '    ...success!'
        print

        # done!
        print 'Started successfully!'
        print 'Go to http://127.0.0.1:%i/ to view graphs and statistics!' % (
            worker_endpoint[1], )
        if args.donation_percentage > 1.1:
            print '''Donating %.1f%% of work towards P2Pool's development. Thanks for the tip!''' % (
                args.donation_percentage, )
        elif args.donation_percentage < .9:
            print '''Donating %.1f%% of work towards P2Pool's development. Please donate to encourage further development of P2Pool!''' % (
                args.donation_percentage, )
        else:
            print '''Donating %.1f%% of work towards P2Pool's development. Thank you!''' % (
                args.donation_percentage, )
        print 'You can increase this amount with --give-author argument! (or decrease it, if you must)'
        print

        if hasattr(signal, 'SIGALRM'):
            # Watchdog: dump a stack trace if the event loop stalls.
            signal.signal(
                signal.SIGALRM,
                lambda signum, frame: reactor.callFromThread(
                    sys.stderr.write,
                    'Watchdog timer went off at:\n' +
                    ''.join(traceback.format_stack())))
            signal.siginterrupt(signal.SIGALRM, False)
            deferral.RobustLoopingCall(signal.alarm, 30).start(1)

        if args.irc_announce:
            from twisted.words.protocols import irc

            class IRCClient(irc.IRCClient):
                # Announces found blocks on the network's IRC channel.
                nickname = 'p2pool%02i' % (random.randrange(100), )
                channel = net.ANNOUNCE_CHANNEL

                def lineReceived(self, line):
                    if p2pool.DEBUG:
                        print repr(line)
                    irc.IRCClient.lineReceived(self, line)

                def signedOn(self):
                    self.in_channel = False
                    irc.IRCClient.signedOn(self)
                    self.factory.resetDelay()
                    self.join(self.channel)

                    @defer.inlineCallbacks
                    def new_share(share):
                        if not self.in_channel:
                            return
                        # Only announce fresh, genuinely block-solving shares.
                        if share.pow_hash <= share.header['bits'].target and abs(
                                share.timestamp - time.time()) < 10 * 60:
                            yield deferral.sleep(random.expovariate(1 / 60))
                            message = '\x02%s BLOCK FOUND by %s! %s%064x' % (
                                net.NAME.upper(),
                                bitcoin_data.script2_to_address(
                                    share.new_script, net.PARENT),
                                net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
                                share.header_hash)
                            if all('%x' % (share.header_hash, ) not in
                                   old_message
                                   for old_message in self.recent_messages):
                                self.say(self.channel, message)
                                self._remember_message(message)
                    self.watch_id = node.tracker.verified.added.watch(
                        new_share)
                    self.recent_messages = []

                def joined(self, channel):
                    self.in_channel = True

                def left(self, channel):
                    self.in_channel = False

                def _remember_message(self, message):
                    # Keep a bounded window for duplicate suppression.
                    self.recent_messages.append(message)
                    while len(self.recent_messages) > 100:
                        self.recent_messages.pop(0)

                def privmsg(self, user, channel, message):
                    if channel == self.channel:
                        self._remember_message(message)

                def connectionLost(self, reason):
                    node.tracker.verified.added.unwatch(self.watch_id)
                    print 'IRC connection lost:', reason.getErrorMessage()

            class IRCClientFactory(protocol.ReconnectingClientFactory):
                protocol = IRCClient
            reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory(),
                               bindAddress=(worker_endpoint[0], 0))

        @defer.inlineCallbacks
        def status_thread():
            # Prints a status line every 3s when it changes (or every 15s).
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(
                        node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues()
                            if peer.incoming),
                    ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()),
                                              len(reactor.getWriters()))
                         if p2pool.DEBUG else '')

                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work'] / dt for datum in datums)
                    my_shares_per_s = sum(
                        datum['work'] / dt /
                        bitcoin_data.target_to_average_attempts(
                            datum['share_target']) for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(
                            sum(1 for datum in datums if datum['dead']),
                            len(datums), 0.95),
                        math.format_dt(1 / my_shares_per_s)
                        if my_shares_per_s else '???',
                    )

                    if height > 2:
                        (stale_orphan_shares,
                         stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(
                            node.tracker, node.best_share_var.value,
                            min(60 * 60 // net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(
                            node.tracker, node.best_share_var.value,
                            min(height - 1, 60 * 60 // net.SHARE_PERIOD)) / (
                                1 - stale_prop)

                        paystr = ''
                        paytot = 0.0
                        for i in range(len(pubkeys.keys)):
                            curtot = node.get_current_txouts().get(
                                bitcoin_data.pubkey_hash_to_script2(
                                    pubkeys.keys[i]), 0)
                            paytot += curtot * 1e-8
                            paystr += "(%.4f)" % (curtot * 1e-8, )
                        paystr += "=%.4f" % (paytot, )
                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %s %s' % (
                            shares, stale_orphan_shares, stale_doa_shares,
                            math.format_binomial_conf(
                                stale_orphan_shares + stale_doa_shares,
                                shares, 0.95),
                            math.format_binomial_conf(
                                stale_orphan_shares + stale_doa_shares,
                                shares, 0.95,
                                lambda x: (1 - x) / (1 - stale_prop)),
                            paystr, net.PARENT.SYMBOL,
                        )
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100 * stale_prop,
                            math.format_dt(
                                2**256 /
                                node.bitcoind_work.value['bits'].target /
                                real_att_s),
                        )

                    for warning in p2pool_data.get_warnings(
                            node.tracker, node.best_share_var.value, net,
                            bitcoind_getinfo_var.value,
                            node.bitcoind_work.value):
                        print >> sys.stderr, '#' * 40
                        print >> sys.stderr, '>>> Warning: ' + warning
                        print >> sys.stderr, '#' * 40

                    if gc.garbage:
                        print '%i pieces of uncollectable cyclic garbage! Types: %r' % (
                            len(gc.garbage), map(type, gc.garbage))

                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()
        status_thread()
    except:
        reactor.stop()
        log.err(None, 'Fatal error:')
def stopReactor(ign):
    """Callback that unconditionally stops the reactor; ignores its arg."""
    reactor.stop()
def callback(spider, reason): stats = spider.crawler.stats.get_stats() # collect/log stats? # stop the reactor reactor.stop()
def clientConnectionFailed(self, connector, reason):
    """Connection attempt failed: log the reason and stop the reactor."""
    log.msg("cannot connect to server: %r\n" % reason.getErrorMessage())
    reactor.stop()
def _checkIterationTimeout(self, reactor): timeout = [] reactor.iterationTimeout.addCallback(timeout.append) reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop()) reactor.run() return timeout[0]
def done(self, *args):
    """Close the WebSocket session and stop the reactor."""
    self.sendClose()
    reactor.stop()
def stop(self, res):
    """Callback passthrough: stop the reactor and forward `res`."""
    reactor.stop()
    return res
def cbShutdown(ignored):
    """Final callback: stop the reactor; the argument is discarded."""
    reactor.stop()
def clientConnectionLost(self, connector, reason):
    """Connection closed: log the reason and stop the reactor."""
    log.msg("connection lost: %r" % reason.getErrorMessage())
    reactor.stop()
def onQuit(self):
    """Quit handler: announce and stop the reactor."""
    print "Quit!"
    reactor.stop()
if config.password is not None: rc.auth(config.password) rc.select(config.dbid) yield rc.set("foo", {'yes', 'no'}) yield rc.expire("foo", 2) import time time.sleep(1) v = yield rc.get("foo") print "1, foo:", repr(v) time.sleep(1) v = yield rc.get("foo") print "2, foo:", repr(v) time.sleep(1) v = yield rc.get("foo") print "3, foo:", repr(v) msgid = "68ee8fe5-d5c8-4502-906a-c6b6b9fc2bed" v = yield rc.get(msgid) print "%s:" % msgid, repr(v) yield rc.disconnect() # this only runs if the module was *not* imported if __name__ == '__main__': main().addCallback(lambda ign: reactor.stop()) reactor.run()
def handleAllResults(results, ports): for port, resultInfo in zip(ports, results): success, result = resultInfo if success: print "Connected to port %i" % port reactor.stop()
def all_jobs_done(result):
    """Final callback: report completion and stop the reactor."""
    print(str(result))
    print('all jobs are done!')
    reactor.stop()
#!/usr/bin/env python # -*- coding: utf-8 -*- # # created by Lipson on 2018/4/6. # email to [email protected] # from scrapy.crawler import CrawlerProcess, CrawlerRunner from scrapy.utils.project import get_project_settings from twisted.internet import reactor from avcrawl.spiders.javVideo import VideoSpider runner = CrawlerRunner(get_project_settings()) d = runner.crawl(VideoSpider) d.addBoth(lambda _: reactor.stop()) reactor.run() # process = CrawlerProcess(get_project_settings()) # process.crawl(VideoSpider) # process.start()
def runIt():
    """Run the wrapped coroutine (closure over func/args/kw) and always
    stop the reactor afterwards."""
    try:
        yield func(*args, **kw)
    finally:
        reactor.stop()
try: bomberman_coins.process_api_futures_message(msg) except: logger.log('ERROR', traceback.format_exc()) try: binance_socket.start_user_socket(process_api_spot_message) if config['app']['market_type'] == BombermanCoins.MARKET_TYPE_FUTURES: # there is no method for listening future changes in binance socket manager binance_socket._start_futures_socket( binance_client._request_futures_api('post', 'listenKey')['listenKey'], process_api_futures_message, ) binance_socket.start() discord_client.run(config['discord']['token'], bot=False) except KeyboardInterrupt: exit(0) except: logger.log('TERMINATED', traceback.format_exc()) exit(1) finally: binance_socket.close() try: reactor.stop() # type: ignore except ReactorNotRunning: pass
def err(*args):
    """Errback: print whatever was received and stop the reactor."""
    print(args)
    reactor.stop()
def onDisconnect(self):
    """Session ended: stop the reactor if it is still running."""
    if reactor.running:
        reactor.stop()
def signal_handler(self, signal, frame):
    """SIGINT handler: flush pending data to the DB, then stop.

    NOTE(review): the `signal` parameter shadows the stdlib `signal`
    module inside this method -- harmless here, but worth renaming.
    """
    log.warning("SIGINT Detected, shutting down")
    # Force a final (flush=True) import before exiting.
    self.do_import(self.dbi, True)
    reactor.stop()
def all_done(arg):
    """Final callback: stop the reactor; the argument is ignored."""
    # print(arg)
    reactor.stop()
def onDisconnect(self):
    """Session ended: announce and stop the reactor."""
    print("disconnected")
    reactor.stop()
class MyClientFactory(protocol.ClientFactory):
    """Client factory that shuts the reactor down as soon as the
    connection either fails to establish or is lost."""

    protocol = MyClientProtocol

    def clientConnectionFailed(self, connector, reason):
        reactor.stop()

    def clientConnectionLost(self, connector, reason):
        reactor.stop()
def connectionFailed(f):
    """Errback: print the failure and stop the reactor."""
    print "Connection Failed:", f
    reactor.stop()