# Transition watcher on the node's work variable: receives (before, after)
# snapshots of the work dict. Uses `yield`, so it needs inlineCallbacks.
@defer.inlineCallbacks
def _(before, after):
    # trigger LP if version/previous_block/bits changed or transactions changed from nothing
    if any(before[x] != after[x] for x in ['version', 'previous_block', 'bits']) or (
            not before['transactions'] and after['transactions']):
        self.new_work_event.happened()

    # refetch block template if best block changed
    if after.get('skipping', -2) >= 0:
        time.sleep(0.5) # note: blocks the reactor for half a second
        self.node.dashd_work.set((yield helper.getwork(
            self.dashd, self.node.dashd_work.value['use_getblocktemplate'])))
    elif after.get('skipping', -2) == -1:
        # revert if dashd doesn't have the new block
        h = yield self.dashd.rpc_getblockheader((yield self.dashd.rpc_getbestblockhash()))
        self.node.best_block_header.set(dict(
            version=h['version'],
            previous_block=int(h['previousblockhash'], 16),
            merkle_root=int(h['merkleroot'], 16),
            timestamp=h['time'],
            bits=dash_data.FloatingIntegerType().unpack(h['bits'].decode('hex')[::-1])
                if isinstance(h['bits'], (str, unicode))
                else dash_data.FloatingInteger(h['bits']),
            nonce=h['nonce']))
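# Editor's sketch (not in the original source): for reference, a getblockheader
# JSON-RPC response carries roughly these fields, matching the keys read above:
#
#   h = {'version': 536870912,
#        'previousblockhash': '000000000000000b...',  # hex string -> int(x, 16)
#        'merkleroot': '4a5e1e4baab89f3a...',         # hex string -> int(x, 16)
#        'time': 1490000000, 'bits': '1b0404cb', 'nonce': 12345}
#
# 'bits' arrives as a big-endian hex string, so it is hex-decoded and
# byte-reversed before FloatingIntegerType().unpack(); a numeric 'bits' is
# wrapped in FloatingInteger directly.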
@defer.inlineCallbacks
def main(args, net, datadir_path, merged_urls, worker_endpoint):
    try:
        print 'p2pool (version %s)' % (p2pool.__version__,)
        print

        @defer.inlineCallbacks
        def connect_p2p():
            # connect to dashd over dash-p2p
            print '''Testing dashd P2P connection to '%s:%s'...''' % (args.dashd_address, args.dashd_p2p_port)
            factory = dash_p2p.ClientFactory(net.PARENT)
            reactor.connectTCP(args.dashd_address, args.dashd_p2p_port, factory)
            def long():
                print '''    ...taking a while. Common reasons for this include all of dashd's connection slots being used...'''
            long_dc = reactor.callLater(5, long)
            yield factory.getProtocol() # waits until handshake is successful
            if not long_dc.called:
                long_dc.cancel()
            print '    ...success!'
            print
            defer.returnValue(factory)

        if args.testnet: # establish p2p connection first if testnet so dashd can work without connections
            factory = yield connect_p2p()

        # connect to dashd over JSON-RPC and do initial getmemorypool
        url = '%s://%s:%i/' % ('https' if args.dashd_rpc_ssl else 'http', args.dashd_address, args.dashd_rpc_port)
        print '''Testing dashd RPC connection to '%s' with username '%s'...''' % (url, args.dashd_rpc_username)
        dashd = jsonrpc.HTTPProxy(url, dict(Authorization='Basic ' + base64.b64encode(args.dashd_rpc_username + ':' + args.dashd_rpc_password)), timeout=30)
        yield helper.check(dashd, net)
        temp_work = yield helper.getwork(dashd, net)

        dashd_getinfo_var = variable.Variable(None)
        @defer.inlineCallbacks
        def poll_warnings():
            dashd_getinfo_var.set((yield deferral.retry('Error while calling getinfo:')(dashd.rpc_getinfo)()))
        yield poll_warnings()
        deferral.RobustLoopingCall(poll_warnings).start(20*60)

        print '    ...success!'
        print '    Current block hash: %x' % (temp_work['previous_block'],)
        print '    Current block height: %i' % (temp_work['height'] - 1,)
        print

        if not args.testnet:
            factory = yield connect_p2p()

        print 'Determining payout address...'

        # Weighted pool of payout pubkey hashes, defined before its first use
        # below; used mainly by dynamic address mode. Note these are class-level
        # (shared) attributes; the flow below only ever keeps one live pool.
        class keypool(object):
            keys = []
            keyweights = []
            stamp = time.time()
            payouttotal = 0.0

            def addkey(self, n):
                self.keys.append(n)
                self.keyweights.append(random.uniform(0, 100.0))

            def delkey(self, n):
                try:
                    i = self.keys.index(n)
                    self.keys.pop(i)
                    self.keyweights.pop(i)
                except ValueError:
                    pass

            def weighted(self):
                # pick an index with probability proportional to its weight
                choice = random.uniform(0, sum(self.keyweights))
                tot = 0.0
                ind = 0
                for w in self.keyweights:
                    tot += w
                    if tot >= choice:
                        return ind
                    ind += 1
                return ind

            def popleft(self):
                if len(self.keys) > 0:
                    self.keys.pop(0)
                if len(self.keyweights) > 0:
                    self.keyweights.pop(0)

            def updatestamp(self, n):
                self.stamp = n

            def paytotal(self):
                self.payouttotal = 0.0
                for i in range(len(pubkeys.keys)):
                    self.payouttotal += node.get_current_txouts().get(
                        dash_data.pubkey_hash_to_script2(pubkeys.keys[i]), 0)*1e-8
                return self.payouttotal

            def getpaytotal(self):
                return self.payouttotal

        pubkeys = keypool()
        if args.pubkey_hash is None and args.address != 'dynamic':
            address_path = os.path.join(datadir_path, 'cached_payout_address')
            if os.path.exists(address_path):
                with open(address_path, 'rb') as f:
                    address = f.read().strip('\r\n')
                print '    Loaded cached address: %s...' % (address,)
            else:
                address = None
            if address is not None:
                res = yield deferral.retry('Error validating cached address:', 5)(lambda: dashd.rpc_validateaddress(address))()
                if not res['isvalid'] or not res['ismine']:
                    print '    Cached address is either invalid or not controlled by local dashd!'
                    address = None
            if address is None:
                print '    Getting payout address from dashd...'
                address = yield deferral.retry('Error getting payout address from dashd:', 5)(lambda: dashd.rpc_getaccountaddress('p2pool'))()
                with open(address_path, 'wb') as f:
                    f.write(address)
            my_pubkey_hash = dash_data.address_to_pubkey_hash(address, net.PARENT)
            print '    ...success! Payout address:', dash_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        elif args.address != 'dynamic':
            my_pubkey_hash = args.pubkey_hash
            print '    ...success! Payout address:', dash_data.pubkey_hash_to_address(my_pubkey_hash, net.PARENT)
            print
            pubkeys.addkey(my_pubkey_hash)
        else:
            print '    Entering dynamic address mode.'
            if args.numaddresses < 2:
                print '    ERROR: Can not use fewer than 2 addresses in dynamic mode. Resetting to 2.'
                args.numaddresses = 2
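            # Editor's illustration (not in the original): addkey() assigns each
            # address a random weight in [0, 100); weighted() then draws an index
            # with probability proportional to weight. E.g. with keyweights
            # [10.0, 30.0, 60.0], a uniform draw over [0, 100) selects key
            # 0/1/2 about 10%/30%/60% of the time.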
            pubkeys = keypool() # fresh pool for the dynamically fetched addresses
            for i in range(args.numaddresses):
                address = yield deferral.retry('Error getting a dynamic address from dashd:', 5)(lambda: dashd.rpc_getnewaddress('p2pool'))()
                new_pubkey = dash_data.address_to_pubkey_hash(address, net.PARENT)
                pubkeys.addkey(new_pubkey)
            pubkeys.updatestamp(time.time())
            my_pubkey_hash = pubkeys.keys[0]
            for i in range(len(pubkeys.keys)):
                print '    ...payout %d: %s' % (i, dash_data.pubkey_hash_to_address(pubkeys.keys[i], net.PARENT),)

        print "Loading shares..."
        shares = {}
        known_verified = set()
        def share_cb(share):
            share.time_seen = 0 # XXX
            shares[share.hash] = share
            if len(shares) % 1000 == 0 and shares:
                print "    %i" % (len(shares),)
        ss = p2pool_data.ShareStore(os.path.join(datadir_path, 'shares.'), net, share_cb, known_verified.add)
        print "    ...done loading %i shares (%i verified)!" % (len(shares), len(known_verified))
        print

        print 'Initializing work...'
        node = p2pool_node.Node(factory, dashd, shares.values(), known_verified, net)
        yield node.start()

        # drop stored shares that the tracker no longer knows about
        for share_hash in shares:
            if share_hash not in node.tracker.items:
                ss.forget_share(share_hash)
        for share_hash in known_verified:
            if share_hash not in node.tracker.verified.items:
                ss.forget_verified_share(share_hash)
        node.tracker.removed.watch(lambda share: ss.forget_share(share.hash))
        node.tracker.verified.removed.watch(lambda share: ss.forget_verified_share(share.hash))

        def save_shares():
            for share in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 2*net.CHAIN_LENGTH)):
                ss.add_share(share)
                if share.hash in node.tracker.verified.items:
                    ss.add_verified_hash(share.hash)
        deferral.RobustLoopingCall(save_shares).start(60)

        print '    ...success!'
        print

        print 'Joining p2pool network using port %i...' % (args.p2pool_port,)
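        # Editor's note (inferred from the save/load code below): the 'addrs'
        # file is JSON of [[host, port], [services, first_seen, last_seen]]
        # pairs; JSON round-trips tuples as lists, hence the tuple(k) rebuild
        # on load. Bootstrap entries default to (0, now, now).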
        @defer.inlineCallbacks
        def parse(host):
            port = net.P2P_PORT
            if ':' in host:
                host, port_str = host.split(':')
                port = int(port_str)
            defer.returnValue(((yield reactor.resolve(host)), port))

        addrs = {}
        if os.path.exists(os.path.join(datadir_path, 'addrs')):
            try:
                with open(os.path.join(datadir_path, 'addrs'), 'rb') as f:
                    addrs.update(dict((tuple(k), v) for k, v in json.loads(f.read())))
            except:
                print >>sys.stderr, 'error parsing addrs'
        for addr_df in map(parse, net.BOOTSTRAP_ADDRS):
            try:
                addr = yield addr_df
                if addr not in addrs:
                    addrs[addr] = (0, time.time(), time.time())
            except:
                log.err()

        connect_addrs = set()
        for addr_df in map(parse, args.p2pool_nodes):
            try:
                connect_addrs.add((yield addr_df))
            except:
                log.err()

        node.p2p_node = p2pool_node.P2PNode(node,
            port=args.p2pool_port,
            max_incoming_conns=args.p2pool_conns,
            addr_store=addrs,
            connect_addrs=connect_addrs,
            desired_outgoing_conns=args.p2pool_outgoing_conns,
            advertise_ip=args.advertise_ip,
            external_ip=args.p2pool_external_ip,
        )
        node.p2p_node.start()

        def save_addrs():
            with open(os.path.join(datadir_path, 'addrs'), 'wb') as f:
                f.write(json.dumps(node.p2p_node.addr_store.items()))
        deferral.RobustLoopingCall(save_addrs).start(60)

        print '    ...success!'
        print

        if args.upnp:
            @defer.inlineCallbacks
            def upnp_thread():
                while True:
                    try:
                        is_lan, lan_ip = yield ipdiscover.get_local_ip()
                        if is_lan:
                            pm = yield portmapper.get_port_mapper()
                            yield pm._upnp.add_port_mapping(lan_ip, args.p2pool_port, args.p2pool_port, 'p2pool', 'TCP')
                    except defer.TimeoutError:
                        pass
                    except:
                        if p2pool.DEBUG:
                            log.err(None, 'UPnP error:')
                    yield deferral.sleep(random.expovariate(1/120)) # mean 120 s; relies on __future__ true division
            upnp_thread()

        # start listening for workers with a JSON-RPC server
        print 'Listening for workers on %r port %i...' % (worker_endpoint[0], worker_endpoint[1])

        wb = work.WorkerBridge(node, my_pubkey_hash, args.donation_percentage, merged_urls, args.worker_fee, args, pubkeys, dashd)
        web_root = web.get_web_root(wb, datadir_path, dashd_getinfo_var)
        caching_wb = worker_interface.CachingWorkerBridge(wb)
        worker_interface.WorkerInterface(caching_wb).attach_to(web_root, get_handler=lambda request: request.redirect('/static/'))
        web_serverfactory = server.Site(web_root)

        serverfactory = switchprotocol.FirstByteSwitchFactory({'{': stratum.StratumServerFactory(caching_wb)}, web_serverfactory)
        deferral.retry('Error binding to worker port:', traceback=False)(reactor.listenTCP)(worker_endpoint[1], serverfactory, interface=worker_endpoint[0])

        with open(os.path.join(datadir_path, 'ready_flag'), 'wb') as f:
            pass

        print '    ...success!'
        print

        # done!
        print 'Started successfully!'
        print 'Go to http://127.0.0.1:%i/ to view graphs and statistics!' % (worker_endpoint[1],)
        if args.donation_percentage > 1.1:
            print '''Donating %.1f%% of work towards P2Pool's development. Thanks for the tip!''' % (args.donation_percentage,)
        elif args.donation_percentage < .9:
            print '''Donating %.1f%% of work towards P2Pool's development. Please donate to encourage further development of P2Pool!''' % (args.donation_percentage,)
        else:
            print '''Donating %.1f%% of work towards P2Pool's development. Thank you!''' % (args.donation_percentage,)
        print 'You can increase this amount with --give-author argument! (or decrease it, if you must)'
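        # Editor's note (not in the original): the block below arms a watchdog
        # where SIGALRM is supported. RobustLoopingCall re-arms signal.alarm(30)
        # once a second, so the alarm never fires while the reactor is healthy;
        # if the reactor thread stalls for ~30 s the pending SIGALRM fires and
        # the handler dumps a stack trace to stderr to help diagnose the hang.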
        print

        if hasattr(signal, 'SIGALRM'):
            signal.signal(signal.SIGALRM, lambda signum, frame: reactor.callFromThread(
                sys.stderr.write, 'Watchdog timer went off at:\n' + ''.join(traceback.format_stack())
            ))
            signal.siginterrupt(signal.SIGALRM, False)
            deferral.RobustLoopingCall(signal.alarm, 30).start(1)

        if args.irc_announce:
            from twisted.words.protocols import irc
            class IRCClient(irc.IRCClient):
                nickname = 'p2pool%02i' % (random.randrange(100),)
                channel = net.ANNOUNCE_CHANNEL
                def lineReceived(self, line):
                    if p2pool.DEBUG:
                        print repr(line)
                    irc.IRCClient.lineReceived(self, line)
                def signedOn(self):
                    self.in_channel = False
                    irc.IRCClient.signedOn(self)
                    self.factory.resetDelay()
                    self.join(self.channel)
                    @defer.inlineCallbacks
                    def new_share(share):
                        if not self.in_channel:
                            return
                        if share.pow_hash <= share.header['bits'].target and abs(share.timestamp - time.time()) < 10*60:
                            yield deferral.sleep(random.expovariate(1/60))
                            message = '\x02%s BLOCK FOUND by %s! %s%064x' % (net.NAME.upper(), dash_data.script2_to_address(share.new_script, net.PARENT), net.PARENT.BLOCK_EXPLORER_URL_PREFIX, share.header_hash)
                            if all('%x' % (share.header_hash,) not in old_message for old_message in self.recent_messages):
                                self.say(self.channel, message)
                                self._remember_message(message)
                    self.watch_id = node.tracker.verified.added.watch(new_share)
                    self.recent_messages = []
                def joined(self, channel):
                    self.in_channel = True
                def left(self, channel):
                    self.in_channel = False
                def _remember_message(self, message):
                    self.recent_messages.append(message)
                    while len(self.recent_messages) > 100:
                        self.recent_messages.pop(0)
                def privmsg(self, user, channel, message):
                    if channel == self.channel:
                        self._remember_message(message)
                def connectionLost(self, reason):
                    node.tracker.verified.added.unwatch(self.watch_id)
                    print 'IRC connection lost:', reason.getErrorMessage()
            class IRCClientFactory(protocol.ReconnectingClientFactory):
                protocol = IRCClient
            reactor.connectTCP("irc.freenode.net", 6667, IRCClientFactory(), bindAddress=(worker_endpoint[0], 0))
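        # Editor's note (not in the original): status_thread below rebuilds a
        # status line every 3 s and prints it when it changes (or every 15 s).
        # The figures it derives:
        #   local rate    = sum(work)/dt over recent datums,
        #   time-to-share = 1 / sum(work/dt/average_attempts(share_target)),
        #   time-to-block = (2**256 / block_target) / pool_rate.
        # Example: at block target ~2**224 a block needs ~2**32 = 4.3e9 attempts,
        # so a 1 GH/s pool expects a block roughly every 4.3 seconds.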
        @defer.inlineCallbacks
        def status_thread():
            last_str = None
            last_time = 0
            while True:
                yield deferral.sleep(3)
                try:
                    height = node.tracker.get_height(node.best_share_var.value)
                    this_str = 'P2Pool: %i shares in chain (%i verified/%i total) Peers: %i (%i incoming)' % (
                        height,
                        len(node.tracker.verified.items),
                        len(node.tracker.items),
                        len(node.p2p_node.peers),
                        sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
                    ) + (' FDs: %i R/%i W' % (len(reactor.getReaders()), len(reactor.getWriters())) if p2pool.DEBUG else '')

                    datums, dt = wb.local_rate_monitor.get_datums_in_last()
                    my_att_s = sum(datum['work']/dt for datum in datums)
                    my_shares_per_s = sum(datum['work']/dt/dash_data.target_to_average_attempts(datum['share_target']) for datum in datums)
                    this_str += '\n Local: %sH/s in last %s Local dead on arrival: %s Expected time to share: %s' % (
                        math.format(int(my_att_s)),
                        math.format_dt(dt),
                        math.format_binomial_conf(sum(1 for datum in datums if datum['dead']), len(datums), 0.95),
                        math.format_dt(1/my_shares_per_s) if my_shares_per_s else '???',
                    )

                    if height > 2:
                        (stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
                        stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, min(60*60//net.SHARE_PERIOD, height))
                        real_att_s = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, min(height - 1, 60*60//net.SHARE_PERIOD)) / (1 - stale_prop)

                        paystr = ''
                        paytot = 0.0
                        for i in range(len(pubkeys.keys)):
                            curtot = node.get_current_txouts().get(dash_data.pubkey_hash_to_script2(pubkeys.keys[i]), 0)
                            paytot += curtot*1e-8
                            paystr += "(%.4f)" % (curtot*1e-8,)
                        paystr += "=%.4f" % (paytot,)

                        this_str += '\n Shares: %i (%i orphan, %i dead) Stale rate: %s Efficiency: %s Current payout: %s %s' % (
                            shares, stale_orphan_shares, stale_doa_shares,
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95),
                            math.format_binomial_conf(stale_orphan_shares + stale_doa_shares, shares, 0.95, lambda x: (1 - x)/(1 - stale_prop)),
                            paystr,
                            net.PARENT.SYMBOL,
                        )
                        this_str += '\n Pool: %sH/s Stale rate: %.1f%% Expected time to block: %s' % (
                            math.format(int(real_att_s)),
                            100*stale_prop,
                            math.format_dt(2**256 / node.dashd_work.value['bits'].target / real_att_s),
                        )

                    for warning in p2pool_data.get_warnings(node.tracker, node.best_share_var.value, net, dashd_getinfo_var.value, node.dashd_work.value):
                        print >>sys.stderr, '#'*40
                        print >>sys.stderr, '>>> Warning: ' + warning
                        print >>sys.stderr, '#'*40

                    if gc.garbage:
                        print '%i pieces of uncollectable cyclic garbage! Types: %r' % (len(gc.garbage), map(type, gc.garbage))

                    if this_str != last_str or time.time() > last_time + 15:
                        print this_str
                        last_str = this_str
                        last_time = time.time()
                except:
                    log.err()
        status_thread()
    except:
        reactor.stop()
        log.err(None, 'Fatal error:')
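# Editor's note (assumption about the surrounding codebase): main() is not
# invoked directly; a launcher (run_p2pool.py in upstream p2pool) parses the
# command line and schedules this coroutine under the Twisted reactor. The
# final except clause stops the reactor so a fatal startup error terminates
# the process instead of hanging.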