class ChainService(eth_ChainService):
    """
    Manages the chain and requests to it.

    Consensus-enabled variant: wires an HDC wire protocol, a ConsensusManager
    and a ProposalLock on top of the base eth ChainService.
    """

    # required by BaseService
    name = 'chain'
    default_config = dict(
        eth=dict(network_id=0, genesis='', pruning=-1,
                 block=ethereum_config.default_config),
        hdc=dict(validators=[]),
    )

    # required by WiredService
    wire_protocol = HDCProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None

    block_queue_size = 1024
    transaction_queue_size = 1024

    # running totals used by gpsec() for gas-per-second stats
    processed_gas = 0
    processed_elapsed = 0

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        # A database is either pruning or non-pruning for its whole lifetime;
        # the marker keys below detect (and reject) a mode switch.
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")

        # Refuse to reuse a database that was created for another network.
        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception(
                    "This database was initialized with network_id {} "
                    "and can not be used when connecting to network_id {}".
                    format(db_network_id, sce['network_id']))
        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None

        # NOTE: deliberately skips eth_ChainService.__init__ and calls
        # WiredService directly; this class sets up its own state below.
        WiredService.__init__(self, app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env, new_head_cb=self._on_new_head, coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash()

        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.BoundedSemaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)

        # Consensus
        self.consensus_contract = ConsensusContract(
            validators=self.config['hdc']['validators'])
        self.consensus_manager = ConsensusManager(self, self.consensus_contract,
                                                  self.consensus_privkey)

        # lock blocks that where proposed, so they don't get mutated
        self.proposal_lock = ProposalLock()
        assert not self.proposal_lock.is_locked()

    def start(self):
        super(ChainService, self).start()
        self.consensus_manager.process()
        gevent.spawn(self.announce)

    def announce(self):
        """Keep sending ready messages until the consensus manager is ready."""
        while not self.consensus_manager.is_ready:
            self.consensus_manager.send_ready()
            gevent.sleep(0.5)

    # interface accessed by ConensusManager

    def log(self, msg, *args, **kargs):
        log.debug(msg, *args, **kargs)

    @property
    def consensus_privkey(self):
        return self.app.services.accounts[0].privkey

    def sign(self, obj):
        assert isinstance(obj, Signed)
        obj.sign(self.consensus_privkey)

    @property
    def now(self):
        return time.time()

    def setup_alarm(self, delay, cb, *args):
        """Schedule `cb(*args)` after `delay` seconds in its own greenlet."""
        log.debug('setting up alarm')

        def _trigger():
            gevent.sleep(delay)
            log.debug('alarm triggered')
            cb(*args)
        gevent.spawn(_trigger)

    def setup_transaction_alarm(self, cb, *args):
        """Invoke `cb(*args)` once on the next new head candidate (one-shot)."""
        log.debug('setting up alarm')

        class Trigger(object):
            def __call__(me, blk):
                # one-shot: deregister before invoking the callback
                self.on_new_head_candidate_cbs.remove(me)
                log.debug('transaction alarm triggered')
                cb(*args)
        self.on_new_head_candidate_cbs.append(Trigger())

    def commit_block(self, blk):
        """Add `blk` to the chain under the transaction lock.

        Returns the `chain.add_block` result. The lock is released even if
        `add_block` raises (try/finally), so the semaphore can not leak.
        """
        assert isinstance(blk.header, HDCBlockHeader)
        log.debug('trying to acquire transaction lock')
        self.add_transaction_lock.acquire()
        try:
            success = self.chain.add_block(blk, forward_pending_transactions=True)
        finally:
            self.add_transaction_lock.release()
        log.debug('transaction lock release')
        log.info('new head', head=self.chain.head)
        return success

    def link_block(self, t_block):
        """Deserialize and validate `t_block`; return the Block or None.

        BUGFIX: the early `return` (unknown/orphan/invalid block) previously
        left `add_transaction_lock` acquired forever, deadlocking all later
        commit_block/link_block calls. The lock is now released on every
        exit path via try/finally.
        """
        assert isinstance(t_block.header, HDCBlockHeader)
        self.add_transaction_lock.acquire()
        try:
            block = self._link_block(t_block)
            if not block:
                return
            assert block.get_parent() == self.chain.head, (block.get_parent(),
                                                           self.chain.head)
            assert block.header.coinbase == t_block.header.coinbase
            return block
        finally:
            self.add_transaction_lock.release()

    def _link_block(self, t_block):
        """Return the deserialized Block, True if already a Block, or None
        when the block is known, an orphan, or fails validation."""
        assert isinstance(t_block.header, HDCBlockHeader)
        if t_block.header.hash in self.chain:
            log.warn('known block', block=t_block)
            return
        if t_block.header.prevhash not in self.chain:
            log.warn('missing parent', block=t_block, head=self.chain.head,
                     prevhash=phx(t_block.header.prevhash))
            return
        if isinstance(t_block, Block):
            return True  # already deserialized
        try:  # deserialize
            st = time.time()
            block = t_block.to_block(env=self.chain.env)
            elapsed = time.time() - st
            log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(),
                      gas_used=block.gas_used,
                      gpsec=self.gpsec(block.gas_used, elapsed))
            assert block.header.check_pow()
        except processblock.InvalidTransaction as e:
            log.warn('invalid transaction', block=t_block, error=e,
                     FIXME='ban node')
            return
        except VerificationFailed as e:
            log.warn('verification failed', error=e, FIXME='ban node')
            return
        return block

    def add_transaction(self, tx, origin=None, force_broadcast=False):
        """
        Warning: Locking proposal_lock may block incoming events which are
        necessary to unlock! I.e. votes / blocks! Take care!
        """
        self.consensus_manager.log('add_transaction',
                                   blk=self.chain.head_candidate,
                                   lock=self.proposal_lock)
        log.debug('add_transaction', lock=self.proposal_lock)
        block = self.proposal_lock.block
        self.proposal_lock.acquire()
        self.consensus_manager.log('add_transaction acquired lock',
                                   lock=self.proposal_lock)
        assert not hasattr(self.chain.head_candidate, 'should_be_locked')
        super(ChainService, self).add_transaction(tx, origin, force_broadcast)
        if self.proposal_lock.is_locked():  # can be unlock if we are at a new block
            self.proposal_lock.release(if_block=block)
        log.debug('added transaction',
                  num_txs=self.chain.head_candidate.num_transactions())

    def _on_new_head(self, blk):
        self.release_proposal_lock(blk)
        super(ChainService, self)._on_new_head(blk)

    def set_proposal_lock(self, blk):
        """Lock the proposed block `blk` so it does not get mutated."""
        log.debug('set_proposal_lock', locked=self.proposal_lock)
        if not self.proposal_lock.is_locked():
            self.proposal_lock.acquire()
        self.proposal_lock.block = blk
        assert self.proposal_lock.is_locked()  # can not be aquired
        log.debug('did set_proposal_lock', lock=self.proposal_lock)

    def release_proposal_lock(self, blk):
        """Release the proposal lock once the chain advanced past its height."""
        log.debug('releasing proposal_lock', lock=self.proposal_lock)
        if self.proposal_lock.is_locked():
            if self.proposal_lock.height <= blk.number:
                assert self.chain.head_candidate.number > self.proposal_lock.height
                assert not hasattr(self.chain.head_candidate, 'should_be_locked')
                assert not isinstance(self.chain.head_candidate.header,
                                      HDCBlockHeader)
                self.proposal_lock.release()
                log.debug('released')
                assert not self.proposal_lock.is_locked()
            else:
                log.debug('could not release', head=blk, lock=self.proposal_lock)

    ###########################################################################

    @property
    def is_syncing(self):
        return self.consensus_manager.synchronizer.is_syncing

    @property
    def is_mining(self):
        return self.chain.coinbase in self.config['hdc']['validators']

    # wire protocol receivers ###########

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received', count=len(transactions),
                  remote_id=proto)

        def _add_txs():
            for tx in transactions:
                self.add_transaction(tx, origin=proto)
        # spawned so the locks in add_transaction won't lock the connection
        gevent.spawn(_add_txs)

    # blocks / proposals ################

    def on_receive_getblockproposals(self, proto, blocknumbers):
        log.debug('----------------------------------')
        log.debug("on_receive_getblockproposals", count=len(blocknumbers))
        found = []
        for i, height in enumerate(blocknumbers):
            if i == self.wire_protocol.max_getproposals_count:
                break
            assert isinstance(height, int)  # integers
            assert i == 0 or height > blocknumbers[i - 1]  # sorted
            if height > self.chain.head.number:
                log.debug("unknown block requested", height=height)
                break
            rlp_data = self.consensus_manager.get_blockproposal_rlp_by_height(
                height)
            assert isinstance(rlp_data, bytes)
            found.append(rlp_data)
        if found:
            log.debug("found", count=len(found))
            proto.send_blockproposals(*found)

    def on_receive_blockproposals(self, proto, proposals):
        log.debug('----------------------------------')
        self.consensus_manager.log('received proposals', sender=proto)
        log.debug("recv proposals", num=len(proposals), remote_id=proto)
        self.consensus_manager.synchronizer.receive_blockproposals(proposals)

    def on_receive_newblockproposal(self, proto, proposal):
        if proposal.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        self.consensus_manager.log('receive proposal', sender=proto)
        log.debug("recv newblockproposal", proposal=proposal, remote_id=proto)
        # self.synchronizer.receive_newproposal(proto, proposal)
        assert isinstance(proposal, BlockProposal)
        assert isinstance(proposal.block.header, HDCBlockHeader)
        isvalid = self.consensus_manager.add_proposal(proposal, proto)
        if isvalid:
            self.broadcast(proposal, origin=proto)
        self.consensus_manager.process()

    def on_receive_votinginstruction(self, proto, votinginstruction):
        if votinginstruction.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        log.debug("recv votinginstruction", proposal=votinginstruction,
                  remote_id=proto)
        # self.synchronizer.receive_newproposal(proto, proposal)
        isvalid = self.consensus_manager.add_proposal(votinginstruction, proto)
        if isvalid:
            self.broadcast(votinginstruction, origin=proto)
        self.consensus_manager.process()

    # votes

    def on_receive_vote(self, proto, vote):
        self.consensus_manager.log('on_receive_vote', v=vote)
        if vote.hash in self.broadcast_filter:
            log.debug('filtered!!!')
            return
        log.debug('----------------------------------')
        log.debug("recv vote", vote=vote, remote_id=proto)
        isvalid = self.consensus_manager.add_vote(vote, proto)
        if isvalid:
            self.broadcast(vote, origin=proto)
        self.consensus_manager.process()

    def on_receive_ready(self, proto, ready):
        if ready.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        log.debug("recv ready", ready=ready, remote_id=proto)
        self.consensus_manager.add_ready(ready, proto)
        self.broadcast(ready, origin=proto)
        self.consensus_manager.process()

    # start

    def on_receive_status(self, proto, eth_version, network_id, genesis_hash,
                          current_lockset):
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id", remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get(
                         'network_id', proto.network_id))
            raise HDCProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise HDCProtocolError('wrong genesis block')

        assert isinstance(current_lockset, LockSet)
        if len(current_lockset):
            log.debug('adding received lockset', ls=current_lockset)
            for v in current_lockset.votes:
                self.consensus_manager.add_vote(v, proto)
        self.consensus_manager.process()

        # send last BlockProposal
        p = self.consensus_manager.last_blockproposal
        if p:
            log.debug('sending proposal', p=p)
            proto.send_newblockproposal(p)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    def on_wire_protocol_start(self, proto):
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(
            self.on_receive_transactions)
        proto.receive_blockproposals_callbacks.append(
            self.on_receive_blockproposals)
        proto.receive_getblockproposals_callbacks.append(
            self.on_receive_getblockproposals)
        proto.receive_newblockproposal_callbacks.append(
            self.on_receive_newblockproposal)
        proto.receive_votinginstruction_callbacks.append(
            self.on_receive_votinginstruction)
        proto.receive_vote_callbacks.append(self.on_receive_vote)
        proto.receive_ready_callbacks.append(self.on_receive_ready)

        # send status
        proto.send_status(
            genesis_hash=self.chain.genesis.hash,
            current_lockset=self.consensus_manager.active_round.lockset)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def broadcast(self, obj, origin=None):
        """Broadcast `obj` to all peers except `origin` (deduplicated)."""
        fmap = {
            BlockProposal: 'newblockproposal',
            VoteBlock: 'vote',
            VoteNil: 'vote',
            VotingInstruction: 'votinginstruction',
            Transaction: 'transactions',
            Ready: 'ready'
        }
        if not self.broadcast_filter.update(obj.hash):
            log.debug('already broadcasted', obj=obj)
            return
        if isinstance(obj, BlockProposal):
            assert obj.sender == obj.block.header.coinbase
        log.debug('broadcasting', obj=obj)
        bcast = self.app.services.peermanager.broadcast
        bcast(HDCProtocol, fmap[type(obj)], args=(obj, ),
              exclude_peers=[origin.peer] if origin else [])

    broadcast_transaction = broadcast
class ChainService(eth_ChainService):
    """
    Manages the chain and requests to it.

    Older consensus variant (no Ready messages, no proposal lock); the
    ConsensusManager is started once a peer connects and a status message
    is received.
    """

    # required by BaseService
    name = 'chain'
    default_config = dict(eth=dict(network_id=0, genesis='', pruning=-1,
                                   block=ethereum_config.default_config),
                          hdc=dict(validators=[]),
                          )
    # required by WiredService
    wire_protocol = HDCProtocol  # create for each peer
    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024
    # running totals used by gpsec() for gas-per-second stats
    processed_gas = 0
    processed_elapsed = 0

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        # A database is either pruning or non-pruning for its whole lifetime;
        # the marker keys below detect (and reject) a mode switch.
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")
        # Refuse to reuse a database that was created for another network.
        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception("This database was initialized with network_id {} "
                                "and can not be used when connecting to network_id {}".format(
                                    db_network_id, sce['network_id']))
        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()
        assert self.db is not None
        # NOTE: deliberately skips eth_ChainService.__init__ and calls
        # WiredService directly; this class sets up its own state below.
        WiredService.__init__(self, app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env, new_head_cb=self._on_new_head, coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash()
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)
        # Consensus
        self.consensus_contract = ConsensusContract(
            validators=self.config['hdc']['validators'])
        self.consensus_manager = ConsensusManager(self, self.consensus_contract,
                                                  self.consensus_privkey)
        # self.consensus_manager.process()
        # ConsensusManager is started once a peer connects and status_message
        # is received

    # interface accessed by ConensusManager

    def log(self, msg, *args, **kargs):
        log.debug(msg, *args, **kargs)

    @property
    def consensus_privkey(self):
        return self.app.services.accounts[0].privkey

    def sign(self, obj):
        assert isinstance(obj, Signed)
        obj.sign(self.consensus_privkey)

    @property
    def now(self):
        return time.time()

    def setup_alarm(self, delay, cb, *args):
        """Schedule `cb(*args)` after `delay` seconds in its own greenlet."""
        log.debug('setting up alarm')

        def _trigger():
            gevent.sleep(delay)
            log.debug('alarm triggered')
            cb(*args)
        gevent.spawn(_trigger)

    def commit_block(self, blk):
        """Add `blk` to the chain under the transaction lock.

        The lock is released even if `add_block` raises (try/finally), so
        the semaphore can not leak.
        """
        assert isinstance(blk.header, HDCBlockHeader)
        self.add_transaction_lock.acquire()
        try:
            success = self.chain.add_block(blk, forward_pending_transactions=True)
        finally:
            self.add_transaction_lock.release()
        return success

    def link_block(self, t_block):
        """Deserialize and validate `t_block`; return the Block or None.

        BUGFIX: the early `return` (unknown/orphan/invalid block) previously
        left `add_transaction_lock` acquired forever, deadlocking all later
        commit_block/link_block calls. The lock is now released on every
        exit path via try/finally.
        """
        assert isinstance(t_block.header, HDCBlockHeader)
        self.add_transaction_lock.acquire()
        try:
            block = self._link_block(t_block)
            if not block:
                return
            assert block.get_parent() == self.chain.head, (block.get_parent(),
                                                           self.chain.head)
            assert block.header.coinbase == t_block.header.coinbase
            return block
        finally:
            self.add_transaction_lock.release()

    def _link_block(self, t_block):
        """Return the deserialized Block, True if already a Block, or None
        when the block is known, an orphan, or fails validation."""
        assert isinstance(t_block.header, HDCBlockHeader)
        if t_block.header.hash in self.chain:
            log.warn('known block', block=t_block)
            return
        if t_block.header.prevhash not in self.chain:
            log.warn('missing parent', block=t_block, head=self.chain.head,
                     prevhash=phx(t_block.header.prevhash))
            return
        if isinstance(t_block, Block):
            return True  # already deserialized
        try:  # deserialize
            st = time.time()
            block = t_block.to_block(env=self.chain.env)
            elapsed = time.time() - st
            log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(),
                      gas_used=block.gas_used,
                      gpsec=self.gpsec(block.gas_used, elapsed))
            assert block.header.check_pow()
        except processblock.InvalidTransaction as e:
            log.warn('invalid transaction', block=t_block, error=e,
                     FIXME='ban node')
            return
        except VerificationFailed as e:
            log.warn('verification failed', error=e, FIXME='ban node')
            return
        return block

    ###########################################################################

    @property
    def is_syncing(self):
        return self.consensus_manager.synchronizer.is_syncing

    # wire protocol receivers ###########

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received', count=len(transactions),
                  remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blocks / proposals ################

    def on_receive_getblockproposals(self, proto, blocknumbers):
        log.debug('----------------------------------')
        log.debug("on_receive_getblockproposals", count=len(blocknumbers))
        found = []
        for i, height in enumerate(blocknumbers):
            if i == self.wire_protocol.max_getproposals_count:
                break
            assert isinstance(height, int)  # integers
            assert i == 0 or height > blocknumbers[i - 1]  # sorted
            if height > self.chain.head.number:
                log.debug("unknown block requested", height=height)
                break
            rlp_data = self.consensus_manager.get_blockproposal_rlp_by_height(height)
            assert isinstance(rlp_data, bytes)
            found.append(rlp_data)
        if found:
            log.debug("found", count=len(found))
            proto.send_blockproposals(*found)

    def on_receive_blockproposals(self, proto, proposals):
        log.debug('----------------------------------')
        self.consensus_manager.log('received proposals', sender=proto)
        log.debug("recv proposals", num=len(proposals), remote_id=proto)
        self.consensus_manager.synchronizer.receive_blockproposals(proposals)

    def on_receive_newblockproposal(self, proto, proposal):
        if proposal.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        self.consensus_manager.log('receive proposal', sender=proto)
        log.debug("recv newblockproposal", proposal=proposal, remote_id=proto)
        # self.synchronizer.receive_newproposal(proto, proposal)
        assert isinstance(proposal, BlockProposal)
        assert isinstance(proposal.block.header, HDCBlockHeader)
        isvalid = self.consensus_manager.add_proposal(proposal, proto)
        if isvalid:
            self.broadcast(proposal, origin_proto=proto)
        self.consensus_manager.process()

    def on_receive_votinginstruction(self, proto, votinginstruction):
        if votinginstruction.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        log.debug("recv votinginstruction", proposal=votinginstruction,
                  remote_id=proto)
        # self.synchronizer.receive_newproposal(proto, proposal)
        isvalid = self.consensus_manager.add_proposal(votinginstruction, proto)
        if isvalid:
            self.broadcast(votinginstruction, origin_proto=proto)
        self.consensus_manager.process()

    # votes

    def on_receive_vote(self, proto, vote):
        if vote.hash in self.broadcast_filter:
            return
        log.debug('----------------------------------')
        log.debug("recv vote", vote=vote, remote_id=proto)
        isvalid = self.consensus_manager.add_vote(vote, proto)
        if isvalid:
            self.broadcast(vote, origin_proto=proto)
        self.consensus_manager.process()

    # start

    def on_receive_status(self, proto, eth_version, network_id, genesis_hash,
                          current_lockset):
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id", remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get(
                         'network_id', proto.network_id))
            raise HDCProtocolError('wrong network_id')
        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise HDCProtocolError('wrong genesis block')
        assert isinstance(current_lockset, LockSet)
        if len(current_lockset):
            log.debug('adding received lockset', ls=current_lockset)
            for v in current_lockset.votes:
                self.consensus_manager.add_vote(v, proto)
        self.consensus_manager.process()
        # send last BlockProposal
        p = self.consensus_manager.last_blockproposal
        if p:
            self.consensus_manager.log('sending proposal', p=p)
            proto.send_newblockproposal(p)
        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    def on_wire_protocol_start(self, proto):
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(self.on_receive_transactions)
        proto.receive_blockproposals_callbacks.append(self.on_receive_blockproposals)
        proto.receive_getblockproposals_callbacks.append(self.on_receive_getblockproposals)
        proto.receive_newblockproposal_callbacks.append(self.on_receive_newblockproposal)
        proto.receive_votinginstruction_callbacks.append(self.on_receive_votinginstruction)
        proto.receive_vote_callbacks.append(self.on_receive_vote)
        # send status
        proto.send_status(genesis_hash=self.chain.genesis.hash,
                          current_lockset=self.consensus_manager.active_round.lockset)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def broadcast(self, obj, origin_proto=None):
        """Broadcast `obj` to all peers except `origin_proto` (deduplicated)."""
        # NOTE(review): the 'transaction' command name differs from the
        # 'transactions' used elsewhere in this file — verify against the
        # protocol's command names if Transaction objects are ever broadcast.
        fmap = {BlockProposal: 'newblockproposal', VoteBlock: 'vote',
                VoteNil: 'vote', VotingInstruction: 'votinginstruction',
                Transaction: 'transaction'}
        if not self.broadcast_filter.update(obj.hash):
            log.debug('already broadcasted', obj=obj)
            return
        if isinstance(obj, BlockProposal):
            assert obj.sender == obj.block.header.coinbase
        log.debug('broadcasting', obj=obj)
        bcast = self.app.services.peermanager.broadcast
        bcast(HDCProtocol, fmap[type(obj)], args=(obj,),
              exclude_peers=[origin_proto.peer] if origin_proto else [])
class ChainService(WiredService): """ Manages the chain and requests to it. """ # required by BaseService name = 'chain' default_config = dict(eth=dict(network_id=0, genesis='', pruning=-1), block=ethereum_config.default_config) # required by WiredService wire_protocol = eth_protocol.ETHProtocol # create for each peer # initialized after configure: chain = None genesis = None synchronizer = None config = None block_queue_size = 1024 transaction_queue_size = 1024 processed_gas = 0 processed_elapsed = 0 def __init__(self, app): self.config = app.config sce = self.config['eth'] if int(sce['pruning']) >= 0: self.db = RefcountDB(app.services.db) if "I am not pruning" in self.db.db: raise Exception("This database was initialized as non-pruning." " Kinda hard to start pruning now.") self.db.ttl = int(sce['pruning']) self.db.db.put("I am pruning", "1") else: self.db = app.services.db if "I am pruning" in self.db: raise Exception("This database was initialized as pruning." " Kinda hard to stop pruning now.") self.db.put("I am not pruning", "1") if 'network_id' in self.db: db_network_id = self.db.get('network_id') if db_network_id != str(sce['network_id']): raise Exception( "This database was initialized with network_id {} " "and can not be used when connecting to network_id {}". 
format(db_network_id, sce['network_id'])) else: self.db.put('network_id', str(sce['network_id'])) self.db.commit() assert self.db is not None super(ChainService, self).__init__(app) log.info('initializing chain') coinbase = app.services.accounts.coinbase env = Env(self.db, sce['block']) self.chain = Chain(env, new_head_cb=self._on_new_head, coinbase=coinbase) log.info('chain at', number=self.chain.head.number) if 'genesis_hash' in sce: assert sce['genesis_hash'] == self.chain.genesis.hex_hash() self.synchronizer = Synchronizer(self, force_sync=None) self.block_queue = Queue(maxsize=self.block_queue_size) self.transaction_queue = Queue(maxsize=self.transaction_queue_size) self.add_blocks_lock = False self.add_transaction_lock = gevent.lock.Semaphore() self.broadcast_filter = DuplicatesFilter() self.on_new_head_cbs = [] self.on_new_head_candidate_cbs = [] self.newblock_processing_times = deque(maxlen=1000) @property def is_syncing(self): return self.synchronizer.synctask is not None @property def is_mining(self): if 'pow' in self.app.services: return self.app.services.pow.active return False def _on_new_head(self, block): # DEBUG('new head cbs', len(self.on_new_head_cbs)) for cb in self.on_new_head_cbs: cb(block) self._on_new_head_candidate( ) # we implicitly have a new head_candidate def _on_new_head_candidate(self): # DEBUG('new head candidate cbs', len(self.on_new_head_candidate_cbs)) for cb in self.on_new_head_candidate_cbs: cb(self.chain.head_candidate) def add_transaction(self, tx, origin=None): if self.is_syncing: return # we can not evaluate the tx based on outdated state log.debug('add_transaction', locked=self.add_transaction_lock.locked(), tx=tx) assert isinstance(tx, Transaction) assert origin is None or isinstance(origin, BaseProtocol) if tx.hash in self.broadcast_filter: log.debug('discarding known tx') # discard early return # validate transaction try: validate_transaction(self.chain.head_candidate, tx) log.debug('valid tx, broadcasting') 
self.broadcast_transaction(tx, origin=origin) # asap except InvalidTransaction as e: log.debug('invalid tx', error=e) return if origin is not None: # not locally added via jsonrpc if not self.is_mining or self.is_syncing: log.debug('discarding tx', syncing=self.is_syncing, mining=self.is_mining) return self.add_transaction_lock.acquire() success = self.chain.add_transaction(tx) self.add_transaction_lock.release() if success: self._on_new_head_candidate() def add_block(self, t_block, proto): "adds a block to the block_queue and spawns _add_block if not running" self.block_queue.put((t_block, proto)) # blocks if full if not self.add_blocks_lock: self.add_blocks_lock = True # need to lock here (ctx switch is later) gevent.spawn(self._add_blocks) def add_mined_block(self, block): log.debug('adding mined block', block=block) assert isinstance(block, Block) assert block.header.check_pow() if self.chain.add_block(block): log.debug('added', block=block, ts=time.time()) assert block == self.chain.head self.broadcast_newblock(block, chain_difficulty=block.chain_difficulty()) def knows_block(self, block_hash): "if block is in chain or in queue" if block_hash in self.chain: return True # check if queued or processed for i in range(len(self.block_queue.queue)): if block_hash == self.block_queue.queue[i][0].header.hash: return True return False def _add_blocks(self): log.debug('add_blocks', qsize=self.block_queue.qsize(), add_tx_lock=self.add_transaction_lock.locked()) assert self.add_blocks_lock is True self.add_transaction_lock.acquire() try: while not self.block_queue.empty(): t_block, proto = self.block_queue.peek( ) # peek: knows_block while processing if t_block.header.hash in self.chain: log.warn('known block', block=t_block) self.block_queue.get() continue if t_block.header.prevhash not in self.chain: log.warn('missing parent', block=t_block, head=self.chain.head) self.block_queue.get() continue # FIXME, this is also done in validation and in synchronizer for new_blocks 
                # proof-of-work check; FIXME marker suggests the peer should be banned
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'InvalidBlockNonce')
                    self.block_queue.get()
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(env=self.chain.env)
                    elapsed = time.time() - st
                    log.debug('deserialized', elapsed='%.4fs' % elapsed, ts=time.time(),
                              gas_used=block.gas_used, gpsec=self.gpsec(block.gas_used, elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e, FIXME='ban node')
                    # map the exception type onto a sentry error category
                    errtype = \
                        'InvalidNonce' if isinstance(e, InvalidNonce) else \
                        'NotEnoughCash' if isinstance(e, InsufficientBalance) else \
                        'OutOfGasBase' if isinstance(e, InsufficientStartGas) else \
                        'other_transaction_error'
                    sentry.warn_invalid(t_block, errtype)
                    self.block_queue.get()
                    continue
                except VerificationFailed as e:
                    log.warn('verification failed', error=e, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'other_block_error')
                    self.block_queue.get()
                    continue

                # Check canary
                score = 0
                for address in canary_addresses:
                    if block.get_storage_data(address, 1) > 0:
                        score += 1
                if score >= 2:
                    log.warn('canary triggered')
                    # NOTE(review): the block is NOT dequeued here (we only peeked), so
                    # the loop re-peeks the same block without yielding — confirm whether
                    # this deliberate stall-on-canary is intended.
                    continue

                # All checks passed
                log.debug('adding', block=block, ts=time.time())
                if self.chain.add_block(
                        block, forward_pending_transactions=self.is_mining):
                    now = time.time()
                    log.info('added', block=block, txs=block.transaction_count,
                             gas_used=block.gas_used)
                    if t_block.newblock_timestamp:
                        # collect end-to-end processing latency statistics for
                        # blocks that arrived via 'newblock' messages
                        total = now - t_block.newblock_timestamp
                        self.newblock_processing_times.append(total)
                        avg = statistics.mean(self.newblock_processing_times)
                        med = statistics.median(self.newblock_processing_times)
                        max_ = max(self.newblock_processing_times)
                        min_ = min(self.newblock_processing_times)
                        log.info('processing time', last=total, avg=avg, max=max_,
                                 min=min_, median=med)
                else:
                    log.warn('could not add', block=block)

                self.block_queue.get(
                )  # remove block from queue (we peeked only)
                gevent.sleep(0.001)
        finally:
            # always clear the processing flag and give the tx lock back
            self.add_blocks_lock = False
            self.add_transaction_lock.release()

    def gpsec(self, gas_spent=0, elapsed=0):
        """Update cumulative gas/time counters and return gas-per-second throughput."""
        if gas_spent:
            self.processed_gas += gas_spent
            self.processed_elapsed += elapsed
        # +0.001 avoids division by zero before any block was processed
        return int(self.processed_gas / (0.001 + self.processed_elapsed))

    def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
        """Broadcast a newblock message to all peers except ``origin``.

        Deduplicates via ``broadcast_filter``; the difficulty is recomputed when
        not supplied (only possible for blocks already in the chain).
        """
        if not chain_difficulty:
            assert block.hash in self.chain
            chain_difficulty = block.chain_difficulty()
        assert isinstance(block, (eth_protocol.TransientBlock, Block))
        if self.broadcast_filter.update(block.header.hash):
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'newblock',
                  args=(block, chain_difficulty),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted block')

    def broadcast_transaction(self, tx, origin=None):
        """Broadcast a transaction to all peers except ``origin`` (deduplicated)."""
        assert isinstance(tx, Transaction)
        if self.broadcast_filter.update(tx.hash):
            log.debug('broadcasting tx', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'transactions', args=(tx, ),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted tx')

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        """Register per-peer message callbacks and send our status to the peer."""
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(
            self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(
            self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)
        proto.receive_newblockhashes_callbacks.append(self.on_newblockhashes)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(),
                          chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id, chain_difficulty,
                          chain_head_hash, genesis_hash):
        """Validate a peer's status (version, network, genesis) and start syncing.

        Raises :class:`eth_protocol.ETHProtocolError` on network/genesis mismatch.
        """
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id",
                     remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get(
                         'network_id', proto.network_id))
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash,
                                         chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received',
                  count=len(transactions), remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blockhashes ###########

    def on_newblockhashes(self, proto, newblockhashes):
        """
        msg sent out if not the full block is propagated
        chances are high, that we get the newblock, though.
        """
        log.debug('----------------------------------')
        log.debug("recv newnewblockhashes", num=len(newblockhashes),
                  remote_id=proto)
        assert len(newblockhashes) <= 32
        self.synchronizer.receive_newblockhashes(proto, newblockhashes)

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        """Answer with up to ``count`` ancestor hashes of ``child_block_hash``."""
        log.debug('----------------------------------')
        log.debug("handle_get_blockhashes", count=count,
                  block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return
        last = child_block_hash
        while len(found) < max_hashes:
            try:
                # walk the parent chain without full deserialization
                last = rlp.decode_lazy(
                    self.chain.db.get(last))[0][0]  # [head][prevhash]
            except KeyError:
                # this can happen if we started a chain download, which did not complete
                # should not happen if the hash is part of the canonical chain
                log.warn('KeyError in getblockhashes', hash=last)
                break
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        log.debug('----------------------------------')
        if blockhashes:
            log.debug("on_receive_blockhashes", count=len(blockhashes),
                      remote_id=proto, first=encode_hex(blockhashes[0]),
                      last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        """Serve raw (still-serialized) blocks for the requested hashes."""
        log.debug('----------------------------------')
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        log.debug('----------------------------------')
        blk_number = max(x.header.number for x in transient_blocks) if transient_blocks else 0
        log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug('----------------------------------')
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)

    def on_receive_getblockheaders(self, proto, blockhashes):
        """Serve re-encoded block headers (first item of each stored block RLP)."""
        log.debug('----------------------------------')
        log.debug("on_receive_getblockheaders", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(rlp.encode(rlp.decode(self.chain.db.get(bh))[0]))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blockheaders(*found)

    def on_receive_blockheaders(self, proto, transient_blocks):
        log.debug('----------------------------------')
        pass
        # TODO: implement headers first syncing

    def on_receive_hashlookup(self, proto, hashes):
        """Look up raw trie nodes ('node:' + hash) for a peer, '' when missing."""
        found = []
        for h in hashes:
            try:
                found.append(
                    utils.encode_hex(
                        self.chain.db.get('node:' + utils.decode_hex(h))))
            except KeyError:
                found.append('')
        # NOTE(review): 'found' is built but never sent; this sends the last
        # request hash back (and raises NameError for an empty request) — looks
        # like a bug, confirm the intended payload is 'found'.
        proto.send_hashlookupresponse(h)

    def on_receive_hashlookupresponse(self, proto, hashresponses):
        pass
class ChainService(WiredService):

    """
    Manages the chain and requests to it.

    Receives wire-protocol messages (status, transactions, block hashes,
    blocks), validates and queues incoming blocks, and broadcasts blocks and
    transactions to peers.
    """
    # required by BaseService
    name = 'chain'
    # FIX: __init__ reads the block config as sce['block'] with
    # sce = config['eth'], so the 'block' section must be nested inside 'eth'
    # (it was a top-level sibling, where __init__ could never find it).
    default_config = dict(
        eth=dict(network_id=0, genesis='', pruning=-1,
                 block=ethereum_config.default_config),
    )

    # required by WiredService
    wire_protocol = eth_protocol.ETHProtocol  # create for each peer

    # initialized after configure:
    chain = None
    genesis = None
    synchronizer = None
    config = None
    block_queue_size = 1024
    transaction_queue_size = 1024
    processed_gas = 0          # cumulative gas used, for gpsec()
    processed_elapsed = 0      # cumulative processing seconds, for gpsec()

    def __init__(self, app):
        self.config = app.config
        sce = self.config['eth']
        # pruning >= 0 selects a refcounting DB with a TTL; a database is
        # permanently either pruning or non-pruning, tracked via marker keys.
        if int(sce['pruning']) >= 0:
            self.db = RefcountDB(app.services.db)
            if "I am not pruning" in self.db.db:
                raise Exception("This database was initialized as non-pruning."
                                " Kinda hard to start pruning now.")
            self.db.ttl = int(sce['pruning'])
            self.db.db.put("I am pruning", "1")
        else:
            self.db = app.services.db
            if "I am pruning" in self.db:
                raise Exception("This database was initialized as pruning."
                                " Kinda hard to stop pruning now.")
            self.db.put("I am not pruning", "1")

        # a database is bound to a single network for its lifetime
        if 'network_id' in self.db:
            db_network_id = self.db.get('network_id')
            if db_network_id != str(sce['network_id']):
                raise Exception(
                    "This database was initialized with network_id {} "
                    "and can not be used when connecting to network_id {}".format(
                        db_network_id, sce['network_id'])
                )
        else:
            self.db.put('network_id', str(sce['network_id']))
            self.db.commit()

        assert self.db is not None
        super(ChainService, self).__init__(app)
        log.info('initializing chain')
        coinbase = app.services.accounts.coinbase
        env = Env(self.db, sce['block'])
        self.chain = Chain(env, new_head_cb=self._on_new_head, coinbase=coinbase)
        log.info('chain at', number=self.chain.head.number)
        if 'genesis_hash' in sce:
            assert sce['genesis_hash'] == self.chain.genesis.hex_hash()

        self.synchronizer = Synchronizer(self, force_sync=None)
        self.block_queue = Queue(maxsize=self.block_queue_size)
        self.transaction_queue = Queue(maxsize=self.transaction_queue_size)
        self.add_blocks_lock = False
        self.add_transaction_lock = gevent.lock.Semaphore()
        self.broadcast_filter = DuplicatesFilter()
        self.on_new_head_cbs = []
        self.on_new_head_candidate_cbs = []
        self.newblock_processing_times = deque(maxlen=1000)

    @property
    def is_syncing(self):
        return self.synchronizer.synctask is not None

    @property
    def is_mining(self):
        if 'pow' in self.app.services:
            return self.app.services.pow.active
        return False

    def _on_new_head(self, block):
        for cb in self.on_new_head_cbs:
            cb(block)
        self._on_new_head_candidate()  # we implicitly have a new head_candidate

    def _on_new_head_candidate(self):
        for cb in self.on_new_head_candidate_cbs:
            cb(self.chain.head_candidate)

    def add_transaction(self, tx, origin=None):
        """Validate a transaction, broadcast it, and add it to the head candidate.

        Remote transactions (``origin`` set) are discarded unless we are mining
        and not syncing; locally submitted ones are always applied.
        """
        if self.is_syncing:
            return  # we can not evaluate the tx based on outdated state
        log.debug('add_transaction',
                  locked=self.add_transaction_lock.locked(), tx=tx)
        assert isinstance(tx, Transaction)
        assert origin is None or isinstance(origin, BaseProtocol)

        if tx.hash in self.broadcast_filter:
            log.debug('discarding known tx')  # discard early
            return

        # validate transaction
        try:
            validate_transaction(self.chain.head_candidate, tx)
            log.debug('valid tx, broadcasting')
            self.broadcast_transaction(tx, origin=origin)  # asap
        except InvalidTransaction as e:
            log.debug('invalid tx', error=e)
            return

        if origin is not None:  # not locally added via jsonrpc
            if not self.is_mining or self.is_syncing:
                log.debug('discarding tx', syncing=self.is_syncing,
                          mining=self.is_mining)
                return

        self.add_transaction_lock.acquire()
        try:
            success = self.chain.add_transaction(tx)
        finally:
            # FIX: release even when add_transaction raises, otherwise the
            # semaphore leaks and blocks can never be processed again
            self.add_transaction_lock.release()
        if success:
            self._on_new_head_candidate()

    def add_block(self, t_block, proto):
        "adds a block to the block_queue and spawns _add_block if not running"
        self.block_queue.put((t_block, proto))  # blocks if full
        if not self.add_blocks_lock:
            self.add_blocks_lock = True  # need to lock here (ctx switch is later)
            gevent.spawn(self._add_blocks)

    def add_mined_block(self, block):
        """Add a locally mined block to the chain and announce it to peers."""
        log.debug('adding mined block', block=block)
        assert isinstance(block, Block)
        assert block.header.check_pow()
        if self.chain.add_block(block):
            log.debug('added', block=block, ts=time.time())
            assert block == self.chain.head
            self.broadcast_newblock(
                block, chain_difficulty=block.chain_difficulty())

    def knows_block(self, block_hash):
        "if block is in chain or in queue"
        if block_hash in self.chain:
            return True
        # check if queued or processed
        for i in range(len(self.block_queue.queue)):
            if block_hash == self.block_queue.queue[i][0].header.hash:
                return True
        return False

    def _add_blocks(self):
        """Drain the block queue: validate, deserialize and chain-add each block.

        Runs in its own greenlet; ``add_blocks_lock`` guarantees a single
        instance, and the transaction lock is held for the whole drain.
        """
        log.debug('add_blocks', qsize=self.block_queue.qsize(),
                  add_tx_lock=self.add_transaction_lock.locked())
        assert self.add_blocks_lock is True
        self.add_transaction_lock.acquire()
        try:
            while not self.block_queue.empty():
                # peek (not get) so knows_block() sees in-flight blocks
                t_block, proto = self.block_queue.peek()
                if t_block.header.hash in self.chain:
                    log.warn('known block', block=t_block)
                    self.block_queue.get()
                    continue
                if t_block.header.prevhash not in self.chain:
                    log.warn('missing parent', block=t_block,
                             head=self.chain.head)
                    self.block_queue.get()
                    continue
                # FIXME, this is also done in validation and in synchronizer for new_blocks
                if not t_block.header.check_pow():
                    log.warn('invalid pow', block=t_block, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'InvalidBlockNonce')
                    self.block_queue.get()
                    continue
                try:  # deserialize
                    st = time.time()
                    block = t_block.to_block(env=self.chain.env)
                    elapsed = time.time() - st
                    log.debug('deserialized', elapsed='%.4fs' % elapsed,
                              ts=time.time(), gas_used=block.gas_used,
                              gpsec=self.gpsec(block.gas_used, elapsed))
                except processblock.InvalidTransaction as e:
                    log.warn('invalid transaction', block=t_block, error=e,
                             FIXME='ban node')
                    # map the exception type onto a sentry error category
                    errtype = \
                        'InvalidNonce' if isinstance(e, InvalidNonce) else \
                        'NotEnoughCash' if isinstance(e, InsufficientBalance) else \
                        'OutOfGasBase' if isinstance(e, InsufficientStartGas) else \
                        'other_transaction_error'
                    sentry.warn_invalid(t_block, errtype)
                    self.block_queue.get()
                    continue
                except VerificationFailed as e:
                    log.warn('verification failed', error=e, FIXME='ban node')
                    sentry.warn_invalid(t_block, 'other_block_error')
                    self.block_queue.get()
                    continue

                # Check canary
                score = 0
                for address in canary_addresses:
                    if block.get_storage_data(address, 1) > 0:
                        score += 1
                if score >= 2:
                    log.warn('canary triggered')
                    # NOTE(review): block is not dequeued here (peek only), so the
                    # loop re-peeks the same block without yielding — confirm the
                    # stall-on-canary behavior is intended.
                    continue

                # All checks passed
                log.debug('adding', block=block, ts=time.time())
                if self.chain.add_block(
                        block, forward_pending_transactions=self.is_mining):
                    now = time.time()
                    log.info('added', block=block, txs=block.transaction_count,
                             gas_used=block.gas_used)
                    if t_block.newblock_timestamp:
                        # end-to-end latency stats for blocks announced via newblock
                        total = now - t_block.newblock_timestamp
                        self.newblock_processing_times.append(total)
                        avg = statistics.mean(self.newblock_processing_times)
                        med = statistics.median(self.newblock_processing_times)
                        max_ = max(self.newblock_processing_times)
                        min_ = min(self.newblock_processing_times)
                        log.info('processing time', last=total, avg=avg,
                                 max=max_, min=min_, median=med)
                else:
                    log.warn('could not add', block=block)

                self.block_queue.get()  # remove block from queue (we peeked only)
                gevent.sleep(0.001)
        finally:
            self.add_blocks_lock = False
            self.add_transaction_lock.release()

    def gpsec(self, gas_spent=0, elapsed=0):
        """Update cumulative counters and return overall gas-per-second."""
        if gas_spent:
            self.processed_gas += gas_spent
            self.processed_elapsed += elapsed
        # +0.001 avoids division by zero before any block was processed
        return int(self.processed_gas / (0.001 + self.processed_elapsed))

    def broadcast_newblock(self, block, chain_difficulty=None, origin=None):
        """Broadcast a newblock message to all peers except ``origin``.

        Deduplicated via ``broadcast_filter``; the difficulty is recomputed
        when omitted (only possible for blocks already in the chain).
        """
        if not chain_difficulty:
            assert block.hash in self.chain
            chain_difficulty = block.chain_difficulty()
        assert isinstance(block, (eth_protocol.TransientBlock, Block))
        if self.broadcast_filter.update(block.header.hash):
            log.debug('broadcasting newblock', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'newblock',
                  args=(block, chain_difficulty),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted block')

    def broadcast_transaction(self, tx, origin=None):
        """Broadcast a transaction to all peers except ``origin`` (deduplicated)."""
        assert isinstance(tx, Transaction)
        if self.broadcast_filter.update(tx.hash):
            log.debug('broadcasting tx', origin=origin)
            bcast = self.app.services.peermanager.broadcast
            bcast(eth_protocol.ETHProtocol, 'transactions', args=(tx,),
                  exclude_peers=[origin.peer] if origin else [])
        else:
            log.debug('already broadcasted tx')

    # wire protocol receivers ###########

    def on_wire_protocol_start(self, proto):
        """Register per-peer message callbacks and send our status message."""
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_start', proto=proto)
        assert isinstance(proto, self.wire_protocol)
        # register callbacks
        proto.receive_status_callbacks.append(self.on_receive_status)
        proto.receive_transactions_callbacks.append(self.on_receive_transactions)
        proto.receive_getblockhashes_callbacks.append(self.on_receive_getblockhashes)
        proto.receive_blockhashes_callbacks.append(self.on_receive_blockhashes)
        proto.receive_getblocks_callbacks.append(self.on_receive_getblocks)
        proto.receive_blocks_callbacks.append(self.on_receive_blocks)
        proto.receive_newblock_callbacks.append(self.on_receive_newblock)
        proto.receive_newblockhashes_callbacks.append(self.on_newblockhashes)

        # send status
        head = self.chain.head
        proto.send_status(chain_difficulty=head.chain_difficulty(),
                          chain_head_hash=head.hash,
                          genesis_hash=self.chain.genesis.hash)

    def on_wire_protocol_stop(self, proto):
        assert isinstance(proto, self.wire_protocol)
        log.debug('----------------------------------')
        log.debug('on_wire_protocol_stop', proto=proto)

    def on_receive_status(self, proto, eth_version, network_id, chain_difficulty,
                          chain_head_hash, genesis_hash):
        """Validate a peer's status and kick off synchronization.

        Raises :class:`eth_protocol.ETHProtocolError` on network id or genesis
        mismatch; otherwise hands the peer's head to the synchronizer and sends
        our pending transactions.
        """
        log.debug('----------------------------------')
        log.debug('status received', proto=proto, eth_version=eth_version)
        assert eth_version == proto.version, (eth_version, proto.version)
        if network_id != self.config['eth'].get('network_id', proto.network_id):
            log.warn("invalid network id", remote_network_id=network_id,
                     expected_network_id=self.config['eth'].get('network_id',
                                                                proto.network_id))
            raise eth_protocol.ETHProtocolError('wrong network_id')

        # check genesis
        if genesis_hash != self.chain.genesis.hash:
            log.warn("invalid genesis hash", remote_id=proto,
                     genesis=genesis_hash.encode('hex'))
            raise eth_protocol.ETHProtocolError('wrong genesis block')

        # request chain
        self.synchronizer.receive_status(proto, chain_head_hash, chain_difficulty)

        # send transactions
        transactions = self.chain.get_transactions()
        if transactions:
            log.debug("sending transactions", remote_id=proto)
            proto.send_transactions(*transactions)

    # transactions

    def on_receive_transactions(self, proto, transactions):
        "receives rlp.decoded serialized"
        log.debug('----------------------------------')
        log.debug('remote_transactions_received', count=len(transactions),
                  remote_id=proto)
        for tx in transactions:
            self.add_transaction(tx, origin=proto)

    # blockhashes ###########

    def on_newblockhashes(self, proto, newblockhashes):
        """
        msg sent out if not the full block is propagated
        chances are high, that we get the newblock, though.
        """
        log.debug('----------------------------------')
        log.debug("recv newnewblockhashes", num=len(newblockhashes),
                  remote_id=proto)
        assert len(newblockhashes) <= 32
        self.synchronizer.receive_newblockhashes(proto, newblockhashes)

    def on_receive_getblockhashes(self, proto, child_block_hash, count):
        """Answer with up to ``count`` ancestor hashes of ``child_block_hash``."""
        log.debug('----------------------------------')
        log.debug("handle_get_blockhashes", count=count,
                  block_hash=encode_hex(child_block_hash))
        max_hashes = min(count, self.wire_protocol.max_getblockhashes_count)
        found = []
        if child_block_hash not in self.chain:
            log.debug("unknown block")
            proto.send_blockhashes(*[])
            return
        last = child_block_hash
        while len(found) < max_hashes:
            try:
                # follow the parent pointer without full deserialization
                last = rlp.decode_lazy(self.chain.db.get(last))[0][0]  # [head][prevhash]
            except KeyError:
                # this can happen if we started a chain download, which did not complete
                # should not happen if the hash is part of the canonical chain
                log.warn('KeyError in getblockhashes', hash=last)
                break
            if last:
                found.append(last)
            else:
                break

        log.debug("sending: found block_hashes", count=len(found))
        proto.send_blockhashes(*found)

    def on_receive_blockhashes(self, proto, blockhashes):
        log.debug('----------------------------------')
        if blockhashes:
            log.debug("on_receive_blockhashes", count=len(blockhashes),
                      remote_id=proto, first=encode_hex(blockhashes[0]),
                      last=encode_hex(blockhashes[-1]))
        else:
            log.debug("recv 0 remote block hashes, signifying genesis block")
        self.synchronizer.receive_blockhashes(proto, blockhashes)

    # blocks ################

    def on_receive_getblocks(self, proto, blockhashes):
        """Serve raw (still-serialized) blocks for the requested hashes."""
        log.debug('----------------------------------')
        log.debug("on_receive_getblocks", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(self.chain.db.get(bh))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blocks(*found)

    def on_receive_blocks(self, proto, transient_blocks):
        log.debug('----------------------------------')
        blk_number = max(x.header.number for x in transient_blocks) \
            if transient_blocks else 0
        log.debug("recv blocks", count=len(transient_blocks), remote_id=proto,
                  highest_number=blk_number)
        if transient_blocks:
            self.synchronizer.receive_blocks(proto, transient_blocks)

    def on_receive_newblock(self, proto, block, chain_difficulty):
        log.debug('----------------------------------')
        log.debug("recv newblock", block=block, remote_id=proto)
        self.synchronizer.receive_newblock(proto, block, chain_difficulty)

    def on_receive_getblockheaders(self, proto, blockhashes):
        """Serve re-encoded block headers (first list item of each block's RLP)."""
        log.debug('----------------------------------')
        log.debug("on_receive_getblockheaders", count=len(blockhashes))
        found = []
        for bh in blockhashes[:self.wire_protocol.max_getblocks_count]:
            try:
                found.append(rlp.encode(rlp.decode(self.chain.db.get(bh))[0]))
            except KeyError:
                log.debug("unknown block requested", block_hash=encode_hex(bh))
        if found:
            log.debug("found", count=len(found))
            proto.send_blockheaders(*found)

    def on_receive_blockheaders(self, proto, transient_blocks):
        log.debug('----------------------------------')
        pass
        # TODO: implement headers first syncing

    def on_receive_hashlookup(self, proto, hashes):
        """Look up raw trie nodes ('node:' + hash) and answer with their hex
        encodings; unknown hashes map to '' so the response aligns with the
        request order.
        """
        found = []
        for h in hashes:
            try:
                found.append(utils.encode_hex(
                    self.chain.db.get('node:' + utils.decode_hex(h))))
            except KeyError:
                found.append('')
        # FIX: previously sent the last request hash `h` (NameError on an empty
        # request) while discarding `found`; send the collected results instead.
        proto.send_hashlookupresponse(found)

    def on_receive_hashlookupresponse(self, proto, hashresponses):
        pass