def should_add_block(self, block):
    """Pre-flight checks for a candidate block.

    Returns False (after queueing the block where appropriate) when the
    block arrived ahead of local time, when its parent is not yet known,
    or when applying it on top of its parent's post-state raises;
    otherwise returns True.
    """
    now = self.localtime
    # Future-dated block: slot it into the timestamp-sorted time queue
    # so it can be retried once local time catches up.
    if block.header.timestamp > now:
        pos = 0
        while pos < len(self.time_queue) and self.time_queue[pos].timestamp < block.timestamp:
            pos += 1
        self.time_queue.insert(pos, block)
        log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
                 (now, block.header.timestamp, block.header.timestamp - now))
        return False
    # Unknown parent: park the block under its prevhash for replay once
    # the parent shows up.
    if block.header.prevhash not in self.env.db:
        self.parent_queue.setdefault(block.header.prevhash, []).append(block)
        log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
                 (block.number, encode_hex(block.hash), encode_hex(block.prevhash)))
        return False
    # Dry-run the block on a throwaway copy of its parent's post-state to
    # make sure it applies cleanly.
    trial_state = (self.state.ephemeral_clone()
                   if block.header.prevhash == self.head_hash
                   else self.mk_poststate_of_blockhash(block.header.prevhash))
    try:
        apply_block(trial_state, block)
    except (AssertionError, KeyError, ValueError, InvalidTransaction,
            VerificationFailed) as e:  # FIXME add relevant exceptions here
        log.info('Block %s with parent %s invalid, reason: %s' %
                 (encode_hex(block.header.hash), encode_hex(block.header.prevhash), e))
        return False
    return True
def add_block_to_head(self, block):
    """Apply ``block`` directly on top of the current head state and
    update the on-disk block/tx indices and the head pointer."""
    log.info('Adding to head', head=encode_hex(block.header.prevhash))
    apply_block(self.state, block)
    # Index the block hash by its height.
    self.db.put(b'block:' + to_string(block.header.number), block.header.hash)
    self.get_pow_difficulty(block)  # side effect: put 'score:' cache in db
    # Record per-transaction (block number, position) lookup entries.
    for position, tx in enumerate(block.transactions):
        self.db.put(b'txindex:' + tx.hash,
                    rlp.encode([block.number, position]))
    self.head_hash = block.header.hash
def add_block(self, block):
    """Validate, store and run the Casper finality-gadget fork choice
    for ``block``.

    Returns True if the block was added, False if it was rejected or
    queued by :meth:`should_add_block` (too early / unknown parent /
    invalid).
    """
    # ~~~ Validate ~~~~ #
    # Validate that the block should be added
    if not self.should_add_block(block):
        return False
    # ~~~ Store ~~~~ #
    # Store the block
    self.db.put(block.header.hash, rlp.encode(block))
    self.add_child(block)
    # Epoch-boundary blocks are checkpoints; start their subtree score at 0.
    # NOTE(review): key has no ':' separator, unlike b'cp_score:' below —
    # presumably intentional, but verify against the read side.
    if block.number % self.config['EPOCH_LENGTH'] == 0:
        self.db.put(b'cp_subtree_score' + block.hash, 0)
    # Store the state root; reuse the head state when extending the head.
    if block.header.prevhash == self.head_hash:
        temp_state = self.state.ephemeral_clone()
    else:
        temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
    apply_block(temp_state, block)
    self.db.put(b'state:' + block.header.hash, temp_state.trie.root_hash)
    # ~~~ Finality Gadget Fork Choice ~~~~ #
    old_head_checkpoint = self.head_checkpoint
    # Store the new score, read from the Casper contract on the block's
    # post-state.
    cp_hash = self.get_prev_checkpoint_hash(block.hash)
    casper = tester.ABIContract(tester.State(temp_state),
                                casper_utils.casper_abi,
                                self.config['CASPER_ADDRESS'])
    try:
        new_score = casper.get_main_hash_committed_frac()
        log.info('Got new score! {}'.format(new_score))
    except tester.TransactionFailed:
        # Contract call failed: no committed fraction available yet.
        new_score = 0
    # Hoisted: get_checkpoint_score was previously called twice here.
    current_score = self.get_checkpoint_score(cp_hash)
    if current_score < new_score or current_score == 0:
        log.info('Updating checkpoint score. Block num: {} - New Score: {}'
                 .format(self.get_block(cp_hash).number, new_score))
        self.db.put(b'cp_score:' + cp_hash, new_score)
    # Update our view
    self.update_subtree_scores_and_child_pointers(cp_hash)
    cp = self.get_block(cp_hash)
    # Walk back to the most recent checkpoint that actually has a nonzero
    # score (store the block as its checkpoint child if it is heavier
    # than the current child).
    p_cp = cp
    while cp is not self.genesis and self.get_checkpoint_score(cp.hash) == 0:
        cp = self.get_prev_checkpoint_block(cp)
    log.info(
        'Received block. Block num: {} - Prev cp num: {} - Prev committed cp num: {} - '
        'Head cp num: {} - Prev committed cp score: {} - Current head cp score: {}'
        .format(block.number, p_cp.number, cp.number,
                self.head_checkpoint.number,
                self.get_checkpoint_score(cp.hash),
                self.get_checkpoint_score(self.checkpoint_head_hash)))
    log.info('head cp hash: {} - block prev cp hash: {}'.format(
        utils.encode_hex(cp_hash),
        utils.encode_hex(self.checkpoint_head_hash)))
    # Recompute head
    self.recompute_head_checkpoint(cp)
    # Set a new head if required
    log.info('Head cp num: {} - block prev cp num: {}'.format(
        self.head_checkpoint.number, cp.number))
    if self.head_checkpoint == cp:
        if self.head_checkpoint == old_head_checkpoint:
            log.info(
                'Head checkpoint == old head. CP Head Num: {} - Head diff: {} - Block diff: {}'
                .format(self.head_checkpoint.number,
                        self.get_pow_difficulty(self.head),
                        self.get_pow_difficulty(block)))
            # Same checkpoint head as before: fall back to plain PoW
            # heaviest-chain fork choice.
            if self.get_pow_difficulty(self.head) < self.get_pow_difficulty(block):
                self.set_head(block)
        else:
            log.info('Head checkpoint changed to cp number: {}'.format(
                self.head_checkpoint.number))
            new_head, _ = self.find_heaviest_pow_block(self.head_checkpoint)
            self.set_head(new_head)
    else:
        log.info('Skipping block: Head checkpoint is not equal to cp!')
    # Are there blocks that we received that were waiting for this block?
    # If so, process them.
    if block.header.hash in self.parent_queue:
        for _blk in self.parent_queue[block.header.hash]:
            self.add_block(_blk)
        del self.parent_queue[block.header.hash]
    return True
def add_block(self, block):
    """Add ``block`` to the chain.

    Handles three cases: extension of the current head, addition to a
    side chain (with a full reorg if the side chain becomes heavier),
    and orphan blocks whose parent is unknown (queued for later).
    Returns True on success, False if the block was delayed or rejected.
    """
    now = self.localtime
    # Are we receiving the block too early?  If so, keep the time queue
    # sorted by timestamp and insert the block at the right position.
    if block.header.timestamp > now:
        i = 0
        while i < len(self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
            i += 1
        self.time_queue.insert(i, block)
        log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
                 (now, block.header.timestamp, block.header.timestamp - now))
        return False
    # Is the block being added to the head?
    if block.header.prevhash == self.head_hash:
        log.info('Adding to head', head=encode_hex(block.header.prevhash[:4]))
        # Reset per-block bookkeeping before applying.
        self.state.deletes = []
        self.state.changed = {}
        try:
            apply_block(self.state, block)
        except (AssertionError, KeyError, ValueError, InvalidTransaction,
                VerificationFailed) as e:
            log.info('Block %d (%s) with parent %s invalid, reason: %s' %
                     (block.number, encode_hex(block.header.hash[:4]),
                      encode_hex(block.header.prevhash[:4]), str(e)))
            return False
        self.db.put(b'block:%d' % block.header.number, block.header.hash)
        # side effect: put 'score:' cache in db
        block_score = self.get_score(block)
        self.head_hash = block.header.hash
        for i, tx in enumerate(block.transactions):
            self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
        assert self.get_blockhash_by_number(block.header.number) == block.header.hash
        deletes = self.state.deletes
        changed = self.state.changed
    # Or is the block being added to a chain that is not currently the
    # head?
    elif block.header.prevhash in self.env.db:
        log.info('Receiving block %d (%s) not on head (%s), adding to secondary post state %s' %
                 (block.number, encode_hex(block.header.hash[:4]),
                  encode_hex(self.head_hash[:4]),
                  encode_hex(block.header.prevhash[:4])))
        temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
        try:
            apply_block(temp_state, block)
        except (AssertionError, KeyError, ValueError, InvalidTransaction,
                VerificationFailed) as e:
            log.info('Block %s with parent %s invalid, reason: %s' %
                     (encode_hex(block.header.hash[:4]),
                      encode_hex(block.header.prevhash[:4]), str(e)))
            return False
        deletes = temp_state.deletes
        block_score = self.get_score(block)
        changed = temp_state.changed
        # If the block should be the new head, replace the head
        if block_score > self.get_score(self.head):
            b = block
            new_chain = {}
            # Find common ancestor
            while b.header.number >= int(self.db.get(b'GENESIS_NUMBER')):
                new_chain[b.header.number] = b
                key = b'block:%d' % b.header.number
                orig_at_height = self.db.get(key) if key in self.db else None
                if orig_at_height == b.header.hash:
                    break
                if b.prevhash not in self.db or self.db.get(b.prevhash) == b'GENESIS':
                    break
                b = self.get_parent(b)
            replace_from = b.header.number
            # Replace block index and tx indices, and edit the state cache
            # Get a list of all accounts that have been edited along the old and
            # new chains
            changed_accts = {}
            # Read: for i in range(common ancestor block number...new block
            # number)
            for i in itertools.count(replace_from):
                log.info('Rewriting height %d' % i)
                key = b'block:%d' % i
                # Delete data for old blocks
                orig_at_height = self.db.get(key) if key in self.db else None
                if orig_at_height:
                    orig_block_at_height = self.get_block(orig_at_height)
                    log.info('%s no longer in main chain' %
                             encode_hex(orig_block_at_height.header.hash))
                    # Delete from block index
                    self.db.delete(key)
                    # Delete from txindex
                    for tx in orig_block_at_height.transactions:
                        if b'txindex:' + tx.hash in self.db:
                            self.db.delete(b'txindex:' + tx.hash)
                    # Add to changed list (20-byte addresses, concatenated)
                    acct_list = self.db.get(b'changed:' + orig_block_at_height.hash)
                    for j in range(0, len(acct_list), 20):
                        changed_accts[acct_list[j:j + 20]] = True
                # Add data for new blocks
                if i in new_chain:
                    new_block_at_height = new_chain[i]
                    log.info('%s now in main chain' %
                             encode_hex(new_block_at_height.header.hash))
                    # Add to block index
                    self.db.put(key, new_block_at_height.header.hash)
                    # Add to txindex
                    for j, tx in enumerate(new_block_at_height.transactions):
                        self.db.put(b'txindex:' + tx.hash,
                                    rlp.encode([new_block_at_height.number, j]))
                    # Add to changed list
                    if i < b.number:
                        acct_list = self.db.get(b'changed:' + new_block_at_height.hash)
                        for j in range(0, len(acct_list), 20):
                            changed_accts[acct_list[j:j + 20]] = True
                if i not in new_chain and not orig_at_height:
                    break
            # Add changed list from new head to changed list
            for c in changed.keys():
                changed_accts[c] = True
            # Update the on-disk state cache
            for addr in changed_accts.keys():
                data = temp_state.trie.get(addr)
                if data:
                    self.state.db.put(b'address:' + addr, data)
                else:
                    try:
                        self.state.db.delete(b'address:' + addr)
                    except KeyError:
                        pass
            self.head_hash = block.header.hash
            self.state = temp_state
            self.state.executing_on_head = True
    # Block has no parent yet
    else:
        if block.header.prevhash not in self.parent_queue:
            self.parent_queue[block.header.prevhash] = []
        self.parent_queue[block.header.prevhash].append(block)
        log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
                 (block.number, encode_hex(block.hash[:4]),
                  encode_hex(block.prevhash[:4])))
        return False
    self.add_child(block)
    self.db.put(b'head_hash', self.head_hash)
    self.db.put(block.hash, rlp.encode(block))
    self.db.put(b'changed:' + block.hash,
                b''.join([k.encode() if not is_string(k) else k
                          for k in list(changed.keys())]))
    # Consistency: was a bare print(); use the logger like the
    # neighbouring messages.
    log.debug('Saved %d address change logs' % len(changed.keys()))
    self.db.put(b'deletes:' + block.hash, b''.join(deletes))
    log.debug('Saved %d trie node deletes for block %d (%s)' %
              (len(deletes), block.number, utils.encode_hex(block.hash)))
    # Delete old junk data beyond the history horizon
    old_block_hash = self.get_blockhash_by_number(block.number - self.max_history)
    if old_block_hash:
        try:
            deletes = self.db.get(b'deletes:' + old_block_hash)
            log.debug('Deleting up to %d trie nodes' % (len(deletes) // 32))
            rdb = RefcountDB(self.db)
            for i in range(0, len(deletes), 32):
                rdb.delete(deletes[i:i + 32])
            self.db.delete(b'deletes:' + old_block_hash)
            self.db.delete(b'changed:' + old_block_hash)
        except KeyError as e:
            # Best-effort pruning: history entries may already be gone.
            log.debug('Pruning skipped: %s' % e)
    self.db.commit()
    assert (b'deletes:' + block.hash) in self.db
    log.info('Added block %d (%s) with %d txs and %d gas' %
             (block.header.number, encode_hex(block.header.hash)[:8],
              len(block.transactions), block.header.gas_used))
    # Call optional callback
    if self.new_head_cb and block.header.number != 0:
        self.new_head_cb(block)
    # Are there blocks that we received that were waiting for this block?
    # If so, process them.
    if block.header.hash in self.parent_queue:
        for _blk in self.parent_queue[block.header.hash]:
            self.add_block(_blk)
        del self.parent_queue[block.header.hash]
    return True
def add_block(self, block):
    """Add ``block`` to the chain: extend the head, reorg onto a heavier
    side chain, or queue an orphan whose parent is unknown.

    Returns True on success, False if the block was delayed or rejected.
    """
    now = self.localtime
    # Are we receiving the block too early?  Keep the time queue sorted
    # by timestamp and insert at the right slot.
    if block.header.timestamp > now:
        i = 0
        while i < len(self.time_queue) and block.timestamp > self.time_queue[i].timestamp:
            i += 1
        self.time_queue.insert(i, block)
        log.info('Block received too early (%d vs %d). Delaying for %d seconds' %
                 (now, block.header.timestamp, block.header.timestamp - now))
        return False
    # Is the block being added to the head?
    self.state.deletes = []
    if block.header.prevhash == self.head_hash:
        log.info('Adding to head', head=encode_hex(block.header.prevhash))
        try:
            apply_block(self.state, block)
        except (AssertionError, KeyError, ValueError, InvalidTransaction,
                VerificationFailed) as e:
            log.info('Block %d (%s) with parent %s invalid, reason: %s' %
                     (block.number, encode_hex(block.header.hash),
                      encode_hex(block.header.prevhash), e))
            return False
        self.db.put(b'block:%d' % block.header.number, block.header.hash)
        block_score = self.get_score(block)  # side effect: put 'score:' cache in db
        self.head_hash = block.header.hash
        for i, tx in enumerate(block.transactions):
            self.db.put(b'txindex:' + tx.hash, rlp.encode([block.number, i]))
        assert self.get_blockhash_by_number(block.header.number) == block.header.hash
        deletes = self.state.deletes
    # Or is the block being added to a chain that is not currently the head?
    elif block.header.prevhash in self.env.db:
        log.info('Receiving block not on head, adding to secondary post state',
                 prevhash=encode_hex(block.header.prevhash))
        temp_state = self.mk_poststate_of_blockhash(block.header.prevhash)
        try:
            apply_block(temp_state, block)
        except (AssertionError, KeyError, ValueError, InvalidTransaction,
                VerificationFailed) as e:
            log.info('Block %s with parent %s invalid, reason: %s' %
                     (encode_hex(block.header.hash),
                      encode_hex(block.header.prevhash), e))
            return False
        deletes = temp_state.deletes
        block_score = self.get_score(block)
        # If the block should be the new head, replace the head
        if block_score > self.get_score(self.head):
            b = block
            new_chain = {}
            # Find common ancestor
            while b.header.number >= int(self.db.get('GENESIS_NUMBER')):
                new_chain[b.header.number] = b
                key = b'block:%d' % b.header.number
                orig_at_height = self.db.get(key) if key in self.db else None
                if orig_at_height == b.header.hash:
                    break
                if b.prevhash not in self.db or self.db.get(b.prevhash) == 'GENESIS':
                    break
                b = self.get_parent(b)
            # Replace block index and tx indices
            replace_from = b.header.number
            for i in itertools.count(replace_from):
                log.info('Rewriting height %d' % i)
                key = b'block:%d' % i
                orig_at_height = self.db.get(key) if key in self.db else None
                if orig_at_height:
                    self.db.delete(key)
                    orig_block_at_height = self.get_block(orig_at_height)
                    for tx in orig_block_at_height.transactions:
                        if b'txindex:' + tx.hash in self.db:
                            self.db.delete(b'txindex:' + tx.hash)
                if i in new_chain:
                    new_block_at_height = new_chain[i]
                    self.db.put(key, new_block_at_height.header.hash)
                    # BUGFIX: this inner loop used `i` as its index,
                    # clobbering the outer height counter so the
                    # loop-exit test below ran against a transaction
                    # index instead of the height.
                    for j, tx in enumerate(new_block_at_height.transactions):
                        self.db.put(b'txindex:' + tx.hash,
                                    rlp.encode([new_block_at_height.number, j]))
                # Stop once there is nothing to add or remove at this height.
                if i not in new_chain and not orig_at_height:
                    break
            self.head_hash = block.header.hash
            self.state = temp_state
    # Block has no parent yet
    else:
        if block.header.prevhash not in self.parent_queue:
            self.parent_queue[block.header.prevhash] = []
        self.parent_queue[block.header.prevhash].append(block)
        log.info('Got block %d (%s) with prevhash %s, parent not found. Delaying for now' %
                 (block.number, encode_hex(block.hash), encode_hex(block.prevhash)))
        return False
    self.add_child(block)
    self.db.put('head_hash', self.head_hash)
    self.db.put(block.hash, rlp.encode(block))
    self.db.put(b'deletes:' + block.hash, b''.join(deletes))
    print('Saved %d trie node deletes for block %d (%s)' %
          (len(deletes), block.number, utils.encode_hex(block.hash)))
    # Delete old junk data beyond the history horizon
    old_block_hash = self.get_blockhash_by_number(block.number - self.max_history)
    if old_block_hash:
        try:
            deletes = self.db.get(b'deletes:' + old_block_hash)
            print('Deleting up to %d trie nodes' % (len(deletes) // 32))
            rdb = RefcountDB(self.db)
            for i in range(0, len(deletes), 32):
                rdb.delete(deletes[i:i + 32])
            self.db.delete(b'deletes:' + old_block_hash)
        except KeyError as e:
            # Best-effort pruning: entries may already be gone.
            print(e)
    self.db.commit()
    assert (b'deletes:' + block.hash) in self.db
    log.info('Added block %d (%s) with %d txs and %d gas' %
             (block.header.number, encode_hex(block.header.hash)[:8],
              len(block.transactions), block.header.gas_used))
    if self.new_head_cb and block.header.number != 0:
        self.new_head_cb(block)
    # Replay any queued children that were waiting for this block.
    if block.header.hash in self.parent_queue:
        for _blk in self.parent_queue[block.header.hash]:
            self.add_block(_blk)
        del self.parent_queue[block.header.hash]
    return True