def VE(self, data=None):
    """
    Version handler.

    With no payload, replies to the peer with our version number and
    genesis previous-headerhash. With a payload, records the peer's
    version, and drops the connection unless the peer's
    genesis_prev_headerhash matches ours (or the payload is malformed).
    :return:
    """
    if not data:
        payload = {
            'version': config.dev.version_number,
            'genesis_prev_headerhash': config.dev.genesis_prev_headerhash
        }
        wrapped = self.wrap_message('VE', helper.json_encode(payload))
        self.transport.write(wrapped)
        return

    try:
        decoded = helper.json_decode(data)
        self.version = str(decoded['version'])

        logger.info('%s version: %s | genesis prev_headerhash %s',
                    self.transport.getPeer().host,
                    decoded['version'],
                    decoded['genesis_prev_headerhash'])

        if decoded['genesis_prev_headerhash'] == config.dev.genesis_prev_headerhash:
            # matching genesis: keep the peer, nothing else to do
            return

        logger.warning('%s genesis_prev_headerhash mismatch', self.conn_identity)
        logger.warning('Expected: %s', config.dev.genesis_prev_headerhash)
        logger.warning('Found: %s', decoded['genesis_prev_headerhash'])
    except Exception as e:
        logger.error('Peer Caused Exception %s', self.conn_identity)
        logger.exception(e)

    # reached on mismatch or on a malformed payload: disconnect the peer
    self.transport.loseConnection()
def _parse_buffer(self) -> Optional[list]:
    # FIXME: This parsing/wire protocol needs to be replaced
    """
    Extract one framed message from ``self.buffer``.

    Wire format (per the doctest below): 3-byte initiator sequence,
    8 ASCII-hex characters encoding the payload length as a
    big-endian uint32, one separator byte, the payload itself, and a
    3-byte terminator located at offset ``12 + length``.

    :return: a one-element list with the extracted payload bytes;
             ``[]`` when the current frame was discarded but a later
             initiator remains in the buffer; ``None`` when nothing
             (more) can be parsed yet or the buffer was wiped.
    :rtype: Optional[list]

    >>> p=P2PProtocol()
    >>> p.buffer = bytes(hstr2bin("ff00003030303030303237007b2264617461223a2031323334352c202274797065223a2022544553544b45595f31323334227d0000ff"))
    >>> messages = p._parse_buffer()
    >>> messages
    [b'{"data": 12345, "type": "TESTKEY_1234"}']
    """
    # FIXME
    if len(self.buffer) == 0:
        return None

    d = self.buffer.find(P2PProtocol.MSG_INITIATOR)  # find the initiator sequence
    num_d = self.buffer.count(P2PProtocol.MSG_INITIATOR)  # count the initiator sequences

    if d == -1:  # if no initiator sequences found then wipe buffer..
        logger.warning('Message data without initiator')
        self.clean_buffer(reason='Message data without initiator')
        return None

    self.buffer = self.buffer[d:]  # delete data up to initiator

    if len(self.buffer) < 8:  # Buffer is still incomplete as it doesn't have message size
        return None

    try:
        # bytes 3..10 hold the payload length as ASCII hex -> uint32 big-endian
        tmp = self.buffer[3:11]
        tmp2 = hstr2bin(tmp.decode())
        tmp3 = bytearray(tmp2)
        m = struct.unpack('>L', tmp3)[0]  # is m length encoded correctly?
    except (UnicodeDecodeError, ValueError):
        # non-hex / non-ASCII length field: the peer is not speaking our protocol
        logger.info('Peer not following protocol %s', self.conn_identity)
        self.transport.loseConnection()
        return None
    except Exception as e:
        logger.exception(e)
        if num_d > 1:  # if not, is this the only initiator in the buffer?
            # NOTE(review): if the re-scan below finds no further initiator,
            # d is -1 here and is passed to clean_buffer as upto — confirm
            # clean_buffer handles upto=-1 as intended.
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Struct.unpack error attempting to decipher msg length, next msg preserved',
                upto=d)  # no
            return []
        else:
            self.clean_buffer(
                reason='Struct.unpack error attempting to decipher msg length..')  # yes
            return None

    if m > config.dev.message_buffer_size:  # check if size is more than 500 KB
        if num_d > 1:
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Size is more than 500 KB, next msg preserved',
                upto=d)
            return []
        else:
            self.clean_buffer(reason='Size is more than 500 KB')
            return None

    e = self.buffer.find(P2PProtocol.MSG_TERMINATOR)  # find the terminator sequence

    if e == -1:  # no terminator sequence found
        if len(self.buffer) > 12 + m + 3:
            # enough bytes for a whole frame and still no terminator: drop it
            if num_d > 1:  # if not is this the only initiator sequence?
                self.buffer = self.buffer[3:]
                d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
                self.clean_buffer(
                    reason='Message without appropriate terminator, next msg preserved',
                    upto=d)  # no
                return []
            else:
                self.clean_buffer(
                    reason='Message without initiator and terminator')  # yes
        return None

    if e != 3 + 9 + m:  # is terminator sequence located correctly?
        if num_d > 1:  # if not is this the only initiator sequence?
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Message terminator incorrectly positioned, next msg preserved',
                upto=d)  # no
            return []
        else:
            self.clean_buffer(
                reason='Message terminator incorrectly positioned')  # yes
        return None

    # frame is well formed: payload lives at [12, 12+m)
    messages = [self.buffer[12:12 + m]]
    self.buffer = self.buffer[12 + m + 3:]  # reset the buffer to after the msg
    return messages
def state_add_block(self, chain, block):
    """
    Validate *block* against current state and, on success, persist the
    resulting balances/nonces/pubhashes and stake lists, then advance
    the stored blockheight.

    Block 1 gets special handling: the coinbase is applied first, the
    genesis stake list is built from the block's stake transactions,
    the epoch seed is computed and the sorted stake list is checked
    against the declared stake selector.

    :param chain: chain object providing m_blockchain, score(), wallet, etc.
    :param block: the block to apply
    :return: True when applied; False on a validation failure;
             bare None (falsy) on stake-selector problems.
    """
    # address_txn caches state entries touched by this block
    address_txn = dict()

    # preload state for every address the block touches
    for tx in block.transactions:
        if tx.txfrom not in address_txn:
            address_txn[tx.txfrom] = self.state_get_address(tx.txfrom)
        if tx.subtype == transaction.TX_SUBTYPE_TX:
            if tx.txto not in address_txn:
                address_txn[tx.txto] = self.state_get_address(tx.txto)

    # reminder contents: (state address -> nonce, balance, [pubhash]) (stake -> address, hash_term, nonce)

    next_sl = self.next_stake_list_get()
    sl = self.stake_list_get()

    # blocks_left = number of blocks remaining in the current epoch
    blocks_left = block.blockheader.blocknumber - (block.blockheader.epoch * config.dev.blocks_per_epoch)
    blocks_left = config.dev.blocks_per_epoch - blocks_left

    if block.blockheader.blocknumber == 1:
        # Start Updating coin base txn
        tx = block.transactions[0]
        pub = tx.pub
        pub = [''.join(pub[0][0]), pub[0][1], ''.join(pub[2:])]
        pubhash = sha256(''.join(pub))

        if tx.nonce != address_txn[tx.txfrom][0] + 1:
            logger.warning('nonce incorrect, invalid tx')
            logger.warning('subtype: %s', tx.subtype)
            logger.warning('%s actual: %s expected: %s', tx.txfrom, tx.nonce, address_txn[tx.txfrom][0] + 1)
            return False

        if pubhash in address_txn[tx.txfrom][2]:
            logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
            logger.warning('subtype: %s', tx.subtype)
            return False

        address_txn[tx.txfrom][0] += 1
        address_txn[tx.txto][1] += tx.amount
        address_txn[tx.txfrom][2].append(pubhash)
        # Coinbase update end here

        for tx in block.transactions:
            if tx.subtype == transaction.TX_SUBTYPE_STAKE:
                # update txfrom, hash and stake_nonce against genesis for current or next stake_list
                if tx.txfrom == block.blockheader.stake_selector:
                    if tx.txfrom in chain.m_blockchain[0].stake_list:
                        # the selector enters the current stake list with stake_nonce 1
                        sl.append([tx.txfrom, tx.hash, 1, tx.first_hash, tx.balance])
                        address_txn[tx.txfrom][0] += 1
                    else:
                        logger.warning('designated staker not in genesis..')
                        return False
                else:
                    if tx.txfrom in chain.m_blockchain[0].stake_list:
                        sl.append([tx.txfrom, tx.hash, 0, tx.first_hash, tx.balance])
                    else:
                        next_sl.append([tx.txfrom, tx.hash, 0, tx.first_hash, tx.balance])

                pubhash = tx.generate_pubhash(tx.pub)
                address_txn[tx.txfrom][2].append(pubhash)

        epoch_seed = self.calc_seed(sl)
        chain.block_chain_buffer.epoch_seed = epoch_seed
        self.put_epoch_seed(epoch_seed)

        # rank stakers by score; the winner must be the declared selector
        stake_list = sorted(sl,
                            key=lambda staker: chain.score(stake_address=staker[0],
                                                           reveal_one=sha256(str(staker[1])),
                                                           balance=self.state_balance(staker[0]),
                                                           seed=epoch_seed))

        if stake_list[0][0] != block.blockheader.stake_selector:
            logger.info('stake selector wrong..')
            # NOTE(review): bare return (None, falsy) here, unlike the
            # explicit False returns above.
            return

        hashchain(chain.my[0][1], epoch=0)
        chain.hash_chain = chain.my[0][1].hc
        chain.wallet.f_save_wallet()

    else:
        logger.info('BLOCK: %s epoch: %s blocks_left: %s stake_selector %s',
                    block.blockheader.blocknumber,
                    block.blockheader.epoch,
                    blocks_left - 1,
                    block.blockheader.stake_selector)

        found = False
        for s in sl:
            if block.blockheader.stake_selector == s[0]:
                found = True
                break

        if not found:
            logger.warning('stake selector not in stake_list_get')
            return

        # cycle through every tx in the new block to check state
        for tx in block.transactions:
            pub = tx.pub
            pub = [''.join(pub[0][0]), pub[0][1], ''.join(pub[2:])]
            pubhash = sha256(''.join(pub))

            if tx.nonce != address_txn[tx.txfrom][0] + 1:
                logger.warning('nonce incorrect, invalid tx')
                logger.warning('%s actual: %s expected: %s', tx.txfrom, tx.nonce, address_txn[tx.txfrom][0] + 1)
                return False

            if pubhash in address_txn[tx.txfrom][2]:
                logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
                return False

            if tx.subtype == transaction.TX_SUBTYPE_TX:
                # balance check for plain transfers
                if address_txn[tx.txfrom][1] - tx.amount < 0:
                    logger.warning('%s %s exceeds balance, invalid tx', tx, tx.txfrom)
                    logger.warning('Buffer State Balance: %s Transfer Amount %s',
                                   address_txn[tx.txfrom][1], tx.amount)
                    return False
            elif tx.subtype == transaction.TX_SUBTYPE_STAKE:
                # epoch_blocknum = how far into the current epoch this block is
                epoch_blocknum = config.dev.blocks_per_epoch - blocks_left
                if (not tx.first_hash) and epoch_blocknum >= config.dev.stake_before_x_blocks:
                    logger.warning('Block rejected #%s due to ST without first_reveal beyond limit',
                                   block.blockheader.blocknumber)
                    logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                    logger.warning('epoch_blocknum: %s Threshold: %s',
                                   epoch_blocknum, config.dev.stake_before_x_blocks)
                    return False

                found = False
                for s in next_sl:
                    # already in the next stake list, ignore for staker list but update as usual the state_for_address..
                    if tx.txfrom == s[0]:
                        found = True
                        if s[3] is None and tx.first_hash is not None:
                            # first_hash arriving late must respect the staker's threshold block
                            threshold_block = self.get_staker_threshold_blocknum(next_sl, s[0])
                            if epoch_blocknum < threshold_block - 1:
                                logger.warning('Block rejected #%s due to ST before threshold',
                                               block.blockheader.blocknumber)
                                logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                                logger.warning('epoch_blocknum: %s Threshold: %s',
                                               epoch_blocknum, threshold_block - 1)
                                return False
                            s[3] = tx.first_hash
                        break

                address_txn[tx.txfrom][2].append(pubhash)
                if not found:
                    next_sl.append([tx.txfrom, tx.hash, 0, tx.first_hash, tx.balance])

            address_txn[tx.txfrom][0] += 1

            if tx.subtype == transaction.TX_SUBTYPE_TX:
                address_txn[tx.txfrom][1] -= tx.amount

            if tx.subtype in (transaction.TX_SUBTYPE_TX, transaction.TX_SUBTYPE_COINBASE):
                address_txn[tx.txto][1] += tx.amount

            address_txn[tx.txfrom][2].append(pubhash)

    # persist every touched state entry
    for address in address_txn:
        self.db.put(address, address_txn[address])

    # NOTE(review): this condition is equivalent to blocknumber >= 1,
    # i.e. always true for any applied block — candidate for simplification.
    if block.blockheader.blocknumber > 1 or block.blockheader.blocknumber == 1:
        self.stake_list_put(sl)
        self.next_stake_list_put(sorted(next_sl, key=itemgetter(1)))

    if blocks_left == 1:
        # last block of the epoch: rotate stake lists and refresh hashchains
        logger.info('EPOCH change: resetting stake_list, activating next_stake_list, updating PRF with '
                    'seed+entropy updating wallet hashchains..')
        sl = next_sl
        # NOTE(review): in Python 3 filter() is a lazy iterator, not a list —
        # confirm stake_list_put accepts an iterator here.
        sl = filter(lambda staker: staker[3] is not None, sl)
        self.stake_list_put(sl)
        del next_sl[:]
        self.next_stake_list_put(next_sl)
        hashchain(chain.my[0][1], epoch=block.blockheader.epoch + 1)
        chain.hash_chain = chain.my[0][1].hc
        chain.wallet.f_save_wallet()

    self.db.put('blockheight', chain.height() + 1)
    logger.info('%s %s tx passed verification.',
                block.blockheader.headerhash, len(block.transactions))
    return True
def get_stake_balance(self, stake_address: bytes) -> int:
    """
    Return the staked balance tracked for *stake_address*.

    :param stake_address: address whose stake balance is requested
    :return: the balance recorded for the address in ``self.sv_dict``
    :raises Exception: if the address is not a tracked stake validator
    """
    if stake_address not in self.sv_dict:
        logger.warning('Stake address not found in Stake Validators Tracker')
        # include the offending address instead of raising a bare Exception
        raise Exception('Stake address %s not found in Stake Validators Tracker' % stake_address)
    return self.sv_dict[stake_address].balance
def add_block(self, block):
    """
    Validate *block* and insert it into the chain buffer at its height,
    keeping only the best-scoring block per height.

    A block is accepted when its parent is either the current mainchain
    tip or an already-buffered block at height-1. When a buffered slot
    at the same height exists, the new block replaces it only if its
    score is lower, or on an exact score tie, if its headerhash is
    numerically smaller; any buffered descendants are then discarded.
    Blocks older than the reorg limit are flushed to the mainchain.

    :param block: candidate block
    :return: True on insertion/consideration; False on validation or
             height failures; 0 for an exact duplicate; bare None on
             parent-hash or state failures.
             NOTE(review): these mixed falsy returns (False/None/0) are
             indistinguishable to a truthiness check by callers.
    """
    if not block.validate_block(self.chain):
        logger.info('Block validation failed')
        logger.info('Block #%s', block.blockheader.blocknumber)
        logger.info('Stake_selector %s', block.blockheader.stake_selector)
        return False

    blocknum = block.blockheader.blocknumber
    headerhash = block.blockheader.headerhash
    prev_headerhash = block.blockheader.prev_blockheaderhash

    # already part of (or behind) the mainchain
    if blocknum <= self.chain.height():
        return False

    # the parent must be the mainchain tip or a buffered block at height-1
    if blocknum - 1 == self.chain.height():
        if prev_headerhash != self.chain.m_blockchain[-1].blockheader.headerhash:
            logger.warning('Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks))
            return
    elif blocknum - 1 not in self.blocks or prev_headerhash != self.blocks[blocknum - 1][0].block.blockheader.headerhash:
        logger.warning('Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks))
        return

    # exact duplicate of the buffered block at this height
    if blocknum in self.blocks and headerhash == self.blocks[blocknum][0].block.blockheader.headerhash:
        return 0

    # flush blocks past the reorg window into the mainchain
    if (blocknum - config.dev.reorg_limit) in self.blocks:
        self.move_to_mainchain(blocknum - config.dev.reorg_limit)

    stake_reward = {}

    state_buffer = StateBuffer()
    block_buffer = None
    if blocknum - 1 == self.chain.height():
        # parent is the mainchain tip: branch off the persisted state
        stake_validators_list = deepcopy(self.state.stake_validators_list)
        stxn_state = dict()
        # TODO: Optimization required
        if not self.state_add_block_buffer(block, stake_validators_list, stxn_state):
            logger.warning('State_validate_block failed inside chainbuffer #%d',
                           block.blockheader.blocknumber)
            return
        block_buffer = BlockBuffer(block, stake_reward, self.chain, self.epoch_seed,
                                   self.get_st_balance(block.transactions[0].txto,
                                                       block.blockheader.blocknumber))
        state_buffer.set_next_seed(block.blockheader.reveal_hash, self.epoch_seed)
        state_buffer.stake_validators_list = stake_validators_list
        state_buffer.stxn_state = stxn_state
        state_buffer.update_stxn_state(block, self.state)
    else:
        # parent is a buffered block: branch off its buffered state
        block_state_buffer = self.blocks[blocknum - 1]
        parent_state_buffer = block_state_buffer[1]
        parent_seed = block_state_buffer[1].next_seed

        stake_validators_list = deepcopy(parent_state_buffer.stake_validators_list)
        stxn_state = deepcopy(parent_state_buffer.stxn_state)
        if not self.state_add_block_buffer(block, stake_validators_list, stxn_state):
            logger.warning('State_validate_block failed inside chainbuffer #%s',
                           block.blockheader.blocknumber)
            return
        block_buffer = BlockBuffer(block, stake_reward, self.chain, parent_seed,
                                   self.get_st_balance(block.transactions[0].txto,
                                                       block.blockheader.blocknumber))
        state_buffer.stake_validators_list = stake_validators_list
        state_buffer.stxn_state = stxn_state
        state_buffer.update(self.state, parent_state_buffer, block)

    if blocknum not in self.blocks:
        self.blocks[blocknum] = [block_buffer, state_buffer]
    else:
        old_block_buffer = self.blocks[blocknum][0]
        if block_buffer.score < old_block_buffer.score:
            # lower score wins; descendants of the loser are dropped
            self.blocks[blocknum] = [block_buffer, state_buffer]
            if blocknum + 1 in self.blocks:
                self.remove_blocks(blocknum + 1)
        elif block_buffer.score == old_block_buffer.score:  # When two blocks having equal score
            # tie-break on the numerically smaller headerhash
            oldheaderhash = old_block_buffer.block.blockheader.headerhash
            newheaderhash = block_buffer.block.blockheader.headerhash
            if int(bin2hstr(newheaderhash), 16) < int(bin2hstr(oldheaderhash), 16):
                self.blocks[blocknum] = [block_buffer, state_buffer]
                if blocknum + 1 in self.blocks:
                    self.remove_blocks(blocknum + 1)

    self.add_txns_buffer()
    return True
def validate_block(self, chain):  # check validity of new block..
    """
    Validate this block against the chain.

    Checks, in order: header validity against the parent block, a
    non-empty transaction list whose first entry is a COINBASE paying
    the declared stake selector exactly block_reward + fee_reward,
    the reveal/vote hashchain proofs (genesis-style for block 1,
    stake-validators-list based otherwise), and finally every
    transaction in the block.

    :param chain: chain object providing the block buffer and hashchain helpers
    :return: True when valid, False otherwise (any exception counts as invalid)
    """
    try:
        blk_header = self.blockheader
        last_blocknum = blk_header.blocknumber - 1
        last_block = chain.block_chain_buffer.get_block_n(last_blocknum)

        if not self.blockheader.validate(last_block.blockheader):
            return False

        if len(self.transactions) == 0:
            logger.warning('BLOCK : There must be atleast 1 txn')
            return False

        coinbase_tx = self.transactions[0]

        if coinbase_tx.subtype != TX_SUBTYPE_COINBASE:
            logger.warning('BLOCK : First txn must be a COINBASE txn')
            return False

        # NOTE(review): sv_list is unused below — assuming stake_list_get()
        # has no side effects, this assignment is a candidate for removal.
        sv_list = chain.block_chain_buffer.stake_list_get(self.blockheader.blocknumber)

        if coinbase_tx.txto != self.blockheader.stake_selector:
            logger.info('Non matching txto and stake_selector')
            # fix: log the coinbase txto (previously logged txfrom under the 'txto' label)
            logger.info('txto: %s stake_selector %s',
                        coinbase_tx.txto, self.blockheader.stake_selector)
            return False

        if coinbase_tx.amount != self.blockheader.block_reward + self.blockheader.fee_reward:
            logger.info('Block_reward doesnt match')
            logger.info('Found: %s', coinbase_tx.amount)
            logger.info('Expected: %s',
                        self.blockheader.block_reward + self.blockheader.fee_reward)
            logger.info('block_reward: %s', self.blockheader.block_reward)
            logger.info('fee_reward: %s', self.blockheader.fee_reward)
            return False

        if blk_header.blocknumber == 1:
            # block 1: the selector's stake tx must be present and its
            # hashchain terminator must match the header's reveal_hash
            found = False
            for tx in self.transactions:
                if tx.subtype == TX_SUBTYPE_STAKE:
                    if tx.txfrom == blk_header.stake_selector:
                        found = True

                        reveal_hash, vote_hash = chain.select_hashchain(
                            chain.m_blockchain[-1].blockheader.headerhash,
                            self.transactions[0].txto,
                            tx.hash,
                            blocknumber=1)

                        if sha256(blk_header.reveal_hash) != reveal_hash:
                            logger.warning(
                                'reveal_hash does not hash correctly to terminator: failed validation')
                            return False

            if not found:
                logger.warning('Stake selector not in block.stake: failed validation')
                return False

        else:  # we look in stake_list for the hash terminator and hash to it..
            stake_validators_list = chain.block_chain_buffer.get_stake_validators_list(
                self.blockheader.blocknumber)

            if self.transactions[0].txto not in stake_validators_list.sv_list:
                logger.warning('Stake selector not in stake_list for this epoch..')
                return False

            if not stake_validators_list.validate_hash(blk_header.reveal_hash,
                                                       blk_header.blocknumber,
                                                       config.dev.hashchain_nums - 1,
                                                       self.transactions[0].txto):
                logger.warning(
                    'Supplied hash does not iterate to terminator: failed validation')
                return False

            target_chain = select_target_hashchain(blk_header.prev_blockheaderhash)

            if not stake_validators_list.validate_hash(blk_header.vote_hash,
                                                       blk_header.blocknumber,
                                                       target_chain,
                                                       self.transactions[0].txto):
                logger.warning('Not all the reveal_hashes are valid..')
                return False

        if not self._validate_tx_in_block(chain):
            logger.warning('Block validate_tx_in_block error: failed validation')
            return False

    except Exception as e:
        logger.exception(e)
        return False

    return True
def next_stake_list_put(self, next_sl):
    """
    Persist *next_sl* (the next-epoch stake list) under the
    'next_stake_list' key of the state database.

    :param next_sl: next-epoch stake list to store
    :return: True on success, False when the database write failed.
             (Previously success fell through returning None, so the
             outcome was unreportable — both paths were falsy.)
    """
    try:
        self.db.put('next_stake_list', next_sl)
    except Exception as e:
        # best-effort persistence: log and report failure instead of raising
        logger.warning("next_stake_list_put: %s %s", type(e), e)
        return False
    return True
def state_update(self, block, stake_validators_list, address_txn):
    """
    Validate *block* against the supplied stake validators list and
    apply its transactions to *address_txn* (mutated in place).

    :param block: block whose transactions are validated and applied
    :param stake_validators_list: tracker mutated for STAKE txs
        (first_hash updates / new next-epoch validators)
    :param address_txn: per-address state cache, entries are
        [nonce, balance, [pubhash]]
    :return: True when all transactions pass; False on any failure;
             bare None when the stake selector is unknown or banned.
    """
    # reminder contents: (state address -> nonce, balance, [pubhash]) (stake -> address, hash_term, nonce)

    blocks_left = helper.get_blocks_left(block.blockheader.blocknumber)

    if block.blockheader.stake_selector not in stake_validators_list.sv_list:
        logger.warning('stake selector not in stake_list_get')
        return

    if stake_validators_list.sv_list[block.blockheader.stake_selector].is_banned:
        logger.warning('stake selector is in banned list')
        return

    # cycle through every tx in the new block to check state
    for tx in block.transactions:
        pubhash = tx.generate_pubhash(tx.PK, tx.ots_key)

        # coinbase nonces are tracked on the stake validator, not the address
        if tx.subtype == TX_SUBTYPE_COINBASE:
            expected_nonce = stake_validators_list.sv_list[tx.txfrom].nonce + 1
        else:
            expected_nonce = address_txn[tx.txfrom][0] + 1

        if tx.nonce != expected_nonce:
            logger.warning('nonce incorrect, invalid tx')
            logger.warning('subtype: %s', tx.subtype)
            logger.warning('%s actual: %s expected: %s', tx.txfrom, tx.nonce, expected_nonce)
            return False

        # TODO: To be fixed later
        if pubhash in address_txn[tx.txfrom][2]:
            # one-time-signature key reuse: reject the block
            logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
            logger.warning('subtype: %s', tx.subtype)
            logger.info(pubhash)
            logger.info(address_txn[tx.txfrom][2])
            return False

        if tx.subtype == TX_SUBTYPE_TX:
            # NOTE(review): this overdraft check ignores tx.fee while the
            # debit below applies it — confirm which is intended.
            if address_txn[tx.txfrom][1] - tx.amount < 0:
                logger.warning('%s %s exceeds balance, invalid tx', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Transfer Amount %s',
                               address_txn[tx.txfrom][1], tx.amount)
                return False
        elif tx.subtype == TX_SUBTYPE_STAKE:
            # epoch_blocknum = how far into the current epoch this block is
            epoch_blocknum = config.dev.blocks_per_epoch - blocks_left
            if (not tx.first_hash) and epoch_blocknum >= config.dev.stake_before_x_blocks:
                logger.warning('Block rejected #%s due to ST without first_reveal beyond limit',
                               block.blockheader.blocknumber)
                logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                logger.warning('epoch_blocknum: %s Threshold: %s', epoch_blocknum,
                               config.dev.stake_before_x_blocks)
                return False

            address_txn[tx.txfrom][2].append(pubhash)
            next_sv_list = stake_validators_list.next_sv_list
            if tx.txfrom in next_sv_list:
                # known next-epoch validator: a late first_hash must respect its threshold
                if not next_sv_list[tx.txfrom].first_hash and tx.first_hash:
                    threshold_blocknum = stake_validators_list.get_threshold(tx.txfrom)
                    if epoch_blocknum < threshold_blocknum - 1:
                        logger.warning('Block rejected #%s due to ST before threshold',
                                       block.blockheader.blocknumber)
                        logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                        logger.warning('epoch_blocknum: %s Threshold: %s', epoch_blocknum,
                                       threshold_blocknum - 1)
                        return False
                    stake_validators_list.set_first_hash(tx.txfrom, tx.first_hash)
            else:
                stake_validators_list.add_next_sv(tx.txfrom, tx.slave_public_key, tx.hash,
                                                  tx.first_hash, tx.balance)

        if tx.subtype != TX_SUBTYPE_COINBASE:
            address_txn[tx.txfrom][0] += 1

        if tx.subtype == TX_SUBTYPE_TX:
            # NOTE(review): this debits amount MINUS fee; a fee normally adds
            # to the sender's cost (amount + fee) — confirm the sign.
            address_txn[tx.txfrom][1] -= tx.amount - tx.fee

        if tx.subtype in (TX_SUBTYPE_TX, TX_SUBTYPE_COINBASE):
            address_txn[tx.txto][1] += tx.amount

        address_txn[tx.txfrom][2].append(pubhash)

    return True
def CB(self, data):
    """
    Check Blockheight

    Records the peer's reported blockheight, detects headerhash
    mismatches at our own height (fork indication), and at height 1
    kicks off the genesis PoS countdown exactly once per factory.
    :return:

    # FIXME: This test grew too much. Convert doctest into unit test using mocks
    >>> from collections import namedtuple, defaultdict
    >>> p=P2PProtocol()
    >>> Transport = namedtuple("Transport", "getPeer write")
    >>> Peer = namedtuple("Peer", "host port")
    >>> Factory = namedtuple("Factory", "peers_blockheight chain nodeState")
    >>> Chain = namedtuple("Chain", "m_blockchain m_blockheight")
    >>> def getPeer():
    ...     return Peer("host", 1234)
    >>> message = None
    >>> def write(msg):
    ...     global message
    ...     message = msg
    >>> def m_blockheight():
    ...     return 0
    >>> p.transport = Transport(getPeer, write)
    >>> p.chain = Chain([], m_blockheight)
    >>> tmp = NodeState()
    >>> tmp.state = NState.synced
    >>> p.factory = Factory(defaultdict(), p.chain, tmp)
    >>> p.CB('{"block_number": 3, "headerhash": [53, 130, 168, 57, 183, 215, 120, 178, 209, 30, 194, 223, 221, 58, 72, 124, 62, 148, 110, 81, 19, 189, 27, 243, 218, 87, 217, 203, 198, 97, 84, 19]}')
    """
    z = helper.json_decode(data)
    block_number = z['block_number']
    headerhash = tuple(z['headerhash'])

    # track the reported height per "host:port" peer key
    tmp = "{}:{}".format(self.transport.getPeer().host,
                         self.transport.getPeer().port)
    self.factory.peers_blockheight[tmp] = z['block_number']

    self.blockheight = block_number

    logger.info('>>>Blockheight from: %s blockheight: %s local blockheight: %s %s',
                self.transport.getPeer().host, block_number,
                self.factory.chain.m_blockheight(), str(time.time()))

    if self.factory.nodeState.state == NState.syncing:
        return

    if block_number == self.factory.chain.m_blockheight():
        # if self.factory.chain.m_blockchain[block_number].blockheader.headerhash != headerhash:
        if self.factory.chain.m_get_block(block_number).blockheader.headerhash != headerhash:
            logger.warning('>>> headerhash mismatch from %s',
                           self.transport.getPeer().host)

            # initiate fork recovery and protection code here..
            # call an outer function which sets a flag and scrutinises the chains from all connected hosts to see what is going on..
            # again need to think this one through in detail..
            return

    if block_number > self.factory.chain.m_blockheight():
        return

    if self.factory.chain.height() == 1 and self.factory.genesis == 0:
        # set the flag so that no other Protocol instances trigger the genesis stake functions..
        self.factory.genesis = 1
        logger.info('genesis pos countdown to block 1 begun, 60s until stake tx circulated..')
        reactor.callLater(1, self.factory.pos.pre_pos_1)
        return

    # connected to multiple hosts and already passed through..
    elif self.factory.chain.height() == 1 and self.factory.genesis == 1:
        return
def MR(self, data):
    """
    Message Receipt handler.

    Decodes a message receipt from the peer and drops it when the
    type is disallowed, the node is not ready for it, or the hash is
    already known. Otherwise the peer is registered as a source for
    the hash and, unless a fetch is already pending, the full message
    is requested — block receipts ('BK') first go through hash
    verification and the scoring priority queue.
    :return:
    """
    msg = json.loads(data)
    msg_type = msg['type']

    if msg_type not in MessageReceipt.allowed_types:
        return

    synced = self.factory.nodeState.state == NState.synced

    if msg_type == 'TX':
        if not synced:
            return
        if len(self.factory.chain.pending_tx_pool) >= config.dev.transaction_pool_size:
            logger.warning('TX pool size full, incoming tx dropped. mr hash: %s', msg['hash'])
            return

    if msg_type == 'ST' and self.factory.chain.height() > 1 and not synced:
        return

    master_mr = self.factory.master_mr
    if master_mr.contains(msg['hash'], msg_type):
        return

    master_mr.add_peer(msg['hash'], msg_type, self, msg)

    # a fetch for this hash is already scheduled — nothing more to do
    if master_mr.is_callLater_active(msg['hash']):
        return

    if msg_type == 'BK':
        chain_buffer = self.factory.chain.block_chain_buffer

        if not chain_buffer.verify_BK_hash(msg, self.conn_identity):
            duplicate = chain_buffer.is_duplicate_block(
                blocknum=msg['blocknumber'],
                prev_blockheaderhash=tuple(msg['prev_headerhash']),
                stake_selector=msg['stake_selector'])
            if duplicate:
                self.factory.RFM(msg)
            return

        block_num = msg['blocknumber']
        tracked_num = chain_buffer.bkmr_tracking_blocknumber(self.factory.pos.ntp)

        if tracked_num != self.factory.bkmr_blocknumber:
            # tracking target moved on: restart the priority queue
            self.factory.bkmr_blocknumber = tracked_num
            del self.factory.bkmr_priorityq
            self.factory.bkmr_priorityq = PriorityQueue()

        if block_num == tracked_num and block_num != 1:
            # queue by score; the best candidate gets selected shortly
            score = chain_buffer.score_BK_hash(msg)
            self.factory.bkmr_priorityq.put((score, msg['hash']))
            if not self.factory.bkmr_processor.active():
                self.factory.bkmr_processor = reactor.callLater(1, self.factory.select_best_bkmr)
        else:
            self.factory.RFM(msg)
        return

    self.factory.RFM(msg)
def parse_buffer(self):  # FIXME: This parsing/wire protocol needs to be replaced
    """
    Legacy framing parser: extract one message from ``self.buffer``
    into ``self.messages`` (cf. the newer ``_parse_buffer`` which
    returns the message instead).

    :return: True when a message was appended or another frame may
             follow, False when nothing (more) can be parsed.
    :rtype: bool

    >>> from qrl.core.doctest_data import wrap_message_expected1
    >>> p=P2PProtocol()
    >>> p.buffer = wrap_message_expected1
    >>> found_message = p.parse_buffer()
    >>> p.messages
    [bytearray(b'{"data": 12345, "type": "TESTKEY_1234"}')]
    """
    # FIXME
    if len(self.buffer) == 0:
        return False

    d = self.buffer.find(P2PProtocol.MSG_INITIATOR)  # find the initiator sequence
    num_d = self.buffer.count(P2PProtocol.MSG_INITIATOR)  # count the initiator sequences

    if d == -1:  # if no initiator sequences found then wipe buffer..
        logger.warning('Message data without initiator')
        self.clean_buffer(reason='Message data without initiator')
        return False

    self.buffer = self.buffer[d:]  # delete data up to initiator

    if len(self.buffer) < 8:  # Buffer is still incomplete as it doesn't have message size
        return False

    try:
        # bytes 3..10 hold the payload length as ASCII hex -> uint32 big-endian
        tmp = self.buffer[3:11]
        tmp2 = hstr2bin(tmp.decode())
        tmp3 = bytearray(tmp2)
        m = struct.unpack('>L', tmp3)[0]  # is m length encoded correctly?
    except ValueError as e:
        # NOTE(review): unlike _parse_buffer, a UnicodeDecodeError from
        # tmp.decode() is NOT caught here and falls to the generic handler
        # below; also the bound `e` is unused — confirm intent.
        return False
    except Exception as e:
        logger.exception(e)
        if num_d > 1:  # if not, is this the only initiator in the buffer?
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Struct.unpack error attempting to decipher msg length, next msg preserved',
                upto=d)  # no
            return True
        else:
            self.clean_buffer(
                reason='Struct.unpack error attempting to decipher msg length..')  # yes
            return False

    if m > config.dev.message_buffer_size:  # check if size is more than 500 KB
        if num_d > 1:
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Size is more than 500 KB, next msg preserved',
                upto=d)
            return True
        else:
            self.clean_buffer(reason='Size is more than 500 KB')
            return False

    e = self.buffer.find(P2PProtocol.MSG_TERMINATOR)  # find the terminator sequence

    if e == -1:  # no terminator sequence found
        if len(self.buffer) > 12 + m + 3:
            # enough bytes for a whole frame and still no terminator: drop it
            if num_d > 1:  # if not is this the only initiator sequence?
                self.buffer = self.buffer[3:]
                d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
                self.clean_buffer(
                    reason='Message without appropriate terminator, next msg preserved',
                    upto=d)  # no
                return True
            else:
                self.clean_buffer(
                    reason='Message without initiator and terminator')  # yes
        return False

    if e != 3 + 9 + m:  # is terminator sequence located correctly?
        if num_d > 1:  # if not is this the only initiator sequence?
            self.buffer = self.buffer[3:]
            d = self.buffer.find(P2PProtocol.MSG_INITIATOR)
            self.clean_buffer(
                reason='Message terminator incorrectly positioned, next msg preserved',
                upto=d)  # no
            return True
        else:
            self.clean_buffer(
                reason='Message terminator incorrectly positioned')  # yes
        return False

    self.messages.append(self.buffer[12:12 + m])  # if survived the above then save the msg into the self.messages
    self.buffer = self.buffer[12 + m + 3:]  # reset the buffer to after the msg
    return True
def state_validate_block(self, block, sl, next_sl):
    """
    Validate *block* against the supplied stake lists without touching
    persisted state (addresses are read via the buffered
    ``get_stxn_state``; ``sl``/``next_sl`` are mutated in place).

    :param block: block to validate
    :param sl: current-epoch stake list, entries
        [address, hash, stake_nonce, first_hash, balance]
    :param next_sl: next-epoch stake list, same entry shape
    :return: True when every transaction passes; False on a failing
             transaction; bare None when the stake selector is unknown.
    """
    # on an epoch boundary the next stake list becomes the current one
    if block.blockheader.blocknumber % config.dev.blocks_per_epoch == 0:
        sl = next_sl
        next_sl = list()

    address_txn = dict()
    blocknumber = block.blockheader.blocknumber

    # preload buffered state for every address the block touches
    for tx in block.transactions:
        if tx.txfrom not in address_txn:
            address_txn[tx.txfrom] = self.get_stxn_state(blocknumber, tx.txfrom)
        if tx.subtype == transaction.TX_SUBTYPE_TX:
            if tx.txto not in address_txn:
                address_txn[tx.txto] = self.get_stxn_state(blocknumber, tx.txto)

    found = False

    # blocks_left = number of blocks remaining in the current epoch
    blocks_left = block.blockheader.blocknumber - (block.blockheader.epoch * config.dev.blocks_per_epoch)
    blocks_left = config.dev.blocks_per_epoch - blocks_left

    for s in sl:
        if block.blockheader.stake_selector == s[0]:
            found = True
            break

    if not found:
        logger.warning('stake selector not in stake_list_get')
        logger.warning('stake selector: %s', block.blockheader.stake_selector)
        return

    for tx in block.transactions:
        pubhash = tx.generate_pubhash(tx.pub)

        if tx.nonce != address_txn[tx.txfrom][0] + 1:
            logger.warning('nonce incorrect, invalid tx')
            logger.warning('subtype: %s', tx.subtype)
            logger.warning('%s actual: %s expected: %s',
                           tx.txfrom, tx.nonce, address_txn[tx.txfrom][0] + 1)
            # dump every tx of the block to help diagnose the nonce gap
            for t in block.transactions:
                logger.info('%s %s %s', t.subtype, t.txfrom, t.nonce)
            return False

        if pubhash in address_txn[tx.txfrom][2]:
            # one-time-signature key reuse: reject the block
            logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
            logger.warning('subtype: %s', tx.subtype)
            return False

        if tx.subtype == transaction.TX_SUBTYPE_TX:
            # overdraft check for plain transfers
            if address_txn[tx.txfrom][1] - tx.amount < 0:
                logger.warning('%s %s exceeds balance, invalid tx', tx, tx.txfrom)
                logger.warning('subtype: %s', tx.subtype)
                logger.warning('Buffer State Balance: %s Transfer Amount %s',
                               address_txn[tx.txfrom][1], tx.amount)
                return False
        elif tx.subtype == transaction.TX_SUBTYPE_STAKE:
            # epoch_blocknum = how far into the current epoch this block is
            epoch_blocknum = config.dev.blocks_per_epoch - blocks_left
            if (not tx.first_hash) and epoch_blocknum >= config.dev.stake_before_x_blocks:
                logger.warning('Block rejected #%s due to ST without first_reveal beyond limit',
                               block.blockheader.blocknumber)
                logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                logger.warning('epoch_blocknum: %s Threshold: %s',
                               epoch_blocknum, config.dev.stake_before_x_blocks)
                return False

            found = False
            for s in next_sl:
                # already in the next stake list, ignore for staker list but update as usual the state_for_address..
                if tx.txfrom == s[0]:
                    found = True
                    if s[3] is None and tx.first_hash is not None:
                        # first_hash arriving late must respect the staker's threshold block
                        threshold_block = self.state.get_staker_threshold_blocknum(next_sl, s[0])
                        if epoch_blocknum < threshold_block - 1:
                            logger.warning('Block rejected #%s due to ST before threshold',
                                           block.blockheader.blocknumber)
                            logger.warning('Stake_selector: %s', block.blockheader.stake_selector)
                            logger.warning('epoch_blocknum: %s Threshold: %s',
                                           epoch_blocknum, threshold_block - 1)
                            return False
                        s[3] = tx.first_hash
                    break

            if not found:
                next_sl.append([tx.txfrom, tx.hash, 0, tx.first_hash, tx.balance])

        address_txn[tx.txfrom][0] += 1

        if tx.subtype == transaction.TX_SUBTYPE_TX:
            address_txn[tx.txfrom][1] -= tx.amount

        if tx.subtype in (transaction.TX_SUBTYPE_TX, transaction.TX_SUBTYPE_COINBASE):
            address_txn[tx.txto][1] = address_txn[tx.txto][1] + tx.amount

        address_txn[tx.txfrom][2].append(pubhash)

    return True
def add_block(self, block):
    """Insert a block into the chain buffer and update the strongest chain.

    :param block: candidate block (may extend the main chain or a fork)
    :return: True on accept or already-below-height, 0 on duplicate
             headerhash, None on validation/linkage failure

    NOTE(review): the mixed True/0/None returns all read as "not an
    error" vs falsy to callers -- confirm before normalizing.
    """
    # TODO : minimum block validation in unsynced state
    blocknum = block.blockheader.blocknumber
    headerhash = block.blockheader.headerhash
    prev_headerhash = block.blockheader.prev_blockheaderhash

    # Already at or below the persisted chain height: nothing to do.
    if blocknum <= self.chain.height():
        return True

    # The parent must exist either at the main-chain tip or in the buffer.
    if blocknum - 1 == self.chain.height():
        if prev_headerhash != self.chain.m_blockchain[-1].blockheader.headerhash:
            logger.warning('Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks))
            return
    else:
        if blocknum - 1 not in self.blocks or prev_headerhash not in self.headerhashes[blocknum - 1]:
            logger.warning('Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks))
            return

    if blocknum not in self.blocks:
        self.blocks[blocknum] = []
        self.headerhashes[blocknum] = []

    # Duplicate block at this height.
    if headerhash in self.headerhashes[blocknum]:
        return 0

    # Buffer window full: flush the oldest strongest-chain entry to the
    # persisted main chain.
    if blocknum - self.size in self.strongest_chain:
        self.move_to_mainchain()

    stake_reward = {}

    state_buffer = StateBuffer()
    block_buffer = None
    if blocknum - 1 == self.chain.height():
        # Parent is the persisted tip: validate against on-disk state.
        tmp_stake_list = self.state.stake_list_get()
        tmp_next_stake_list = self.state.next_stake_list_get()

        if blocknum % config.dev.blocks_per_epoch == 0:  # quick fix when a node starts, it already moved to next epoch stake list
            tmp_stake_list, tmp_next_stake_list = tmp_next_stake_list, tmp_stake_list

        # deepcopy: state_validate_block mutates its list arguments.
        if not self.state_validate_block(block, deepcopy(tmp_stake_list), deepcopy(tmp_next_stake_list)):
            logger.warning('State_validate_block failed inside chainbuffer #%d', block.blockheader.blocknumber)
            return

        if blocknum % config.dev.blocks_per_epoch == 0:  # quick fix swapping back values
            tmp_stake_list, tmp_next_stake_list = tmp_next_stake_list, tmp_stake_list

        for st in tmp_stake_list:
            state_buffer.stake_list[st[0]] = st

        for st in tmp_next_stake_list:
            state_buffer.next_stake_list[st[0]] = st

        block_buffer = BlockBuffer(block, stake_reward, self.chain, self.epoch_seed,
                                   self.get_st_balance(block.blockheader.stake_selector,
                                                       block.blockheader.blocknumber))
        state_buffer.set_next_seed(block.blockheader.hash, self.epoch_seed)
        state_buffer.update_stake_list(block)
        state_buffer.update_next_stake_list(block)
        state_buffer.update_stxn_state(block, self.state)
    else:
        # Parent lives in the buffer: locate its state buffer and seed.
        parent_state_buffer = None
        parent_seed = None
        for block_state_buffer in self.blocks[blocknum - 1]:
            prev_block = block_state_buffer[0].block
            if prev_block.blockheader.headerhash == prev_headerhash:
                parent_state_buffer = block_state_buffer[1]
                parent_seed = block_state_buffer[1].next_seed
                break

        if not self.state_validate_block(block,
                                         deepcopy(parent_state_buffer.tx_to_list(parent_state_buffer.stake_list)),
                                         deepcopy(parent_state_buffer.tx_to_list(parent_state_buffer.next_stake_list))):
            logger.warning('State_validate_block failed inside chainbuffer #%d', block.blockheader.blocknumber)
            return

        block_buffer = BlockBuffer(block, stake_reward, self.chain, parent_seed,
                                   self.get_st_balance(block.blockheader.stake_selector,
                                                       block.blockheader.blocknumber))
        state_buffer.update(self.state, parent_state_buffer, block)
    self.blocks[blocknum].append([block_buffer, state_buffer])

    # Strongest-chain bookkeeping: extend, or replace a weaker sibling
    # (lower score wins) and recompute descendants.
    if len(self.strongest_chain) == 0 and self.chain.m_blockchain[-1].blockheader.headerhash == prev_headerhash:
        self.strongest_chain[blocknum] = [block_buffer, state_buffer]
        self.chain.update_tx_metadata(block)
    elif blocknum not in self.strongest_chain and self.strongest_chain[blocknum - 1][0].block.blockheader.headerhash == prev_headerhash:
        self.strongest_chain[blocknum] = [block_buffer, state_buffer]
        self.chain.update_tx_metadata(block)
    elif blocknum in self.strongest_chain:
        old_block_buffer = self.strongest_chain[blocknum][0]
        if old_block_buffer.block.blockheader.prev_blockheaderhash == block_buffer.block.blockheader.prev_blockheaderhash:
            if block_buffer.score < old_block_buffer.score:
                self.strongest_chain[blocknum] = [block_buffer, state_buffer]
                if blocknum + 1 in self.strongest_chain:
                    self.recalculate_strongest_chain(blocknum)

    self.headerhashes[blocknum].append(block.blockheader.headerhash)

    # Refresh the hash chain when this block crosses an epoch boundary.
    epoch = blocknum // config.dev.blocks_per_epoch
    next_epoch = (blocknum + 1) // config.dev.blocks_per_epoch
    if epoch != next_epoch:
        self.update_hash_chain(block.blockheader.blocknumber)

    self.add_txns_buffer()
    return True
def stake_list_put(self, sl): try: self.db.put('stake_list', sl) except Exception as e: logger.warning("stake_list_put: %s %s", type(e), e.message) return False
def create_stake_block(self, reveal_hash, vote_hash, last_block_number):
    """Build a new block, filtering invalid/premature ST txns from the pool.

    The live transaction pool is drained into t_pool2, each txn is
    re-checked, surviving txns are re-added with fresh nonces, the block
    is created, and finally the pool is restored from t_pool2.

    :param reveal_hash: reveal hash for the new block
    :param vote_hash: vote hash for the new block
    :param last_block_number: height of the current chain tip
    :return: the block object produced by m_create_block
    """
    t_pool2 = copy.deepcopy(self.transaction_pool)

    del self.transaction_pool[:]

    curr_epoch = (last_block_number + 1) // config.dev.blocks_per_epoch
    # recreate the transaction pool as in the tx_hash_list, ordered by txhash..
    tx_nonce = defaultdict(int)
    total_txn = len(t_pool2)
    txnum = 0
    # Manual index loop because entries are deleted in place while iterating.
    while txnum < total_txn:
        tx = t_pool2[txnum]

        # Drop txns reusing a one-time pubhash.
        if self.block_chain_buffer.pubhashExists(tx.txfrom, tx.pubhash, last_block_number + 1):
            del t_pool2[txnum]
            total_txn -= 1
            continue

        if tx.subtype == TX_SUBTYPE_STAKE:
            epoch_blocknum = last_block_number + 1 - (curr_epoch * config.dev.blocks_per_epoch)

            # skip 1st st txn without tx.first_hash in case its beyond allowed epoch blocknumber
            if (not tx.first_hash) and epoch_blocknum >= config.dev.stake_before_x_blocks:
                logger.warning('Skipping st as blocknumber beyond stake limit , CreateBlock()')
                logger.warning('Expected ST txn before epoch_blocknumber : %s', config.dev.stake_before_x_blocks)
                logger.warning('Found ST txn in epoch_blocknumber : %s', epoch_blocknum)
                del t_pool2[txnum]
                total_txn -= 1
                continue

            # ST txns are only valid for the epoch they were created for.
            if tx.epoch != curr_epoch:
                logger.warning('Skipping st as epoch mismatch, CreateBlock()')
                logger.warning('Expected st epoch : %s', curr_epoch)
                logger.warning('Found st epoch : %s', tx.epoch)
                del t_pool2[txnum]
                total_txn -= 1
                continue

            balance = 0
            next_sv_list = self.block_chain_buffer.next_stake_list_get(last_block_number + 1)
            if tx.txfrom in next_sv_list:
                balance = next_sv_list[tx.txfrom].balance
                # Staker already queued: its follow-up ST must respect the
                # per-staker threshold block.
                threshold_blocknum = self.block_chain_buffer.get_threshold(last_block_number + 1, tx.txfrom)
                if epoch_blocknum < threshold_blocknum - 1:
                    logger.warning('Skipping st as ST txn before threshold')
                    logger.warning('Expected : %s', threshold_blocknum - 1)
                    logger.warning('Found : %s', epoch_blocknum)
                    del t_pool2[txnum]
                    total_txn -= 1
                    continue

            # balance>0 only in case 1st st already accepted
            if not (balance > 0 or last_block_number == 0):
                if tx.first_hash:
                    # Second ST arrived before the first was accepted.
                    del t_pool2[txnum]
                    total_txn -= 1
                    continue

        self.add_tx_to_pool(tx)
        # Assign sequential nonces on top of the buffered account nonce.
        tx_nonce[tx.txfrom] += 1
        tx.nonce = self.block_chain_buffer.get_stxn_state(last_block_number + 1, tx.txfrom)[0] + tx_nonce[tx.txfrom]
        txnum += 1

    # create the block..
    block_obj = self.m_create_block(reveal_hash, vote_hash, last_block_number)

    # reset the pool back
    self.transaction_pool = copy.deepcopy(t_pool2)

    return block_obj
def state_update_genesis(self, chain, block, address_txn):
    """Apply the genesis block to state: coinbase, stakers, seed, hashchain.

    :param chain: chain object (stake_list, wallet, buffer are updated)
    :param block: the genesis block
    :param address_txn: address -> state mapping, same layout as in
                        state_validate_block ([nonce, balance, pubhashes])
    :return: True on success, False on invalid txn, None when the
             designated stake selector does not win the sort
    """
    # Start Updating coin base txn
    tx = block.transactions[0]  # Expecting only 1 txn of COINBASE subtype in genesis block
    pubhash = tx.generate_pubhash(tx.PK, tx.ots_key)

    # Genesis coinbase must carry nonce 1.
    if tx.nonce != 1:
        logger.warning('nonce incorrect, invalid tx')
        logger.warning('subtype: %s', tx.subtype)
        logger.warning('%s actual: %s expected: %s', tx.txfrom, tx.nonce, address_txn[tx.txfrom][0] + 1)
        return False
    # TODO: To be fixed later
    if pubhash in address_txn[tx.txfrom][2]:
        logger.warning('pubkey reuse detected: invalid tx %s', tx.txhash)
        logger.warning('subtype: %s', tx.subtype)
        return False

    address_txn[tx.txto][1] += tx.amount
    address_txn[tx.txfrom][2].append(pubhash)
    # Coinbase update end here

    tmp_list = []
    for tx in block.transactions:
        if tx.subtype == TX_SUBTYPE_STAKE:
            # update txfrom, hash and stake_nonce against genesis for current or next stake_list
            tmp_list.append([tx.txfrom,
                             tx.hash,
                             0,
                             tx.first_hash,
                             GenesisBlock().get_info()[tx.txfrom],
                             tx.slave_public_key])
            if tx.txfrom == block.blockheader.stake_selector:
                if tx.txfrom in chain.m_blockchain[0].stake_list:
                    self.stake_validators_list.add_sv(tx.txfrom, tx.slave_public_key, tx.hash, tx.first_hash, tx.balance)
                    # Selector already produced this block -> bump nonce.
                    self.stake_validators_list.sv_list[tx.txfrom].nonce += 1
                else:
                    logger.warning('designated staker not in genesis..')
                    return False
            else:
                if tx.txfrom in chain.m_blockchain[0].stake_list:
                    self.stake_validators_list.add_sv(tx.txfrom, tx.slave_public_key, tx.hash, tx.first_hash, tx.balance)
                else:
                    self.stake_validators_list.add_next_sv(tx.txfrom, tx.slave_public_key, tx.hash, tx.first_hash, tx.balance)

            pubhash = tx.generate_pubhash(tx.PK, tx.ots_key)
            address_txn[tx.txfrom][2].append(pubhash)

    epoch_seed = self.stake_validators_list.calc_seed()
    chain.block_chain_buffer.epoch_seed = epoch_seed
    self.put_epoch_seed(epoch_seed)

    # NOTE(review): the buffer seed is immediately overwritten by the
    # legacy calc_seed(tmp_list) below -- confirm which seed is intended.
    chain.block_chain_buffer.epoch_seed = chain.state.calc_seed(tmp_list)
    # Rank stakers by score; the winner must be the block's stake selector.
    chain.stake_list = sorted(tmp_list,
                              key=lambda staker: chain.score(stake_address=staker[0],
                                                             reveal_one=bin2hstr(sha256(reduce(lambda set1, set2: set1 + set2, staker[1]))),
                                                             balance=staker[4],
                                                             seed=chain.block_chain_buffer.epoch_seed))

    chain.block_chain_buffer.epoch_seed = format(chain.block_chain_buffer.epoch_seed, 'x')

    if chain.stake_list[0][0] != block.blockheader.stake_selector:
        logger.info('stake selector wrong..')
        return

    xmss = chain.wallet.address_bundle[0].xmss
    tmphc = hashchain(xmss.get_seed_private(), epoch=0)
    chain.hash_chain = tmphc.hashchain
    chain.wallet.save_wallet()

    return True
def validate(self, last_header): if last_header.block_number != self.block_number - 1: logger.warning('Block numbers out of sequence: failed validation') return False if last_header.headerhash != self.prev_blockheaderhash: logger.warning('Headerhash not in sequence: failed validation') return False if self.generate_headerhash() != self.headerhash: logger.warning('Headerhash false for block: failed validation') return False if self.block_reward != self.block_reward_calc(): logger.warning( 'Block reward incorrect for block: failed validation') return False if self.epoch != self.block_number // config.dev.blocks_per_epoch: logger.warning('Epoch incorrect for block: failed validation') return False if self.timestamp == 0 and self.block_number > 0: logger.warning('Invalid block timestamp ') return False if self.timestamp <= last_header.timestamp: logger.warning('BLOCK timestamp is less than prev block timestamp') logger.warning('block timestamp %s ', self.timestamp) logger.warning('must be greater than %s', last_header.timestamp) return False if last_header.timestamp + config.dev.minimum_minting_delay > self.timestamp: logger.warning( 'BLOCK created without waiting for minimum minting delay') logger.warning('prev_block timestamp %s ', last_header.timestamp) logger.warning('current_block timestamp %s ', self.timestamp) return False return True
def stake_list_put(self, sl): try: self.db.put('stake_list', self.stake_validators_list.to_json()) except Exception as e: logger.warning("stake_list_put: %s %s", type(e), e) return False
def validate_block(self, chain, verify_block_reveal_list=True):  # check validity of new block..
    """Validate a candidate block against chain state.

    Checks the coinbase txn, reward, epoch, stake-selector hashchain
    linkage, optional reveal/vote hash verification, header chaining and
    the transaction merkle root.

    :param chain: chain object providing state, buffer and hashchain access
    :param verify_block_reveal_list: when True, also verify every entry in
        the block's reveal_list and vote_hashes against the stake list
    :return: True when the block is valid, False otherwise
    """
    b = self.blockheader
    last_blocknum = b.blocknumber - 1

    if len(self.transactions) == 0:
        logger.warning('BLOCK : There must be atleast 1 txn')
        return False

    coinbase_tx = self.transactions[0]

    try:
        if coinbase_tx.subtype != transaction.TX_SUBTYPE_COINBASE:
            logger.warning('BLOCK : First txn must be a COINBASE txn')
            return False
    except Exception as e:
        logger.exception(e)
        return False

    if coinbase_tx.txfrom != self.blockheader.stake_selector:
        logger.info('Non matching txto and stake_selector')
        logger.info('txto: %s stake_selector %s', coinbase_tx.txfrom, self.blockheader.stake_selector)
        return False

    if coinbase_tx.amount != self.blockheader.block_reward:
        logger.info('Block_reward doesnt match')
        logger.info('Found: %d Expected: %d', coinbase_tx.amount, self.blockheader.block_reward)
        return False

    if b.timestamp == 0 and b.blocknumber > 0:
        logger.warning('Invalid block timestamp ')
        return False

    if b.block_reward != b.block_reward_calc():
        logger.warning('Block reward incorrect for block: failed validation')
        return False

    # FIX: use floor division. Under Python 3 the old '/' produced a float,
    # so any block number that is not an exact epoch multiple failed this
    # check (int epoch != float quotient). Siblings use '//' already.
    if b.epoch != b.blocknumber // config.dev.blocks_per_epoch:
        logger.warning('Epoch incorrect for block: failed validation')
        return False

    if b.blocknumber == 1:
        # First block: the stake selector's ST txn must be present and its
        # hashchain terminator must match the selected hashchain.
        x = 0
        for tx in self.transactions:
            if tx.subtype == transaction.TX_SUBTYPE_STAKE:
                if tx.txfrom == b.stake_selector:
                    x = 1
                    # renamed from 'hash' to avoid shadowing the builtin
                    selected_hash, _ = chain.select_hashchain(chain.m_blockchain[-1].blockheader.headerhash,
                                                              b.stake_selector, tx.hash, blocknumber=1)

                    if sha256(b.hash) != selected_hash or selected_hash not in tx.hash:
                        logger.warning('Hashchain_link does not hash correctly to terminator: failed validation')
                        return False
        if x != 1:
            logger.warning('Stake selector not in block.stake: failed validation')
            return False
    else:  # we look in stake_list for the hash terminator and hash to it..
        found = False
        # Iterate the supplied hash forward to the epoch terminator.
        terminator = sha256(b.hash)
        for _ in range(b.blocknumber - (b.epoch * config.dev.blocks_per_epoch) + 1):
            terminator = sha256(terminator)

        tmp_stake_list = chain.state.stake_list_get()
        for st in tmp_stake_list:
            if st[0] == b.stake_selector:
                found = True
                if terminator != st[1][-1]:
                    logger.warning('Supplied hash does not iterate to terminator: failed validation')
                    return False

        if not found:
            logger.warning('Stake selector not in stake_list for this epoch..')
            return False

        if len(b.reveal_list) != len(set(b.reveal_list)):
            logger.warning('Repetition in reveal_list')
            return False

        if verify_block_reveal_list:
            # Every reveal must iterate to some staker's terminator.
            i = 0
            for r in b.reveal_list:
                t = sha256(r)
                for _ in range(b.blocknumber - (b.epoch * config.dev.blocks_per_epoch) + 1):  # +1 as reveal has 1 extra hash
                    t = sha256(t)
                for s in tmp_stake_list:
                    if t == s[1][-1]:
                        i += 1

            if i != len(b.reveal_list):
                logger.warning('Not all the reveal_hashes are valid..')
                return False

            # Same for vote hashes, against the target hashchain.
            i = 0
            target_chain = select_target_hashchain(b.prev_blockheaderhash)
            for r in b.vote_hashes:
                t = sha256(r)
                for x in range(b.blocknumber - (b.epoch * config.dev.blocks_per_epoch)):
                    t = sha256(t)
                for s in tmp_stake_list:
                    if t == s[1][target_chain]:
                        i += 1

            if i != len(b.vote_hashes):
                logger.warning('Not all the reveal_hashes are valid..')
                return False

    if b.generate_headerhash() != b.headerhash:
        logger.warning('Headerhash false for block: failed validation')
        return False

    tmp_last_block = chain.block_chain_buffer.get_block_n(last_blocknum)

    if tmp_last_block.blockheader.headerhash != b.prev_blockheaderhash:
        logger.warning('Headerhash not in sequence: failed validation')
        return False

    if tmp_last_block.blockheader.blocknumber != b.blocknumber - 1:
        logger.warning('Block numbers out of sequence: failed validation')
        return False

    if not self.validate_tx_in_block():
        logger.warning('Block validate_tx_in_block error: failed validation')
        return False

    # FIXME(review): for a coinbase-only block this passes a hash (not a
    # list) to merkle_tx_hash -- presumably handled there; confirm.
    if len(self.transactions) == 1:
        txhashes = sha256('')
    else:
        txhashes = []
        for tx_num in range(1, len(self.transactions)):
            tx = self.transactions[tx_num]
            txhashes.append(tx.txhash)

    if merkle_tx_hash(txhashes) != b.tx_merkle_root:
        logger.warning('Block hashedtransactions error: failed validation')
        return False

    return True
def get_epoch_seed(self): try: return self.db.get('epoch_seed') except Exception as e: logger.warning("get_epoch_seed: %s %s", type(e), e) return False
def _create_stake_tx(self, curr_blocknumber):
    """Create, validate, broadcast and pool a StakeTransaction for this node.

    Aborts (returns None) on insufficient balance, missing slave XMSS,
    missing finalized blocks, validation failure, or when an identical ST
    is already pooled.

    :param curr_blocknumber: current block height used for state lookups
    :return: None (side effects only: broadcast, pool update, wallet save)
    """
    sv_dict = self.buffered_chain.stake_list_get(curr_blocknumber)

    # Re-staking: next activation is one epoch after the current one.
    if self.buffered_chain.staking_address in sv_dict:
        activation_blocknumber = sv_dict[self.buffered_chain.staking_address].activation_blocknumber + config.dev.blocks_per_epoch
    else:
        activation_blocknumber = curr_blocknumber + 2  # Activate as Stake Validator, 2 blocks after current block

    balance = self.buffered_chain.get_stxn_state(curr_blocknumber, self.buffered_chain.staking_address).balance

    if balance < config.dev.minimum_staking_balance_required:
        logger.warning('Staking not allowed due to insufficient balance')
        logger.warning('Balance %s', balance)
        return

    slave_xmss = self.buffered_chain.get_slave_xmss(activation_blocknumber)
    if not slave_xmss:
        return

    signing_xmss = self.buffered_chain.wallet.address_bundle[0].xmss

    # Collect headerhashes of finalized blocks at the stamping heights.
    # NOTE(review): blocknumber_headerhash is built but not passed to
    # StakeTransaction.create below -- confirm whether this is intentional.
    blocknumber_headerhash = dict()
    current_blocknumber = self.buffered_chain.height

    for stamp in config.dev.stamping_series:
        if stamp > current_blocknumber:
            continue
        blocknumber = self.get_last_blockheight_endswith(current_blocknumber, stamp)

        finalized_block = self.buffered_chain.get_block(blocknumber)
        if not finalized_block:
            logger.warning('Cannot make ST txn, unable to get blocknumber %s', blocknumber)
            return
        blocknumber_headerhash[blocknumber] = finalized_block.headerhash

    st = StakeTransaction.create(activation_blocknumber=activation_blocknumber,
                                 xmss=signing_xmss,
                                 slavePK=slave_xmss.pk())

    st.sign(signing_xmss)

    tx_state = self.buffered_chain.get_stxn_state(curr_blocknumber, st.txfrom)
    if not (st.validate() and st.validate_extended(tx_state)):
        logger.warning('Create St Txn failed due to validation failure, will retry next block')
        return

    self.p2p_factory.broadcast_st(st)

    # Replace any stale copy of this ST in the pool (same hash, different
    # message hash); bail out if an identical one is already pooled.
    for num in range(len(self.buffered_chain.tx_pool.transaction_pool)):
        t = self.buffered_chain.tx_pool.transaction_pool[num]
        if t.subtype == qrl_pb2.Transaction.STAKE and st.hash == t.hash:
            if st.get_message_hash() == t.get_message_hash():
                return
            self.buffered_chain.tx_pool.remove_tx_from_pool(t)
            break

    self.buffered_chain.tx_pool.add_tx_to_pool(st)
    self.buffered_chain.wallet.save_wallet()
def validate_tx(self, chain, blockheader): sv_list = chain.block_chain_buffer.stake_list_get( blockheader.blocknumber) if blockheader.blocknumber > 1 and sv_list[ self.txto].slave_public_key != self.PK: logger.warning('Stake validator doesnt own the Public key') logger.warning('Expected public key %s', sv_list[self.txto].slave_public_key) logger.warning('Found public key %s', self.PK) return False if self.txto != self.txfrom: logger.warning('Non matching txto and txfrom') logger.warning('txto: %s txfrom: %s', self.txto, self.txfrom) return False # FIXME: Duplication. Risk of mismatch (create & verification) txhash = blockheader.prev_blockheaderhash + \ tuple([int(char) for char in str(blockheader.blocknumber)]) + \ blockheader.headerhash # FIXME: This additional transformation happens in a base class txhash = sha256(txhash + self.pubhash) if self.txhash != txhash: logger.warning('Block_headerhash doesnt match') logger.warning('Found: %s', self.txhash) logger.warning('Expected: %s', txhash) return False # Slave XMSS is used to sign COINBASE txn having quite low XMSS height if not self._validate_signed_hash(height=config.dev.slave_xmss_height): return False return True
def ST(self, data): """ Stake Transaction This function processes whenever a Transaction having subtype ST is received. :return: """ try: st = Transaction.from_json(data) except Exception as e: logger.error( 'st rejected - unable to decode serialised data - closing connection' ) logger.exception(e) self.transport.loseConnection() return if not self.factory.master_mr.isRequested(st.get_message_hash(), self): return if len(self.factory.buffered_chain._chain.blockchain) == 1 and \ st.activation_blocknumber > self.factory.buffered_chain.height + config.dev.blocks_per_epoch: return height = self.factory.buffered_chain.height + 1 stake_validators_tracker = self.factory.buffered_chain.get_stake_validators_tracker( height) if st.txfrom in stake_validators_tracker.future_stake_addresses: logger.debug( 'P2P dropping st as staker is already in future_stake_address %s', st.txfrom) return if st.txfrom in stake_validators_tracker.sv_dict: expiry = stake_validators_tracker.sv_dict[ st.txfrom].activation_blocknumber + config.dev.blocks_per_epoch if st.activation_blocknumber < expiry: logger.debug( 'P2P dropping st txn as it is already active for the given range %s', st.txfrom) return if st.activation_blocknumber > height + config.dev.blocks_per_epoch: logger.debug( 'P2P dropping st as activation_blocknumber beyond limit') return False for t in self.factory.buffered_chain.tx_pool.transaction_pool: if st.get_message_hash() == t.get_message_hash(): return tx_state = self.factory.buffered_chain.get_stxn_state( blocknumber=self.factory.buffered_chain.height + 1, addr=st.txfrom) if st.validate() and st.validate_extended(tx_state=tx_state): self.factory.buffered_chain.tx_pool.add_tx_to_pool(st) else: logger.warning('>>>ST %s invalid state validation failed..', bin2hstr(tuple(st.hash))) return self.factory.register_and_broadcast('ST', st.get_message_hash(), st.to_json())
def verify_BK_hash(self, data, conn_identity): blocknum = data['blocknumber'] stake_selector = data['stake_selector'] prev_headerhash = tuple(data['prev_headerhash']) if blocknum <= self.chain.height(): return False if blocknum - 1 == self.chain.height(): if prev_headerhash != self.chain.m_blockchain[ -1].blockheader.headerhash: logger.warning( 'verify_BK_hash Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks)) return return True elif blocknum - 1 not in self.blocks or prev_headerhash != self.blocks[ blocknum - 1][0].block.blockheader.headerhash: logger.warning( 'verify_BK_hash Failed due to prevheaderhash mismatch, blockslen %d', len(self.blocks)) return sv_list = self.stake_list_get(blocknum) if stake_selector not in sv_list: return if sv_list[stake_selector].is_banned: logger.warning( 'Rejecting block created by banned stake selector %s', stake_selector) return reveal_hash = tuple(data['reveal_hash']) vote_hash = tuple(data['vote_hash']) stake_validators_list = self.get_stake_validators_list(blocknum) target_chain = stake_validators_list.select_target(prev_headerhash) if not stake_validators_list.validate_hash( vote_hash, blocknum, target_chain=target_chain, stake_address=stake_selector): logger.info('%s vote hash doesnt hash to stake terminator vote %s', conn_identity, vote_hash) return if not stake_validators_list.validate_hash( reveal_hash, blocknum, target_chain=config.dev.hashchain_nums - 1, stake_address=stake_selector): logger.info('%s reveal doesnt hash to stake terminator reveal %s', conn_identity, reveal_hash) return score = self.score_BK_hash(data) return self.is_better_block(blocknum, score)
def MR(self, data):
    """
    Message Receipt
    This function accepts message receipt from peer,
    checks if the message hash already been received or not.
    In case its a already received message, it is ignored.
    Otherwise the request is made to get the full message.

    :param data: serialised qrl_pb2.MR protobuf payload
    :return: None; side effects are requests (RFM) and scheduling only
    """
    mr_data = qrl_pb2.MR()
    try:
        Parse(data, mr_data)
    except Exception as e:  # Disconnect peer not following protocol
        logger.debug('Disconnected peer %s not following protocol in MR %s', self.conn_identity, e)
        self.transport.loseConnection()
        # FIX: previously fell through and kept processing the
        # default-initialized (empty) MR after deciding to disconnect.
        return

    msg_hash = mr_data.hash

    if mr_data.type not in MessageReceipt.allowed_types:
        return

    # TX receipts are only handled once fully synced.
    if mr_data.type in ['TX'] and self.factory.sync_state.state != ESyncState.synced:
        return

    if mr_data.type == 'TX':
        if len(self.factory.buffered_chain.tx_pool.pending_tx_pool) >= config.dev.transaction_pool_size:
            logger.warning('TX pool size full, incoming tx dropped. mr hash: %s', bin2hstr(msg_hash))
            return

    if mr_data.type == 'ST' or mr_data.type == 'VT':
        if self.factory.buffered_chain.height > 1 and self.factory.sync_state.state != ESyncState.synced:
            return

    # Already have the full message: nothing to request.
    if self.factory.master_mr.contains(msg_hash, mr_data.type):
        return

    self.factory.master_mr.add_peer(msg_hash, mr_data.type, self, mr_data)

    if self.factory.master_mr.is_callLater_active(msg_hash):  # Ignore if already requested
        return

    if mr_data.type == 'BK':
        block_chain_buffer = self.factory.buffered_chain

        if not block_chain_buffer.verify_BK_hash(mr_data, self.conn_identity):
            # Unverifiable announcement: still fetch it when it matches a
            # known duplicate slot (fork resolution), otherwise drop.
            if block_chain_buffer.is_duplicate_block(block_idx=mr_data.block_number,
                                                     prev_headerhash=mr_data.prev_headerhash,
                                                     stake_selector=mr_data.stake_selector):
                self.factory.RFM(mr_data)
            return

        blocknumber = mr_data.block_number
        target_blocknumber = block_chain_buffer.bkmr_tracking_blocknumber(self.factory.ntp)
        if target_blocknumber != self.factory.bkmr_blocknumber:
            # Tracking window moved on: reset the BK priority queue.
            self.factory.bkmr_blocknumber = target_blocknumber
            del self.factory.bkmr_priorityq
            self.factory.bkmr_priorityq = PriorityQueue()

        if blocknumber != target_blocknumber or blocknumber == 1:
            self.factory.RFM(mr_data)
            return

        # Queue candidates for the tracked height by score; the processor
        # picks the best one shortly after.
        score = block_chain_buffer.score_BK_hash(mr_data)
        self.factory.bkmr_priorityq.put((score, msg_hash))

        if not self.factory.bkmr_processor.active():
            self.factory.bkmr_processor = reactor.callLater(1, self.factory.select_best_bkmr)

        return

    self.factory.RFM(mr_data)
def _validate_custom(self) -> bool: if len(self.message_hash) > 80: logger.warning('Message hash length more than 80, %s', len(self.message_hash)) return False return True
def validate_block(self, chain):  # check validity of new block..
    """
    block validation

    Validates timestamps, header chaining, the coinbase txn and its
    reward, the epoch, and the stake-selector's reveal/vote hashes
    (against the hashchain for block 1, against the stake validators
    list afterwards), then all txns in the block.

    :param chain: chain object providing buffer, state and hashchains
    :return: True when valid, False otherwise
    """
    b = self.blockheader
    last_blocknum = b.blocknumber - 1

    tmp_last_block = chain.block_chain_buffer.get_block_n(last_blocknum)

    # NOTE(review): curr_timestamp is never used below -- presumably a
    # leftover; removal would also drop the ntp.getTime() call, so it is
    # only flagged here.
    curr_timestamp = ntp.getTime()

    if b.timestamp <= tmp_last_block.blockheader.timestamp:
        logger.warning('BLOCK timestamp is less than prev block timestamp')
        logger.warning('block timestamp %s ', b.timestamp)
        logger.warning('must be greater than %s', tmp_last_block.blockheader.timestamp)
        return False

    if b.generate_headerhash() != b.headerhash:
        logger.warning('Headerhash false for block: failed validation')
        return False

    if tmp_last_block.blockheader.timestamp + config.dev.minimum_minting_delay > b.timestamp:
        logger.warning('BLOCK created without waiting for minimum minting delay')
        logger.warning('prev_block timestamp %s ', tmp_last_block.blockheader.timestamp)
        logger.warning('current_block timestamp %s ', b.timestamp)
        return False

    if tmp_last_block.blockheader.headerhash != b.prev_blockheaderhash:
        logger.warning('Headerhash not in sequence: failed validation')
        return False

    if tmp_last_block.blockheader.blocknumber != b.blocknumber - 1:
        logger.warning('Block numbers out of sequence: failed validation')
        return False

    if len(self.transactions) == 0:
        logger.warning('BLOCK : There must be atleast 1 txn')
        return False

    coinbase_tx = self.transactions[0]

    try:
        if coinbase_tx.subtype != TX_SUBTYPE_COINBASE:
            logger.warning('BLOCK : First txn must be a COINBASE txn')
            return False
    except Exception as e:
        logger.exception(e)
        return False

    sv_list = chain.block_chain_buffer.stake_list_get(self.blockheader.blocknumber)

    if coinbase_tx.txto != self.blockheader.stake_selector:
        logger.info('Non matching txto and stake_selector')
        logger.info('txto: %s stake_selector %s', coinbase_tx.txfrom, self.blockheader.stake_selector)
        return False

    # Coinbase pays block reward plus collected fees.
    if coinbase_tx.amount != self.blockheader.block_reward + self.blockheader.fee_reward:
        logger.info('Block_reward doesnt match')
        logger.info('Found: %s', coinbase_tx.amount)
        logger.info('Expected: %s', self.blockheader.block_reward + self.blockheader.fee_reward)
        logger.info('block_reward: %s', self.blockheader.block_reward)
        logger.info('fee_reward: %s', self.blockheader.fee_reward)
        return False

    if b.timestamp == 0 and b.blocknumber > 0:
        logger.warning('Invalid block timestamp ')
        return False

    if b.block_reward != b.block_reward_calc():
        logger.warning('Block reward incorrect for block: failed validation')
        return False

    if b.epoch != b.blocknumber // config.dev.blocks_per_epoch:
        logger.warning('Epoch incorrect for block: failed validation')
        return False

    if b.blocknumber == 1:
        # First block: verify against the freshly selected hashchain.
        x = 0
        for tx in self.transactions:
            if tx.subtype == TX_SUBTYPE_STAKE:
                if tx.txfrom == b.stake_selector:
                    x = 1
                    reveal_hash, vote_hash = chain.select_hashchain(
                        chain.m_blockchain[-1].blockheader.headerhash, self.transactions[0].txto, tx.hash, blocknumber=1)

                    if sha256(b.reveal_hash) != reveal_hash:
                        logger.warning('reveal_hash does not hash correctly to terminator: failed validation')
                        return False

                    if sha256(b.vote_hash) != vote_hash:
                        logger.warning('vote_hash does not hash correctly to terminator: failed validation')
                        return False
        if x != 1:
            logger.warning('Stake selector not in block.stake: failed validation')
            return False
    else:  # we look in stake_list for the hash terminator and hash to it..
        stake_validators_list = chain.block_chain_buffer.get_stake_validators_list(self.blockheader.blocknumber)

        if self.transactions[0].txto not in stake_validators_list.sv_list:
            logger.warning('Stake selector not in stake_list for this epoch..')
            return False

        if not stake_validators_list.validate_hash(b.reveal_hash,
                                                   b.blocknumber,
                                                   config.dev.hashchain_nums - 1,
                                                   self.transactions[0].txto):
            logger.warning('Supplied hash does not iterate to terminator: failed validation')
            return False

        target_chain = select_target_hashchain(b.prev_blockheaderhash)

        if not stake_validators_list.validate_hash(b.vote_hash,
                                                   b.blocknumber,
                                                   target_chain,
                                                   self.transactions[0].txto):
            logger.warning('Not all the reveal_hashes are valid..')
            return False

    if not self._validate_tx_in_block(chain):
        logger.warning('Block validate_tx_in_block error: failed validation')
        return False

    return True