def anonymize(tx_count, per_tx, remainder, anon_recipient, identifier, anon_sender):
    """Split a mixed amount into tx_count payouts to anon_recipient and
    return the remainder to anon_sender ("return remainder to source!").

    Runs only if no payout row tagged with `identifier` exists yet.
    Relies on module-level state: `a`/`anon` (mixer db cursor/connection),
    `m`/`mempool` (mempool cursor/connection), `address`, `key`,
    `public_key_hashed`, `fee_calculate` — TODO confirm these are defined
    at module scope in the original file.
    """
    a.execute("SELECT * FROM transactions WHERE openfield = ?", (identifier,))
    try:
        exists = a.fetchall()[0]  # payout already recorded for this identifier: nothing to do
    except IndexError:  # payout didn't happen yet (narrowed from a bare except)
        print(tx_count, per_tx, remainder, identifier)
        # Tx skeleton is identical for every payout; fee is computed up-front
        # so the remainder branch below works even when tx_count == 0
        # (original referenced `fee` that was only bound inside the loop).
        openfield = "mixer"
        operation = 0
        fee = fee_calculate(openfield)
        for tx in range(tx_count):
            # construct tx
            timestamp = '%.2f' % time.time()
            transaction = (str(timestamp), str(address), str(anon_recipient),
                           '%.8f' % float(per_tx - fee), str(operation),
                           str(openfield))  # this is signed
            h = SHA.new(str(transaction).encode("utf-8"))
            signer = PKCS1_v1_5.new(key)
            signature = signer.sign(h)
            signature_enc = base64.b64encode(signature)
            print("Encoded Signature: {}".format(signature_enc.decode("utf-8")))
            verifier = PKCS1_v1_5.new(key)
            if verifier.verify(h, signature):
                print("The signature is valid, proceeding to save transaction to mempool")
                # mixer-local record keeps the identifier in openfield position
                a.execute("INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?)",
                          (str(timestamp), str(address), str(anon_recipient),
                           '%.8f' % float(per_tx - fee), str(signature_enc.decode("utf-8")),
                           str(public_key_hashed), str(operation), str(identifier)))
                anon.commit()
                # mempool record carries the real openfield ("mixer")
                m.execute("INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?)",
                          (str(timestamp), str(address), str(anon_recipient),
                           '%.8f' % float(per_tx - fee), str(signature_enc.decode("utf-8")),
                           str(public_key_hashed), str(operation), str(openfield)))
                mempool.commit()
        if (remainder - fee) > 0:
            # return the remainder to the original sender
            timestamp = '%.2f' % time.time()
            transaction = (str(timestamp), str(address), str(anon_sender),
                           '%.8f' % float(remainder - fee), str(operation),
                           str(openfield))  # this is signed
            # BUG FIX: the original never signed the remainder transaction —
            # it inserted the signature of the LAST per_tx payout (an invalid
            # signature for this tuple, or an unbound name when tx_count == 0).
            h = SHA.new(str(transaction).encode("utf-8"))
            signer = PKCS1_v1_5.new(key)
            signature = signer.sign(h)
            signature_enc = base64.b64encode(signature)
            m.execute("INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?)",
                      (str(timestamp), str(address), str(anon_sender),
                       '%.8f' % float(remainder - fee), str(signature_enc.decode("utf-8")),
                       str(public_key_hashed), str(operation), str(openfield)))
            mempool.commit()
    return
def balanceget(balance_address, h3):  # verify balance
    """Compute the projected balance of `balance_address`.

    Sums ledger credits, debits, fees and mining rewards (via cursor `h3`)
    and subtracts pending mempool spends (via mp.MEMPOOL).

    :return: tuple of strings (balance, credit_ledger, debit, fees, rewards,
             balance_no_mempool) — `balance` includes pending mempool debits,
             `balance_no_mempool` does not.
    """
    # BUG FIX: fee_calculate(x[1], x[2], 700000) needs (openfield, operation,
    # block), but the original query only returned (amount, openfield) — so
    # x[2] raised IndexError whenever the mempool held a tx from this address.
    # Select `operation` as the third column.
    base_mempool = mp.MEMPOOL.fetchall(
        "SELECT amount, openfield, operation FROM transactions WHERE address = ?;",
        (balance_address,))
    # include mempool fees
    debit_mempool = 0
    if base_mempool:
        for x in base_mempool:
            debit_tx = Decimal(x[0])
            fee = fee_calculate(x[1], x[2], 700000)
            debit_mempool = quantize_eight(debit_mempool + debit_tx + fee)

    credit_ledger = Decimal("0")
    for entry in execute_param(h3, "SELECT amount FROM transactions WHERE recipient = ?;",
                               (balance_address,)):
        try:
            credit_ledger = quantize_eight(credit_ledger) + quantize_eight(entry[0])
            credit_ledger = 0 if credit_ledger is None else credit_ledger
        except Exception:  # non-numeric / NULL rows count as zero (narrowed from bare except)
            credit_ledger = 0

    fees = Decimal("0")
    debit_ledger = Decimal("0")
    for entry in execute_param(h3, "SELECT fee, amount FROM transactions WHERE address = ?;",
                               (balance_address,)):
        try:
            fees = quantize_eight(fees) + quantize_eight(entry[0])
            fees = 0 if fees is None else fees
        except Exception:
            fees = 0
        try:
            debit_ledger = debit_ledger + Decimal(entry[1])
            debit_ledger = 0 if debit_ledger is None else debit_ledger
        except Exception:
            debit_ledger = 0
    debit = quantize_eight(debit_ledger + debit_mempool)

    rewards = Decimal("0")
    for entry in execute_param(h3, "SELECT reward FROM transactions WHERE recipient = ?;",
                               (balance_address,)):
        try:
            rewards = quantize_eight(rewards) + quantize_eight(entry[0])
            rewards = 0 if rewards is None else rewards
        except Exception:
            rewards = 0

    balance = quantize_eight(credit_ledger - debit - fees + rewards)
    balance_no_mempool = float(credit_ledger) - float(debit_ledger) - float(fees) + float(rewards)
    return str(balance), str(credit_ledger), str(debit), str(fees), str(rewards), str(balance_no_mempool)
# NOTE(review): fragment of a larger dice-game payout routine — the enclosing
# function/loop is not visible here, so the leading indentation of the first
# statements is a best-effort reconstruction. TODO confirm against the full file.
print("Lost")
if can_be_added_to_bets(tx_signature):
    bets_db_add(x, rolled, victorious)
for y in pay_this:
    # presumably y is a transaction row: y[2]=recipient, y[4]=amount, y[5]=signature — verify against caller
    recipient = y[2]
    bet_amount = float(y[4])
    tx_signature = y[5]  # unique
    # print y
    # create transactions for missing payouts
    timestamp = '%.2f' % time.time()
    # payout = 2x stake minus a 5% house cut
    win_amount = Decimal(bet_amount * 2) - percentage(5, bet_amount)
    payout_operation = "zircodice:payout"
    # fee is computed on the first 8 chars of the bet's signature (used as the openfield)
    fee = fee_calculate(y[5][:8])
    payout_amount = '%.8f' % float(win_amount - fee)  # float(0.01 + (float(win_amount) * 0.001) + (float(len(payout_openfield)) / 100000) + (float(payout_keep) / 10)) # 0.1% + 0.01 dust
    payout_transaction = (str(timestamp), str(address), str(recipient), str(payout_amount),
                          str(payout_operation), str(y[5][:8]))  # this is signed
    h = SHA.new(str(payout_transaction).encode("utf-8"))
    signer = PKCS1_v1_5.new(key)
    signature = signer.sign(h)
    signature_enc = base64.b64encode(signature)
    print("Encoded Signature: {}".format(signature_enc.decode()))
    verifier = PKCS1_v1_5.new(key)
def digest_block(node, data, sdef, peer_ip, db_handler):
    """node param for imports

    Validate and persist an array of blocks received from `peer_ip`.
    Acquires node.db_lock, validates every transaction (signature, address,
    timestamps, balances), checks proof-of-work via mining/mining_heavy3,
    then commits the block's transactions to the ledger. Returns the new
    block hash on success; raises ValueError on any rejection.
    """

    class Transaction():
        # Mutable holder for the tx currently being converted/validated.
        def __init__(self):
            self.start_time_tx = 0
            self.q_received_timestamp = 0
            self.received_timestamp = "0.00"
            self.received_address = None
            self.received_recipient = None
            self.received_amount = 0
            self.received_signature_enc = None
            self.received_public_key_hashed = None
            self.received_operation = None
            self.received_openfield = None

    class MinerTransaction():
        # Holder for the coinbase (last) transaction of a block.
        def __init__(self):
            self.q_block_timestamp = 0
            self.nonce = None
            self.miner_address = None

    class PreviousBlock():
        # Snapshot of the current chain tip, read from the ledger on creation.
        def __init__(self):
            db_handler.execute(
                db_handler.c,
                "SELECT block_hash, block_height, timestamp FROM transactions WHERE reward != 0 ORDER BY block_height DESC LIMIT 1;")
            result = db_handler.c.fetchall()
            self.block_hash = result[0][0]
            self.block_height = result[0][1]
            self.q_timestamp_last = quantize_two(result[0][2])

    class BlockArray():
        # Aggregate state across the whole array of blocks in `data`.
        def __init__(self):
            self.tx_count = 0
            self.block_height_new = node.last_block + 1  # for logging purposes.
            self.block_hash = 'N/A'
            self.failed_cause = ''
            self.block_count = 0

    block_array = BlockArray()

    def transaction_validate():
        # Validates the closure variable `tx` set in the conversion loop below.
        # Raises ValueError on the first failed check.
        received_public_key = RSA.importKey(base64.b64decode(tx.received_public_key_hashed))
        received_signature_dec = base64.b64decode(tx.received_signature_enc)
        verifier = PKCS1_v1_5.new(received_public_key)
        essentials.validate_pem(tx.received_public_key_hashed)
        sha_hash = SHA.new(str((tx.received_timestamp, tx.received_address, tx.received_recipient,
                                tx.received_amount, tx.received_operation,
                                tx.received_openfield)).encode("utf-8"))
        if not verifier.verify(sha_hash, received_signature_dec):
            raise ValueError(f"Invalid signature from {tx.received_address}")
        else:
            node.logger.app_log.info(
                f"Valid signature from {tx.received_address} to {tx.received_recipient} amount {tx.received_amount}")
        if float(tx.received_amount) < 0:
            raise ValueError("Negative balance spend attempt")
        # the sender address must be the sha224 of its own public key
        if tx.received_address != hashlib.sha224(
                base64.b64decode(tx.received_public_key_hashed)).hexdigest():
            raise ValueError("Attempt to spend from a wrong address")
        if not essentials.address_validate(tx.received_address):
            raise ValueError("Not a valid sender address")
        if not essentials.address_validate(tx.received_recipient):
            raise ValueError("Not a valid recipient address")
        if tx.start_time_tx < tx.q_received_timestamp:
            raise ValueError(
                f"Future transaction not allowed, timestamp {quantize_two((tx.q_received_timestamp - tx.start_time_tx) / 60)} minutes in the future")
        if previous_block.q_timestamp_last - 86400 > tx.q_received_timestamp:
            raise ValueError("Transaction older than 24h not allowed.")

    def check_signature(block):
        # Reject empty or already-known signatures before any heavier work.
        for entry in block:  # sig 4
            block_array.tx_count += 1
            entry_signature = entry[4]
            if entry_signature:  # prevent empty signature database retry hack
                signature_list.append(entry_signature)
                # reject block with transactions which are already in the ledger ram
                db_handler.execute_param(
                    db_handler.h,
                    "SELECT block_height FROM transactions WHERE signature = ?;",
                    (entry_signature, ))
                tx_presence_check = db_handler.h.fetchone()
                if tx_presence_check:
                    # print(node.last_block)
                    raise ValueError(
                        f"That transaction {entry_signature[:10]} is already in our ram ledger, block_height {tx_presence_check[0]}")
                db_handler.execute_param(
                    db_handler.c,
                    "SELECT block_height FROM transactions WHERE signature = ?;",
                    (entry_signature, ))
                tx_presence_check = db_handler.c.fetchone()
                if tx_presence_check:
                    # print(node.last_block)
                    raise ValueError(
                        f"That transaction {entry_signature[:10]} is already in our ledger, block_height {tx_presence_check[0]}")
            else:
                raise ValueError(f"Empty signature from {peer_ip}")

    if node.peers.is_banned(peer_ip):
        # no need to loose any time with banned peers
        raise ValueError("Cannot accept blocks from a banned peer")
        # since we raise, it will also drop the connection, it's fine since he's banned.

    if not node.db_lock.locked():
        node.db_lock.acquire()
        node.logger.app_log.warning(f"Database lock acquired")

        while mp.MEMPOOL.lock.locked():
            time.sleep(0.1)
            node.logger.app_log.info(f"Chain: Waiting for mempool to unlock {peer_ip}")

        node.logger.app_log.warning(f"Chain: Digesting started from {peer_ip}")
        # variables that have been quantized are prefixed by q_ So we can avoid any unnecessary quantize again later. Takes time.
        # Variables that are only used as quantized decimal are quantized once and for all.
        block_size = Decimal(sys.getsizeof(str(data))) / Decimal(1000000)
        node.logger.app_log.warning(f"Chain: Block size: {block_size} MB")

        try:
            block_array_data = data
            # reject block with duplicate transactions
            signature_list = []
            block_transactions = []

            for block in block_array_data:
                block_array.block_count += 1
                # Reworked process: we exit as soon as we find an error, no need to process further tests.
                # Then the exception handler takes place.
                # TODO EGG: benchmark this loop vs a single "WHERE IN" SQL
                # move down, so bad format tx do not require sql query
                check_signature(block)
                block_array.tx_count = len(signature_list)
                if block_array.tx_count != len(set(signature_list)):
                    raise ValueError("There are duplicate transactions in this block, rejected")
                del signature_list[:]

                previous_block = PreviousBlock()
                block_array.block_height_new = previous_block.block_height + 1
                db_handler.execute(db_handler.c, "SELECT max(block_height) FROM transactions")
                node.last_block = db_handler.c.fetchone()[0]
                start_time_block = quantize_two(time.time())
                transaction_list_converted = []  # makes sure all the data are properly converted

                for tx_index, transaction in enumerate(block):
                    tx = Transaction()
                    tx.start_time_tx = quantize_two(time.time())
                    tx.q_received_timestamp = quantize_two(transaction[0])
                    tx.received_timestamp = '%.2f' % tx.q_received_timestamp
                    tx.received_address = str(transaction[1])[:56]
                    tx.received_recipient = str(transaction[2])[:56]
                    tx.received_amount = '%.8f' % (quantize_eight(transaction[3]))
                    tx.received_signature_enc = str(transaction[4])[:684]
                    tx.received_public_key_hashed = str(transaction[5])[:1068]
                    tx.received_operation = str(transaction[6])[:30]
                    tx.received_openfield = str(transaction[7])[:100000]

                    # if transaction == block[-1]:
                    if tx_index == block_array.tx_count - 1:  # faster than comparing the whole tx
                        miner_tx = MinerTransaction()
                        # recognize the last transaction as the mining reward transaction
                        miner_tx.q_block_timestamp = tx.q_received_timestamp
                        miner_tx.nonce = tx.received_openfield[:128]
                        miner_tx.miner_address = tx.received_address

                    transaction_list_converted.append(
                        (tx.received_timestamp, tx.received_address, tx.received_recipient,
                         tx.received_amount, tx.received_signature_enc,
                         tx.received_public_key_hashed, tx.received_operation,
                         tx.received_openfield))
                    # if (start_time_tx < q_received_timestamp + 432000) or not quicksync:
                    # convert readable key to instance
                    transaction_validate()

                # reject blocks older than latest block
                if miner_tx.q_block_timestamp <= previous_block.q_timestamp_last:
                    raise ValueError("Block is older than the previous one, will be rejected")

                # calculate current difficulty (is done for each block in block array, not super easy to isolate)
                diff = difficulty(node, db_handler)
                node.difficulty = diff
                node.logger.app_log.warning(
                    f"Time to generate block {previous_block.block_height + 1}: {'%.2f' % diff[2]}")
                node.logger.app_log.warning(f"Current difficulty: {diff[3]}")
                node.logger.app_log.warning(f"Current blocktime: {diff[4]}")
                node.logger.app_log.warning(f"Current hashrate: {diff[5]}")
                node.logger.app_log.warning(f"Difficulty adjustment: {diff[6]}")
                node.logger.app_log.warning(f"Difficulty: {diff[0]} {diff[1]}")

                # node.logger.app_log.info("Transaction list: {}".format(transaction_list_converted))
                # block hash chains over the stringified converted tx list + previous hash
                block_array.block_hash = hashlib.sha224(
                    (str(transaction_list_converted) + previous_block.block_hash).encode("utf-8")).hexdigest()
                # node.logger.app_log.info("Last block sha_hash: {}".format(block_hash))
                node.logger.app_log.info(f"Calculated block sha_hash: {block_array.block_hash}")
                # node.logger.app_log.info("Nonce: {}".format(nonce))

                # check if we already have the sha_hash
                db_handler.execute_param(
                    db_handler.h,
                    "SELECT block_height FROM transactions WHERE block_hash = ?",
                    (block_array.block_hash, ))
                dummy = db_handler.h.fetchone()
                if dummy:
                    raise ValueError(
                        "Skipping digestion of block {} from {}, because we already have it on block_height {}"
                        .format(block_array.block_hash[:10], peer_ip, dummy[0]))

                # proof-of-work check; `tx` here is the last (mining) tx of the block
                if node.is_mainnet:
                    if block_array.block_height_new < POW_FORK:
                        diff_save = mining.check_block(
                            block_array.block_height_new, miner_tx.miner_address,
                            miner_tx.nonce, previous_block.block_hash, diff[0],
                            tx.received_timestamp, tx.q_received_timestamp,
                            previous_block.q_timestamp_last,
                            peer_ip=peer_ip, app_log=node.logger.app_log)
                    else:
                        diff_save = mining_heavy3.check_block(
                            block_array.block_height_new, miner_tx.miner_address,
                            miner_tx.nonce, previous_block.block_hash, diff[0],
                            tx.received_timestamp, tx.q_received_timestamp,
                            previous_block.q_timestamp_last,
                            peer_ip=peer_ip, app_log=node.logger.app_log)
                elif node.is_testnet:
                    diff_save = mining_heavy3.check_block(
                        block_array.block_height_new, miner_tx.miner_address,
                        miner_tx.nonce, previous_block.block_hash, diff[0],
                        tx.received_timestamp, tx.q_received_timestamp,
                        previous_block.q_timestamp_last,
                        peer_ip=peer_ip, app_log=node.logger.app_log)
                else:
                    # it's regnet then, will use a specific fake method here.
                    diff_save = mining_heavy3.check_block(
                        block_array.block_height_new, miner_tx.miner_address,
                        miner_tx.nonce, previous_block.block_hash,
                        regnet.REGNET_DIFF,
                        tx.received_timestamp, tx.q_received_timestamp,
                        previous_block.q_timestamp_last,
                        peer_ip=peer_ip, app_log=node.logger.app_log)

                fees_block = []
                mining_reward = 0  # avoid warning
                # Cache for multiple tx from same address
                balances = {}
                for tx_index, transaction in enumerate(block):
                    db_timestamp = '%.2f' % quantize_two(transaction[0])
                    db_address = str(transaction[1])[:56]
                    db_recipient = str(transaction[2])[:56]
                    db_amount = '%.8f' % quantize_eight(transaction[3])
                    db_signature = str(transaction[4])[:684]
                    db_public_key_hashed = str(transaction[5])[:1068]
                    db_operation = str(transaction[6])[:30]
                    db_openfield = str(transaction[7])[:100000]

                    block_debit_address = 0
                    block_fees_address = 0
                    # this also is redundant on many tx per address block
                    for x in block:
                        if x[1] == db_address:  # make calculation relevant to a particular address in the block
                            block_debit_address = quantize_eight(
                                Decimal(block_debit_address) + Decimal(x[3]))
                            if x != block[-1]:
                                block_fees_address = quantize_eight(
                                    Decimal(block_fees_address) + Decimal(
                                        essentials.fee_calculate(
                                            db_openfield, db_operation,
                                            node.last_block)))  # exclude the mining tx from fees
                    # print("block_fees_address", block_fees_address, "for", db_address)
                    # node.logger.app_log.info("Digest: Inbound block credit: " + str(block_credit))
                    # node.logger.app_log.info("Digest: Inbound block debit: " + str(block_debit))
                    # include the new block

                    balance_pre = ledger_balance3(
                        db_address, balances, db_handler)  # keep this as c (ram hyperblock access)
                    balance = quantize_eight(balance_pre - block_debit_address)
                    # node.logger.app_log.info("Digest: Projected transaction address balance: " + str(balance))

                    fee = essentials.fee_calculate(db_openfield, db_operation, node.last_block)
                    fees_block.append(quantize_eight(fee))
                    # node.logger.app_log.info("Fee: " + str(fee))

                    # decide reward
                    if tx_index == block_array.tx_count - 1:
                        db_amount = 0  # prevent spending from another address, because mining txs allow delegation
                        if previous_block.block_height <= 10000000:
                            mining_reward = 15 - (
                                quantize_eight(block_array.block_height_new) /
                                quantize_eight(1000000 / 2)) - Decimal("0.8")
                            if mining_reward < 0:
                                mining_reward = 0
                        else:
                            mining_reward = 0
                        reward = quantize_eight(mining_reward + sum(fees_block[:-1]))
                        # don't request a fee for mined block so new accounts can mine
                        fee = 0
                    else:
                        reward = 0
                        if quantize_eight(balance_pre) < quantize_eight(db_amount):
                            raise ValueError(
                                f"{db_address} sending more than owned: {db_amount}/{balance_pre}")
                        if quantize_eight(balance) - quantize_eight(block_fees_address) < 0:
                            # exclude fee check for the mining/header tx
                            raise ValueError(
                                f"{db_address} Cannot afford to pay fees (balance: {balance}, block fees: {block_fees_address})")

                    # append, but do not insert to ledger before whole block is validated,
                    # note that it takes already validated values (decimals, length)
                    node.logger.app_log.info(
                        f"Chain: Appending transaction back to block with {len(block_transactions)} transactions in it")
                    block_transactions.append(
                        (str(block_array.block_height_new), str(db_timestamp), str(db_address),
                         str(db_recipient), str(db_amount), str(db_signature),
                         str(db_public_key_hashed), str(block_array.block_hash), str(fee),
                         str(reward), str(db_operation), str(db_openfield)))
                    try:
                        mp.MEMPOOL.delete_transaction(db_signature)
                        node.logger.app_log.info(
                            f"Chain: Removed processed transaction {db_signature[:56]} from the mempool while digesting")
                    except:
                        # tx was not or is no more in the local mempool
                        pass
                # end for block

                # save current diff (before the new block)
                # quantized vars have to be converted, since Decimal is not json serializable...
                node.plugin_manager.execute_action_hook(
                    'block', {
                        'height': block_array.block_height_new, 'diff': diff_save,
                        'sha_hash': block_array.block_hash,
                        'timestamp': float(miner_tx.q_block_timestamp),
                        'miner': miner_tx.miner_address, 'ip': peer_ip})
                node.plugin_manager.execute_action_hook(
                    'fullblock', {
                        'height': block_array.block_height_new, 'diff': diff_save,
                        'sha_hash': block_array.block_hash,
                        'timestamp': float(miner_tx.q_block_timestamp),
                        'miner': miner_tx.miner_address, 'ip': peer_ip,
                        'transactions': block_transactions})

                db_handler.execute_param(db_handler.c, "INSERT INTO misc VALUES (?, ?)",
                                         (block_array.block_height_new, diff_save))
                db_handler.commit(db_handler.conn)

                # db_handler.execute_many(db_handler.c, "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?,?)", block_transactions)
                for transaction2 in block_transactions:
                    db_handler.execute_param(
                        db_handler.c,
                        "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?,?)",
                        (str(transaction2[0]), str(transaction2[1]), str(transaction2[2]),
                         str(transaction2[3]), str(transaction2[4]), str(transaction2[5]),
                         str(transaction2[6]), str(transaction2[7]), str(transaction2[8]),
                         str(transaction2[9]), str(transaction2[10]), str(transaction2[11])))
                    # secure commit for slow nodes
                    db_handler.commit(db_handler.conn)

                # savings
                if node.is_testnet or block_array.block_height_new >= 843000:
                    # no savings for regnet
                    if int(block_array.block_height_new) % 10000 == 0:  # every x blocks
                        staking.staking_update(db_handler.conn, db_handler.c,
                                               db_handler.index, db_handler.index_cursor,
                                               "normal", block_array.block_height_new,
                                               node.logger.app_log)
                        staking.staking_payout(db_handler.conn, db_handler.c,
                                               db_handler.index, db_handler.index_cursor,
                                               block_array.block_height_new,
                                               float(miner_tx.q_block_timestamp),
                                               node.logger.app_log)
                        staking.staking_revalidate(db_handler.conn, db_handler.c,
                                                   db_handler.index, db_handler.index_cursor,
                                                   block_array.block_height_new,
                                                   node.logger.app_log)

                # new sha_hash
                db_handler.execute(
                    db_handler.c,
                    "SELECT * FROM transactions WHERE block_height = (SELECT max(block_height) FROM transactions)")
                # Was trying to simplify, but it's the latest mirror sha_hash.
                # not the latest block, nor the mirror of the latest block.
                # c.execute("SELECT * FROM transactions WHERE block_height = ?", (block_array.block_height_new -1,))
                tx_list_to_hash = db_handler.c.fetchall()
                mirror_hash = hashlib.blake2b(str(tx_list_to_hash).encode(), digest_size=20).hexdigest()
                # /new sha_hash

                if int(block_array.block_height_new) % 10 == 0:  # every 10 blocks
                    db_handler.dev_reward()

                # node.logger.app_log.warning("Block: {}: {} valid and saved from {}".format(block_array.block_height_new, block_hash[:10], peer_ip))
                node.logger.app_log.warning(
                    f"Valid block: {block_array.block_height_new}: {block_array.block_hash[:10]} with {len(block)} txs, digestion from {peer_ip} completed in {str(time.time() - float(start_time_block))[:5]}s.")
                del block_transactions[:]
                node.peers.unban(peer_ip)

                # This new block may change the int(diff). Trigger the hook whether it changed or not.
                diff = difficulty(node, db_handler)
                node.difficulty = diff
                node.plugin_manager.execute_action_hook('diff', diff[0])
                # We could recalc diff after inserting block, and then only trigger the block hook,
                # but I fear this would delay the new block event.
            # /whole block validation

            # NEW: returns new block sha_hash
            checkpoint_set(node, block_array.block_height_new)
            return block_array.block_hash

        except Exception as e:
            node.logger.app_log.warning(f"Chain processing failed: {e}")
            node.logger.app_log.info(f"Received data dump: {data}")
            block_array.failed_cause = str(e)
            # Temp
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            if node.peers.warning(sdef, peer_ip, "Rejected block", 2):
                raise ValueError(f"{peer_ip} banned")
            raise ValueError("Chain: digestion aborted")

        finally:
            # always flush ram db, release the lock and fire the digest hook
            if node.ram:
                db_to_drive(node, db_handler)
            node.db_lock.release()
            node.logger.app_log.warning(f"Database lock released")
            delta_t = time.time() - float(start_time_block)
            # node.logger.app_log.warning("Block: {}: {} digestion completed in {}s.".format(block_array.block_height_new, block_hash[:10], delta_t))
            node.plugin_manager.execute_action_hook(
                'digestblock', {
                    'failed': block_array.failed_cause, 'ip': peer_ip,
                    'deltat': delta_t, "blocks": block_array.block_count,
                    "txs": block_array.tx_count})
    else:
        node.logger.app_log.warning(
            f"Chain: Skipping processing from {peer_ip}, someone delivered data faster")
        node.plugin_manager.execute_action_hook('digestblock', {'failed': "skipped", 'ip': peer_ip})
if y not in processed: processed.append(y) #can overflow payout_address = y[2] print(payout_address) bet_amount = float(y[4]) tx_signature = y[5] # unique #print y # create transactions for missing payouts timestamp = '%.2f' % time.time() payout_amount = Decimal(bet_amount * 2) - percentage(5, bet_amount) payout_openfield = "payout for " + tx_signature[:8] payout_operation = 0 fee = fee_calculate(payout_openfield) #float(0.01 + (float(payout_amount) * 0.001) + (float(len(payout_openfield)) / 100000) + (float(payout_keep) / 10)) # 0.1% + 0.01 dust transaction = (str(timestamp), str(address), str(payout_address), '%.8f' % float(payout_amount - fee), str(payout_operation), str(payout_openfield) ) # this is signed print(transaction) h = SHA.new(str(transaction).encode("utf-8")) signer = PKCS1_v1_5.new(key) signature = signer.sign(h) signature_enc = base64.b64encode(signature) print("Encoded Signature: {}".format(signature_enc.decode()))
def merge(self, data, peer_ip, c, size_bypass=False, wait=False, revert=False):
    """
    Checks and merge the tx list in out mempool
    :param data: a single transaction tuple or a list of transaction tuples
    :param peer_ip: ip of the peer that sent the transactions
    :param c: ledger db cursor
    :param size_bypass: if True, will merge whatever the mempool size is
    :param wait: if True, will wait until the main db_lock is free. if False, will just drop.
    :param revert: if True, we are reverting tx from digest_block, so main lock is on. Don't bother, process without lock.
    :return: list of status messages (or a single string when data is empty)
    """
    if not data:
        return "Mempool from {} was empty".format(peer_ip)
    mempool_result = []
    if data == '*':
        raise ValueError("Connection lost")
    try:
        # peers frozen below are ignored for 10 minutes
        if self.peers_sent[peer_ip] > time.time():
            self.app_log.warning("Mempool ignoring merge from frozen {}".format(peer_ip))
            mempool_result.append("Mempool ignoring merge from frozen {}".format(peer_ip))
            return mempool_result
    except:
        # unknown peer
        pass
    if not essentials.is_sequence(data):
        with self.peers_lock:
            self.peers_sent[peer_ip] = time.time() + 10 * 60
        self.app_log.warning("Freezing mempool from {} for 10 min - Bad TX format".format(peer_ip))
        mempool_result.append("Bad TX Format")
        return mempool_result
    mempool_result.append("Mempool merging started from {}".format(peer_ip))
    if not revert:
        while self.db_lock.locked():
            # prevent transactions which are just being digested from being added to mempool
            if not wait:
                # not reverting, but not waiting, bye
                # By default, we don't wait.
                mempool_result.append("Locked ledger, dropping txs")
                return mempool_result
            self.app_log.warning("Waiting for block digestion to finish before merging mempool")
            time.sleep(1)
    # if reverting, don't bother with main lock, go on.
    mempool_size = self.size()  # caulculate current mempool size before adding txs
    # TODO: we check main ledger db is not locked before beginning, but we don't lock? ok, see comment in node.py. since it's called from a lock, it would deadlock.
    # merge mempool
    # while self.lock.locked():
    #     time.sleep(1)
    with self.lock:
        try:
            block_list = data
            if not isinstance(block_list[0], list):
                # convert to list of lists if only one tx and not handled
                block_list = [block_list]

            for transaction in block_list:
                # set means unique, only accepts list of txs
                if (mempool_size < 0.3 or size_bypass) or \
                        (len(str(transaction[7])) > 200 and mempool_size < 0.4) \
                        or (Decimal(transaction[3]) > Decimal(5) and mempool_size < 0.5) \
                        or (transaction[1] in self.config.mempool_allowed and mempool_size < 0.6):
                    # condition 1: size limit or bypass,
                    # condition 2: spend more than 25 coins,
                    # condition 3: have length of openfield larger than 200
                    # all transactions in the mempool need to be cycled to check for special cases,
                    # therefore no while/break loop here
                    mempool_timestamp = '%.2f' % (quantize_two(transaction[0]))
                    mempool_address = str(transaction[1])[:56]
                    mempool_recipient = str(transaction[2])[:56]
                    mempool_amount = '%.8f' % (quantize_eight(transaction[3]))  # convert scientific notation
                    mempool_signature_enc = str(transaction[4])[:684]
                    mempool_public_key_hashed = str(transaction[5])[:1068]
                    mempool_operation = str(transaction[6])[:10]
                    mempool_openfield = str(transaction[7])[:100000]

                    # convert readable key to instance
                    mempool_public_key = RSA.importKey(base64.b64decode(mempool_public_key_hashed))
                    mempool_signature_dec = base64.b64decode(mempool_signature_enc)
                    acceptable = True

                    try:
                        # TODO: sure it will throw an exception?
                        # condition 1)
                        dummy = self.fetchall("SELECT * FROM transactions WHERE signature = ?;",
                                              (mempool_signature_enc, ))
                        if dummy:
                            # self.app_log.warning("That transaction is already in our mempool")
                            mempool_result.append("That transaction is already in our mempool")
                            acceptable = False
                            mempool_in = True
                        else:
                            mempool_in = False
                    except:
                        # print('sigmempool NO ', mempool_signature_enc)
                        mempool_in = False

                    # reject transactions which are already in the ledger
                    # TODO: not clean, will need to have ledger as a module too.
                    # dup code atm.
                    essentials.execute_param_c(c, "SELECT * FROM transactions WHERE signature = ?;",
                                               (mempool_signature_enc, ), self.app_log)
                    # condition 2
                    try:
                        dummy = c.fetchall()[0]
                        # print('sigledger', mempool_signature_enc, dummy)
                        if dummy:
                            mempool_result.append("That transaction is already in our ledger")
                            # self.app_log.warning("That transaction is already in our ledger")
                            # reject transactions which are already in the ledger
                            acceptable = False
                            ledger_in = True
                            # Can be a syncing node. Do not request mempool from this peer until 10 min
                            with self.peers_lock:
                                self.peers_sent[peer_ip] = time.time() + 10 * 60
                            self.app_log.warning("Freezing mempool from {} for 10 min.".format(peer_ip))
                            return mempool_result
                        else:
                            ledger_in = False
                    except:
                        # print('sigledger NO ', mempool_signature_enc)
                        ledger_in = False

                    # if mempool_operation != "1" and mempool_operation != "0":
                    #     mempool_result.append = ("Mempool: Wrong keep value {}".format(mempool_operation))
                    #     acceptable = 0

                    # sender address must be the sha224 of its own public key
                    if mempool_address != hashlib.sha224(
                            base64.b64decode(mempool_public_key_hashed)).hexdigest():
                        mempool_result.append("Mempool: Attempt to spend from a wrong address")
                        # self.app_log.warning("Mempool: Attempt to spend from a wrong address")
                        acceptable = False

                    if not essentials.address_validate(mempool_address) \
                            or not essentials.address_validate(mempool_recipient):
                        mempool_result.append("Mempool: Not a valid address")
                        # self.app_log.warning("Mempool: Not a valid address")
                        acceptable = False

                    if quantize_eight(mempool_amount) < 0:
                        acceptable = False
                        mempool_result.append("Mempool: Negative balance spend attempt")
                        # self.app_log.warning("Mempool: Negative balance spend attempt")

                    if quantize_two(mempool_timestamp) > time.time() + drift_limit:
                        # dont accept future txs
                        acceptable = False

                    # dont accept old txs, mempool needs to be harsher than ledger
                    if quantize_two(mempool_timestamp) < time.time() - 82800:
                        acceptable = 0

                    # remove from mempool if it's in both ledger and mempool already
                    if mempool_in and ledger_in:
                        try:
                            # Do not lock, we already have the lock for the whole merge.
                            self.execute(SQL_DELETE_TX, (mempool_signature_enc, ))
                            self.commit()
                            mempool_result.append("Mempool: Transaction deleted from our mempool")
                        except:
                            # experimental try and except
                            mempool_result.append("Mempool: Transaction was not present in the pool anymore")
                            pass  # continue to mempool finished message

                    # verify signatures and balances
                    essentials.validate_pem(mempool_public_key_hashed)
                    # verify signature
                    verifier = PKCS1_v1_5.new(mempool_public_key)
                    my_hash = SHA.new(str((mempool_timestamp, mempool_address, mempool_recipient,
                                           mempool_amount, mempool_operation,
                                           mempool_openfield)).encode("utf-8"))
                    if not verifier.verify(my_hash, mempool_signature_dec):
                        acceptable = False
                        mempool_result.append(
                            "Mempool: Wrong signature in mempool insert attempt: {}".format(transaction))
                        # self.app_log.warning("Mempool: Wrong signature in mempool insert attempt")
                    # verify signature
                    if acceptable:
                        # verify balance
                        # mempool_result.append("Mempool: Verifying balance")
                        mempool_result.append("Mempool: Received address: {}".format(mempool_address))

                        # include mempool fees
                        result = self.fetchall(
                            "SELECT amount, openfield FROM transactions WHERE address = ?;",
                            (mempool_address, ))
                        debit_mempool = 0
                        if result:
                            for x in result:
                                debit_tx = quantize_eight(x[0])
                                fee = quantize_eight(essentials.fee_calculate(x[1]))
                                debit_mempool = quantize_eight(debit_mempool + debit_tx + fee)
                        else:
                            debit_mempool = 0

                        # include the new block
                        credit_ledger = Decimal("0")
                        for entry in essentials.execute_param_c(
                                c, "SELECT amount FROM transactions WHERE recipient = ?;",
                                (mempool_address, ), self.app_log):
                            try:
                                credit_ledger = quantize_eight(credit_ledger) + quantize_eight(entry[0])
                                credit_ledger = 0 if credit_ledger is None else credit_ledger
                            except:
                                credit_ledger = 0
                        credit = credit_ledger

                        debit_ledger = Decimal("0")
                        for entry in essentials.execute_param_c(
                                c, "SELECT amount FROM transactions WHERE address = ?;",
                                (mempool_address, ), self.app_log):
                            try:
                                debit_ledger = quantize_eight(debit_ledger) + quantize_eight(entry[0])
                                debit_ledger = 0 if debit_ledger is None else debit_ledger
                            except:
                                debit_ledger = 0
                        debit = debit_ledger + debit_mempool

                        fees = Decimal("0")
                        for entry in essentials.execute_param_c(
                                c, "SELECT fee FROM transactions WHERE address = ?;",
                                (mempool_address, ), self.app_log):
                            try:
                                fees = quantize_eight(fees) + quantize_eight(entry[0])
                                fees = 0 if fees is None else fees
                            except:
                                fees = 0

                        rewards = Decimal("0")
                        for entry in essentials.execute_param_c(
                                c, "SELECT sum(reward) FROM transactions WHERE recipient = ?;",
                                (mempool_address, ), self.app_log):
                            try:
                                rewards = quantize_eight(rewards) + quantize_eight(entry[0])
                                rewards = 0 if rewards is None else rewards
                            except:
                                rewards = 0

                        # balance includes pending mempool debits and this tx's amount;
                        # balance_pre is the ledger-only balance
                        balance = quantize_eight(credit - debit - fees + rewards
                                                 - quantize_eight(mempool_amount))
                        balance_pre = quantize_eight(credit_ledger - debit_ledger - fees + rewards)

                        fee = essentials.fee_calculate(mempool_openfield)

                        time_now = time.time()
                        if quantize_two(mempool_timestamp) > quantize_two(time_now) + drift_limit:
                            mempool_result.append(
                                "Mempool: Future transaction not allowed, timestamp {} minutes in the future"
                                .format(quantize_two((quantize_two(mempool_timestamp)
                                                      - quantize_two(time_now)) / 60)))
                            # self.app_log.warning("Mempool: Future transaction not allowed, timestamp {} minutes in the future.")
                        elif quantize_two(time_now) - 86400 > quantize_two(mempool_timestamp):
                            mempool_result.append("Mempool: Transaction older than 24h not allowed.")
                            # self.app_log.warning("Mempool: Transaction older than 24h not allowed.")
                        elif quantize_eight(mempool_amount) > quantize_eight(balance_pre):
                            mempool_result.append("Mempool: Sending more than owned")
                            # self.app_log.warning("Mempool: Sending more than owned")
                        elif quantize_eight(balance) - quantize_eight(fee) < 0:
                            mempool_result.append("Mempool: Cannot afford to pay fees")
                            # self.app_log.warning("Mempool: Cannot afford to pay fees")
                        # verify signatures and balances
                        else:
                            self.execute(
                                "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?)",
                                (str(mempool_timestamp), str(mempool_address),
                                 str(mempool_recipient), str(mempool_amount),
                                 str(mempool_signature_enc), str(mempool_public_key_hashed),
                                 str(mempool_operation), str(mempool_openfield)))
                            mempool_result.append(
                                "Mempool updated with a received transaction from {}".format(peer_ip))
                            self.commit()  # Save (commit) the changes
                            mempool_size = mempool_size + sys.getsizeof(str(transaction)) / 1000000.0
                else:
                    mempool_result.append("Local mempool is already full for this tx type, skipping merging")
                    # self.app_log.warning("Local mempool is already full for this tx type, skipping merging")
                    return mempool_result  # avoid spamming of the logs
            # TODO: Here maybe commit() on c to release the write lock?
        except Exception as e:
            self.app_log.warning("Mempool: Error processing: {} {}".format(data, e))
            if self.config.debug_conf == 1:
                raise
        try:
            # `e` only exists if an exception was caught above; on the success
            # path this raises NameError and falls through to the plain return.
            return e, mempool_result
        except:
            return mempool_result
# NOTE(review): top-level CLI script fragment — `recipient_input` and
# `amount_input` are defined earlier in the original file (not visible here).
if not address_validate(recipient_input):
    print("Wrong address format")
    exit(1)
# optional argv[3]: operation (defaults to 0 when absent)
try:
    operation_input = sys.argv[3]
except IndexError:
    operation_input = 0
# optional argv[4]: openfield data (defaults to empty string when absent)
try:
    openfield_input = sys.argv[4]
except IndexError:
    openfield_input = ""
# hardfork fee display
fee = fee_calculate(openfield_input)
print("Fee: %s" % fee)
# confirm = input("Confirm (y/n): ")
# if confirm != 'y':
#     print("Transaction cancelled, user confirmation failed")
#     exit(1)
# hardfork fee display
# reject non-numeric amounts
try:
    float(amount_input)
    is_float = 1
except ValueError:
    is_float = 0
    exit(1)
def process_transactions(block):
    """Validate every transaction of a candidate block and stage it for insert.

    For each tx tuple (timestamp, address, recipient, amount, signature,
    pubkey_b64, operation, openfield) this checks age, per-address block
    debit/fees and sender balance, computes the mining reward for the last
    tx of the block, then appends the normalized row to the module-level
    ``block_transactions`` list and evicts the tx from the local mempool.

    Raises ValueError on any consensus rule violation (too-old tx,
    overspend, unaffordable fees).

    NOTE(review): relies on module/outer-scope state (``block_instance``,
    ``miner_tx``, ``node``, ``db_handler``, ``block_transactions``, ``mp``,
    ``fork``) — presumably set up by the enclosing digest code; confirm.
    """
    fees_block = []
    block_instance.mining_reward = 0  # avoid warning
    # Cache for multiple tx from same address
    balances = {}
    # TODO: remove condition after HF
    if block_instance.block_height_new >= 1450000:
        # Post-HF: reject txs older than 2 hours relative to the miner tx.
        oldest_possible_tx = miner_tx.q_block_timestamp - 60 * 60 * 2
    else:
        # Was 24 h before
        oldest_possible_tx = miner_tx.q_block_timestamp - 60 * 60 * 24
    for tx_index, transaction in enumerate(block):
        if float(transaction[0]) < oldest_possible_tx:
            raise ValueError(
                "txid {} from {} is older ({}) than oldest possible date ({})"
                .format(transaction[4][:56], transaction[1], transaction[0],
                        oldest_possible_tx))
        # Normalize/truncate every field to its max ledger length.
        db_timestamp = '%.2f' % quantize_two(transaction[0])
        db_address = str(transaction[1])[:56]
        db_recipient = str(transaction[2])[:56]
        db_amount = '%.8f' % quantize_eight(transaction[3])
        db_signature = str(transaction[4])[:684]
        db_public_key_b64encoded = str(transaction[5])[:1068]
        db_operation = str(transaction[6])[:30]
        db_openfield = str(transaction[7])[:100000]
        block_debit_address = 0
        block_fees_address = 0
        # this also is redundant on many tx per address block
        for x in block:
            if x[1] == db_address:
                # make calculation relevant to a particular address in the block
                block_debit_address = quantize_eight(
                    Decimal(block_debit_address) + Decimal(x[3]))
                if x != block[-1]:
                    # NOTE(review): fee is computed from the OUTER tx's
                    # db_openfield/db_operation for every matching x, not from
                    # x[7]/x[6] — looks intentional in legacy code but verify.
                    block_fees_address = quantize_eight(
                        Decimal(block_fees_address) + Decimal(
                            essentials.fee_calculate(
                                db_openfield, db_operation, node.last_block
                            )))  # exclude the mining tx from fees
        # node.logger.app_log.info("Fee: " + str(fee))
        # decide reward: the last tx of the block is the mining/header tx.
        if tx_index == block_instance.tx_count - 1:
            db_amount = 0  # prevent spending from another address, because mining txs allow delegation
            # Reward schedule depends on network and fork height.
            if node.is_testnet and node.last_block >= fork.POW_FORK_TESTNET:
                block_instance.mining_reward = 15 - (
                    block_instance.block_height_new -
                    fork.POW_FORK_TESTNET) / 1100000 - 9.5
            elif node.is_mainnet and node.last_block >= fork.POW_FORK:
                block_instance.mining_reward = 15 - (
                    block_instance.block_height_new -
                    fork.POW_FORK) / 1100000 - 9.5
            else:
                block_instance.mining_reward = 15 - (
                    quantize_eight(block_instance.block_height_new) /
                    quantize_eight(1000000 / 2)) - Decimal("2.4")
            # Reward floor.
            if block_instance.mining_reward < 0.5:
                block_instance.mining_reward = 0.5
            # Miner collects the base reward plus all fees of the block.
            reward = '{:.8f}'.format(block_instance.mining_reward +
                                     sum(fees_block))
            # don't request a fee for mined block so new accounts can mine
            fee = 0
        else:
            reward = 0
            fee = essentials.fee_calculate(db_openfield, db_operation,
                                           node.last_block)
            fees_block.append(quantize_eight(fee))
            balance_pre = ledger_balance3(
                db_address, balances,
                db_handler)  # keep this as c (ram hyperblock access)
            # Balance after subtracting everything this address spends in block.
            balance = quantize_eight(balance_pre - block_debit_address)
            if quantize_eight(balance_pre) < quantize_eight(db_amount):
                raise ValueError(
                    f"{db_address} sending more than owned: {db_amount}/{balance_pre}"
                )
            if quantize_eight(balance) - quantize_eight(
                    block_fees_address) < 0:
                # exclude fee check for the mining/header tx
                raise ValueError(
                    f"{db_address} Cannot afford to pay fees (balance: {balance}, "
                    f"block fees: {block_fees_address})")
        # append, but do not insert to ledger before whole block is validated,
        # note that it takes already validated values (decimals, length)
        node.logger.app_log.info(
            f"Chain: Appending transaction back to block with "
            f"{len(block_transactions)} transactions in it")
        block_transactions.append(
            (str(block_instance.block_height_new), str(db_timestamp),
             str(db_address), str(db_recipient), str(db_amount),
             str(db_signature), str(db_public_key_b64encoded),
             str(block_instance.block_hash), str(fee), str(reward),
             str(db_operation), str(db_openfield)))
        try:
            # Evict the now-confirmed tx from the local mempool.
            mp.MEMPOOL.delete_transaction(db_signature)
            node.logger.app_log.info(
                f"Chain: Removed processed transaction {db_signature[:56]}"
                f" from the mempool while digesting")
        except:
            # tx was not or is no more in the local mempool
            pass
def merge(self, data, peer_ip, c, size_bypass=False, wait=False,
          revert=False):
    """
    Checks and merge the tx list in out mempool

    Legacy (pre-SignerFactory) variant: validates each incoming tx
    (format, address, timestamp window, RSA signature), de-duplicates
    against mempool and ledger, checks spendable balance, then inserts
    into the local mempool db. May freeze a misbehaving peer for 10 min
    via ``self.peers_sent``.

    NOTE(review): returns a plain str for empty ``data`` but a list of
    messages otherwise — callers must cope with both; the newer typed
    ``merge`` returns a list in every case.

    :param data: list of tx lists (or a single tx list)
    :param peer_ip: source peer, '127.0.0.1' for local submissions
    :param c: ledger db cursor
    :param size_bypass: if True, will merge whatever the mempool size is
    :param wait: if True, will wait until the main db_lock is free. if False, will just drop.
    :param revert: if True, we are reverting tx from digest_block, so main lock is on. Don't bother, process without lock.
    :return: list of status messages (or a str when data is empty)
    """
    global REFUSE_OLDER_THAN
    # Easy cases of empty or invalid data
    if not data:
        return "Mempool from {} was empty".format(peer_ip)
    mempool_result = []
    if data == '*':
        raise ValueError("Connection lost")
    try:
        # Peer currently frozen (recently sent bad/outdated data)?
        if self.peers_sent[peer_ip] > time.time(
        ) and peer_ip != '127.0.0.1':
            self.app_log.warning(
                "Mempool ignoring merge from frozen {}".format(peer_ip))
            mempool_result.append(
                "Mempool ignoring merge from frozen {}".format(peer_ip))
            return mempool_result
    except:
        # unknown peer
        pass
    if not essentials.is_sequence(data):
        if peer_ip != '127.0.0.1':
            with self.peers_lock:
                self.peers_sent[peer_ip] = time.time() + 10 * 60
            self.app_log.warning(
                "Freezing mempool from {} for 10 min - Bad TX format".
                format(peer_ip))
        mempool_result.append("Bad TX Format")
        return mempool_result
    if not revert:
        while self.db_lock.locked():
            # prevent transactions which are just being digested from being added to mempool
            if not wait:
                # not reverting, but not waiting, bye
                # By default, we don't wait.
                mempool_result.append("Locked ledger, dropping txs")
                return mempool_result
            self.app_log.warning(
                "Waiting for block digestion to finish before merging mempool"
            )
            time.sleep(1)
    # if reverting, don't bother with main lock, go on.
    # Let's really dig
    mempool_result.append(
        "Mempool merging started from {}".format(peer_ip))
    # Single time reference here for the whole merge.
    time_now = time.time()
    # calculate current mempool size before adding txs
    mempool_size = self.size()
    # TODO: we check main ledger db is not locked before beginning, but we don't lock?
    # ok, see comment in node.py. since it's called from a lock, it would deadlock.
    # merge mempool
    # while self.lock.locked():
    #     time.sleep(1)
    with self.lock:
        try:
            block_list = data
            if not isinstance(
                    block_list[0], list
            ):  # convert to list of lists if only one tx and not handled
                block_list = [block_list]
            for transaction in block_list:
                if size_bypass or self.space_left_for_tx(
                        transaction, mempool_size):
                    # all transactions in the mempool need to be cycled to check for special cases,
                    # therefore no while/break loop here
                    # Normalize/truncate fields to their max ledger lengths.
                    mempool_timestamp = '%.2f' % (quantize_two(
                        transaction[0]))
                    mempool_timestamp_float = float(
                        transaction[0])  # limit Decimal where not needed
                    mempool_address = str(transaction[1])[:56]
                    mempool_recipient = str(transaction[2])[:56]
                    mempool_amount = '%.8f' % (quantize_eight(
                        transaction[3]))  # convert scientific notation
                    mempool_amount_float = float(transaction[3])
                    mempool_signature_enc = str(transaction[4])[:684]
                    mempool_public_key_hashed = str(transaction[5])[:1068]
                    if "b'" == mempool_public_key_hashed[:2]:
                        # bytes repr leaked into the string - strip b'' wrapper
                        mempool_public_key_hashed = transaction[5][2:1070]
                    mempool_operation = str(transaction[6])[:30]
                    mempool_openfield = str(transaction[7])[:100000]
                    # Begin with the easy tests that do not require cpu or disk access
                    if mempool_amount_float < 0:
                        mempool_result.append(
                            "Mempool: Negative balance spend attempt")
                        continue
                    if not essentials.address_validate(mempool_address):
                        mempool_result.append(
                            "Mempool: Invalid address {}".format(
                                mempool_address))
                        continue
                    if not essentials.address_validate(mempool_recipient):
                        mempool_result.append(
                            "Mempool: Invalid recipient {}".format(
                                mempool_recipient))
                        continue
                    if mempool_timestamp_float > time_now:
                        mempool_result.append(
                            "Mempool: Future transaction rejected {}s".
                            format(mempool_timestamp_float - time_now))
                        continue
                    if mempool_timestamp_float < time_now - REFUSE_OLDER_THAN:
                        # don't accept old txs, mempool needs to be harsher than ledger
                        mempool_result.append(
                            "Mempool: Too old a transaction")
                        continue
                    # Then more cpu heavy tests
                    # Address must be the sha224 of the sender's pubkey.
                    hashed_address = hashlib.sha224(
                        base64.b64decode(
                            mempool_public_key_hashed)).hexdigest()
                    if mempool_address != hashed_address:
                        mempool_result.append(
                            "Mempool: Attempt to spend from a wrong address {} instead of {}"
                            .format(mempool_address, hashed_address))
                        continue
                    # Crypto tests - more cpu hungry
                    try:
                        essentials.validate_pem(mempool_public_key_hashed)
                    except ValueError as e:
                        # NOTE(review): no `continue` here — flow falls through
                        # to the signature check; an invalid pem then makes
                        # RSA.importKey raise, which IS caught below. Net
                        # behavior is a skip, just with an extra message.
                        mempool_result.append(
                            "Mempool: Public key does not validate: {}".
                            format(e))
                    # recheck sig
                    try:
                        mempool_public_key = RSA.importKey(
                            base64.b64decode(mempool_public_key_hashed))
                        mempool_signature_dec = base64.b64decode(
                            mempool_signature_enc)
                        verifier = PKCS1_v1_5.new(mempool_public_key)
                        tx_signed = (mempool_timestamp, mempool_address,
                                     mempool_recipient, mempool_amount,
                                     mempool_operation, mempool_openfield)
                        my_hash = SHA.new(str(tx_signed).encode("utf-8"))
                        if not verifier.verify(my_hash,
                                               mempool_signature_dec):
                            mempool_result.append(
                                "Mempool: Wrong signature ({}) for data {} in mempool insert attempt"
                                .format(mempool_signature_enc, tx_signed))
                            continue
                    except Exception as e:
                        mempool_result.append(
                            "Mempool: Unexpected error checking sig: {}".
                            format(e))
                        continue
                    # Only now, process the tests requiring db access
                    mempool_in = self.sig_check(mempool_signature_enc)
                    # Temp: get last block for HF reason
                    essentials.execute_param_c(
                        c,
                        "SELECT block_height FROM transactions WHERE 1 ORDER by block_height DESC limit ?",
                        (1, ), self.app_log)
                    last_block = c.fetchone()[0]
                    # reject transactions which are already in the ledger
                    # TODO: not clean, will need to have ledger as a module too.
                    essentials.execute_param_c(
                        c,
                        "SELECT timestamp FROM transactions WHERE signature = ?",
                        (mempool_signature_enc, ), self.app_log)
                    ledger_in = bool(c.fetchone())
                    # remove from mempool if it's in both ledger and mempool already
                    if mempool_in and ledger_in:
                        try:
                            # Do not lock, we already have the lock for the whole merge.
                            self.execute(SQL_DELETE_TX,
                                         (mempool_signature_enc, ))
                            self.commit()
                            mempool_result.append(
                                "Mempool: Transaction deleted from our mempool"
                            )
                        except:
                            # experimental try and except
                            mempool_result.append(
                                "Mempool: Transaction was not present in the pool anymore"
                            )
                        continue
                    if ledger_in:
                        mempool_result.append(
                            "That transaction is already in our ledger")
                        # Can be a syncing node. Do not request mempool from this peer until 10 min
                        if peer_ip != '127.0.0.1':
                            with self.peers_lock:
                                self.peers_sent[peer_ip] = time.time(
                                ) + 10 * 60
                            self.app_log.warning(
                                "Freezing mempool from {} for 10 min.".
                                format(peer_ip))
                        # Here, we point blank stop processing the batch from this host since it's outdated.
                        # Update: Do not, since it blocks further valid tx - case has been found in real use.
                        # return mempool_result
                        continue
                    # Already there, just ignore then
                    if mempool_in:
                        mempool_result.append(
                            "That transaction is already in our mempool")
                        continue
                    # Here we covered the basics, the current tx is conform and signed. Now let's check balance.
                    # verify balance
                    mempool_result.append(
                        "Mempool: Received address: {}".format(
                            mempool_address))
                    # include mempool fees
                    result = self.fetchall(
                        "SELECT amount, openfield, operation FROM transactions WHERE address = ?",
                        (mempool_address, ))
                    debit_mempool = 0
                    if result:
                        for x in result:
                            debit_tx = quantize_eight(x[0])
                            fee = quantize_eight(
                                essentials.fee_calculate(
                                    x[1], x[2], last_block))
                            debit_mempool = quantize_eight(debit_mempool +
                                                           debit_tx + fee)
                    # Sum ledger credits/debits/fees/rewards for the sender.
                    credit = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT amount FROM transactions WHERE recipient = ?",
                            (mempool_address, ), self.app_log):
                        credit = quantize_eight(credit) + quantize_eight(
                            entry[0])
                    debit_ledger = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT amount FROM transactions WHERE address = ?",
                            (mempool_address, ), self.app_log):
                        debit_ledger = quantize_eight(
                            debit_ledger) + quantize_eight(entry[0])
                    debit = debit_ledger + debit_mempool
                    fees = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT fee FROM transactions WHERE address = ?",
                            (mempool_address, ), self.app_log):
                        fees = quantize_eight(fees) + quantize_eight(
                            entry[0])
                    rewards = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT sum(reward) FROM transactions WHERE recipient = ?",
                            (mempool_address, ), self.app_log):
                        rewards = quantize_eight(rewards) + quantize_eight(
                            entry[0])
                    # balance includes pending mempool spends and this tx's amount;
                    # balance_pre is ledger-only.
                    balance = quantize_eight(
                        credit - debit - fees + rewards -
                        quantize_eight(mempool_amount))
                    balance_pre = quantize_eight(credit - debit_ledger -
                                                 fees + rewards)
                    fee = essentials.fee_calculate(mempool_openfield,
                                                   mempool_operation,
                                                   last_block)
                    if quantize_eight(mempool_amount) > quantize_eight(
                            balance_pre):
                        mempool_result.append(
                            "Mempool: Sending more than owned")
                        continue
                    if quantize_eight(balance) - quantize_eight(fee) < 0:
                        mempool_result.append(
                            "Mempool: Cannot afford to pay fees")
                        continue
                    # Pfew! we can finally insert into mempool - all is str, type converted and enforced above
                    self.execute(
                        "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?)",
                        (mempool_timestamp, mempool_address,
                         mempool_recipient, mempool_amount,
                         mempool_signature_enc, mempool_public_key_hashed,
                         mempool_operation, mempool_openfield))
                    mempool_result.append(
                        "Mempool updated with a received transaction from {}"
                        .format(peer_ip))
                    mempool_result.append("Success")
                    self.commit(
                    )  # Save (commit) the changes to mempool db
                    mempool_size += sys.getsizeof(
                        str(transaction)) / 1000000.0
                else:
                    mempool_result.append(
                        "Local mempool is already full for this tx type, skipping merging"
                    )
                    # self.app_log.warning("Local mempool is already full for this tx type, skipping merging")
            # TEMP
            # print("Mempool insert", mempool_result)
            return mempool_result
            # TODO: Here maybe commit() on c to release the write lock?
        except Exception as e:
            self.app_log.warning("Mempool: Error processing: {} {}".format(
                data, e))
            if self.config.debug_conf == 1:
                raise
            return mempool_result
def merge(self,
          data: list,
          peer_ip: str,
          c,
          size_bypass: bool = False,
          wait: bool = False,
          revert: bool = False) -> list:
    """
    Checks and merge the tx list in out mempool

    Validates each incoming tx field by field (timestamp, addresses,
    amount, signature/pubkey/operation/openfield lengths, mandatory
    message), verifies the signature via SignerFactory, de-duplicates
    against mempool and ledger, checks spendable balance, then inserts
    into the local mempool db. May freeze a misbehaving peer via
    ``self.peers_sent``.

    :param data: list of tx lists (or a single tx list)
    :param peer_ip: source peer, '127.0.0.1' for local submissions
    :param c: ledger db cursor
    :param size_bypass: if True, will merge whatever the mempool size is
    :param wait: if True, will wait until the main db_lock is free. if False, will just drop.
    :param revert: if True, we are reverting tx from digest_block, so main lock is on. Don't bother, process without lock.
    :return: list of status messages
    """
    global REFUSE_OLDER_THAN
    # Easy cases of empty or invalid data
    if not data:
        return ["Mempool from {} was empty".format(peer_ip)]
    mempool_result = []
    if data == '*':
        raise ValueError("Connection lost")
    try:
        # Peer currently frozen (recently sent bad/outdated data)?
        if self.peers_sent[peer_ip] > time.time(
        ) and peer_ip != '127.0.0.1':
            self.app_log.warning(
                "Mempool ignoring merge from frozen {}".format(peer_ip))
            mempool_result.append(
                "Mempool ignoring merge from frozen {}".format(peer_ip))
            return mempool_result
    except:
        # unknown peer
        pass
    if not essentials.is_sequence(data):
        if peer_ip != '127.0.0.1':
            with self.peers_lock:
                self.peers_sent[peer_ip] = time.time() + 10 * 60
            self.app_log.warning(
                "Freezing mempool from {} for 10 min - Bad TX format".
                format(peer_ip))
        mempool_result.append("Bad TX Format")
        return mempool_result
    if not revert:
        while self.db_lock.locked():
            # prevent transactions which are just being digested from being added to mempool
            if not wait:
                # not reverting, but not waiting, bye
                # By default, we don't wait.
                mempool_result.append("Locked ledger, dropping txs")
                return mempool_result
            self.app_log.warning(
                "Waiting for block digestion to finish before merging mempool"
            )
            time.sleep(1)
    # if reverting, don't bother with main lock, go on.
    # Let's really dig
    mempool_result.append(
        "Mempool merging started from {}".format(peer_ip))
    # Single time reference here for the whole merge.
    time_now = time.time()
    # calculate current mempool size before adding txs
    mempool_size = self.size()
    # TODO: we check main ledger db is not locked before beginning, but we don't lock?
    # ok, see comment in node.py. since it's called from a lock, it would deadlock.
    # merge mempool
    # while self.lock.locked():
    #     time.sleep(1)
    with self.lock:
        try:
            block_list = data
            if not isinstance(
                    block_list[0], list
            ):  # convert to list of lists if only one tx and not handled
                block_list = [block_list]
            for transaction in block_list:
                if size_bypass or self.space_left_for_tx(
                        transaction, mempool_size):
                    # all transactions in the mempool need to be cycled to check for special cases,
                    # therefore no while/break loop here
                    try:
                        mempool_timestamp = '%.2f' % (quantize_two(
                            transaction[0]))
                        mempool_timestamp_float = float(
                            transaction[0]
                        )  # limit Decimal where not needed
                    except Exception as e:
                        mempool_result.append(
                            "Mempool: Invalid timestamp {}".format(
                                transaction[0]))
                        # FIX: was missing - without it the loop went on with
                        # mempool_timestamp unbound, raising NameError later,
                        # which the broad outer except turned into an abort of
                        # the whole batch instead of a skip of this one tx.
                        continue
                    if not essentials.address_validate(transaction[1]):
                        mempool_result.append(
                            "Mempool: Invalid address {}".format(
                                transaction[1]))
                        continue
                    # We could now ignore the truncates here, I left them for
                    # explicit reminder of the various fields max lengths.
                    mempool_address = str(transaction[1])[:56]
                    if not essentials.address_validate(transaction[2]):
                        mempool_result.append(
                            "Mempool: Invalid recipient {}".format(
                                transaction[2]))
                        continue
                    mempool_recipient = str(transaction[2])[:56]
                    try:
                        mempool_amount = '%.8f' % (quantize_eight(
                            transaction[3]))  # convert scientific notation
                        mempool_amount_float = float(transaction[3])
                    except Exception as e:
                        mempool_result.append(
                            "Mempool: Invalid amount {}".format(
                                transaction[3]))
                        continue
                    if len(transaction[4]) > 684:
                        mempool_result.append(
                            "Mempool: Invalid signature len{}".format(
                                len(transaction[4])))
                        continue
                    mempool_signature_enc = str(transaction[4])[:684]
                    if len(transaction[5]) > 1068:
                        mempool_result.append(
                            "Mempool: Invalid pubkey len{}".format(
                                len(transaction[5])))
                        continue
                    mempool_public_key_b64encoded = str(
                        transaction[5])[:1068]
                    if "b'" == mempool_public_key_b64encoded[:2]:
                        # Binary content instead of str - leftover from legacy code?
                        mempool_public_key_b64encoded = transaction[5][
                            2:1070]
                    if len(transaction[6]) > 30:
                        mempool_result.append(
                            "Mempool: Invalid operation len{}".format(
                                len(transaction[6])))
                        continue
                    mempool_operation = str(transaction[6])[:30]
                    if len(transaction[7]) > 100000:
                        mempool_result.append(
                            "Mempool: Invalid openfield len{}".format(
                                len(transaction[7])))
                        continue
                    mempool_openfield = str(transaction[7])[:100000]
                    if len(mempool_openfield) <= 4:
                        # no or short message for a mandatory message
                        if mempool_recipient in self.config.mandatory_message.keys(
                        ):
                            mempool_result.append(
                                "Mempool: Missing message - {}".format(
                                    self.config.
                                    mandatory_message[mempool_recipient]))
                            continue
                    # Begin with the easy tests that do not require cpu or disk access
                    if mempool_amount_float < 0:
                        mempool_result.append(
                            "Mempool: Negative balance spend attempt")
                        continue
                    if mempool_timestamp_float > time_now:
                        mempool_result.append(
                            "Mempool: Future transaction rejected {}s".
                            format(mempool_timestamp_float - time_now))
                        continue
                    if mempool_timestamp_float < time_now - REFUSE_OLDER_THAN:
                        # don't accept old txs, mempool needs to be harsher than ledger
                        mempool_result.append(
                            "Mempool: Too old a transaction")
                        continue
                    # Then more cpu heavy tests
                    buffer = str(
                        (mempool_timestamp, mempool_address,
                         mempool_recipient, mempool_amount,
                         mempool_operation,
                         mempool_openfield)).encode("utf-8")
                    # Will raise if error
                    try:
                        SignerFactory.verify_bis_signature(
                            mempool_signature_enc,
                            mempool_public_key_b64encoded, buffer,
                            mempool_address)
                    except Exception as e:
                        mempool_result.append(
                            f"Mempool: Signature did not match for address ({e})"
                        )
                        continue
                    # Only now, process the tests requiring db access
                    mempool_in = self.sig_check(mempool_signature_enc)
                    # Temp: get last block for HF reason
                    essentials.execute_param_c(
                        c,
                        "SELECT block_height FROM transactions WHERE 1 ORDER by block_height DESC limit ?",
                        (1, ), self.app_log)
                    last_block = c.fetchone()[0]
                    # reject transactions which are already in the ledger
                    # TODO: not clean, will need to have ledger as a module too.
                    # TODO: need better txid index, this is very sloooooooow
                    if self.config.old_sqlite:
                        essentials.execute_param_c(
                            c,
                            "SELECT timestamp FROM transactions WHERE signature = ?1",
                            (mempool_signature_enc, ), self.app_log)
                    else:
                        essentials.execute_param_c(
                            c,
                            "SELECT timestamp FROM transactions WHERE substr(signature,1,4) = substr(?1,1,4) AND signature = ?1",
                            (mempool_signature_enc, ), self.app_log)
                    ledger_in = bool(c.fetchone())
                    # remove from mempool if it's in both ledger and mempool already
                    if mempool_in and ledger_in:
                        try:
                            # Do not lock, we already have the lock for the whole merge.
                            if self.config.old_sqlite:
                                self.execute(SQL_DELETE_TX_OLD,
                                             (mempool_signature_enc, ))
                            else:
                                self.execute(SQL_DELETE_TX,
                                             (mempool_signature_enc, ))
                            self.commit()
                            mempool_result.append(
                                "Mempool: Transaction deleted from our mempool"
                            )
                        except:
                            # experimental try and except
                            mempool_result.append(
                                "Mempool: Transaction was not present in the pool anymore"
                            )
                        continue
                    if ledger_in:
                        mempool_result.append(
                            "That transaction is already in our ledger")
                        # Can be a syncing node. Do not request mempool from this peer until FREEZE_MIN min
                        # ledger_in is the ts of the tx in ledger. if it's recent, maybe the peer is just one block late.
                        # give him 15 minute margin.
                        # NOTE(review): ledger_in is bool()-ed above, so this
                        # comparison is always True when ledger_in is truthy;
                        # the 15-min margin looks ineffective - confirm intent.
                        if (peer_ip != '127.0.0.1') and (
                                ledger_in < time_now - 60 * 15):
                            with self.peers_lock:
                                self.peers_sent[peer_ip] = time.time(
                                ) + FREEZE_MIN * 60
                            self.app_log.warning(
                                "Freezing mempool from {} for {} min.".
                                format(peer_ip, FREEZE_MIN))
                        # Here, we point blank stop processing the batch from this host since it's outdated.
                        # Update: Do not, since it blocks further valid tx - case has been found in real use.
                        # return mempool_result
                        continue
                    # Already there, just ignore then
                    if mempool_in:
                        mempool_result.append(
                            "That transaction is already in our mempool")
                        continue
                    # Here we covered the basics, the current tx is conform and signed. Now let's check balance.
                    # verify balance
                    mempool_result.append(
                        "Mempool: Received address: {}".format(
                            mempool_address))
                    # include mempool fees
                    result = self.fetchall(
                        "SELECT amount, openfield, operation FROM transactions WHERE address = ?",
                        (mempool_address, ))
                    debit_mempool = 0
                    if result:
                        for x in result:
                            debit_tx = quantize_eight(x[0])
                            fee = quantize_eight(
                                essentials.fee_calculate(
                                    x[1], x[2], last_block))
                            debit_mempool = quantize_eight(debit_mempool +
                                                           debit_tx + fee)
                    # Sum ledger credits/debits/fees/rewards for the sender.
                    credit = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT amount FROM transactions WHERE recipient = ?",
                            (mempool_address, ), self.app_log):
                        credit = quantize_eight(credit) + quantize_eight(
                            entry[0])
                    debit_ledger = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT amount FROM transactions WHERE address = ?",
                            (mempool_address, ), self.app_log):
                        debit_ledger = quantize_eight(
                            debit_ledger) + quantize_eight(entry[0])
                    debit = debit_ledger + debit_mempool
                    fees = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT fee FROM transactions WHERE address = ?",
                            (mempool_address, ), self.app_log):
                        fees = quantize_eight(fees) + quantize_eight(
                            entry[0])
                    rewards = 0
                    for entry in essentials.execute_param_c(
                            c,
                            "SELECT sum(reward) FROM transactions WHERE recipient = ?",
                            (mempool_address, ), self.app_log):
                        rewards = quantize_eight(rewards) + quantize_eight(
                            entry[0])
                    # error conversion from NoneType to Decimal is not supported
                    # balance includes pending mempool spends and this tx's amount;
                    # balance_pre is ledger-only.
                    balance = quantize_eight(
                        credit - debit - fees + rewards -
                        quantize_eight(mempool_amount))
                    balance_pre = quantize_eight(credit - debit_ledger -
                                                 fees + rewards)
                    fee = essentials.fee_calculate(mempool_openfield,
                                                   mempool_operation,
                                                   last_block)
                    # print("Balance", balance, fee)
                    if quantize_eight(mempool_amount) > quantize_eight(
                            balance_pre):
                        # mp amount is already included in "balance" var!
                        # also, that tx might already be in the mempool
                        mempool_result.append(
                            "Mempool: Sending more than owned")
                        continue
                    if quantize_eight(balance) - quantize_eight(fee) < 0:
                        mempool_result.append(
                            "Mempool: Cannot afford to pay fees")
                        continue
                    # Pfew! we can finally insert into mempool - all is str, type converted and enforced above
                    self.execute(
                        "INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?)",
                        (mempool_timestamp, mempool_address,
                         mempool_recipient, mempool_amount,
                         mempool_signature_enc,
                         mempool_public_key_b64encoded, mempool_operation,
                         mempool_openfield, int(time_now)))
                    mempool_result.append(
                        "Mempool updated with a received transaction from {}"
                        .format(peer_ip))
                    mempool_result.append(
                        "Success"
                    )  # WARNING: Do not change string or case ever!
                    self.commit(
                    )  # Save (commit) the changes to mempool db
                    mempool_size += sys.getsizeof(
                        str(transaction)) / 1000000.0
                else:
                    mempool_result.append(
                        "Local mempool is already full for this tx type, skipping merging"
                    )
                    # self.app_log.warning("Local mempool is already full for this tx type, skipping merging")
            # TEMP
            # print("Mempool insert", mempool_result)
            return mempool_result
            # TODO: Here maybe commit() on c to release the write lock?
        except Exception as e:
            self.app_log.warning("Mempool: Error processing: {} {}".format(
                data, e))
            if self.config.debug:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(
                    exc_tb.tb_frame.f_code.co_filename)[1]
                self.app_log.warning("{} {} {}".format(
                    exc_type, fname, exc_tb.tb_lineno))
                mempool_result.append("Exception: {}".format(str(e)))
                # if left there, means debug can *not* be used in production, or exception is not sent back to the client.
                raise
        return mempool_result