async def erc20_balance_sanity_check(self):
    while True:
        log.info("starting erc20 balance sanity check process")
        async with self.pool.acquire() as con:
            token_registrations = await con.fetch("SELECT * FROM token_registrations")
        for reg in token_registrations:
            async with self.pool.acquire() as con:
                balances = await con.fetch(
                    "SELECT * FROM token_balances WHERE eth_address = $1",
                    reg['eth_address'])
            tokens1 = {t['contract_address']: t['balance'] for t in balances}
            erc20_dispatcher.update_token_cache("*", reg['eth_address'])
            await asyncio.sleep(10)
            async with self.pool.acquire() as con:
                balances = await con.fetch(
                    "SELECT * FROM token_balances WHERE eth_address = $1",
                    reg['eth_address'])
            tokens2 = {t['contract_address']: t['balance'] for t in balances}

            # report
            fixed = 0
            for key in tokens1:
                if key in tokens2:
                    if tokens1[key] != tokens2[key]:
                        fixed += 1
                        log.warning("fixed {}'s {} balance: {} -> {}".format(
                            reg['eth_address'], key, tokens1[key], tokens2[key]))
                    tokens2.pop(key)
                else:
                    fixed += 1
                    log.warning("fixed {}'s {} balance: {} -> {}".format(
                        reg['eth_address'], key, tokens1[key], "0x0"))
            for key in tokens2:
                fixed += 1
                log.warning("fixed {}'s {} balance: {} -> {}".format(
                    reg['eth_address'], key, "0x0", tokens2[key]))

async def test_token_balance_updates_with_old_nodes(
        self, *, parity, push_client, monitor, erc20_manager):
    """Tests that if a node is behind in blocks, we retry until the node catches up"""

    # make sure the retry delay is low for faster testing
    import toshieth.erc20manager
    toshieth.erc20manager.RETRY_DELAY = 1

    contract = await self.deploy_erc20_contract("TST", "Test Token", 18)
    await contract.transfer.set_sender(FAUCET_PRIVATE_KEY)(TEST_ADDRESS, 10 * 10 ** 18)
    await self.faucet(TEST_ADDRESS, 10 ** 18)

    await self.send_tx(TEST_PRIVATE_KEY, TEST_ADDRESS_2, 5 * 10 ** 18,
                       token_address=contract.address)

    tokens = []
    while not tokens:
        async with self.pool.acquire() as con:
            tokens = await con.fetch(
                "SELECT * FROM token_balances WHERE eth_address = $1",
                TEST_ADDRESS_2)

    async with self.pool.acquire() as con:
        bn = await con.fetchval("SELECT blocknumber FROM last_blocknumber")
        target_bn = bn + 10
        await con.execute("UPDATE last_blocknumber SET blocknumber = $1", target_bn)
        await con.execute("UPDATE token_balances SET balance = '0x0'")
        tokens = await con.fetch(
            "SELECT * FROM token_balances WHERE eth_address = $1",
            TEST_ADDRESS_2)
    self.assertEqual(tokens[0]['balance'], hex(0))

    from toshieth.tasks import erc20_dispatcher
    erc20_dispatcher.update_token_cache(contract.address, TEST_ADDRESS_2,
                                        blocknumber=target_bn)
    erc20_dispatcher.update_token_cache("*", TEST_ADDRESS,
                                        blocknumber=target_bn)

    while bn < target_bn + 1:
        async with self.pool.acquire() as con:
            bn = await con.fetchval("SELECT blocknumber FROM last_blocknumber")
        await asyncio.sleep(0.1)

    async with self.pool.acquire() as con:
        tokens = await con.fetch(
            "SELECT * FROM token_balances WHERE eth_address = $1",
            TEST_ADDRESS)
        tokens2 = await con.fetch(
            "SELECT * FROM token_balances WHERE eth_address = $1",
            TEST_ADDRESS_2)
    self.assertEqual(tokens[0]['balance'], hex(5 * 10 ** 18))
    self.assertEqual(tokens2[0]['balance'], hex(5 * 10 ** 18))

async def _run_erc20_health_check(self):
    log.info("running erc20 health check")
    async with self.pool.acquire() as con:
        token_balances = await con.fetch("SELECT * FROM token_balances")

    bad = 0
    requests = []
    last_execute = 0
    bulk = self.eth.bulk()
    for token in token_balances:
        contract_address = token['contract_address']
        data = "0x70a08231000000000000000000000000" + token['eth_address'][2:]
        f = bulk.eth_call(to_address=contract_address, data=data)
        requests.append((contract_address, token['eth_address'], f, token['value']))
        if len(requests) >= last_execute + 500:
            await bulk.execute()
            bulk = self.eth.bulk()
            last_execute = len(requests)
    if len(requests) > last_execute:
        await bulk.execute()

    bad_data = {}
    for contract_address, eth_address, f, db_value in requests:
        if not f.done():
            log.warning("future not done when checking erc20 cache")
            continue
        try:
            value = f.result()
        except:
            log.exception("error getting erc20 value {}:{}".format(
                contract_address, eth_address))
            continue
        if parse_int(value) != parse_int(db_value):
            bad += 1
            bad_data.setdefault(eth_address, set()).add(contract_address)

    if bad > 0:
        log.warning("Found {}/{} bad ERC20 caches over {} addresses".format(
            bad, len(token_balances), len(bad_data)))
        for eth_address in bad_data:
            erc20_dispatcher.update_token_cache("*", eth_address)
            await asyncio.sleep(15)  # don't overload things

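# A minimal illustrative sketch (not part of the service) of how the eth_call
# payload used above and in update_token_cache is assembled: "0x70a08231" is
# the 4-byte selector of the ERC20 balanceOf(address) function, followed by
# the 20-byte holder address left-padded to a 32-byte argument. The helper
# name below is hypothetical.
def _balance_of_calldata(eth_address):
    # selector + 12 zero bytes of padding + 20-byte address (without the "0x" prefix)
    return "0x70a08231" + "0" * 24 + eth_address[2:]

# e.g. _balance_of_calldata("0x" + "ab" * 20)
# == "0x70a08231" + "000000000000000000000000" + "ab" * 20
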
async def erc20_balance_sanity_check(self):
    while True:
        log.info("starting erc20 balance sanity check process")
        async with self.pool.acquire() as con:
            bad_from = await con.fetch(
                "SELECT b.eth_address, b.contract_address FROM token_balances b "
                "JOIN token_transactions t ON b.contract_address = t.contract_address AND b.eth_address = t.from_address "
                "JOIN transactions x ON x.transaction_id = t.transaction_id "
                "WHERE b.blocknumber > 0 AND x.blocknumber > b.blocknumber "
                "GROUP BY b.eth_address, b.contract_address")
            bad_to = await con.fetch(
                "SELECT b.eth_address, b.contract_address FROM token_balances b "
                "JOIN token_transactions t ON b.contract_address = t.contract_address AND b.eth_address = t.to_address "
                "JOIN transactions x ON x.transaction_id = t.transaction_id "
                "WHERE b.blocknumber > 0 AND x.blocknumber > b.blocknumber "
                "GROUP BY b.eth_address, b.contract_address")

        bad_balances = {}
        for b in bad_from:
            if b['contract_address'] not in bad_balances:
                bad_balances[b['contract_address']] = set()
            bad_balances[b['contract_address']].add(b['eth_address'])
        for b in bad_to:
            if b['contract_address'] not in bad_balances:
                bad_balances[b['contract_address']] = set()
            bad_balances[b['contract_address']].add(b['eth_address'])

        for contract_address, addresses in bad_balances.items():
            log.info("Found bad balances for token {} for addresses:\n - {}".format(
                contract_address, "\n - ".join(list(addresses))))
            erc20_dispatcher.update_token_cache(contract_address, *addresses)

        # run every 5 minutes
        await asyncio.sleep(300)

async def update_token_cache(self, contract_address, *eth_addresses, blocknumber=None):
    if len(eth_addresses) == 0:
        return

    is_wildcard = contract_address == "*"

    async with self.db:
        last_blocknumber = (await self.db.fetchval("SELECT blocknumber FROM last_blocknumber"))
    if blocknumber is None:
        blocknumber = last_blocknumber
    elif blocknumber > last_blocknumber:
        # don't continue until the block numbers match
        log.info("request to update erc20 cache before block processor is caught up")
        erc20_dispatcher.update_token_cache(
            contract_address, *eth_addresses, blocknumber=blocknumber).delay(1)
        return

    if is_wildcard:
        tokens = await self.db.fetch("SELECT contract_address FROM tokens WHERE custom = FALSE")
    else:
        tokens = [{'contract_address': contract_address}]

    if is_wildcard:
        if len(eth_addresses) > 1:
            # this is currently unneeded and dangerous
            raise Exception("wildcard update of token caches unsupported for multiple addresses")
        log.info("START update_token_cache(\"*\", {})".format(eth_addresses[0]))
        start_time = time.time()
        # NOTE: we don't remove this at the end on purpose
        # to avoid spamming of "*" refreshes
        should_run = await self.redis.set(
            "bulk_token_update:{}".format(eth_addresses[0]), 1,
            expire=60, exist=self.redis.SET_IF_NOT_EXIST)
        if not should_run:
            log.info("ABORT update_token_cache(\"*\", {}): {}".format(
                eth_addresses[0], should_run))
            return

    client = self.eth.bulk()
    futures = []
    for eth_address in eth_addresses:
        for token in tokens:
            data = "0x70a08231000000000000000000000000" + eth_address[2:]
            f = client.eth_call(to_address=token['contract_address'], data=data,
                                block=blocknumber)
            futures.append((token['contract_address'], eth_address, f))

    if len(futures) > 0:
        await client.execute()

    bulk_insert = []
    for token_contract_address, eth_address, f in futures:
        try:
            value = f.result()
            if value == "0x0000000000000000000000000000000000000000000000000000000000000000" or value == "0x":
                if value == "0x":
                    log.warning("calling balanceOf for contract {} failed".format(
                        token_contract_address))
                value = 0
            else:
                value = parse_int(value)  # remove hex padding of value
            bulk_insert.append((token_contract_address, eth_address, hex(value)))
        except JsonRPCError as e:
            if e.message == "Unknown Block Number":
                # reschedule the update and abort for now
                log.info("got unknown block number in erc20 cache update")
                erc20_dispatcher.update_token_cache(
                    contract_address, *eth_addresses, blocknumber=blocknumber).delay(1)
                return
            log.exception("WARNING: failed to update token cache of '{}' for address: {}".format(
                token_contract_address, eth_address))

    send_update = False
    if len(bulk_insert) > 0:
        async with self.db:
            await self.db.executemany(
                "INSERT INTO token_balances (contract_address, eth_address, balance) "
                "VALUES ($1, $2, $3) "
                "ON CONFLICT (contract_address, eth_address) "
                "DO UPDATE SET balance = EXCLUDED.balance",
                bulk_insert)
            await self.db.commit()
        send_update = True

    # wildcard updates usually mean we need to send a refresh trigger to clients
    # currently clients only use a TokenPayment as a trigger to refresh their
    # token cache, so we abuse this functionality here
    if is_wildcard and send_update:
        # lots of fake values so it doesn't get confused with a real tx
        data = {
            "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
            "fromAddress": "0x0000000000000000000000000000000000000000",
            "toAddress": eth_addresses[0],
            "status": "confirmed",
            "value": "0x0",
            "contractAddress": "0x0000000000000000000000000000000000000000"
        }
        message = "SOFA::TokenPayment: " + json_encode(data)
        manager_dispatcher.send_notification(eth_addresses[0], message)

    if is_wildcard:
        end_time = time.time()
        log.info("DONE update_token_cache(\"*\", {}) in {}s".format(
            eth_addresses[0], round(end_time - start_time, 2)))

async def get_token_balances(self, eth_address, token_address=None, force_update=None):
    if not validate_address(eth_address):
        raise JsonRPCInvalidParamsError(data={
            'id': 'invalid_address', 'message': 'Invalid Address'})
    if token_address is not None and not validate_address(token_address):
        raise JsonRPCInvalidParamsError(data={
            'id': 'invalid_token_address', 'message': 'Invalid Token Address'})

    # get token balances
    async with self.db:
        result = await self.db.execute(
            "UPDATE token_registrations SET last_queried = (now() AT TIME ZONE 'utc') WHERE eth_address = $1",
            eth_address)
        await self.db.commit()
    registered = result == "UPDATE 1"

    if not registered or force_update:
        erc20_dispatcher.update_token_cache("*", eth_address)
        async with self.db:
            await self.db.execute(
                "INSERT INTO token_registrations (eth_address) VALUES ($1) ON CONFLICT (eth_address) DO NOTHING",
                eth_address)
            await self.db.commit()

    if token_address:
        async with self.db:
            token = await self.db.fetchrow(
                "SELECT symbol, name, decimals, format, custom "
                "FROM tokens WHERE contract_address = $1",
                token_address)
            if token is None:
                return None
            if token['custom']:
                custom_token = await self.db.fetchrow(
                    "SELECT name, symbol, decimals FROM token_balances "
                    "WHERE contract_address = $1 AND eth_address = $2",
                    token_address, eth_address)
                if custom_token:
                    token = {
                        'name': custom_token['name'],
                        'symbol': custom_token['symbol'],
                        'decimals': custom_token['decimals'],
                        'format': token['format']
                    }
            balance = await self.db.fetchval(
                "SELECT balance "
                "FROM token_balances "
                "WHERE eth_address = $1 AND contract_address = $2",
                eth_address, token_address)
        if balance is None:
            balance = "0x0"
        details = {
            "symbol": token['symbol'],
            "name": token['name'],
            "decimals": token['decimals'],
            "value": balance,  # NOTE: 'value' left in for backwards compatibility
            "balance": balance,
            "contract_address": token_address
        }
        if token['format'] is not None:
            details["icon"] = "{}://{}/token/{}.{}".format(
                self.request.protocol, self.request.host, token_address, token['format'])
        else:
            details['icon'] = None
        return details
    else:
        async with self.db:
            balances = await self.db.fetch(
                "SELECT COALESCE(b.symbol, t.symbol) AS symbol, COALESCE(b.name, t.name) AS name, COALESCE(b.decimals, t.decimals) AS decimals, b.balance, b.contract_address, t.format "
                "FROM token_balances b "
                "JOIN tokens t "
                "ON t.contract_address = b.contract_address "
                "WHERE b.eth_address = $1 AND "
                "(b.visibility = 2 OR (b.visibility = 1 AND b.balance != '0x0')) "
                "ORDER BY t.symbol",
                eth_address)
        tokens = []
        for b in balances:
            details = {
                "symbol": b['symbol'],
                "name": b['name'],
                "decimals": b['decimals'],
                "value": b['balance'],  # NOTE: 'value' left in for backwards compatibility
                "balance": b['balance'],
                "contract_address": b['contract_address']
            }
            if b['format'] is not None:
                details["icon"] = "{}://{}/token/{}.{}".format(
                    self.request.protocol, self.request.host,
                    b['contract_address'], b['format'])
            else:
                details['icon'] = None
            tokens.append(details)
        return tokens

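# For illustration only (hypothetical values): the per-token dict built by
# get_token_balances above has this shape. "value" mirrors "balance" and is
# kept for backwards compatibility; "icon" is derived from the token's stored
# image format, or None when no image is available.
# {
#     "symbol": "TST",
#     "name": "Test Token",
#     "decimals": 18,
#     "value": "0x4563918244f40000",
#     "balance": "0x4563918244f40000",   # 5 * 10**18, hex encoded
#     "contract_address": "0x...",
#     "icon": "https://<host>/token/0x....png"
# }
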
async def update_transaction(self, transaction_id, status, retry_start_time=0):
    async with self.db:
        tx = await self.db.fetchrow(
            "SELECT * FROM transactions WHERE transaction_id = $1",
            transaction_id)
        if tx is None or tx['status'] == status:
            return
        token_txs = await self.db.fetch(
            "SELECT tok.symbol, tok.name, tok.decimals, tx.contract_address, tx.value, tx.from_address, tx.to_address, tx.transaction_log_index, tx.status "
            "FROM token_transactions tx "
            "JOIN tokens tok "
            "ON tok.contract_address = tx.contract_address "
            "WHERE tx.transaction_id = $1",
            transaction_id)

    # check if we're trying to update the state of a tx that is already confirmed, we have an issue
    if tx['status'] == 'confirmed':
        log.warning("Trying to update status of tx {} to {}, but tx is already confirmed".format(
            tx['hash'], status))
        return

    # only log if the transaction is internal
    if tx['v'] is not None:
        log.info("Updating status of tx {} to {} (previously: {})".format(
            tx['hash'], status, tx['status']))

    if status == 'confirmed':
        try:
            bulk = self.eth.bulk()
            transaction = bulk.eth_getTransactionByHash(tx['hash'])
            tx_receipt = bulk.eth_getTransactionReceipt(tx['hash'])
            await bulk.execute()
            transaction = transaction.result()
            tx_receipt = tx_receipt.result()
        except:
            log.exception("Error getting transaction: {}".format(tx['hash']))
            transaction = None
            tx_receipt = None

        if transaction and 'blockNumber' in transaction and transaction['blockNumber'] is not None:
            if retry_start_time > 0:
                log.info("successfully confirmed tx {} after {} seconds".format(
                    tx['hash'], round(time.time() - retry_start_time, 2)))
            token_tx_updates = []
            updated_token_txs = []
            for token_tx in token_txs:
                from_address = token_tx['from_address']
                to_address = token_tx['to_address']

                # check transaction receipt to make sure the transfer was successful
                has_transfer_event = False
                token_tx_status = 'confirmed'
                if tx_receipt['logs'] is not None:  # should always be [], but checking just in case
                    for _log in tx_receipt['logs']:
                        if len(_log['topics']) > 0 and _log['topics'][0] == TRANSFER_TOPIC:
                            if len(_log['topics']) == 3 and len(_log['data']) == 66 and \
                               decode_single_address(_log['topics'][1]) == from_address and \
                               decode_single_address(_log['topics'][2]) == to_address:
                                has_transfer_event = True
                                break
                            elif len(_log['topics']) == 1 and len(_log['data']) == 194:
                                erc20_from_address, erc20_to_address, erc20_value = decode_abi(
                                    ['address', 'address', 'uint256'],
                                    data_decoder(_log['data']))
                                if erc20_from_address == from_address and \
                                   erc20_to_address == to_address:
                                    has_transfer_event = True
                                    break
                        elif _log['address'] == WETH_CONTRACT_ADDRESS:
                            if _log['topics'][0] == DEPOSIT_TOPIC and \
                               decode_single_address(_log['topics'][1]) == to_address:
                                has_transfer_event = True
                                break
                            elif _log['topics'][0] == WITHDRAWAL_TOPIC and \
                                 decode_single_address(_log['topics'][1]) == from_address:
                                has_transfer_event = True
                                break
                    if not has_transfer_event:
                        # there was no Transfer event matching this transaction, this means something went wrong
                        token_tx_status = 'error'
                    else:
                        erc20_dispatcher.update_token_cache(
                            token_tx['contract_address'], from_address, to_address,
                            blocknumber=parse_int(transaction['blockNumber']))
                else:
                    log.error("Unexpectedly got null for tx receipt logs for tx: {}".format(
                        tx['hash']))
                    token_tx_status = 'error'

                token_tx_updates.append(
                    (token_tx_status, tx['transaction_id'], token_tx['transaction_log_index']))
                token_tx = dict(token_tx)
                token_tx['status'] = token_tx_status
                updated_token_txs.append(token_tx)
            token_txs = updated_token_txs

            blocknumber = parse_int(transaction['blockNumber'])
            async with self.db:
                await self.db.execute(
                    "UPDATE transactions SET status = $1, blocknumber = $2, updated = (now() AT TIME ZONE 'utc') "
                    "WHERE transaction_id = $3",
                    status, blocknumber, transaction_id)
                if token_tx_updates:
                    await self.db.executemany(
                        "UPDATE token_transactions SET status = $1 "
                        "WHERE transaction_id = $2 AND transaction_log_index = $3",
                        token_tx_updates)
                await self.db.commit()
        else:
            # this is probably because the node hasn't caught up with the latest block yet,
            # retry in a "bit" (but only retry up to 60 seconds)
            if retry_start_time > 0 and time.time() - retry_start_time >= 60:
                if transaction is None:
                    log.error("requested transaction {}'s status to be set to confirmed, but cannot find the transaction".format(tx['hash']))
                else:
                    log.error("requested transaction {}'s status to be set to confirmed, but transaction is not confirmed on the node".format(tx['hash']))
                return
            await asyncio.sleep(random.random())
            manager_dispatcher.update_transaction(
                transaction_id, status,
                retry_start_time=retry_start_time or time.time())
            return
    else:
        async with self.db:
            await self.db.execute(
                "UPDATE transactions SET status = $1, updated = (now() AT TIME ZONE 'utc') WHERE transaction_id = $2",
                status, transaction_id)
            await self.db.commit()

    # render notification

    # don't send "queued"
    if status == 'queued':
        status = 'unconfirmed'
    elif status == 'unconfirmed' and tx['status'] == 'queued':
        # there's already been a tx for this so no need to send another
        return

    messages = []

    # check if this is an erc20 transaction, if so use those values
    if token_txs:
        for token_tx in token_txs:
            token_tx_status = token_tx['status']
            from_address = token_tx['from_address']
            to_address = token_tx['to_address']

            # TokenPayment PNs are not shown at the moment, so i'm removing
            # this for the time being until they're required
            # if token_tx_status == 'confirmed':
            #     data = {
            #         "txHash": tx['hash'],
            #         "fromAddress": from_address,
            #         "toAddress": to_address,
            #         "status": token_tx_status,
            #         "value": token_tx['value'],
            #         "contractAddress": token_tx['contract_address']
            #     }
            #     messages.append((from_address, to_address, token_tx_status,
            #                      "SOFA::TokenPayment: " + json_encode(data)))

            # if a WETH deposit or withdrawal, we need to let the client know to
            # update their ETHER balance using a normal SOFA:Payment
            if token_tx['contract_address'] == WETH_CONTRACT_ADDRESS and (
                    from_address == "0x0000000000000000000000000000000000000000" or
                    to_address == "0x0000000000000000000000000000000000000000"):
                payment = SofaPayment(
                    value=parse_int(token_tx['value']), txHash=tx['hash'], status=status,
                    fromAddress=from_address, toAddress=to_address,
                    networkId=config['ethereum']['network_id'])
                messages.append((from_address, to_address, status, payment.render()))
    else:
        from_address = tx['from_address']
        to_address = tx['to_address']
        payment = SofaPayment(
            value=parse_int(tx['value']), txHash=tx['hash'], status=status,
            fromAddress=from_address, toAddress=to_address,
            networkId=config['ethereum']['network_id'])
        messages.append((from_address, to_address, status, payment.render()))

    # figure out what addresses need pns
    # from address always needs a pn
    for from_address, to_address, status, message in messages:
        manager_dispatcher.send_notification(from_address, message)

        # no need to check to_address for contract deployments
        if to_address == "0x":
            # TODO: update any notification registrations to be marked as a contract
            return

        # check if this is a brand new tx with no status
        if tx['status'] == 'new':
            # if an error has happened before any PNs have been sent
            # we only need to send the error to the sender, thus we
            # only add 'to' if the new status is not an error
            if status != 'error':
                manager_dispatcher.send_notification(to_address, message)
        else:
            manager_dispatcher.send_notification(to_address, message)

        # trigger a processing of the to_address's queue in case it has
        # things waiting on this transaction
        manager_dispatcher.process_transaction_queue(to_address)

async def update_transaction(self, transaction_id, status):
    async with self.db:
        tx = await self.db.fetchrow(
            "SELECT * FROM transactions WHERE transaction_id = $1",
            transaction_id)
        if tx is None or tx['status'] == status:
            return
        token_txs = await self.db.fetch(
            "SELECT tok.symbol, tok.name, tok.decimals, tx.contract_address, tx.value, tx.from_address, tx.to_address, tx.transaction_log_index, tx.status "
            "FROM token_transactions tx "
            "JOIN tokens tok "
            "ON tok.contract_address = tx.contract_address "
            "WHERE tx.transaction_id = $1",
            transaction_id)

    # check if we're trying to update the state of a tx that is already confirmed, we have an issue
    if tx['status'] == 'confirmed':
        log.warning("Trying to update status of tx {} to {}, but tx is already confirmed".format(
            tx['hash'], status))
        return

    # only log if the transaction is internal
    if tx['v'] is not None:
        log.info("Updating status of tx {} to {} (previously: {})".format(
            tx['hash'], status, tx['status']))

    if status == 'confirmed':
        transaction = await self.eth.eth_getTransactionByHash(tx['hash'])
        if transaction and 'blockNumber' in transaction:
            blocknumber = parse_int(transaction['blockNumber'])
            async with self.db:
                await self.db.execute(
                    "UPDATE transactions SET status = $1, blocknumber = $2, updated = (now() AT TIME ZONE 'utc') "
                    "WHERE transaction_id = $3",
                    status, blocknumber, transaction_id)
                await self.db.commit()
        else:
            log.error("requested transaction '{}''s status to be set to confirmed, but cannot find the transaction".format(tx['hash']))
    else:
        async with self.db:
            await self.db.execute(
                "UPDATE transactions SET status = $1, updated = (now() AT TIME ZONE 'utc') WHERE transaction_id = $2",
                status, transaction_id)
            await self.db.commit()

    # render notification

    # don't send "queued"
    if status == 'queued':
        status = 'unconfirmed'
    elif status == 'unconfirmed' and tx['status'] == 'queued':
        # there's already been a tx for this so no need to send another
        return

    messages = []

    # check if this is an erc20 transaction, if so use those values
    if token_txs:
        if status == 'confirmed':
            tx_receipt = await self.eth.eth_getTransactionReceipt(tx['hash'])
            if tx_receipt is None:
                log.error("Failed to get transaction receipt for confirmed transaction: {}".format(
                    tx_receipt))
                # requeue to try again
                manager_dispatcher.update_transaction(transaction_id, status)
                return

        for token_tx in token_txs:
            token_tx_status = status
            from_address = token_tx['from_address']
            to_address = token_tx['to_address']

            if status == 'confirmed':
                # check transaction receipt to make sure the transfer was successful
                has_transfer_event = False
                if tx_receipt['logs'] is not None:  # should always be [], but checking just in case
                    for _log in tx_receipt['logs']:
                        if len(_log['topics']) > 2:
                            if _log['topics'][0] == TRANSFER_TOPIC and \
                               decode_single_address(_log['topics'][1]) == from_address and \
                               decode_single_address(_log['topics'][2]) == to_address:
                                has_transfer_event = True
                                break
                        elif _log['address'] == WETH_CONTRACT_ADDRESS:
                            if _log['topics'][0] == DEPOSIT_TOPIC and \
                               decode_single_address(_log['topics'][1]) == to_address:
                                has_transfer_event = True
                                break
                            elif _log['topics'][0] == WITHDRAWAL_TOPIC and \
                                 decode_single_address(_log['topics'][1]) == from_address:
                                has_transfer_event = True
                                break
                    if not has_transfer_event:
                        # there was no Transfer event matching this transaction
                        token_tx_status = 'error'
                    else:
                        erc20_dispatcher.update_token_cache(
                            token_tx['contract_address'], from_address, to_address,
                            blocknumber=parse_int(transaction['blockNumber']))

                if token_tx_status == 'confirmed':
                    data = {
                        "txHash": tx['hash'],
                        "fromAddress": from_address,
                        "toAddress": to_address,
                        "status": token_tx_status,
                        "value": token_tx['value'],
                        "contractAddress": token_tx['contract_address']
                    }
                    messages.append((from_address, to_address, token_tx_status,
                                     "SOFA::TokenPayment: " + json_encode(data)))

                async with self.db:
                    await self.db.execute(
                        "UPDATE token_transactions SET status = $1 "
                        "WHERE transaction_id = $2 AND transaction_log_index = $3",
                        token_tx_status, tx['transaction_id'], token_tx['transaction_log_index'])
                    await self.db.commit()

            # if a WETH deposit or withdrawal, we need to let the client know to
            # update their ETHER balance using a normal SOFA:Payment
            if token_tx['contract_address'] == WETH_CONTRACT_ADDRESS and (
                    from_address == "0x0000000000000000000000000000000000000000" or
                    to_address == "0x0000000000000000000000000000000000000000"):
                payment = SofaPayment(
                    value=parse_int(token_tx['value']), txHash=tx['hash'], status=status,
                    fromAddress=from_address, toAddress=to_address,
                    networkId=config['ethereum']['network_id'])
                messages.append((from_address, to_address, status, payment.render()))
    else:
        from_address = tx['from_address']
        to_address = tx['to_address']
        payment = SofaPayment(
            value=parse_int(tx['value']), txHash=tx['hash'], status=status,
            fromAddress=from_address, toAddress=to_address,
            networkId=config['ethereum']['network_id'])
        messages.append((from_address, to_address, status, payment.render()))

    # figure out what addresses need pns
    # from address always needs a pn
    for from_address, to_address, status, message in messages:
        manager_dispatcher.send_notification(from_address, message)

        # no need to check to_address for contract deployments
        if to_address == "0x":
            # TODO: update any notification registrations to be marked as a contract
            return

        # check if this is a brand new tx with no status
        if tx['status'] is None:
            # if an error has happened before any PNs have been sent
            # we only need to send the error to the sender, thus we
            # only add 'to' if the new status is not an error
            if status != 'error':
                manager_dispatcher.send_notification(to_address, message)
        else:
            manager_dispatcher.send_notification(to_address, message)

        # trigger a processing of the to_address's queue in case it has
        # things waiting on this transaction
        manager_dispatcher.process_transaction_queue(to_address)

async def filter_poll(self):
    # check for newly added erc20 tokens
    if not self._shutdown:
        async with self.pool.acquire() as con:
            rows = await con.fetch("SELECT contract_address FROM tokens WHERE ready = false")
            if len(rows) > 0:
                total_registrations = await con.fetchval("SELECT COUNT(*) FROM token_registrations")
            else:
                total_registrations = 0
        for row in rows:
            log.info("Got new erc20 token: {}. updating {} registrations".format(
                row['contract_address'], total_registrations))
        if len(rows) > 0:
            limit = 1000
            for offset in range(0, total_registrations, limit):
                async with self.pool.acquire() as con:
                    registrations = await con.fetch(
                        "SELECT eth_address FROM token_registrations OFFSET $1 LIMIT $2",
                        offset, limit)
                for row in rows:
                    erc20_dispatcher.update_token_cache(
                        row['contract_address'],
                        *[r['eth_address'] for r in registrations])
            async with self.pool.acquire() as con:
                await con.executemany(
                    "UPDATE tokens SET ready = true WHERE contract_address = $1",
                    [(r['contract_address'],) for r in rows])

    if not self._shutdown:
        if self._new_pending_transaction_filter_id is not None:
            # get the list of new pending transactions
            try:
                new_pending_transactions = await self.eth.eth_getFilterChanges(
                    self._new_pending_transaction_filter_id)
                # add any to the list of unprocessed transactions
                self.unmatched_transactions.update(
                    {tx_hash: 0 for tx_hash in new_pending_transactions})
            except JSONRPC_ERRORS:
                log.exception("WARNING: unable to connect to server")
                new_pending_transactions = None
            if new_pending_transactions is None:
                await self.register_filters()
            elif len(new_pending_transactions) > 0:
                self._last_saw_new_pending_transactions = asyncio.get_event_loop().time()
            else:
                # make sure the filter timeout period hasn't passed
                time_since_last_pending_transaction = int(
                    asyncio.get_event_loop().time() - self._last_saw_new_pending_transactions)
                if time_since_last_pending_transaction > FILTER_TIMEOUT:
                    log.warning("Haven't seen any new pending transactions for {} seconds".format(
                        time_since_last_pending_transaction))
                    await self.register_new_pending_transaction_filter()
            if len(self.unmatched_transactions) > 0:
                self.run_process_unconfirmed_transactions()

    if not self._shutdown:
        if self._new_block_filter_id is not None:
            try:
                new_blocks = await self.eth.eth_getFilterChanges(self._new_block_filter_id)
            except JSONRPC_ERRORS:
                log.exception("Error getting new block filter")
                new_blocks = None
            if new_blocks is None:
                await self.register_filters()
                # do a block check right after as it may have taken some time to
                # reconnect and we may have missed a block notification
                new_blocks = [True]
                # NOTE: this is not very smart, as if the block check is
                # already running this will cause it to run twice. However,
                # this is currently taken care of in the block check itself
                # which should suffice.
            if new_blocks and not self._shutdown:
                self._last_saw_new_block = asyncio.get_event_loop().time()
                self.schedule_block_check()
            elif not self._shutdown and len(new_blocks) == 0:
                # make sure the filter timeout period hasn't passed
                time_since_last_new_block = int(
                    asyncio.get_event_loop().time() - self._last_saw_new_block)
                if time_since_last_new_block > FILTER_TIMEOUT:
                    log.warning("Haven't seen any new blocks for {} seconds".format(
                        time_since_last_new_block))
                    await self.register_new_block_filter()
                    # also force a block check just in case
                    self.schedule_block_check()
        else:
            log.warning("no filter id for new blocks")

    self._filter_poll_process = None
    if not self._shutdown:
        self.schedule_filter_poll(1 if self.unmatched_transactions else DEFAULT_POLL_DELAY)

async def update_token_cache(self, contract_address, *eth_addresses, blocknumber=None):
    if len(eth_addresses) == 0:
        return

    is_wildcard = contract_address == "*"

    async with self.db:
        last_blocknumber = (await self.db.fetchval("SELECT blocknumber FROM last_blocknumber"))
    if blocknumber is None:
        blocknumber = last_blocknumber
    elif blocknumber > last_blocknumber:
        # don't continue until the block numbers match
        log.info("request to update erc20 cache before block processor is caught up")
        erc20_dispatcher.update_token_cache(
            contract_address, *eth_addresses, blocknumber=blocknumber).delay(RETRY_DELAY)
        return

    if is_wildcard:
        tokens = await self.db.fetch(
            "SELECT contract_address, custom FROM tokens WHERE custom = FALSE")
    else:
        tokens = await self.db.fetch(
            "SELECT contract_address, custom FROM tokens WHERE contract_address = $1",
            contract_address)

    if is_wildcard:
        if len(eth_addresses) > 1:
            # this is currently unneeded and dangerous
            raise Exception("wildcard update of token caches unsupported for multiple addresses")
        log.info("START update_token_cache(\"*\", {})".format(eth_addresses[0]))
        start_time = time.time()
        # NOTE: we don't remove this at the end on purpose
        # to avoid spamming of "*" refreshes
        should_run = await self.redis.set(
            "bulk_token_update:{}".format(eth_addresses[0]), 1,
            expire=60, exist=self.redis.SET_IF_NOT_EXIST)
        if not should_run:
            log.info("ABORT update_token_cache(\"*\", {}): {}".format(
                eth_addresses[0], should_run))
            return

    client = self.eth.bulk(should_retry=False)
    futures = []
    for eth_address in eth_addresses:
        for token in tokens:
            data = "0x70a08231000000000000000000000000" + eth_address[2:]
            try:
                f = client.eth_call(to_address=token['contract_address'], data=data,
                                    block=blocknumber)
            except:
                log.exception("Error getting balance of token {} for address {}".format(
                    token['contract_address'], eth_address))
                continue
            futures.append((token['contract_address'], eth_address, token['custom'], f))

    if len(futures) > 0:
        should_retry = False
        try:
            await client.execute()
        except Exception as e:
            log.warning("Error in http request updating erc20 cache update of '{}' for addresses {}: {}".format(
                contract_address, eth_addresses, e))
            futures = []
            should_retry = True

        bulk_insert = []
        for token_contract_address, eth_address, custom, f in futures:
            try:
                value = f.result()
                if value == "0x0000000000000000000000000000000000000000000000000000000000000000" or value == "0x":
                    if value == "0x":
                        log.warning("calling balanceOf for contract {} failed".format(
                            token_contract_address))
                    value = 0
                else:
                    value = parse_int(value)  # remove hex padding of value
                bulk_insert.append(
                    (token_contract_address, eth_address, hex(value), blocknumber,
                     0 if custom else 1))
            except JsonRPCError as e:
                if e.message == "Unknown Block Number" or \
                   e.message == "This request is not supported because your node is running with state pruning. Run with --pruning=archive.":
                    # reschedule the update and abort for now
                    # NOTE: not aborting right away as we should clear out the rest of the future values
                    if not should_retry:
                        log.info("got unknown block number in erc20 cache update of '{}' for address: {}".format(
                            token_contract_address, eth_address))
                        should_retry = True
                    continue
                log.exception("WARNING: failed to update token cache of '{}' for address: {}".format(
                    token_contract_address, eth_address))

        if should_retry:
            if is_wildcard:
                # clear up bulk_token_update key, as we want to allow this to run again
                await self.redis.delete("bulk_token_update:{}".format(eth_addresses[0]))
            erc20_dispatcher.update_token_cache(
                contract_address, *eth_addresses, blocknumber=blocknumber).delay(RETRY_DELAY)
            return

    send_update = False
    if len(bulk_insert) > 0:
        async with self.db:
            await self.db.executemany(
                "INSERT INTO token_balances (contract_address, eth_address, balance, blocknumber, visibility) "
                "VALUES ($1, $2, $3, $4, $5) "
                "ON CONFLICT (contract_address, eth_address) "
                "DO UPDATE SET balance = EXCLUDED.balance, blocknumber = EXCLUDED.blocknumber "
                "WHERE token_balances.blocknumber < EXCLUDED.blocknumber",
                bulk_insert)
            await self.db.commit()
        send_update = True

    # token updates need to send a refresh trigger to clients
    # currently clients only use a TokenPayment as a trigger to refresh their
    # token cache, so we abuse this functionality here
    if send_update:
        # lots of fake values so it doesn't get confused with a real tx
        for eth_address in eth_addresses:
            data = {
                "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
                "fromAddress": "0x0000000000000000000000000000000000000000",
                "toAddress": eth_addresses[0],
                "status": "confirmed",
                "value": "0x0",
                "contractAddress": "0x0000000000000000000000000000000000000000"
            }
            message = "SOFA::TokenPayment: " + json_encode(data)
            manager_dispatcher.send_notification(eth_address, message)

    if is_wildcard:
        end_time = time.time()
        log.info("DONE update_token_cache(\"*\", {}) in {}s".format(
            eth_addresses[0], round(end_time - start_time, 2)))

async def filter_poll(self):
    # check for newly added erc20 tokens
    if not self._shutdown:
        async with self.pool.acquire() as con:
            rows = await con.fetch(
                "SELECT contract_address FROM tokens WHERE ready = FALSE AND custom = FALSE")
            if len(rows) > 0:
                total_registrations = await con.fetchval(
                    "SELECT COUNT(*) FROM token_registrations")
            else:
                total_registrations = 0
        for row in rows:
            log.info("Got new erc20 token: {}. updating {} registrations".format(
                row['contract_address'], total_registrations))
        if len(rows) > 0:
            limit = 1000
            for offset in range(0, total_registrations, limit):
                async with self.pool.acquire() as con:
                    registrations = await con.fetch(
                        "SELECT eth_address FROM token_registrations OFFSET $1 LIMIT $2",
                        offset, limit)
                for row in rows:
                    erc20_dispatcher.update_token_cache(
                        row['contract_address'],
                        *[r['eth_address'] for r in registrations])
            async with self.pool.acquire() as con:
                await con.executemany(
                    "UPDATE tokens SET ready = true WHERE contract_address = $1",
                    [(r['contract_address'],) for r in rows])

    if not self._shutdown:
        if self._new_pending_transaction_filter_id is not None:
            # get the list of new pending transactions
            try:
                new_pending_transactions = await self.filter_eth.eth_getFilterChanges(
                    self._new_pending_transaction_filter_id)
                # add any to the list of unprocessed transactions
                for tx_hash in new_pending_transactions:
                    await self.redis.hsetnx(UNCONFIRMED_TRANSACTIONS_REDIS_KEY, tx_hash,
                                            int(asyncio.get_event_loop().time()))
            except JSONRPC_ERRORS:
                log.exception("WARNING: unable to connect to server")
                new_pending_transactions = None
            if new_pending_transactions is None:
                await self.register_filters()
            elif len(new_pending_transactions) > 0:
                self._last_saw_new_pending_transactions = asyncio.get_event_loop().time()
            else:
                # make sure the filter timeout period hasn't passed
                time_since_last_pending_transaction = int(
                    asyncio.get_event_loop().time() - self._last_saw_new_pending_transactions)
                if time_since_last_pending_transaction > FILTER_TIMEOUT:
                    log.warning("Haven't seen any new pending transactions for {} seconds".format(
                        time_since_last_pending_transaction))
                    await self.register_new_pending_transaction_filter()
            if await self.redis.hlen(UNCONFIRMED_TRANSACTIONS_REDIS_KEY) > 0:
                self.run_process_unconfirmed_transactions()

    if not self._shutdown:
        # no need to run this if the block checking process is still running
        if self._block_checking_process is None or self._block_checking_process.done():
            try:
                block_number = await self.filter_eth.eth_blockNumber()
            except JSONRPC_ERRORS:
                log.exception("Error getting current block number")
                block_number = 0
            if block_number > self.last_block_number and not self._shutdown:
                self.schedule_block_check()

    self._filter_poll_process = None
    if not self._shutdown:
        self.schedule_filter_poll(
            1 if (await self.redis.hlen(UNCONFIRMED_TRANSACTIONS_REDIS_KEY) > 0)
            else DEFAULT_POLL_DELAY)