def __init__(
        self,
        api_key: ApiKey,
        secret: ApiSecret,
        database: 'DBHandler',
        msg_aggregator: MessagesAggregator,
        initial_backoff: int = 4,
        backoff_limit: int = 180,
):
    super(Binance, self).__init__('binance', api_key, secret, database)
    self.uri = BINANCE_BASE_URL
    self.session.headers.update({
        'Accept': 'application/json',
        'X-MBX-APIKEY': self.api_key,
    })
    self.msg_aggregator = msg_aggregator
    self.initial_backoff = initial_backoff
    self.backoff_limit = backoff_limit
    self.nonce_lock = Semaphore()
def tick_subscriber():
    response = stub.ListenCybosTickData(Empty())
    for msg in response:
        if msg.code not in spread_dict:
            if msg.code not in sem_dict:
                sem_dict[msg.code] = Semaphore()
            sem_dict[msg.code].acquire()
            if msg.code not in spread_dict:
                spread_dict[msg.code] = sp.Spread(msg.code,
                                                  get_company_name(msg.code),
                                                  order_callback)
            sem_dict[msg.code].release()
        spread_dict[msg.code].set_price_info(msg.buy_or_sell,
                                             msg.current_price,
                                             msg.bid_price,
                                             msg.ask_price,
                                             msg.market_type)
        trademachine.tick_arrived(msg.code, msg)
def __init__(self, magic_file=None):
    """
    Create a new libmagic wrapper.

    magic_file - A magic file other than the default.
    """
    # The lock allows us to be thread-safe
    self.lock = Semaphore(value=1)

    # Tracks the errno/errstr set after each call
    self.errno = 0
    self.errstr = ''

    # Load our magic file
    self.magic_file = magic_file

    # Initialize our flags
    self.flags = (MAGIC_MIME | MAGIC_MIME_ENCODING)
def test_renamed_label_refresh(db, default_account, thread, message,
                               imapuid, folder, mock_imapclient, monkeypatch):
    # Check that imapuids see their labels refreshed after running
    # the LabelRenameHandler.
    msg_uid = imapuid.msg_uid
    uid_dict = {msg_uid: GmailFlags((), ('stale label',), ('23',))}

    update_metadata(default_account.id, folder.id, folder.canonical_name,
                    uid_dict, db.session)

    new_flags = {msg_uid: {'FLAGS': ('\\Seen',),
                           'X-GM-LABELS': ('new label',),
                           'MODSEQ': ('23',)}}
    mock_imapclient._data['[Gmail]/All mail'] = new_flags
    mock_imapclient.add_folder_data(folder.name, new_flags)
    monkeypatch.setattr(MockIMAPClient, 'search', lambda x, y: [msg_uid])

    semaphore = Semaphore(value=1)
    rename_handler = LabelRenameHandler(default_account.id,
                                        default_account.namespace.id,
                                        'new label', semaphore)

    # Acquire the semaphore to check that LabelRenameHandlers block if
    # the semaphore is in-use.
    semaphore.acquire()
    rename_handler.start()

    gevent.sleep(0)  # yield to the handler

    labels = list(imapuid.labels)
    assert len(labels) == 1
    assert labels[0].name == 'stale label'

    semaphore.release()
    rename_handler.join()

    db.session.refresh(imapuid)
    # Now check that the label got updated.
    labels = list(imapuid.labels)
    assert len(labels) == 1
    assert labels[0].name == 'new label'
def __init__(
        self,
        name: str,
        api_key: ApiKey,
        secret: ApiSecret,
        database: 'DBHandler',
        msg_aggregator: MessagesAggregator,
):
    super().__init__(
        name=name,
        location=Location.BITFINEX,
        api_key=api_key,
        secret=secret,
        database=database,
    )
    self.base_uri = 'https://api.bitfinex.com'
    self.session.headers.update({'bfx-apikey': self.api_key})
    self.msg_aggregator = msg_aggregator
    self.nonce_lock = Semaphore()
def __init__(self, fobj, mode=None, bufsize=-1, close=True,
             threadpool=None, lock=True):
    """
    :param fobj: The underlying file-like object to wrap, or an integer
        fileno that will be passed to :func:`os.fdopen` along with *mode*
        and *bufsize*.
    :keyword bool lock: If True (the default) then all operations will
        be performed one-by-one. Note that this does not guarantee that,
        if using this file object from multiple threads/greenlets,
        operations will be performed in any particular order, only that
        no two operations will be attempted at the same time. You can
        also pass your own :class:`gevent.lock.Semaphore` to synchronize
        file operations with an external resource.
    :keyword bool close: If True (the default) then when this object is
        closed, the underlying object is closed as well.
    """
    closefd = close
    self.threadpool = threadpool or get_hub().threadpool
    self.lock = lock
    if self.lock is True:
        self.lock = Semaphore()
    elif not self.lock:
        self.lock = DummySemaphore()
    if not hasattr(self.lock, '__enter__'):
        raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
    if isinstance(fobj, integer_types):
        if not closefd:
            # we cannot do this, since fdopen object will close the descriptor
            raise TypeError('FileObjectThread does not support close=False on an fd.')
        if mode is None:
            assert bufsize == -1, "If you use the default mode, you can't choose a bufsize"
            fobj = os.fdopen(fobj)
        else:
            fobj = os.fdopen(fobj, mode, bufsize)
    self.__io_holder = [fobj]  # signal for _wrap_method
    super(FileObjectThread, self).__init__(fobj, closefd)
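# A brief usage sketch for the wrapper above: passing one Semaphore as the
# `lock` argument of two FileObjectThread instances serializes their writes
# to the same underlying file across greenlets. The file name is illustrative.
from gevent.fileobject import FileObjectThread
from gevent.lock import Semaphore

shared_lock = Semaphore()
writer_a = FileObjectThread(open('app.log', 'a'), lock=shared_lock)
writer_b = FileObjectThread(open('app.log', 'a'), lock=shared_lock)
writer_a.write('from greenlet A\n')  # both writers contend on shared_lock
writer_b.write('from greenlet B\n')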
def __init__(
        self,
        web3: Web3,
        chain_id: ChainID,
        contract_manager: ContractManager,
        last_fetched_block: BlockNumber,
        event_filters: List[SmartContractEvents],
        block_batch_size_config: BlockBatchSizeConfig,
) -> None:
    self.web3 = web3
    self.chain_id = chain_id
    self.last_fetched_block = last_fetched_block
    self.contract_manager = contract_manager
    self.block_batch_size_adjuster = BlockBatchSizeAdjuster(block_batch_size_config)

    # This lock is used to add a new smart contract to the list of polled
    # smart contracts. The crucial optimization done by this class is to
    # query all smart contracts with only one request; this requires the
    # parameters `fromBlock` and `toBlock` to be the same for all smart
    # contracts. The lock is used to hold new requests while the logs of
    # the new smart contract are queried to catch up, and then for it to
    # be added to the list of tracked smart contracts.
    #
    # This lock also guarantees that the events will be processed only
    # once, and because of this the `target_block_number` must always be
    # a confirmed block.
    #
    # Additionally, user-facing APIs which have on-chain side-effects use
    # forced polls of the blockchain to update the node's state. This
    # forced poll provides a consistent view to the user, e.g. a channel
    # open call waits for the transaction to be mined and then force
    # polls the event to update the node's state. This pattern introduced
    # a race between the alarm task and the task serving the user
    # request, because the events are returned only once per filter. The
    # lock below protects against these races (introduced by the commit
    # 3686b3275ff7c0b669a6d5e2b34109c3bdf1921d).
    self._filters_lock = Semaphore()
    self._address_to_filters: Dict[Address, SmartContractEvents] = {
        event.contract_address: event for event in event_filters
    }
def __init__(
        self,
        jsonrpc_client,
        token_network_address: typing.TokenNetworkAddress,
        contract_manager: ContractManager,
):
    if not is_binary_address(token_network_address):
        raise InvalidAddress('Expected binary address format for token network')

    check_address_has_code(
        jsonrpc_client,
        typing.Address(token_network_address),
        CONTRACT_TOKEN_NETWORK,
    )

    self.contract_manager = contract_manager
    proxy = jsonrpc_client.new_contract_proxy(
        self.contract_manager.get_contract_abi(CONTRACT_TOKEN_NETWORK),
        to_normalized_address(token_network_address),
    )

    compare_contract_versions(
        proxy=proxy,
        expected_version=contract_manager.contracts_version,
        contract_name=CONTRACT_TOKEN_NETWORK,
        address=typing.Address(token_network_address),
    )

    self.address = token_network_address
    self.proxy = proxy
    self.client = jsonrpc_client
    self.node_address = privatekey_to_address(self.client.privkey)
    self.open_channel_transactions = dict()

    # Forbids concurrent operations on the same channel
    self.channel_operations_lock = defaultdict(RLock)

    # Serializes concurrent deposits on this token network. This must be
    # an exclusive lock, since we need to coordinate the approve and
    # setTotalDeposit calls.
    self.deposit_lock = Semaphore()
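# A minimal sketch of what `deposit_lock` coordinates; `_set_total_deposit`
# and the `token` proxy here are hypothetical stand-ins, not the actual
# raiden API. Without the exclusive lock, two concurrent deposits could
# interleave their ERC20 `approve` calls and one would consume the other's
# allowance.
def approve_and_set_total_deposit(self, token, channel_identifier, partner, total_deposit):
    with self.deposit_lock:
        token.approve(allowed_address=self.address, allowance=total_deposit)
        self._set_total_deposit(channel_identifier, total_deposit, partner)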
def _add_config_to_store(self, identity, config_name, raw, parsed,
                         config_type, trigger_callback=False):
    """Adds a processed configuration to the store."""
    agent_store = self.store.get(identity)
    action = "UPDATE"

    if agent_store is None:
        # Initialize a new store.
        store_path = os.path.join(self.store_path, identity + store_ext)
        store = PersistentDict(filename=store_path, flag='c', format='json')
        agent_store = {"configs": {}, "store": store, "lock": Semaphore()}
        self.store[identity] = agent_store

    agent_configs = agent_store["configs"]
    agent_disk_store = agent_store["store"]
    agent_store_lock = agent_store["lock"]

    config_name = strip_config_name(config_name)

    if config_name not in agent_configs:
        action = "NEW"

    if check_for_recursion(config_name, parsed, agent_configs):
        raise ValueError("Recursive configuration references detected.")

    agent_configs[config_name] = parsed
    agent_disk_store[config_name] = {"type": config_type, "data": raw}
    agent_disk_store.async_sync()

    _log.info("Agent {} config {} stored.".format(identity, config_name))

    with agent_store_lock:
        try:
            self.vip.rpc.call(identity, "update_config", action,
                              contents=parsed,
                              trigger_callback=trigger_callback).get(timeout=10.0)
        except errors.Unreachable:
            _log.debug("Agent {} not currently running. "
                       "Configuration update not sent.".format(identity))
        except RemoteError as e:
            _log.error("Agent {} failure when adding/updating "
                       "configuration {}: {}".format(identity, config_name, e))
        except MethodNotFound as e:
            _log.error("Agent {} failure when adding/updating "
                       "configuration {}: {}".format(identity, config_name, e))
def __init__(self, fobj, *args, **kwargs):
    self._close = kwargs.pop('close', True)
    self.threadpool = kwargs.pop('threadpool', None)
    self.lock = kwargs.pop('lock', True)
    if kwargs:
        raise TypeError('Unexpected arguments: %r' % kwargs.keys())
    if self.lock is True:
        self.lock = Semaphore()
    elif not self.lock:
        self.lock = DummySemaphore()
    if not hasattr(self.lock, '__enter__'):
        raise TypeError('Expected a Semaphore or boolean, got %r' % type(self.lock))
    if isinstance(fobj, (int, long)):
        if not self._close:
            # we cannot do this, since fdopen object will close the descriptor
            raise TypeError('FileObjectThread does not support close=False')
        fobj = os.fdopen(fobj, *args)
    self._fobj = fobj
    if self.threadpool is None:
        self.threadpool = get_hub().threadpool
def __init__(
        self,
        ethereum_manager: 'EthereumManager',
        database: 'DBHandler',
        premium: Optional[Premium],
        msg_aggregator: MessagesAggregator,
) -> None:
    self.ethereum = ethereum_manager
    self.database = database
    self.premium = premium
    self.msg_aggregator = msg_aggregator
    self.data_directory = database.user_data_dir.parent
    self.trades_lock = Semaphore()
    try:
        self.graph = Graph(
            'https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2',
        )
    except RemoteError as e:
        self.msg_aggregator.add_error(
            SUBGRAPH_REMOTE_ERROR_MSG.format(error_msg=str(e)))
        raise ModuleInitializationFailure('subgraph remote error') from e
def __init__(self, rabbit_ip, rabbit_port, rabbit_user, rabbit_password,
             rabbit_vhost, rabbit_ha_mode, q_name, subscribe_cb, logger):
    self._rabbit_ip = rabbit_ip
    self._rabbit_port = rabbit_port
    self._rabbit_user = rabbit_user
    self._rabbit_password = rabbit_password
    self._rabbit_vhost = rabbit_vhost
    self._subscribe_cb = subscribe_cb
    self._logger = logger
    self._publish_queue = Queue()
    self._conn_lock = Semaphore()

    self.obj_upd_exchange = kombu.Exchange('vnc_config.object-update',
                                           'fanout', durable=False)

    # Register a handler for SIGTERM so that we can release the lock.
    # Without it, it can take several minutes before a new master is
    # elected. If any app using this wants to register its own SIGTERM
    # handler, then we will have to modify this function to perhaps take
    # an argument.
    gevent.signal(signal.SIGTERM, self.sigterm_handler)
def __init__(
        self,
        ethereum_manager: EthereumManager,
        database: DBHandler,
        premium: Optional[Premium],
        msg_aggregator: MessagesAggregator,
) -> None:
    super().__init__(
        ethereum_manager=ethereum_manager,
        database=database,
        premium=premium,
        msg_aggregator=msg_aggregator,
    )
    self.reset_last_query_ts()
    self.lock = Semaphore()
    self.usd_price: Dict[str, FVal] = defaultdict(FVal)
    self.vault_mappings: Dict[ChecksumEthAddress, List[MakerDAOVault]] = defaultdict(list)
    self.ilk_to_stability_fee: Dict[bytes, FVal] = {}
    self.vault_details: List[MakerDAOVaultDetails] = []
def __init__(
        self,
        *args,
        max_retries=3,
        pool_maxsize=256,
        **kwargs,
):
    super().__init__(*args, **kwargs)
    http_adapter = HTTPAdapter(
        max_retries=max_retries,
        pool_maxsize=pool_maxsize,
    )
    https_adapter = HTTPAdapter(
        max_retries=max_retries,
        pool_maxsize=pool_maxsize,
    )
    self.session.mount('http://', http_adapter)
    self.session.mount('https://', https_adapter)
    self.lock = Semaphore(pool_maxsize)
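# Note that the Semaphore above is sized to `pool_maxsize`, making it a
# counting semaphore rather than a mutex: up to `pool_maxsize` greenlets may
# hold it at once, so excess requests queue instead of exhausting the
# connection pool. A hedged sketch of a wrapper that would use it:
def request(self, method: str, url: str, **kwargs):
    # Blocks only once all pooled connections are already in flight.
    with self.lock:
        return self.session.request(method, url, **kwargs)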
def __init__(self, store, relay=None, backoff=None, bounce_factory=None,
             store_pool=None, relay_pool=None):
    super(Queue, self).__init__()
    self.store = store
    self.relay = relay
    self.backoff = backoff or self._default_backoff
    self.bounce_factory = bounce_factory or Bounce
    self.wake = Event()
    self.queued = []
    self.active_ids = set()
    self.queued_ids = set()
    self.queued_lock = Semaphore(1)
    self.queue_policies = []
    self._use_pool('store_pool', store_pool)
    self._use_pool('relay_pool', relay_pool)
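# A sketch (hypothetical helper, not part of the original class) of the kind
# of operation `queued_lock` guards: mutating the `queued` list and the
# `queued_ids` set must be atomic so no envelope is enqueued twice.
def _enqueue(self, timestamp, envelope_id):
    with self.queued_lock:
        if envelope_id not in self.queued_ids:
            self.queued.append((timestamp, envelope_id))
            self.queued_ids.add(envelope_id)
    self.wake.set()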
def bidask_subscriber():
    response = stub.ListenCybosBidAsk(Empty())
    for msg in response:
        if msg.code not in spread_dict:
            if msg.code not in sem_dict:
                sem_dict[msg.code] = Semaphore()
            sem_dict[msg.code].acquire()
            if msg.code not in spread_dict:
                spread_dict[msg.code] = sp.Spread(msg.code,
                                                  get_company_name(msg.code),
                                                  order_callback)
            sem_dict[msg.code].release()
        spread_dict[msg.code].set_spread_info(msg.bid_prices[:],
                                              msg.ask_prices[:],
                                              msg.bid_remains[:],
                                              msg.ask_remains[:])
        trademachine.bidask_arrived(msg.code, msg)
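# Both subscribers above use the same double-checked creation pattern. A
# hedged refactoring sketch that factors it into one helper (names mirror
# the snippets; the second membership check is the important part, since
# another greenlet may create the Spread while this one waits on the lock):
def get_or_create_spread(code):
    if code not in spread_dict:
        if code not in sem_dict:
            sem_dict[code] = Semaphore()
        with sem_dict[code]:
            # Re-check under the lock before constructing.
            if code not in spread_dict:
                spread_dict[code] = sp.Spread(
                    code, get_company_name(code), order_callback)
    return spread_dict[code]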
def __init__(
        self,
        web3: Web3,
        privkey: bytes,
        gasprice: int = None,
        nonce_offset: int = 0,
):
    if privkey is None or len(privkey) != 32:
        raise ValueError('Invalid private key')

    monkey_patch_web3(web3, self)

    try:
        version = web3.version.node
    except ConnectTimeout:
        raise EthNodeCommunicationError("Couldn't reach the ethereum node")

    _, eth_node = is_supported_client(version)

    sender = privatekey_to_address(privkey)
    transaction_count = web3.eth.getTransactionCount(
        to_checksum_address(sender), 'pending')
    _available_nonce = transaction_count + nonce_offset

    self.eth_node = eth_node
    self.given_gas_price = gasprice
    self.privkey = privkey
    self.sender = sender
    self.web3 = web3

    self._gaslimit_cache = TTLCache(maxsize=16, ttl=RPC_CACHE_TTL)
    self._gasprice_cache = TTLCache(maxsize=16, ttl=RPC_CACHE_TTL)
    self._available_nonce = _available_nonce
    self._nonce_lock = Semaphore()
    self._nonce_offset = nonce_offset

    log.debug(
        'JSONRPCClient created',
        sender=pex(self.sender),
        available_nonce=_available_nonce,
    )
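# A sketch (assumed helper) of the role `_nonce_lock` plays: reading and
# bumping `_available_nonce` must be atomic per transaction, otherwise two
# greenlets could sign transactions with the same nonce and the node would
# reject one of them.
def _next_nonce(self) -> int:
    with self._nonce_lock:
        nonce = self._available_nonce
        self._available_nonce += 1
    return nonce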
def __init__(
        self,
        jsonrpc_client: JSONRPCClient,
        secret_registry_address: SecretRegistryAddress,
        contract_manager: ContractManager,
        block_identifier: BlockIdentifier,
) -> None:
    if not is_binary_address(secret_registry_address):
        raise ValueError('Expected binary address format for secret registry')

    self.contract_manager = contract_manager
    check_address_has_code_handle_pruned_block(
        client=jsonrpc_client,
        address=Address(secret_registry_address),
        contract_name=CONTRACT_SECRET_REGISTRY,
        expected_code=decode_hex(
            contract_manager.get_runtime_hexcode(CONTRACT_SECRET_REGISTRY)),
        given_block_identifier=block_identifier,
    )

    proxy = jsonrpc_client.new_contract_proxy(
        abi=self.contract_manager.get_contract_abi(CONTRACT_SECRET_REGISTRY),
        contract_address=Address(secret_registry_address),
    )

    # There should be only one smart contract deployed, to avoid race
    # conditions for on-chain unlocks.
    self.address = secret_registry_address
    self.proxy = proxy
    self.client = jsonrpc_client
    self.node_address = self.client.address

    # The dictionary of open transactions is used to avoid sending a
    # transaction for the same secret more than once. This requires
    # synchronization for the local threads.
    self.open_secret_transactions: Dict[Secret, AsyncResult] = dict()
    self._open_secret_transactions_lock = Semaphore()
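# A hedged sketch of how `open_secret_transactions` deduplicates concurrent
# registrations of the same secret; `_send_register_transaction` is a
# hypothetical stand-in. The first caller creates the AsyncResult and sends
# the transaction; later callers for the same secret simply wait on it.
def register_secret(self, secret):
    with self._open_secret_transactions_lock:
        other_result = self.open_secret_transactions.get(secret)
        if other_result is None:
            other_result = AsyncResult()
            self.open_secret_transactions[secret] = other_result
            send_it = True
        else:
            send_it = False

    if send_it:
        try:
            other_result.set(self._send_register_transaction(secret))
        except Exception as e:
            other_result.set_exception(e)  # wake the waiters too
            raise
        finally:
            with self._open_secret_transactions_lock:
                self.open_secret_transactions.pop(secret, None)

    return other_result.get()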
def __init__(self, parent, socket):
    self.parent = parent

    # A simple connection header sent by the proxy before the connection
    # content; it encapsulates the original connection source information.
    header_size = struct.unpack('!I', socket.recv(4))[0]
    self.address = msgpack.unpackb(socket.recv(header_size))
    self.parent.log('Remote address: %s' % str(self.address))

    try:
        socket = parent.sslContext.wrap_socket(socket,
                                               server_side=True,
                                               do_handshake_on_connect=True,
                                               suppress_ragged_eofs=True)
    except Exception:
        raise DisconnectException

    self.s = socket
    self.aid = None
    self.lock = Semaphore(1)
    self.r = rpcm(isHumanReadable=True, isDebug=self.parent.log)
    self.r.loadSymbols(Symbols.lookups)
    self.connId = uuid.uuid4()
def handle(ws):
    """This is the websocket handler function.

    Note that we can dispatch based on path in here, too.
    """
    # Shared semaphore handed to the worker pool on both paths.
    sem = Semaphore()
    if ws.path == '/echo':
        while True:
            logger.debug("loop")
            data = ws.wait()
            logger.info("data {}".format(data))
            pool.apply_async(process, args=(data, ws, sem), callback=log_result)
    elif ws.path == '/data':
        for i in xrange(10000):
            ws.send("0 %s %s\n" % (i, random.random()))
            gevent.sleep(0.1)
        while True:
            logger.info("loop")
            data = ws.recvmsg()
            pool.apply_async(process, args=(data, ws, sem), callback=log_result)
def run_poll(self):
    self._sem = Semaphore()
    # self.constnt_schdlr = ConsistentScheduler(
    #     self.ol._moduleid,
    #     zookeeper=self._config.zookeeper_server(),
    #     delete_hndlr=self._del_ol)
    while self._keep_running:
        self.scan_data()
        # if self.constnt_schdlr.schedule(self.prouters):
        if self.prouters:
            print '@run: ', self.prouters, self._keep_running
            try:
                with self._sem:
                    self.compute()
            except Exception as e:
                import traceback
                traceback.print_exc()
                print str(e)
            gevent.sleep(self._sleep_time)
        else:
            gevent.sleep(1)
def __init__(
        self,
        api_key: ApiKey,
        secret: ApiSecret,
        database: 'DBHandler',
        msg_aggregator: MessagesAggregator,
        uri: str = BINANCE_BASE_URL,
):
    exchange_name = str(Location.BINANCE)
    if uri == BINANCE_US_BASE_URL:
        exchange_name = str(Location.BINANCE_US)
    super().__init__(exchange_name, api_key, secret, database)
    self.uri = uri
    self.session.headers.update({
        'Accept': 'application/json',
        'X-MBX-APIKEY': self.api_key,
    })
    self.msg_aggregator = msg_aggregator
    self.nonce_lock = Semaphore()
    self.offset_ms = 0
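# A minimal sketch, with a hypothetical `_api_query` helper, of what
# `nonce_lock` and `offset_ms` support: signed exchange requests carry a
# timestamp that must be fresh, so timestamp generation and dispatch happen
# together under the lock, with `offset_ms` correcting for drift between the
# local clock and the server's.
import time

def _api_query(self, method: str, options: dict):
    with self.nonce_lock:
        options['timestamp'] = int(time.time() * 1000) + self.offset_ms
        return self.session.get(f'{self.uri}{method}', params=options)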
def __init__(self, func, args):
    """
    @param func: action that needs to be called
    @param args: arguments to pass to the action when executing
    """
    self.guid = j.data.idgenerator.generateGUID()
    self.service = None
    self._func = func
    self.action_name = func.__name__ if func else None
    self._args = args
    self._priority = None
    self._result = None
    self._created = time.time()
    self._duration = None
    # used when action raises an exception
    self._eco = None
    self._state = TASK_STATE_NEW
    self._state_lock = Semaphore()
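# A sketch (hypothetical setter; only TASK_STATE_NEW appears above, so the
# terminal-state test is an assumption) of what `_state_lock` protects: the
# state field and its timing bookkeeping must change together, atomically.
def _set_state(self, new_state):
    with self._state_lock:
        self._state = new_state
        if new_state != TASK_STATE_NEW:
            self._duration = time.time() - self._created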
def __init__(
        self,
        name: str,
        api_key: typing.ApiKey,
        secret: typing.ApiSecret,
        user_directory: typing.FilePath,
):
    assert isinstance(api_key, typing.T_ApiKey), (
        'api key for {} should be a bytestring'.format(name))
    assert isinstance(secret, typing.T_ApiSecret), (
        'secret for {} should be a bytestring'.format(name))
    self.name = name
    self.user_directory = user_directory
    self.api_key = api_key
    self.secret = secret
    self.first_connection_made = False

    self.session = requests.session()
    self.lock = Semaphore()
    self.results_cache: dict = {}
    self.session.headers.update({'User-Agent': 'rotkehlchen'})
    log.info(f'Initialized {name} exchange')
def __init__(
        self,
        name: str,
        api_key: ApiKey,
        secret: ApiSecret,
        database: 'DBHandler',
        msg_aggregator: MessagesAggregator,
):
    super().__init__(
        name=name,
        location=Location.POLONIEX,
        api_key=api_key,
        secret=secret,
        database=database,
    )
    self.uri = 'https://poloniex.com/'
    self.public_uri = self.uri + 'public?command='
    self.session.headers.update({'Key': self.api_key})
    self.msg_aggregator = msg_aggregator
    self.nonce_lock = Semaphore()
def __init__(self, cloudDest, cbReceiveMessage, orgId, installerId,
             platform, architecture, sensorId=None, enrollmentToken=None,
             cbDebugLog=None, cbEnrollment=None):
    gevent.Greenlet.__init__(self)

    self._cbDebugLog = cbDebugLog
    self._cbReceiveMessage = cbReceiveMessage
    self._cbEnrollment = cbEnrollment

    try:
        self._destServer, self._destPort = cloudDest.split(':')
    except Exception:
        self._destServer = cloudDest
        self._destPort = 443

    self._oid = uuid.UUID(str(orgId))
    self._iid = uuid.UUID(str(installerId))
    self._sid = sensorId
    self._arch = architecture
    self._plat = platform
    if self._sid is not None:
        self._sid = uuid.UUID(str(self._sid))

    self._enrollmentToken = enrollmentToken

    self._socket = None
    self._threads = gevent.pool.Group()
    self._stopEvent = gevent.event.Event()
    self._lock = Semaphore(1)
    self._connectedEvent = gevent.event.Event()

    self._r = rpcm(isHumanReadable=True, isDebug=self._log)
    self._r.loadSymbols(Symbols.lookups)

    self._hcpModules = []
    self._hbsProfileHash = ("\x00" * 32)
def __init__(self, args: argparse.Namespace) -> None:
    """Initialize the Rotkehlchen object

    May Raise:
    - SystemPermissionError if the given data directory's permissions
      are not correct.
    """
    self.lock = Semaphore()
    self.lock.acquire()

    # Can also be None after unlock if premium credentials did not
    # authenticate or premium server temporarily offline
    self.premium: Optional[Premium] = None
    self.user_is_logged_in: bool = False
    configure_logging(args)

    self.sleep_secs = args.sleep_secs
    if args.data_dir is None:
        self.data_dir = default_data_directory()
    else:
        self.data_dir = Path(args.data_dir)

    if not os.access(self.data_dir, os.W_OK | os.R_OK):
        raise SystemPermissionError(
            f'The given data directory {self.data_dir} is not readable or writable',
        )
    self.args = args
    self.msg_aggregator = MessagesAggregator()
    self.greenlet_manager = GreenletManager(msg_aggregator=self.msg_aggregator)
    self.exchange_manager = ExchangeManager(msg_aggregator=self.msg_aggregator)
    self.data = DataHandler(self.data_dir, self.msg_aggregator)
    self.cryptocompare = Cryptocompare(data_directory=self.data_dir, database=None)
    # Initialize the Inquirer singleton
    Inquirer(data_dir=self.data_dir, cryptocompare=self.cryptocompare)

    self.lock.release()
    self.shutdown_event = gevent.event.Event()
def __init__(
        self,
        blockchain_accounts: Dict[str, List[Union[typing.EthAddress, typing.BTCAddress]]],
        all_eth_tokens: List[typing.EthTokenInfo],
        owned_eth_tokens: List[typing.EthToken],
        inquirer: Inquirer,
        ethchain,  # TODO ethchain type not added yet due to cyclic dependency
):
    self.lock = Semaphore()
    self.results_cache: Dict[str, typing.ResultCache] = {}
    self.ethchain = ethchain
    self.inquirer = inquirer

    self.accounts = blockchain_accounts
    # go through ETH accounts and make sure they are EIP55 encoded
    if S_ETH in self.accounts:
        self.accounts[S_ETH] = [
            to_checksum_address(x) for x in self.accounts[S_ETH]
        ]

    self.owned_eth_tokens = owned_eth_tokens

    # All the known tokens, along with addresses and decimals
    self.all_eth_tokens: AllEthTokens = {}
    for token in all_eth_tokens:
        try:
            token_symbol = cast(typing.EthToken, str(token['symbol']))
        except (UnicodeDecodeError, UnicodeEncodeError):
            # skip tokens with problems in unicode encoding/decoding
            continue

        self.all_eth_tokens[token_symbol] = {
            'address': to_checksum_address(token['address']),
            'decimal': token['decimal'],
        }

    # Per account balances
    self.balances: Balances = defaultdict(dict)
    # Per asset total balances
    self.totals: Totals = defaultdict(dict)
def __init__(self, logger, config, heartbeat=0):
    self._logger = logger
    servers = re.compile(r'[,\s]+').split(config.servers)
    urls = self._parse_servers(servers, config)
    ssl_params = self._fetch_ssl_params(config)
    self._queue_args = {"x-ha-policy": "all"} if config.ha_mode else None
    self._heartbeat = float(heartbeat)
    self._connection_lock = Semaphore()
    self._consumer_event = Event()
    self._consumers_created_event = Event()
    self._publisher_queue = Queue()
    self._connection = kombu.Connection(urls, ssl=ssl_params,
                                        heartbeat=heartbeat,
                                        transport_options={'confirm_publish': True})
    self._connected = False
    self._exchanges = {}
    self._consumers = {}
    self._removed_consumers = []
    self._running = False
    self._consumers_changed = True
    self._consumer_gl = None
    self._publisher_gl = None
    self._heartbeat_gl = None
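# A hedged sketch of what `_connection_lock` serializes: only one greenlet at
# a time may (re)establish the kombu connection, so publishers and consumers
# don't race through a reconnect. `ensure_connection` is a real kombu
# Connection method; the wrapper itself is illustrative.
def _ensure_connected(self):
    with self._connection_lock:
        if not self._connected:
            self._connection.ensure_connection()
            self._connected = True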
def __init__(
        self,
        blockchain_accounts: BlockchainAccounts,
        all_eth_tokens: List[typing.EthTokenInfo],
        owned_eth_tokens: List[typing.EthToken],
        inquirer: Inquirer,
        ethchain,  # TODO ethchain type not added yet due to cyclic dependency
):
    self.lock = Semaphore()
    self.results_cache: Dict[str, typing.ResultCache] = {}
    self.ethchain = ethchain
    self.inquirer = inquirer

    self.accounts = blockchain_accounts
    # go through ETH accounts and make sure they are EIP55 encoded.
    # Note that namedtuple._replace() returns a new tuple instead of
    # mutating in place, so the result must be assigned back.
    # TODO: move this normalization into the named tuple constructor.
    self.accounts = self.accounts._replace(
        eth=[to_checksum_address(x) for x in self.accounts.eth],
    )

    self.owned_eth_tokens = owned_eth_tokens

    # All the known tokens, along with addresses and decimals
    self.all_eth_tokens: AllEthTokens = {}
    for token in all_eth_tokens:
        try:
            token_symbol = cast(typing.EthToken, str(token['symbol']))
        except (UnicodeDecodeError, UnicodeEncodeError):
            # skip tokens with problems in unicode encoding/decoding
            continue

        self.all_eth_tokens[token_symbol] = {
            'address': to_checksum_address(token['address']),
            'decimal': cast(int, token['decimal']),
        }

    # Per account balances
    self.balances: Balances = defaultdict(dict)
    # Per asset total balances
    self.totals: Totals = defaultdict(dict)