def __init__(self):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    # For history compaction
    self.max_hist_row_entries = 12500
    self.unflushed = defaultdict(partial(array.array, 'I'))
    self.unflushed_count = 0
    self.db = None
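# Illustrative sketch (not part of the original code): how an `unflushed` map of this
# shape accumulates per-hashX transaction numbers as unsigned 32-bit ints until they
# are flushed to the history DB. The hashX value and tx numbers below are made up.
import array
from collections import defaultdict
from functools import partial

unflushed = defaultdict(partial(array.array, 'I'))
hashX = bytes(11)                              # placeholder script hashX
unflushed[hashX].append(123456)                # one tx number touching this hashX
unflushed[hashX].extend([123457, 123460])
unflushed_count = sum(len(v) for v in unflushed.values())
print(unflushed_count)                         # 3 entries pending flush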
def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin
    self.executor = None

    self.logger.info(f'switching current directory to {env.db_dir}')

    self.db_class = db_class(env.db_dir, self.env.db_engine)
    self.history = History()
    self.utxo_db = None
    self.tx_counts = None
    self.headers = None
    self.encoded_headers = LRUCacheWithMetrics(1 << 21, metric_name='encoded_headers', namespace='wallet_server')
    self.last_flush = time.time()

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    self.headers_db = None
    self.tx_db = None

    self._tx_and_merkle_cache = LRUCacheWithMetrics(2 ** 17, metric_name='tx_and_merkle', namespace="wallet_server")
    self.total_transactions = None
def __init__(self, env, db, daemon, notifications):
    self.env = env
    self.db = db
    self.daemon = daemon
    self.notifications = notifications

    self.coin = env.coin
    self.blocks_event = asyncio.Event()
    self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.executor = ThreadPoolExecutor(1)

    # Meta
    self.next_cache_check = 0
    self.touched = set()
    self.reorg_count = 0

    # Caches of unflushed items.
    self.headers = []
    self.tx_hashes = []
    self.undo_infos = []

    # UTXO cache
    self.utxo_cache = {}
    self.db_deletes = []

    # If the lock is successfully acquired, in-memory chain state
    # is consistent with self.height
    self.state_lock = asyncio.Lock()
    self.search_cache = {}
def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin

    # Setup block header size handlers
    if self.coin.STATIC_BLOCK_HEADERS:
        self.header_offset = self.coin.static_header_offset
        self.header_len = self.coin.static_header_len
    else:
        self.header_offset = self.dynamic_header_offset
        self.header_len = self.dynamic_header_len

    self.logger.info(f'switching current directory to {env.db_dir}')

    self.db_class = db_class(env.db_dir, self.env.db_engine)
    self.history = History()
    self.utxo_db = None
    self.tx_counts = None
    self.last_flush = time.time()

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    path = partial(os.path.join, self.env.db_dir)
    self.headers_file = util.LogicalFile(path('meta/headers'), 2, 16000000)
    self.tx_counts_file = util.LogicalFile(path('meta/txcounts'), 2, 2000000)
    self.hashes_file = util.LogicalFile(path('meta/hashes'), 4, 16000000)
    if not self.coin.STATIC_BLOCK_HEADERS:
        self.headers_offsets_file = util.LogicalFile(path('meta/headers_offsets'), 2, 16000000)
def __init__(self, main, path):
    self.main = main
    self._db_path = path
    self.db = None
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.ledger = Ledger if self.main.coin.NET == 'mainnet' else RegTestLedger
    self._fts_synced = False
def __init__(self, env):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.env = env
    self.coin = env.coin
    self.executor = None

    self.logger.info(f'switching current directory to {env.db_dir}')

    self.db_class = db_class(env.db_dir, self.env.db_engine)
    self.history = History()
    self.utxo_db = None
    self.tx_counts = None
    self.headers = None
    self.last_flush = time.time()

    self.logger.info(f'using {self.env.db_engine} for DB backend')

    # Header merkle cache
    self.merkle = Merkle()
    self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

    self.headers_db = None
    self.tx_db = None

    self._tx_and_merkle_cache = pylru.lrucache(100000)
    self.total_transactions = None
def __init__(self, coin, daemon, db, state_lock: asyncio.Lock, refresh_secs=1.0, log_status_secs=120.0):
    self.coin = coin
    self._daemon = daemon
    self._db = db
    self._touched_mp = {}
    self._touched_bp = {}
    self._highest_block = -1

    self.logger = class_logger(__name__, self.__class__.__name__)
    self.txs = {}
    self.hashXs = defaultdict(set)  # None can be a key
    self.cached_compact_histogram = []
    self.refresh_secs = refresh_secs
    self.log_status_secs = log_status_secs
    # Prevents mempool refreshes during fee histogram calculation
    self.lock = state_lock
    self.wakeup = asyncio.Event()
    self.mempool_process_time_metric = mempool_process_time_metric
    self.notified_mempool_txs = set()
    self.notify_sessions: Optional[Callable[[int, Set[bytes], Set[bytes]], Awaitable[None]]] = None
def __init__(self, index_prefix: str, search_timeout=3.0, elastic_host='localhost', elastic_port=9200):
    self.search_timeout = search_timeout
    self.sync_timeout = 600  # won't hit that 99% of the time, but can hit on a fresh import
    self.search_client: Optional[AsyncElasticsearch] = None
    self.sync_client: Optional[AsyncElasticsearch] = None
    self.index = index_prefix + 'claims'
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.claim_cache = LRUCache(2 ** 15)
    self.search_cache = LRUCache(2 ** 17)
    self._elastic_host = elastic_host
    self._elastic_port = elastic_port
def __init__(self, index_prefix: str, search_timeout=3.0):
    self.search_timeout = search_timeout
    self.sync_timeout = 600  # won't hit that 99% of the time, but can hit on a fresh import
    self.search_client: Optional[AsyncElasticsearch] = None
    self.sync_client: Optional[AsyncElasticsearch] = None
    self.index = index_prefix + 'claims'
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.claim_cache = LRUCache(2 ** 15)
    self.short_id_cache = LRUCache(2 ** 17)  # never invalidated, since short ids are forever
    self.search_cache = LRUCache(2 ** 17)
    self.resolution_cache = LRUCache(2 ** 17)
def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=120.0):
    assert isinstance(api, MemPoolAPI)
    self.coin = coin
    self.api = api
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.txs = {}
    self.hashXs = defaultdict(set)  # None can be a key
    self.cached_compact_histogram = []
    self.refresh_secs = refresh_secs
    self.log_status_secs = log_status_secs
    # Prevents mempool refreshes during fee histogram calculation
    self.lock = asyncio.Lock()
    self.wakeup = asyncio.Event()
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25, max_retry=4.0):
    self.coin = coin
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.set_url(url)
    # Limit concurrent RPC calls to this number.
    # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
    self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
    self.init_retry = init_retry
    self.max_retry = max_retry
    self._height = None
    self.available_rpcs = {}
    self.connector = aiohttp.TCPConnector()
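# Illustrative sketch (not from the original source): how the workqueue semaphore caps
# the number of RPC requests in flight, mirroring bitcoind's HTTP work queue
# (DEFAULT_HTTP_WORKQUEUE, typically 16). The `_send_one` coroutine is hypothetical;
# the real daemon issues an aiohttp POST inside the semaphore instead of sleeping.
import asyncio

workqueue_semaphore = asyncio.Semaphore(value=10)

async def _send_one(payload):
    async with workqueue_semaphore:        # at most 10 concurrent "RPC calls"
        await asyncio.sleep(0.01)          # stand-in for the actual HTTP round trip
        return payload

async def main():
    results = await asyncio.gather(*(_send_one(i) for i in range(50)))
    print(len(results))                    # 50, processed at most 10 at a time

asyncio.run(main())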
def __init__(self, daemon, coin, blocks_event):
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.daemon = daemon
    self.coin = coin
    self.blocks_event = blocks_event
    self.blocks = []
    self.caught_up = False
    # Access to fetched_height should be protected by the semaphore
    self.fetched_height = None
    self.semaphore = asyncio.Semaphore()
    self.refill_event = asyncio.Event()
    # The prefetched block cache size.  The min cache size has
    # little effect on sync time.
    self.cache_size = 0
    self.min_cache_size = 10 * 1024 * 1024
    # This makes the first fetch be 10 blocks
    self.ave_size = self.min_cache_size // 10
    self.polling_delay = 5
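# Illustrative sketch (not from the original source): the arithmetic behind the first
# prefetch. With a 10 MB cache floor and an initial average-size guess of one tenth of
# that, the first request asks the daemon for about 10 blocks; once real block sizes
# are observed, the estimate shrinks or grows the next fetch accordingly. The exact
# update rule used later is an assumption here.
min_cache_size = 10 * 1024 * 1024            # 10 MB prefetch cache floor
ave_size = min_cache_size // 10              # initial guess => first fetch is ~10 blocks
print(min_cache_size // ave_size)            # 10

observed_sizes = [250_000, 300_000, 1_100_000]   # made-up raw block sizes
ave_size = sum(observed_sizes) // len(observed_sizes)
print(min_cache_size // ave_size)            # far more small blocks fit per fetch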
def __init__(
        self, main, path: str, blocking_channels: list, filtering_channels: list, trending: list):
    self.main = main
    self._db_path = path
    self.db = None
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.ledger = Ledger if main.coin.NET == 'mainnet' else RegTestLedger
    self.blocked_streams = None
    self.blocked_channels = None
    self.blocking_channel_hashes = {
        unhexlify(channel_id)[::-1] for channel_id in blocking_channels if channel_id
    }
    self.filtered_streams = None
    self.filtered_channels = None
    self.filtering_channel_hashes = {
        unhexlify(channel_id)[::-1] for channel_id in filtering_channels if channel_id
    }
    self.trending = trending
    self.pending_deletes = set()
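# Illustrative sketch (not from the original source): a channel id arrives as a hex
# string in API order; unhexlify() converts it to raw bytes and [::-1] reverses them
# into the internal hash byte order used as a set member above. The channel id below
# is a made-up placeholder.
from binascii import hexlify, unhexlify

channel_id = '2556ed1cab9d17f2a9392030a9ad7f5d138f11bd'
channel_hash = unhexlify(channel_id)[::-1]
print(len(channel_hash))                                    # 20 raw bytes
print(hexlify(channel_hash[::-1]).decode() == channel_id)   # True: reversing round-trips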
def __init__(self, env, db):
    self.logger = class_logger(__name__, self.__class__.__name__)
    # Initialise the Peer class
    Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
    self.env = env
    self.db = db

    # Our clearnet and Tor Peers, if any
    sclass = env.coin.SESSIONCLS
    self.myselves = [Peer(ident.host, sclass.server_features(env), 'env')
                     for ident in env.identities]
    self.server_version_args = sclass.server_version_args()
    # Peers have one entry per hostname.  Once connected, the
    # ip_addr property is either None, an onion peer, or the
    # IP address that was connected to.  Adding a peer will evict
    # any other peers with the same host name or IP address.
    self.peers: typing.Set[Peer] = set()
    self.permit_onion_peer_time = time.time()
    self.proxy = None
    self.group = TaskGroup()
def __init__(self, coin, url, max_workqueue=10, init_retry=0.25, max_retry=4.0):
    self.coin = coin
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.set_url(url)
    # Limit concurrent RPC calls to this number.
    # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
    self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
    self.init_retry = init_retry
    self.max_retry = max_retry
    self._height = None
    self.available_rpcs = {}
    self.connector = aiohttp.TCPConnector()
    self._block_hash_cache = LRUCacheWithMetrics(100000)
    self._block_cache = LRUCacheWithMetrics(2 ** 13, metric_name='block', namespace=NAMESPACE)
def __init__(self):
    self.logger = util.class_logger(__name__, self.__class__.__name__)
    self.runner = None
def __init__(self, coin=None):
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.allow_root = self.boolean('ALLOW_ROOT', False)
    self.host = self.default('HOST', 'localhost')
    self.rpc_host = self.default('RPC_HOST', 'localhost')
    self.loop_policy = self.set_event_loop_policy()
    self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
    self.db_dir = self.required('DB_DIRECTORY')
    self.db_engine = self.default('DB_ENGINE', 'leveldb')
    self.trending_algorithms = [
        trending for trending in set(self.default('TRENDING_ALGORITHMS', 'zscore').split(' ')) if trending
    ]
    self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
    self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
    self.track_metrics = self.boolean('TRACK_METRICS', False)
    self.websocket_host = self.default('WEBSOCKET_HOST', self.host)
    self.websocket_port = self.integer('WEBSOCKET_PORT', None)
    self.daemon_url = self.required('DAEMON_URL')
    if coin is not None:
        assert issubclass(coin, Coin)
        self.coin = coin
    else:
        coin_name = self.required('COIN').strip()
        network = self.default('NET', 'mainnet').strip()
        self.coin = Coin.lookup_coin_class(coin_name, network)
    self.es_index_prefix = self.default('ES_INDEX_PREFIX', '')
    self.es_mode = self.default('ES_MODE', 'writer')
    self.cache_MB = self.integer('CACHE_MB', 1200)
    self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
    # Server stuff
    self.tcp_port = self.integer('TCP_PORT', None)
    self.udp_port = self.integer('UDP_PORT', self.tcp_port)
    self.ssl_port = self.integer('SSL_PORT', None)
    if self.ssl_port:
        self.ssl_certfile = self.required('SSL_CERTFILE')
        self.ssl_keyfile = self.required('SSL_KEYFILE')
    self.rpc_port = self.integer('RPC_PORT', 8000)
    self.prometheus_port = self.integer('PROMETHEUS_PORT', 0)
    self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
    self.banner_file = self.default('BANNER_FILE', None)
    self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
    self.anon_logs = self.boolean('ANON_LOGS', False)
    self.log_sessions = self.integer('LOG_SESSIONS', 3600)
    # Peer discovery
    self.peer_discovery = self.peer_discovery_enum()
    self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
    self.force_proxy = self.boolean('FORCE_PROXY', False)
    self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
    self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
    # The electrum client takes the empty string as unspecified
    self.payment_address = self.default('PAYMENT_ADDRESS', '')
    self.donation_address = self.default('DONATION_ADDRESS', '')
    # Server limits to help prevent DoS
    self.max_send = self.integer('MAX_SEND', 1000000)
    self.max_receive = self.integer('MAX_RECEIVE', 1000000)
    self.max_subs = self.integer('MAX_SUBS', 250000)
    self.max_sessions = self.sane_max_sessions()
    self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
    self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
    self.drop_client = self.custom("DROP_CLIENT", None, re.compile)
    self.description = self.default('DESCRIPTION', '')
    self.daily_fee = self.string_amount('DAILY_FEE', '0')
    # Identities
    clearnet_identity = self.clearnet_identity()
    tor_identity = self.tor_identity(clearnet_identity)
    self.identities = [identity
                       for identity in (clearnet_identity, tor_identity)
                       if identity is not None]
    self.database_query_timeout = float(self.integer('QUERY_TIMEOUT_MS', 3000)) / 1000.0
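# Illustrative sketch (not from the original source): standalone approximations of the
# helpers the constructor above relies on (required/default/integer/boolean), which
# read settings from process environment variables. Their exact behaviour in the real
# Env class is an assumption; this is only meant to make the constructor easier to follow.
import os

class EnvError(Exception):
    pass

def required(name):
    value = os.environ.get(name)
    if value is None:
        raise EnvError(f'required environment variable {name} is not set')
    return value

def default(name, fallback):
    return os.environ.get(name, fallback)

def integer(name, fallback):
    value = os.environ.get(name)
    return fallback if value is None else int(value)

def boolean(name, fallback):
    value = os.environ.get(name)
    if value is None:
        return fallback
    return value.strip().lower() not in ('', '0', 'no', 'false')

# e.g. with DB_DIRECTORY=/var/lib/wallet-server set and CACHE_MB unset:
#   required('DB_DIRECTORY')  -> '/var/lib/wallet-server'
#   integer('CACHE_MB', 1200) -> 1200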
def __init__(self, coin=None, db_dir=None, daemon_url=None, host=None, rpc_host=None, elastic_host=None,
             elastic_port=None, loop_policy=None, max_query_workers=None, websocket_host=None,
             websocket_port=None, chain=None, es_index_prefix=None, es_mode=None, cache_MB=None,
             reorg_limit=None, tcp_port=None, udp_port=None, ssl_port=None, ssl_certfile=None,
             ssl_keyfile=None, rpc_port=None, prometheus_port=None, max_subscriptions=None,
             banner_file=None, anon_logs=None, log_sessions=None, allow_lan_udp=None,
             cache_all_tx_hashes=None, cache_all_claim_txos=None, country=None, payment_address=None,
             donation_address=None, max_send=None, max_receive=None, max_sessions=None,
             session_timeout=None, drop_client=None, description=None, daily_fee=None,
             database_query_timeout=None, db_max_open_files=512):
    self.logger = class_logger(__name__, self.__class__.__name__)
    self.db_dir = db_dir if db_dir is not None else self.required('DB_DIRECTORY')
    self.daemon_url = daemon_url if daemon_url is not None else self.required('DAEMON_URL')
    self.db_max_open_files = db_max_open_files
    self.host = host if host is not None else self.default('HOST', 'localhost')
    self.rpc_host = rpc_host if rpc_host is not None else self.default('RPC_HOST', 'localhost')
    self.elastic_host = elastic_host if elastic_host is not None else self.default('ELASTIC_HOST', 'localhost')
    self.elastic_port = elastic_port if elastic_port is not None else self.integer('ELASTIC_PORT', 9200)
    self.loop_policy = self.set_event_loop_policy(
        loop_policy if loop_policy is not None else self.default('EVENT_LOOP_POLICY', None))
    self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
    self.max_query_workers = max_query_workers if max_query_workers is not None else self.integer('MAX_QUERY_WORKERS', 4)
    self.websocket_host = websocket_host if websocket_host is not None else self.default('WEBSOCKET_HOST', self.host)
    self.websocket_port = websocket_port if websocket_port is not None else self.integer('WEBSOCKET_PORT', None)
    if coin is not None:
        assert issubclass(coin, Coin)
        self.coin = coin
    else:
        chain = chain if chain is not None else self.default('NET', 'mainnet').strip().lower()
        if chain == 'mainnet':
            self.coin = LBC
        elif chain == 'testnet':
            self.coin = LBCTestNet
        else:
            self.coin = LBCRegTest
    self.es_index_prefix = es_index_prefix if es_index_prefix is not None else self.default('ES_INDEX_PREFIX', '')
    self.es_mode = es_mode if es_mode is not None else self.default('ES_MODE', 'writer')
    self.cache_MB = cache_MB if cache_MB is not None else self.integer('CACHE_MB', 1024)
    self.reorg_limit = reorg_limit if reorg_limit is not None else self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
    # Server stuff
    self.tcp_port = tcp_port if tcp_port is not None else self.integer('TCP_PORT', None)
    self.udp_port = udp_port if udp_port is not None else self.integer('UDP_PORT', self.tcp_port)
    self.ssl_port = ssl_port if ssl_port is not None else self.integer('SSL_PORT', None)
    if self.ssl_port:
        self.ssl_certfile = ssl_certfile if ssl_certfile is not None else self.required('SSL_CERTFILE')
        self.ssl_keyfile = ssl_keyfile if ssl_keyfile is not None else self.required('SSL_KEYFILE')
    self.rpc_port = rpc_port if rpc_port is not None else self.integer('RPC_PORT', 8000)
    self.prometheus_port = prometheus_port if prometheus_port is not None else self.integer('PROMETHEUS_PORT', 0)
    self.max_subscriptions = max_subscriptions if max_subscriptions is not None else self.integer('MAX_SUBSCRIPTIONS', 10000)
    self.banner_file = banner_file if banner_file is not None else self.default('BANNER_FILE', None)
    # self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
    self.anon_logs = anon_logs if anon_logs is not None else self.boolean('ANON_LOGS', False)
    self.log_sessions = log_sessions if log_sessions is not None else self.integer('LOG_SESSIONS', 3600)
    self.allow_lan_udp = allow_lan_udp if allow_lan_udp is not None else self.boolean('ALLOW_LAN_UDP', False)
    self.cache_all_tx_hashes = cache_all_tx_hashes if cache_all_tx_hashes is not None else self.boolean('CACHE_ALL_TX_HASHES', False)
    self.cache_all_claim_txos = cache_all_claim_txos if cache_all_claim_txos is not None else self.boolean('CACHE_ALL_CLAIM_TXOS', False)
    self.country = country if country is not None else self.default('COUNTRY', 'US')
    # Peer discovery
    self.peer_discovery = self.peer_discovery_enum()
    self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
    self.peer_hubs = self.extract_peer_hubs()
    # self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
    # self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
    # The electrum client takes the empty string as unspecified
    self.payment_address = payment_address if payment_address is not None else self.default('PAYMENT_ADDRESS', '')
    self.donation_address = donation_address if donation_address is not None else self.default('DONATION_ADDRESS', '')
    # Server limits to help prevent DoS
    self.max_send = max_send if max_send is not None else self.integer('MAX_SEND', 1000000)
    self.max_receive = max_receive if max_receive is not None else self.integer('MAX_RECEIVE', 1000000)
    # self.max_subs = self.integer('MAX_SUBS', 250000)
    self.max_sessions = max_sessions if max_sessions is not None else self.sane_max_sessions()
    # self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
    self.session_timeout = session_timeout if session_timeout is not None else self.integer('SESSION_TIMEOUT', 600)
    self.drop_client = drop_client if drop_client is not None else self.custom("DROP_CLIENT", None, re.compile)
    self.description = description if description is not None else self.default('DESCRIPTION', '')
    self.daily_fee = daily_fee if daily_fee is not None else self.string_amount('DAILY_FEE', '0')
    # Identities
    clearnet_identity = self.clearnet_identity()
    tor_identity = self.tor_identity(clearnet_identity)
    self.identities = [
        identity for identity in (clearnet_identity, tor_identity) if identity is not None
    ]
    self.database_query_timeout = database_query_timeout if database_query_timeout is not None else \
        (float(self.integer('QUERY_TIMEOUT_MS', 10000)) / 1000.0)