Example #1
    def __init__(self, env, db, daemon, notifications):
        self.env = env
        self.db = db
        self.daemon = daemon
        self.notifications = notifications

        self.coin = env.coin
        self.blocks_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
        self.logger = class_logger(__name__, self.__class__.__name__)

        # Meta
        self.next_cache_check = 0
        self.touched = set()
        self.reorg_count = 0

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()
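Every example on this page obtains its logger with class_logger(__name__, self.__class__.__name__). The helper itself is not part of these snippets; a minimal sketch of what it could look like, assuming it simply returns a per-class child logger (name and behavior inferred from how it is called, not taken from shown source):

import logging

def class_logger(module_name, class_name):
    # Hypothetical sketch: namespace the logger as '<module>.<ClassName>'
    # so each class logs under its own hierarchical name.
    return logging.getLogger(module_name).getChild(class_name)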
Example #2
    def __init__(self, coin=None):
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.allow_root = self.boolean('ALLOW_ROOT', False)
        self.host = self.default('HOST', 'localhost')
        self.rpc_host = self.default('RPC_HOST', 'localhost')
        self.loop_policy = self.set_event_loop_policy()
        self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
        self.db_dir = self.required('DB_DIRECTORY')
        self.db_engine = self.default('DB_ENGINE', 'leveldb')
        self.max_query_workers = self.integer('MAX_QUERY_WORKERS', None)
        self.individual_tag_indexes = self.boolean('INDIVIDUAL_TAG_INDEXES', True)
        self.track_metrics = self.boolean('TRACK_METRICS', False)
        self.websocket_host = self.default('WEBSOCKET_HOST', self.host)
        self.websocket_port = self.integer('WEBSOCKET_PORT', None)
        self.daemon_url = self.required('DAEMON_URL')
        if coin is not None:
            assert issubclass(coin, Coin)
            self.coin = coin
        else:
            coin_name = self.required('COIN').strip()
            network = self.default('NET', 'mainnet').strip()
            self.coin = Coin.lookup_coin_class(coin_name, network)
        self.cache_MB = self.integer('CACHE_MB', 1200)
        self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
        # Server stuff
        self.tcp_port = self.integer('TCP_PORT', None)
        self.ssl_port = self.integer('SSL_PORT', None)
        if self.ssl_port:
            self.ssl_certfile = self.required('SSL_CERTFILE')
            self.ssl_keyfile = self.required('SSL_KEYFILE')
        self.rpc_port = self.integer('RPC_PORT', 8000)
        self.max_subscriptions = self.integer('MAX_SUBSCRIPTIONS', 10000)
        self.banner_file = self.default('BANNER_FILE', None)
        self.tor_banner_file = self.default('TOR_BANNER_FILE', self.banner_file)
        self.anon_logs = self.boolean('ANON_LOGS', False)
        self.log_sessions = self.integer('LOG_SESSIONS', 3600)
        # Peer discovery
        self.peer_discovery = self.peer_discovery_enum()
        self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
        self.force_proxy = self.boolean('FORCE_PROXY', False)
        self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
        self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
        # The electrum client takes the empty string as unspecified
        self.donation_address = self.default('DONATION_ADDRESS', '')
        # Server limits to help prevent DoS
        self.max_send = self.integer('MAX_SEND', 1000000)
        self.max_subs = self.integer('MAX_SUBS', 250000)
        self.max_sessions = self.sane_max_sessions()
        self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
        self.bandwidth_limit = self.integer('BANDWIDTH_LIMIT', 2000000)
        self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
        self.drop_client = self.custom("DROP_CLIENT", None, re.compile)

        # Identities
        clearnet_identity = self.clearnet_identity()
        tor_identity = self.tor_identity(clearnet_identity)
        self.identities = [identity
                           for identity in (clearnet_identity, tor_identity)
                           if identity is not None]
        self.database_query_timeout = float(self.integer('QUERY_TIMEOUT_MS', 250)) / 1000.0
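Example #2 reads its configuration through helper methods (required, default, integer, boolean, custom) that are defined elsewhere in the class and not shown here. A minimal sketch of how such helpers could be written, assuming they wrap os.environ lookups (the method names mirror the calls above; the bodies are an assumption, not the shown implementation):

import os

class EnvHelpers:
    class Error(Exception):
        pass

    def default(self, envvar, default):
        # Return the raw environment value, or the default when unset.
        return os.environ.get(envvar, default)

    def required(self, envvar):
        # Fail loudly when a mandatory variable is missing.
        value = os.environ.get(envvar)
        if value is None:
            raise self.Error(f'required environment variable {envvar} not set')
        return value

    def integer(self, envvar, default):
        # Parse an integer, falling back to the default when unset.
        value = os.environ.get(envvar)
        if value is None:
            return default
        try:
            return int(value)
        except ValueError:
            raise self.Error(f'cannot convert {envvar} value {value!r} to an integer')

    def boolean(self, envvar, default):
        # Treat any non-empty value as true; unset uses the default.
        value = os.environ.get(envvar)
        if value is None:
            return default
        return bool(value.strip())

    def custom(self, envvar, default, parse):
        # Apply a caller-supplied parser (e.g. re.compile for DROP_CLIENT).
        value = os.environ.get(envvar)
        if value is None:
            return default
        return parse(value)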
Example #3
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.tx_counts = None
        self.last_flush = time.time()

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
Example #4
    def __init__(self):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        # For history compaction
        self.max_hist_row_entries = 12500
        self.unflushed = defaultdict(partial(array.array, 'I'))
        self.unflushed_count = 0
        self.db = None
Example #5
    def __init__(self, main, path):
        self.main = main
        self._db_path = path
        self.db = None
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.ledger = MainNetLedger if self.main.coin.NET == 'mainnet' else RegTestLedger
        self._fts_synced = False
Example #6
    def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=120.0):
        assert isinstance(api, MemPoolAPI)
        self.coin = coin
        self.api = api
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.txs = {}
        self.hashXs = defaultdict(set)  # None can be a key
        self.cached_compact_histogram = []
        self.refresh_secs = refresh_secs
        self.log_status_secs = log_status_secs
        # Prevents mempool refreshes during fee histogram calculation
        self.lock = Lock()
Example #7
    def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
                 max_retry=4.0):
        self.coin = coin
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.set_url(url)
        # Limit concurrent RPC calls to this number.
        # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
        self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
        self.init_retry = init_retry
        self.max_retry = max_retry
        self._height = None
        self.available_rpcs = {}
        self.connector = aiohttp.TCPConnector()
Example #8
    def __init__(self, daemon, coin, blocks_event):
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.daemon = daemon
        self.coin = coin
        self.blocks_event = blocks_event
        self.blocks = []
        self.caught_up = False
        # Access to fetched_height should be protected by the semaphore
        self.fetched_height = None
        self.semaphore = asyncio.Semaphore()
        self.refill_event = asyncio.Event()
        # The prefetched block cache size.  The min cache size has
        # little effect on sync time.
        self.cache_size = 0
        self.min_cache_size = 10 * 1024 * 1024
        # This makes the first fetch be 10 blocks
        self.ave_size = self.min_cache_size // 10
        self.polling_delay = 5
Example #9
    def __init__(self, env, db):
        self.logger = class_logger(__name__, self.__class__.__name__)
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.db = db

        # Our clearnet and Tor Peers, if any
        sclass = env.coin.SESSIONCLS
        self.myselves = [Peer(ident.host, sclass.server_features(env), 'env')
                         for ident in env.identities]
        self.server_version_args = sclass.server_version_args()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = None
        self.group = TaskGroup()
Example #10
    def __init__(self, main, path):
        self.main = main
        self._db_path = path
        self.db = None
        self.logger = class_logger(__name__, self.__class__.__name__)
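All of these constructors follow the same dependency-injection pattern: collaborators are passed in as arguments and a per-class logger is created up front. A hypothetical wiring of a few of them, purely for illustration (the class names are assumptions; only the __init__ signatures above are taken from the examples):

env = Env()                                  # Example #2: reads configuration from environment variables
db = DB(env)                                 # Example #3: switches to env.db_dir and sets up the DB backend
daemon = Daemon(env.coin, env.daemon_url)    # Example #7: RPC client for the coin daemon
notifications = Notifications()              # placeholder collaborator, not shown on this page
bp = BlockProcessor(env, db, daemon, notifications)  # Example #1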