Example #1
 def __init__(self):
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     # For history compaction
     self.max_hist_row_entries = 12500
     self.unflushed = defaultdict(partial(array.array, 'I'))
     self.unflushed_count = 0
     self.db = None
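Every example in this listing builds its logger with class_logger. A minimal sketch of such a helper, assuming the ElectrumX-style behaviour of namespacing the logger by module and class (the real util.class_logger may differ in detail):

    import logging

    def class_logger(module_name, class_name):
        # Return a logger named "<module>.<Class>" so every log line
        # identifies both the defining module and the concrete class.
        return logging.getLogger(module_name).getChild(class_name)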
Example #2
    def __init__(self, env, db, daemon, notifications):
        self.env = env
        self.db = db
        self.daemon = daemon
        self.notifications = notifications

        self.coin = env.coin
        self.blocks_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
        self.logger = class_logger(__name__, self.__class__.__name__)

        # Meta
        self.next_cache_check = 0
        self.touched = set()
        self.reorg_count = 0

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()
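The state_lock comment describes an invariant rather than code; a hedged sketch of the implied usage, with the method name and body illustrative only: mutate in-memory chain state and the height only while holding the lock, so any coroutine that acquires it observes a consistent pair.

    async def _advance_chain(self, header):
        # Illustrative only: update chain state while holding state_lock
        # so concurrent readers see a consistent snapshot.
        async with self.state_lock:
            self.headers.append(header)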
Example #3
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.utxo_db = None
        self.tx_counts = None
        self.last_flush = time.time()

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
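util.LogicalFile(prefix, digits, file_size) in ElectrumX presents several on-disk files as one seekable byte store: the second argument is the number of digits in the numeric suffix, the third the maximum size of each underlying file. A hedged usage sketch:

    headers = util.LogicalFile('meta/headers', 2, 16000000)
    # read(offset, size) and write(offset, data) address the logical
    # byte stream; internally they map onto meta/headers00,
    # meta/headers01, ... each capped at 16,000,000 bytes.
    first_header = headers.read(0, 80)  # 80 bytes: a static BTC header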
Example #4
    def __init__(self, env):
        '''Save the environment, perform basic sanity checks, and set the
        event loop policy.
        '''
        # First asyncio operation must be to set the event loop policy
        # as this replaces the event loop
        asyncio.set_event_loop_policy(env.loop_policy)

        self.logger = class_logger(__name__, self.__class__.__name__)
        version_str = ' '.join(sys.version.splitlines())
        self.logger.info(f'Python version: {version_str}')
        self.env = env
        self.start_time = 0

        # Sanity checks
        if sys.version_info < self.PYTHON_MIN_VERSION:
            mvs = '.'.join(str(part) for part in self.PYTHON_MIN_VERSION)
            raise RuntimeError('Python version >= {} is required'.format(mvs))

        if platform.system() == 'Windows':
            pass
        elif os.geteuid() == 0 and not env.allow_root:
            raise RuntimeError('RUNNING AS ROOT IS STRONGLY DISCOURAGED!\n'
                               'You should create an unprivileged user account '
                               'and use that.\n'
                               'To continue as root anyway, restart with '
                               'environment variable ALLOW_ROOT non-empty')
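The sanity check compares sys.version_info against a PYTHON_MIN_VERSION class attribute using ordinary tuple ordering; a hedged sketch of how that attribute could be declared (the value shown is illustrative):

    import sys

    class ServerBase:
        # Illustrative value: sys.version_info is a named tuple, so the
        # lexicographic comparison (3, 6, 1) < (3, 7) behaves as
        # expected, and '.'.join(...) renders it as "3.7" for the
        # RuntimeError message above.
        PYTHON_MIN_VERSION = (3, 7)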
Example #5
    def __init__(self, env, db):
        self.logger = class_logger(__name__, self.__class__.__name__)
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.db = db

        # Our clearnet and Tor Peers, if any
        sclass = env.coin.SESSIONCLS
        self.myselves = [
            Peer(ident.host, sclass.server_features(env), 'env')
            for ident in env.identities
        ]
        self.server_version_args = sclass.server_version_args()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = None
        self.group = TaskGroup()
        self.recent_peer_adds = {}
        # refreshed
        self.blacklist = set()
Example #6
    def __init__(
        self,
        coin: Type['Coin'],
        url,
        *,
        max_workqueue=10,
        init_retry=0.25,
        max_retry=4.0,
    ):
        self.coin = coin
        self.logger = class_logger(__name__, self.__class__.__name__)
        self.url_index = None
        self.urls = []
        self.set_url(url)
        # Limit concurrent RPC calls to this number.
        # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
        self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
        self.init_retry = init_retry
        self.max_retry = max_retry
        self._height = None
        self.available_rpcs = {}
        self.session = None

        self._networkinfo_cache = (None, 0)
        self._networkinfo_lock = asyncio.Lock()
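The bare * in this signature makes the tuning parameters keyword-only, which keeps call sites self-describing and lets the defaults evolve without breaking positional callers. Usage sketch (the class name Daemon is assumed from context):

    daemon = Daemon(coin, url, max_workqueue=16, init_retry=0.5)
    # Daemon(coin, url, 16) would raise TypeError: arguments after
    # '*' cannot be passed positionally.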
Example #7
    def __init__(self, env, tasks, chain_state):
        self.logger = class_logger(__name__, self.__class__.__name__)
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.tasks = tasks
        self.chain_state = chain_state
        self.loop = tasks.loop

        # Our clearnet and Tor Peers, if any
        sclass = env.coin.SESSIONCLS
        self.myselves = [
            Peer(ident.host, sclass.server_features(env), 'env')
            for ident in env.identities
        ]
        self.server_version_args = sclass.server_version_args()
        self.retry_event = asyncio.Event()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = None
        self.last_proxy_try = 0
Example #8
 def __init__(self):
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     # For history compaction
     self.max_hist_row_entries = 12500
     self.unflushed = defaultdict(
         list)  # {b'hashY' => [array('I', [txnum, log_index]),]}
     self.unflushed_count = 0
     self.db = None
Example #9
 def __init__(self, peer, peer_mgr, kind, host, port, **kwargs):
     super().__init__(host, port, **kwargs)
     self.peer = peer
     self.peer_mgr = peer_mgr
     self.kind = kind
     self.timeout = 20 if self.peer.is_tor else 10
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.logger = ConnectionLogger(self.logger, {'conn_id': f'{host}'})
Example #10
 def __init__(self, coin, daemon, notifications, lookup_utxos):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.coin = coin
     self.lookup_utxos = lookup_utxos
     self.daemon = daemon
     self.notifications = notifications
     self.txs = {}
     self.hashXs = defaultdict(set)  # None can be a key
     self.cached_compact_histogram = []
Example #11
 def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=60.0):
     assert isinstance(api, MemPoolAPI)
     self.coin = coin
     self.api = api
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.txs = {}
     self.hashXs = defaultdict(set)  # None can be a key
     self.refresh_secs = refresh_secs
     self.log_status_secs = log_status_secs
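hashXs is a defaultdict(set): indexing a missing key silently creates an empty set, and, as the comment stresses, None is a perfectly valid key. A minimal self-contained demonstration:

    from collections import defaultdict

    hashXs = defaultdict(set)
    hashXs[b'\x00' * 11].add(b'tx1')  # auto-creates the set
    hashXs[None].add(b'tx2')          # None can be a key, as noted
    assert hashXs[None] == {b'tx2'}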
Example #12
 def __init__(self, *, loop=None):
     self.tasks = TaskSet(loop=loop)
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     # FIXME: is the executor still needed?
     self.executor = ThreadPoolExecutor()
     self.tasks.loop.set_default_executor(self.executor)
     # Pass through until integrated
     self.loop = self.tasks.loop
     self.cancel_all = self.tasks.cancel_all
     self.wait = self.tasks.wait
Example #13
 def __init__(self):
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     # For history compaction
     self.max_hist_row_entries = 12500
     self.unflushed = defaultdict(partial(array.array, 'I'))
     self.unflushed_count = 0
     self.flush_count = 0
     self.comp_flush_count = -1
     self.comp_cursor = -1
     self.db_version = max(self.DB_VERSIONS)
     self.db = None
Example #14
 def __init__(self, coin, api, refresh_secs=5.0, log_status_secs=120.0):
     assert isinstance(api, MemPoolAPI)
     self.coin = coin
     self.api = api
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.txs = {}
     self.hashXs = defaultdict(set)  # None can be a key
     self.cached_compact_histogram = []
     self.refresh_secs = refresh_secs
     self.log_status_secs = log_status_secs
     # Prevents mempool refreshes during fee histogram calculation
     self.lock = Lock()
Example #15
 def __init__(self, env):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.coin = env.coin
     self.set_urls(env.coin.daemon_urls(env.daemon_url))
     self._height = None
     # Limit concurrent RPC calls to this number.
     # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
     self.workqueue_semaphore = asyncio.Semaphore(value=10)
     self.down = False
     self.last_error_time = 0
     self.req_id = 0
     self._available_rpcs = {}  # caches results for _is_rpc_available()
Example #16
 def __init__(self, coin, url, max_workqueue=10, init_retry=0.25,
              max_retry=4.0):
     self.coin = coin
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.set_url(url)
     # Limit concurrent RPC calls to this number.
     # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
     self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
     self.init_retry = init_retry
     self.max_retry = max_retry
     self._height = None
     self.available_rpcs = {}
Example #17
 def __init__(self):
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     # For history compaction
     self.max_hist_row_entries = 12500
     self.unflushed = defaultdict(
         list)  # {b'hashY_topic' => [array('I', [txnum, log_index]),]}
     self.unflushed_count = 0
     self.flush_count = 0
     self.comp_flush_count = -1
     self.comp_cursor = -1
     self.db_version = max(self.DB_VERSIONS)
     self.db = None
Example #18
 def __init__(self, coin: Type['Coin'], api: MemPoolAPI, refresh_secs=5.0, log_status_secs=60.0):
     assert isinstance(api, MemPoolAPI)
     self.coin = coin
     self.api = api
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.txs = {}  # type: Dict[bytes, MemPoolTx]  # txid->tx
     self.hashXs = defaultdict(set)  # type: Dict[Optional[bytes], Set[bytes]]  # hashX->txids
     self.txo_to_spender = {}  # type: Dict[Tuple[bytes, int], bytes]  # prevout->txid
     self.cached_compact_histogram = []
     self.refresh_secs = refresh_secs
     self.log_status_secs = log_status_secs
     # Prevents mempool refreshes during fee histogram calculation
     self.lock = Lock()
Example #19
 def __init__(self, bp, controller):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.daemon = bp.daemon
     self.controller = controller
     self.coin = bp.coin
     self.db = bp
     self.touched = set()
     self.stop = False
     self.txs = {}
     self.hashXs = defaultdict(set)  # None can be a key
     self.synchronized_event = asyncio.Event()
     self.fee_histogram = defaultdict(int)
     self.compact_fee_histogram = []
     self.histogram_time = 0
Example #20
 def __init__(self, coin, chain_state, tasks, add_new_block_callback):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.coin = coin
     self.chain_state = chain_state
     self.tasks = tasks
     self.touched = set()
     self.stop = False
     self.txs = {}
     self.hashXs = defaultdict(set)  # None can be a key
     self.synchronized_event = asyncio.Event()
     self.fee_histogram = defaultdict(int)
     self.compact_fee_histogram = []
     self.histogram_time = 0
     add_new_block_callback(self.on_new_block)
Example #21
 def __init__(self, bp):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.bp = bp
     self.caught_up = False
     # Access to fetched_height should be protected by the semaphore
     self.fetched_height = None
     self.semaphore = asyncio.Semaphore()
     self.refill_event = asyncio.Event()
     # The prefetched block cache size.  The min cache size has
     # little effect on sync time.
     self.cache_size = 0
     self.min_cache_size = 10 * 1024 * 1024
     # This makes the first fetch be 10 blocks
     self.ave_size = self.min_cache_size // 10
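The sizing arithmetic behind "the first fetch be 10 blocks": with a 10 MiB minimum cache and ave_size seeded to one tenth of it, a budget of min_cache_size // ave_size blocks comes out to exactly 10; as real blocks arrive, ave_size presumably tracks their observed average size and the count adapts. A hedged sketch of that rule (the count formula is inferred from the comments, not quoted from the source):

    min_cache_size = 10 * 1024 * 1024       # 10 MiB floor
    ave_size = min_cache_size // 10         # seed: 1 MiB per block
    count = min_cache_size // ave_size      # == 10 blocks initially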
Example #22
    def __init__(self, env, db, daemon, notifications):
        self.env = env
        self.db = db
        self.daemon = daemon
        self.notifications = notifications

        self.coin = env.coin
        self.blocks_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
        self.logger = class_logger(__name__, self.__class__.__name__)

        # Meta
        self.next_cache_check = 0
        self.touched = set()
        self.reorg_count = 0
        self.height = -1
        self.tip = None
        self.tx_count = 0
        self._caught_up_event = None

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []
        self.undo_infos = []

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()
        self.dup_tx_hashes = {
            hex_str_to_hash(
                '7702eaa0e042846d39d01eeb4c87f774913022e9958cfd714c5c2942af380569'
            ),
            hex_str_to_hash(
                'a5210b0bdfe0edaff3f1fb7ac24a379f55bbc51dcc224dc5efc04c1de8b30b2f'
            ),
            hex_str_to_hash(
                '1bf147bdaaad84364f6ff49661c66a0d7d4545c0eab2cd997d2ea0f3490393ec'
            ),
            hex_str_to_hash(
                '95e55038b16a4f6f81bbdcf3a44b0a76ffc76e395c57c0967229f26088d05fa7'
            ),
            hex_str_to_hash(
                '83890738940d7afd1f94a67db072f8fc4fdeea60c1f32e46f082f86ff4be3a48'
            ),
        }
Example #23
    def __init__(self):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        # For history compaction
        self.max_hist_row_entries = 12500
        self.unflushed = defaultdict(bytearray)
        self.unflushed_count = 0
        self.flush_count = 0
        self.comp_flush_count = -1
        self.comp_cursor = -1
        self.db_version = max(self.DB_VERSIONS)
        self.upgrade_cursor = -1

        # Key: address_hashX + flush_id
        # Value: sorted "list" of tx_nums in history of hashX
        self.db = None
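The key/value comment suggests a fixed-width key of the address hashX concatenated with a flush counter. A hedged sketch of such a layout (the 11-byte hashX width and the big-endian 16-bit flush_id are assumptions, not quoted from the source):

    import struct

    def history_key(hashX: bytes, flush_count: int) -> bytes:
        # Assumed layout: 11-byte hashX followed by a big-endian
        # 2-byte flush id, giving lexicographically ordered keys.
        assert len(hashX) == 11
        return hashX + struct.pack('>H', flush_count)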
Example #24
 def __init__(self, controller, kind):
     super().__init__(rpc_protocol=JSONRPCAutoDetect)
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     self.kind = kind  # 'RPC', 'TCP' etc.
     self.controller = controller
     self.bp = controller.bp
     self.env = controller.env
     self.daemon = self.bp.daemon
     self.client = 'unknown'
     self.client_version = (1, )
     self.anon_logs = self.env.anon_logs
     self.txs_sent = 0
     self.log_me = False
     self.bw_limit = self.env.bandwidth_limit
     self._orig_mr = self.rpc.message_received
Example #25
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info(f'switching current directory to {env.db_dir}')
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.history = History()
        self.eventlog = Eventlog()
        self.unflushed_hashYs = defaultdict(
            set)  # {blockHash => [hashY_topic, ]}, for reorg_chain
        self.hashY_db = None
        self.utxo_db = None
        self.utxo_flush_count = 0
        self.fs_height = -1
        self.fs_tx_count = 0
        self.db_height = -1
        self.db_tx_count = 0
        self.db_tip = None
        self.tx_counts = None
        self.last_flush = time.time()
        self.last_flush_tx_count = 0
        self.wall_time = 0
        self.first_sync = True
        self.db_version = -1

        self.logger.info(f'using {self.env.db_engine} for DB backend')

        # Header merkle cache
        self.merkle = Merkle()
        self.header_mc = MerkleCache(self.merkle, self.fs_block_hashes)

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
Example #26
    def __init__(self, env):
        self.logger = util.class_logger(__name__, self.__class__.__name__)
        self.env = env
        self.coin = env.coin

        # Setup block header size handlers
        if self.coin.STATIC_BLOCK_HEADERS:
            self.header_offset = self.coin.static_header_offset
            self.header_len = self.coin.static_header_len
        else:
            self.header_offset = self.dynamic_header_offset
            self.header_len = self.dynamic_header_len

        self.logger.info('switching current directory to {}'.format(
            env.db_dir))
        os.chdir(env.db_dir)

        self.db_class = db_class(self.env.db_engine)
        self.logger.info('using {} for DB backend'.format(self.env.db_engine))

        self.history = History()
        self.utxo_db = None
        self.open_dbs()

        self.logger.info('reorg limit is {:,d} blocks'.format(
            self.env.reorg_limit))

        self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
        self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
        self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
        if not self.coin.STATIC_BLOCK_HEADERS:
            self.headers_offsets_file = util.LogicalFile(
                'meta/headers_offsets', 2, 16000000)
            # Write the offset of the genesis block
            if self.headers_offsets_file.read(0, 8) != b'\x00' * 8:
                self.headers_offsets_file.write(0, b'\x00' * 8)

        # tx_counts[N] has the cumulative number of txs at the end of
        # height N.  So tx_counts[0] is 1 (the genesis coinbase)
        size = (self.db_height + 1) * 4
        tx_counts = self.tx_counts_file.read(0, size)
        assert len(tx_counts) == size
        self.tx_counts = array.array('I', tx_counts)
        if self.tx_counts:
            assert self.db_tx_count == self.tx_counts[-1]
        else:
            assert self.db_tx_count == 0
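Because tx_counts is cumulative, the number of transactions in any single block falls out by differencing adjacent entries; a short worked example under that definition:

    import array

    # tx_counts[N]: total txs in the chain through height N
    tx_counts = array.array('I', [1, 3, 7])
    assert tx_counts[0] == 1                       # genesis coinbase only
    txs_at_height_2 = tx_counts[2] - tx_counts[1]  # 4 txs in block 2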
Example #27
 def __init__(self, daemon: 'Daemon', coin: Type['Coin'], blocks_event: asyncio.Event):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.daemon = daemon
     self.coin = coin
     self.blocks_event = blocks_event
     self.blocks = []
     self.caught_up = False
     # Access to fetched_height should be protected by the semaphore
     self.fetched_height = None
     self.semaphore = asyncio.Semaphore()
     self.refill_event = asyncio.Event()
     # The prefetched block cache size.  The min cache size has
     # little effect on sync time.
     self.cache_size = 0
     self.min_cache_size = 10 * 1024 * 1024
     # This makes the first fetch be 10 blocks
     self.ave_size = self.min_cache_size // 10
     self.polling_delay = 5
Example #28
 def __init__(self, session_mgr, db, mempool, peer_mgr, kind):
     connection = JSONRPCConnection(JSONRPCAutoDetect)
     super().__init__(connection=connection)
     self.logger = util.class_logger(__name__, self.__class__.__name__)
     self.session_mgr = session_mgr
     self.db = db
     self.mempool = mempool
     self.peer_mgr = peer_mgr
     self.kind = kind  # 'RPC', 'TCP' etc.
     self.env = session_mgr.env
     self.coin = self.env.coin
     self.client = 'unknown'
     self.anon_logs = self.env.anon_logs
     self.txs_sent = 0
     self.log_me = False
     self.bw_limit = self.env.bandwidth_limit
     self.daemon_request = self.session_mgr.daemon_request
     # Hijack the connection so we can log messages
     self._receive_message_orig = self.connection.receive_message
     self.connection.receive_message = self.receive_message
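The "hijack" swaps the connection's receive_message for a session method, keeping the original in _receive_message_orig. A plausible shape for that wrapper, assuming the logging intent stated in the comment (the real method may do more):

    def receive_message(self, message):
        # Hypothetical wrapper: optionally log traffic for this session,
        # then delegate to the saved original handler.
        if self.log_me:
            self.logger.info(f'processing {message}')
        return self._receive_message_orig(message)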
Example #29
    def __init__(self, env: 'Env', db: DB, daemon: Daemon,
                 notifications: 'Notifications'):
        self.env = env
        self.db = db
        self.daemon = daemon
        self.notifications = notifications

        self.coin = env.coin
        # blocks_event: set when new blocks are put on the queue by the Prefetcher, to be processed
        self.blocks_event = asyncio.Event()
        self.prefetcher = Prefetcher(daemon, env.coin, self.blocks_event)
        self.logger = class_logger(__name__, self.__class__.__name__)

        # Meta
        self.next_cache_check = 0
        self.touched_hashxs = set()  # type: Set[bytes]
        self.touched_outpoints = set()  # type: Set[Tuple[bytes, int]]
        self.reorg_count = 0
        self.height = -1
        self.tip = None  # type: Optional[bytes]
        self.tip_advanced_event = asyncio.Event()
        self.tx_count = 0
        self._caught_up_event = None

        # Caches of unflushed items.
        self.headers = []
        self.tx_hashes = []  # type: List[bytes]
        self.undo_tx_hashes = []  # type: List[bytes]
        self.undo_historical_spends = []  # type: List[bytes]
        self.undo_infos = []  # type: List[Tuple[Sequence[bytes], int]]

        # UTXO cache
        self.utxo_cache = {}
        self.db_deletes = []

        # If the lock is successfully acquired, in-memory chain state
        # is consistent with self.height
        self.state_lock = asyncio.Lock()

        # Signalled after backing up during a reorg
        self.backed_up_event = asyncio.Event()
Example #30
 def __init__(self, env):
     self.logger = class_logger(__name__, self.__class__.__name__)
     self.coin = env.coin
     self.set_urls(env.coin.daemon_urls(env.daemon_url))
     self._height = None
     self._mempool_hashes = set()
     self.mempool_refresh_event = asyncio.Event()
     # Limit concurrent RPC calls to this number.
     # See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
     self.workqueue_semaphore = asyncio.Semaphore(value=10)
     self.down = False
     self.last_error_time = 0
     self.req_id = 0
     # One of these two names does not exist in the installed aiohttp
     # version; asyncio.TimeoutError is assigned to that slot as a
     # placeholder, so such assignments are essentially ignored
     if aiohttp.__version__.startswith('1.'):
         self.ClientHttpProcessingError = aiohttp.ClientHttpProcessingError
         self.ClientPayloadError = asyncio.TimeoutError
     else:
         self.ClientHttpProcessingError = asyncio.TimeoutError
         self.ClientPayloadError = aiohttp.ClientPayloadError
     self._available_rpcs = {}  # caches results for _is_rpc_available()
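The point of the version shim is that callers can catch both attributes unconditionally: whichever one does not exist in the installed aiohttp is aliased to asyncio.TimeoutError, so the except clause stays valid under either 1.x or 2.x. A hedged sketch of a call site (_send_data is an illustrative name, not the actual method):

    try:
        result = await self._send_data(data)
    except (self.ClientHttpProcessingError, self.ClientPayloadError):
        # Valid on both aiohttp 1.x and 2.x thanks to the shim above.
        self.logger.error('connection problem, retrying')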