Code example #1
    def _compact_hashX(self, hashX, hist_map, hist_list, write_items,
                       keys_to_delete):
        '''Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashX {} is large: {:,d} entries across '
                             '{:,d} rows'.format(hash_to_str(hashX),
                                                 len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack('>H', n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
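
The example above builds each on-disk row key by appending a 2-byte big-endian row index to the hashX, then splits the flattened history into fixed-size rows. A minimal, self-contained sketch of that layout, with a hypothetical 11-byte hashX and an illustrative tx-number encoding (the history_rows helper is not part of electrumx):

from struct import pack

def history_rows(hashX, full_hist, max_row_size):
    '''Yield (key, chunk) pairs: key is hashX plus a 2-byte big-endian row index.'''
    for offset in range(0, len(full_hist), max_row_size):
        chunk = full_hist[offset:offset + max_row_size]
        yield hashX + pack('>H', offset // max_row_size), chunk

# Three 4-byte tx numbers packed into one history blob, two entries per row.
hashX = bytes(11)                                   # hypothetical hashX value
full_hist = b''.join(pack('<I', txnum) for txnum in (7, 8, 9))
rows = list(history_rows(hashX, full_hist, max_row_size=2 * 4))
assert [key[-2:] for key, _ in rows] == [b'\x00\x00', b'\x00\x01']
assert len(rows[0][1]) == 8 and len(rows[1][1]) == 4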
Code example #2
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.flush(True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.db.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except FileNotFoundError:
                return await self.daemon.raw_blocks(hex_hashes)

        def flush_backup():
            # self.touched can include other addresses which is
            # harmless, but remove None.
            self.touched.discard(None)
            self.db.flush_backup(self.flush_data(), self.touched)

        _start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
            await self.run_in_thread_with_lock(flush_backup)
            last -= len(raw_blocks)
        await self.prefetcher.reset_height(self.height)
Code example #3
File: block_processor.py  Project: bleach86/electrumx
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.tasks.run_in_thread(self.flush, True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except Exception:
                return await self.daemon.raw_blocks(hex_hashes)

        start, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        last = start + count - 1
        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            async with self.state_lock:
                await self.tasks.run_in_thread(self.backup_blocks, raw_blocks)
            last -= len(raw_blocks)
        # Truncate header_mc: header count is 1 more than the height
        self.header_mc.truncate(self.height + 1)
        await self.prefetcher.reset_height(self.height)
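
The two reorg_chain variants above (examples #2 and #3) share the same height bookkeeping: reorg_hashes is assumed to return block hashes in ascending height order, they are reversed so the tip comes first, and backup proceeds in batches of 50 while last steps down by the batch size. A standalone sketch with hypothetical numbers:

start, count = 700_000, 120                      # hypothetical fork height and reorg depth
hashes = list(range(start, start + count))       # stand-ins for block hashes, ascending by height
hashes.reverse()                                 # newest block first
last = start + count - 1                         # tip height, as computed in example #3

for i in range(0, len(hashes), 50):              # same batching as chunks(hashes, 50)
    batch = hashes[i:i + 50]
    heights = list(range(last, last - len(batch), -1))  # heights read for this batch, tip-down
    assert heights == batch                      # the stand-in 'hashes' are just heights
    last -= len(batch)

assert last == start - 1                         # every block above the fork was handled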
Code example #4
    def flush_backup(self, flush_data: FlushData, touched_hashxs):
        '''Like flush_dbs() but when backing up.  All UTXOs are flushed.'''
        assert not flush_data.headers
        assert not flush_data.block_tx_hashes
        assert flush_data.height < self.db_height
        self.history.assert_flushed()
        assert len(flush_data.undo_block_tx_hashes
                   ) == self.db_height - flush_data.height

        start_time = time.time()
        tx_delta = flush_data.tx_count - self.last_flush_tx_count

        tx_hashes = []
        for block in flush_data.undo_block_tx_hashes:
            tx_hashes += [*util.chunks(block, 32)]
        flush_data.undo_block_tx_hashes.clear()
        assert len(tx_hashes) == -tx_delta

        self.backup_fs(flush_data.height, flush_data.tx_count)
        self.history.backup(
            hashXs=touched_hashxs,
            tx_count=flush_data.tx_count,
            tx_hashes=tx_hashes,
            spends=flush_data.undo_historical_spends,
        )
        flush_data.undo_historical_spends.clear()
        with self.utxo_db.write_batch() as batch:
            self.flush_utxo_db(batch, flush_data)
            # Flush state last as it reads the wall time.
            self.flush_state(batch)

        elapsed = self.last_flush - start_time
        self.logger.info(f'backup flush took '
                         f'{elapsed:.1f}s.  Height {flush_data.height:,d} '
                         f'txs: {flush_data.tx_count:,d} ({tx_delta:+,d})')
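
A tiny illustration (hypothetical numbers) of the bookkeeping asserted above: backing up lowers the tx count, so tx_delta is negative, and the undo blobs, once split into 32-byte tx hashes via util.chunks(block, 32), must account for exactly -tx_delta transactions:

last_flush_tx_count = 1_000                      # tx count at the previous flush
tx_count = 997                                   # tx count after backing up 3 txs
tx_delta = tx_count - last_flush_tx_count        # -3

undo_blob = bytes(3 * 32)                        # three fake 32-byte tx hashes, concatenated
tx_hashes = [undo_blob[i:i + 32] for i in range(0, len(undo_blob), 32)]
assert len(tx_hashes) == -tx_delta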
Code example #5
    async def _process_mempool(self, all_hashes, touched, mempool_height):
        # Re-sync with the new set of hashes
        txs = self.txs
        hashXs = self.hashXs

        if mempool_height != self.api.db_height():
            raise DBSyncError

        # First handle txs that have disappeared
        for tx_hash in (set(txs) - all_hashes):
            tx = txs.pop(tx_hash)
            tx_hashXs = {hashX for hashX, value in tx.in_pairs}
            tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
            for hashX in tx_hashXs:
                hashXs[hashX].remove(tx_hash)
                if not hashXs[hashX]:
                    del hashXs[hashX]
            touched |= tx_hashXs

        # Process new transactions
        new_hashes = list(all_hashes.difference(txs))
        if new_hashes:
            group = TaskGroup()
            for hashes in chunks(new_hashes, 200):
                coro = self._fetch_and_accept(hashes, all_hashes, touched)
                await group.spawn(coro)
            if mempool_height != self.api.db_height():
                raise DBSyncError

            tx_map = {}
            utxo_map = {}
            async for task in group:
                deferred, unspent = task.result()
                tx_map.update(deferred)
                utxo_map.update(unspent)

            prior_count = 0
            # FIXME: this is not particularly efficient
            while tx_map and len(tx_map) != prior_count:
                prior_count = len(tx_map)
                tx_map, utxo_map = self._accept_transactions(
                    tx_map, utxo_map, touched)
            if tx_map:
                self.logger.error(f'{len(tx_map)} txs dropped')

        return touched
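
The closing while-loop above retries transactions that were deferred because they spend outputs of other mempool transactions not yet accepted, stopping once a pass makes no progress; whatever remains is logged as dropped. The toy below isolates that fixed-point pattern with a hypothetical accept_round helper (not the real _accept_transactions):

def accept_round(deferred, accepted):
    '''Accept every tx whose parents are all accepted; return the rest.'''
    still_deferred = {}
    for txid, parents in deferred.items():
        if all(parent in accepted for parent in parents):
            accepted.add(txid)
        else:
            still_deferred[txid] = parents
    return still_deferred

accepted = {'a'}                                       # already-accepted txs
deferred = {'b': ['a'], 'c': ['b'], 'z': ['missing']}  # txs waiting on parents
prior_count = 0
while deferred and len(deferred) != prior_count:       # stop once a pass makes no progress
    prior_count = len(deferred)
    deferred = accept_round(deferred, accepted)

assert accepted == {'a', 'b', 'c'}
assert deferred == {'z': ['missing']}                  # would be reported as '1 txs dropped'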
Code example #6
File: mempool.py  Project: dgtorpheas/electrumx
    async def _process_mempool(self, all_hashes):
        # Re-sync with the new set of hashes
        txs = self.txs
        hashXs = self.hashXs
        touched = set()

        # First handle txs that have disappeared
        for tx_hash in set(txs).difference(all_hashes):
            tx = txs.pop(tx_hash)
            tx_hashXs = set(hashX for hashX, value in tx.in_pairs)
            tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
            for hashX in tx_hashXs:
                hashXs[hashX].remove(tx_hash)
                if not hashXs[hashX]:
                    del hashXs[hashX]
            touched.update(tx_hashXs)

        # Process new transactions
        new_hashes = list(all_hashes.difference(txs))
        jobs = [
            self.tasks.create_task(
                self._fetch_and_accept(hashes, all_hashes, touched))
            for hashes in chunks(new_hashes, 2000)
        ]
        if jobs:
            await asyncio.wait(jobs)
            tx_map = {}
            utxo_map = {}
            for job in jobs:
                deferred, unspent = job.result()
                tx_map.update(deferred)
                utxo_map.update(unspent)

            # Handle the stragglers
            if len(tx_map) >= 10:
                self.logger.info(f'{len(tx_map)} stragglers')
            prior_count = 0
            # FIXME: this is not particularly efficient
            while tx_map and len(tx_map) != prior_count:
                prior_count = len(tx_map)
                tx_map, utxo_map = self._accept_transactions(
                    tx_map, utxo_map, touched)
            if tx_map:
                self.logger.info(f'{len(tx_map)} txs dropped')

        return touched
Code example #7
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks)
        await self.prefetcher.reset_height()
Code example #8
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await run_in_thread(self.flush, True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except Exception:
                return await self.daemon.raw_blocks(hex_hashes)

        start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        # get saved eventlog hashYs
        if hashes:
            eventlog_hashYs = reduce(
                operator.add, [self.get_block_hashYs(x) for x in hashes])
        else:
            eventlog_hashYs = []
        self.logger.info('chain reorg eventlog_hashYs {} {}'.format(
            eventlog_hashYs, hashes))

        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_shielded(self.backup_blocks_eventlogs,
                                              raw_blocks, eventlog_hashYs)
            last -= len(raw_blocks)
        # Truncate header_mc: header count is 1 more than the height.
        # Note header_mc is None if the reorg happens at startup.
        if self.header_mc:
            self.header_mc.truncate(self.height + 1)
        await self.prefetcher.reset_height(self.height)
Code example #9
File: test_util.py  Project: reubenyap/electrumx
def test_chunks():
    assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
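
The test above pins down the behaviour of util.chunks; a generator consistent with that expected output could look like the sketch below (an assumption about the helper's shape, not a copy of the electrumx source):

def chunks(items, size):
    '''Yield successive size-length slices of items; the last may be shorter.'''
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]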
Code example #10
    async def _process_mempool(self, all_hashes, touched, mempool_height):
        # Re-sync with the new set of hashes
        txs = self.txs
        hashXs = self.hashXs

        tx_to_create = self.tx_to_asset_create
        tx_to_reissue = self.tx_to_asset_reissue
        creates = self.asset_creates
        reissues = self.asset_reissues

        if mempool_height != self.api.db_height():
            raise DBSyncError

        # First handle txs that have disappeared
        for tx_hash in set(txs).difference(all_hashes):
            tx = txs.pop(tx_hash)

            reissued_asset = tx_to_reissue.pop(tx_hash, None)
            if reissued_asset:
                del reissues[reissued_asset]

            created_asset = tx_to_create.pop(tx_hash, None)
            if created_asset:
                del creates[created_asset]

            tx_hashXs = set(hashX for hashX, value, _, _ in tx.in_pairs)
            tx_hashXs.update(hashX for hashX, value, _, _ in tx.out_pairs)
            for hashX in tx_hashXs:
                hashXs[hashX].remove(tx_hash)
                if not hashXs[hashX]:
                    del hashXs[hashX]
            touched.update(tx_hashXs)

        # Process new transactions
        new_hashes = list(all_hashes.difference(txs))
        if new_hashes:
            group = TaskGroup()
            for hashes in chunks(new_hashes, 200):
                coro = self._fetch_and_accept(hashes, all_hashes, touched)
                await group.spawn(coro)

            tx_map = {}
            utxo_map = {}
            async for task in group:
                (deferred,
                 unspent), internal_creates, internal_reissues = task.result()

                # Store asset changes
                for asset, stats in internal_creates.items():
                    tx_to_create[hex_str_to_hash(
                        stats['source']['tx_hash'])] = asset
                    creates[asset] = stats

                for asset, stats in internal_reissues.items():
                    tx_to_reissue[hex_str_to_hash(
                        stats['source']['tx_hash'])] = asset
                    reissues[asset] = stats

                tx_map.update(deferred)
                utxo_map.update(unspent)

            prior_count = 0
            # FIXME: this is not particularly efficient
            while tx_map and len(tx_map) != prior_count:
                prior_count = len(tx_map)
                tx_map, utxo_map = self._accept_transactions(
                    tx_map, utxo_map, touched)
            if tx_map:
                self.logger.error(f'{len(tx_map)} txs dropped')

        return touched
Code example #11
    async def _process_mempool(
        self,
        *,
        all_hashes: Set[bytes],  # set of txids
        touched_hashxs: Set[bytes],  # set of hashXs
        touched_outpoints: Set[Tuple[bytes, int]],  # set of outpoints
        mempool_height: int,
    ) -> None:
        # Re-sync with the new set of hashes
        txs = self.txs
        hashXs = self.hashXs
        txo_to_spender = self.txo_to_spender

        if mempool_height != self.api.db_height():
            raise DBSyncError

        # First handle txs that have disappeared
        for tx_hash in (set(txs) - all_hashes):
            tx = txs.pop(tx_hash)
            # hashXs
            tx_hashXs = {hashX for hashX, value in tx.in_pairs}
            tx_hashXs.update(hashX for hashX, value in tx.out_pairs)
            for hashX in tx_hashXs:
                hashXs[hashX].remove(tx_hash)
                if not hashXs[hashX]:
                    del hashXs[hashX]
            touched_hashxs |= tx_hashXs
            # outpoints
            for prevout in tx.prevouts:
                del txo_to_spender[prevout]
                touched_outpoints.add(prevout)
            for out_idx, out_pair in enumerate(tx.out_pairs):
                touched_outpoints.add((tx_hash, out_idx))

        # Process new transactions
        new_hashes = list(all_hashes.difference(txs))
        if new_hashes:
            group = TaskGroup()
            for hashes in chunks(new_hashes, 200):
                coro = self._fetch_and_accept(
                    hashes=hashes,
                    all_hashes=all_hashes,
                    touched_hashxs=touched_hashxs,
                    touched_outpoints=touched_outpoints,
                )
                await group.spawn(coro)
            if mempool_height != self.api.db_height():
                raise DBSyncError

            tx_map = {}
            utxo_map = {}
            async for task in group:
                deferred, unspent = task.result()
                tx_map.update(deferred)
                utxo_map.update(unspent)

            prior_count = 0
            # FIXME: this is not particularly efficient
            while tx_map and len(tx_map) != prior_count:
                prior_count = len(tx_map)
                tx_map, utxo_map = self._accept_transactions(
                    tx_map=tx_map,
                    utxo_map=utxo_map,
                    touched_hashxs=touched_hashxs,
                    touched_outpoints=touched_outpoints,
                )
            if tx_map:
                self.logger.error(f'{len(tx_map)} txs dropped')