Example #1
    def _read_tx_parts(self):
        '''Return a (deserialized TX, tx_hash, vsize) tuple.'''
        start = self.cursor
        tx_version = self._get_version()
        if tx_version == self.bitcoin_diamond_tx_version:
            marker = self.binary[self.cursor + 4 + 32]
        else:
            marker = self.binary[self.cursor + 4]

        if marker:
            tx = super().read_tx()
            tx_hash = self.TX_HASH_FN(self.binary[start:self.cursor])
            return tx, tx_hash, self.binary_length

        # Ugh, this is nasty.
        version = self._read_le_int32()
        present_block_hash = None
        if version == self.bitcoin_diamond_tx_version:
            present_block_hash = hash_to_hex_str(self._read_nbytes(32))
        orig_ser = self.binary[start:self.cursor]

        marker = self._read_byte()
        flag = self._read_byte()

        start = self.cursor
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        orig_ser += self.binary[start:self.cursor]

        base_size = self.cursor - start
        witness = self._read_witness(len(inputs))

        start = self.cursor
        locktime = self._read_le_uint32()
        orig_ser += self.binary[start:self.cursor]
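        # BIP 141-style virtual size: count the non-witness bytes (base_size)
        # three extra times on top of the full serialized length, then divide
        # the resulting weight by 4.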
        vsize = (3 * base_size + self.binary_length) // 4

        if present_block_hash is not None:
            return TxBitcoinDiamondSegWit(
                version, present_block_hash, marker, flag, inputs, outputs,
                witness, locktime), self.TX_HASH_FN(orig_ser), vsize
        else:
            return TxSegWit(version, marker, flag, inputs, outputs, witness,
                            locktime), self.TX_HASH_FN(orig_ser), vsize
Example #2
    def read_utxo_state(self):
        state = self.utxo_db.get(b'state')
        if not state:
            self.db_height = -1
            self.db_tx_count = 0
            self.db_tip = b'\0' * 32
            self.db_version = max(self.DB_VERSIONS)
            self.utxo_flush_count = 0
            self.wall_time = 0
            self.first_sync = True
        else:
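            # ast.literal_eval safely evaluates the repr()'d dict literal
            # without executing arbitrary code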
            state = ast.literal_eval(state.decode())
            if not isinstance(state, dict):
                raise self.DBError('failed reading state from DB')
            self.db_version = state['db_version']
            if self.db_version not in self.DB_VERSIONS:
                raise self.DBError('your UTXO DB version is {} but this '
                                   'software only handles versions {}'.format(
                                       self.db_version, self.DB_VERSIONS))
            # backwards compat
            genesis_hash = state['genesis']
            if isinstance(genesis_hash, bytes):
                genesis_hash = genesis_hash.decode()
            if genesis_hash != self.coin.GENESIS_HASH:
                raise self.DBError(
                    'DB genesis hash {} does not match coin {}'.format(
                        genesis_hash, self.coin.GENESIS_HASH))
            self.db_height = state['height']
            self.db_tx_count = state['tx_count']
            self.db_tip = state['tip']
            self.utxo_flush_count = state['utxo_flush_count']
            self.wall_time = state['wall_time']
            self.first_sync = state['first_sync']

        # Log some stats
        self.logger.info('DB version: {:d}'.format(self.db_version))
        self.logger.info('coin: {}'.format(self.coin.NAME))
        self.logger.info('network: {}'.format(self.coin.NET))
        self.logger.info('height: {:,d}'.format(self.db_height))
        self.logger.info('tip: {}'.format(hash_to_hex_str(self.db_tip)))
        self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
        if self.first_sync:
            self.logger.info('sync time so far: {}'.format(
                util.formatted_time(self.wall_time)))
Example #3
    def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return the 33-byte value.

        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        '''
        # Fast track: it may be in the cache
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            return cache_value

        # Spend it from the DB.

        # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
        # Value: hashX
        prefix = b'h' + tx_hash[:4] + idx_packed
        candidates = {db_key: hashX for db_key, hashX
                      in self.db.utxo_db.iterator(prefix=prefix)}

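        # The 4-byte tx_hash prefix in the 'h' key can collide, so more than
        # one candidate may match; resolve each tx_num to its full hash to
        # identify the right entry.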
        for hdb_key, hashX in candidates.items():
            tx_num_packed = hdb_key[-4:]

            if len(candidates) > 1:
                tx_num, = unpack('<I', tx_num_packed)
                hash, height = self.db.fs_tx_hash(tx_num)
                if hash != tx_hash:
                    assert hash is not None  # Should always be found
                    continue

            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            udb_key = b'u' + hashX + hdb_key[-6:]
            utxo_value_packed = self.db.utxo_db.get(udb_key)
            if utxo_value_packed:
                # Remove both entries for this UTXO
                self.db_deletes.append(hdb_key)
                self.db_deletes.append(udb_key)
                return hashX + tx_num_packed + utxo_value_packed

        raise ChainError('UTXO {} / {:,d} not found in "h" table'
                         .format(hash_to_hex_str(tx_hash), tx_idx))
Example #4
    async def transactions(self, hashX):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
        entries for the hashX.

        unconfirmed is True if any txin is unconfirmed.
        '''
        deserializer = self.coin.DESERIALIZER
        pairs = await self.raw_transactions(hashX)
        result = []
        for hex_hash, raw_tx in pairs:
            item = self.txs.get(hex_hash)
            if not item or not raw_tx:
                continue
            tx_fee = item[2]
            tx = deserializer(raw_tx).read_tx()
            unconfirmed = any(hash_to_hex_str(txin.prev_hash) in self.txs
                              for txin in tx.inputs)
            result.append((hex_hash, tx_fee, unconfirmed))
        return result
Example #5
    async def _fetch_and_accept(self, hashes, all_hashes, touched):
        '''Fetch a list of mempool transactions.'''
        hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
        raw_txs = await self.api.raw_transactions(hex_hashes_iter)

        def deserialize_txs():    # This function is pure
            to_hashX = self.coin.hashX_from_script
            deserializer = self.coin.DESERIALIZER

            txs = {}
            for hash, raw_tx in zip(hashes, raw_txs):
                # The daemon may have evicted the tx from its
                # mempool or it may have gotten in a block
                if not raw_tx:
                    continue
                tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()
                # Convert the inputs and outputs into (hashX, value) pairs
                # Drop generation-like inputs from MemPoolTx.prevouts
                txin_pairs = tuple((txin.prev_hash, txin.prev_idx)
                                   for txin in tx.inputs
                                   if not txin.is_generation())
                
                txout_pairs = tuple((to_hashX(txout.pk_script), txout.value)
                                    for txout in tx.outputs
                                    if txout.version == 1)
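                # in_pairs (None) and fee (0) are placeholders, presumably
                # filled in later (e.g. by _accept_transactions) once prevout
                # values are known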
                txs[hash] = MemPoolTx(txin_pairs, None, txout_pairs,
                                      0, tx_size)
            return txs

        # Thread this potentially slow operation so as not to block
        tx_map = await run_in_thread(deserialize_txs)

        # Determine all prevouts not in the mempool, and fetch the
        # UTXO information from the database.  Failed prevout lookups
        # return None (concurrent database updates can happen), which
        # is relied upon by _accept_transactions.  Ignore prevouts
        # that are generation-like.
        prevouts = tuple(prevout for tx in tx_map.values()
                         for prevout in tx.prevouts
                         if prevout[0] not in all_hashes)
        utxos = await self.api.lookup_utxos(prevouts)
        utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)}

        return self._accept_transactions(tx_map, utxo_map, touched)
Example #6
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.flush(True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.db.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except FileNotFoundError:
                return await self.daemon.raw_blocks(hex_hashes)

        def flush_backup():
            # self.touched can include other addresses which is
            # harmless, but remove None.
            self.touched.discard(None)
            self.eventlog_touched.discard(None)
            self.db.flush_backup(self.flush_data(), self.touched, self.eventlog_touched)

        start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        # get saved eventlog hashYs
        if hashes:
            eventlog_hashYs = reduce(operator.add, [self.db.get_block_hashYs(x) for x in hashes])
        else:
            eventlog_hashYs = []
        self.logger.info('chain reorg eventlog_hashYs {} {}'.format(eventlog_hashYs, hashes))

        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_with_lock(self.backup_blocks_eventlogs, raw_blocks, eventlog_hashYs)
            await self.run_in_thread_with_lock(flush_backup)
            last -= len(raw_blocks)
        await self.prefetcher.reset_height(self.height)
Example #7
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await run_in_thread(self.flush, True)

        async def get_raw_blocks(last_height, hex_hashes):
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except Exception:
                return await self.daemon.raw_blocks(hex_hashes)

        start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        # get saved eventlog hashYs
        if hashes:
            eventlog_hashYs = reduce(
                operator.add, [self.get_block_hashYs(x) for x in hashes])
        else:
            eventlog_hashYs = []
        self.logger.info('chain reorg eventlog_hashYs {} {}'.format(
            eventlog_hashYs, hashes))

        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_shielded(self.backup_blocks_eventlogs,
                                              raw_blocks, eventlog_hashYs)
            last -= len(raw_blocks)
        # Truncate header_mc: header count is 1 more than the height.
        # Note header_mc is None if the reorg happens at startup.
        if self.header_mc:
            self.header_mc.truncate(self.height + 1)
        await self.prefetcher.reset_height(self.height)
Example #8
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks)
        # Truncate header_mc: header count is 1 more than the height
        self.header_mc.truncate(self.height + 1)
        await self.prefetcher.reset_height()
Example #9
    async def _fetch_and_accept(self, hashes, all_hashes, touched):
        '''Fetch a list of mempool transactions.'''
        hex_hashes_iter = (hash_to_hex_str(hash) for hash in hashes)
        raw_txs = await self.api.raw_transactions(hex_hashes_iter)

        # Thread this potentially slow operation so as not to block
        tx_map = await run_in_thread(self._deserialize_txs, hashes, raw_txs)

        # Determine all prevouts not in the mempool, and fetch the
        # UTXO information from the database.  Failed prevout lookups
        # return None (concurrent database updates can happen), which
        # is relied upon by _accept_transactions.  Ignore prevouts
        # that are generation-like.
        prevouts = tuple(prevout for tx in tx_map.values()
                         for prevout in tx.prevouts
                         if prevout[0] not in all_hashes)
        utxos = await self.api.lookup_utxos(prevouts)
        utxo_map = {prevout: utxo for prevout, utxo in zip(prevouts, utxos)}

        return self._accept_transactions(tx_map, utxo_map, touched)
Example #10
    async def reorg_hashes(self, count):
        '''Return a pair (start, hashes) of blocks to back up during a
        reorg.

        The hashes are returned in order of increasing height.  Start
        is the height of the first hash.
        '''

        def diff_pos(hashes1, hashes2):
            '''Returns the index of the first difference in the hash lists.
            If both lists match returns their length.'''
            for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
                if hash1 != hash2:
                    return n
            return len(hashes1)

        if count is None:
            # A real reorg
            start = self.height - 1
            count = 1
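            # Search backwards from the tip, doubling the window each pass,
            # until a block matching the daemon's chain is found.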
            while start > 0:
                hashes = self.fs_block_hashes(start, count)
                hex_hashes = [hash_to_hex_str(hash) for hash in hashes]
                d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
                n = diff_pos(hex_hashes, d_hex_hashes)
                if n > 0:
                    start += n
                    break
                count = min(count * 2, start)
                start -= count

            count = (self.height - start) + 1
        else:
            start = (self.height - count) + 1

        s = '' if count == 1 else 's'
        self.logger.info('chain was reorganised replacing {:,d} block{} at '
                         'heights {:,d}-{:,d}'
                         .format(count, s, start, start + count - 1))

        return start, self.fs_block_hashes(start, count)
Example #11
    async def transaction_merkle(self, tx_hash, height):
        '''Return the merkle branch to a confirmed transaction given its hash
        and height.

        tx_hash: the transaction hash as a hexadecimal string
        height: the height of the block it is in
        '''
        assert_tx_hash(tx_hash)
        block_hash, tx_hashes = await self.block_hash_and_tx_hashes(height)
        try:
            pos = tx_hashes.index(tx_hash)
        except ValueError:
            raise RPCError(
                BAD_REQUEST, f'tx hash {tx_hash} not in '
                f'block {block_hash} at height {height:,d}')

        hashes = [hex_str_to_hash(hash) for hash in tx_hashes]
        branch, root = self.bp.merkle.branch_and_root(hashes, pos)
        branch = [hash_to_hex_str(hash) for hash in branch]

        return {"block_height": height, "merkle": branch, "pos": pos}
Example #12
    async def _reorg_hashes(self, count):
        '''Return a pair (start, hashes) of blocks to back up during a
        reorg.

        The hashes are returned in order of increasing height.  Start
        is the height of the first hash, last of the last.
        '''
        start, count = await self._calc_reorg_range(count)
        last = start + count - 1
        if count == 1:
            logger.info(
                f'chain was reorganised replacing 1 block at height {start:,d}'
            )
        else:
            logger.info(
                f'chain was reorganised replacing {count:,d} blocks at heights '
                f'{start:,d}-{last:,d}')

        hashes = await self.db.fs_block_hashes(start, count)
        hex_hashes = [hash_to_hex_str(block_hash) for block_hash in hashes]
        return start, hex_hashes
Example #13
    def db_utxo_lookup(self, tx_hash, tx_idx):
        '''Given a prevout return a (hashX, value) pair.

        Raises MissingUTXOError if the UTXO is not found.  Used by the
        mempool code.
        '''
        idx_packed = pack('<H', tx_idx)
        hashX, tx_num_packed = self._db_hashX(tx_hash, idx_packed)
        if not hashX:
            # This can happen when the daemon is a block ahead of us
            # and has mempool txs spending outputs from that new block
            raise self.MissingUTXOError

        # Key: b'u' + address_hashX + tx_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer
        key = b'u' + hashX + idx_packed + tx_num_packed
        db_value = self.utxo_db.get(key)
        if not db_value:
            raise self.DBError('UTXO {} / {:,d} in one table only'.format(
                hash_to_hex_str(tx_hash), tx_idx))
        value, = unpack('<Q', db_value)
        return hashX, value
Example #14
    async def _reorg_chain(self, count):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.flush(True)

        async def get_raw_block(hex_hash, height):
            try:
                block = self.db.read_raw_block(height)
                self.logger.info(
                    f'read block {hex_hash} at height {height:,d} from disk')
            except FileNotFoundError:
                block = (await self.daemon.raw_blocks([hex_hash]))[0]
                self.logger.info(
                    f'obtained block {hex_hash} at height {height:,d} from daemon'
                )
            return block

        _start, height, hashes = await self._reorg_hashes(count)
        hex_hashes = [hash_to_hex_str(block_hash) for block_hash in hashes]
        for hex_hash in reversed(hex_hashes):
            raw_block = await get_raw_block(hex_hash, height)
            await self._backup_block(raw_block)
            # self.touched can include other addresses which is harmless, but remove None.
            self.touched.discard(None)
            self.db.flush_backup(self.flush_data(), self.touched)
            height -= 1

        self.logger.info('backed up to height {:,d}'.format(self.height))

        await self.prefetcher.reset_height(self.height)
        self.backed_up_event.set()
        self.backed_up_event.clear()
Example #15
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info(f'faking a reorg of {count:,d} blocks')
        await self.flush(True)

        async def get_raw_blocks(last_height, hex_hashes) -> Sequence[bytes]:
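            # hex_hashes runs from the chain tip downwards, so heights descend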
            heights = range(last_height, last_height - len(hex_hashes), -1)
            try:
                blocks = [self.db.read_raw_block(height) for height in heights]
                self.logger.info(f'read {len(blocks)} blocks from disk')
                return blocks
            except FileNotFoundError:
                return await self.daemon.raw_blocks(hex_hashes)

        def flush_backup():
            # self.touched_hashxs can include other addresses which is
            # harmless, but remove None.
            self.touched_hashxs.discard(None)
            self.db.flush_backup(self.flush_data(), self.touched_hashxs)

        _start, last, hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_hex_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            raw_blocks = await get_raw_blocks(last, hex_hashes)
            await self.run_in_thread_with_lock(self.backup_blocks, raw_blocks)
            await self.run_in_thread_with_lock(flush_backup)
            last -= len(raw_blocks)
        await self.prefetcher.reset_height(self.height)
        self.backed_up_event.set()
        self.backed_up_event.clear()
Example #16
    def _compact_hashX(self, hashX, hist_map, hist_list,
                       write_items, keys_to_delete):
        '''Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 5 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 5
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashX {} is large: {:,d} entries across '
                             '{:,d} rows'
                             .format(hash_to_hex_str(hashX),
                                     len(full_hist) // 5, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        n = 0   # In case of no loops
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack_be_uint16(n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
Example #17
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[68:80])

        deserializer = cls.DESERIALIZER(header, start=cls.BASIC_HEADER_SIZE)
        sig_length = deserializer.read_varint()
        header = {
            'block_height': height,
            'version': version,
            'prev_block_hash': hash_to_hex_str(header[4:36]),
            'merkle_root': hash_to_hex_str(header[36:68]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'hash_state_root': hash_to_hex_str(header[80:112]),
            'hash_utxo_root': hash_to_hex_str(header[112:144]),
            'hash_prevout_stake': hash_to_hex_str(header[144:176]),
            'hash_prevout_n': struct.unpack('<I', header[176:180])[0],
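            # header[:-sig_length - 1:-1] is the trailing signature reversed;
            # hash_to_hex_str reverses again, giving the sig bytes in order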
            'sig': hash_to_hex_str(header[:-sig_length - 1:-1]),
        }
        return header
Example #18
 async def confirmed_and_unconfirmed_history(self, hashX):
     # Note history is ordered but unconfirmed is unordered in e-s
     history = await self.controller.get_history(hashX)
     conf = [{'tx_hash': hash_to_hex_str(tx_hash), 'height': height}
             for tx_hash, height in history]
     return conf + await self.unconfirmed_history(hashX)
Example #19
 async def mempool_hashes(self):
     '''Query bitcoind for the hashes of all transactions in its
     mempool, returned as a list.'''
     await sleep(0)
     return [hash_to_hex_str(hash) for hash in self.txs]
Example #20
 def __str__(self):
     prev_hash = hash_to_hex_str(self.prev_hash)
     return (f"Input({prev_hash}, {self.prev_idx:d}, tree={self.tree}, "
             f"sequence={self.sequence:d})")
Example #21
 def __str__(self):
     prev_hash = hash_to_hex_str(self.prev_hash)
     return ("Input({}, {:d}, tree={}, sequence={:d})"
             .format(prev_hash, self.prev_idx, self.tree, self.sequence))
Example #22
    def process_raw_txs(self, raw_tx_map, pending):
        '''Process the dictionary of raw transactions and return a dictionary
        of updates to apply to self.txs.

        This runs in the executor so should not update any member
        variables it doesn't own.  Atomic reads of self.txs that do
        not depend on the result remaining the same are fine.
        '''
        script_hashX = self.coin.hashX_from_script
        deserializer = self.coin.DESERIALIZER
        db_utxo_lookup = self.db.db_utxo_lookup
        txs = self.txs

        # Deserialize each tx and put it in a pending list
        for tx_hash, raw_tx in raw_tx_map.items():
            if tx_hash not in txs:
                continue
            tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()

            # Convert the tx outputs into (hashX, value) pairs
            txout_pairs = [(script_hashX(txout.pk_script), txout.value)
                           for txout in tx.outputs]

            # Convert the tx inputs to (prev_hex_hash, prev_idx) pairs
            txin_pairs = [(hash_to_hex_str(txin.prev_hash), txin.prev_idx)
                          for txin in tx.inputs]

            pending.append((tx_hash, txin_pairs, txout_pairs, tx_size))

        # Now process what we can
        result = {}
        deferred = []

        for item in pending:
            if self.stop:
                break

            tx_hash, old_txin_pairs, txout_pairs, tx_size = item
            if tx_hash not in txs:
                continue

            mempool_missing = False
            txin_pairs = []

            try:
                for prev_hex_hash, prev_idx in old_txin_pairs:
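                    # txs.get() default of 0 distinguishes "not in mempool"
                    # (0) from "in mempool but not yet processed" (None)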
                    tx_info = txs.get(prev_hex_hash, 0)
                    if tx_info is None:
                        tx_info = result.get(prev_hex_hash)
                        if not tx_info:
                            mempool_missing = True
                            continue
                    if tx_info:
                        txin_pairs.append(tx_info[1][prev_idx])
                    elif not mempool_missing:
                        prev_hash = hex_str_to_hash(prev_hex_hash)
                        txin_pairs.append(db_utxo_lookup(prev_hash, prev_idx))
            except (self.db.MissingUTXOError, self.db.DBError):
                # DBError can happen when flushing a newly processed
                # block.  MissingUTXOError typically happens just
                # after the daemon has accepted a new block and the
                # new mempool has deps on new txs in that block.
                continue

            if mempool_missing:
                deferred.append(item)
            else:
                # Compute fee
                tx_fee = (sum(v for hashX, v in txin_pairs) -
                          sum(v for hashX, v in txout_pairs))
                result[tx_hash] = (txin_pairs, txout_pairs, tx_fee, tx_size)

        return result, deferred
Example #23
def sign_claim(private_key, raw_claim, address, claim_id):
    claim = smart_decode(raw_claim)
    return claim.sign(private_key,
                      address,
                      hash_to_hex_str(claim_id),
                      curve=SECP256k1)
Example #24
 async def mempool_hashes(self):
     await sleep(0)
     return [hash_to_hex_str(hash) for hash in self.txs]
Example #25
 def converter(hash):
     if hash == b"*":
         return hash.decode()
     else:
         return hash_to_hex_str(hash)
Example #26
def test_hash_to_hex_str():
    assert lib_hash.hash_to_hex_str(b'hash_to_str') == '7274735f6f745f68736168'
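
For reference, a minimal sketch of the hash_to_hex_str these snippets assume
(ElectrumX-style; the byte reversal is exactly what the test above exercises):

def hash_to_hex_str(x):
    '''Convert a raw binary hash to its conventional displayed hex form.

    The display form of a binary hash is byte-reversed and hex encoded.
    '''
    return bytes(reversed(x)).hex()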
Example #27
 def prev_hex_hash(raw_block):
     return hash_to_hex_str(raw_block[4:36])
Example #28
 def __str__(self):
     script = self.script.hex()
     prev_hash = hash_to_hex_str(self.prev_hash)
     return (f"Input({prev_hash}, {self.prev_idx:d}, script={script}, "
             f"sequence={self.sequence:d})")
Example #29
 def __str__(self):
     script = self.script.hex()
     prev_hash = hash_to_hex_str(self.prev_hash)
     return ("Input({}, {:d}, script={}, sequence={:d})"
             .format(prev_hash, self.prev_idx, script, self.sequence))
Example #30
def _get_txout_asset(coin, txout):
    if coin.EXTENDED_VOUT:
        return hash_to_hex_str(txout.asset[1:])
    else:
        return ""