Example #1
    def remove_claim_from_certificate_claims(self, cert_id, claim_id):
        self.log_info("[-] Removing signature: {} - {}".format(
            hash_to_str(claim_id), hash_to_str(cert_id)))
        certs = self.get_signed_claim_ids_by_cert_id(cert_id)
        if claim_id in certs:
            certs.remove(claim_id)
        self.claims_signed_by_cert_cache[cert_id] = certs
Example #2
    def backup_blocks(self, raw_blocks):
        '''Backup the raw blocks and flush.

        The blocks should be in order of decreasing height, starting at
        self.height.  A flush is performed once the blocks are backed up.
        '''
        self.assert_flushed()
        assert self.height >= len(raw_blocks)

        coin = self.coin
        for raw_block in raw_blocks:
            # Check and update self.tip
            block = coin.block(raw_block, self.height)
            header_hash = coin.header_hash(block.header)
            if header_hash != self.tip:
                raise ChainError(
                    'backup block {} not tip {} at height {:,d}'.format(
                        hash_to_str(header_hash), hash_to_str(self.tip),
                        self.height))
            self.tip = coin.header_prevhash(block.header)
            self.backup_txs(block.transactions)
            self.height -= 1
            self.tx_counts.pop()

        self.logger.info('backed up to height {:,d}'.format(self.height))
        self.backup_flush()
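The tip check above is the heart of the backup walk: each raw block must hash to the current tip, and the tip then retreats to that block's prev-hash. A self-contained sketch of the same walk over toy headers; the 32-byte prev-hash prefix and double_sha256 are assumptions matching ElectrumX's conventions:

from hashlib import sha256

def double_sha256(b):
    return sha256(sha256(b).digest()).digest()

# Build a three-header chain from a fake genesis tip.  Each toy
# header is its parent's hash (32 bytes) plus one payload byte.
tip = b'\x00' * 32
headers = []
for i in range(3):
    header = tip + bytes([i])      # header commits to its parent
    headers.append(header)
    tip = double_sha256(header)    # the new chain tip

# Back up in decreasing height order, exactly like backup_blocks().
for header in reversed(headers):
    assert double_sha256(header) == tip, 'backup block is not the tip'
    tip = header[:32]              # the parent becomes the new tip
print('backed up to genesis tip', tip.hex())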
Example #3
def main():
    env = Env()
    bp = DB(env)
    coin = env.coin
    if len(sys.argv) == 1:
        count_entries(bp.hist_db, bp.utxo_db)
        return
    argc = 1
    try:
        limit = int(sys.argv[argc])
        argc += 1
    except Exception:
        limit = 10
    for addr in sys.argv[argc:]:
        print('Address: ', addr)
        hashX = coin.address_to_hashX(addr)

        for n, (tx_hash, height) in enumerate(bp.get_history(hashX, limit)):
            print('History #{:d}: hash: {} height: {:d}'.format(
                n + 1, hash_to_str(tx_hash), height))
        n = None
        for n, utxo in enumerate(bp.get_utxos(hashX, limit)):
            print('UTXOs #{:d}: hash: {} pos: {:d} height: {:d} value: {:d}'.
                  format(n + 1, hash_to_str(utxo.tx_hash), utxo.tx_pos,
                         utxo.height, utxo.value))
        if n is None:
            print('No UTXOs')
        balance = bp.get_balance(hashX)
        print('Balance: {} {}'.format(coin.decimal_value(balance),
                                      coin.SHORTNAME))
Example #4
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[100:112])
        return {
            'version': version,
            'prev_block_hash': hash_to_str(header[4:36]),
            'merkle_root': hash_to_str(header[36:68]),
            'claim_trie_root': hash_to_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'block_height': height,
        }
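The offsets above imply a 112-byte LBRY header: version (4 bytes), prev block hash (32), merkle root (32), claim trie root (32), then timestamp, bits and nonce (4 each). A round-trip sketch; the local hash_to_str here is an assumption mirroring ElectrumX's reversed-hex display convention:

import struct
from binascii import hexlify

def hash_to_str(h):
    # Assumed display convention: reversed raw bytes, hex-encoded.
    return hexlify(h[::-1]).decode()

# Pack a dummy 112-byte LBRY-style header, then parse it with the
# same offsets electrum_header() uses.
header = struct.pack('<I', 1)                        # version
header += b'\x11' * 32                               # prev block hash
header += b'\x22' * 32                               # merkle root
header += b'\x33' * 32                               # claim trie root
header += struct.pack('<III', 1600000000, 0x1d00ffff, 42)
assert len(header) == 112

version, = struct.unpack('<I', header[:4])
timestamp, bits, nonce = struct.unpack('<III', header[100:112])
print(version, hash_to_str(header[4:36]), timestamp, bits, nonce)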
Example #5
    async def tx_merkle(self, tx_hash, height):
        '''tx_hash is a hex string.'''
        hex_hashes = await self.daemon_request('block_hex_hashes', height, 1)
        block = await self.daemon_request('deserialised_block', hex_hashes[0])
        tx_hashes = block['tx']
        try:
            pos = tx_hashes.index(tx_hash)
        except ValueError:
            raise RPCError(
                BAD_REQUEST, f'tx hash {tx_hash} not in '
                f'block {hex_hashes[0]} at height {height:,d}')

        idx = pos
        hashes = [hex_str_to_hash(txh) for txh in tx_hashes]
        merkle_branch = []
        while len(hashes) > 1:
            if len(hashes) & 1:
                hashes.append(hashes[-1])
            idx = idx - 1 if (idx & 1) else idx + 1
            merkle_branch.append(hash_to_str(hashes[idx]))
            idx //= 2
            hashes = [
                double_sha256(hashes[n] + hashes[n + 1])
                for n in range(0, len(hashes), 2)
            ]

        return {"block_height": height, "merkle": merkle_branch, "pos": pos}
Example #6
    async def address_status(self, hashX):
        '''Returns an address status.

        Status is a hex string, but must be None if there is no history.
        '''
        # Note history is ordered and mempool unordered in electrum-server
        # For mempool, height is -1 if unconfirmed txins, otherwise 0
        history = await self.controller.get_history(hashX)
        mempool = await self.controller.mempool_transactions(hashX)

        status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)
                         for tx_hash, height in history)
        status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed)
                          for hex_hash, tx_fee, unconfirmed in mempool)
        if status:
            status = sha256(status.encode()).hex()
        else:
            status = None

        if mempool:
            self.mempool_statuses[hashX] = status
        else:
            self.mempool_statuses.pop(hashX, None)

        return status
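So a status is sha256 over the concatenation of 'tx_hash:height:' entries, confirmed history first, then mempool entries with height -1 (unconfirmed inputs) or 0. A standalone sketch with toy data, minus the caching:

from hashlib import sha256

def status_hash(history, mempool):
    '''history: ordered (hex_txid, height) pairs; mempool:
    (hex_txid, fee, unconfirmed) triples.  Mirrors address_status()
    above, minus the mempool status cache.'''
    status = ''.join('{}:{:d}:'.format(txid, height)
                     for txid, height in history)
    status += ''.join('{}:{:d}:'.format(txid, -unconfirmed)
                      for txid, fee, unconfirmed in mempool)
    return sha256(status.encode()).hex() if status else None

print(status_hash([('ab' * 32, 100)], [('cd' * 32, 500, True)]))
print(status_hash([], []))   # no history, no mempool -> None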
Example #7
    def _compact_hashX(self, hashX, hist_map, hist_list, write_items,
                       keys_to_delete):
        '''Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashX {} is large: {:,d} entries across '
                             '{:,d} rows'.format(hash_to_str(hashX),
                                                 len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack('>H', n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
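Rows are keyed by hashX plus a big-endian 16-bit row number, and because row sizes are fixed, a later compaction only rewrites rows whose bytes actually changed. A small sketch of that diff step with tiny rows (the 11-byte hashX and 8-byte row size are arbitrary demo values):

from struct import pack

hashX = b'\xaa' * 11          # arbitrary demo key
max_row_size = 8              # tiny rows for the demo; real rows ~50KB
full_hist = bytes(range(20))

# Pretend row 0 was written by an earlier compaction.
on_disk = {hashX + pack('>H', 0): full_hist[0:8]}

keys_to_delete, write_items = set(on_disk), []
for n in range(0, len(full_hist), max_row_size):
    key = hashX + pack('>H', n // max_row_size)
    chunk = full_hist[n:n + max_row_size]
    if on_disk.get(key) == chunk:
        keys_to_delete.remove(key)    # identical on disk: leave it be
    else:
        write_items.append((key, chunk))
print(len(write_items), 'rows to write,', len(keys_to_delete), 'to delete')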
Example #8
    async def confirmed_and_unconfirmed_history(self, hashX):
        # Note history is ordered but unconfirmed is unordered in
        # electrum-server
        history = await self.get_history(hashX)
        conf = [{
            'tx_hash': hash_to_str(tx_hash),
            'height': height
        } for tx_hash, height in history]
        return conf + await self.unconfirmed_history(hashX)
Example #9
    def abandon_spent(self, tx_hash, tx_idx):
        claim_id = self.get_claim_id_from_outpoint(tx_hash, tx_idx)
        if claim_id:
            self.log_info("[!] Abandon: {}".format(hash_to_str(claim_id)))
            self.pending_abandons.setdefault(claim_id, []).append((
                tx_hash,
                tx_idx,
            ))
            return claim_id
Example #10
    def remove_claim_for_name(self, name, claim_id):
        self.log_info("[-] Removing claim from name: {} - {}".format(
            hash_to_str(claim_id), name))
        claims = self.get_claims_for_name(name)
        claim_n = claims.pop(claim_id)
        for claim_id, number in claims.items():
            if number > claim_n:
                claims[claim_id] = number - 1
        self.claims_for_name_cache[name] = claims
Example #11
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        '''
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError('genesis block has hash {} expected {}'.format(
                header_hex_hash, cls.GENESIS_HASH))

        return block
Example #12
    def advance_claim_txs(self, txs, height):
        # TODO: generate claim undo info!
        undo_info = []
        add_undo = undo_info.append
        update_inputs = set()
        for tx, txid in txs:
            update_inputs.clear()
            if tx.has_claims:
                for index, output in enumerate(tx.outputs):
                    claim = output.claim
                    if isinstance(claim, NameClaim):
                        add_undo(
                            self.advance_claim_name_transaction(
                                output, height, txid, index))
                    elif isinstance(claim, ClaimUpdate):
                        update_input = self.get_update_input(claim, tx.inputs)
                        if update_input:
                            update_inputs.add(update_input)
                            add_undo(
                                self.advance_update_claim(
                                    output, height, txid, index))
                        else:
                            info = (
                                hash_to_str(txid),
                                hash_to_str(claim.claim_id),
                            )
                            self.log_error(
                                "REJECTED: {} updating {}".format(*info))
                    elif isinstance(claim, ClaimSupport):
                        self.advance_support(claim, txid, index, height,
                                             output.value)
            for txin in tx.inputs:
                if txin not in update_inputs:
                    abandoned_claim_id = self.abandon_spent(
                        txin.prev_hash, txin.prev_idx)
                    if abandoned_claim_id:
                        add_undo((abandoned_claim_id,
                                  self.get_claim_info(abandoned_claim_id)))
        return undo_info
Example #13
    def open_dbs(self):
        '''Open the databases.  If already open they are closed and re-opened.

        When syncing we want to reserve a lot of open files for the
        synchronization.  When serving clients we want the open files for
        serving network connections.
        '''
        def log_reason(message, is_for_sync):
            reason = 'sync' if is_for_sync else 'serving'
            self.logger.info('{} for {}'.format(message, reason))

        # Assume we're serving until we find out otherwise
        for for_sync in [False, True]:
            if self.utxo_db:
                if self.utxo_db.for_sync == for_sync:
                    return
                log_reason('closing DB to re-open', for_sync)
                self.utxo_db.close()
                self.history.close_db()

            # Open DB and metadata files.  Record some of its state.
            self.utxo_db = self.db_class('utxo', for_sync)
            if self.utxo_db.is_new:
                self.logger.info('created new database')
                self.logger.info('creating metadata directory')
                os.mkdir('meta')
                with util.open_file('COIN', create=True) as f:
                    f.write(
                        'ElectrumX databases and metadata for {} {}'.format(
                            self.coin.NAME, self.coin.NET).encode())
            else:
                log_reason('opened DB', self.utxo_db.for_sync)

            self.read_utxo_state()
            if self.first_sync == self.utxo_db.for_sync:
                break

        # Open history DB, clear excess history
        self.utxo_flush_count = self.history.open_db(self.db_class, for_sync,
                                                     self.utxo_flush_count)
        self.clear_excess_undo_info()

        self.logger.info('DB version: {:d}'.format(self.db_version))
        self.logger.info('coin: {}'.format(self.coin.NAME))
        self.logger.info('network: {}'.format(self.coin.NET))
        self.logger.info('height: {:,d}'.format(self.db_height))
        self.logger.info('tip: {}'.format(hash_to_str(self.db_tip)))
        self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
        if self.first_sync:
            self.logger.info('sync time so far: {}'.format(
                util.formatted_time(self.wall_time)))
Example #14
    async def hashX_listunspent(self, hashX):
        '''Return the list of UTXOs of a script hash, including mempool
        effects.'''
        utxos = await self.get_utxos(hashX)
        utxos = sorted(utxos)
        utxos.extend(self.mempool.get_utxos(hashX))
        spends = await self.mempool.potential_spends(hashX)

        return [{
            'tx_hash': hash_to_str(utxo.tx_hash),
            'tx_pos': utxo.tx_pos,
            'height': utxo.height,
            'value': utxo.value
        } for utxo in utxos if (utxo.tx_hash, utxo.tx_pos) not in spends]
Example #15
    def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return the 33-byte value.

        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        '''
        # Fast track: the UTXO may be in the cache
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            return cache_value

        # Spend it from the DB.

        # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
        # Value: hashX
        prefix = b'h' + tx_hash[:4] + idx_packed
        candidates = {
            db_key: hashX
            for db_key, hashX in self.utxo_db.iterator(prefix=prefix)
        }

        for hdb_key, hashX in candidates.items():
            tx_num_packed = hdb_key[-4:]

            if len(candidates) > 1:
                tx_num, = unpack('<I', tx_num_packed)
                hash, height = self.fs_tx_hash(tx_num)
                if hash != tx_hash:
                    assert hash is not None  # Should always be found
                    continue

            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            udb_key = b'u' + hashX + hdb_key[-6:]
            utxo_value_packed = self.utxo_db.get(udb_key)
            if utxo_value_packed:
                # Remove both entries for this UTXO
                self.db_deletes.append(hdb_key)
                self.db_deletes.append(udb_key)
                return hashX + tx_num_packed + utxo_value_packed

        raise ChainError('UTXO {} / {:,d} not found in "h" table'.format(
            hash_to_str(tx_hash), tx_idx))
Example #16
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks)
        await self.prefetcher.reset_height()
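Raw blocks are fetched from the daemon in batches of 50 hex hashes. A sketch of that batching; the chunks helper is an assumption of what electrumx's util.chunks does:

def chunks(items, size):
    '''Yield fixed-size slices of items (assumed to match
    electrumx's util.chunks).'''
    for i in range(0, len(items), size):
        yield items[i:i + size]

hex_hashes = ['%064x' % n for n in range(120)]
for batch in chunks(hex_hashes, 50):
    print('would fetch', len(batch), 'raw blocks')   # 50, 50, 20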
Example #17
    async def transactions(self, hashX):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
        entries for the hashX.

        unconfirmed is True if any txin is unconfirmed.
        '''
        deserializer = self.coin.DESERIALIZER
        pairs = await self.raw_transactions(hashX)
        result = []
        for hex_hash, raw_tx in pairs:
            item = self.txs.get(hex_hash)
            if not item or not raw_tx:
                continue
            tx_fee = item[2]
            tx = deserializer(raw_tx).read_tx()
            unconfirmed = any(
                hash_to_str(txin.prev_hash) in self.txs for txin in tx.inputs)
            result.append((hex_hash, tx_fee, unconfirmed))
        return result
Example #18
    def db_utxo_lookup(self, tx_hash, tx_idx):
        '''Given a prevout return a (hashX, value) pair.

        Raises MissingUTXOError if the UTXO is not found.  Used by the
        mempool code.
        '''
        idx_packed = pack('<H', tx_idx)
        hashX, tx_num_packed = self._db_hashX(tx_hash, idx_packed)
        if not hashX:
            # This can happen when the daemon is a block ahead of us
            # and has mempool txs spending outputs from that new block
            raise self.MissingUTXOError

        # Key: b'u' + address_hashX + tx_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer
        key = b'u' + hashX + idx_packed + tx_num_packed
        db_value = self.utxo_db.get(key)
        if not db_value:
            raise self.DBError('UTXO {} / {:,d} in one table only'.format(
                hash_to_str(tx_hash), tx_idx))
        value, = unpack('<Q', db_value)
        return hashX, value
Example #19
    async def reorg_hashes(self, count):
        '''Return the list of hashes to back up because of a reorg.

        The hashes are returned in order of increasing height.'''
        def diff_pos(hashes1, hashes2):
            '''Returns the index of the first difference in the hash lists.
            If both lists match returns their length.'''
            for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
                if hash1 != hash2:
                    return n
            return min(len(hashes1), len(hashes2))

        if count is None:
            # A real reorg
            start = self.height - 1
            count = 1
            while start > 0:
                hashes = self.fs_block_hashes(start, count)
                hex_hashes = [hash_to_str(hash) for hash in hashes]
                d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
                n = diff_pos(hex_hashes, d_hex_hashes)
                if n > 0:
                    start += n
                    break
                count = min(count * 2, start)
                start -= count

            count = (self.height - start) + 1
        else:
            start = (self.height - count) + 1

        s = '' if count == 1 else 's'
        self.logger.info('chain was reorganised replacing {:,d} block{} at '
                         'heights {:,d}-{:,d}'.format(count, s, start,
                                                      start + count - 1))

        return self.fs_block_hashes(start, count)
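For a real reorg the code probes backwards with a doubling window until the local and daemon hashes share a common prefix, then backs up everything above the fork. A self-contained simulation of that search over toy hash lists:

def diff_pos(hashes1, hashes2):
    '''Index of the first difference; the common length on a full match.'''
    for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
        if hash1 != hash2:
            return n
    return min(len(hashes1), len(hashes2))

# Toy chains: identical up to height 92, divergent above it.
height = 99
local = ['same%d' % h if h <= 92 else 'ours%d' % h
         for h in range(height + 1)]
daemon = ['same%d' % h if h <= 92 else 'theirs%d' % h
          for h in range(height + 1)]

start, count = height - 1, 1
while start > 0:
    n = diff_pos(local[start:start + count], daemon[start:start + count])
    if n > 0:                        # a common prefix ends in this window
        start += n
        break
    count = min(count * 2, start)    # no agreement: double the look-back
    start -= count
count = (height - start) + 1
print('replace %d blocks at heights %d-%d' % (count, start,
                                              start + count - 1))
# -> replace 7 blocks at heights 93-99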
Example #20
    def put_claim_for_name(self, name, claim_id):
        self.log_info("[+] Adding claim {} for name {}.".format(
            hash_to_str(claim_id), name))
        claims = self.get_claims_for_name(name)
        claims.setdefault(claim_id, max(claims.values() or [0]) + 1)
        self.claims_for_name_cache[name] = claims
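setdefault keeps an existing sequence number, and max(claims.values() or [0]) + 1 appends new claims at the end of the 1-based sequence. A two-line demonstration:

claims = {}
for claim_id in (b'\x01', b'\x02', b'\x01'):
    claims.setdefault(claim_id, max(claims.values() or [0]) + 1)
print(claims)   # {b'\x01': 1, b'\x02': 2} -- re-adding keeps number 1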
Example #21
    def put_claim_id_signed_by_cert_id(self, cert_id, claim_id):
        self.log_info("[+] Adding signature: {} - {}".format(
            hash_to_str(claim_id), hash_to_str(cert_id)))
        certs = self.get_signed_claim_ids_by_cert_id(cert_id)
        certs.append(claim_id)
        self.claims_signed_by_cert_cache[cert_id] = certs
Example #22
    async def claimtrie_getclaimssignedbynthtoname(self, name, n):
        n = int(n)
        claims = self.bp.get_claims_for_name(name.encode('ISO-8859-1'))
        for claim_id, sequence in claims.items():
            if n == sequence:
                return await self.claimtrie_getclaimssignedbyid(
                    hash_to_str(claim_id))
Example #23
    def remove_certificate(self, cert_id):
        self.log_info("[-] Removing certificate: {}".format(
            hash_to_str(cert_id)))
        self.claims_signed_by_cert_cache[cert_id] = []
Example #24
    def __str__(self):
        script = self.script.hex()
        prev_hash = hash_to_str(self.prev_hash)
        return ("Input({}, {:d}, script={}, sequence={:d})".format(
            prev_hash, self.prev_idx, script, self.sequence))
Example #25
    def __str__(self):
        prev_hash = hash_to_str(self.prev_hash)
        return ("Input({}, {:d}, tree={}, sequence={:d})".format(
            prev_hash, self.prev_idx, self.tree, self.sequence))
Example #26
    def put_claim_info(self, claim_id, claim_info):
        self.log_info("[+] Adding claim info for: {}".format(
            hash_to_str(claim_id)))
        self.claim_cache[claim_id] = claim_info.serialized
Example #27
    async def claimtrie_getvalueforuri(self, block_hash, uri):
        key = str((block_hash, uri))
        if key in self.cache:
            return self.cache[key]
        # TODO: this thing is huge, refactor
        CLAIM_ID = "claim_id"
        WINNING = "winning"
        SEQUENCE = "sequence"
        try:
            parsed_uri = parse_lbry_uri(uri)
        except URIParseError as err:
            return {'error': err.message}
        result = {}

        if parsed_uri.is_channel:
            certificate = None

            # TODO: this is also done on the else, refactor
            if parsed_uri.claim_id:
                certificate_info = await self.claimtrie_getclaimbyid(parsed_uri.claim_id)
                if certificate_info and certificate_info['name'] == parsed_uri.name:
                    certificate = {'resolution_type': CLAIM_ID, 'result': certificate_info}
            elif parsed_uri.claim_sequence:
                certificate_info = await self.claimtrie_getnthclaimforname(parsed_uri.name, parsed_uri.claim_sequence)
                if certificate_info:
                    certificate = {'resolution_type': SEQUENCE, 'result': certificate_info}
            else:
                certificate_info = await self.claimtrie_getvalue(parsed_uri.name, block_hash)
                if certificate_info:
                    certificate = {'resolution_type': WINNING, 'result': certificate_info}

            if certificate and 'claim_id' not in certificate['result']:
                return result

            if certificate and not parsed_uri.path:
                result['certificate'] = certificate
                channel_id = certificate['result']['claim_id']
                claims_in_channel = await self.claimtrie_getclaimssignedbyid(channel_id)
                result['unverified_claims_in_channel'] = {claim['claim_id']: (claim['name'], claim['height'])
                                                          for claim in claims_in_channel if claim}
            elif certificate:
                result['certificate'] = certificate
                channel_id = certificate['result']['claim_id']
                claim_ids_matching_name = self.get_signed_claims_with_name_for_channel(channel_id, parsed_uri.path)
                claims = await self.batched_formatted_claims_from_daemon(claim_ids_matching_name)

                claims_in_channel = {claim['claim_id']: (claim['name'], claim['height'])
                                     for claim in claims}
                result['unverified_claims_for_name'] = claims_in_channel
        else:
            claim = None
            if parsed_uri.claim_id:
                claim_info = await self.claimtrie_getclaimbyid(parsed_uri.claim_id)
                if claim_info and claim_info['name'] == parsed_uri.name:
                    claim = {'resolution_type': CLAIM_ID, 'result': claim_info}
            elif parsed_uri.claim_sequence:
                claim_info = await self.claimtrie_getnthclaimforname(parsed_uri.name, parsed_uri.claim_sequence)
                if claim_info:
                    claim = {'resolution_type': SEQUENCE, 'result': claim_info}
            else:
                claim_info = await self.claimtrie_getvalue(parsed_uri.name, block_hash)
                if claim_info:
                    claim = {'resolution_type': WINNING, 'result': claim_info}
            if (claim and
                    # is not an unclaimed winning name
                    (claim['resolution_type'] != WINNING or proof_has_winning_claim(claim['result']['proof']))):
                raw_claim_id = unhexlify(claim['result']['claim_id'])[::-1]
                raw_certificate_id = self.bp.get_claim_info(raw_claim_id).cert_id
                if raw_certificate_id:
                    certificate_id = hash_to_str(raw_certificate_id)
                    certificate = await self.claimtrie_getclaimbyid(certificate_id)
                    if certificate:
                        certificate = {'resolution_type': CLAIM_ID,
                                       'result': certificate}
                        result['certificate'] = certificate
                result['claim'] = claim
        self.cache[key] = result
        return result
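unhexlify(...)[::-1] turns a displayed claim id back into raw bytes because hash_to_str shows hashes byte-reversed. A round-trip sketch under that assumption:

from binascii import hexlify, unhexlify

def hash_to_str(h):
    # Assumed display convention: reversed raw bytes, hex-encoded.
    return hexlify(h[::-1]).decode()

raw_claim_id = bytes(range(20))    # claim ids are 20 raw bytes
displayed = hash_to_str(raw_claim_id)
assert unhexlify(displayed)[::-1] == raw_claim_id
print(displayed)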
Example #28
    def remove_claim_id_for_outpoint(self, tx_hash, tx_idx):
        self.log_info("[-] Remove outpoint: {}:{}.".format(
            hash_to_str(tx_hash), tx_idx))
        self.outpoint_to_claim_id_cache[
            tx_hash + struct.pack('>I', tx_idx)] = None
Example #29
    def process_raw_txs(self, raw_tx_map, pending):
        '''Process the dictionary of raw transactions and return a dictionary
        of updates to apply to self.txs.

        This runs in the executor so should not update any member
        variables it doesn't own.  Atomic reads of self.txs that do
        not depend on the result remaining the same are fine.
        '''
        script_hashX = self.coin.hashX_from_script
        deserializer = self.coin.DESERIALIZER
        db_utxo_lookup = self.db.db_utxo_lookup
        txs = self.txs

        # Deserialize each tx and put it in a pending list
        for tx_hash, raw_tx in raw_tx_map.items():
            if tx_hash not in txs:
                continue
            tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()

            # Convert the tx outputs into (hashX, value) pairs
            txout_pairs = [(script_hashX(txout.pk_script), txout.value)
                           for txout in tx.outputs]

            # Convert the tx inputs to (prev_hex_hash, prev_idx) pairs
            txin_pairs = [(hash_to_str(txin.prev_hash), txin.prev_idx)
                          for txin in tx.inputs]

            pending.append((tx_hash, txin_pairs, txout_pairs, tx_size))

        # Now process what we can
        result = {}
        deferred = []

        for item in pending:
            if self.stop:
                break

            tx_hash, old_txin_pairs, txout_pairs, tx_size = item
            if tx_hash not in txs:
                continue

            mempool_missing = False
            txin_pairs = []

            try:
                for prev_hex_hash, prev_idx in old_txin_pairs:
                    tx_info = txs.get(prev_hex_hash, 0)
                    if tx_info is None:
                        tx_info = result.get(prev_hex_hash)
                        if not tx_info:
                            mempool_missing = True
                            continue
                    if tx_info:
                        txin_pairs.append(tx_info[1][prev_idx])
                    elif not mempool_missing:
                        prev_hash = hex_str_to_hash(prev_hex_hash)
                        txin_pairs.append(db_utxo_lookup(prev_hash, prev_idx))
            except (self.db.MissingUTXOError, self.db.DBError):
                # DBError can happen when flushing a newly processed
                # block.  MissingUTXOError typically happens just
                # after the daemon has accepted a new block and the
                # new mempool has deps on new txs in that block.
                continue

            if mempool_missing:
                deferred.append(item)
            else:
                # Compute fee
                tx_fee = (sum(v for hashX, v in txin_pairs) -
                          sum(v for hashX, v in txout_pairs))
                result[tx_hash] = (txin_pairs, txout_pairs, tx_fee, tx_size)

        return result, deferred
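The fee at the end is simply the sum of input values minus the sum of output values. A toy check of that arithmetic:

txin_pairs = [(b'\x01', 5000), (b'\x02', 3000)]    # (hashX, value) spent
txout_pairs = [(b'\x03', 7000)]                    # (hashX, value) created
tx_fee = (sum(v for hashX, v in txin_pairs) -
          sum(v for hashX, v in txout_pairs))
print(tx_fee)   # 1000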
Example #30
def sign_claim(private_key, raw_claim, address, claim_id):
    claim = smart_decode(raw_claim)
    return claim.sign(private_key,
                      address,
                      hash_to_str(claim_id),
                      curve=SECP256k1)
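smart_decode and claim.sign come from lbryschema, which this page doesn't show; underneath, the signature is ordinary ECDSA over SECP256k1. A minimal sketch with the ecdsa package (the payload bytes are a placeholder, not lbryschema's actual message format):

from ecdsa import SigningKey, SECP256k1

private_key = SigningKey.generate(curve=SECP256k1)
payload = b'serialized claim|address|claim_id'    # placeholder bytes only
signature = private_key.sign(payload)
assert private_key.get_verifying_key().verify(signature, payload)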