Example #1
    def electrum_header(cls, header, height):
        h = dict(
            block_height=height,
            version=struct.unpack('<I', header[:4])[0],
            prev_block_hash=hash_to_str(header[4:36]),
            merkle_root=hash_to_str(header[36:68]),
            reserved=hash_to_str(header[72:100]),
            timestamp=struct.unpack('<I', header[100:104])[0],
            bits=struct.unpack('<I', header[104:108])[0],
            nonce=hash_to_str(header[108:140]),
            solution=hash_to_str(header[140:])
        )

        return h
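Every example below renders binary hashes through the same `hash_to_str` helper. A minimal sketch of what it is assumed to do, consistent with Example #2 computing `block_hash` as `bytes(reversed(...)).hex()`:

    # Assumed behaviour of hash_to_str: hex-encode a binary hash in
    # reversed (display) byte order.
    def hash_to_str(raw_hash):
        # Hashes are serialized little-endian but displayed big-endian,
        # hence the reversal before hex-encoding.
        return bytes(reversed(raw_hash)).hex()

    # The all-zero 32-byte hash renders as 64 '0' characters.
    assert hash_to_str(bytes(32)) == '00' * 32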
Example #2
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[68:80])
        block_hash = bytes(reversed(cls.header_hash(header, height))).hex()

        return {
            'block_height': height,
            'version': version,
            'block_hash': block_hash,
            'prev_block_hash': hash_to_str(header[4:36]),
            'merkle_root': hash_to_str(header[36:68]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
        }
Example #3
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits = struct.unpack('<II', header[100:108])

        return {
            'block_height': height,
            'version': version,
            'prev_block_hash': hash_to_str(header[4:36]),
            'merkle_root': hash_to_str(header[36:68]),
            'hash_reserved': hash_to_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': hash_to_str(header[108:140]),
            'n_solution': base64.b64encode(
                lib_tx.Deserializer(header, start=140)._read_varbytes()
            ).decode('utf8')
        }
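`_read_varbytes` is not shown in this excerpt; a minimal standalone sketch, assuming it implements the usual Bitcoin encoding of a varint length prefix followed by that many bytes:

    # Hypothetical standalone reader for a varint-prefixed byte string.
    def read_varbytes(buf, cursor=0):
        n = buf[cursor]
        cursor += 1
        if n == 0xfd:                  # 2-byte little-endian length follows
            n = int.from_bytes(buf[cursor:cursor + 2], 'little'); cursor += 2
        elif n == 0xfe:                # 4-byte length follows
            n = int.from_bytes(buf[cursor:cursor + 4], 'little'); cursor += 4
        elif n == 0xff:                # 8-byte length follows
            n = int.from_bytes(buf[cursor:cursor + 8], 'little'); cursor += 8
        return buf[cursor:cursor + n], cursor + n

    payload, _ = read_varbytes(b'\x03abc')
    assert payload == b'abc'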
Example #4
    async def transactions(self, hashX):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
        entries for the hashX.

        unconfirmed is True if any txin is unconfirmed.
        '''
        # hashXs is a defaultdict
        if hashX not in self.hashXs:
            return []

        deserializer = self.coin.DESERIALIZER
        hex_hashes = self.hashXs[hashX]
        raw_txs = await self.daemon.getrawtransactions(hex_hashes)
        result = []
        for hex_hash, raw_tx in zip(hex_hashes, raw_txs):
            item = self.txs.get(hex_hash)
            if not item or not raw_tx:
                continue
            txin_pairs, txout_pairs = item
            tx_fee = (sum(v for hashX, v in txin_pairs) -
                      sum(v for hashX, v in txout_pairs))
            tx, tx_hash = deserializer(raw_tx).read_tx()
            unconfirmed = any(hash_to_str(txin.prev_hash) in self.txs
                              for txin in tx.inputs)
            result.append((hex_hash, tx_fee, unconfirmed))
        return result
Example #5
    async def address_status(self, hashX):
        '''Returns an address status.

        Status is a hex string, but must be None if there is no history.
        '''
        # Note history is ordered and mempool unordered in electrum-server
        # For mempool, height is -1 if unconfirmed txins, otherwise 0
        history = await self.controller.get_history(hashX)
        mempool = await self.controller.mempool_transactions(hashX)

        status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)
                         for tx_hash, height in history)
        status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed)
                          for hex_hash, tx_fee, unconfirmed in mempool)
        if status:
            status = sha256(status.encode()).hex()
        else:
            status = None

        if mempool:
            self.mempool_statuses[hashX] = status
        else:
            self.mempool_statuses.pop(hashX, None)

        return status
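To make the status encoding concrete, a toy rendering with hypothetical history entries: each entry contributes '<tx_hash>:<height>:' and the concatenation is SHA-256 hashed. The `sha256` used above apparently returns raw bytes (hence `.hex()`); with hashlib the equivalent call is `.hexdigest()`:

    from hashlib import sha256

    history = [('aa' * 32, 5), ('bb' * 32, -1)]  # hypothetical (tx_hash, height)
    status = ''.join('{}:{:d}:'.format(tx_hash, height)
                     for tx_hash, height in history)
    print(sha256(status.encode()).hexdigest())   # 64-char hex status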
Example #6
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes, start, count = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        # get saved eventlog hashYs
        if hashes:
            eventlog_hashYs = reduce(
                operator.add, [self.get_block_hashYs(x) for x in hashes])
        else:
            eventlog_hashYs = []
        self.logger.info('chain reorg eventlog_hashYs {} {}'.format(
            eventlog_hashYs, hashes))

        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks,
                                                  eventlog_hashYs)
        await self.prefetcher.reset_height()
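`chunks` is not defined in this excerpt; a minimal sketch matching its use here, yielding successive fixed-size slices with the last one possibly shorter:

    def chunks(items, size):
        # Yield consecutive slices of `items` of length `size`.
        for i in range(0, len(items), size):
            yield items[i:i + size]

    assert list(chunks(list(range(5)), 2)) == [[0, 1], [2, 3], [4]]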
Example #7
    async def tx_merkle(self, tx_hash, height):
        '''tx_hash is a hex string.'''
        hex_hashes = await self.daemon_request('block_hex_hashes', height, 1)
        block = await self.daemon_request('deserialised_block', hex_hashes[0])
        tx_hashes = block['tx']
        try:
            pos = tx_hashes.index(tx_hash)
        except ValueError:
            raise RPCError(
                BAD_REQUEST, f'tx hash {tx_hash} not in '
                f'block {hex_hashes[0]} at height {height:,d}')

        idx = pos
        hashes = [hex_str_to_hash(txh) for txh in tx_hashes]
        merkle_branch = []
        while len(hashes) > 1:
            if len(hashes) & 1:
                hashes.append(hashes[-1])
            idx = idx - 1 if (idx & 1) else idx + 1
            merkle_branch.append(hash_to_str(hashes[idx]))
            idx //= 2
            hashes = [
                double_sha256(hashes[n] + hashes[n + 1])
                for n in range(0, len(hashes), 2)
            ]

        return {"block_height": height, "merkle": merkle_branch, "pos": pos}
Example #8
    def advance_blocks(self, blocks):
        '''Synchronously advance the blocks.

        It is already verified they correctly connect onto our tip.
        '''
        min_height = self.min_undo_height(self.daemon.cached_height())
        height = self.height

        for block in blocks:
            height += 1
            undo_info = self.advance_txs(block.transactions)
            if height >= min_height:
                self.undo_infos.append((undo_info, height))

        headers = [block.header for block in blocks]
        self.height = height
        self.headers.extend(headers)
        self.logger.info("Set tip {0}".format(
            hash_to_str(self.coin.header_hash(headers[-1]))))
        self.tip = self.coin.header_hash(headers[-1])

        # If caught up, flush everything as client queries are
        # performed on the DB.
        if self.caught_up_event.is_set():
            self.flush(True)
        else:
            if time.time() > self.next_cache_check:
                self.check_cache_size()
                self.next_cache_check = time.time() + 30
Example #9
    async def reorg_hashes(self, count):
        '''Return the list of hashes to back up because of a reorg.

        The hashes are returned in order of increasing height.'''
        def match_pos(hashes1, hashes2):
            for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
                if hash1 == hash2:
                    return n
            return -1

        if count is None:
            # A real reorg
            start = self.height - 1
            count = 1
            while start > 0:
                hashes = self.fs_block_hashes(start, count)
                hex_hashes = [hash_to_str(hash) for hash in hashes]
                d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
                n = match_pos(hex_hashes, d_hex_hashes)
                if n >= 0:
                    start += n + 1
                    break
                count = min(count * 2, start)
                start -= count

            count = (self.height - start) + 1
        else:
            start = (self.height - count) + 1

        s = '' if count == 1 else 's'
        self.logger.info('chain was reorganised replacing {:,d} block{} at '
                         'heights {:,d}-{:,d}'.format(count, s, start,
                                                      start + count - 1))

        return self.fs_block_hashes(start, count)
Example #10
    def _compact_hashX(self, hashX, hist_map, hist_list, write_items,
                       keys_to_delete):
        '''Compress history for a hashX.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.log_info(
                'hashX {} is large: {:,d} entries across {:,d} rows'.format(
                    hash_to_str(hashX),
                    len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashX + pack('>H', n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
Example #11
    async def hash160_contract_get_eventlogs(self, hash160, contract_addr):
        hashY = self.coin.hash160_contract_to_hashY(hash160, contract_addr)
        eventlogs = await self.get_eventlogs(hashY)
        conf = [{
            'tx_hash': hash_to_str(tx_hash),
            'height': height
        } for tx_hash, height in eventlogs]
        return conf
Example #12
    async def address_listunspent(self, address):
        '''Return the list of UTXOs of an address.'''
        hashX = self.address_to_hashX(address)
        script = self.pay_to_address_script(address)
        scriptKey = script.hex()
        return [{'tx_hash': hash_to_str(utxo.tx_hash), 'tx_pos': utxo.tx_pos,
                 'height': utxo.height, 'value': utxo.value, 'script': scriptKey}
                for utxo in sorted(await self.get_utxos(hashX))]
Example #13
    async def confirmed_and_unconfirmed_history(self, hashX):
        # Note history is ordered but unconfirmed is unordered in
        # electrum-server
        history = await self.get_history(hashX)
        conf = [{
            'tx_hash': hash_to_str(tx_hash),
            'height': height
        } for tx_hash, height in history]
        return conf + await self.unconfirmed_history(hashX)
Example #14
    def open_dbs(self):
        '''Open the databases.  If already open they are closed and re-opened.

        When syncing we want to reserve a lot of open files for the
        synchronization.  When serving clients we want the open files for
        serving network connections.
        '''
        def log_reason(message, is_for_sync):
            reason = 'sync' if is_for_sync else 'serving'
            self.logger.info('{} for {}'.format(message, reason))

        # Assume we're serving until we find out otherwise
        for for_sync in [False, True]:
            if self.utxo_db:
                if self.utxo_db.for_sync == for_sync:
                    return
                log_reason('closing DB to re-open', for_sync)
                self.utxo_db.close()
                self.hist_db.close()
                self.eventlog_db.close()
                self.hashY_db.close()

            # Open DB and metadata files.  Record some of its state.
            self.utxo_db = self.db_class('utxo', for_sync)
            self.hist_db = self.db_class('hist', for_sync)
            self.eventlog_db = self.db_class('eventlog', for_sync)
            self.hashY_db = self.db_class('hashY', for_sync)

            if self.utxo_db.is_new:
                self.logger.info('created new database')
                self.logger.info('creating metadata directory')
                os.mkdir('meta')
                with util.open_file('COIN', create=True) as f:
                    f.write(
                        'ElectrumX databases and metadata for {} {}'.format(
                            self.coin.NAME, self.coin.NET).encode())
            else:
                log_reason('opened DB', self.utxo_db.for_sync)

            self.read_utxo_state()
            if self.first_sync == self.utxo_db.for_sync:
                break

        self.read_history_state()
        self.read_eventlog_state()

        self.logger.info('DB version: {:d}'.format(self.db_version))
        self.logger.info('coin: {}'.format(self.coin.NAME))
        self.logger.info('network: {}'.format(self.coin.NET))
        self.logger.info('height: {:,d}'.format(self.db_height))
        self.logger.info('tip: {}'.format(hash_to_str(self.db_tip)))
        self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
        self.logger.info('flush count: {:,d}'.format(self.flush_count))
        self.logger.info('eventlog flush count: {:,d}'.format(
            self.eventlog_flush_count))
        if self.first_sync:
            self.logger.info('sync time so far: {}'.format(
                util.formatted_time(self.wall_time)))
Example #15
    def electrum_header(cls, header, height):
        if height >= cls.BTQ_FORK_HEIGHT:
            version, = struct.unpack('<I', header[:4])

            return {
                'block_height': height,  # the height occupies bytes [68:72]
                'version': version,
                'prev_block_hash': hash_to_str(header[4:36]),
                'merkle_root': hash_to_str(header[36:68]),
                'reserved': hash_to_str(header[72:100]),
                'timestamp': struct.unpack('<I', header[100:104])[0],
                'bits': struct.unpack('<I', header[104:108])[0],
                'nonce': hash_to_str(header[108:140]),
                'solution': hash_to_str(header[140:]), 
            }
        else:
            version, = struct.unpack('<I', header[:4])
            timestamp, bits, nonce = struct.unpack('<III', header[68:80])

            return {
                'block_height': height,
                'version': version,
                'prev_block_hash': hash_to_str(header[4:36]),
                'merkle_root': hash_to_str(header[36:68]),
                'timestamp': timestamp,
                'bits': bits,
                'nonce': nonce,
            }
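The two branches assume different serialized layouts: the legacy 80-byte header packs timestamp, bits, and nonce at [68:80], while the post-fork Equihash-style header carries the height at [68:72], timestamp and bits at [100:108], a 32-byte nonce at [108:140], and the solution from byte 140 on (Example #20 slices it as [140:1484], i.e. 1,344 bytes). A decoding sketch over zeroed buffers, with the solution size taken from that example as an assumption:

    import struct

    pre_fork = bytes(80)                 # legacy 80-byte header
    timestamp, bits, nonce = struct.unpack('<III', pre_fork[68:80])

    post_fork = bytes(140 + 1344)        # assumed 1,344-byte solution
    timestamp, = struct.unpack('<I', post_fork[100:104])
    bits, = struct.unpack('<I', post_fork[104:108])
    nonce = post_fork[108:140]           # 32-byte nonce field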
Example #16
    async def scripthash_listunspent(self, scripthash):
        '''Return the list of UTXOs of a scripthash.'''
        hashX = self.scripthash_to_hashX(scripthash)
        return [{
            'tx_hash': hash_to_str(utxo.tx_hash),
            'tx_pos': utxo.tx_pos,
            'height': utxo.height,
            'value': utxo.value
        } for utxo in sorted(await self.get_utxos(hashX))]
Example #17
    async def hashX_listunspent(self, hashX):
        '''Return the list of UTXOs of a script hash.

        We should remove mempool spends from the in-DB UTXOs.'''
        utxos = await self.get_utxos(hashX)
        spends = await self.mempool.spends(hashX)

        return [{'tx_hash': hash_to_str(utxo.tx_hash), 'tx_pos': utxo.tx_pos,
                 'height': utxo.height, 'value': utxo.value}
                for utxo in sorted(utxos)
                if (utxo.tx_hash, utxo.tx_pos) not in spends]
Example #18
    async def hashX_listunspent(self, hashX):
        '''Return the list of UTXOs of a script hash, including mempool
        effects.'''
        utxos = await self.get_utxos(hashX)
        utxos = sorted(utxos)
        utxos.extend(self.mempool.get_utxos(hashX))
        spends = await self.mempool.potential_spends(hashX)

        return [{'tx_hash': hash_to_str(utxo.tx_hash), 'tx_pos': utxo.tx_pos,
                 'height': utxo.height, 'value': utxo.value}
                for utxo in utxos
                if (utxo.tx_hash, utxo.tx_pos) not in spends]
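Examples #17 and #18 both drop a UTXO when its (tx_hash, tx_pos) outpoint appears in the mempool spend set; a toy illustration with a hypothetical outpoint:

    utxo = ('ff' * 32, 0, 100, 5000)   # hypothetical (hash, pos, height, value)
    spends = {('ff' * 32, 0)}          # that outpoint is spent in the mempool
    assert [u for u in [utxo] if (u[0], u[1]) not in spends] == []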
Example #19
    def genesis_block(cls, block):
        '''Check the Genesis block is the right one for this coin.

        Return the block less its unspendable coinbase.
        '''
        header = cls.block_header(block, 0)
        header_hex_hash = hash_to_str(cls.header_hash(header))
        if header_hex_hash != cls.GENESIS_HASH:
            raise CoinError('genesis block has hash {} expected {}'
                            .format(header_hex_hash, cls.GENESIS_HASH))

        return header + bytes(1)
Example #20
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits = struct.unpack('<II', header[100:108])

        return {
            'block_height': height,
            'version': version,
            'prev_block_hash': hash_to_str(header[4:36]),
            'merkle_root': hash_to_str(header[36:68]),
            'reserved': hash_to_str(header[68:100]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': hash_to_str(header[108:140]),
            'solution': hash_to_str(header[140:1484])
        }
Example #21
    def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return the 33-byte value.

        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        '''
        # Fast track is it being in the cache
        # utxo_cache is the in-memory UTXO cache.  Note that tx_num is no
        # longer part of the key, and tx_hash is stored uncompressed.
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            return cache_value

        # Spend it from the DB.
        # tx_idx is 2 bytes, tx_num is 4 bytes
        # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
        # Value: hashX
        prefix = b'h' + tx_hash[:4] + idx_packed
        # Fetch all h-keys matching this prefix from leveldb.  In the
        # vast majority of cases the prefix matches only a single entry.
        candidates = {
            db_key: hashX
            for db_key, hashX in self.utxo_db.iterator(prefix=prefix)
        }

        for hdb_key, hashX in candidates.items():
            tx_num_packed = hdb_key[-4:]

            if len(candidates) > 1:
                # More than one candidate: disambiguate via the
                # tx_num -> tx_hash map
                tx_num, = unpack('<I', tx_num_packed)
                hash, height = self.fs_tx_hash(tx_num)
                if hash != tx_hash:
                    assert hash is not None  # Should always be found
                    continue

            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            udb_key = b'u' + hashX + hdb_key[-6:]
            utxo_value_packed = self.utxo_db.get(udb_key)
            # This is the UTXO we are looking for
            if utxo_value_packed:
                # Remove both entries for this UTXO
                self.db_deletes.append(hdb_key)
                self.db_deletes.append(udb_key)
                return hashX + tx_num_packed + utxo_value_packed

        raise ChainError('UTXO {} / {:,d} not found in "h" table'.format(
            hash_to_str(tx_hash), tx_idx))
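The key layout documented in the comments can be reproduced directly. A sketch with hypothetical values (the 11-byte hashX length is an assumption); it shows why `spend_utxo` can build the 'u' key from `hdb_key[-6:]`:

    from struct import pack

    tx_hash = bytes(range(32))   # hypothetical 32-byte tx hash
    hashX = bytes(11)            # hypothetical 11-byte address hashX
    tx_idx, tx_num = 1, 123456

    hdb_key = b'h' + tx_hash[:4] + pack('<H', tx_idx) + pack('<I', tx_num)
    udb_key = b'u' + hashX + pack('<H', tx_idx) + pack('<I', tx_num)
    # Both keys share the trailing tx_idx + tx_num suffix.
    assert udb_key[-6:] == hdb_key[-6:]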
Example #22
    async def address_status(self, hashX):
        '''Returns status as 32 bytes.'''
        # Note history is ordered and mempool unordered in electrum-server
        # For mempool, height is -1 if unconfirmed txins, otherwise 0
        history = await self.get_history(hashX)
        mempool = await self.mempool_transactions(hashX)

        status = ''.join('{}:{:d}:'.format(hash_to_str(tx_hash), height)
                         for tx_hash, height in history)
        status += ''.join('{}:{:d}:'.format(hex_hash, -unconfirmed)
                          for hex_hash, tx_fee, unconfirmed in mempool)
        if status:
            return sha256(status.encode()).hex()
        return None
Example #23
    def spend_utxo(self, tx_hash, tx_idx):
        '''Spend a UTXO and return the 33-byte value.

        If the UTXO is not in the cache it must be on disk.  We store
        all UTXOs so not finding one indicates a logic error or DB
        corruption.
        '''
        # Fast track is it being in the cache
        idx_packed = pack('<H', tx_idx)
        cache_value = self.utxo_cache.pop(tx_hash + idx_packed, None)
        if cache_value:
            return cache_value

        # Spend it from the DB.

        # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
        # Value: hashX
        prefix = b'h' + tx_hash[:4] + idx_packed
        candidates = {
            db_key: hashX
            for db_key, hashX in self.utxo_db.iterator(prefix=prefix)
        }

        for hdb_key, hashX in candidates.items():
            tx_num_packed = hdb_key[-4:]

            if len(candidates) > 1:
                tx_num, = unpack('<I', tx_num_packed)
                hash, height = self.fs_tx_hash(tx_num)
                if hash != tx_hash:
                    assert hash is not None  # Should always be found
                    continue

            # Key: b'u' + address_hashX + tx_idx + tx_num
            # Value: the UTXO value as a 64-bit unsigned integer
            udb_key = b'u' + hashX + hdb_key[-6:]
            utxo_value_packed = self.utxo_db.get(udb_key)
            if utxo_value_packed:
                # Remove both entries for this UTXO
                self.db_deletes.append(hdb_key)
                self.db_deletes.append(udb_key)
                return hashX + tx_num_packed + utxo_value_packed

        raise ChainError('UTXO {} / {:,d} not found in "h" table'.format(
            hash_to_str(tx_hash), tx_idx))
Example #24
    def read_tx(self):
        '''Return a (Deserialized TX, TX_HASH) pair.

        The hash needs to be reversed for human display; for efficiency
        we process it in the natural serialized order.
        '''
        tx_version = self._get_version()
        if tx_version == self.bitcoin_diamond_tx_version:
            marker = self.binary[self.cursor + 4 + 32]
        else:
            marker = self.binary[self.cursor + 4]

        if marker:
            return super().read_tx()

        # Ugh, this is nasty.
        start = self.cursor
        version = self._read_le_int32()
        if version == self.bitcoin_diamond_tx_version:
            present_block_hash = hash_to_str(self._read_nbytes(32))
        else:
            present_block_hash = None
        orig_ser = self.binary[start:self.cursor]

        marker = self._read_byte()
        flag = self._read_byte()

        start = self.cursor
        inputs = self._read_inputs()
        outputs = self._read_outputs()
        orig_ser += self.binary[start:self.cursor]

        witness = self._read_witness(len(inputs))

        start = self.cursor
        locktime = self._read_le_uint32()
        orig_ser += self.binary[start:self.cursor]

        if present_block_hash is not None:
            return TxBitcoinDiamondSegWit(version, present_block_hash, marker,
                                          flag, inputs, outputs, witness,
                                          locktime), double_sha256(orig_ser)
        else:
            return TxSegWit(version, marker, flag, inputs,
                            outputs, witness, locktime), double_sha256(orig_ser)
Example #25
    async def reorg_chain(self, count=None):
        '''Handle a chain reorganisation.

        Count is the number of blocks to simulate a reorg, or None for
        a real reorg.'''
        if count is None:
            self.logger.info('chain reorg detected')
        else:
            self.logger.info('faking a reorg of {:,d} blocks'.format(count))
        await self.controller.run_in_executor(self.flush, True)

        hashes = await self.reorg_hashes(count)
        # Reverse and convert to hex strings.
        hashes = [hash_to_str(hash) for hash in reversed(hashes)]
        for hex_hashes in chunks(hashes, 50):
            blocks = await self.daemon.raw_blocks(hex_hashes)
            await self.controller.run_in_executor(self.backup_blocks, blocks)
        await self.prefetcher.reset_height()
Example #26
    async def transactions(self, hashX):
        '''Generate (hex_hash, tx_fee, unconfirmed) tuples for mempool
        entries for the hashX.

        unconfirmed is True if any txin is unconfirmed.
        '''
        deserializer = self.coin.DESERIALIZER
        pairs = await self.raw_transactions(hashX)
        result = []
        for hex_hash, raw_tx in pairs:
            item = self.txs.get(hex_hash)
            if not item or not raw_tx:
                continue
            txin_pairs, txout_pairs, tx_fee, tx_size = item
            tx = deserializer(raw_tx).read_tx()
            unconfirmed = any(hash_to_str(txin.prev_hash) in self.txs
                              for txin in tx.inputs)
            result.append((hex_hash, tx_fee, unconfirmed))
        return result
Example #27
    def db_utxo_lookup(self, tx_hash, tx_idx):
        '''Given a prevout return a (hashX, value) pair.

        Raises MissingUTXOError if the UTXO is not found.  Used by the
        mempool code.
        '''
        idx_packed = pack('<H', tx_idx)
        hashX, tx_num_packed = self.db_hashX(tx_hash, idx_packed)
        if not hashX:
            # This can happen when the daemon is a block ahead of us
            # and has mempool txs spending outputs from that new block
            raise self.MissingUTXOError

        # Key: b'u' + address_hashX + tx_idx + tx_num
        # Value: the UTXO value as a 64-bit unsigned integer
        key = b'u' + hashX + idx_packed + tx_num_packed
        db_value = self.utxo_db.get(key)
        if not db_value:
            raise self.DBError('UTXO {} / {:,d} in one table only'.format(
                hash_to_str(tx_hash), tx_idx))
        value, = unpack('<Q', db_value)
        return hashX, value
Example #28
    def read_tx(self):
        '''Return a (Deserialized TX, TX_HASH) pair.

        The hash needs to be reversed for human display; for efficiency
        we process it in the natural serialized order.
        '''
        start = self.cursor
        version = self._get_version()
        if version != self.bitcoin_diamond_tx_version:
            return Tx(
                self._read_le_int32(),  # version
                self._read_inputs(),    # inputs
                self._read_outputs(),   # outputs
                self._read_le_uint32()  # locktime
            ), double_sha256(self.binary[start:self.cursor])
        else:
            return TxBitcoinDiamond(
                self._read_le_int32(),  # version
                hash_to_str(self._read_nbytes(32)),  # blockhash
                self._read_inputs(),  # inputs
                self._read_outputs(),  # outputs
                self._read_le_uint32()  # locktime
            ), double_sha256(self.binary[start:self.cursor])
Example #29
    def electrum_header(cls, header, height):
        version, = struct.unpack('<I', header[:4])
        timestamp, bits, nonce = struct.unpack('<III', header[68:80])

        deserializer = cls.DESERIALIZER(header, start=cls.BASIC_HEADER_SIZE)
        sig_length = deserializer.read_varint()
        header = {
            'block_height': height,
            'version': version,
            'prev_block_hash': hash_to_str(header[4:36]),
            'merkle_root': hash_to_str(header[36:68]),
            'timestamp': timestamp,
            'bits': bits,
            'nonce': nonce,
            'hash_state_root': hash_to_str(header[80:112]),
            'hash_utxo_root': hash_to_str(header[112:144]),
            'hash_prevout_stake': hash_to_str(header[144:176]),
            'hash_prevout_n': struct.unpack('<I', header[176:180])[0],
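            # header[:-sig_length-1:-1] is the last sig_length bytes
            # reversed; hash_to_str reverses again, so the signature is
            # rendered as hex in its original byte order.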
            'sig': hash_to_str(header[:-sig_length-1:-1]),
        }
        return header
Example #30
    def process_raw_txs(self, raw_tx_map, pending):
        '''Process the dictionary of raw transactions and return a dictionary
        of updates to apply to self.txs.

        This runs in the executor so should not update any member
        variables it doesn't own.  Atomic reads of self.txs that do
        not depend on the result remaining the same are fine.
        '''
        script_hashX = self.coin.hashX_from_script
        deserializer = self.coin.DESERIALIZER
        db_utxo_lookup = self.db.db_utxo_lookup
        txs = self.txs

        # Deserialize each tx and put it in a pending list
        for tx_hash, raw_tx in raw_tx_map.items():
            if tx_hash not in txs:
                continue
            tx, tx_size = deserializer(raw_tx).read_tx_and_vsize()

            # Convert the tx outputs into (hashX, value) pairs
            txout_pairs = [(script_hashX(txout.pk_script), txout.value)
                           for txout in tx.outputs]

            # Convert the tx inputs to (prev_hex_hash, prev_idx) pairs
            txin_pairs = [(hash_to_str(txin.prev_hash), txin.prev_idx)
                          for txin in tx.inputs]

            pending.append((tx_hash, txin_pairs, txout_pairs, tx_size))

        # Now process what we can
        result = {}
        deferred = []

        for item in pending:
            if self.stop:
                break

            tx_hash, old_txin_pairs, txout_pairs, tx_size = item
            if tx_hash not in txs:
                continue

            mempool_missing = False
            txin_pairs = []

            try:
                for prev_hex_hash, prev_idx in old_txin_pairs:
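                    # txs maps mempool tx hashes to processed info; a
                    # missing key (default 0) means the prevout is
                    # confirmed, while None means the mempool tx has not
                    # been processed yet.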
                    tx_info = txs.get(prev_hex_hash, 0)
                    if tx_info is None:
                        tx_info = result.get(prev_hex_hash)
                        if not tx_info:
                            mempool_missing = True
                            continue
                    if tx_info:
                        txin_pairs.append(tx_info[1][prev_idx])
                    elif not mempool_missing:
                        prev_hash = hex_str_to_hash(prev_hex_hash)
                        txin_pairs.append(db_utxo_lookup(prev_hash, prev_idx))
            except (self.db.MissingUTXOError, self.db.DBError):
                # DBError can happen when flushing a newly processed
                # block.  MissingUTXOError typically happens just
                # after the daemon has accepted a new block and the
                # new mempool has deps on new txs in that block.
                continue

            if mempool_missing:
                deferred.append(item)
            else:
                # Compute fee
                tx_fee = (sum(v for hashX, v in txin_pairs) -
                          sum(v for hashX, v in txout_pairs))
                result[tx_hash] = (txin_pairs, txout_pairs, tx_fee, tx_size)

        return result, deferred
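The fee computation is plain input-minus-output arithmetic over the (hashX, value) pairs; a worked check with hypothetical values:

    # Inputs worth 5000 + 3000 funding a 7000 output leave a 1000 fee.
    txin_pairs = [(b'x', 5000), (b'y', 3000)]
    txout_pairs = [(b'z', 7000)]
    tx_fee = (sum(v for hashX, v in txin_pairs) -
              sum(v for hashX, v in txout_pairs))
    assert tx_fee == 1000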