Example #1
    def upgrade_db(self):
        self.logger.info('DB version: {:d}'.format(self.db_version))
        self.logger.info('Upgrading your DB; this can take some time...')

        def upgrade_u_prefix(prefix):
            count = 0
            with self.utxo_db.write_batch() as batch:
                batch_delete = batch.delete
                batch_put = batch.put
                # Key: b'u' + address_hashX + tx_idx + tx_num
                for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
                    # Stop at the first key that is not in the old 18-byte
                    # format; longer keys have already been upgraded.
                    if len(db_key) != 18:
                        break
                    count += 1
                    batch_delete(db_key)
                    batch_put(db_key[:14] + b'\0\0' + db_key[14:], db_value)
            return count

        last = time.time()
        count = 0
        for cursor in range(65536):
            prefix = b'u' + pack_be_uint16(cursor)
            count += upgrade_u_prefix(prefix)
            now = time.time()
            if now > last + 10:
                last = now
                self.logger.info(f'DB 1 of 2: {count:,d} entries updated, '
                                 f'{cursor * 100 / 65536:.1f}% complete')
        self.logger.info('DB 1 of 2 upgraded successfully')

        def upgrade_h_prefix(prefix):
            count = 0
            with self.utxo_db.write_batch() as batch:
                batch_delete = batch.delete
                batch_put = batch.put
                # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
                for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
                    # Stop at the first key that is not in the old 11-byte
                    # format; longer keys have already been upgraded.
                    if len(db_key) != 11:
                        break
                    count += 1
                    batch_delete(db_key)
                    batch_put(db_key[:7] + b'\0\0' + db_key[7:], db_value)
            return count

        last = time.time()
        count = 0
        for cursor in range(65536):
            prefix = b'h' + pack_be_uint16(cursor)
            count += upgrade_h_prefix(prefix)
            now = time.time()
            if now > last + 10:
                last = now
                self.logger.info(f'DB 2 of 2: {count:,d} entries updated, '
                                 f'{cursor * 100 / 65536:.1f}% complete')

        self.db_version = max(self.DB_VERSIONS)
        with self.utxo_db.write_batch() as batch:
            self.write_utxo_state(batch)
        self.logger.info('DB 2 of 2 upgraded successfully')
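All of these examples shard the key space by looping cursor over range(65536) and prefixing keys with pack_be_uint16(cursor). A minimal sketch of that helper, assuming the usual struct-based definition (the module it actually lives in is not shown in these excerpts):

import struct

# Assumed helper: 2-byte big-endian encoding of an unsigned 16-bit int.
# Big-endian matters: lexicographic byte order then matches numeric order,
# so iterating cursor 0..65535 visits the keys in database key order.
def pack_be_uint16(value):
    return struct.pack('>H', value)

assert pack_be_uint16(0) == b'\x00\x00'
assert pack_be_uint16(65535) == b'\xff\xff'
assert pack_be_uint16(255) < pack_be_uint16(256)  # sorts numerically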
Example #2
def check_hashX_compaction(history):
    history.max_hist_row_entries = 40
    row_size = history.max_hist_row_entries * 4
    full_hist = array.array('I', range(100)).tobytes()
    hashX = urandom(HASHX_LEN)
    pairs = ((1, 20), (26, 50), (56, 30))

    cum = 0
    hist_list = []
    hist_map = {}
    for flush_count, count in pairs:
        key = hashX + pack_be_uint16(flush_count)
        hist = full_hist[cum * 4:(cum + count) * 4]
        hist_map[key] = hist
        hist_list.append(hist)
        cum += count

    write_items = []
    keys_to_delete = set()
    write_size = history._compact_hashX(hashX, hist_map, hist_list,
                                        write_items, keys_to_delete)
    # Check results for sanity
    assert write_size == len(full_hist)
    assert len(write_items) == 3
    assert len(keys_to_delete) == 3
    assert len(hist_map) == len(pairs)
    for n, item in enumerate(write_items):
        assert item == (hashX + pack_be_uint16(n),
                        full_hist[n * row_size:(n + 1) * row_size])
    for flush_count, count in pairs:
        assert hashX + pack_be_uint16(flush_count) in keys_to_delete

    # Check re-compaction is null
    hist_map = {key: value for key, value in write_items}
    hist_list = [value for key, value in write_items]
    write_items.clear()
    keys_to_delete.clear()
    write_size = history._compact_hashX(hashX, hist_map, hist_list,
                                        write_items, keys_to_delete)
    assert write_size == 0
    assert len(write_items) == 0
    assert len(keys_to_delete) == 0
    assert len(hist_map) == len(pairs)

    # Check re-compaction adding a single tx writes the one row
    hist_list[-1] += array.array('I', [100]).tobytes()
    write_size = history._compact_hashX(hashX, hist_map, hist_list,
                                        write_items, keys_to_delete)
    assert write_size == len(hist_list[-1])
    assert write_items == [(hashX + pack_be_uint16(2), hist_list[-1])]
    assert len(keys_to_delete) == 1
    assert write_items[0][0] in keys_to_delete
    assert len(hist_map) == len(pairs)
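The test encodes the expected row layout: a compacted history is split into fixed-size rows of max_hist_row_entries * 4 bytes, each keyed by hashX + pack_be_uint16(row_index). A hypothetical standalone illustration using the same constants as the test:

import array

max_hist_row_entries = 40
row_size = max_hist_row_entries * 4          # 4 bytes per tx number
full_hist = array.array('I', range(100)).tobytes()

# Slice the flat history into fixed-size rows; only the last may be short.
rows = [full_hist[i:i + row_size] for i in range(0, len(full_hist), row_size)]
assert len(rows) == 3                        # 40 + 40 + 20 entries
assert all(len(row) == row_size for row in rows[:-1])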
Example #3
    def _compact_history(self, limit):
        '''Inner loop of history compaction.  Loops until limit bytes have
        been processed.
        '''
        keys_to_delete = set()
        write_items = []   # A list of (key, value) pairs
        write_size = 0

        # Loop over 2-byte prefixes
        cursor = self.comp_cursor
        while write_size < limit and cursor < 65536:
            prefix = pack_be_uint16(cursor)
            write_size += self._compact_prefix(prefix, write_items,
                                               keys_to_delete)
            cursor += 1

        max_rows = self.comp_flush_count + 1
        self._flush_compaction(cursor, write_items, keys_to_delete)

        self.logger.info('history compaction: wrote {:,d} rows ({:.1f} MB), '
                         'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
                         .format(len(write_items), write_size / 1000000,
                                 len(keys_to_delete), max_rows,
                                 100 * cursor / 65536))
        return write_size
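The method above is a resumable scan: it processes 2-byte prefixes until a byte budget is spent, and the cursor is persisted so the next call continues where this one stopped. A hypothetical standalone sketch of that pattern (compact_some and work_per_prefix are illustrative names, not from the source):

def compact_some(state, work_per_prefix, limit):
    # Process prefixes until at least `limit` bytes have been written,
    # remembering the cursor for the next call.
    written = 0
    while written < limit and state['cursor'] < 65536:
        written += work_per_prefix(state['cursor'])
        state['cursor'] += 1
    return written

state = {'cursor': 0}
assert compact_some(state, lambda cursor: 10, limit=35) == 40
assert state['cursor'] == 4          # resumes at prefix 4 next time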
Example #4
    def _compact_hashY_topic(self, hashY_topic, hist_map, hist_list,
                             write_items, keys_to_delete):
        '''Compress history for a hashY.  hist_list is an ordered list of
        the histories to be compressed.'''
        # History entries (tx numbers) are 4 bytes each.  Distribute
        # over rows of up to 50KB in size.  A fixed row size means
        # future compactions will not need to update the first N - 1
        # rows.
        max_row_size = self.max_hist_row_entries * 4
        full_hist = b''.join(hist_list)
        nrows = (len(full_hist) + max_row_size - 1) // max_row_size
        if nrows > 4:
            self.logger.info('hashY {} is large: {:,d} entries across '
                             '{:,d} rows'.format(hash_to_hex_str(hashY_topic),
                                                 len(full_hist) // 4, nrows))

        # Find what history needs to be written, and what keys need to
        # be deleted.  Start by assuming all keys are to be deleted,
        # and then remove those that are the same on-disk as when
        # compacted.
        write_size = 0
        keys_to_delete.update(hist_map)
        for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
            key = hashY_topic + pack_be_uint16(n)
            if hist_map.get(key) == chunk:
                keys_to_delete.remove(key)
            else:
                write_items.append((key, chunk))
                write_size += len(chunk)

        assert n + 1 == nrows
        self.comp_flush_count = max(self.comp_flush_count, n)

        return write_size
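util.chunks is assumed to yield consecutive fixed-size slices of a sequence; a minimal sketch consistent with how it is called above:

def chunks(items, size):
    # Yield consecutive slices of `items`, each `size` long
    # (the final slice may be shorter).
    for i in range(0, len(items), size):
        yield items[i:i + size]

assert list(chunks(b'abcdefg', 3)) == [b'abc', b'def', b'g']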
Example #5
    def serialize(self):
        assert (len(self.proTxHash) == 32 and len(self.ipAddress) == 16
                and len(self.inputsHash) == 32 and len(self.payloadSig) == 96)
        return (
            pack_le_uint16(self.version) +  # version
            self.proTxHash +  # proTxHash
            self.ipAddress +  # ipAddress
            pack_be_uint16(self.port) +  # port
            pack_varbytes(self.scriptOperatorPayout) +  # scriptOperatorPayout
            self.inputsHash +  # inputsHash
            self.payloadSig  # payloadSig
        )
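Note that the port is packed big-endian (network byte order) while the version is little-endian. The packing helpers are not shown in the excerpt; below is a sketch of plausible definitions, assuming a Bitcoin-style CompactSize length prefix for pack_varbytes (an assumption; verify against the real helper before relying on the wire format):

import struct

def pack_le_uint16(value):
    # Little-endian counterpart of pack_be_uint16.
    return struct.pack('<H', value)

def pack_varint(n):
    # Bitcoin CompactSize: 1, 3, 5 or 9 bytes depending on magnitude.
    if n < 253:
        return struct.pack('<B', n)
    if n < 65536:
        return b'\xfd' + struct.pack('<H', n)
    if n < 4294967296:
        return b'\xfe' + struct.pack('<I', n)
    return b'\xff' + struct.pack('<Q', n)

def pack_varbytes(data):
    # Length-prefixed byte string.
    return pack_varint(len(data)) + data

assert pack_varbytes(b'abc') == b'\x03abc'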
Example #6
    def upgrade_cursor(cursor):
        count = 0
        prefix = pack_be_uint16(cursor)
        key_len = HASHX_LEN + 2
        chunks = util.chunks
        with self.db.write_batch() as batch:
            batch_put = batch.put
            for key, hist in self.db.iterator(prefix=prefix):
                # Ignore non-history entries
                if len(key) != key_len:
                    continue
                count += 1
                hist = b''.join(item + b'\0' for item in chunks(hist, 4))
                batch_put(key, hist)
            self.upgrade_cursor = cursor
            self.write_state(batch)
        return count
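The join widens every stored tx number from 4 to 5 bytes by appending a zero byte to each 4-byte chunk; with little-endian storage the appended zero is the new high byte, so the values are unchanged. A standalone illustration (assuming the common 4-byte 'I' array type):

import array

old_hist = array.array('I', [1, 2, 3]).tobytes()      # 4 bytes per entry
new_hist = b''.join(old_hist[i:i + 4] + b'\0'
                    for i in range(0, len(old_hist), 4))
assert len(new_hist) == 15                            # now 5 bytes per entry
assert int.from_bytes(new_hist[5:10], 'little') == 2  # values preserved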
Example #7
    def serialize(self):
        assert (len(self.ipAddress) == 16 and len(self.KeyIdOwner) == 20
                and len(self.PubKeyOperator) == 48
                and len(self.KeyIdVoting) == 20 and len(self.inputsHash) == 32)
        return (pack_le_uint16(self.version) +  # version
                pack_le_uint16(self.type) +  # type
                pack_le_uint16(self.mode) +  # mode
                self.collateralOutpoint.serialize() +  # collateralOutpoint
                self.ipAddress +  # ipAddress
                pack_be_uint16(self.port) +  # port
                self.KeyIdOwner +  # KeyIdOwner
                self.PubKeyOperator +  # PubKeyOperator
                self.KeyIdVoting +  # KeyIdVoting
                pack_le_uint16(self.operatorReward) +  # operatorReward
                pack_varbytes(self.scriptPayout) +  # scriptPayout
                self.inputsHash +  # inputsHash
                pack_varbytes(self.payloadSig)  # payloadSig
                )
Example #8
    def flush(self):
        start_time = time.monotonic()
        self.flush_count += 1
        flush_id = pack_be_uint16(self.flush_count)
        unflushed = self.unflushed

        with self.db.write_batch() as batch:
            for hashX in sorted(unflushed):
                key = hashX + flush_id
                batch.put(key, bytes(unflushed[hashX]))
            self.write_state(batch)

        count = len(unflushed)
        unflushed.clear()
        self.unflushed_count = 0

        if self.db.for_sync:
            elapsed = time.monotonic() - start_time
            self.logger.info(f'flushed history in {elapsed:.1f}s '
                             f'for {count:,d} addrs')
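Packing the flush id big-endian means the per-hashX keys sort in flush order under the database's lexicographic comparison, so a prefix iterator returns history chunks oldest-first. A quick check of that property:

import struct

keys = [struct.pack('>H', n) for n in (1, 2, 255, 256, 300)]
assert keys == sorted(keys)              # byte order == numeric order

# A little-endian encoding would not sort correctly:
le_keys = [struct.pack('<H', n) for n in (255, 256)]
assert le_keys != sorted(le_keys)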
Example #9
    def flush(self):
        start_time = time.time()
        self.flush_count += 1
        flush_id = pack_be_uint16(self.flush_count)
        unflushed = self.unflushed
        with self.db.write_batch() as batch:
            for hashY in sorted(unflushed):
                key = hashY + flush_id
                # Store the 2-D data flattened into a single 1-D byte string
                batch.put(key,
                          b''.join([x.tobytes() for x in unflushed[hashY]]))
            self.write_state(batch)

        count = len(unflushed)
        unflushed.clear()
        self.unflushed_count = 0

        if self.db.for_sync:
            elapsed = time.time() - start_time
            self.logger.info(f'flushed eventlog in {elapsed:.1f}s '
                             f'for {count:,d} addrs')
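Since every entry has a fixed width, the flattened value can be read back with a single frombytes call. A hypothetical round-trip, assuming 4-byte 'I' entries:

import array

rows = [array.array('I', [1, 2]), array.array('I', [3])]
flat = b''.join(x.tobytes() for x in rows)    # what batch.put stores
recovered = array.array('I')
recovered.frombytes(flat)                     # read side: one flat array
assert recovered.tolist() == [1, 2, 3]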
Example #10
    def upgrade_db(self):
        self.logger.info(f'UTXO DB version: {self.db_version}')
        self.logger.info('Upgrading your DB; this can take some time...')

        def upgrade_u_prefix(prefix):
            count = 0
            with self.utxo_db.write_batch() as batch:
                batch_delete = batch.delete
                batch_put = batch.put
                # Key: b'u' + address_hashX + tx_idx + tx_num
                # Peek at the first key: if it is already 21 bytes long,
                # this prefix has been upgraded and there is nothing to do.
                for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
                    if len(db_key) == 21:
                        return count
                    break
                if self.db_version == 6:
                    for db_key, db_value in self.utxo_db.iterator(
                            prefix=prefix):
                        count += 1
                        batch_delete(db_key)
                        batch_put(db_key[:14] + b'\0\0' + db_key[14:] + b'\0',
                                  db_value)
                else:
                    for db_key, db_value in self.utxo_db.iterator(
                            prefix=prefix):
                        count += 1
                        batch_delete(db_key)
                        batch_put(db_key + b'\0', db_value)
            return count

        last = time.time()
        count = 0
        for cursor in range(65536):
            prefix = b'u' + pack_be_uint16(cursor)
            count += upgrade_u_prefix(prefix)
            now = time.time()
            if now > last + 10:
                last = now
                self.logger.info(f'DB 1 of 3: {count:,d} entries updated, '
                                 f'{cursor * 100 / 65536:.1f}% complete')
        self.logger.info('DB 1 of 3 upgraded successfully')

        def upgrade_h_prefix(prefix):
            count = 0
            with self.utxo_db.write_batch() as batch:
                batch_delete = batch.delete
                batch_put = batch.put
                # Key: b'h' + compressed_tx_hash + tx_idx + tx_num
                # Peek at the first key: if it is already 14 bytes long,
                # this prefix has been upgraded and there is nothing to do.
                for db_key, db_value in self.utxo_db.iterator(prefix=prefix):
                    if len(db_key) == 14:
                        return count
                    break
                if self.db_version == 6:
                    for db_key, db_value in self.utxo_db.iterator(
                            prefix=prefix):
                        count += 1
                        batch_delete(db_key)
                        batch_put(db_key[:7] + b'\0\0' + db_key[7:] + b'\0',
                                  db_value)
                else:
                    for db_key, db_value in self.utxo_db.iterator(
                            prefix=prefix):
                        count += 1
                        batch_delete(db_key)
                        batch_put(db_key + b'\0', db_value)
            return count

        last = time.time()
        count = 0
        for cursor in range(65536):
            prefix = b'h' + pack_be_uint16(cursor)
            count += upgrade_h_prefix(prefix)
            now = time.time()
            if now > last + 10:
                last = now
                self.logger.info(f'DB 2 of 3: {count:,d} entries updated, '
                                 f'{cursor * 100 / 65536:.1f}% complete')

        self.logger.info('DB 2 of 3 upgraded successfully')

        # Upgrade the tx_counts file.  The old format stored 4-byte counts;
        # the new format stores 8-byte counts.  If the bytes read back are
        # only half the requested size, the file is still in the old format.
        size = (self.db_height + 1) * 8
        tx_counts = self.tx_counts_file.read(0, size)
        if len(tx_counts) == (self.db_height + 1) * 4:
            tx_counts = array('I', tx_counts)
            tx_counts = array('Q', tx_counts)
            self.tx_counts_file.write(0, tx_counts.tobytes())

        self.db_version = max(self.DB_VERSIONS)
        with self.utxo_db.write_batch() as batch:
            self.write_utxo_state(batch)
        self.logger.info('DB 3 of 3 upgraded successfully')
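The tx_counts step relies on array's constructor accepting any iterable of ints, so round-tripping through array('Q', array('I', ...)) widens each count from 4 to 8 bytes without changing its value:

from array import array

old = array('I', [10, 25, 40])    # 4-byte counts (old format)
new = array('Q', old)             # 8-byte counts (new format)
assert new.tolist() == old.tolist()
assert len(new.tobytes()) == 2 * len(old.tobytes())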