Example #1
import io


def _header_for_block(block):
    from pycoin.block import BlockHeader

    # Serialize the full block, then reparse just the header fields
    # from the front of the stream.
    f = io.BytesIO()
    block.stream(f)
    f.seek(0)
    return BlockHeader.parse(f)
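A minimal usage sketch, assuming a pycoin Block instance; the file name below is hypothetical:

import io
from pycoin.block import Block

# Hypothetical source of raw block bytes.
with open("block0.bin", "rb") as fp:
    block = Block.parse(fp)

header = _header_for_block(block)
print(header.id())  # hex id of the block header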
Example #2
def get_blockheader_with_transaction_hashes(self, block_hash):
    # Fetch block data as JSON from an insight-style block API.
    URL = "%s/api/block/%s" % (self.base_url, b2h_rev(block_hash))
    r = json.loads(urlopen(URL).read().decode("utf8"))
    version = r.get("version")
    # The API reports hashes as reversed hex, so undo that with h2b_rev.
    previous_block_hash = h2b_rev(r.get("previousblockhash"))
    merkle_root = h2b_rev(r.get("merkleroot"))
    timestamp = r.get("time")
    difficulty = int(r.get("bits"), 16)
    nonce = int(r.get("nonce"))
    tx_hashes = [h2b_rev(tx_hash) for tx_hash in r.get("tx")]
    blockheader = BlockHeader(version, previous_block_hash, merkle_root, timestamp, difficulty, nonce)
    # Reject the response if the rebuilt header does not hash to the
    # requested block hash.
    if blockheader.hash() != block_hash:
        return None, None
    # Reject it if the transaction list does not match the merkle root.
    calculated_hash = merkle(tx_hashes, double_sha256)
    if calculated_hash != merkle_root:
        return None, None
    blockheader.height = r.get("height")
    return blockheader, tx_hashes
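A hedged usage sketch; `provider` stands in for whatever service object defines this method, the all-zero hash is a placeholder rather than a real block, and the import path matches older pycoin releases:

from pycoin.serialize import h2b_rev

block_hash = h2b_rev("00" * 32)  # placeholder hash, not a real block
header, tx_hashes = provider.get_blockheader_with_transaction_hashes(block_hash)
if header is None:
    print("header hash or merkle root failed validation")
else:
    print("height %s, %d transactions" % (header.height, len(tx_hashes)))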
Example #3
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50, base_dir=None,
                           headers_only=False):
    """
    Load blocks from disk in best-chain order, skipping any orphan blocks.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    # Prime the file position by parsing one header at the start location.
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        BlockHeader.parse(f)
        break
    current_state = []

    def change_state(bc, ops):
        # Mirror the BlockChain's add/remove operations into current_state,
        # so it always holds the headers of the current best chain.
        for op, bh, work in ops:
            if op == 'add':
                current_state.append(bh)
            else:
                current_state.pop()

    bc = BlockChain()
    bc.add_change_callback(change_state)
    bhs = []
    index = 0
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        bh.info = info
        bhs.append(bh)
        if len(bhs) > batch_size:
            # Feed a batch of headers to the BlockChain, which resolves
            # the best chain and triggers change_state.
            bc.add_headers(bhs)
            bhs = []
            if len(current_state) > cached_headers:
                # Yield only headers buried more than cached_headers deep,
                # so a late-arriving fork cannot orphan anything yielded.
                for bh in current_state[:cached_headers]:
                    f.jump_to(bh.info)
                    block = block_class.parse(f)
                    yield block
                    index += 1
                    bc.lock_to_index(index)
                current_state = current_state[cached_headers:]
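A hedged usage sketch; the directory path is an assumption and should point at a bitcoind-style block directory containing the blk*.dat files:

# Path below is hypothetical.
for block in locked_blocks_iterator(base_dir="/path/to/blocks", headers_only=True):
    print(block.id())  # headers of the best chain, in order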
Example #4
def locked_blocks_iterator(start_info=(0, 0),
                           cached_headers=50,
                           batch_size=50,
                           base_dir=None,
                           headers_only=False):
    """
    Load blocks from disk in best-chain order, skipping any orphan blocks.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        initial_header = BlockHeader.parse(f)
        break
    # index_table maps block hash -> (chain index, offset info, header).
    # The parent of the first header acts as a sentinel at index -1.
    index_table = {initial_header.previous_block_hash: (-1, None, None)}
    head_hash = initial_header.previous_block_hash

    max_index = -1
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        t = index_table.get(bh.previous_block_hash)
        if t is None:
            # Parent unknown, so this block is an orphan; skip it.
            logger.debug("ignoring block with hash %s" % bh.id())
            continue
        (parent_index, info_1, parent_bh) = t
        h = bh.hash()
        index_table[h] = (parent_index + 1, info, bh)
        max_index = max(max_index, parent_index + 1)
        chain_length = max_index - index_table[head_hash][0]
        if chain_length > cached_headers + batch_size:
            # Walk previous-hash links from the tip back to the head to
            # recover the current best chain, then put it in forward order.
            last_hash = h
            best_chain = [last_hash]
            while last_hash != head_hash:
                bh = index_table[last_hash][-1]
                if bh is None:
                    break
                last_hash = bh.previous_block_hash
                best_chain.append(last_hash)
            best_chain.reverse()
            # Yield the oldest cached_headers entries; they are buried
            # too deep for a reorg to touch them.
            for h in best_chain[:cached_headers]:
                (parent_index, info_1, parent_bh) = index_table[h]
                if info_1:
                    f.jump_to(info_1)
                    block = block_class.parse(f)
                    yield block
            # Drop the yielded entries and advance the window's head.
            index_table = dict((k, index_table.get(k))
                               for k in best_chain[cached_headers:]
                               if k in index_table)
            head_hash = best_chain[cached_headers]
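To make the back-tracking step concrete, here is a self-contained toy run (all data hypothetical) of the same walk from the tip back to the head sentinel:

class FakeHeader:
    def __init__(self, previous_block_hash):
        self.previous_block_hash = previous_block_hash

# hash -> (chain index, offset info, header), as in index_table above
index_table = {
    "a": (0, ("blk0", 0), FakeHeader("head")),
    "b": (1, ("blk0", 285), FakeHeader("a")),
    "c": (2, ("blk0", 570), FakeHeader("b")),
}
head_hash, last_hash = "head", "c"
best_chain = [last_hash]
while last_hash != head_hash:
    last_hash = index_table[last_hash][-1].previous_block_hash
    best_chain.append(last_hash)
best_chain.reverse()
print(best_chain)  # ['head', 'a', 'b', 'c']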
Example #5
def make_headers(count, header=None):
    # Build a chain of `count` synthetic headers, each linking to the
    # previous one; start from HASH_INITIAL_BLOCK if no parent is given.
    if header is None:
        last_hash = HASH_INITIAL_BLOCK
    else:
        last_hash = header.hash()
    tweak = last_hash
    headers = []
    for i in range(count):
        headers.append(
            BlockHeader(version=1,
                        previous_block_hash=last_hash,
                        merkle_root=make_hash(i, tweak),
                        timestamp=GENESIS_TIME + i * 600,  # 10-minute spacing
                        difficulty=DEFAULT_DIFFICULTY,
                        nonce=i * 137))
        last_hash = headers[-1].hash()
    return headers
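A usage sketch, assuming the constants this helper relies on (HASH_INITIAL_BLOCK, GENESIS_TIME, DEFAULT_DIFFICULTY, make_hash) are in scope: build a short chain and check that each header links to its parent.

headers = make_headers(10)
for parent, child in zip(headers, headers[1:]):
    assert child.previous_block_hash == parent.hash()
print(headers[-1].id())  # id of the tip header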
Example #6
def blockheader_for_offset_info(offset_info, base_dir=None):
    # Seek to the given on-disk location and parse a single block header.
    f = Blockfiles(base_dir, offset_info)
    block = BlockHeader.parse(f)
    f.close()
    return block
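A hedged sketch; the exact shape of offset_info comes from block_info_iterator, and a (file index, byte offset) pair plus the directory path are assumptions here:

# Both the offset pair and the path are hypothetical.
bh = blockheader_for_offset_info((0, 8), base_dir="/path/to/blocks")
print(bh.id(), bh.timestamp)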