def _header_for_block(block):
    """Return a BlockHeader parsed from the serialized form of *block*.

    The block is streamed into an in-memory buffer and only the leading
    header portion is parsed back out.
    """
    from pycoin.block import BlockHeader
    buffer = io.BytesIO()
    block.stream(buffer)
    # re-wrap the raw bytes so parsing starts from offset 0
    return BlockHeader.parse(io.BytesIO(buffer.getvalue()))
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50,
                           base_dir=None, headers_only=False):
    """
    This method loads blocks from disk, skipping any orphan blocks.

    Headers are fed to a BlockChain in batches; once a header is buried
    more than cached_headers deep in the longest chain, the corresponding
    block (or just its header, when headers_only is set) is yielded in
    chain order and locked into the BlockChain.

    start_info: (file_index, offset) location in the block files to start from
    cached_headers: how many most-recent headers to hold back before yielding
    batch_size: number of headers handed to BlockChain.add_headers at a time
    base_dir: directory containing the block files
    headers_only: when True, parse and yield BlockHeader objects instead of Blocks

    NOTE(review): this file contains multiple definitions of
    locked_blocks_iterator; the last one defined wins at import time, so
    this version is shadowed -- confirm which implementation is intended.
    NOTE(review): any headers left in the final partial batch, and the
    last cached_headers entries of current_state, are never yielded --
    presumably deliberate (the chain tip may still reorganize), but verify.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    # Prime the file position at the first block record.  The parsed
    # header itself is not needed; parsing just validates the record.
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        BlockHeader.parse(f)
        break
    current_state = []

    def change_state(bc, ops):
        # Mirror the BlockChain's add/remove operations into current_state
        # so it always reflects the current longest chain.
        for op, bh, work in ops:
            if op == 'add':
                current_state.append(bh)
            else:
                current_state.pop()

    bc = BlockChain()
    bc.add_change_callback(change_state)
    bhs = []
    index = 0
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        bh.info = info
        bhs.append(bh)
        if len(bhs) > batch_size:
            bc.add_headers(bhs)
            bhs = []
            if len(current_state) > cached_headers:
                # Everything before the trailing cached_headers entries is
                # buried deeply enough to be considered final: yield it.
                for bh in current_state[:cached_headers]:
                    f.jump_to(bh.info)
                    block = block_class.parse(f)
                    yield block
                    index += 1
                bc.lock_to_index(index)
                current_state = current_state[cached_headers:]
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50,
                           base_dir=None, headers_only=False):
    """
    This method loads blocks from disk, skipping any orphan blocks.

    Tracks candidate chains in a hash-indexed table; once the best chain
    is more than cached_headers + batch_size long, the oldest entries are
    considered locked in, yielded in chain order, and pruned from the table.

    start_info: (file_index, offset) location in the block files to start from
    cached_headers: how many most-recent headers to hold back before yielding
    batch_size: extra slack beyond cached_headers before a flush is triggered
    base_dir: directory containing the block files
    headers_only: when True, parse and yield BlockHeader objects instead of Blocks

    NOTE(review): this file contains multiple definitions of
    locked_blocks_iterator; the last one defined wins at import time --
    confirm which implementation is intended.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    # Grab the first header so its previous_block_hash can seed the table.
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        initial_header = BlockHeader.parse(f)
        break
    # index_table maps block hash -> (chain index, offset info, header).
    # The sentinel entry for the parent of the first block anchors the chain.
    index_table = {initial_header.previous_block_hash: (-1, None, None)}
    head_hash = initial_header.previous_block_hash
    max_index = -1
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        t = index_table.get(bh.previous_block_hash)
        if t is None:
            # parent unknown: an orphan relative to everything seen so far
            logger.debug("ignoring block with hash %s" % bh.id())
            continue
        (parent_index, info_1, parent_bh) = t
        h = bh.hash()
        index_table[h] = (parent_index + 1, info, bh)
        max_index = max(max_index, parent_index + 1)
        # length of the best chain beyond the current locked head
        chain_length = max_index - index_table[head_hash][0]
        if chain_length > cached_headers + batch_size:
            # Walk back from the newest block to the head to recover the
            # best chain, then reverse it into oldest-first order.
            last_hash = h
            best_chain = [last_hash]
            while last_hash != head_hash:
                bh = index_table[last_hash][-1]
                if bh is None:
                    break
                last_hash = bh.previous_block_hash
                best_chain.append(last_hash)
            best_chain.reverse()
            # Yield the oldest cached_headers blocks: they are now locked in.
            for h in best_chain[:cached_headers]:
                (parent_index, info_1, parent_bh) = index_table[h]
                if info_1:
                    f.jump_to(info_1)
                    block = block_class.parse(f)
                    yield block
            # Prune the table down to the unconfirmed tail and advance the head.
            index_table = dict((k, index_table.get(k))
                               for k in best_chain[cached_headers:] if k in index_table)
            head_hash = best_chain[cached_headers]
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50,
                           base_dir=None, headers_only=False):
    """
    This method loads blocks from disk, skipping any orphan blocks.

    Headers are fed to a BlockChain in batches; once a header is buried
    more than cached_headers deep in the longest chain, the corresponding
    block (or just its header, when headers_only is set) is yielded in
    chain order and locked into the BlockChain.

    start_info: (file_index, offset) location in the block files to start from
    cached_headers: how many most-recent headers to hold back before yielding
    batch_size: number of headers handed to BlockChain.add_headers at a time
    base_dir: directory containing the block files
    headers_only: when True, parse and yield BlockHeader objects instead of Blocks

    NOTE(review): this is the third definition of locked_blocks_iterator in
    this file (so this one wins at import time); it is a near-duplicate of
    an earlier one -- confirm the duplication is intentional.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    # Prime the file position at the first block record.
    # NOTE(review): initial_header is never used afterwards.
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        initial_header = BlockHeader.parse(f)
        break
    current_state = []

    def change_state(bc, ops):
        # Mirror the BlockChain's add/remove operations into current_state
        # so it always reflects the current longest chain.
        for op, bh, work in ops:
            if op == 'add':
                current_state.append(bh)
                pass
            else:
                current_state.pop()

    bc = BlockChain()
    bc.add_change_callback(change_state)
    bhs = []
    index = 0
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        bh.info = info
        bhs.append(bh)
        if len(bhs) > batch_size:
            bc.add_headers(bhs)
            bhs = []
            if len(current_state) > cached_headers:
                # Everything before the trailing cached_headers entries is
                # buried deeply enough to be considered final: yield it.
                for bh in current_state[:cached_headers]:
                    f.jump_to(bh.info)
                    block = block_class.parse(f)
                    yield block
                    index += 1
                bc.lock_to_index(index)
                current_state = current_state[cached_headers:]
def blockheader_for_offset_info(offset_info, base_dir=None):
    """Parse and return the BlockHeader stored at *offset_info* in the block files.

    offset_info: (file_index, offset) location of the block record
    base_dir: directory containing the block files

    The Blockfiles handle is always closed, even if parsing raises --
    the original version leaked the handle on a parse failure.
    """
    f = Blockfiles(base_dir, offset_info)
    try:
        return BlockHeader.parse(f)
    finally:
        f.close()