def test_chain_locking():
    """Exercise BlockChain header batching and incremental locking.

    Adds COUNT batches of SIZE fake headers, checking after every batch
    that the chain length, locked length, missing-parent set and
    per-index tuples are what we expect, then advances the lock point.
    """
    SIZE = 2000
    COUNT = 200
    # FakeBlock(i, i - 1): header with hash i whose parent hash is i - 1.
    ITEMS = [FakeBlock(i, i - 1) for i in range(SIZE * COUNT)]
    # The genesis header's parent is the sentinel parent_for_0.
    ITEMS[0] = FakeBlock(0, parent_for_0)
    BC = BlockChain(parent_for_0)
    assert longest_block_chain(BC) == []
    assert BC.locked_length() == 0
    assert BC.length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    for i in range(COUNT):
        start, end = i * SIZE, (i + 1) * SIZE
        # After the previous iteration we locked to (start - 10); clamp for i == 0.
        lock_start = max(0, start - 10)
        expected_parent = lock_start - 1 if lock_start else parent_for_0
        assert BC.length() == start
        assert BC.locked_length() == lock_start
        ops = BC.add_headers(ITEMS[start:end])
        assert ops == [("add", ITEMS[i], i) for i in range(start, end)]
        assert longest_locked_block_chain(BC) == list(range(lock_start, end))
        assert set(BC.chain_finder.missing_parents()) == {expected_parent}
        assert BC.parent_hash == expected_parent
        assert BC.locked_length() == lock_start
        assert BC.length() == end
        # Use a distinct loop variable: the original reused `i`, shadowing
        # the outer batch index for the remainder of its iteration.
        for idx in range(start, end):
            v = BC.tuple_for_index(idx)
            assert v[0] == idx
            # BUG FIX: the conditional expression must be parenthesized.
            # Unparenthesized, this parsed as
            #     assert ((v[1] == parent_for_0) if idx == 0 else idx)
            # so for every idx > 0 it asserted the truthy integer `idx`
            # and checked nothing at all.
            assert v[1] == (parent_for_0 if idx == 0 else idx)
        assert BC.index_for_hash(-1) is None
        assert BC.locked_length() == max(0, lock_start)
        BC.lock_to_index(end - 10)
        assert BC.locked_length() == end - 10
def test_chain_locking():
    """Exercise BlockChain header batching and incremental locking.

    NOTE(review): this file defines ``test_chain_locking`` twice; this
    later definition shadows the earlier one, so only this body runs
    under a test collector that imports the module.
    """
    SIZE = 2000
    COUNT = 200
    # FakeBlock(i, i - 1): header with hash i whose parent hash is i - 1.
    ITEMS = [FakeBlock(i, i - 1) for i in range(SIZE * COUNT)]
    # The genesis header's parent is the sentinel parent_for_0.
    ITEMS[0] = FakeBlock(0, parent_for_0)
    BC = BlockChain(parent_for_0)
    assert longest_block_chain(BC) == []
    assert BC.locked_length() == 0
    assert BC.length() == 0
    assert set(BC.chain_finder.missing_parents()) == set()
    for i in range(COUNT):
        start, end = i * SIZE, (i + 1) * SIZE
        # The previous iteration locked to (start - 10); clamp for i == 0.
        lock_start = max(0, start - 10)
        expected_parent = lock_start - 1 if lock_start else parent_for_0
        assert BC.length() == start
        assert BC.locked_length() == lock_start
        ops = BC.add_headers(ITEMS[start:end])
        assert ops == [("add", ITEMS[i], i) for i in range(start, end)]
        assert longest_locked_block_chain(BC) == list(range(lock_start, end))
        assert set(BC.chain_finder.missing_parents()) == {expected_parent}
        assert BC.parent_hash == expected_parent
        assert BC.locked_length() == lock_start
        assert BC.length() == end
        # Distinct loop variable: the original reused `i`, shadowing the
        # outer batch index.
        for idx in range(start, end):
            v = BC.tuple_for_index(idx)
            assert v[0] == idx
            # BUG FIX: parenthesize the conditional. Unparenthesized it
            # parsed as ((v[1] == parent_for_0) if idx == 0 else idx),
            # which for idx > 0 asserted the truthy integer `idx` and
            # verified nothing.
            assert v[1] == (parent_for_0 if idx == 0 else idx)
        assert BC.index_for_hash(-1) is None
        assert BC.locked_length() == max(0, lock_start)
        BC.lock_to_index(end - 10)
        assert BC.locked_length() == end - 10
def locked_blocks_iterator(blockfile, start_info=(0, 0), cached_headers=50, batch_size=50):
    """
    Yield block headers from disk in chain order, skipping any orphan blocks.

    Headers are fed to a BlockChain in batches of `batch_size`; whenever the
    chain of accepted headers grows past `cached_headers`, the oldest portion
    is yielded (each header tagged with its chain `index`) and the chain is
    locked up to that point. The trailing `cached_headers` entries are held
    back as a buffer against reorgs, so they are never yielded.
    """
    f = blockfile
    chain_tail = []

    def on_change(bc, ops):
        # Mirror chain updates into chain_tail: 'add' appends the header,
        # anything else (a reorg removal) pops the most recent one.
        for op, header, work in ops:
            if op == 'add':
                chain_tail.append(header)
            else:
                chain_tail.pop()

    bc = BlockChain()
    bc.add_change_callback(on_change)

    pending = []
    index = 0
    info_offset = start_info
    while True:
        step = blockfile.next_offset(info_offset)
        if step is None:
            break
        block_offset, info_offset = step
        f.jump_to(block_offset)
        header = Block.parse_as_header(f)
        header.info = block_offset
        pending.append(header)
        if len(pending) > batch_size:
            bc.add_headers(pending)
            pending = []
            if len(chain_tail) > cached_headers:
                for header in chain_tail[:cached_headers]:
                    header.index = index
                    yield header
                    index += 1
                bc.lock_to_index(index)
                # Rebinding is visible to on_change via the closure cell.
                chain_tail = chain_tail[cached_headers:]
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50,
                           base_dir=None, headers_only=False):
    """
    Yield blocks (or just headers, if `headers_only`) from disk in chain
    order, skipping any orphan blocks.

    Headers are fed to a BlockChain in batches of `batch_size`; once the
    chain of accepted headers exceeds `cached_headers`, the oldest portion
    is re-read from disk, parsed as `block_class`, and yielded, and the
    chain is locked up to that point. The trailing `cached_headers`
    entries are held back as a reorg buffer and are never yielded.
    """
    block_class = BlockHeader if headers_only else Block
    f = Blockfiles(base_dir, start_info)
    # Parse the first header at the starting location — presumably a
    # fail-fast sanity check on the start position (TODO confirm).
    # The parsed value was assigned to an unused local in the original;
    # `f` is repositioned with jump_to() before every real parse below,
    # so only the parse call itself is kept.
    for initial_location in block_info_iterator(start_info, base_dir):
        f.jump_to(initial_location)
        BlockHeader.parse(f)
        break
    current_state = []

    def change_state(bc, ops):
        # Mirror chain updates: 'add' appends the header, anything else
        # (a reorg removal) pops the most recent one.
        for op, bh, work in ops:
            if op == 'add':
                current_state.append(bh)
            else:
                current_state.pop()

    bc = BlockChain()
    bc.add_change_callback(change_state)
    bhs = []
    index = 0
    for info in block_info_iterator(start_info, base_dir):
        bh = blockheader_for_offset_info(info, base_dir)
        bh.info = info
        bhs.append(bh)
        if len(bhs) > batch_size:
            bc.add_headers(bhs)
            bhs = []
            if len(current_state) > cached_headers:
                for bh in current_state[:cached_headers]:
                    # Re-read the full block (or header) from its stored
                    # file offset before yielding it.
                    f.jump_to(bh.info)
                    block = block_class.parse(f)
                    yield block
                    index += 1
                bc.lock_to_index(index)
                # Rebinding is visible to change_state via the closure cell.
                current_state = current_state[cached_headers:]
def locked_blocks_iterator(start_info=(0, 0), cached_headers=50, batch_size=50,
                           base_dir=None, headers_only=False):
    """
    Yield blocks (or just headers, if `headers_only`) from disk in chain
    order, skipping any orphan blocks.

    Headers are added to a BlockChain in batches of `batch_size`; once the
    accepted chain exceeds `cached_headers` entries, the oldest portion is
    re-read from its stored file offset, parsed, and yielded, and the chain
    is locked up to that point. The trailing `cached_headers` entries stay
    buffered against reorgs and are never yielded.
    """
    parse_method = Block.parse_as_header if headers_only else Block.parse
    f = Blockfiles(base_dir, start_info)
    for first_location in block_info_iterator(start_info, base_dir):
        # Position on and parse the first entry, then stop — the result is
        # discarded; f is repositioned before every parse below.
        f.jump_to(first_location)
        parse_method(f)
        break
    confirmed = []

    def record_ops(bc, ops):
        # Keep `confirmed` in sync with the chain: append on 'add',
        # pop the newest entry on anything else (a reorg removal).
        for op, header, work in ops:
            if op == 'add':
                confirmed.append(header)
            else:
                confirmed.pop()

    bc = BlockChain()
    bc.add_change_callback(record_ops)

    batch = []
    index = 0
    for info in block_info_iterator(start_info, base_dir):
        header = blockheader_for_offset_info(info, base_dir)
        header.info = info
        batch.append(header)
        if len(batch) > batch_size:
            bc.add_headers(batch)
            batch = []
            if len(confirmed) > cached_headers:
                for header in confirmed[:cached_headers]:
                    f.jump_to(header.info)
                    yield parse_method(f)
                    index += 1
                bc.lock_to_index(index)
                # Rebinding is visible to record_ops via the closure cell.
                confirmed = confirmed[cached_headers:]