def run_deltas(spec, state):
    """
    Run all deltas functions yielding:
      - pre-state ('pre')
      - source deltas ('source_deltas')
      - target deltas ('target_deltas')
      - head deltas ('head_deltas')
      - not if is_post_altair(spec)
          - inclusion delay deltas ('inclusion_delay_deltas')
      - inactivity penalty deltas ('inactivity_penalty_deltas')
    """
    yield 'pre', state

    if is_post_altair(spec):
        # Altair replaced the per-component delta functions with a single
        # flag-index-based function; wrap it so each component still has a
        # callable with the phase0-style `fn(state) -> (rewards, penalties)`
        # signature expected by run_attestation_component_deltas.
        def get_source_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX,
                                              spec.TIMELY_SOURCE_WEIGHT)

        def get_head_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX,
                                              spec.TIMELY_HEAD_WEIGHT)

        def get_target_deltas(state):
            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX,
                                              spec.TIMELY_TARGET_WEIGHT)

    # Pre-Altair the spec provides the component functions directly; post-Altair
    # the local wrappers defined above are used instead.
    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_source_deltas if not is_post_altair(spec) else get_source_deltas,
        spec.get_matching_source_attestations,
        'source_deltas',
    )
    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_target_deltas if not is_post_altair(spec) else get_target_deltas,
        spec.get_matching_target_attestations,
        'target_deltas',
    )
    yield from run_attestation_component_deltas(
        spec,
        state,
        spec.get_head_deltas if not is_post_altair(spec) else get_head_deltas,
        spec.get_matching_head_attestations,
        'head_deltas',
    )
    if not is_post_altair(spec):
        # Inclusion delay rewards exist only pre-Altair.
        yield from run_get_inclusion_delay_deltas(spec, state)
    yield from run_get_inactivity_penalty_deltas(spec, state)
def test_attestation(spec, state):
    """
    Include an attestation via a block, then cross an epoch boundary and check
    that the attestation accounting moved from the "current" epoch bucket to
    the "previous" one (pre-Altair: pending attestations; post-Altair:
    participation flags).
    """
    next_epoch(spec, state)

    yield 'pre', state

    attestation_block = build_empty_block(
        spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)

    index = 0
    # if spec.fork == SHARDING:
    # TODO add shard data to block to vote on

    attestation = get_valid_attestation(spec, state, index=index, signed=True, on_time=True)

    if not is_post_altair(spec):
        pre_current_attestations_len = len(state.current_epoch_attestations)

    # Add to state via block transition
    attestation_block.body.attestations.append(attestation)
    signed_attestation_block = state_transition_and_sign_block(
        spec, state, attestation_block)

    if not is_post_altair(spec):
        assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
        # Epoch transition should move to previous_epoch_attestations
        pre_current_attestations_root = spec.hash_tree_root(
            state.current_epoch_attestations)
    else:
        # Post-Altair: snapshot current participation so we can verify it was
        # rotated into previous_epoch_participation by the epoch transition.
        pre_current_epoch_participation_root = spec.hash_tree_root(
            state.current_epoch_participation)

    epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block)

    yield 'blocks', [signed_attestation_block, signed_epoch_block]
    yield 'post', state

    if not is_post_altair(spec):
        assert len(state.current_epoch_attestations) == 0
        assert spec.hash_tree_root(
            state.previous_epoch_attestations) == pre_current_attestations_root
    else:
        # Current-epoch participation must be reset to empty flags...
        for index in range(len(state.validators)):
            assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000)
        # ...and the old current-epoch participation rotated into previous.
        assert spec.hash_tree_root(
            state.previous_epoch_participation) == pre_current_epoch_participation_root
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
    """
    Run ``component_delta_fn``, yielding:
      - deltas ('{``deltas_name``}')

    Then assert, validator by validator, that rewards/penalties are consistent
    with participation, slashing status, eligibility, and (post-Altair) the
    inactivity-leak rules.
    """
    rewards, penalties = component_delta_fn(state)

    yield deltas_name, Deltas(rewards=rewards, penalties=penalties)

    # Determine which validators count as "matching" for this component.
    if not is_post_altair(spec):
        matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
        matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
    else:
        # Post-Altair, `matching_att_fn` is unused: matching is derived from
        # the participation flag corresponding to `deltas_name`.
        matching_indices = spec.get_unslashed_participating_indices(
            state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state))

    eligible_indices = spec.get_eligible_validator_indices(state)
    for index in range(len(state.validators)):
        if index not in eligible_indices:
            # Ineligible validators get neither rewards nor penalties.
            assert rewards[index] == 0
            assert penalties[index] == 0
            continue

        validator = state.validators[index]
        enough_for_reward = has_enough_for_reward(spec, state, index)
        if index in matching_indices and not validator.slashed:
            if is_post_altair(spec):
                # Post-Altair, no attestation rewards are paid during a leak.
                if not spec.is_in_inactivity_leak(state) and enough_for_reward:
                    assert rewards[index] > 0
                else:
                    assert rewards[index] == 0
            else:
                if enough_for_reward:
                    assert rewards[index] > 0
                else:
                    assert rewards[index] == 0
            assert penalties[index] == 0
        else:
            assert rewards[index] == 0
            if is_post_altair(spec) and 'head' in deltas_name:
                # Altair does not penalize missing the head vote.
                assert penalties[index] == 0
            elif enough_for_reward:
                assert penalties[index] > 0
            else:
                assert penalties[index] == 0
def run_get_inactivity_penalty_deltas(spec, state):
    """
    Run ``get_inactivity_penalty_deltas``, yielding:
      - inactivity penalty deltas ('inactivity_penalty_deltas')

    Then assert the penalties are consistent with target participation and
    (pre-Altair) the base-reward-cancelling penalty formula.
    """
    rewards, penalties = spec.get_inactivity_penalty_deltas(state)

    yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)

    # "Matching" here means matching the target vote (pre-Altair via pending
    # attestations, post-Altair via the timely-target participation flag).
    if not is_post_altair(spec):
        matching_attestations = spec.get_matching_target_attestations(
            state, spec.get_previous_epoch(state))
        matching_attesting_indices = spec.get_unslashed_attesting_indices(
            state, matching_attestations)
    else:
        matching_attesting_indices = spec.get_unslashed_participating_indices(
            state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state))

    eligible_indices = spec.get_eligible_validator_indices(state)
    for index in range(len(state.validators)):
        # Inactivity processing never pays rewards.
        assert rewards[index] == 0
        if index not in eligible_indices:
            assert penalties[index] == 0
            continue

        if spec.is_in_inactivity_leak(state):
            # Compute base_penalty (pre-Altair only; the post-Altair branches
            # below never read it).
            base_reward = spec.get_base_reward(state, index)
            if not is_post_altair(spec):
                cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
                base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)

            if not has_enough_for_reward(spec, state, index):
                assert penalties[index] == 0
            elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
                if is_post_altair(spec):
                    assert penalties[index] == 0
                else:
                    # Pre-Altair leak: participants still lose exactly the
                    # cancelled base rewards.
                    assert penalties[index] == base_penalty
            else:
                if is_post_altair(spec):
                    assert penalties[index] > 0
                else:
                    assert penalties[index] > base_penalty
        else:
            assert penalties[index] == 0
def test_is_valid_genesis_state_true(spec):
    """A freshly built valid beacon state must pass the genesis validity check."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)
    genesis_state = create_valid_beacon_state(spec)
    yield from run_is_valid_genesis_state(spec, genesis_state, valid=True)
def get_inactivity_penalty_quotient(spec):
    """Return the inactivity penalty quotient constant for the spec's fork."""
    if is_post_bellatrix(spec):
        return spec.INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
    if is_post_altair(spec):
        return spec.INACTIVITY_PENALTY_QUOTIENT_ALTAIR
    return spec.INACTIVITY_PENALTY_QUOTIENT
def get_slashing_multiplier(spec):
    """Return the proportional slashing multiplier constant for the spec's fork."""
    if is_post_bellatrix(spec):
        return spec.PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX
    if is_post_altair(spec):
        return spec.PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR
    return spec.PROPORTIONAL_SLASHING_MULTIPLIER
def get_process_calls(spec):
    """
    Return the ordered list of epoch-processing function names across all phases.

    Unrecognized processing functions will be ignored by the caller, so this is
    the aggregate of processing functions of all phases.
    Note: make sure to explicitly remove/override a processing function in later
    phases, or the old function will stick around.
    """
    return [
        'process_justification_and_finalization',
        'process_inactivity_updates',  # altair
        'process_rewards_and_penalties',
        'process_registry_updates',
        'process_reveal_deadlines',  # custody game
        'process_challenge_deadlines',  # custody game
        'process_slashings',
        # Fix: was 'process_pending_header.' (singular + stray trailing dot),
        # which could never match the sharding function name and would always
        # be silently ignored.
        'process_pending_headers',  # sharding
        'charge_confirmed_header_fees',  # sharding
        'reset_pending_headers',  # sharding
        'process_eth1_data_reset',
        'process_effective_balance_updates',
        'process_slashings_reset',
        'process_randao_mixes_reset',
        'process_historical_roots_update',
        # Altair replaced `process_participation_record_updates` with
        # `process_participation_flag_updates`
        'process_participation_flag_updates' if is_post_altair(spec)
        else 'process_participation_record_updates',
        'process_sync_committee_updates',  # altair
        'process_shard_epoch_increment'  # sharding
    ]
def build_empty_block(spec, state, slot=None):
    """
    Build an empty block for ``slot``, on top of the latest block header seen
    by ``state``. ``slot`` must be greater than or equal to the current slot
    in ``state``.
    """
    if slot is None:
        slot = state.slot
    if slot < state.slot:
        raise Exception("build_empty_block cannot build blocks for past slots")
    if state.slot < slot:
        # Advance a *copy* so we can read proposer/eth1 data at `slot`
        # without mutating the caller's state.
        state = state.copy()
        spec.process_slots(state, slot)

    state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)

    block = spec.BeaconBlock()
    block.slot = slot
    block.proposer_index = spec.get_beacon_proposer_index(state)
    block.body.eth1_data.deposit_count = state.eth1_deposit_index
    block.parent_root = parent_block_root

    if is_post_altair(spec):
        # An empty sync aggregate carries the point-at-infinity signature.
        block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
    if is_post_merge(spec):
        block.body.execution_payload = build_empty_execution_payload(spec, state)

    apply_randao_reveal(spec, state, block)
    return block
def test_initialize_beacon_state_from_eth1(spec):
    """Genesis init with exactly MIN_GENESIS_ACTIVE_VALIDATOR_COUNT full deposits."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)

    validator_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    deposits, deposit_root, _ = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE,
        validator_count,
        signed=True,
    )

    block_hash = b'\x12' * 32
    timestamp = spec.MIN_GENESIS_TIME
    yield from eth1_init_data(block_hash, timestamp)
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(block_hash, timestamp, deposits)

    assert state.genesis_time == timestamp + spec.GENESIS_DELAY
    assert len(state.validators) == validator_count
    assert state.eth1_data.deposit_root == deposit_root
    assert state.eth1_data.deposit_count == validator_count
    assert state.eth1_data.block_hash == block_hash
    assert spec.get_total_active_balance(state) == validator_count * spec.MAX_EFFECTIVE_BALANCE

    # yield state
    yield 'state', state
def test_initialize_beacon_state_random_valid_genesis(spec):
    """Genesis init with random deposits plus enough full deposits to be valid."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)

    # A batch of random deposits straddling the genesis validator-count boundary
    random_deposits, _, deposit_data_list = prepare_random_genesis_deposits(
        spec,
        deposit_count=20,
        min_pubkey_index=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 5,
        max_pubkey_index=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 5,
    )

    # Followed by MIN_GENESIS_ACTIVE_VALIDATOR_COUNT full deposits
    full_deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT,
        signed=True,
        deposit_data_list=deposit_data_list,
    )

    deposits = random_deposits + full_deposits

    block_hash = b'\x15' * 32
    timestamp = spec.MIN_GENESIS_TIME + 2
    yield from eth1_init_data(block_hash, timestamp)
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(block_hash, timestamp, deposits)
    assert spec.is_valid_genesis_state(state)

    yield 'state', state
def validate_resulting_balances(spec, pre_state, post_state, attestations):
    """
    Assert, validator by validator, that post-epoch-processing balances moved
    in the expected direction relative to ``pre_state``, given who attested
    (``attestations``) and whether the chain is in an inactivity leak.
    """
    attesting_indices = spec.get_unslashed_attesting_indices(post_state, attestations)
    current_epoch = spec.get_current_epoch(post_state)

    for index in range(len(pre_state.validators)):
        if not spec.is_active_validator(pre_state.validators[index], current_epoch):
            # Inactive validators are untouched by rewards/penalties.
            assert post_state.balances[index] == pre_state.balances[index]
        elif not is_post_altair(spec):
            proposer_indices = [a.proposer_index for a in post_state.previous_epoch_attestations]
            if spec.is_in_inactivity_leak(post_state):
                # Proposers can still make money during a leak before LIGHTCLIENT_PATCH
                if index in proposer_indices and index in attesting_indices:
                    assert post_state.balances[index] > pre_state.balances[index]
                elif index in attesting_indices:
                    # If not proposer but participated optimally, should have exactly neutral balance
                    assert post_state.balances[index] == pre_state.balances[index]
                else:
                    assert post_state.balances[index] < pre_state.balances[index]
            else:
                if index in attesting_indices:
                    assert post_state.balances[index] > pre_state.balances[index]
                else:
                    assert post_state.balances[index] < pre_state.balances[index]
        else:
            # Post-Altair: no proposer special-case during the leak.
            if spec.is_in_inactivity_leak(post_state):
                if index in attesting_indices:
                    # If not proposer but participated optimally, should have exactly neutral balance
                    assert post_state.balances[index] == pre_state.balances[index]
                else:
                    assert post_state.balances[index] < pre_state.balances[index]
            else:
                if index in attesting_indices:
                    assert post_state.balances[index] > pre_state.balances[index]
                else:
                    assert post_state.balances[index] < pre_state.balances[index]
def process_and_sign_block_without_header_validations(spec, state, block):
    """
    Artificially bypass the header restrictions in the state transition to
    transition and sign ``block``.

    WARNING UNSAFE: Only use when generating valid-looking invalid blocks for
    test vectors.
    """
    # Apply the single mutation that `process_block_header` would have made.
    state.latest_block_header = spec.BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=spec.Bytes32(),
        body_root=block.body.hash_tree_root(),
    )
    if is_post_merge(spec):
        if spec.is_execution_enabled(state, block.body):
            spec.process_execution_payload(state, block.body.execution_payload,
                                           spec.EXECUTION_ENGINE)

    # Run the remaining process_block transitions.
    spec.process_randao(state, block.body)
    spec.process_eth1_data(state, block.body)
    spec.process_operations(state, block.body)
    if is_post_altair(spec):
        spec.process_sync_aggregate(state, block.body.sync_aggregate)

    # Insert post-state root, then sign.
    block.state_root = state.hash_tree_root()
    return sign_block(spec, state, block)
def get_min_slashing_penalty_quotient(spec):
    """Return the minimum slashing penalty quotient constant for the spec's fork."""
    if is_post_bellatrix(spec):
        return spec.MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
    if is_post_altair(spec):
        return spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    return spec.MIN_SLASHING_PENALTY_QUOTIENT
def test_is_valid_genesis_state_true_more_balance(spec):
    """A state stays genesis-valid even if one validator exceeds MAX_EFFECTIVE_BALANCE."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)
    genesis_state = create_valid_beacon_state(spec)
    genesis_state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
    yield from run_is_valid_genesis_state(spec, genesis_state, valid=True)
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
    """A genesis time before MIN_GENESIS_TIME must make the state invalid."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)
    genesis_state = create_valid_beacon_state(spec)
    genesis_state.genesis_time = spec.MIN_GENESIS_TIME - 1
    yield from run_is_valid_genesis_state(spec, genesis_state, valid=False)
def test_incentives(spec, state):
    """Ensure no ETH is minted in slash_validator: the slashing penalty must
    cover the whistleblower reward for the active fork."""
    if is_post_bellatrix(spec):
        min_quotient = spec.MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX
    elif is_post_altair(spec):
        min_quotient = spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    else:
        min_quotient = spec.MIN_SLASHING_PENALTY_QUOTIENT
    assert min_quotient <= spec.WHISTLEBLOWER_REWARD_QUOTIENT
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=None):
    """
    Assert the balance effects of a processed proposer slashing: the slashed
    validator lost the slash penalty, and the block proposer gained (at least)
    the whistleblower reward. Post-Altair, when ``block`` is given, sync
    committee rewards/penalties from that block are folded into the expected
    balances.
    """
    slashed_validator = state.validators[slashed_index]
    assert slashed_validator.slashed
    assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH

    proposer_index = spec.get_beacon_proposer_index(state)
    slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
    whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT

    # Altair introduces sync committee (SC) reward and penalty
    sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0
    if is_post_altair(spec) and block is not None:
        committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
        committee_bits = block.body.sync_aggregate.sync_committee_bits
        sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty(
            spec,
            pre_state,
            slashed_index,
            committee_indices,
            committee_bits,
        )
        sc_reward_for_proposer, sc_penalty_for_proposer = compute_sync_committee_participant_reward_and_penalty(
            spec,
            pre_state,
            proposer_index,
            committee_indices,
            committee_bits,
        )

    if proposer_index != slashed_index:
        # slashed validator lost initial slash penalty
        assert (get_balance(state, slashed_index)
                == get_balance(pre_state, slashed_index)
                - slash_penalty + sc_reward_for_slashed - sc_penalty_for_slashed)
        # block proposer gained whistleblower reward
        # >= because proposer could have reported multiple
        assert (get_balance(state, proposer_index)
                >= (get_balance(pre_state, proposer_index) + whistleblower_reward
                    + sc_reward_for_proposer - sc_penalty_for_proposer))
    else:
        # proposer reported themself so get penalty and reward
        # >= because proposer could have reported multiple
        assert (get_balance(state, slashed_index)
                >= (get_balance(pre_state, slashed_index) - slash_penalty +
                    whistleblower_reward + sc_reward_for_slashed - sc_penalty_for_slashed))
def _set_empty_participation(spec, state, current=True, previous=True):
    """Clear every validator's participation flags (Altair+ only)."""
    assert is_post_altair(spec)
    for validator_index in range(len(state.validators)):
        if current:
            state.current_epoch_participation[validator_index] = spec.ParticipationFlags(0)
        if previous:
            state.previous_epoch_participation[validator_index] = spec.ParticipationFlags(0)
def randomize_epoch_participation(spec, state, epoch, rng):
    """
    Randomize participation for ``epoch`` (must be the current or previous
    epoch): pre-Altair by mangling the epoch's pending attestations, post-Altair
    by randomizing the epoch's participation flags per validator.

    NOTE: the exact RNG call order is part of the test-vector determinism —
    do not reorder the rng calls.
    """
    assert epoch in (spec.get_current_epoch(state), spec.get_previous_epoch(state))
    if not is_post_altair(spec):
        if epoch == spec.get_current_epoch(state):
            pending_attestations = state.current_epoch_attestations
        else:
            pending_attestations = state.previous_epoch_attestations
        for pending_attestation in pending_attestations:
            # ~1/3 have bad target
            if rng.randint(0, 2) == 0:
                pending_attestation.data.target.root = b'\x55' * 32
            # ~1/3 have bad head
            if rng.randint(0, 2) == 0:
                pending_attestation.data.beacon_block_root = b'\x66' * 32
            # ~50% participation
            pending_attestation.aggregation_bits = [
                rng.choice([True, False]) for _ in pending_attestation.aggregation_bits
            ]
            # Random inclusion delay
            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    else:
        if epoch == spec.get_current_epoch(state):
            epoch_participation = state.current_epoch_participation
        else:
            epoch_participation = state.previous_epoch_participation
        for index in range(len(state.validators)):
            # ~1/3 have bad head or bad target or not timely enough
            is_timely_correct_head = rng.randint(0, 2) != 0
            flags = epoch_participation[index]

            # Set/clear one participation flag bit in the closed-over `flags`.
            # NOTE: the `index` parameter here (a flag index) shadows the
            # validator `index` of the outer loop.
            def set_flag(index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**index)
                if value:
                    flags |= flag
                else:
                    flags &= 0xff ^ flag

            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
            if is_timely_correct_head:
                # If timely head, then must be timely target
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
                # If timely head, then must be timely source
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
            else:
                # ~50% of remaining have bad target or not timely enough
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
                # ~50% of remaining have bad source or not timely enough
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
            epoch_participation[index] = flags
def randomize_previous_epoch_participation(spec, state, rng=Random(8020)):
    """
    Fill the state with attestations, randomize previous-epoch participation,
    and clear all current-epoch participation.

    The shared-default ``Random(8020)`` is deliberate: it keeps generated test
    vectors deterministic across calls.
    """
    cached_prepare_state_with_attestations(spec, state)
    randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
    if is_post_altair(spec):
        state.current_epoch_participation = [
            spec.ParticipationFlags(0b0000_0000)
            for _ in range(len(state.validators))
        ]
    else:
        state.current_epoch_attestations = []
def run_attestation_processing(spec, state, attestation, valid=True): """ Run ``process_attestation``, yielding: - pre-state ('pre') - attestation ('attestation') - post-state ('post'). If ``valid == False``, run expecting ``AssertionError`` """ # yield pre-state yield 'pre', state yield 'attestation', attestation # If the attestation is invalid, processing is aborted, and there is no post-state. if not valid: expect_assertion_error( lambda: spec.process_attestation(state, attestation)) yield 'post', None return if not is_post_altair(spec): current_epoch_count = len(state.current_epoch_attestations) previous_epoch_count = len(state.previous_epoch_attestations) # process attestation spec.process_attestation(state, attestation) # Make sure the attestation has been processed if not is_post_altair(spec): if attestation.data.target.epoch == spec.get_current_epoch(state): assert len( state.current_epoch_attestations) == current_epoch_count + 1 else: assert len( state.previous_epoch_attestations) == previous_epoch_count + 1 else: # After accounting reform, there are cases when processing an attestation does not result in any flag updates pass # yield post-state yield 'post', state
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
    """
    Prepare full attestation coverage, then randomly drop ~half of the
    previous-epoch participation before running the delta checks.

    The shared-default ``Random(5522)`` keeps the dropped set deterministic.
    """
    cached_prepare_state_with_attestations(spec, state)

    if is_post_altair(spec):
        for validator_index in range(len(state.validators)):
            if rng.choice([True, False]):
                state.previous_epoch_participation[validator_index] = spec.ParticipationFlags(0b0000_0000)
    else:
        for attestation in state.previous_epoch_attestations:
            attestation.aggregation_bits = [
                rng.choice([True, False]) for _ in attestation.aggregation_bits
            ]

    yield from run_deltas(spec, state)
def _set_full_participation(spec, state, current=True, previous=True):
    """Set every participation flag for every validator (Altair+ only)."""
    assert is_post_altair(spec)

    # Build the all-flags-set value once.
    all_flags = spec.ParticipationFlags(0)
    for flag in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
        all_flags = spec.add_flag(all_flags, flag)

    for validator_index in range(len(state.validators)):
        if current:
            state.current_epoch_participation[validator_index] = all_flags.copy()
        if previous:
            state.previous_epoch_participation[validator_index] = all_flags.copy()
def run_get_inclusion_delay_deltas(spec, state):
    """
    Run ``get_inclusion_delay_deltas``, yielding:
      - inclusion delay deltas ('inclusion_delay_deltas')

    Post-Altair there are no inclusion delay deltas, so all-zero deltas are
    yielded instead.
    """
    if is_post_altair(spec):
        # No inclusion_delay_deltas
        yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
                                               penalties=[0] * len(state.validators))
        return

    rewards, penalties = spec.get_inclusion_delay_deltas(state)

    yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)

    eligible_attestations = spec.get_matching_source_attestations(state, spec.get_previous_epoch(state))
    attesting_indices = spec.get_unslashed_attesting_indices(state, eligible_attestations)

    rewarded_indices = set()
    rewarded_proposer_indices = set()
    # Ensure attesters with enough balance are rewarded for attestations
    # Track those that are rewarded and track proposers that should be rewarded
    for index in range(len(state.validators)):
        if index in attesting_indices and has_enough_for_reward(spec, state, index):
            assert rewards[index] > 0
            rewarded_indices.add(index)

            # Track proposer of earliest included attestation for the validator defined by index
            earliest_attestation = min([
                a for a in eligible_attestations
                if index in spec.get_attesting_indices(state, a.data, a.aggregation_bits)
            ], key=lambda a: a.inclusion_delay)
            rewarded_proposer_indices.add(earliest_attestation.proposer_index)

    # Ensure all expected proposers have been rewarded
    # Track rewarded indices
    proposing_indices = [a.proposer_index for a in eligible_attestations]
    for index in proposing_indices:
        if index in rewarded_proposer_indices:
            assert rewards[index] > 0
            rewarded_indices.add(index)

    # Ensure all expected non-rewarded indices received no reward
    for index in range(len(state.validators)):
        # Inclusion delay processing never penalizes.
        assert penalties[index] == 0
        if index not in rewarded_indices:
            assert rewards[index] == 0
def prepare_state_with_attestations(spec, state, participation_fn=None): """ Prepare state with attestations according to the ``participation_fn``. If no ``participation_fn``, default to "full" -- max committee participation at each slot. participation_fn: (slot, committee_index, committee_indices_set) -> participants_indices_set """ # Go to start of next epoch to ensure can have full participation next_epoch(spec, state) start_slot = state.slot start_epoch = spec.get_current_epoch(state) next_epoch_start_slot = spec.compute_start_slot_at_epoch(start_epoch + 1) attestations = [] for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY): # create an attestation for each index in each slot in epoch if state.slot < next_epoch_start_slot: for committee_index in range( spec.get_committee_count_per_slot( state, spec.get_current_epoch(state))): def temp_participants_filter(comm): if participation_fn is None: return comm else: return participation_fn(state.slot, committee_index, comm) attestation = get_valid_attestation( spec, state, index=committee_index, filter_participant_set=temp_participants_filter, signed=True) if any(attestation.aggregation_bits ): # Only if there is at least 1 participant. attestations.append(attestation) # fill each created slot in state after inclusion delay if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY: inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY include_attestations = [ att for att in attestations if att.data.slot == inclusion_slot ] add_attestations_to_state(spec, state, include_attestations, state.slot) next_slot(spec, state) assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY if not is_post_altair(spec): assert len(state.previous_epoch_attestations) == len(attestations) return attestations
def run_test_partial(spec, state, fraction_filled):
    """Keep only ``fraction_filled`` of the previous-epoch attestation credit,
    then run the delta checks."""
    cached_prepare_state_with_attestations(spec, state)

    # Remove portion of attestations
    if is_post_altair(spec):
        cleared_count = int(len(state.validators) * fraction_filled)
        for validator_index in range(cleared_count):
            state.previous_epoch_participation[validator_index] = spec.ParticipationFlags(0b0000_0000)
    else:
        kept_count = int(len(state.previous_epoch_attestations) * fraction_filled)
        state.previous_epoch_attestations = state.previous_epoch_attestations[:kept_count]

    yield from run_deltas(spec, state)
def run_test_full_random(spec, state, rng=Random(8020)):
    """
    Randomize validator set (new deposits, exits, slashings) and previous-epoch
    participation, then run the delta checks.

    NOTE: the exact RNG call order is part of the test-vector determinism —
    do not reorder the rng calls. The shared-default ``Random(8020)`` is
    deliberate for deterministic vectors.
    """
    set_some_new_deposits(spec, state, rng)
    exit_random_validators(spec, state, rng)
    slash_random_validators(spec, state, rng)

    cached_prepare_state_with_attestations(spec, state)

    if not is_post_altair(spec):
        for pending_attestation in state.previous_epoch_attestations:
            # ~1/3 have bad target
            if rng.randint(0, 2) == 0:
                pending_attestation.data.target.root = b'\x55' * 32
            # ~1/3 have bad head
            if rng.randint(0, 2) == 0:
                pending_attestation.data.beacon_block_root = b'\x66' * 32
            # ~50% participation
            pending_attestation.aggregation_bits = [
                rng.choice([True, False]) for _ in pending_attestation.aggregation_bits
            ]
            # Random inclusion delay
            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    else:
        for index in range(len(state.validators)):
            # ~1/3 have bad head or bad target or not timely enough
            is_timely_correct_head = rng.randint(0, 2) != 0
            flags = state.previous_epoch_participation[index]

            # Set/clear one participation flag bit in the closed-over `flags`.
            # NOTE: the `index` parameter here (a flag index) shadows the
            # validator `index` of the outer loop.
            def set_flag(index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**index)
                if value:
                    flags |= flag
                else:
                    flags &= 0xff ^ flag

            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
            if is_timely_correct_head:
                # If timely head, then must be timely target
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
                # If timely head, then must be timely source
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
            else:
                # ~50% of remaining have bad target or not timely enough
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
                # ~50% of remaining have bad source or not timely enough
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
            state.previous_epoch_participation[index] = flags

    yield from run_deltas(spec, state)
def mock_deposit(spec, state, index):
    """
    Mock the validator at ``index`` as having just made a deposit: push its
    activation epochs out to FAR_FUTURE_EPOCH so it is no longer active, while
    giving it a full effective balance.
    """
    # Must start out active...
    assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))

    state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
    state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
    state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
    if is_post_altair(spec):
        state.inactivity_scores[index] = 0

    # ...and end up inactive, like a fresh depositor.
    assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
def test_initialize_beacon_state_one_topup_activation(spec):
    """Genesis where one validator only reaches MAX_EFFECTIVE_BALANCE via a top-up."""
    if is_post_altair(spec):
        yield 'description', 'meta', get_post_altair_description(spec)

    # Submit all but one deposit as MAX_EFFECTIVE_BALANCE
    main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
    main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE,
        deposit_count=main_deposit_count,
        signed=True,
    )

    # Submit the last pubkey's deposit as MAX_EFFECTIVE_BALANCE - MIN_DEPOSIT_AMOUNT
    partial_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
        spec,
        spec.MAX_EFFECTIVE_BALANCE - spec.MIN_DEPOSIT_AMOUNT,
        deposit_count=1,
        min_pubkey_index=main_deposit_count,
        signed=True,
        deposit_data_list=deposit_data_list,
    )

    # Top up the last pubkey with MIN_DEPOSIT_AMOUNT to complete the deposit
    top_up_deposits, _, _ = prepare_full_genesis_deposits(
        spec,
        spec.MIN_DEPOSIT_AMOUNT,
        deposit_count=1,
        min_pubkey_index=main_deposit_count,
        signed=True,
        deposit_data_list=deposit_data_list,
    )

    deposits = main_deposits + partial_deposits + top_up_deposits

    block_hash = b'\x13' * 32
    timestamp = spec.MIN_GENESIS_TIME
    yield from eth1_init_data(block_hash, timestamp)
    yield 'deposits', deposits

    # initialize beacon_state
    state = spec.initialize_beacon_state_from_eth1(block_hash, timestamp, deposits)
    assert spec.is_valid_genesis_state(state)

    yield 'state', state