def generate_config_by_dict(dict_config: Dict[str, Any]) -> Eth2Config:
    """Build an ``Eth2Config`` from a raw config mapping.

    Drops the domain constants and the phase-1 penalty key (which the
    config type does not accept), then injects a ``GENESIS_EPOCH`` value
    derived from the slot parameters.
    """
    without_domains = keyfilter(
        lambda name: "DOMAIN_" not in name,
        dict_config,
    )
    without_phase_1 = keyfilter(
        lambda name: "EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS" not in name,
        without_domains,
    )
    # GENESIS_EPOCH is not present in the raw mapping; derive it here.
    genesis_epoch = compute_epoch_of_slot(
        dict_config['GENESIS_SLOT'],
        dict_config['SLOTS_PER_EPOCH'],
    )
    return Eth2Config(**assoc(without_phase_1, "GENESIS_EPOCH", genesis_epoch))
def generate_config_by_dict(dict_config: Dict[str, Any]) -> Eth2Config:
    """Build an ``Eth2Config`` from a raw config mapping.

    Removes keys (networking constants, domain constants, and phase-1
    parameters) that ``Eth2Config`` does not accept, then injects a
    ``GENESIS_EPOCH`` value derived from the slot parameters.
    """
    excluded_fragments = (
        "DOMAIN_",
        "ETH1_FOLLOW_DISTANCE",
        "TARGET_AGGREGATORS_PER_COMMITTEE",
        "RANDOM_SUBNETS_PER_VALIDATOR",
        "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION",
        # Phase 1
        "MAX_EPOCHS_PER_CROSSLINK",
        "EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS",
        "EPOCHS_PER_CUSTODY_PERIOD",
        "CUSTODY_PERIOD_TO_RANDAO_PADDING",
        "SHARD_SLOTS_PER_BEACON_SLOT",
        "EPOCHS_PER_SHARD_PERIOD",
        "PHASE_1_FORK_EPOCH",
        "PHASE_1_FORK_SLOT",
    )

    def _accepted(name: str) -> bool:
        # Keep a key only if it matches none of the excluded fragments
        # (substring match, so "DOMAIN_" catches every domain constant).
        return not any(fragment in name for fragment in excluded_fragments)

    genesis_epoch = compute_epoch_at_slot(
        dict_config["GENESIS_SLOT"], dict_config["SLOTS_PER_EPOCH"]
    )
    return Eth2Config(
        **assoc(keyfilter(_accepted, dict_config), "GENESIS_EPOCH", genesis_epoch)
    )
def generate_config_by_dict(dict_config: Dict[str, Any]) -> Eth2Config:
    """Build an ``Eth2Config`` from a raw config mapping.

    Drops the domain constants and the phase-1 penalty key, then injects
    a ``GENESIS_EPOCH`` value derived from the slot parameters.
    """

    def _accepted(name: str) -> bool:
        # Substring match, so "DOMAIN_" catches every domain constant.
        return (
            "DOMAIN_" not in name
            and "EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS" not in name
        )

    genesis_epoch = compute_epoch_of_slot(
        dict_config["GENESIS_SLOT"], dict_config["SLOTS_PER_EPOCH"]
    )
    return Eth2Config(
        **assoc(keyfilter(_accepted, dict_config), "GENESIS_EPOCH", genesis_epoch)
    )
def generate_config_by_dict(dict_config: Dict[str, Any]) -> Eth2Config:
    """Build an ``Eth2Config`` from a raw config mapping.

    Removes keys (networking constants, domain constants, and phase-1
    parameters) that ``Eth2Config`` does not accept.

    Fixes over the previous revision: the filter tuple contained exact
    duplicates ("EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS",
    "EPOCHS_PER_CUSTODY_PERIOD", "CUSTODY_PERIOD_TO_RANDAO_PADDING" were
    each listed twice) and a redundant entry ("RANDAO_PENALTY_EPOCHS" is
    already covered by the substring match on "RANDAO_PENALTY_EPOCH").
    Removing them does not change behavior.
    """
    filtered_keys = (
        "DOMAIN_",
        "ETH1_FOLLOW_DISTANCE",
        "TARGET_AGGREGATORS_PER_COMMITTEE",
        "RANDOM_SUBNETS_PER_VALIDATOR",
        "EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION",
        # Phase 1
        "MAX_EPOCHS_PER_CROSSLINK",
        "EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS",
        "EPOCHS_PER_CUSTODY_PERIOD",
        "CUSTODY_PERIOD_TO_RANDAO_PADDING",
        "SHARD_SLOTS_PER_BEACON_SLOT",
        "EPOCHS_PER_SHARD_PERIOD",
        "PHASE_1_FORK_EPOCH",
        "PHASE_1_GENESIS_SLOT",
        "PHASE_1_FORK_SLOT",
        "PHASE_1_FORK_VERSION",
        "SECONDS_PER_ETH1_BLOCK",
        "INITIAL_ACTIVE_SHARDS",
        "MAX_SHARDS",
        "ONLINE_SHARDS",
        "ONLINE_PERIOD",
        "LIGHT_CLIENT_COMMITTEE_SIZE",
        "LIGHT_CLIENT_COMMITTEE_PERIOD",
        "SHARD_BLOCK_CHUNK_SIZE",
        "MAX_SHARD_BLOCK_CHUNKS",
        "TARGET_SHARD_BLOCK_SIZE",
        "SHARD_BLOCK_OFFSETS",
        "MAX_SHARD_BLOCKS_PER_ATTESTATION",
        "MAX_GASPRICE",
        "MIN_GASPRICE",
        "GASPRICE_ADJUSTMENT_COEFFICIENT",
        # NOTE: "RANDAO_PENALTY_EPOCH" also matches "RANDAO_PENALTY_EPOCHS"
        # via the substring check below.
        "RANDAO_PENALTY_EPOCH",
        "MAX_REVEAL_LATENESS_DECREMENT",
        "MAX_CUSTODY_KEY_REVEALS",
        "MAX_EARLY_DERIVED_SECRET_REVEALS",
        "MAX_CUSTODY_SLASHINGS",
        "EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE",
        "MINOR_REWARD_QUOTIENT",
    )
    # Keep a key only if it matches none of the excluded fragments
    # (substring match, so "DOMAIN_" catches every domain constant).
    return Eth2Config(
        **keyfilter(
            lambda name: all(key not in name for key in filtered_keys),
            dict_config,
        )
    )
def generate_config_by_dict(dict_config: Dict[str, Any]) -> Eth2Config:
    """Build an ``Eth2Config`` from a raw config mapping.

    Removes keys (domain constants, a fork-choice constant, and phase-1
    parameters) that ``Eth2Config`` does not accept, then injects a
    ``GENESIS_EPOCH`` value derived from the slot parameters.
    """
    excluded_fragments = (
        "DOMAIN_",
        # TODO: Fork choice rule
        "SAFE_SLOTS_TO_UPDATE_JUSTIFIED",
        # Phase 1
        "MAX_EPOCHS_PER_CROSSLINK",
        "EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS",
        "EPOCHS_PER_CUSTODY_PERIOD",
        "CUSTODY_PERIOD_TO_RANDAO_PADDING",
        "SHARD_SLOTS_PER_BEACON_SLOT",
        "EPOCHS_PER_SHARD_PERIOD",
        "PHASE_1_FORK_EPOCH",
        "PHASE_1_FORK_SLOT",
    )

    def _accepted(name: str) -> bool:
        # Keep a key only if it matches none of the excluded fragments
        # (substring match, so "DOMAIN_" catches every domain constant).
        return not any(fragment in name for fragment in excluded_fragments)

    genesis_epoch = compute_epoch_at_slot(
        dict_config["GENESIS_SLOT"], dict_config["SLOTS_PER_EPOCH"]
    )
    return Eth2Config(
        **assoc(keyfilter(_accepted, dict_config), "GENESIS_EPOCH", genesis_epoch)
    )
def test_store_get_latest_attestation(genesis_state, genesis_block, config,
                                      collisions_from_another_epoch) -> None:
    """
    Given some attestations across the various sources, can we find the latest
    ones for each validator?
    """
    # Fast-forward the genesis state to an epoch with both a previous and a
    # current epoch to sample attestations from.
    some_epoch = 3
    state = genesis_state.set(
        "slot", compute_start_slot_at_epoch(some_epoch, config.SLOTS_PER_EPOCH))
    # Wall-clock time corresponding to ``some_epoch``; fed to the Store's
    # context below so ``on_attestation`` accepts attestations for this epoch.
    some_time = (_compute_seconds_since_genesis_for_epoch(some_epoch, config) +
                 state.genesis_time)
    previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH, config.GENESIS_EPOCH)
    previous_epoch_committee_count = _get_committee_count(state, previous_epoch, config)
    current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    current_epoch_committee_count = _get_committee_count(state, current_epoch, config)
    # Sample a handful of committees per source; guard that each epoch has
    # enough committees to draw from.
    number_of_committee_samples = 4
    assert number_of_committee_samples <= previous_epoch_committee_count
    assert number_of_committee_samples <= current_epoch_committee_count
    block_producer = _mk_block_at_slot(genesis_block)
    # prepare samples from previous epoch
    previous_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples, previous_epoch, block_producer, state, config)
    previous_epoch_attestations = _extract_attestations_from_index_keying(
        previous_epoch_attestations_by_index.values())
    # prepare samples from current epoch
    current_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples, current_epoch, block_producer, state, config)
    # Drop validators already sampled in the previous epoch so each source
    # contributes distinct validators.
    current_epoch_attestations_by_index = keyfilter(
        lambda index: index not in previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
    )
    current_epoch_attestations = _extract_attestations_from_index_keying(
        current_epoch_attestations_by_index.values())
    # prepare samples for the "pool" source, drawn from the current epoch
    pool_attestations_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples, current_epoch, block_producer, state, config)
    # NOTE(review): ``or`` keeps any index missing from at least one of the
    # two earlier maps; if the intent is to exclude indices used by either
    # source, this should be ``and`` — confirm against the expected-index
    # construction below.
    pool_attestations_by_index = keyfilter(
        lambda index: (index not in previous_epoch_attestations_by_index or
                       index not in current_epoch_attestations_by_index),
        pool_attestations_by_index,
    )
    pool_attestations = _extract_attestations_from_index_keying(
        pool_attestations_by_index.values())
    all_attestations_by_index = (
        previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
        pool_attestations_by_index,
    )
    if collisions_from_another_epoch:
        # Overwrite some samples with colliding attestations from another
        # epoch and re-derive the flat attestation lists.
        (
            previous_epoch_attestations_by_index,
            current_epoch_attestations_by_index,
            pool_attestations_by_index,
        ) = _introduce_collisions(all_attestations_by_index, block_producer, state, config)
        previous_epoch_attestations = _extract_attestations_from_index_keying(
            previous_epoch_attestations_by_index.values())
        current_epoch_attestations = _extract_attestations_from_index_keying(
            current_epoch_attestations_by_index.values())
        pool_attestations = _extract_attestations_from_index_keying(
            pool_attestations_by_index.values())
    # build expected results
    # For each validator index, keep only the attestation with the latest
    # slot across all three sources.
    expected_index = merge_with(
        _keep_by_latest_slot,
        previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
        pool_attestations_by_index,
    )
    chain_db = None  # not relevant for this test
    context = Context.from_genesis(genesis_state, genesis_block)
    context.time = some_time
    store = Store(chain_db, SignedBeaconBlock, config, context)
    for attestations in (
        previous_epoch_attestations,
        current_epoch_attestations,
        pool_attestations,
    ):
        for attestation in attestations:
            # NOTE: we need to synchronize the context w/ chain data used to construct
            # attestations above; this synchronization takes advantage of some of the
            # internals of ``on_attestation`` to shortcut constructing the complete network
            # state needed to test the function of the ``Store``.
            block = block_producer(attestation.data.slot)
            context.blocks[block.message.hash_tree_root] = block
            context.block_states[block.message.hash_tree_root] = genesis_state
            store.on_attestation(attestation, validate_signature=False)
    # sanity check
    assert expected_index.keys() == store._context.latest_messages.keys()
    # Every validator with an expected attestation should have a matching
    # latest message recorded in the store's context.
    for validator_index in range(len(state.validators)):
        expected_attestation_data = expected_index.get(validator_index, None)
        target = expected_attestation_data.target
        expected_message = LatestMessage(epoch=target.epoch, root=target.root)
        stored_message = store._context.latest_messages.get(validator_index, None)
        assert expected_message == stored_message
def test_store_get_latest_attestation(genesis_state, empty_attestation_pool, config,
                                      collisions_from_another_epoch) -> None:
    """
    Given some attestations across the various sources, can we find the latest
    ones for each validator?
    """
    # Fast-forward the genesis state to an epoch with previous, current and
    # next epochs to sample attestations from.
    some_epoch = 3
    state = genesis_state.copy(
        slot=compute_start_slot_of_epoch(some_epoch, config.SLOTS_PER_EPOCH),
    )
    previous_epoch = state.previous_epoch(config.SLOTS_PER_EPOCH, config.GENESIS_EPOCH)
    previous_epoch_committee_count = _get_committee_count(
        state,
        previous_epoch,
        config,
    )
    current_epoch = state.current_epoch(config.SLOTS_PER_EPOCH)
    current_epoch_committee_count = _get_committee_count(
        state,
        current_epoch,
        config,
    )
    next_epoch = state.next_epoch(config.SLOTS_PER_EPOCH)
    next_epoch_committee_count = _get_committee_count(
        state,
        next_epoch,
        config,
    )
    # Sample a handful of committees per source; guard that each epoch has
    # enough committees to draw from.
    number_of_committee_samples = 4
    assert number_of_committee_samples <= previous_epoch_committee_count
    assert number_of_committee_samples <= current_epoch_committee_count
    assert number_of_committee_samples <= next_epoch_committee_count

    # prepare samples from previous epoch
    previous_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples,
        previous_epoch,
        state,
        config,
    )
    previous_epoch_attestations = _extract_attestations_from_index_keying(
        previous_epoch_attestations_by_index.values(),
    )

    # prepare samples from current epoch
    current_epoch_attestations_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples,
        current_epoch,
        state,
        config,
    )
    # Drop validators already sampled in the previous epoch so each source
    # contributes distinct validators.
    current_epoch_attestations_by_index = keyfilter(
        lambda index: index not in previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
    )
    current_epoch_attestations = _extract_attestations_from_index_keying(
        current_epoch_attestations_by_index.values(),
    )

    # prepare samples for pool, taking half from the current epoch and half from the next epoch
    pool_attestations_in_current_epoch_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples // 2,
        current_epoch,
        state,
        config,
    )
    pool_attestations_in_next_epoch_by_index = _mk_attestations_for_epoch_by_count(
        number_of_committee_samples // 2,
        next_epoch,
        state,
        config,
    )
    pool_attestations_by_index = merge(
        pool_attestations_in_current_epoch_by_index,
        pool_attestations_in_next_epoch_by_index,
    )
    # NOTE(review): ``or`` keeps any index missing from at least one of the
    # two earlier maps; if the intent is to exclude indices used by either
    # source, this should be ``and`` — confirm against the expected-index
    # construction below.
    pool_attestations_by_index = keyfilter(
        lambda index: (
            index not in previous_epoch_attestations_by_index
            or index not in current_epoch_attestations_by_index
        ),
        pool_attestations_by_index,
    )
    pool_attestations = _extract_attestations_from_index_keying(
        pool_attestations_by_index.values(),
    )

    all_attestations_by_index = (
        previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
        pool_attestations_by_index,
    )

    if collisions_from_another_epoch:
        # Overwrite some samples with colliding attestations from another
        # epoch and re-derive the flat attestation lists.
        (
            previous_epoch_attestations_by_index,
            current_epoch_attestations_by_index,
            pool_attestations_by_index,
        ) = _introduce_collisions(
            all_attestations_by_index,
            state,
            config,
        )
        previous_epoch_attestations = _extract_attestations_from_index_keying(
            previous_epoch_attestations_by_index.values(),
        )
        current_epoch_attestations = _extract_attestations_from_index_keying(
            current_epoch_attestations_by_index.values(),
        )
        pool_attestations = _extract_attestations_from_index_keying(
            pool_attestations_by_index.values(),
        )

    # build expected results
    # For each validator index, keep only the attestation with the latest
    # slot across all three sources.
    expected_index = merge_with(
        _keep_by_latest_slot,
        previous_epoch_attestations_by_index,
        current_epoch_attestations_by_index,
        pool_attestations_by_index,
    )

    # ensure we get the expected results
    # Epoch attestations are read from the state; pool attestations from the
    # attestation pool fixture.
    state = state.copy(
        previous_epoch_attestations=previous_epoch_attestations,
        current_epoch_attestations=current_epoch_attestations,
    )

    pool = empty_attestation_pool
    for attestation in pool_attestations:
        pool.add(attestation)

    chain_db = None  # not relevant for this test
    store = Store(chain_db, state, pool, BeaconBlock, config)

    # sanity check
    assert expected_index.keys() == store._attestation_index.keys()

    # Every validator with an expected attestation should be retrievable via
    # the store's latest-attestation lookup.
    for validator_index in range(len(state.validators)):
        expected_attestation_data = expected_index.get(validator_index, None)
        stored_attestation_data = store._get_latest_attestation(validator_index)
        assert expected_attestation_data == stored_attestation_data