def set_prev_pool_metrics(self, metrics: PoolMetrics):
    """Publish the previously reported pool state into the 'prev*' exporter gauges."""
    gauge_values = (
        (self.prevEthV1BlockNumber, metrics.blockNumber),
        (self.prevEpoch, metrics.epoch),
        (self.prevBeaconBalance, metrics.beaconBalance),
        (self.prevBeaconValidators, metrics.beaconValidators),
        (self.prevTimestamp, metrics.timestamp),
        (self.prevBufferedBalance, metrics.bufferedBalance),
        (self.prevDepositedValidators, metrics.depositedValidators),
        (self.prevActiveValidatorBalance, metrics.activeValidatorBalance),
        # Derived quantities computed by PoolMetrics itself.
        (self.prevTotalPooledEther, metrics.getTotalPooledEther()),
        (self.prevTransientValidators, metrics.getTransientValidators()),
        (self.prevTransientBalance, metrics.getTransientBalance()),
    )
    for gauge, value in gauge_values:
        gauge.set(value)
def test_pool_metrics_get_total_pooled_ether_non_empty():
    """Derived getters on a populated PoolMetrics.

    2 of 3 deposited validators are visible on the beacon chain, so one
    deposit (32 ETH) is still in transit.
    """
    metrics = PoolMetrics()
    metrics.depositedValidators = 3
    metrics.beaconValidators = 2
    metrics.beaconBalance = 65 * 10**18
    metrics.bufferedBalance = 31 * 10**18
    # 65 (beacon) + 1 * 32 (transient) + 31 (buffered) = 128 ETH
    assert metrics.getTransientValidators() == 1
    assert metrics.getTransientBalance() == 32 * 10**18
    assert metrics.getTotalPooledEther() == 128 * 10**18
def set_current_pool_metrics(self, metrics: PoolMetrics):
    """Publish the freshly collected pool state into the 'current*' exporter gauges."""
    gauge_values = (
        (self.currentEthV1BlockNumber, metrics.blockNumber),
        (self.currentEpoch, metrics.epoch),
        (self.currentBeaconBalance, metrics.beaconBalance),
        (self.currentBeaconValidators, metrics.beaconValidators),
        (self.currentTimestamp, metrics.timestamp),
        (self.currentBufferedBalance, metrics.bufferedBalance),
        (self.currentDepositedValidators, metrics.depositedValidators),
        (self.currentActiveValidatorBalance, metrics.activeValidatorBalance),
        # Derived quantities computed by PoolMetrics itself.
        (self.currentTotalPooledEther, metrics.getTotalPooledEther()),
        (self.currentTransientValidators, metrics.getTransientValidators()),
        (self.currentTransientBalance, metrics.getTransientBalance()),
    )
    for gauge, value in gauge_values:
        gauge.set(value)
    # Keys count is only known on the full (second-pass) metrics.
    if metrics.validatorsKeysNumber is not None:
        self.currentValidatorsKeysNumber.set(metrics.validatorsKeysNumber)
def get_current_metrics(
        w3, beacon, pool, oracle, registry, beacon_spec,
        partial_metrics: t.Optional[PoolMetrics] = None) -> PoolMetrics:
    """Collect the metrics for the upcoming oracle report.

    If the result of a previous get_current_metrics call isn't given,
    create and return a partial metric. Since it doesn't get keys from
    the registry and doesn't retrieve beacon state, it's much faster.
    When called again with that partial result, the remaining ETH1/ETH2
    state is fetched and the completed metrics are returned.

    :param w3: ETH1 web3 client
    :param beacon: beacon-chain client wrapper
    :param pool: Lido pool contract
    :param oracle: oracle contract
    :param registry: node operators registry (kept for interface compatibility)
    :param beacon_spec: (epochs_per_frame, slots_per_epoch, seconds_per_slot, genesis_time)
    :param partial_metrics: result of a previous (fast) call, or None
    :return: partial PoolMetrics on the first pass, full PoolMetrics on the second
    """
    if partial_metrics is None:
        epochs_per_frame = beacon_spec[0]
        partial_metrics = PoolMetrics()
        partial_metrics.blockNumber = w3.eth.getBlock('latest')['number']
        # Get the epoch that is both finalized and reportable
        current_frame = oracle.functions.getCurrentFrame().call()
        potentially_reportable_epoch = current_frame[0]
        logging.info(
            f'Potentially reportable epoch: {potentially_reportable_epoch} (from ETH1 contract)'
        )
        finalized_epoch_beacon = beacon.get_finalized_epoch()
        # For Web3 client
        # finalized_epoch_beacon = int(beacon.get_finality_checkpoint()['data']['finalized']['epoch'])
        logging.info(
            f'Last finalized epoch: {finalized_epoch_beacon} (from Beacon)')
        # Snap the finalized epoch down to a frame boundary; never report
        # ahead of what the contract considers reportable.
        partial_metrics.epoch = min(
            potentially_reportable_epoch,
            (finalized_epoch_beacon // epochs_per_frame) * epochs_per_frame)
        partial_metrics.timestamp = get_timestamp_by_epoch(
            beacon_spec, partial_metrics.epoch)
        partial_metrics.depositedValidators = pool.functions.getBeaconStat().call()[0]
        partial_metrics.bufferedBalance = pool.functions.getBufferedEther().call()
        return partial_metrics

    # Partial result provided: fetch all the remaining required state
    # from ETH1 and ETH2 and complete the metrics in place.
    slots_per_epoch = beacon_spec[1]
    slot = partial_metrics.epoch * slots_per_epoch
    logging.info(
        f'Reportable state: epoch:{partial_metrics.epoch} slot:{slot}')
    validators_keys = get_validators_keys(w3)
    logging.info(f'Total validator keys in registry: {len(validators_keys)}')
    full_metrics = partial_metrics
    full_metrics.validatorsKeysNumber = len(validators_keys)
    full_metrics.beaconBalance, full_metrics.beaconValidators, full_metrics.activeValidatorBalance = \
        beacon.get_balances(slot, validators_keys)
    logging.info(
        f'Lido validators\' sum. balance on Beacon: {full_metrics.beaconBalance} wei or {full_metrics.beaconBalance/1e18} ETH'
    )
    logging.info(
        f'Lido validators visible on Beacon: {full_metrics.beaconValidators}')
    return full_metrics
def __init__(self, period, log_filename, food_size, history_size):
    """Wire up metrics collection plus console and (optional) JSON reporting."""
    self.pool_metrics = PoolMetrics(food_size, history_size)
    self.period = period
    # JSON output is optional: only enabled when a log filename is given.
    self.json_reporter = (JSONReporter(self.pool_metrics, log_filename)
                          if log_filename else None)
    self.console_reporter = ConsoleReporter(self.pool_metrics, period)
    # (metric name, [console abbreviation, [console format]]) — registered in order.
    tracked = (
        ('current_expressions_count', 'count'),
        ('current_expressions_distinct_count', 'dcount'),
        ('current_expressions_reducible_count', 'red.count'),
        ('current_expressions_top10_length',),
        ('current_expressions_max_length', 'maxlen'),
        ('current_expressions_mean_length',),
        # debug
        ('current_expressions_max_depth', 'maxdepth'),
        # ('current_expressions_reduction_count', 'reductions', '{:.1f}'),
        # end debug
        ('current_expressions_mean_length', 'meanlen', '{:.2f}'),
        ('recent_expressions_recurrence_count',),
        ('recent_largest_scc_size', 'sccLen'),
        ('recent_scc_count', '#scc'),
        ('recent_raf_scc_count',),
        ('recent_raf_length', 'raf'),
        ('recent_raf_product_max_length',),
        ('recent_raf_products_count',),
        ('recent_reactions_count',),
        ('current_expressions_max_multiplicity',),
        ('current_expressions_mean_multiplicity',),
        ('current_expressions_percent_at_1', '@1', '{:.0f}'),
        ('current_expressions_percent_at_2', '@2', '{:.0f}'),
        ('recent_raf_products_max_multiplicity', 'raf_mult'),
        ('recent_raf_complement_products_max_multiplicity',),
        ('recent_raf_cycle_length', 'raf_lvl'),
        ('recent_raf_substrate_count', 'sbst'),
        ('current_expressions_max_multiplicity_length',),
        ('current_p_reduce', 'Pr', '{:.2f}'),
        ('current_p_break',),
        ('current_p_combine',),
        ('current_n_reduce',),
        ('current_n_break',),
        ('current_n_combine',),
        ('current_total_size', 'T'),
    )
    # NOTE(review): 'current_expressions_mean_length' appears twice above
    # (as in the original) — confirm whether the duplicate is intentional.
    for spec in tracked:
        self.track_metric(*spec)
def test_compare_pool_metrics_prev_null_tpe(caplog):
    """First-run scenario: the previous report is all zeros (no pooled ether)."""
    caplog.set_level(logging.INFO)
    prev = PoolMetrics()
    curr = PoolMetrics()
    curr.timestamp = 123
    curr.beaconValidators = 1
    curr.beaconBalance = 33 * 10**18
    curr.depositedValidators = 3
    curr.bufferedBalance = 100 * 10**18

    compare_pool_metrics(prev, curr)

    expected_lines = (
        "Time delta: 0:02:03 or 123",  # 123 s
        "depositedValidators before:0 after:3 change:3",
        "beaconValidators before:0 after:1 change:1",
        "transientValidators before:0 after:2 change:2",  # =3-1
        "beaconBalance before:0 after:33000000000000000000 change:33000000000000000000",
        "bufferedBalance before:0 after:100000000000000000000 change:100000000000000000000",
        "transientBalance before:0 after:64000000000000000000 change:64000000000000000000",  # 2 validators * 32
        "totalPooledEther before:0 after:197000000000000000000",  # 33 + 2*32 + 100
        "The Lido has no funds under its control",
        "activeValidatorBalance now:0",
    )
    for line in expected_lines:
        assert line in caplog.text
def get_previous_metrics(w3, pool, oracle, beacon_spec, from_block=0) -> PoolMetrics:
    """Fetch the previously reported numbers.

    Since the contract lacks a method that returns the time of the last
    report and the reported numbers, web3.py event filtering is used to
    recover them from the contract's 'Completed' events.
    """
    logging.info(
        'Getting previously reported numbers (will be fetched from events)...')
    genesis_time = beacon_spec[3]
    metrics = PoolMetrics()
    metrics.depositedValidators, metrics.beaconValidators, metrics.beaconBalance = \
        pool.functions.getBeaconStat().call()
    metrics.bufferedBalance = pool.functions.getBufferedEther().call()
    # Limit the scanning depth: no event can predate the beacon genesis.
    SECONDS_PER_ETH1_BLOCK = 14
    latest_block = w3.eth.getBlock('latest')
    genesis_eth1_block = int(
        (latest_block['timestamp'] - genesis_time) / SECONDS_PER_ETH1_BLOCK)
    from_block = max(from_block, genesis_eth1_block)
    step = 10000
    # Scan backwards in `step`-sized windows for the latest 'Completed' event.
    for window_end in range(latest_block['number'], from_block, -step):
        window_start = max(window_end - step + 1, from_block)
        events = oracle.events.Completed.getLogs(
            fromBlock=window_start, toBlock=window_end)
        if events:
            last_completed = events[-1]
            metrics.epoch = last_completed['args']['epochId']
            metrics.blockNumber = last_completed.blockNumber
            break
    if metrics.epoch:
        # The epoch has been assigned from the last event (not the first run).
        metrics.timestamp = get_timestamp_by_epoch(beacon_spec, metrics.epoch)
    else:
        # First run: fall back to the genesis time.
        metrics.timestamp = genesis_time
    return metrics
def compare_pool_metrics(previous: PoolMetrics, current: PoolMetrics) -> bool:
    """Describe the economics of the metrics change.

    Helps the Node operator to understand the effect of firing the
    composed TX. Logs a detailed before/after breakdown and APR sanity
    checks.

    :param previous: last reported metrics
    :param current: freshly collected metrics
    :return: True when the metrics look suspicious (warnings were logged)
    """
    # A visually loud separator around warnings that require operator attention.
    alarm = '!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'
    warnings = False
    assert previous.DEPOSIT_SIZE == current.DEPOSIT_SIZE
    DEPOSIT_SIZE = previous.DEPOSIT_SIZE
    delta_seconds = current.timestamp - previous.timestamp
    metrics_exporter_state.deltaSeconds.set(
        delta_seconds)  # fixme: get rid of side effects
    appeared_validators = current.beaconValidators - previous.beaconValidators
    metrics_exporter_state.appearedValidators.set(appeared_validators)
    logging.info(
        f'Time delta: {datetime.timedelta(seconds=delta_seconds)} or {delta_seconds} s'
    )
    logging.info(
        f'depositedValidators before:{previous.depositedValidators} after:{current.depositedValidators} change:{current.depositedValidators - previous.depositedValidators}'
    )

    if current.beaconValidators < previous.beaconValidators:
        warnings = True
        logging.warning(alarm)
        logging.warning(
            'The number of beacon validators unexpectedly decreased!')
        logging.warning(alarm)

    logging.info(
        f'beaconValidators before:{previous.beaconValidators} after:{current.beaconValidators} change:{appeared_validators}'
    )
    logging.info(
        f'transientValidators before:{previous.getTransientValidators()} after:{current.getTransientValidators()} change:{current.getTransientValidators() - previous.getTransientValidators()}'
    )
    logging.info(
        f'beaconBalance before:{previous.beaconBalance} after:{current.beaconBalance} change:{current.beaconBalance - previous.beaconBalance}'
    )
    logging.info(
        f'bufferedBalance before:{previous.bufferedBalance} after:{current.bufferedBalance} change:{current.bufferedBalance - previous.bufferedBalance}'
    )
    logging.info(
        f'transientBalance before:{previous.getTransientBalance()} after:{current.getTransientBalance()} change:{current.getTransientBalance() - previous.getTransientBalance()}'
    )
    logging.info(
        f'totalPooledEther before:{previous.getTotalPooledEther()} after:{current.getTotalPooledEther()} '
    )
    logging.info(
        f'activeValidatorBalance now:{current.activeValidatorBalance} ')

    # Reward = balance growth beyond what newly appeared deposits account for.
    reward_base = appeared_validators * DEPOSIT_SIZE + previous.beaconBalance
    reward = current.beaconBalance - reward_base

    if not previous.getTotalPooledEther():
        logging.info(
            'The Lido has no funds under its control. Probably the system has been just deployed and has never been deposited'
        )
        return warnings

    if not delta_seconds:
        logging.info(
            'No time delta between current and previous epochs. Skip APR calculations.'
        )
        assert (reward == 0)
        assert (current.beaconValidators == previous.beaconValidators)
        # Fixed: the original compared current.beaconBalance with itself,
        # making the assertion vacuous.
        assert (current.beaconBalance == previous.beaconBalance)
        return warnings

    # APR calculation
    if current.activeValidatorBalance == 0:
        daily_reward_rate = 0
    else:
        days = delta_seconds / 60 / 60 / 24
        daily_reward_rate = reward / current.activeValidatorBalance / days
    apr = daily_reward_rate * 365

    if reward >= 0:
        logging.info(
            f'Validators were rewarded {reward} wei or {reward/1e18} ETH')
        logging.info(
            f'Rewards will increase Total pooled ethers by: {reward / previous.getTotalPooledEther() * 100:.4f} %'
        )
        logging.info(
            f'Daily staking reward rate for active validators: {daily_reward_rate * 100:.8f} %'
        )
        logging.info(f'Staking APR for active validators: {apr * 100:.4f} %')
        if (apr > current.MAX_APR):
            warnings = True
            logging.warning(alarm)
            logging.warning(
                'Staking APR too high! Talk to your fellow oracles before submitting!'
            )
            logging.warning(alarm)
        if (apr < current.MIN_APR):
            warnings = True
            logging.warning(alarm)
            logging.warning(
                'Staking APR too low! Talk to your fellow oracles before submitting!'
            )
            logging.warning(alarm)
    else:
        warnings = True
        logging.warning(alarm)
        logging.warning(
            f'Penalties will decrease totalPooledEther by {-reward} wei or {-reward/1e18} ETH'
        )
        logging.warning(
            'Validators were either slashed or suffered penalties!')
        logging.warning('Talk to your fellow oracles before submitting!')
        logging.warning(alarm)

    if reward == 0:
        logging.info(
            'Beacon balances stay intact (neither slashed nor rewarded). So this report won\'t have any economical impact on the pool.'
        )
    return warnings
class PoolObserver(object):
    """Observes a pool simulation: accumulates PoolMetrics each step and
    periodically reports them to the console and, optionally, a JSON log.
    """

    def __init__(self, period, log_filename, food_size, history_size):
        # Aggregates all tracked statistics for the running pool.
        self.pool_metrics = PoolMetrics(food_size, history_size)
        # Report every `period` ticks (see on_step_computed).
        self.period = period
        # JSON output is optional: only enabled when a log filename is given.
        self.json_reporter = JSONReporter(self.pool_metrics, log_filename)\
            if log_filename else None
        self.console_reporter = ConsoleReporter(self.pool_metrics, period)
        self.track_metric('current_expressions_count', 'count')
        self.track_metric('current_expressions_distinct_count', 'dcount')
        self.track_metric('current_expressions_reducible_count', 'red.count')
        self.track_metric('current_expressions_top10_length')
        self.track_metric('current_expressions_max_length', 'maxlen')
        # NOTE(review): 'current_expressions_mean_length' is registered twice
        # (here and below with the 'meanlen' abbreviation) — confirm intent.
        self.track_metric('current_expressions_mean_length')
        # debug
        self.track_metric('current_expressions_max_depth', 'maxdepth')
        #self.track_metric('current_expressions_reduction_count', 'reductions', '{:.1f}')
        # end debug
        self.track_metric('current_expressions_mean_length', 'meanlen', '{:.2f}')
        self.track_metric('recent_expressions_recurrence_count')
        self.track_metric('recent_largest_scc_size', 'sccLen')
        self.track_metric('recent_scc_count', '#scc')
        self.track_metric('recent_raf_scc_count')
        self.track_metric('recent_raf_length', 'raf')
        self.track_metric('recent_raf_product_max_length')
        self.track_metric('recent_raf_products_count')
        self.track_metric('recent_reactions_count')
        self.track_metric('current_expressions_max_multiplicity')
        self.track_metric('current_expressions_mean_multiplicity')
        self.track_metric('current_expressions_percent_at_1', '@1', '{:.0f}')
        self.track_metric('current_expressions_percent_at_2', '@2', '{:.0f}')
        self.track_metric('recent_raf_products_max_multiplicity', 'raf_mult')
        self.track_metric('recent_raf_complement_products_max_multiplicity')
        self.track_metric('recent_raf_cycle_length', 'raf_lvl')
        self.track_metric('recent_raf_substrate_count', 'sbst')
        self.track_metric('current_expressions_max_multiplicity_length')
        self.track_metric('current_p_reduce', 'Pr', '{:.2f}')
        self.track_metric('current_p_break')
        self.track_metric('current_p_combine')
        self.track_metric('current_n_reduce')
        self.track_metric('current_n_break')
        self.track_metric('current_n_combine')
        self.track_metric('current_total_size', 'T')
        #self.track_metric('recent_recurrent_expression_length', 'rec_expr_len')
        #self.track_metric('recent_raf_scc_expressions_multiplicity', 'scc_mult')

    def track_metric(self, metric, console_abbr=None, console_fmt='{}'):
        """Register a metric: always with the JSON reporter (when present),
        and with the console reporter only when an abbreviation is given."""
        if self.json_reporter:
            self.json_reporter.track_metric(metric)
        if console_abbr:
            self.console_reporter.track_metric(metric, console_abbr, console_fmt)

    def on_step_computed(self, pool, ticks):
        # Emit a report every `period` ticks, skipping tick 0.
        if ticks > 0 and ticks % self.period == 0:
            self.report(ticks)

    def on_reaction_computed(self, pool, reaction):
        # Delegate per-reaction accounting to the metrics aggregator.
        self.pool_metrics.on_reaction_computed(pool, reaction)

    def report(self, generation):
        """Flush the current metrics to all reporters and reset the
        windowed ('recent') statistics for the next interval."""
        if self.json_reporter:
            self.json_reporter.report(generation)
        self.console_reporter.report(generation)
        self.pool_metrics.reset_perishable_history()

    def print_preceding_graph(self, graph, m, depth=1):
        """Recursively print the 'reduce' reactions that produced expression
        `m`, following large reactants up to `depth` levels back."""
        if depth > 0:
            for reaction in graph.predecessors(m):
                reactives = list(map(str, reaction.reactives))
                # NOTE(review): `graph.node` is the pre-2.x networkx attribute
                # API (newer versions use `graph.nodes`) — confirm the pinned
                # library version.
                reaction_type = graph.node[reaction]['reaction_type']
                if reaction_type != 'reduce':
                    continue
                print(reaction_type, " + ".join(reactives), '->', m)
                for r in reaction:
                    # Only recurse into sufficiently large reactants.
                    if r.size() >4:
                        #if term.is_reducible(r, None):
                        #    print(" / ".join(map(term.to_str, map(operator.itemgetter(0), term.all_reductions(r)))))
                        self.print_preceding_graph(graph, r, depth-1)
def test_compare_pool_metrics_primitive_1percent_daily_too_high(caplog):
    """A 1% daily gain implies a 365% APR, which must trip the 'too high' warning."""
    caplog.set_level(logging.INFO)
    # Yesterday the Lido had beaconBalance = 100 ETH
    # and some number of validators (it won't change)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 100 * ETH
    prev.beaconValidators = 100000
    prev.depositedValidators = 100000
    prev.bufferedBalance = 0
    # Today the beaconBalance increased by 1%; other numbers stay intact.
    curr = PoolMetrics()
    curr.timestamp = 1600000000 + DAY
    # Interest gets calculated against cumulative balance of active validators.
    curr.activeValidatorBalance = 100 * ETH
    curr.beaconBalance = 101 * ETH
    curr.beaconValidators = 100000
    curr.depositedValidators = 100000
    curr.bufferedBalance = 0

    compare_pool_metrics(prev, curr)

    for expected in (
        "Time delta: 1 day",
        "Rewards will increase Total pooled ethers by: 1.0000 %",
        "Daily staking reward rate for active validators: 1.00000000 %",
        "Staking APR for active validators: 365.0000 %",
        "Staking APR too high! Talk to your fellow oracles before submitting!",
    ):
        assert expected in caplog.text
def test_pool_metrics_constants():
    """APR sanity bounds baked into PoolMetrics."""
    metrics = PoolMetrics()
    assert metrics.MIN_APR == 0.01
    assert metrics.MAX_APR == 0.15
def test_compare_pool_metrics_loss(caplog):
    """A drop in beaconBalance must be reported as slashing/penalties."""
    caplog.set_level(logging.INFO)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 1000001 * ETH
    prev.beaconValidators = 123
    prev.depositedValidators = 231
    prev.bufferedBalance = 456
    curr = PoolMetrics()
    curr.timestamp = 1600000001
    curr.beaconBalance = 1000000 * ETH  # loss
    curr.beaconValidators = 123
    curr.depositedValidators = 231
    curr.bufferedBalance = 123

    compare_pool_metrics(prev, curr)

    for expected in (
        "Penalties will decrease totalPooledEther by",
        "Validators were either slashed or suffered penalties!",
    ):
        assert expected in caplog.text
def test_compare_pool_metrics_0_balance_0_apr(caplog):
    """Zero beacon balance on both sides: zero reward, 0% APR, 'too low' warning."""
    caplog.set_level(logging.INFO)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 0
    prev.beaconValidators = 0
    prev.depositedValidators = 45  # Doesn't matter
    prev.bufferedBalance = 1 * ETH  # Doesn't matter
    curr = PoolMetrics()
    curr.timestamp = 1600000000 + DAY  # Doesn't matter
    curr.beaconBalance = 0
    curr.activeValidatorBalance = 0
    curr.beaconValidators = 0
    curr.depositedValidators = 67  # Doesn't matter
    curr.bufferedBalance = 123 * ETH  # Doesn't matter

    compare_pool_metrics(prev, curr)

    for expected in (
        "beaconBalance before:0 after:0 change:0",
        "activeValidatorBalance now:0",
        "Validators were rewarded 0 wei or 0.0 ETH",
        "Daily staking reward rate for active validators: 0.00000000 %",
        "Staking APR for active validators: 0.0000 %",
        "Staking APR too low! Talk to your fellow oracles before submitting!",
        "Beacon balances stay intact (neither slashed nor rewarded). So this report won't have any economical impact on the pool.",
    ):
        assert expected in caplog.text
def test_compare_pool_metrics_validators_decrease(caplog):
    """Fewer beacon validators than before must trigger the decrease warning."""
    caplog.set_level(logging.INFO)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 1 * ETH
    prev.beaconValidators = 31
    prev.depositedValidators = 45  # Doesn't matter
    prev.bufferedBalance = 1 * ETH  # Doesn't matter
    curr = PoolMetrics()
    curr.timestamp = 1600000000 + DAY  # Doesn't matter
    curr.beaconBalance = 1 * ETH  # Doesn't matter
    curr.activeValidatorBalance = 1 * ETH  # Doesn't matter
    curr.beaconValidators = 30  # one validator fewer than before
    curr.depositedValidators = 67  # Doesn't matter
    curr.bufferedBalance = 1 * ETH  # Doesn't matter

    compare_pool_metrics(prev, curr)

    for expected in (
        "beaconValidators before:31 after:30 change:-1",
        "The number of beacon validators unexpectedly decreased!",
    ):
        assert expected in caplog.text
def test_compare_pool_metrics_complex_too_low_apr(caplog):
    """Too low APR"""
    caplog.set_level(logging.INFO)
    # Last year the Lido had beaconBalance of 1000 ETH
    # (31 validators * 32 ETH + 8 ETH rewards)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 1000 * ETH
    prev.beaconValidators = 31  # Doesn't matter
    prev.depositedValidators = 45  # Doesn't matter
    prev.bufferedBalance = 345 * ETH  # Doesn't matter
    # Since that time the active validators rewarded 10 ETH
    curr = PoolMetrics()
    curr.timestamp = 1600000000 + DAY * 365
    curr.beaconBalance = 1010 * ETH
    curr.activeValidatorBalance = 1010 * ETH
    curr.beaconValidators = 31  # Doesn't matter
    curr.depositedValidators = 67  # Doesn't matter
    curr.bufferedBalance = 678 * ETH  # Doesn't matter
    # so it produced 10.0/1010 ~= 0.99% APR.
    # It's below the bottom threshold (MIN_APR = 1%). Warning printed.
    compare_pool_metrics(prev, curr)
    assert "Time delta: 365 days, 0:00:00 or 31536000 s" in caplog.text
    assert "Staking APR for active validators: 0.9901 %" in caplog.text
    assert "Staking APR too low!" in caplog.text
def test_pool_metrics_get_total_pooled_ether_empty():
    """A freshly constructed PoolMetrics holds no ether anywhere."""
    metrics = PoolMetrics()
    assert metrics.getTransientValidators() == 0
    assert metrics.getTransientBalance() == 0
    assert metrics.getTotalPooledEther() == 0
def test_compare_pool_metrics_complex_reasonable_apr(caplog):
    """More complex case with queued validators, deposits and an APR that
    stays below the MAX_APR (15%) warning threshold
    """
    caplog.set_level(logging.INFO)
    # Last year the Lido had beaconBalance of 1000 ETH
    # (31 validators * 32 ETH + 8 ETH rewards)
    prev = PoolMetrics()
    prev.timestamp = 1600000000
    prev.beaconBalance = 1000 * ETH
    prev.beaconValidators = 31  # Doesn't matter
    prev.depositedValidators = 45  # Doesn't matter
    prev.bufferedBalance = 345 * ETH  # Doesn't matter
    # Since that time the active validators rewarded 175 ETH
    curr = PoolMetrics()
    curr.timestamp = 1600000000 + DAY * 365
    curr.beaconBalance = 1175 * ETH
    curr.activeValidatorBalance = 1175 * ETH
    curr.beaconValidators = 31  # Doesn't matter
    curr.depositedValidators = 67  # Doesn't matter
    curr.bufferedBalance = 678 * ETH  # Doesn't matter
    # so it produced 175.0/1175 ~= 14.8936% APR
    compare_pool_metrics(prev, curr)
    assert "Time delta: 365 days, 0:00:00 or 31536000 s" in caplog.text
    assert "Staking APR for active validators: 14.8936 %" in caplog.text
    # Output doesn't produce any warnings
    assert "Staking APR too " not in caplog.text