def is_expired(self) -> bool:
    """Return True once the current period has reached or passed this stake's end period."""
    # Prefer the authoritative on-chain period; fall back to wall-clock math.
    agent = self.staking_agent
    if agent:
        period_now = agent.get_current_period()  # on-chain
    else:
        period_now = datetime_to_period(maya.now())  # off-chain
    return bool(period_now >= self.end_period)
def stake(self, confirm_now=False, resume: bool = False, expiration: maya.MayaDT = None, lock_periods: int = None, *args, **kwargs) -> None:
    """High-level staking daemon loop"""
    # Duration may be given as a period count or an expiration datetime — never both.
    if lock_periods and expiration:
        raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
    if expiration:
        # NOTE(review): converts expiration to an absolute period number and later adds it
        # to the current period — confirm this offset arithmetic matches the intended duration.
        lock_periods = datetime_to_period(expiration)

    if resume is False:
        _staking_receipts = self.initialize_stake(expiration=expiration,
                                                  lock_periods=lock_periods,
                                                  *args, **kwargs)

    # TODO: Check if this period has already been confirmed
    # TODO: Check if there is an active stake in the current period: Resume staking daemon
    # TODO: Validation and Sanity checks

    if confirm_now:
        self.confirm_activity()

    # record start time and periods
    self.__start_time = maya.now()
    self.__uptime_period = self.miner_agent.get_current_period()
    self.__terminal_period = self.__uptime_period + lock_periods
    self.__current_period = self.__uptime_period
    self.start_staking_loop()
def _collect_events(self, threaded: bool = True):
    """Scrape configured error events from the chain and persist them to InfluxDB.

    When called with ``threaded=True`` (the default), re-dispatches itself onto the
    reactor thread pool and returns immediately; a guard flag prevents overlapping runs.
    """
    if threaded:
        if self.__collecting_events:
            self.log.debug("Skipping Round - Events collection thread is already running")
            return
        return reactor.callInThread(self._collect_events, threaded=False)

    self.__collecting_events = True

    client = self.staking_agent.blockchain.client
    head_block = client.block_number
    start_block = self.__events_from_block
    #block_time = latest_block.timestamp  # precision in seconds

    current_period = datetime_to_period(datetime=maya.now(),
                                        seconds_per_period=self.economics.seconds_per_period)

    lines = []
    for agent_class, event_names in self.ERROR_EVENTS.items():
        agent = ContractAgency.get_agent(agent_class, registry=self.registry)
        for event_name in event_names:
            contract_event = agent.contract.events[event_name]
            event_filter = contract_event.createFilter(fromBlock=start_block, toBlock=head_block)
            for entry in event_filter.get_all_entries():
                record = EventRecord(entry)
                args = ", ".join(f"{k}:{v}" for k, v in record.args.items())
                # Timestamp comes from the block the event landed in, not from "now".
                block_timestamp = client.w3.eth.getBlock(record.block_number).timestamp
                lines.append(self.EVENT_LINE_PROTOCOL.format(
                    measurement=self.EVENT_MEASUREMENT,
                    txhash=record.transaction_hash,
                    contract_name=agent.contract_name,
                    contract_address=agent.contract_address,
                    event_name=event_name,
                    block_number=record.block_number,
                    args=args,
                    timestamp=block_timestamp,
                ))

    success = self._influx_client.write_points(lines,
                                               database=self.INFLUX_DB_NAME,
                                               time_precision='s',
                                               batch_size=10000,
                                               protocol='line')
    self.__events_from_block = head_block
    self.__collecting_events = False
    if not success:
        # TODO: What do we do here - Event hook for alerting?
        self.log.warn(f'Unable to write events to database {self.INFLUX_DB_NAME} '
                      f'| Period {current_period} starting from block {start_block}')
def divide_stake(self, stake_index: int, target_value: NU, additional_periods: int = None, expiration: maya.MayaDT = None) -> tuple:
    """Split the stake at ``stake_index`` into two stakes.

    :param stake_index: Index of the stake to divide in the local stake cache.
    :param target_value: Token value (NU) carved out into the new stake.
    :param additional_periods: Periods to extend the new stake by (exclusive with expiration).
    :param expiration: New end datetime for the new stake (exclusive with additional_periods).
    :return: Tuple of (modified_stake, new_stake).
    :raises ValueError: If both additional_periods and expiration are given.
    :raises Stake.StakingError: On a bad index, no active stakes, or a too-early expiration.
    """
    # Calculate duration in periods
    if additional_periods and expiration:
        raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

    # Select stake to divide from local cache.
    # BUGFIX: a list-like stake cache raises IndexError on a bad index, which the
    # original `except KeyError` never caught — catch both so the friendly
    # StakingError is always raised instead of a raw lookup error.
    try:
        current_stake = self.stakes[stake_index]
    except (KeyError, IndexError):
        if len(self.stakes):
            message = f"Cannot divide stake - No stake exists with index {stake_index}."
        else:
            message = "Cannot divide stake - There are no active stakes."
        raise Stake.StakingError(message)

    # Calculate stake duration in periods
    if expiration:
        additional_periods = datetime_to_period(datetime=expiration) - current_stake.end_period
        if additional_periods <= 0:
            raise Stake.StakingError(f"New expiration {expiration} must be at least 1 period from the "
                                     f"current stake's end period ({current_stake.end_period}).")

    # Do it already!
    modified_stake, new_stake = current_stake.divide(target_value=target_value,
                                                     additional_periods=additional_periods)

    # Update staking cache
    self.__read_stakes()

    return modified_stake, new_stake
def _measure_time_remaining(self) -> str:
    """Return a human-readable timedelta until the next period begins."""
    seconds_per_period = self.economics.seconds_per_period
    this_period = datetime_to_period(datetime=maya.now(),
                                     seconds_per_period=seconds_per_period)
    next_period_start = datetime_at_period(period=this_period + 1,
                                           seconds_per_period=seconds_per_period)
    return str(next_period_start - maya.now())
def _measure_start_of_next_period(self) -> str:
    """Returns iso8601 datetime of next period"""
    seconds_per_period = self.economics.seconds_per_period
    this_period = datetime_to_period(datetime=maya.now(),
                                     seconds_per_period=seconds_per_period)
    upcoming = datetime_at_period(period=this_period + 1,
                                  seconds_per_period=seconds_per_period,
                                  start_of_period=True)
    return upcoming.iso8601()
def divide_stake(self, stake_index: int, target_value: int, additional_periods: int = None, expiration: maya.MayaDT = None) -> dict:
    """
    Modifies the unlocking schedule and value of already locked tokens.

    This actor requires that is_me is True, and that the expiration datetime is after the existing
    locking schedule of this miner, or an exception will be raised.

    :param target_value: The quantity of tokens in the smallest denomination.
    :param expiration: The new expiration date to set.
    :return: Returns the blockchain transaction hash
    """
    # Duration may be given as a period count or an expiration datetime — never both.
    if additional_periods and expiration:
        raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

    _first_period, last_period, locked_value = self.miner_agent.get_stake_info(
        miner_address=self.checksum_public_address, stake_index=stake_index)

    if expiration:
        additional_periods = datetime_to_period(datetime=expiration) - last_period
        if additional_periods <= 0:
            raise self.MinerError("Expiration {} must be at least 1 period from now.".format(expiration))

    if target_value >= locked_value:
        raise self.MinerError("Cannot divide stake; Value must be less than the specified stake value.")

    # Ensure both halves are for valid amounts
    validate_stake_amount(amount=target_value)
    validate_stake_amount(amount=locked_value - target_value)

    tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,
                                       stake_index=stake_index,
                                       target_value=target_value,
                                       periods=additional_periods)
    self.blockchain.wait_for_receipt(tx)
    return tx
def divide_stake(self, stake_index: int, target_value: NU, additional_periods: int = None, expiration: maya.MayaDT = None) -> dict:
    """
    Modifies the unlocking schedule and value of already locked tokens.

    This actor requires that is_me is True, and that the expiration datetime is after the existing
    locking schedule of this miner, or an exception will be raised.

    :param stake_index: The miner's stake index of the stake to divide
    :param additional_periods: The number of periods to extend the stake by
    :param target_value: The quantity of tokens in the smallest denomination to divide.
    :param expiration: The new expiration date to set as an end period for stake division.
    :return: Returns the blockchain transaction hash
    """
    if additional_periods and expiration:
        raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")

    stake = self.__stakes[stake_index]

    if expiration:
        # Convert the target datetime into a number of extension periods past the stake's end.
        additional_periods = datetime_to_period(datetime=expiration) - stake.end_period
        if additional_periods <= 0:
            raise self.MinerError("Expiration {} must be at least 1 period from now.".format(expiration))

    if target_value >= stake.value:
        raise self.MinerError(f"Cannot divide stake; Value ({target_value}) must be less "
                              f"than the existing stake value {stake.value}.")

    # Ensure both halves are for valid amounts
    validate_stake_amount(amount=target_value)
    validate_stake_amount(amount=stake.value - target_value)

    tx = self.miner_agent.divide_stake(miner_address=self.checksum_public_address,
                                       stake_index=stake_index,
                                       target_value=int(target_value),
                                       periods=additional_periods)
    self.blockchain.wait_for_receipt(tx)
    self.__read_stakes()  # update local on-chain stake cache
    return tx
def periods_remaining(self) -> int:
    """Returns the number of periods remaining in the stake from now."""
    period_now = datetime_to_period(datetime=maya.now())
    return self.end_period - period_now
def test_crawler_learn_about_nodes(new_influx_db, get_agent, get_economics, tempfile_path):
    """End-to-end crawler test: remember mock nodes, run a learning round, and
    verify node metrics are written to InfluxDB in line protocol."""
    influx_client = new_influx_db.return_value
    influx_client.write_points.return_value = True

    # TODO: issue with use of `agent.blockchain` causes spec=StakingEscrowAgent not to be specified in MagicMock
    # Get the following - AttributeError: Mock object has no attribute 'blockchain'
    staking_agent = MagicMock(autospec=True)
    contract_agency = MockContractAgency(staking_agent=staking_agent)
    get_agent.side_effect = contract_agency.get_agent

    token_economics = StandardTokenEconomics()
    get_economics.return_value = token_economics

    crawler = create_crawler(node_db_filepath=tempfile_path)
    node_db_client = CrawlerStorageClient(db_filepath=tempfile_path)
    try:
        crawler.start()
        assert crawler.is_running

        for i in range(0, 5):
            random_node = create_random_mock_node(generate_certificate=True)
            crawler.remember_node(node=random_node, record_fleet_state=True)

            known_nodes = node_db_client.get_known_nodes_metadata()
            assert len(known_nodes) > i
            assert random_node.checksum_address in known_nodes

            previous_states = node_db_client.get_previous_states_metadata()
            assert len(previous_states) > i

            # configure staking agent for blockchain calls
            tokens = NU(int(15000 + i * 2500), 'NU').to_nunits()
            current_period = datetime_to_period(maya.now(), token_economics.seconds_per_period)
            initial_period = current_period - i
            terminal_period = current_period + (i + 50)
            last_active_period = current_period - i
            staking_agent.get_worker_from_staker.side_effect = \
                lambda staker_address: crawler.node_storage.get(federated_only=False,
                                                                checksum_address=staker_address).worker_address

            configure_mock_staking_agent(staking_agent=staking_agent,
                                         tokens=tokens,
                                         current_period=current_period,
                                         initial_period=initial_period,
                                         terminal_period=terminal_period,
                                         last_active_period=last_active_period)

            # run crawler callable
            crawler._learn_about_nodes()

            # ensure data written to influx table
            influx_client.write_points.assert_called_once()

            # expected db row added
            call_args_list = influx_client.write_points.call_args_list
            line_protocol_statement = str(call_args_list[0][0])
            expected_arguments = [f'staker_address={random_node.checksum_address}',
                                  f'worker_address="{random_node.worker_address}"',
                                  f'stake={float(NU.from_nunits(tokens).to_tokens())}',
                                  f'locked_stake={float(NU.from_nunits(tokens).to_tokens())}',
                                  f'current_period={current_period}i',
                                  f'last_confirmed_period={last_active_period}i',
                                  f'work_orders={len(random_node.work_orders())}i']
            for arg in expected_arguments:
                assert arg in line_protocol_statement, \
                    f"{arg} in {line_protocol_statement} for iteration {i}"

            influx_client.reset_mock()
    finally:
        crawler.stop()

    influx_client.close.assert_called_once()
    assert not crawler.is_running
def stake(self, sample_rate: int = 10, refresh_rate: int = 60, confirm_now=True, resume: bool = False, expiration: maya.MayaDT = None, lock_periods: int = None, *args, **kwargs) -> None:
    """High-level staking daemon loop"""
    # Duration may be given as a period count or an expiration datetime — never both.
    if lock_periods and expiration:
        raise ValueError("Pass the number of lock periods or an expiration MayaDT; not both.")
    if expiration:
        # NOTE(review): converts expiration to an absolute period number and later adds it
        # to the current period — confirm this offset arithmetic matches the intended duration.
        lock_periods = datetime_to_period(expiration)

    if resume is False:
        _staking_receipts = super().initialize_stake(expiration=expiration,
                                                     lock_periods=lock_periods,
                                                     *args, **kwargs)

    # TODO: Check if this period has already been confirmed
    # TODO: Check if there is an active stake in the current period: Resume staking daemon
    # TODO: Validation and Sanity checks

    if confirm_now:
        self.confirm_activity()

    # record start time and periods
    start_time = maya.now()
    uptime_period = self.miner_agent.get_current_period()
    terminal_period = uptime_period + lock_periods
    current_period = uptime_period

    #
    # Daemon
    #
    try:
        while True:
            # calculate timedeltas
            elapsed = maya.now() - start_time

            # check if iteration re-samples
            if elapsed.seconds > (refresh_rate - 1):
                sampled_period = self.miner_agent.get_current_period()

                # check for stale sample data
                if current_period != sampled_period:
                    # check for stake expiration
                    if current_period >= terminal_period:
                        break
                    self.confirm_activity()
                    current_period = sampled_period

            # wait before resampling
            time.sleep(sample_rate)
            continue
    finally:
        # TODO: Cleanup
        pass
def _learn_about_nodes(self, threaded: bool = True):
    """Collect staking metrics for every known node and write them to InfluxDB.

    When called with ``threaded=True`` (the default), re-dispatches itself onto the
    reactor thread pool and returns immediately; a guard flag prevents overlapping runs.
    """
    if threaded:
        if self.__collecting_nodes:
            self.log.debug("Skipping Round - Nodes collection thread is already running")
            return
        return reactor.callInThread(self._learn_about_nodes, threaded=False)

    self.__collecting_nodes = True

    agent = self.staking_agent
    known_nodes = list(self.known_nodes)

    block_time = agent.blockchain.client.get_blocktime()  # precision in seconds
    current_period = datetime_to_period(datetime=maya.now(),
                                        seconds_per_period=self.economics.seconds_per_period)

    log = f'Processing {len(known_nodes)} nodes at {MayaDT(epoch=block_time)} | Period {current_period}'
    self.log.info(log)

    # PERF: economics lookup is loop-invariant (registry does not change per node) —
    # hoisted out of the per-node loop instead of re-resolving it every iteration.
    economics = EconomicsFactory.get_economics(registry=self.registry)

    data = list()
    for node in known_nodes:
        staker_address = node.checksum_address
        worker = agent.get_worker_from_staker(staker_address)

        stake = agent.owned_tokens(staker_address)
        staked_nu_tokens = float(NU.from_nunits(stake).to_tokens())
        locked_nu_tokens = float(NU.from_nunits(
            agent.get_locked_tokens(staker_address=staker_address)).to_tokens())

        stakes = StakeList(checksum_address=staker_address, registry=self.registry)
        stakes.refresh()
        if stakes.initial_period is NOT_STAKING:
            continue  # TODO: Skip this measurement for now

        start_date = datetime_at_period(stakes.initial_period,
                                        seconds_per_period=economics.seconds_per_period)
        start_date = start_date.datetime().timestamp()
        end_date = datetime_at_period(stakes.terminal_period,
                                      seconds_per_period=economics.seconds_per_period)
        end_date = end_date.datetime().timestamp()

        last_confirmed_period = agent.get_last_committed_period(staker_address)

        num_work_orders = 0  # len(node.work_orders())  # TODO: Only works for is_me with datastore attached

        # TODO: do we need to worry about how much information is in memory if number of nodes is
        # large i.e. should I check for size of data and write within loop if too big
        data.append(self.NODE_LINE_PROTOCOL.format(
            measurement=self.NODE_MEASUREMENT,
            staker_address=staker_address,
            worker_address=worker,
            start_date=start_date,
            end_date=end_date,
            stake=staked_nu_tokens,
            locked_stake=locked_nu_tokens,
            current_period=current_period,
            last_confirmed_period=last_confirmed_period,
            timestamp=block_time,
            work_orders=num_work_orders))

    success = self._influx_client.write_points(data,
                                               database=self.INFLUX_DB_NAME,
                                               time_precision='s',
                                               batch_size=10000,
                                               protocol='line')
    self.__collecting_nodes = False
    if not success:
        # TODO: What do we do here - Event hook for alerting?
        self.log.warn(f'Unable to write node information to database {self.INFLUX_DB_NAME} at '
                      f'{MayaDT(epoch=block_time)} | Period {current_period}')
def _collect_stats(self, threaded: bool = True) -> None:
    """Gather network-wide statistics and cache them in ``self._stats``.

    When called with ``threaded=True`` (the default), re-dispatches itself onto the
    reactor thread pool and returns immediately; a guard flag prevents overlapping runs.
    """
    # TODO: Handle faulty connection to provider (requests.exceptions.ReadTimeout)
    if threaded:
        if self.__collecting_stats:
            self.log.debug("Skipping Round - Metrics collection thread is already running")
            return
        return reactor.callInThread(self._collect_stats, threaded=False)

    self.__collection_round += 1
    self.__collecting_stats = True

    start = maya.now()
    click.secho(f"Scraping Round #{self.__collection_round} ========================", color='blue')
    self.log.info("Collecting Statistics...")

    #
    # Read
    #

    # Time
    block_time = self.staking_agent.blockchain.client.get_blocktime()  # epoch
    current_period = datetime_to_period(datetime=maya.now(),
                                        seconds_per_period=self.economics.seconds_per_period)
    click.secho("✓ ... Current Period", color='blue')
    time_remaining = self._measure_time_remaining()

    # Nodes
    teacher = self._crawler_client.get_current_teacher_checksum()
    states = self._crawler_client.get_previous_states_metadata()
    known_nodes = self.measure_known_nodes()
    activity = self._measure_staker_activity()

    # Stake
    future_locked_tokens = self._measure_future_locked_tokens()
    global_locked_tokens = self.staking_agent.get_global_locked_tokens()
    click.secho("✓ ... Global Network Locked Tokens", color='blue')
    top_stakers = self._measure_top_stakers()

    #
    # Write
    #
    self._stats = {'blocktime': block_time,
                   'current_period': current_period,
                   'next_period': time_remaining,
                   'prev_states': states,
                   'current_teacher': teacher,
                   'known_nodes': len(self.known_nodes),
                   'activity': activity,
                   'node_details': known_nodes,
                   'global_locked_tokens': global_locked_tokens,
                   'future_locked_tokens': future_locked_tokens,
                   'top_stakers': top_stakers}

    delta = maya.now() - start
    self.__collecting_stats = False
    click.echo(f"Scraping round completed (duration {delta}).", color='yellow')  # TODO: Make optional, use emitter, or remove
    click.echo("==========================================")
    self.log.debug(f"Collected new metrics took {delta}.")
def measure_known_nodes(self):
    """Bucket every known node by confirmation status and annotate uptime details.

    Returns a dict mapping lowercase status name -> list of node metadata dicts.
    """
    #
    # Setup
    #
    current_period = datetime_to_period(datetime=maya.now(),
                                        seconds_per_period=self.economics.seconds_per_period)
    buckets = {-1: ('green', 'Confirmed'),            # Confirmed Next Period
               0: ('#e0b32d', 'Pending'),             # Pending Confirmation of Next Period
               current_period: ('#525ae3', 'Idle'),   # Never confirmed
               NULL_ADDRESS: ('#d8d9da', 'Headless')} # Headless Staker (No Worker)

    shortest_uptime, newborn = float('inf'), None
    longest_uptime, uptime_king = 0, None

    uptime_template = '{days}d:{hours}h:{minutes}m'

    #
    # Scrape
    #
    payload = defaultdict(list)
    known_nodes = self._crawler_client.get_known_nodes_metadata()
    for staker_address in known_nodes:

        #
        # Confirmation Status Scraping
        #
        last_confirmed_period = self.staking_agent.get_last_committed_period(staker_address)
        missing_confirmations = current_period - last_confirmed_period
        worker = self.staking_agent.get_worker_from_staker(staker_address)
        if worker == NULL_ADDRESS:
            # missing_confirmations = NULL_ADDRESS
            continue  # TODO: Skip this DetachedWorker and do not display it

        bucket = buckets.get(missing_confirmations)
        if bucket is not None:
            color, status_message = bucket
        else:
            color, status_message = 'red', 'Unconfirmed'
        node_status = {'status': status_message,
                       'missed_confirmations': missing_confirmations,
                       'color': color}

        #
        # Uptime Scraping
        #
        now = maya.now()
        timestamp = maya.MayaDT.from_iso8601(known_nodes[staker_address]['timestamp'])
        delta = now - timestamp

        uptime_seconds = delta.total_seconds()
        is_confirmed = missing_confirmations == -1
        # NOTE: 'elif' preserves the original tie-breaking — a node that qualifies
        # as newborn is never also considered for uptime king in the same pass.
        if is_confirmed and uptime_seconds < shortest_uptime:
            shortest_uptime, newborn = uptime_seconds, staker_address
        elif is_confirmed and uptime_seconds > longest_uptime:
            longest_uptime, uptime_king = uptime_seconds, staker_address

        hours = delta.seconds // 3600
        minutes = delta.seconds % 3600 // 60
        natural_uptime = uptime_template.format(days=delta.days, hours=hours, minutes=minutes)

        #
        # Aggregate
        #
        known_nodes[staker_address]['status'] = node_status
        known_nodes[staker_address]['uptime'] = natural_uptime
        payload[status_message.lower()].append(known_nodes[staker_address])

    # There are not always winners...
    if newborn:
        known_nodes[newborn]['newborn'] = True
    if uptime_king:
        known_nodes[uptime_king]['uptime_king'] = True

    return payload