def _handle_failed_transaction(cls, exception: Exception, transaction_dict: dict, contract_function: Union[ContractFunction, ContractConstructor], logger: Logger = None ) -> None: """ Re-raising error handler and context manager for transaction broadcast or build failure events at the interface layer. This method is a last line of defense against unhandled exceptions caused by transaction failures and must raise an exception. # TODO: #1504 - Additional Handling of validation failures (gas limits, invalid fields, etc.) """ try: response = exception.args[0] except (AttributeError, TypeError): # Python exceptions must have the 'args' attribute which must be a sequence (i.e. indexable) raise ValueError(f'{exception} is not a valid Exception instance') # Assume this error is formatted as an RPC response try: code = int(response['code']) message = response['message'] except (KeyError, ValueError): # TODO: #1504 - Try even harder to determine if this is insufficient funds causing the issue, # This may be best handled at the agent or actor layer for registry and token interactions. # Worst case scenario - raise the exception held in context implicitly raise exception if code != cls.TransactionFailed.IPC_CODE: # Only handle client-specific exceptions # https://www.jsonrpc.org/specification Section 5.1 raise exception if logger: logger.critical(message) # simple context transaction_failed = cls.TransactionFailed(message=message, # rich error (best case) contract_function=contract_function, transaction_dict=transaction_dict) raise transaction_failed from exception
class BaseContractRegistry(ABC):
    """
    Records known contracts on the disk for future access and utility.
    This lazily writes to the filesystem during contract enrollment.

    Subclasses must implement the storage primitives ``read``, ``write`` and
    ``_destroy``; this base class supplies identity (content hashing), search,
    and enrollment logic on top of them.

    WARNING: Unless you are developing NuCypher, you most likely won't ever need to use this.
    """

    logger = Logger('ContractRegistry')

    # Registry record layout used throughout this class:
    #   [contract_name, contract_version, contract_address, contract_abi]
    _multi_contract = True
    _contract_name = NotImplemented

    # Registry
    REGISTRY_NAME = 'contract_registry.json'  # TODO: #1511 Save registry with ID-time-based filename
    DEVELOPMENT_REGISTRY_NAME = 'dev_contract_registry.json'

    class RegistryError(Exception):
        pass

    class EmptyRegistry(RegistryError):
        pass

    class NoRegistry(RegistryError):
        pass

    class UnknownContract(RegistryError):
        pass

    class InvalidRegistry(RegistryError):
        """Raised when invalid data is encountered in the registry"""

    class CantOverwriteRegistry(RegistryError):
        pass

    def __init__(self, source=None, *args, **kwargs):
        # ``source`` records where this registry's data came from
        # (see the ``source`` property); ``_id`` caches the content hash.
        self.__source = source
        self.log = Logger("registry")
        self._id = None

    def __eq__(self, other) -> bool:
        """Registries are equal when their content hashes (``id``) match.

        NOTE(review): assumes ``other`` exposes an ``id`` attribute — comparing
        against an unrelated type raises AttributeError; confirm callers only
        compare registries.
        """
        if self is other:
            return True  # and that's all
        return bool(self.id == other.id)

    def __repr__(self) -> str:
        # Short prefix of the content hash is enough to distinguish registries in logs.
        r = f"{self.__class__.__name__}(id={self.id[:6]})"
        return r

    @property
    def id(self) -> str:
        """Returns a hexstr of the registry contents.

        Computed as a blake2b hash of the JSON-serialized ``read()`` output
        and cached on first access (not invalidated by later writes).
        """
        if not self._id:
            blake = hashlib.blake2b()
            blake.update(json.dumps(self.read()).encode())
            self._id = blake.digest().hex()
        return self._id

    @abstractmethod
    def _destroy(self) -> None:
        """Remove the backing storage of this registry."""
        raise NotImplementedError

    @abstractmethod
    def write(self, registry_data: list) -> None:
        """Persist the full registry contents, replacing what was stored."""
        raise NotImplementedError

    @abstractmethod
    def read(self) -> Union[list, dict]:
        """Return the full registry contents from storage."""
        raise NotImplementedError

    @classmethod
    def from_latest_publication(cls,
                                *args,
                                source_manager=None,
                                network: str = NetworksInventory.DEFAULT,
                                **kwargs) -> 'BaseContractRegistry':
        """
        Get the latest contract registry available from a registry source chain.

        Fetches the published registry data via ``source_manager`` (a default
        ``RegistrySourceManager`` is created when none is given), instantiates
        the registry, and persists the fetched data into it.
        """
        if not source_manager:
            source_manager = RegistrySourceManager()

        registry_data, source = source_manager.fetch_latest_publication(registry_class=cls,
                                                                        network=network)

        registry_instance = cls(*args, source=source, **kwargs)
        registry_instance.write(registry_data=json.loads(registry_data))
        return registry_instance

    @property
    def source(self) -> 'CanonicalRegistrySource':
        # The origin this registry's data was fetched from, if any (set in __init__).
        return self.__source

    @property
    def enrolled_names(self) -> Iterator:
        # Field 0 of each record is the contract name.
        entries = iter(record[0] for record in self.read())
        return entries

    @property
    def enrolled_addresses(self) -> Iterator:
        # Field 2 of each record is the contract address.
        entries = iter(record[2] for record in self.read())
        return entries

    def enroll(self, contract_name, contract_address, contract_abi, contract_version) -> None:
        """
        Enrolls a contract to the chain registry by writing the name, version,
        address, and abi information to the filesystem as JSON.

        Note: Unless you are developing NuCypher, you most likely won't ever
        need to use this.
        """
        # Keep field order consistent with the record layout documented on the class.
        contract_data = [contract_name, contract_version, contract_address, contract_abi]

        try:
            registry_data = self.read()
        except self.RegistryError:
            # A missing/blank registry is not fatal for enrollment: start fresh.
            self.log.info("Blank registry encountered: enrolling {}:{}:{}".format(contract_name,
                                                                                  contract_version,
                                                                                  contract_address))
            registry_data = list()  # empty registry

        registry_data.append(contract_data)
        self.write(registry_data)
        self.log.info("Enrolled {}:{}:{} into registry.".format(contract_name,
                                                                contract_version,
                                                                contract_address))

    def search(self,
               contract_name: str = None,
               contract_version: str = None,
               contract_address: str = None) -> tuple:
        """
        Searches the registry for a contract with the provided name or address
        and returns the contracts component data.

        Returns a tuple of matching records when searching by name, or the
        single matching record when searching by address.

        :raises ValueError: if neither or both of name/address are given, or
            version is given without a name.
        :raises InvalidRegistry: on malformed records, or multiple records for
            one address.
        :raises UnknownContract: when nothing matches.
        """
        # Exactly one of name/address must be provided (XOR).
        if not (bool(contract_name) ^ bool(contract_address)):
            raise ValueError("Pass contract_name or contract_address, not both.")
        if bool(contract_version) and not bool(contract_name):
            raise ValueError("Pass contract_version together with contract_name.")

        contracts = list()
        registry_data = self.read()

        try:
            for contract in registry_data:
                name, version, address, abi = contract
                # Precedence note: 'or' binds looser than 'and' -- a record matches
                # on address alone, OR on name plus (optional) version.
                if contract_address == address or \
                        contract_name == name and (contract_version is None or version == contract_version):
                    contracts.append(contract)
        except ValueError:
            # Raised by the 4-way unpack when a record has the wrong arity.
            message = "Missing or corrupted registry data"
            self.log.critical(message)
            raise self.InvalidRegistry(message)

        if not contracts:
            raise self.UnknownContract(contract_name)

        if contract_address and len(contracts) > 1:
            # Addresses are expected to be unique in the registry.
            m = f"Multiple records returned for address {contract_address}"
            self.log.critical(m)
            raise self.InvalidRegistry(m)

        result = tuple(contracts) if contract_name else contracts[0]
        return result
class WorkTracker:
    """
    Periodic background task (Twisted ``LoopingCall``) that keeps a worker's
    staking commitments current: it reads the current period from the staking
    contract, fires commitment transactions, tracks the pending transactions
    it fired, and issues replacement transactions for ones that appear stuck.
    """

    CLOCK = reactor
    INTERVAL_FLOOR = 60 * 15  # fifteen minutes
    INTERVAL_CEIL = 60 * 180  # three hours
    ALLOWED_DEVIATION = 0.5  # i.e., up to +50% from the expected confirmation time

    def __init__(self, worker, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.log = Logger('stake-tracker')
        self.worker = worker
        self.staking_agent = self.worker.staking_agent
        self.client = self.staking_agent.blockchain.client
        self.gas_strategy = self.staking_agent.blockchain.gas_strategy

        # Looping task driven by CLOCK; interval is (re)set in start() and _do_work().
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        # Maps firing-block-number -> txhash. Key 0 is reserved as a sentinel slot
        # for UNTRACKED_PENDING_TRANSACTION (see __track_pending_commitments).
        self.__pending = dict()  # TODO: Prime with pending worker transactions
        self.__requirement = None
        self.__current_period = None
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = False
        self._consecutive_fails = 0

    @classmethod
    def random_interval(cls, fails=None) -> int:
        """Next tracking interval: the floor while recovering from failures,
        otherwise a random value in [INTERVAL_FLOOR, INTERVAL_CEIL]."""
        if fails is not None and fails > 0:
            return cls.INTERVAL_FLOOR
        return random.randint(cls.INTERVAL_FLOOR, cls.INTERVAL_CEIL)

    @property
    def current_period(self):
        # Last period observed on-chain (updated in _do_work).
        return self.__current_period

    def max_confirmation_time(self) -> int:
        """Maximum seconds to wait for a confirmation under the current gas strategy.

        NOTE(review): returns a float (expected_time * 1.5) despite the int annotation.
        """
        expected_time = EXPECTED_CONFIRMATION_TIME_IN_SECONDS[self.gas_strategy]  # FIXME: #2447
        result = expected_time * (1 + self.ALLOWED_DEVIATION)
        return result

    def stop(self) -> None:
        """Stop the looping task if it is running (idempotent)."""
        if self._tracking_task.running:
            self._tracking_task.stop()
            self.log.info(f"STOPPED WORK TRACKING")

    def start(self, act_now: bool = True, requirement_func: Callable = None, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.

        :param act_now: Run _do_work immediately instead of waiting one interval.
        :param requirement_func: Optional boolean-returning gate checked before committing.
        :param force: Restart tracking even if the task is already running.
        """
        if self._tracking_task.running and not force:
            return

        # Add optional confirmation requirement callable
        self.__requirement = requirement_func

        # Record the start time and period
        self.__start_time = maya.now()
        self.__uptime_period = self.staking_agent.get_current_period()
        self.__current_period = self.__uptime_period

        self.log.info(f"START WORK TRACKING (immediate action: {act_now})")
        d = self._tracking_task.start(interval=self.random_interval(fails=self._consecutive_fails),
                                      now=act_now)
        d.addErrback(self.handle_working_errors)

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        """Errback for the looping task: abort hard or log, back off, and retry."""
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(f'Unhandled error during node work tracking. {failure!r}',
                              failure=failure)
            self.stop()
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(f'Unhandled error during work tracking (#{self._consecutive_fails}): {failure.getTraceback()!r}',
                          failure=failure)

            # the effect of this is that we get one immediate retry.
            # After that, the random_interval will be honored until
            # success is achieved
            act_now = self._consecutive_fails < 1
            self._consecutive_fails += 1
            self.start(act_now=act_now)

    def __work_requirement_is_satisfied(self) -> bool:
        """Evaluate the optional requirement callable; no callable means 'go ahead'."""
        # TODO: Check for stake expiration and exit
        if self.__requirement is None:
            return True
        r = self.__requirement()
        if not isinstance(r, bool):
            raise ValueError(f"'requirement' must return a boolean.")
        return r

    @property
    def pending(self) -> Dict[int, HexBytes]:
        # Defensive copy so callers cannot mutate internal tracking state.
        return self.__pending.copy()

    def __commitments_tracker_is_consistent(self) -> bool:
        """Compare internally tracked pending txs against the node's mempool count."""
        worker_address = self.worker.worker_address
        tx_count_pending = self.client.get_transaction_count(account=worker_address, pending=True)
        tx_count_latest = self.client.get_transaction_count(account=worker_address, pending=False)
        txs_in_mempool = tx_count_pending - tx_count_latest

        if len(self.__pending) == txs_in_mempool:
            return True  # OK!

        if txs_in_mempool > len(self.__pending):  # We're missing some pending TXs
            return False
        else:  # TODO #2429: What to do when txs_in_mempool < len(self.__pending)? What does this imply?
            return True

    def __track_pending_commitments(self) -> bool:
        """Prune confirmed transactions from tracking; return True when pending txs remain."""
        # TODO: Keep a purpose-built persistent log of worker transaction history
        unmined_transactions = 0
        pending_transactions = self.pending.items()  # note: this must be performed non-mutatively
        for tx_firing_block_number, txhash in sorted(pending_transactions):
            if txhash is UNTRACKED_PENDING_TRANSACTION:
                unmined_transactions += 1
                continue

            try:
                confirmed_tx_receipt = self.client.get_transaction_receipt(transaction_hash=txhash)
            except TransactionNotFound:
                unmined_transactions += 1  # mark as unmined - Keep tracking it for now
                continue
            else:
                # Receipt found: the tx is mined, stop tracking it.
                confirmation_block_number = confirmed_tx_receipt['blockNumber']
                confirmations = confirmation_block_number - tx_firing_block_number
                self.log.info(f'Commitment transaction {txhash.hex()[:10]} confirmed: {confirmations} confirmations')
                del self.__pending[tx_firing_block_number]

        if unmined_transactions:
            s = "s" if unmined_transactions > 1 else ""
            self.log.info(f'{unmined_transactions} pending commitment transaction{s} detected.')

        inconsistent_tracker = not self.__commitments_tracker_is_consistent()
        if inconsistent_tracker:
            # If we detect there's a mismatch between the number of internally tracked and
            # pending block transactions, create a special pending TX that accounts for this.
            # TODO: Detect if this untracked pending transaction is a commitment transaction at all.
            self.__pending[0] = UNTRACKED_PENDING_TRANSACTION
            return True

        return bool(self.__pending)

    def __fire_replacement_commitment(self, current_block_number: int, tx_firing_block_number: int) -> None:
        """Fire a fresh commitment tx and swap tracking from the stuck tx to the new one."""
        replacement_txhash = self.__fire_commitment()  # replace
        self.__pending[current_block_number] = replacement_txhash  # track this transaction
        del self.__pending[tx_firing_block_number]  # assume our original TX is stuck

    def __handle_replacement_commitment(self, current_block_number: int) -> None:
        """Decide whether the oldest pending commitment tx is stuck; replace it if so."""
        # Oldest pending tx (lowest firing block number) is examined first.
        tx_firing_block_number, txhash = list(sorted(self.pending.items()))[0]
        if txhash is UNTRACKED_PENDING_TRANSACTION:
            # TODO: Detect if this untracked pending transaction is a commitment transaction at all.
            message = f"We have an untracked pending transaction. Issuing a replacement transaction."
        else:
            # If the transaction is still not mined after a max confirmation time
            # (based on current gas strategy) issue a replacement transaction.
            wait_time_in_blocks = current_block_number - tx_firing_block_number
            wait_time_in_seconds = wait_time_in_blocks * AVERAGE_BLOCK_TIME_IN_SECONDS
            if wait_time_in_seconds < self.max_confirmation_time():
                self.log.info(f'Waiting for pending commitment transaction to be mined ({txhash.hex()}).')
                return
            else:
                message = f"We've waited for {wait_time_in_seconds}, but max time is {self.max_confirmation_time()}" \
                          f" for {self.gas_strategy} gas strategy. Issuing a replacement transaction."

        # Send a replacement transaction
        self.log.info(message)
        self.__fire_replacement_commitment(current_block_number=current_block_number,
                                           tx_firing_block_number=tx_firing_block_number)

    def __reset_tracker_state(self) -> None:
        # Drop all pending-tx tracking and the failure counter.
        self.__pending.clear()  # Forget the past. This is a new beginning.
        self._consecutive_fails = 0

    def _do_work(self) -> None:
        """
        Async working task for Ursula  # TODO: Split into multiple async tasks

        One tick of the tracking loop: detect period changes, track/replace
        pending commitment transactions, and fire a new commitment when due
        and the optional requirement callable allows it.
        """
        # Call once here, and inject later for temporal consistency
        current_block_number = self.client.block_number

        # Update on-chain status
        self.log.info(f"Checking for new period. Current period is {self.__current_period}")
        onchain_period = self.staking_agent.get_current_period()  # < -- Read from contract
        if self.current_period != onchain_period:
            self.__current_period = onchain_period
            self.log.info(f"New period is {self.__current_period}")
            self.__reset_tracker_state()

            # TODO: #1515 and #1517 - Shut down at end of terminal stake
            # This slows down tests substantially and adds additional
            # RPC calls, but might be acceptable in production
            # self.worker.stakes.refresh()

        # Measure working interval
        interval = onchain_period - self.worker.last_committed_period
        if interval < 0:
            # Already committed ahead of the current period.
            self.__reset_tracker_state()
            return  # No need to commit to this period. Save the gas.
        if interval > 0:
            # TODO: #1516 Follow-up actions for missed commitments
            self.log.warn(f"MISSED COMMITMENTS - {interval} missed staking commitments detected.")

        # Commitment tracking
        unmined_transactions = self.__track_pending_commitments()
        if unmined_transactions:
            self.__handle_replacement_commitment(current_block_number=current_block_number)
            # while there are known pending transactions, remain in fast interval mode
            self._tracking_task.interval = self.INTERVAL_FLOOR
            return  # This cycle is finished.
        else:
            # Randomize the next task interval over time, within bounds.
            self._tracking_task.interval = self.random_interval(fails=self._consecutive_fails)

        # Only perform work this round if the requirements are met
        if not self.__work_requirement_is_satisfied():
            self.log.warn(f'COMMIT PREVENTED (callable: "{self.__requirement.__name__}") - '
                          f'There are unmet commit requirements.')
            # TODO: Follow-up actions for failed requirements
            return

        txhash = self.__fire_commitment()
        self.__pending[current_block_number] = txhash

    def __fire_commitment(self):
        """Makes an initial/replacement worker commitment transaction"""
        transacting_power = self.worker.transacting_power
        with transacting_power:
            txhash = self.worker.commit_to_next_period(fire_and_forget=True)  # < --- blockchain WRITE
        self.log.info(f"Making a commitment to period {self.current_period} - TxHash: {txhash.hex()}")
        return txhash
class WorkTracker:
    """
    Background stake-tracking loop for a worker.

    Drives a Twisted ``LoopingCall`` at a fixed refresh rate: each tick reads
    the current period from the staking contract, checks an optional commit
    requirement, and makes a commitment transaction when one is due.
    """

    CLOCK = reactor
    REFRESH_RATE = 60 * 15  # Fifteen minutes

    def __init__(self, worker, refresh_rate: int = None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.log = Logger('stake-tracker')

        # Worker plumbing
        self.worker = worker
        self.staking_agent = self.worker.staking_agent

        # Loop configuration: caller-supplied rate wins, class default otherwise.
        self._refresh_rate = refresh_rate or self.REFRESH_RATE
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        # Tracking state
        self.__requirement = None       # optional boolean gate, set in start()
        self.__current_period = None    # last observed on-chain period
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = True     # crash hard on unhandled loop errors

    @property
    def current_period(self):
        """The most recently observed on-chain period."""
        return self.__current_period

    def stop(self) -> None:
        """Halt the tracking loop if it is active (safe to call repeatedly)."""
        if not self._tracking_task.running:
            return
        self._tracking_task.stop()
        self.log.info("STOPPED WORK TRACKING")

    def start(self, act_now: bool = True, requirement_func: Callable = None, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.

        :param act_now: Execute one tick immediately instead of waiting a full interval.
        :param requirement_func: Optional callable returning bool, consulted before committing.
        :param force: Restart the loop even if it is already running.
        """
        already_running = self._tracking_task.running
        if already_running and not force:
            return

        # Add optional confirmation requirement callable
        self.__requirement = requirement_func

        # Record the start time and period
        self.__start_time = maya.now()
        starting_period = self.staking_agent.get_current_period()
        self.__uptime_period = starting_period
        self.__current_period = starting_period

        self.log.info("START WORK TRACKING")
        deferred = self._tracking_task.start(interval=self._refresh_rate, now=act_now)
        deferred.addErrback(self.handle_working_errors)

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        """Errback for the loop: log and continue, or crash via the reactor thread."""
        failure = args[0]
        if not self._abort_on_error:
            self.log.warn('Unhandled error during work tracking: {failure.getTraceback()!r}',
                          failure=failure)
            return
        self.log.critical('Unhandled error during node work tracking. {failure!r}',
                          failure=failure)
        reactor.callFromThread(self._crash_gracefully, failure=failure)

    def __check_work_requirement(self) -> bool:
        """Evaluate the optional requirement gate; absence of a gate means 'commit'."""
        # TODO: Check for stake expiration and exit
        requirement = self.__requirement
        if requirement is None:
            return True
        try:
            satisfied = requirement()
            if not isinstance(satisfied, bool):
                raise ValueError(f"'requirement' must return a boolean.")
        except TypeError:
            # A non-callable requirement surfaces here as a TypeError on the call.
            raise ValueError(f"'requirement' must be a callable.")
        return satisfied

    def _do_work(self) -> None:
        """One loop tick: refresh period, decide whether to commit, then commit."""
        # TODO: #1515 Shut down at end of terminal stake

        # Update on-chain status
        self.log.info(f"Checking for new period. Current period is {self.__current_period}")
        chain_period = self.staking_agent.get_current_period()  # < -- Read from contract
        if self.current_period != chain_period:
            self.__current_period = chain_period
            # self.worker.stakes.refresh()  # TODO: #1517 Track stakes for fast access to terminal period.

        # Measure working interval
        gap = chain_period - self.worker.last_committed_period
        if gap < 0:
            return  # No need to commit to this period.  Save the gas.
        if gap > 0:
            # TODO: #1516 Follow-up actions for downtime
            self.log.warn(f"MISSED COMMITMENTS - {gap} missed staking commitments detected.")

        # Only perform work this round if the requirements are met
        if not self.__check_work_requirement():
            self.log.warn(f'COMMIT PREVENTED (callable: "{self.__requirement.__name__}") - '
                          f'There are unmet commit requirements.')
            # TODO: Follow-up actions for downtime
            return

        # Make a Commitment
        self.log.info(f"Made a commitment to period {self.current_period}")
        transacting_power = self.worker.transacting_power
        with transacting_power:
            self.worker.commit_to_next_period()  # < --- blockchain WRITE
class BlockchainInterface: """ Interacts with a solidity compiler and a registry in order to instantiate compiled ethereum contracts with the given web3 provider backend. """ TIMEOUT = 600 # seconds # TODO: Correlate with the gas strategy - #2070 DEFAULT_GAS_STRATEGY = 'medium' GAS_STRATEGIES = { 'glacial': time_based.glacial_gas_price_strategy, # 24h 'slow': time_based.slow_gas_price_strategy, # 1h 'medium': time_based.medium_gas_price_strategy, # 5m 'fast': time_based.fast_gas_price_strategy # 60s } process = NO_PROVIDER_PROCESS.bool_value(False) Web3 = Web3 _contract_factory = VersionedContract class InterfaceError(Exception): pass class NoProvider(InterfaceError): pass class UnsupportedProvider(InterfaceError): pass class ConnectionFailed(InterfaceError): pass class UnknownContract(InterfaceError): pass REASONS = { INSUFFICIENT_ETH: 'insufficient funds for gas * price + value', } class TransactionFailed(InterfaceError): IPC_CODE = -32000 # (geth) def __init__(self, message: str, transaction_dict: dict, contract_function: Union[ContractFunction, ContractConstructor], *args): self.base_message = message self.name = get_transaction_name( contract_function=contract_function) self.payload = transaction_dict self.contract_function = contract_function self.failures = { BlockchainInterface.REASONS[INSUFFICIENT_ETH]: self.insufficient_eth } self.message = self.failures.get(self.base_message, self.default) super().__init__(self.message, *args) @property def default(self) -> str: message = f'{self.name} from {self.payload["from"][:6]} - {self.base_message}' return message @property def insufficient_eth(self) -> str: gas = (self.payload.get('gas', 1) * self.payload['gasPrice'] ) # FIXME: If gas is not included... cost = gas + self.payload.get('value', 0) blockchain = BlockchainInterfaceFactory.get_interface() balance = blockchain.client.get_balance( account=self.payload['from']) message = f'{self.payload} from {self.payload["from"][:8]} - {self.base_message}.' 
\ f'Calculated cost is {cost} but sender only has {balance}.' return message def __init__( self, emitter=None, # TODO # 1754 poa: bool = None, light: bool = False, provider_process=NO_PROVIDER_PROCESS, provider_uri: str = NO_BLOCKCHAIN_CONNECTION, provider: Web3Providers = NO_BLOCKCHAIN_CONNECTION, gas_strategy: Union[str, Callable] = DEFAULT_GAS_STRATEGY): """ A blockchain "network interface"; the circumflex wraps entirely around the bounds of contract operations including compilation, deployment, and execution. TODO: #1502 - Move to API docs. Filesystem Configuration Node Client EVM ================ ====================== =============== ===================== =========================== Solidity Files -- SolidityCompiler - --- HTTPProvider ------ ... | | | | | | - *BlockchainInterface* -- IPCProvider ----- External EVM (geth, parity...) | | | | TestProvider ----- EthereumTester ------------- | | PyEVM (Development Chain) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Runtime Files -- --BlockchainInterface ----> Registry | | ^ | | | | | | Key Files ------ CharacterConfiguration Agent ... (Contract API) | | ^ | | | | | | | | Actor ...Blockchain-Character API) | | ^ | | | | | | Config File --- --------- Character ... (Public API) ^ | Human The Blockchain is the junction of the solidity compiler, a contract registry, and a collection of web3 network providers as a means of interfacing with the ethereum blockchain to execute or deploy contract code on the network. Compiler and Registry Usage ----------------------------- Contracts are freshly re-compiled if an instance of SolidityCompiler is passed; otherwise, The registry will read contract data saved to disk that is be used to retrieve contact address and op-codes. Optionally, A registry instance can be passed instead. 
Provider Usage --------------- https: // github.com / ethereum / eth - tester # available-backends * HTTP Provider - Web3 HTTP provider, typically JSON RPC 2.0 over HTTP * Websocket Provider - Web3 WS provider, typically JSON RPC 2.0 over WS, supply endpoint uri and websocket=True * IPC Provider - Web3 File based IPC provider transported over standard I/O * Custom Provider - A pre-initialized web3.py provider instance to attach to this interface """ self.log = Logger('Blockchain') self.poa = poa self.provider_uri = provider_uri self._provider = provider self._provider_process = provider_process self.w3 = NO_BLOCKCHAIN_CONNECTION self.client = NO_BLOCKCHAIN_CONNECTION # type: EthereumClient self.transacting_power = READ_ONLY_INTERFACE self.is_light = light self.gas_strategy = self.get_gas_strategy(gas_strategy) def __repr__(self): r = '{name}({uri})'.format(name=self.__class__.__name__, uri=self.provider_uri) return r @classmethod def from_dict(cls, payload: dict, **overrides) -> 'BlockchainInterface': payload.update({k: v for k, v in overrides.items() if v is not None}) blockchain = cls(**payload) return blockchain def to_dict(self) -> dict: payload = dict(provider_uri=self.provider_uri, poa=self.poa, light=self.is_light) return payload @property def is_connected(self) -> bool: """ https://web3py.readthedocs.io/en/stable/__provider.html#examples-using-automated-detection """ if self.client is NO_BLOCKCHAIN_CONNECTION: return False return self.client.is_connected @classmethod def get_gas_strategy(cls, gas_strategy: Union[str, Callable]) -> Callable: try: gas_strategy = cls.GAS_STRATEGIES[gas_strategy] except KeyError: if gas_strategy and not callable(gas_strategy): raise ValueError( f"{gas_strategy} must be callable to be a valid gas strategy." 
) else: gas_strategy = cls.GAS_STRATEGIES[cls.DEFAULT_GAS_STRATEGY] return gas_strategy def attach_middleware(self): if self.poa is None: # If POA is not set explicitly, try to autodetect from chain id chain_id = int(self.client.chain_id) self.poa = chain_id in POA_CHAINS self.log.debug( f'Autodetecting POA chain ({self.client.chain_name})') # For use with Proof-Of-Authority test-blockchains if self.poa is True: self.log.debug('Injecting POA middleware at layer 0') self.client.inject_middleware(geth_poa_middleware, layer=0) # Gas Price Strategy self.client.w3.eth.setGasPriceStrategy(self.gas_strategy) self.client.w3.middleware_onion.add( middleware.time_based_cache_middleware) self.client.w3.middleware_onion.add( middleware.latest_block_based_cache_middleware) self.client.w3.middleware_onion.add(middleware.simple_cache_middleware) def connect(self): # Spawn child process if self._provider_process: self._provider_process.start() provider_uri = self._provider_process.provider_uri(scheme='file') else: provider_uri = self.provider_uri self.log.info( f"Using external Web3 Provider '{self.provider_uri}'") # Attach Provider self._attach_provider(provider=self._provider, provider_uri=provider_uri) self.log.info("Connecting to {}".format(self.provider_uri)) if self._provider is NO_BLOCKCHAIN_CONNECTION: raise self.NoProvider( "There are no configured blockchain providers") # Connect if not connected try: self.w3 = self.Web3(provider=self._provider) self.client = EthereumClient.from_w3(w3=self.w3) except requests.ConnectionError: # RPC raise self.ConnectionFailed( f'Connection Failed - {str(self.provider_uri)} - is RPC enabled?' ) except FileNotFoundError: # IPC File Protocol raise self.ConnectionFailed( f'Connection Failed - {str(self.provider_uri)} - is IPC enabled?' 
) else: self.attach_middleware() return self.is_connected def sync(self, emitter=None) -> None: sync_state = self.client.sync() if emitter is not None: emitter.echo( f"Syncing: {self.client.chain_name.capitalize()}. Waiting for sync to begin.", verbosity=1) while not len(self.client.peers): emitter.echo("waiting for peers...", verbosity=1) time.sleep(5) peer_count = len(self.client.peers) emitter.echo( f"Found {'an' if peer_count == 1 else peer_count} Ethereum peer{('s' if peer_count > 1 else '')}.", verbosity=1) try: emitter.echo("Beginning sync...", verbosity=1) initial_state = next(sync_state) except StopIteration: # will occur if no syncing needs to happen emitter.echo("Local blockchain data is already synced.", verbosity=1) return prior_state = initial_state total_blocks_to_sync = int(initial_state.get( 'highestBlock', 0)) - int(initial_state.get('currentBlock', 0)) with click.progressbar( length=total_blocks_to_sync, label="sync progress", file=emitter.get_stream(verbosity=1)) as bar: for syncdata in sync_state: if syncdata: blocks_accomplished = int( syncdata['currentBlock']) - int( prior_state.get('currentBlock', 0)) bar.update(blocks_accomplished) prior_state = syncdata else: try: for syncdata in sync_state: self.client.log.info( f"Syncing {syncdata['currentBlock']}/{syncdata['highestBlock']}" ) except TypeError: # it's already synced return return @property def provider(self) -> Union[IPCProvider, WebsocketProvider, HTTPProvider]: return self._provider def _attach_provider(self, provider: Web3Providers = None, provider_uri: str = None) -> None: """ https://web3py.readthedocs.io/en/latest/providers.html#providers """ if not provider_uri and not provider: raise self.NoProvider("No URI or provider instances supplied.") if provider_uri and not provider: uri_breakdown = urlparse(provider_uri) if uri_breakdown.scheme == 'tester': providers = { 'pyevm': _get_pyevm_test_provider, 'geth': _get_test_geth_parity_provider, 'parity-ethereum': 
_get_test_geth_parity_provider, 'mock': _get_mock_test_provider } provider_scheme = uri_breakdown.netloc else: providers = { 'auto': _get_auto_provider, 'infura': _get_infura_provider, 'ipc': _get_IPC_provider, 'file': _get_IPC_provider, 'ws': _get_websocket_provider, 'wss': _get_websocket_provider, 'http': _get_HTTP_provider, 'https': _get_HTTP_provider, } provider_scheme = uri_breakdown.scheme # auto-detect for file based ipc if not provider_scheme: if os.path.exists(provider_uri): # file is available - assume ipc/file scheme provider_scheme = 'file' self.log.info( f"Auto-detected provider scheme as 'file://' for provider {provider_uri}" ) try: self._provider = providers[provider_scheme](provider_uri) except KeyError: raise self.UnsupportedProvider( f"{provider_uri} is an invalid or unsupported blockchain provider URI" ) else: self.provider_uri = provider_uri or NO_BLOCKCHAIN_CONNECTION else: self._provider = provider def __transaction_failed( self, exception: Exception, transaction_dict: dict, contract_function: Union[ContractFunction, ContractConstructor]) -> None: """ Re-raising error handler and context manager for transaction broadcast or build failure events at the interface layer. This method is a last line of defense against unhandled exceptions caused by transaction failures and must raise an exception. # TODO: #1504 - Additional Handling of validation failures (gas limits, invalid fields, etc.) """ try: # Assume this error is formatted as an IPC response code, message = exception.args[0].values() except (ValueError, IndexError, AttributeError): # TODO: #1504 - Try even harder to determine if this is insufficient funds causing the issue, # This may be best handled at the agent or actor layer for registry and token interactions. 
# Worst case scenario - raise the exception held in context implicitly raise exception else: if int(code) != self.TransactionFailed.IPC_CODE: # Only handle client-specific exceptions # https://www.jsonrpc.org/specification Section 5.1 raise exception self.log.critical(message) # simple context raise self.TransactionFailed( message=message, # rich error (best case) contract_function=contract_function, transaction_dict=transaction_dict) def __log_transaction(self, transaction_dict: dict, contract_function: ContractFunction): """ Format and log a transaction dict and return the transaction name string. This method *must not* mutate the original transaction dict. """ # Do not mutate the original transaction dict tx = dict(transaction_dict).copy() # Format if tx.get('to'): tx['to'] = to_checksum_address(contract_function.address) try: tx['selector'] = contract_function.selector except AttributeError: pass tx['from'] = to_checksum_address(tx['from']) tx.update({ f: prettify_eth_amount(v) for f, v in tx.items() if f in ('gasPrice', 'value') }) payload_pprint = ', '.join("{}: {}".format(k, v) for k, v in tx.items()) # Log transaction_name = get_transaction_name( contract_function=contract_function) self.log.debug(f"[TX-{transaction_name}] | {payload_pprint}") @validate_checksum_address def build_transaction( self, contract_function: ContractFunction, sender_address: str, payload: dict = None, transaction_gas_limit: int = None, ) -> dict: # # Build Payload # base_payload = { 'chainId': int(self.client.chain_id), 'nonce': self.client.w3.eth.getTransactionCount(sender_address, 'pending'), 'from': sender_address, 'gasPrice': self.client.gas_price } # Aggregate if not payload: payload = {} payload.update(base_payload) # Explicit gas override - will skip gas estimation in next operation. 
if transaction_gas_limit: payload['gas'] = int(transaction_gas_limit) # # Build Transaction # self.__log_transaction(transaction_dict=payload, contract_function=contract_function) try: transaction_dict = contract_function.buildTransaction( payload) # Gas estimation occurs here except (TestTransactionFailed, ValidationError, ValueError) as error: # Note: Geth raises ValueError in the same condition that pyevm raises ValidationError here. # Treat this condition as "Transaction Failed" during gas estimation. raise self.__transaction_failed( exception=error, transaction_dict=payload, contract_function=contract_function) return transaction_dict def sign_and_broadcast_transaction(self, transaction_dict, transaction_name: str = "", confirmations: int = 0) -> dict: # # Setup # # TODO # 1754 - Move this to singleton - I do not approve... nor does Bogdan? if GlobalLoggerSettings._json_ipc: emitter = JSONRPCStdoutEmitter() else: emitter = StdoutEmitter() if self.transacting_power is READ_ONLY_INTERFACE: raise self.InterfaceError(str(READ_ONLY_INTERFACE)) # # Sign # # TODO: Show the USD Price: https://api.coinmarketcap.com/v1/ticker/ethereum/ price = transaction_dict['gasPrice'] cost_wei = price * transaction_dict['gas'] cost = Web3.fromWei(cost_wei, 'gwei') if self.transacting_power.is_device: emitter.message( f'Confirm transaction {transaction_name} on hardware wallet... 
({cost} gwei @ {price})', color='yellow') signed_raw_transaction = self.transacting_power.sign_transaction( transaction_dict) # # Broadcast # emitter.message( f'Broadcasting {transaction_name} Transaction ({cost} gwei @ {price})...', color='yellow') try: txhash = self.client.send_raw_transaction( signed_raw_transaction) # <--- BROADCAST except (TestTransactionFailed, ValueError) as error: raise # TODO: Unify with Transaction failed handling # # Receipt # try: # TODO: Handle block confirmation exceptions receipt = self.client.wait_for_receipt(txhash, timeout=self.TIMEOUT, confirmations=confirmations) except TimeExhausted: # TODO: #1504 - Handle transaction timeout raise else: self.log.debug( f"[RECEIPT-{transaction_name}] | txhash: {receipt['transactionHash'].hex()}" ) # # Confirmations # # Primary check transaction_status = receipt.get('status', UNKNOWN_TX_STATUS) if transaction_status == 0: failure = f"Transaction transmitted, but receipt returned status code 0. " \ f"Full receipt: \n {pprint.pformat(receipt, indent=2)}" raise self.InterfaceError(failure) if transaction_status is UNKNOWN_TX_STATUS: self.log.info( f"Unknown transaction status for {txhash} (receipt did not contain a status field)" ) # Secondary check tx = self.client.get_transaction(txhash) if tx["gas"] == receipt["gasUsed"]: raise self.InterfaceError( f"Transaction consumed 100% of transaction gas." 
f"Full receipt: \n {pprint.pformat(receipt, indent=2)}") return receipt def get_blocktime(self): return self.client.get_blocktime() @validate_checksum_address def send_transaction(self, contract_function: Union[ContractFunction, ContractConstructor], sender_address: str, payload: dict = None, transaction_gas_limit: int = None, confirmations: int = 0) -> dict: transaction = self.build_transaction( contract_function=contract_function, sender_address=sender_address, payload=payload, transaction_gas_limit=transaction_gas_limit) # Get transaction name try: transaction_name = contract_function.fn_name.upper() except AttributeError: transaction_name = 'DEPLOY' if isinstance( contract_function, ContractConstructor) else 'UNKNOWN' receipt = self.sign_and_broadcast_transaction( transaction_dict=transaction, transaction_name=transaction_name, confirmations=confirmations) return receipt def get_contract_by_name( self, registry: BaseContractRegistry, contract_name: str, contract_version: str = None, enrollment_version: Union[int, str] = None, proxy_name: str = None, use_proxy_address: bool = True) -> VersionedContract: """ Instantiate a deployed contract from registry data, and assimilate it with its proxy if it is upgradeable. """ target_contract_records = registry.search( contract_name=contract_name, contract_version=contract_version) if not target_contract_records: raise self.UnknownContract( f"No such contract records with name {contract_name}:{contract_version}." 
) if proxy_name: # Lookup proxies; Search for a published proxy that targets this contract record proxy_records = registry.search(contract_name=proxy_name) results = list() for proxy_name, proxy_version, proxy_address, proxy_abi in proxy_records: proxy_contract = self.client.w3.eth.contract( abi=proxy_abi, address=proxy_address, version=proxy_version, ContractFactoryClass=self._contract_factory) # Read this dispatcher's target address from the blockchain proxy_live_target_address = proxy_contract.functions.target( ).call() for target_name, target_version, target_address, target_abi in target_contract_records: if target_address == proxy_live_target_address: if use_proxy_address: triplet = (proxy_address, target_version, target_abi) else: triplet = (target_address, target_version, target_abi) else: continue results.append(triplet) if len(results) > 1: address, _version, _abi = results[0] message = "Multiple {} deployments are targeting {}".format( proxy_name, address) raise self.InterfaceError(message.format(contract_name)) else: try: selected_address, selected_version, selected_abi = results[ 0] except IndexError: raise self.UnknownContract( f"There are no Dispatcher records targeting '{contract_name}':{contract_version}" ) else: # TODO: use_proxy_address doesnt' work in this case. Should we raise if used? # NOTE: 0 must be allowed as a valid version number if len(target_contract_records) != 1: if enrollment_version is None: m = f"{len(target_contract_records)} records enrolled " \ f"for contract {contract_name}:{contract_version} " \ f"and no version index was supplied." 
raise self.InterfaceError(m) enrollment_version = self.__get_enrollment_version_index( name=contract_name, contract_version=contract_version, version_index=enrollment_version, enrollments=len(target_contract_records)) else: enrollment_version = -1 # default _contract_name, selected_version, selected_address, selected_abi = target_contract_records[ enrollment_version] # Create the contract from selected sources unified_contract = self.client.w3.eth.contract( abi=selected_abi, address=selected_address, version=selected_version, ContractFactoryClass=self._contract_factory) return unified_contract @staticmethod def __get_enrollment_version_index(version_index: Union[int, str], enrollments: int, name: str, contract_version: str): version_names = {'latest': -1, 'earliest': 0} try: version = version_names[version_index] except KeyError: try: version = int(version_index) except ValueError: what_is_this = version_index raise ValueError( f"'{what_is_this}' is not a valid enrollment version number" ) else: if version > enrollments - 1: message = f"Version index '{version}' is larger than the number of enrollments " \ f"for {name}:{contract_version}." raise ValueError(message) return version
class WorkTrackerBase:
    """Baseclass for handling automated transaction tracking..."""

    CLOCK = reactor
    INTERVAL_FLOOR = 60 * 15  # fifteen minutes
    INTERVAL_CEIL = 60 * 180  # three hours
    ALLOWED_DEVIATION = 0.5  # i.e., up to +50% from the expected confirmation time

    def __init__(self, worker, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log = Logger('stake-tracker')
        self.worker = worker  # TODO: What to call the subject here?  What is a work tracker without "work"?

        # Periodic driver for _do_work; the interval is re-randomized each cycle.
        self._tracking_task = task.LoopingCall(self._do_work)
        self._tracking_task.clock = self.CLOCK

        # Maps firing-block-number -> txhash for commitments awaiting mining.
        self.__pending = dict()  # TODO: Prime with pending worker transactions
        self.__requirement = None  # optional callable gating whether work fires this round
        self.__start_time = NOT_STAKING
        self.__uptime_period = NOT_STAKING
        self._abort_on_error = False

        self._consecutive_fails = 0

        self._configure(*args)
        self.gas_strategy = worker.application_agent.blockchain.gas_strategy

    @classmethod
    def random_interval(cls, fails=None) -> int:
        """Return the next polling interval: the floor after a failure, otherwise random within bounds."""
        if fails is not None and fails > 0:
            return cls.INTERVAL_FLOOR
        return random.randint(cls.INTERVAL_FLOOR, cls.INTERVAL_CEIL)

    def max_confirmation_time(self) -> int:
        """Expected confirmation time for the current gas strategy, padded by ALLOWED_DEVIATION."""
        expected_time = EXPECTED_CONFIRMATION_TIME_IN_SECONDS[self.gas_strategy]  # FIXME: #2447
        result = expected_time * (1 + self.ALLOWED_DEVIATION)
        return result

    def stop(self) -> None:
        """Halt the looping task if it is running."""
        if self._tracking_task.running:
            self._tracking_task.stop()
            self.log.info(f"STOPPED WORK TRACKING")

    def start(self, commit_now: bool = True, requirement_func: Callable = None, force: bool = False) -> None:
        """
        High-level stake tracking initialization, this function aims
        to be safely called at any time - For example, it is okay to call
        this function multiple times within the same period.
        """
        if self._tracking_task.running and not force:
            return

        # Add optional confirmation requirement callable
        self.__requirement = requirement_func

        # Record the start time and period
        self.__start_time = maya.now()

        self.log.info(f"START WORK TRACKING (immediate action: {commit_now})")
        d = self._tracking_task.start(interval=self.random_interval(fails=self._consecutive_fails), now=commit_now)
        d.addErrback(self.handle_working_errors)

    def _crash_gracefully(self, failure=None) -> None:
        """
        A facility for crashing more gracefully in the event that
        an exception is unhandled in a different thread.
        """
        self._crashed = failure
        failure.raiseException()

    def handle_working_errors(self, *args, **kwargs) -> None:
        """
        Errback for the looping task: either crash (when _abort_on_error) or
        log, count the failure, and restart tracking.
        """
        failure = args[0]
        if self._abort_on_error:
            self.log.critical(f'Unhandled error during node work tracking. {failure!r}',
                              failure=failure)
            self.stop()
            reactor.callFromThread(self._crash_gracefully, failure=failure)
        else:
            self.log.warn(f'Unhandled error during work tracking (#{self._consecutive_fails}): {failure.getTraceback()!r}',
                          failure=failure)

            # the effect of this is that we get one immediate retry.
            # After that, the random_interval will be honored until
            # success is achieved
            commit_now = self._consecutive_fails < 1
            self._consecutive_fails += 1
            self.start(commit_now=commit_now)

    def _should_do_work_now(self) -> bool:
        # TODO: Check for stake expiration and exit
        if self.__requirement is None:
            return True
        r = self.__requirement(self.worker)
        if not isinstance(r, bool):
            raise ValueError(f"'requirement' must return a boolean.")
        return r

    @property
    def pending(self) -> Dict[int, HexBytes]:
        # Return a copy so callers can iterate safely while __pending mutates.
        return self.__pending.copy()

    def __commitments_tracker_is_consistent(self) -> bool:
        """Compare the internally tracked pending count against the mempool delta for the operator."""
        # NOTE(review): self.client is not assigned in __init__ here - presumably
        # provided by a subclass or mixin; confirm.
        operator_address = self.worker.operator_address
        tx_count_pending = self.client.get_transaction_count(account=operator_address, pending=True)
        tx_count_latest = self.client.get_transaction_count(account=operator_address, pending=False)
        txs_in_mempool = tx_count_pending - tx_count_latest

        if len(self.__pending) == txs_in_mempool:
            return True  # OK!

        if txs_in_mempool > len(self.__pending):  # We're missing some pending TXs
            return False
        else:  # TODO #2429: What to do when txs_in_mempool < len(self.__pending)?  What does this imply?
            return True

    def __track_pending_commitments(self) -> bool:
        """
        Reconcile tracked pending commitments against on-chain receipts.
        Returns True when any transaction is still pending.
        """
        # TODO: Keep a purpose-built persistent log of worker transaction history

        unmined_transactions = 0
        pending_transactions = self.pending.items()  # note: this must be performed non-mutatively
        for tx_firing_block_number, txhash in sorted(pending_transactions):
            if txhash is UNTRACKED_PENDING_TRANSACTION:
                unmined_transactions += 1
                continue

            try:
                confirmed_tx_receipt = self.client.get_transaction_receipt(transaction_hash=txhash)
            except TransactionNotFound:
                unmined_transactions += 1  # mark as unmined - Keep tracking it for now
                continue
            else:
                confirmation_block_number = confirmed_tx_receipt['blockNumber']
                confirmations = confirmation_block_number - tx_firing_block_number
                self.log.info(f'Commitment transaction {txhash.hex()[:10]} confirmed: {confirmations} confirmations')
                del self.__pending[tx_firing_block_number]

        if unmined_transactions:
            s = "s" if unmined_transactions > 1 else ""
            self.log.info(f'{unmined_transactions} pending commitment transaction{s} detected.')

        inconsistent_tracker = not self.__commitments_tracker_is_consistent()
        if inconsistent_tracker:
            # If we detect there's a mismatch between the number of internally tracked and
            # pending block transactions, create a special pending TX that accounts for this.
            # TODO: Detect if this untracked pending transaction is a commitment transaction at all.
            self.__pending[0] = UNTRACKED_PENDING_TRANSACTION
            return True

        return bool(self.__pending)

    def __fire_replacement_commitment(self, current_block_number: int, tx_firing_block_number: int) -> None:
        """Fire a replacement commitment and swap it into the pending tracker."""
        replacement_txhash = self._fire_commitment()  # replace
        self.__pending[current_block_number] = replacement_txhash  # track this transaction
        del self.__pending[tx_firing_block_number]  # assume our original TX is stuck

    def __handle_replacement_commitment(self, current_block_number: int) -> None:
        """Decide whether the oldest pending commitment is stuck and, if so, replace it."""
        tx_firing_block_number, txhash = list(sorted(self.pending.items()))[0]
        if txhash is UNTRACKED_PENDING_TRANSACTION:
            # TODO: Detect if this untracked pending transaction is a commitment transaction at all.
            message = f"We have an untracked pending transaction. Issuing a replacement transaction."
        else:
            # If the transaction is still not mined after a max confirmation time
            # (based on current gas strategy) issue a replacement transaction.
            wait_time_in_blocks = current_block_number - tx_firing_block_number
            wait_time_in_seconds = wait_time_in_blocks * AVERAGE_BLOCK_TIME_IN_SECONDS
            if wait_time_in_seconds < self.max_confirmation_time():
                self.log.info(f'Waiting for pending commitment transaction to be mined ({txhash.hex()}).')
                return
            else:
                message = f"We've waited for {wait_time_in_seconds}, but max time is {self.max_confirmation_time()}" \
                          f" for {self.gas_strategy} gas strategy. Issuing a replacement transaction."

        # Send a replacement transaction
        self.log.info(message)
        self.__fire_replacement_commitment(current_block_number=current_block_number,
                                           tx_firing_block_number=tx_firing_block_number)

    def __reset_tracker_state(self) -> None:
        self.__pending.clear()  # Forget the past. This is a new beginning.
        self._consecutive_fails = 0

    def _do_work(self) -> None:
        """
        Async working task for Ursula  # TODO: Split into multiple async tasks
        """
        if self._all_work_completed():
            # nothing left to do
            self.stop()
            return

        self.log.info(f"{self.__class__.__name__} is running. Advancing to next work cycle.")
        # TODO: What to call the verb the subject performs?

        # Call once here, and inject later for temporal consistency
        current_block_number = self.client.block_number

        if self._prep_work_state() is False:
            return

        # Commitment tracking
        unmined_transactions = self.__track_pending_commitments()
        if unmined_transactions:
            self.log.info('Tracking pending transaction.')
            self.__handle_replacement_commitment(current_block_number=current_block_number)
            # while there are known pending transactions, remain in fast interval mode
            self._tracking_task.interval = self.INTERVAL_FLOOR
            return  # This cycle is finished.
        else:
            # Randomize the next task interval over time, within bounds.
            self._tracking_task.interval = self.random_interval(fails=self._consecutive_fails)

        # Only perform work this round if the requirements are met
        if not self._should_do_work_now():
            self.log.warn(f'COMMIT PREVENTED (callable: "{self.__requirement.__name__}") - '
                          f'Situation does not call for doing work now.')
            # TODO: Follow-up actions for failed requirements
            return

        if self._final_work_prep_before_transaction() is False:
            return

        txhash = self._fire_commitment()
        self.__pending[current_block_number] = txhash

    # the following four methods are specific to PRE network schemes and must be implemented as below
    def _configure(self, stakes):
        """ post __init__ configuration dealing with contracts or state specific to this PRE flavor"""
        raise NotImplementedError

    def _prep_work_state(self) -> bool:
        """ configuration performed before transaction management in task execution """
        raise NotImplementedError

    def _final_work_prep_before_transaction(self) -> bool:
        """ configuration performed after transaction management in task execution right before transaction firing"""
        raise NotImplementedError()

    def _fire_commitment(self):
        """ actually fire the transaction """
        raise NotImplementedError

    def _all_work_completed(self) -> bool:
        """ allows the work tracker to indicate that its work is completed and it can be shut down """
        raise NotImplementedError
class Felix(Character, NucypherTokenActor):
    """
    A NuCypher ERC20 faucet / Airdrop scheduler.

    Felix is a web application that gives NuCypher *testnet* tokens to registered
    addresses with a scheduled reduction of disbursement amounts, and an HTTP
    endpoint for handling new address registration.

    The main goal of Felix is to provide a source of testnet tokens for
    research and the development of production-ready nucypher dApps.
    """

    _default_crypto_powerups = [SigningPower]

    # Intervals
    DISTRIBUTION_INTERVAL = 60  # seconds
    DISBURSEMENT_INTERVAL = 24 * 365  # only distribute tokens to the same address once each YEAR.
    STAGING_DELAY = 10  # seconds

    # Disbursement
    BATCH_SIZE = 10  # transactions
    MULTIPLIER = Decimal('0.9')  # 10% reduction of previous disbursement is 0.9
    # this is not relevant until the year of time declared above, passes.
    MINIMUM_DISBURSEMENT = int(1e18)  # NuNits (1 NU)
    ETHER_AIRDROP_AMOUNT = int(1e17)  # Wei (.1 ether)
    MAX_INDIVIDUAL_REGISTRATIONS = 3  # Registration Limit

    # Node Discovery
    LEARNING_TIMEOUT = 30  # seconds
    _SHORT_LEARNING_DELAY = 60  # seconds
    _LONG_LEARNING_DELAY = 120  # seconds
    _ROUNDS_WITHOUT_NODES_AFTER_WHICH_TO_SLOW_DOWN = 1

    # Twisted
    _CLOCK = reactor
    _AIRDROP_QUEUE = dict()

    class NoDatabase(RuntimeError):
        pass

    def __init__(self,
                 db_filepath: str,
                 rest_host: str,
                 rest_port: int,
                 client_password: str = None,
                 crash_on_error: bool = False,
                 distribute_ether: bool = True,
                 registry: BaseContractRegistry = None,
                 *args, **kwargs):

        # Character
        super().__init__(registry=registry, *args, **kwargs)
        self.log = Logger(f"felix-{self.checksum_address[-6::]}")

        # Network
        self.rest_port = rest_port
        self.rest_host = rest_host
        self.rest_app = NOT_RUNNING
        self.crash_on_error = crash_on_error

        # Database
        self.db_filepath = db_filepath
        self.db = NO_DATABASE_AVAILABLE
        # NOTE(review): convert_unicode is deprecated/removed in newer SQLAlchemy -
        # confirm the pinned version supports it.
        self.db_engine = create_engine(f'sqlite:///{self.db_filepath}', convert_unicode=True)

        # Blockchain
        transacting_power = TransactingPower(password=client_password,
                                             account=self.checksum_address,
                                             signer=self.signer,
                                             cache=True)
        self._crypto_power.consume_power_up(transacting_power)

        self.token_agent = ContractAgency.get_agent(NucypherTokenAgent, registry=registry)
        self.blockchain = self.token_agent.blockchain
        self.reserved_addresses = [self.checksum_address, NULL_ADDRESS]

        # Update reserved addresses with deployed contracts
        existing_entries = list(registry.enrolled_addresses)
        self.reserved_addresses.extend(existing_entries)

        # Distribution
        self.__distributed = 0  # Track NU Output
        self.__airdrop = 0  # Track Batch
        self.__disbursement = 0  # Track Quantity
        self._distribution_task = LoopingCall(f=self.airdrop_tokens)
        self._distribution_task.clock = self._CLOCK
        self.start_time = NOT_RUNNING

        self.economics = EconomicsFactory.get_economics(registry=registry)
        self.MAXIMUM_DISBURSEMENT = self.economics.maximum_allowed_locked
        self.INITIAL_DISBURSEMENT = self.economics.minimum_allowed_locked * 3

        # Optionally send ether with each token transaction
        self.distribute_ether = distribute_ether
        # Banner
        self.log.info(FELIX_BANNER.format(self.checksum_address))

    def __repr__(self):
        class_name = self.__class__.__name__
        r = f'{class_name}(checksum_address={self.checksum_address}, db_filepath={self.db_filepath})'
        return r

    def start_learning_loop(self, now=False):
        """
        Felix needs to not even be a Learner, but since it is at the moment, it
        certainly needs not to learn.
        """

    def make_web_app(self):
        """Build the Flask app, bind the SQLAlchemy model, and register the REST routes."""
        from flask import request
        from flask_sqlalchemy import SQLAlchemy

        # WSGI/Flask Service
        short_name = bytes(self.stamp).hex()[:6]
        self.rest_app = Flask(f"faucet-{short_name}", template_folder=TEMPLATES_DIR)
        self.rest_app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{self.db_filepath}'
        self.rest_app.config['MAX_CONTENT_LENGTH'] = MAX_UPLOAD_CONTENT_LENGTH

        try:
            self.rest_app.secret_key = sha256(os.environ['NUCYPHER_FELIX_DB_SECRET'].encode())  # uses envvar
        except KeyError:
            raise OSError("The 'NUCYPHER_FELIX_DB_SECRET' is not set. Export your application secret and try again.")

        # Database
        self.db = SQLAlchemy(self.rest_app)

        # Database Tables
        class Recipient(self.db.Model):
            """
            The one and only table in Felix's database; Used to track recipients and airdrop metadata.
            """

            __tablename__ = 'recipient'

            id = self.db.Column(self.db.Integer, primary_key=True)
            address = self.db.Column(self.db.String, nullable=False)
            joined = self.db.Column(self.db.DateTime, nullable=False, default=datetime.utcnow)
            total_received = self.db.Column(self.db.String, default='0', nullable=False)
            last_disbursement_amount = self.db.Column(self.db.String, nullable=False, default=0)
            last_disbursement_time = self.db.Column(self.db.DateTime, nullable=True, default=None)
            is_staking = self.db.Column(self.db.Boolean, nullable=False, default=False)

            def __repr__(self):
                return f'{self.__class__.__name__}(id={self.id})'

        self.Recipient = Recipient  # Bind to outer class

        # Flask decorators
        rest_app = self.rest_app

        #
        # REST Routes
        #

        @rest_app.route("/status", methods=['GET'])
        def status():
            # Faucet health/summary endpoint: recipient counts, last disbursement, balances.
            with ThreadedSession(self.db_engine) as session:
                total_recipients = session.query(self.Recipient).count()
                last_recipient = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.isnot(None)
                ).order_by('last_disbursement_time').first()

                last_address = last_recipient.address if last_recipient else None
                last_transaction_date = last_recipient.last_disbursement_time.isoformat() if last_recipient else None

                unfunded = session.query(self.Recipient).filter(
                    self.Recipient.last_disbursement_time.is_(None)).count()

                return json.dumps(
                    {
                        "total_recipients": total_recipients,
                        "latest_recipient": last_address,
                        "latest_disburse_date": last_transaction_date,
                        "unfunded_recipients": unfunded,
                        "state": {
                            "eth": str(self.eth_balance),
                            "NU": str(self.token_balance),
                            "address": self.checksum_address,
                            "contract_address": self.token_agent.contract_address,
                        }
                    }
                )

        @rest_app.route("/register", methods=['POST'])
        def register():
            """Handle new recipient registration via POST request."""
            new_address = (
                request.form.get('address') or
                request.get_json().get('address')
            )

            if not new_address:
                return Response(response="no address was supplied", status=411)

            if not eth_utils.is_address(new_address):
                return Response(response="an invalid ethereum address was supplied. please ensure the address is a proper checksum.", status=400)
            else:
                new_address = eth_utils.to_checksum_address(new_address)

            if new_address in self.reserved_addresses:
                return Response(response="sorry, that address is reserved and cannot receive funds.", status=403)

            try:
                with ThreadedSession(self.db_engine) as session:

                    existing = Recipient.query.filter_by(address=new_address).all()
                    if len(existing) > self.MAX_INDIVIDUAL_REGISTRATIONS:
                        # Address already exists; Abort
                        self.log.debug(f"{new_address} is already enrolled {self.MAX_INDIVIDUAL_REGISTRATIONS} times.")
                        return Response(response=f"{new_address} requested too many times - Please use another address.", status=409)

                    # Create the record
                    recipient = Recipient(address=new_address, joined=datetime.now())
                    session.add(recipient)
                    session.commit()

            except Exception as e:
                # Pass along exceptions to the logger
                self.log.critical(str(e))
                raise

            else:
                return Response(status=200)  # TODO

        return rest_app

    def create_tables(self) -> None:
        """Build the web app (which defines the model) then create the database tables."""
        self.make_web_app()
        return self.db.create_all(app=self.rest_app)

    def start(self,
              host: str,
              port: int,
              web_services: bool = True,
              distribution: bool = True,
              crash_on_error: bool = False):
        """Start the faucet: optionally the distribution loop and the (blocking) web service."""
        self.crash_on_error = crash_on_error

        if self.start_time is not NOT_RUNNING:
            raise RuntimeError("Felix is already running.")

        self.start_time = maya.now()
        payload = {"wsgi": self.rest_app, "http_port": port, "resources": get_static_resources()}
        deployer = HendrixDeploy(action="start", options=payload)

        if distribution is True:
            self.start_distribution()

        if web_services is True:
            deployer.run()  # <-- Blocking call (Reactor)

    def start_distribution(self, now: bool = True) -> bool:
        """Start token distribution"""
        self.log.info(NU_BANNER)
        self.log.info("Starting NU Token Distribution | START")
        if self.token_balance == NU.ZERO():
            raise self.ActorError(f"Felix address {self.checksum_address} has 0 NU tokens.")
        self._distribution_task.start(interval=self.DISTRIBUTION_INTERVAL, now=now)
        return True

    def stop_distribution(self) -> bool:
        """Start token distribution"""
        # NOTE(review): docstring says "Start" but this stops - likely a copy/paste slip.
        self.log.info("Stopping NU Token Distribution | STOP")
        self._distribution_task.stop()
        return True

    def __calculate_disbursement(self, recipient) -> int:
        """Calculate the next reward for a recipient once they are selected for distribution."""

        # Initial Reward - sets the future rates
        if recipient.last_disbursement_time is None:
            amount = self.INITIAL_DISBURSEMENT

        # Cap reached, We'll continue to leak the minimum disbursement
        elif int(recipient.total_received) >= self.MAXIMUM_DISBURSEMENT:
            amount = self.MINIMUM_DISBURSEMENT

        # Calculate the next disbursement
        else:
            amount = math.ceil(int(recipient.last_disbursement_amount) * self.MULTIPLIER)
            if amount < self.MINIMUM_DISBURSEMENT:
                amount = self.MINIMUM_DISBURSEMENT

        return int(amount)

    def __transfer(self, disbursement: int, recipient_address: str) -> str:
        """Perform a single token transfer transaction from one account to another."""

        # Re-unlock from cache
        self.blockchain.transacting_power.activate()

        self.__disbursement += 1
        receipt = self.token_agent.transfer(amount=disbursement,
                                            target_address=recipient_address,
                                            sender_address=self.checksum_address)
        txhash = receipt['transactionHash']
        if self.distribute_ether:
            # Piggy-back a small ether airdrop alongside the token transfer.
            ether = self.ETHER_AIRDROP_AMOUNT
            transaction = {'to': recipient_address,
                           'from': self.checksum_address,
                           'value': ether,
                           'gasPrice': self.blockchain.client.gas_price_for_transaction()}
            transaction_dict = self.blockchain.build_payload(sender_address=self.checksum_address,
                                                             payload=transaction,
                                                             transaction_gas_limit=22000)
            _receipt = self.blockchain.sign_and_broadcast_transaction(transaction_dict=transaction_dict,
                                                                      transaction_name='transfer')
            self.log.info(f"Disbursement #{self.__disbursement} OK | NU {txhash.hex()[-6:]}"
                          f"({str(NU(disbursement, 'NuNit'))} + {self.ETHER_AIRDROP_AMOUNT} wei) -> {recipient_address}")
        else:
            self.log.info(f"Disbursement #{self.__disbursement} OK"
                          f"({str(NU(disbursement, 'NuNit'))} -> {recipient_address}")
        return txhash

    def airdrop_tokens(self):
        """
        Calculate airdrop eligibility via faucet registration
        and transfer tokens to selected recipients.
        """
        with ThreadedSession(self.db_engine) as session:
            population = session.query(self.Recipient).count()

        message = f"{population} registered faucet recipients; " \
                  f"Distributed {str(NU(self.__distributed, 'NuNit'))} since {self.start_time.slang_time()}."
        self.log.debug(message)
        if population == 0:
            return  # Abort - no recipients are registered.

        # For filtration
        since = datetime.now() - timedelta(hours=self.DISBURSEMENT_INTERVAL)

        datetime_filter = or_(self.Recipient.last_disbursement_time <= since,
                              self.Recipient.last_disbursement_time == None)  # This must be `==` not `is`

        with ThreadedSession(self.db_engine) as session:
            candidates = session.query(self.Recipient).filter(datetime_filter).all()
            if not candidates:
                self.log.info("No eligible recipients this round.")
                return

        # Discard invalid addresses, in-depth
        invalid_addresses = list()

        def siphon_invalid_entries(candidate):
            address_is_valid = eth_utils.is_checksum_address(candidate.address)
            if not address_is_valid:
                invalid_addresses.append(candidate.address)
            return address_is_valid

        candidates = list(filter(siphon_invalid_entries, candidates))

        if invalid_addresses:
            self.log.info(f"{len(invalid_addresses)} invalid entries detected. Pruning database.")

            # TODO: Is this needed? - Invalid entries are rejected at the endpoint view.
            # Prune database of invalid records
            # with ThreadedSession(self.db_engine) as session:
            #     bad_eggs = session.query(self.Recipient).filter(self.Recipient.address in invalid_addresses).all()
            #     for egg in bad_eggs:
            #         session.delete(egg.id)
            #     session.commit()

        if not candidates:
            self.log.info("No eligible recipients this round.")
            return

        d = threads.deferToThread(self.__do_airdrop, candidates=candidates)
        self._AIRDROP_QUEUE[self.__airdrop] = d
        return d

    def __do_airdrop(self, candidates: list):
        """Stage disbursements for all candidates, then transfer in batches (runs in a worker thread)."""
        self.log.info(f"Staging Airdrop #{self.__airdrop}.")

        # Staging
        staged_disbursements = [(r, self.__calculate_disbursement(recipient=r)) for r in candidates]
        batches = list(staged_disbursements[index:index+self.BATCH_SIZE]
                       for index in range(0, len(staged_disbursements), self.BATCH_SIZE))
        total_batches = len(batches)

        self.log.info("====== Staged Airdrop ======")
        for recipient, disbursement in staged_disbursements:
            self.log.info(f"{recipient.address} ... {str(disbursement)[:-18]}")
        self.log.info("==========================")

        # Staging Delay
        self.log.info(f"Airdrop will commence in {self.STAGING_DELAY} seconds...")
        if self.STAGING_DELAY > 3:
            time.sleep(self.STAGING_DELAY - 3)
        for i in range(3):
            time.sleep(1)
            self.log.info(f"NU Token airdrop starting in {3 - i} seconds...")

        # Slowly, in series...
        for batch, staged_disbursement in enumerate(batches, start=1):
            self.log.info(f"======= Batch #{batch} ========")

            for recipient, disbursement in staged_disbursement:

                # Perform the transfer... leaky faucet.
                self.__transfer(disbursement=disbursement, recipient_address=recipient.address)
                self.__distributed += disbursement

                # Update the database record
                recipient.last_disbursement_amount = str(disbursement)
                recipient.total_received = str(int(recipient.total_received) + disbursement)
                recipient.last_disbursement_time = datetime.now()

                self.db.session.add(recipient)
                self.db.session.commit()

            # end inner loop
            self.log.info(f"Completed Airdrop #{self.__airdrop} Batch #{batch} of {total_batches}.")

        # end outer loop
        now = maya.now()
        next_interval_slang = now.add(seconds=self.DISTRIBUTION_INTERVAL).slang_time()
        self.log.info(f"Completed Airdrop #{self.__airdrop}; Next airdrop is {next_interval_slang}.")
        del self._AIRDROP_QUEUE[self.__airdrop]
        self.__airdrop += 1