def check_config_for_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
    extra = [key for key in values if key not in cls.__fields__]
    if extra:
        logger.warning(f"Unprocessed extra config fields not set: '{extra}'.")

    return values

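# A minimal sketch of wiring a validator like the one above into a model
# (assumes pydantic v1; ``ExampleConfig`` and its ``ref`` field are hypothetical):
import logging
from typing import Any, Dict

from pydantic import BaseModel, root_validator

logger = logging.getLogger(__name__)


class ExampleConfig(BaseModel):
    ref: str = "main"

    @root_validator(pre=True)
    def check_config_for_extra_fields(cls, values: Dict[str, Any]) -> Dict[str, Any]:
        extra = [key for key in values if key not in cls.__fields__]
        if extra:
            logger.warning(f"Unprocessed extra config fields not set: '{extra}'.")

        return values


ExampleConfig(ref="dev", unknown_key=1)  # logs a warning about 'unknown_key'
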
def __init__(
    self,
    deployment_bytecode: HexBytes,
    abi: ConstructorABI,
) -> None:
    self.deployment_bytecode = deployment_bytecode
    self.abi = abi

    if not self.deployment_bytecode:
        logger.warning("Deploying an empty contract (no bytecode)")
        self.deployment_bytecode = HexBytes("")

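# The guard above relies on an empty HexBytes value being falsy. A quick check
# (assumes the ``hexbytes`` package; the bytecode value is illustrative):
from hexbytes import HexBytes

assert not HexBytes("")    # empty bytecode is falsy -> triggers the warning
assert HexBytes("0x6001")  # non-empty bytecode is truthy
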
def is_valid(self) -> bool:
    if self.config_file.exists():
        return True

    logger.warning(
        f"'{self.path.name}' is not an 'ApeProject', but attempting to process as one."
    )

    # NOTE: We always return True as a last-chance attempt because it often
    #       works anyway and prevents unnecessary plugin requirements.
    return True

def set_autosign(self, enabled: bool, passphrase: Optional[str] = None):
    """
    Allow this account to automatically sign messages and transactions.

    Args:
        enabled (bool): ``True`` to enable, ``False`` to disable.
        passphrase (Optional[str]): Optionally provide the passphrase.
            If not provided, you will be prompted to enter it.
    """
    if enabled:
        self.unlock(passphrase=passphrase)
        logger.warning("Danger! This account will now sign any transaction it's given.")

    self.__autosign = enabled

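# Usage sketch (assumes ``account`` is a keyfile account exposing set_autosign;
# the passphrase and transfer call are hypothetical):
account.set_autosign(True, passphrase="my-passphrase")
tx = account.transfer(other_account, "1 ether")  # signed without prompting
account.set_autosign(False)  # restore prompt-before-sign behavior
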
def __init__(self, data: Dict, valid_ecosystems: Dict, valid_networks: List[str]):
    for ecosystem_name, networks in data.items():
        if ecosystem_name not in valid_ecosystems:
            logger.warning(f"Invalid ecosystem '{ecosystem_name}' in deployments config.")
            continue

        ecosystem = valid_ecosystems[ecosystem_name]
        for network_name, contract_deployments in networks.items():
            if network_name not in valid_networks:
                logger.warning(f"Invalid network '{network_name}' in deployments config.")
                continue

            for deployment in contract_deployments:
                if "address" not in deployment:
                    logger.warning(
                        f"Missing 'address' field in deployment "
                        f"(ecosystem={ecosystem_name}, network={network_name})"
                    )
                    continue

                try:
                    deployment["address"] = ecosystem.decode_address(deployment["address"])
                except ValueError as err:
                    logger.warning(str(err))

    super().__init__(data)

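# Illustrative shape of the ``data`` argument (the contract name and address are
# hypothetical; ``valid_ecosystems`` maps ecosystem names to ecosystem APIs):
data = {
    "ethereum": {
        "mainnet": [
            {"contract_type": "MyContract", "address": "0x" + "ab" * 20},  # placeholder address
        ],
    },
}
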
def _load_manifest_from_file(file_path: Path) -> Optional[PackageManifest]:
    if not file_path.exists():
        return None

    try:
        manifest_dict = json.loads(file_path.read_text())
        if not isinstance(manifest_dict, dict) or "manifest" not in manifest_dict:
            raise AssertionError()  # To reach except block

        return PackageManifest(**manifest_dict)
    except (AssertionError, json.JSONDecodeError, ValidationError):
        logger.warning(f"Existing manifest file '{file_path}' corrupted. Re-building.")
        return None

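# A minimal round-trip sketch (assumes pytest's ``tmp_path`` fixture and that a
# bare EthPM v3 manifest -- {"manifest": "ethpm/3"} -- validates as a PackageManifest):
def test_load_manifest_from_file(tmp_path):
    path = tmp_path / "manifest.json"
    path.write_text('{"manifest": "ethpm/3"}')
    assert _load_manifest_from_file(path) is not None

    path.write_text("[1, 2, 3]")  # valid JSON, but not a manifest dict
    assert _load_manifest_from_file(path) is None
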
def _isolation(self) -> Iterator[None]:
    """
    Isolation logic used to implement isolation fixtures for each pytest scope.
    """
    snapshot_id = None

    try:
        snapshot_id = self.chain_manager.snapshot()
    except NotImplementedError:
        if not self._warned_for_unimplemented_snapshot:
            logger.warning(
                "The connected provider does not support snapshotting. "
                "Tests will not be completely isolated."
            )
            self._warned_for_unimplemented_snapshot = True

    yield

    if snapshot_id is not None and snapshot_id in self.chain_manager._snapshots:
        self.chain_manager.restore(snapshot_id)

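# A sketch of how scoped pytest fixtures typically wrap a generator like
# ``_isolation`` (the ``runner`` object and fixture name are hypothetical):
import pytest


@pytest.fixture(scope="function")
def function_isolation(runner):
    yield from runner._isolation()
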
def _warn_not_fully_implemented_error(self, results, plugin_name):
    if plugin_name in self._unimplemented_plugins:
        # Already warned
        return

    unimplemented_methods = []

    # Find the best API name to warn about.
    if isinstance(results, (list, tuple)):
        classes = [p for p in results if hasattr(p, "__name__")]
        if classes:
            # Likely only ever a single class in a registration, but just in case.
            api_name = " - ".join([p.__name__ for p in classes])
            for api_cls in classes:
                if hasattr(api_cls, "__abstractmethods__") and api_cls.__abstractmethods__:
                    unimplemented_methods.extend(api_cls.__abstractmethods__)
        else:
            # This would only happen if the registration consisted of all primitives.
            api_name = " - ".join(results)
    elif hasattr(results, "__name__"):
        api_name = results.__name__
        if hasattr(results, "__abstractmethods__") and results.__abstractmethods__:
            unimplemented_methods.extend(results.__abstractmethods__)
    else:
        api_name = results

    message = f"'{api_name}' from '{plugin_name}' is not fully implemented."
    if unimplemented_methods:
        methods_str = ", ".join(unimplemented_methods)
        message = f"{message} Remaining abstract methods: '{methods_str}'."

    logger.warning(message)

    # Record so we don't warn repeatedly
    self._unimplemented_plugins.append(plugin_name)

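# For reference, ``__abstractmethods__`` is the frozenset of names Python's ABC
# machinery leaves unimplemented -- the same attribute inspected above
# (``MyAPI`` is an illustrative ABC, not a real plugin interface):
from abc import ABC, abstractmethod


class MyAPI(ABC):
    @abstractmethod
    def connect(self): ...


assert "connect" in MyAPI.__abstractmethods__
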
def get_package_version(obj: Any) -> str:
    """
    Get the version of a single package.

    Args:
        obj: The object to search inside for ``__version__``.

    Returns:
        str: The version string.
    """
    # If the value is already cached/static
    if hasattr(obj, "__version__"):
        return obj.__version__

    # NOTE: In the case where we don't pass a module name
    if not isinstance(obj, str):
        obj = obj.__name__

    # Reduce the module string to the base package
    # NOTE: Assumes string input is a module name, e.g. ``__name__``
    pkg_name = obj.split(".")[0]

    # NOTE: In case the distribution and package name differ
    dists = get_distributions()
    if pkg_name in dists:
        # NOTE: Shouldn't really be more than 1, but you never know
        if len(dists[pkg_name]) != 1:
            logger.warning(f"duplicate pkg_name '{pkg_name}'")

        pkg_name = dists[pkg_name][0]

    try:
        return str(version_metadata(pkg_name))
    except PackageNotFoundError:
        # NOTE: Must handle empty string result here
        return ""

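# Usage sketch (returned version values are illustrative):
import click

get_package_version(click)                 # e.g. "8.1.7", straight from click.__version__
get_package_version("click.core")          # reduces to base package "click", then queries metadata
get_package_version("not_a_real_package")  # "" -- PackageNotFoundError is swallowed
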
def compile(
    self, filepaths: List[Path], base_path: Optional[Path] = None
) -> List[ContractType]:
    contract_types: List[ContractType] = []
    for path in filepaths:
        abi = json.loads(path.read_text())
        source_id = (
            str(get_relative_path(path, base_path))
            if base_path and path.is_absolute()
            else str(path)
        )
        if not isinstance(abi, list):
            logger.warning(f"Not a valid ABI interface JSON file (source_id={source_id}).")
        else:
            contract = ContractType.parse_obj(
                {
                    "contractName": path.stem,
                    "abi": abi,
                    "sourceId": source_id,
                }
            )
            contract_types.append(contract)

    return contract_types

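# Usage sketch: an "interface source" here is just a JSON file whose top-level
# value is a list of ABI entries. The path, file contents, and ``compiler``
# instance are hypothetical:
#
#   contracts/IExample.json:
#   [{"type": "function", "name": "foo", "inputs": [], "outputs": [],
#     "stateMutability": "view"}]
#
contract_types = compiler.compile([Path("contracts/IExample.json")])
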
def _try_close(warn_message):
    # NOTE: Defined inside a method; ``self`` comes from the enclosing scope.
    try:
        self.process.send_signal(SIGINT)
        self._wait_for_popen(self.PROCESS_WAIT_TIMEOUT)
    except KeyboardInterrupt:
        logger.warning(warn_message)

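# For context: SIGINT asks the child process to shut down gracefully, mirroring
# Ctrl-C (a standalone sketch; assumes a POSIX system where ``sleep`` exists):
import subprocess
from signal import SIGINT

proc = subprocess.Popen(["sleep", "60"])
proc.send_signal(SIGINT)  # graceful interrupt, like the helper above
proc.wait(timeout=5)      # raises subprocess.TimeoutExpired if it hangs
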
def connect(self):
    self._web3 = Web3(HTTPProvider(self.uri))
    if not self._web3.isConnected():
        if self.network.name != LOCAL_NETWORK_NAME:
            raise ProviderError(
                f"When running on network '{self.network.name}', "
                f"the Geth plugin expects the Geth process to already "
                f"be running on '{self.uri}'."
            )

        # Start an ephemeral geth process.
        parsed_uri = urlparse(self.uri)

        if parsed_uri.hostname not in ("localhost", "127.0.0.1"):
            raise ConnectionError(f"Unable to connect web3 to {parsed_uri.hostname}.")

        if not shutil.which("geth"):
            raise GethNotInstalledError()

        # Use the mnemonic from the test config
        config_manager = self.network.config_manager
        test_config = config_manager.get_config("test")
        mnemonic = test_config["mnemonic"]
        num_of_accounts = test_config["number_of_accounts"]

        self._geth = EphemeralGeth(
            self.data_folder,
            parsed_uri.hostname,
            parsed_uri.port,
            mnemonic,
            number_of_accounts=num_of_accounts,
        )
        self._geth.connect()

        if not self._web3.isConnected():
            self._geth.disconnect()
            raise ConnectionError("Unable to connect to locally running geth.")
    else:
        client_version = self._web3.clientVersion
        if "geth" in client_version.lower():
            logger.info(f"Connecting to existing Geth node at '{self.uri}'.")
        else:
            network_name = client_version.split("/")[0]
            logger.warning(f"Connecting Geth plugin to non-Geth network '{network_name}'.")

    self._web3.eth.set_gas_price_strategy(rpc_gas_price_strategy)

    def is_poa() -> bool:
        node_info: Mapping = self._node_info or {}
        chain_config = extract_nested_value(node_info, "protocols", "eth", "config")
        return chain_config is not None and "clique" in chain_config

    # If the network is Rinkeby, Goerli, or Kovan (PoA test-nets)
    if self._web3.eth.chain_id in (4, 5, 42) or is_poa():
        self._web3.middleware_onion.inject(geth_poa_middleware, layer=0)

    if self.network.name != LOCAL_NETWORK_NAME and self.network.chain_id != self.chain_id:
        raise ProviderError(
            "HTTP Connection does not match expected chain ID. "
            f"Are you connected to '{self.network.name}'?"
        )

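# For reference, the PoA middleware injected above compensates for Clique's
# oversized ``extraData`` field (a standalone sketch; assumes the web3.py v5-era
# API used in this plugin and a hypothetical local node URI):
from web3 import HTTPProvider, Web3
from web3.middleware import geth_poa_middleware

w3 = Web3(HTTPProvider("http://localhost:8545"))
w3.middleware_onion.inject(geth_poa_middleware, layer=0)
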
def _get_command(self, filepath: Path) -> Union[click.Command, click.Group, None]:
    relative_filepath = get_relative_path(filepath, self._project.path)

    # First load the code module by compiling it
    # NOTE: This does not execute the module
    logger.debug(f"Parsing module: {relative_filepath}")
    try:
        code = compile(filepath.read_text(), filepath, "exec")
    except SyntaxError as e:
        logger.error_from_exception(e, f"Exception while parsing script: {relative_filepath}")
        return None  # Prevents stalling scripts

    # NOTE: Introspect code structure only for the given patterns
    #       (do not execute it to find hooks)
    if "cli" in code.co_names:
        # If the module contains a click cli subcommand, process it and return the subcommand
        logger.debug(f"Found 'cli' command in script: {relative_filepath}")

        with use_temp_sys_path(filepath.parent.parent):
            try:
                ns = run_module(f"scripts.{filepath.stem}")
            except Exception as e:
                logger.error_from_exception(
                    e, f"Exception while parsing script: {relative_filepath}"
                )
                return None  # Prevents stalling scripts

        self._namespace[filepath.stem] = ns
        return ns["cli"]

    elif "main" in code.co_names:
        logger.debug(f"Found 'main' method in script: {relative_filepath}")

        @click.command(cls=NetworkBoundCommand, short_help=f"Run '{relative_filepath}:main'")
        @network_option()
        def call(network):
            _ = network  # Downstream might use this
            with use_temp_sys_path(filepath.parent.parent):
                ns = run_module(f"scripts.{filepath.stem}")
                ns["main"]()  # Execute the script

            self._namespace[filepath.stem] = ns

        return call

    else:
        logger.warning(f"No 'main' method or 'cli' command in script: {relative_filepath}")

        @click.command(cls=NetworkBoundCommand, short_help=f"Run '{relative_filepath}'")
        @network_option()
        def call(network):
            _ = network  # Downstream might use this
            with use_temp_sys_path(filepath.parent.parent):
                ns = run_module(f"scripts.{filepath.stem}")
                # Nothing to call, everything executes on loading

            self._namespace[filepath.stem] = ns

        return call

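# A hypothetical scripts/mint.py that the "cli" branch above would pick up;
# a script defining ``def main(): ...`` instead would hit the "main" branch:
import click


@click.command()
def cli():
    click.echo("minting...")
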
def _plugin_configs(self) -> Dict[str, PluginConfig]:
    project_name = self.PROJECT_FOLDER.stem
    if project_name in self._cached_configs:
        cache = self._cached_configs[project_name]
        self.name = cache.get("name", "")
        self.version = cache.get("version", "")
        self.default_ecosystem = cache.get("default_ecosystem", "ethereum")
        self.dependencies = cache.get("dependencies", [])
        self.deployments = cache.get("deployments", {})
        self.contracts_folder = cache.get("contracts_folder", self.PROJECT_FOLDER / "contracts")
        self.transaction_acceptance_timeout = cache.get(
            "transaction_acceptance_timeout", DEFAULT_TRANSACTION_ACCEPTANCE_TIMEOUT
        )
        return cache

    # First, load top-level configs. Then, load all the plugin configs.
    # The configs are popped off the dict for checking if all configs were processed.
    configs = {}
    config_file = self.PROJECT_FOLDER / CONFIG_FILE_NAME
    user_config = load_config(config_file) if config_file.exists() else {}
    self.name = configs["name"] = user_config.pop("name", "")
    self.version = configs["version"] = user_config.pop("version", "")
    self.default_ecosystem = configs["default_ecosystem"] = user_config.pop(
        "default_ecosystem", "ethereum"
    )
    self.transaction_acceptance_timeout = user_config.pop(
        "transaction_acceptance_timeout", DEFAULT_TRANSACTION_ACCEPTANCE_TIMEOUT
    )

    try:
        self.network_manager.set_default_ecosystem(self.default_ecosystem)
    except NetworkError as err:
        logger.warning(str(err))

    dependencies = user_config.pop("dependencies", []) or []
    if not isinstance(dependencies, list):
        raise ConfigError("'dependencies' config item must be a list of dicts.")

    decode = self.dependency_manager.decode_dependency
    configs["dependencies"] = [decode(dep) for dep in dependencies]  # type: ignore
    self.dependencies = configs["dependencies"]

    # NOTE: It is okay for this directory not to exist at this point.
    contracts_folder = (
        Path(user_config.pop("contracts_folder")).expanduser().resolve()
        if "contracts_folder" in user_config
        else self.PROJECT_FOLDER / "contracts"
    )
    self.contracts_folder = configs["contracts_folder"] = contracts_folder

    deployments = user_config.pop("deployments", {})
    valid_ecosystems = dict(self.plugin_manager.ecosystems)
    valid_network_names = [n[1] for n in [e[1] for e in self.plugin_manager.networks]]
    self.deployments = configs["deployments"] = DeploymentConfigCollection(
        deployments, valid_ecosystems, valid_network_names
    )

    for plugin_name, config_class in self.plugin_manager.config_class:
        user_override = user_config.pop(plugin_name, {})
        if config_class != ConfigDict:
            # NOTE: Will raise if improperly provided keys
            config = config_class(**user_override)  # type: ignore
        else:
            # NOTE: Just use it directly as a dict if `ConfigDict` is passed
            config = user_override

        configs[plugin_name] = config

    remaining_keys = user_config.keys()
    if len(remaining_keys) > 0:
        remaining_keys_str = ", ".join(remaining_keys)
        logger.warning(
            f"Unprocessed plugin config(s): {remaining_keys_str}. "
            "Plugins may not be installed yet or keys may be misspelled."
        )

    self._cached_configs[project_name] = configs
    return configs

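# For illustration, the ``user_config`` dict as it might look after loading a
# project's ape-config.yaml (all values hypothetical):
user_config = {
    "name": "my-project",
    "default_ecosystem": "ethereum",
    "contracts_folder": "src",
    "dependencies": [
        {"name": "openzeppelin", "github": "OpenZeppelin/openzeppelin-contracts"},
    ],
    "deployments": {"ethereum": {"mainnet": []}},
}
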