def _get_path(path_str: str) -> Tuple[Path, Optional[Project]]:
    # Returns path to a python module
    #
    # Resolution order:
    #   1. no loaded projects -> resolve as a plain filesystem path (project is None)
    #   2. relative path      -> search each loaded project's scripts folder
    #   3. absolute path      -> must exist and lie inside some loaded project
    path = Path(path_str).with_suffix(".py")
    if not get_loaded_projects():
        if not path.exists():
            raise FileNotFoundError(f"Cannot find {path_str}")
        return path.resolve(), None
    if not path.is_absolute():
        for project in get_loaded_projects():
            # the path may or may not already include the project's scripts folder
            if path.parts[:1] == (project._structure["scripts"],):
                script_path = project._path.joinpath(path)
            else:
                script_path = project._path.joinpath(project._structure["scripts"]).joinpath(path)
            if script_path.exists():
                return script_path.resolve(), project
        raise FileNotFoundError(f"Cannot find {path_str}")
    if not path.exists():
        raise FileNotFoundError(f"Cannot find {path_str}")
    try:
        # match the absolute path to the project that contains it
        project = next(i for i in get_loaded_projects() if path_str.startswith(i._path.as_posix()))
    except StopIteration:
        raise ProjectNotFound(f"{path_str} is not part of an active project")
    return path.resolve(), project
def run(
    script_path: str,
    method_name: str = "main",
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    project: Any = None,
) -> None:
    """Loads a project script and runs a method in it.

    script_path: path of script to load
    method_name: name of method to run
    args: method args
    kwargs: method kwargs
    project: project to add to the script namespace

    Returns: return value from called method
    """
    if args is None:
        args = tuple()
    if kwargs is None:
        kwargs = {}
    # with exactly one loaded project, default to it
    if not project and len(get_loaded_projects()) == 1:
        project = get_loaded_projects()[0]
    default_path: str = "scripts"
    if project:
        # if there is an active project, temporarily add all the ContractContainer
        # instances to the main brownie namespace so they can be imported by the script
        brownie: Any = sys.modules["brownie"]
        brownie_dict = brownie.__dict__.copy()
        brownie_all = brownie.__all__.copy()
        brownie.__dict__.update(project)
        brownie.__all__.extend(project.__all__)
        default_path = project._project_path.joinpath("scripts").as_posix()
    try:
        script: Path = _get_path(script_path, default_path)
        module = _import_from_path(script)
        name = module.__name__
        if not hasattr(module, method_name):
            raise AttributeError(f"Module '{name}' has no method '{method_name}'")
        print(
            f"\nRunning '{color['module']}{name}{color}.{color['callable']}{method_name}{color}'..."
        )
        return getattr(module, method_name)(*args, **kwargs)
    finally:
        if project:
            # cleanup namespace - restore the snapshot taken before the script ran
            brownie.__dict__.clear()
            brownie.__dict__.update(brownie_dict)
            brownie.__all__ = brownie_all
def pytest_sessionstart():
    """Populate `_pooldata` from each pool's `pooldata.json`, plus pool templates.

    For every pool directory the entry is augmented with its name, the
    StableSwap contract stem and (when present) the Deposit zap contract stem.
    """
    # load `pooldata.json` for each pool
    project = get_loaded_projects()[0]
    for path in [i for i in project._path.glob("contracts/pools/*") if i.is_dir()]:
        with path.joinpath('pooldata.json').open() as fp:
            _pooldata[path.name] = json.load(fp)
        _pooldata[path.name].update(
            name=path.name,
            # fix: these glob patterns had a stray `f` prefix with no placeholders (F541)
            swap_contract=next(i.stem for i in path.glob("StableSwap*")))
        zap_contract = next((i.stem for i in path.glob("Deposit*")), None)
        if zap_contract:
            _pooldata[path.name]['zap_contract'] = zap_contract
    # create pooldata for templates
    # the highest-sorting CurveToken container is used as the template LP token
    lp_contract = sorted(i._name for i in project if i._name.startswith("CurveToken"))[-1]
    for path in [i for i in project._path.glob("contracts/pool-templates/*") if i.is_dir()]:
        with path.joinpath('pooldata.json').open() as fp:
            name = f"template-{path.name}"
            _pooldata[name] = json.load(fp)
        _pooldata[name].update(
            name=name,
            lp_contract=lp_contract,
            swap_contract=next(i.stem for i in path.glob("*Swap*")),
        )
        zap_contract = next((i.stem for i in path.glob("Deposit*")), None)
        if zap_contract:
            _pooldata[name]['zap_contract'] = zap_contract
def pytest_sessionstart():
    """Load `pooldata.json` for every pool directory into the shared cache."""
    active = get_loaded_projects()[0]
    pool_dirs = (p for p in active._path.glob("contracts/pools/*") if p.is_dir())
    for pool_dir in pool_dirs:
        with pool_dir.joinpath('pooldata.json').open() as handle:
            data = json.load(handle)
        data['name'] = pool_dir.name
        _pooldata[pool_dir.name] = data
def pool_data(request):
    """Yield the pooldata dict for the pool targeted by the current test.

    An indirect parameter takes priority; otherwise the pool name is taken
    from the test file's location under the project's tests directory.
    """
    active = get_loaded_projects()[0]
    if not hasattr(request, "param"):
        rel_path = Path(request.fspath).relative_to(active._path)
        pool_name = rel_path.parts[1]
    else:
        pool_name = request.param
    yield _pooldata[pool_name]
def pool_data(request):
    """Return the pooldata dict for the pool targeted by the current test.

    An indirect parameter takes priority; otherwise the pool name is taken
    from the test path, laid out as ("tests", "pools" or "zaps", pool_name, ...).
    """
    active = get_loaded_projects()[0]
    if not hasattr(request, "param"):
        rel_path = Path(request.fspath).relative_to(active._path)
        pool_name = rel_path.parts[2]
    else:
        pool_name = request.param
    return _pooldata[pool_name]
def pytest_sessionstart():
    """Load `pooldata.json` for each pool, recording its StableSwap contract stem."""
    active = get_loaded_projects()[0]
    for pool_dir in (p for p in active._path.glob("contracts/pools/*") if p.is_dir()):
        with pool_dir.joinpath("pooldata.json").open() as handle:
            data = json.load(handle)
        swap_name = next(c.stem for c in pool_dir.glob("StableSwap*"))
        data.update(name=pool_dir.name, swap_contract=swap_name)
        _pooldata[pool_dir.name] = data
def pytest_collection_modifyitems(config, items):
    """Deselect collected tests that do not apply to the parametrized pool."""
    project = get_loaded_projects()[0]
    is_forked = "fork" in CONFIG.active_network['id']
    for item in items.copy():
        try:
            params = item.callspec.params
            data = _pooldata[params['pool_data']]
        except Exception:
            # item is not parametrized against a pool - leave it untouched
            continue
        # during forked tests, filter pools where pooldata does not contain deployment addresses
        if is_forked and next((i for i in data["coins"] if "underlying_address" not in i), False):
            items.remove(item)
            continue
        # remove excess `itercoins` parametrized tests
        for marker in item.iter_markers(name="itercoins"):
            values = [params[i] for i in marker.args]
            if max(values) >= len(data['coins']) or len(set(values)) < len(values):
                items.remove(item)
                break
        if item not in items:
            continue
        # NOTE(review): the marker loops below call `items.remove` without a break or a
        # membership re-check; if an item matches more than one removing marker this
        # raises ValueError on the second remove - confirm (a later revision adds guards)
        # apply `skip_pool` marker
        for marker in item.iter_markers(name="skip_pool"):
            if params["pool_data"] in marker.args:
                items.remove(item)
        # apply `target_pool` marker
        for marker in item.iter_markers(name="target_pool"):
            if params["pool_data"] not in marker.args:
                items.remove(item)
        # apply `lending` marker
        for marker in item.iter_markers(name="lending"):
            deployer = getattr(project, data['swap_contract'])
            if "exchange_underlying" not in deployer.signatures:
                items.remove(item)
        # apply `zap` marker
        for marker in item.iter_markers(name="zap"):
            if "zap_contract" not in data:
                items.remove(item)
    # hacky magic to ensure the correct number of tests is shown in collection report
    config.pluginmanager.get_plugin("terminalreporter")._numcollected = len(items)
def run(
    script_path: str,
    method_name: str = "main",
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    project: Any = None,
) -> None:
    """Loads a project script and runs a method in it.

    script_path: path of script to load
    method_name: name of method to run
    args: method args
    kwargs: method kwargs
    project: (deprecated)

    Returns: return value from called method
    """
    if args is None:
        args = tuple()
    if kwargs is None:
        kwargs = {}
    if not get_loaded_projects():
        raise ProjectNotFound("Cannot run a script without an active project")
    script, project = _get_path(script_path)
    # temporarily add project objects to the main namespace, so the script can import them
    brownie: Any = sys.modules["brownie"]
    brownie_dict = brownie.__dict__.copy()
    brownie_all = brownie.__all__.copy()
    brownie.__dict__.update(project)
    brownie.__all__.extend(project.__all__)
    try:
        # import by path relative to the project root
        script = script.absolute().relative_to(project._path)
        module = _import_from_path(script)
        name = module.__name__
        if not hasattr(module, method_name):
            raise AttributeError(f"Module '{name}' has no method '{method_name}'")
        print(
            f"\nRunning '{color['module']}{name}{color}.{color['callable']}{method_name}{color}'..."
        )
        return getattr(module, method_name)(*args, **kwargs)
    finally:
        # cleanup namespace - restore the snapshot taken before the script ran
        brownie.__dict__.clear()
        brownie.__dict__.update(brownie_dict)
        brownie.__all__ = brownie_all
def _get_path(path_str: str) -> Tuple[Path, Project]:
    """Resolve a script path to a python module and the project that owns it.

    Relative paths are searched under each loaded project's `scripts` folder;
    absolute paths must exist and lie inside a loaded project.
    """
    module_path = Path(path_str).with_suffix(".py")
    if not module_path.is_absolute():
        if module_path.parts[0] != "scripts":
            module_path = Path("scripts").joinpath(module_path)
        for active in get_loaded_projects():
            candidate = active._path.joinpath(module_path)
            if candidate.exists():
                return candidate, active
        raise FileNotFoundError(f"Cannot find {path_str}")
    if not module_path.exists():
        raise FileNotFoundError(f"Cannot find {path_str}")
    owners = (i for i in get_loaded_projects() if path_str.startswith(i._path.as_posix()))
    try:
        owner = next(owners)
    except StopIteration:
        raise ProjectNotFound(f"{path_str} is not part of an active project")
    return module_path, owner
def main():
    """Deploy the LP token and StableSwap pool described by `pooldata.json`."""
    project = get_loaded_projects()[0]
    balance = DEPLOYER.balance()
    # load data about the deployment from `pooldata.json`
    contracts_path = project._path.joinpath("contracts/pools")
    with contracts_path.joinpath(f"{POOL_NAME}/pooldata.json").open() as fp:
        pool_data = json.load(fp)
    swap_name = next(i.stem for i in contracts_path.glob(f"{POOL_NAME}/StableSwap*"))
    swap_deployer = getattr(project, swap_name)
    token_deployer = getattr(project, pool_data.get("lp_contract"))
    underlying_coins = [i["underlying_address"] for i in pool_data["coins"]]
    base_pool = None
    if "base_pool" in pool_data:
        # metapool: read the base pool's deployed swap address from its own pooldata
        with contracts_path.joinpath(f"{pool_data['base_pool']}/pooldata.json").open() as fp:
            base_pool_data = json.load(fp)
        base_pool = base_pool_data["swap_address"]
    # deploy the token contract
    token_args = pool_data["lp_constructor"]
    token = token_deployer.deploy(token_args["name"], token_args["symbol"], _tx_params())
    # deploy the pool contract
    # order constructor args according to the contract ABI, not the json layout
    abi = next(i["inputs"] for i in swap_deployer.abi if i["type"] == "constructor")
    args = pool_data["swap_constructor"]
    args.update(
        _coins=underlying_coins,
        _underlying_coins=underlying_coins,
        _pool_token=token,
        _base_pool=base_pool,
        _owner=POOL_OWNER,
    )
    deployment_args = [args[i["name"]] for i in abi] + [_tx_params()]
    swap = swap_deployer.deploy(*deployment_args)
    # set the lp minter to the pool address
    token.set_minter(swap, _tx_params())
    print(f"Gas used in deployment: {(balance - DEPLOYER.balance()) / 1e18:.4f} ETH")
def pytest_generate_tests(metafunc):
    """Parametrize `pool_data` according to test location and seed `itercoins` ranges."""
    project = get_loaded_projects()[0]
    # upper bound for coin-index parametrization; excess values are pruned later
    itercoins_bound = max(len(i["coins"]) for i in _pooldata.values())
    if "pool_data" in metafunc.fixturenames:
        # parametrize `pool_data`
        test_path = Path(metafunc.definition.fspath).relative_to(project._path)
        if test_path.parts[1] in ("pools", "zaps"):
            if test_path.parts[2] in ("common", "meta"):
                # parametrize common pool/zap tests to run against all pools
                if metafunc.config.getoption("pool"):
                    params = metafunc.config.getoption("pool").split(",")
                else:
                    params = list(_pooldata)
                if test_path.parts[2] == "meta":
                    # meta tests only make sense for pools that wrap a base pool
                    params = [i for i in params if _pooldata[i].get("base_pool")]
            else:
                # run targetted pool/zap tests against only the specific pool
                params = [test_path.parts[2]]
            if test_path.parts[1] == "zaps":
                # for zap tests, filter by pools that have a Deposit contract
                params = [i for i in params if _pooldata[i].get("zap_contract")]
        else:
            # pool tests outside `tests/pools` or `tests/zaps` will only run when
            # a target pool is explicitly declared
            try:
                params = metafunc.config.getoption("pool").split(",")
            except Exception:
                params = []
                warnings.warn(
                    f"'{test_path.as_posix()}' contains pool tests, but is outside of "
                    "'tests/pools/'. To run it, specify a pool with `--pool [name]`"
                )
        metafunc.parametrize("pool_data", params, indirect=True, scope="session")
    # apply initial parametrization of `itercoins`
    for marker in metafunc.definition.iter_markers(name="itercoins"):
        for item in marker.args:
            metafunc.parametrize(item, range(itercoins_bound))
def run(
    script_path: str,
    method_name: str = "main",
    args: Optional[Tuple] = None,
    kwargs: Optional[Dict] = None,
    project: Any = None,
) -> None:
    """Loads a project script and runs a method in it.

    script_path: path of script to load
    method_name: name of method to run
    args: method args
    kwargs: method kwargs
    project: (deprecated)

    Returns: return value from called method
    """
    if args is None:
        args = tuple()
    if kwargs is None:
        kwargs = {}
    if not get_loaded_projects():
        raise ProjectNotFound("Cannot run a script without an active project")
    script, project = _get_path(script_path)
    # temporarily add project objects to the main namespace, so the script can import them
    project._add_to_main_namespace()
    try:
        # import by path relative to the project root
        script = script.absolute().relative_to(project._path)
        module = _import_from_path(script)
        name = module.__name__
        if not hasattr(module, method_name):
            raise AttributeError(f"Module '{name}' has no method '{method_name}'")
        print(
            f"\nRunning '{color('bright blue')}{name}{color}."
            f"{color('bright cyan')}{method_name}{color}'..."
        )
        return getattr(module, method_name)(*args, **kwargs)
    finally:
        # cleanup namespace
        project._remove_from_main_namespace()
def pytest_generate_tests(metafunc):
    """Parametrize `pool_data` for common tests and seed `itercoins` index ranges."""
    active = get_loaded_projects()[0]
    # upper bound for coin-index parametrization across all known pools
    coin_bound = max(len(data['coins']) for data in _pooldata.values())
    if "pool_data" in metafunc.fixturenames:
        rel_path = Path(metafunc.definition.fspath).relative_to(active._path)
        if rel_path.parts[1] == "common":
            if metafunc.config.getoption("pool"):
                params = metafunc.config.getoption("pool").split(',')
            else:
                params = list(_pooldata)
            metafunc.parametrize("pool_data", params, indirect=True, scope="session")
    for marker in metafunc.definition.iter_markers(name="itercoins"):
        for fixture_name in marker.args:
            metafunc.parametrize(fixture_name, range(coin_bound))
def pytest_sessionstart():
    """Populate `_pooldata` from each pool's `pooldata.json` and add hard-coded
    template pool entries built from `hook_decimals`.
    """
    # load `pooldata.json` for each pool
    project = get_loaded_projects()[0]
    for path in [i for i in project._path.glob("contracts/pools/*") if i.is_dir()]:
        with path.joinpath('pooldata.json').open() as fp:
            _pooldata[path.name] = json.load(fp)
        _pooldata[path.name].update(
            name=path.name,
            # fix: the glob pattern had a stray `f` prefix with no placeholders (F541)
            swap_contract=next(i.stem for i in path.glob("StableSwap*")))
    # create pooldata for templates
    # the highest-sorting CurveToken container is used as the template LP token
    lp_contract = sorted(i._name for i in project if i._name.startswith("CurveToken"))[-1]
    # NOTE(review): `tethered=bool(i)` marks every coin with nonzero decimals as
    # tethered - looks deliberate for template coverage, but confirm intent
    _pooldata['template-y'] = {
        "name": "template-y",
        "swap_contract": "StableSwapYLend",
        "lp_contract": lp_contract,
        "wrapped_contract": "yERC20",
        "coins": [
            {"decimals": i, "tethered": bool(i), "wrapped": True, "wrapped_decimals": i}
            for i in hook_decimals
        ],
    }
    _pooldata['template-base'] = {
        "name": "template-base",
        "swap_contract": "StableSwapBase",
        "lp_contract": lp_contract,
        "coins": [{"decimals": i, "tethered": bool(i), "wrapped": False} for i in hook_decimals],
    }
def pytest_sessionstart():
    """Populate `_pooldata` for all pools and templates, then resolve base-pool links."""
    # load `pooldata.json` for each pool
    project = get_loaded_projects()[0]
    for path in [i for i in project._path.glob("contracts/pools/*") if i.is_dir()]:
        with path.joinpath("pooldata.json").open() as fp:
            _pooldata[path.name] = json.load(fp)
        _pooldata[path.name].update(
            name=path.name,
            swap_contract=next(i.stem for i in path.glob("StableSwap*")))
        zap_contract = next((i.stem for i in path.glob("Deposit*")), None)
        if zap_contract:
            _pooldata[path.name]["zap_contract"] = zap_contract
    # create pooldata for templates
    # the highest-sorting CurveToken container is used as the template LP token
    lp_contract = sorted(i._name for i in project if i._name.startswith("CurveToken"))[-1]
    for path in [i for i in project._path.glob("contracts/pool-templates/*") if i.is_dir()]:
        with path.joinpath("pooldata.json").open() as fp:
            name = f"template-{path.name}"
            _pooldata[name] = json.load(fp)
        _pooldata[name].update(
            name=name,
            lp_contract=lp_contract,
            swap_contract=next(i.stem for i in path.glob("*Swap*")),
        )
        zap_contract = next((i.stem for i in path.glob("Deposit*")), None)
        if zap_contract:
            _pooldata[name]["zap_contract"] = zap_contract
    # replace base-pool references with the actual base pool's data dict
    for _, data in _pooldata.items():
        if "base_pool" in data:
            data["base_pool"] = _pooldata[data["base_pool"]]
        elif "base_pool_contract" in data:
            # for metapool templates, we target a contract instead of a specific pool
            base_swap = data["base_pool_contract"]
            base_data = next(v for v in _pooldata.values() if v["swap_contract"] == base_swap)
            data["base_pool"] = base_data
def pytest_ignore_collect(path, config):
    """Decide whether to skip collecting a test file.

    Returns True to ignore the file, None to let pytest decide normally.
    """
    project = get_loaded_projects()[0]
    path = Path(path).relative_to(project._path)
    # directory names between `tests/` and the file itself
    path_parts = path.parts[1:-1]
    if path.is_dir():
        return None
    # always collect fixtures
    if path_parts[:1] == ("fixtures",):
        return None
    # always allow forked tests
    # fix: was `path_parts[0] == "forked"`, which raises IndexError for files
    # directly under `tests/` (path_parts empty); use the safe slice comparison
    # already used for the fixtures check above
    if path_parts[:1] == ("forked",):
        return None
    # with the `--unitary` flag, skip any tests in an `integration` subdirectory
    if config.getoption("unitary") and "integration" in path_parts:
        return True
    # with the `--integration` flag, skip any tests NOT in an `integration` subdirectory
    if config.getoption("integration") and "integration" not in path_parts:
        return True
    if config.getoption("pool") and path_parts:
        # with a specific pool targeted, only run pool and zap tests
        if path_parts[0] not in ("pools", "zaps"):
            return True
        # always run common tests
        # fix: slice comparisons avoid IndexError for files directly under
        # `tests/pools/` or `tests/zaps/`
        if path_parts[1:2] == ("common",):
            return None
        target_pools = config.getoption("pool").split(",")
        # only include metapool tests if at least one targeted pool is a metapool
        if path_parts[1:2] == ("meta",):
            return next((None for i in target_pools if _pooldata[i].get("base_pool")), True)
        # filter other pool-specific folders
        if path_parts[1:2] and path_parts[1] not in target_pools:
            return True
def pytest_collection_modifyitems(config, items):
    """Deselect collected tests that do not apply to the parametrized pool.

    fix: the original marker loops used `continue` after `items.remove(item)`,
    which only continued the INNER marker loop - an item matching multiple
    removing markers (or several markers of the same name) was removed twice,
    raising ValueError. Each loop now breaks on removal and the outer body
    re-checks membership before applying the next marker, mirroring the
    pattern used by the later revision of this hook.
    """
    project = get_loaded_projects()[0]
    for item in items.copy():
        try:
            params = item.callspec.params
            data = _pooldata[params['pool_data']]
        except Exception:
            # item is not parametrized against a pool - leave it untouched
            continue
        # remove excess `itercoins` parametrized tests
        if next(item.iter_markers(name="itercoins"), None):
            values = [i for i in params.values() if isinstance(i, int)]
            if max(values) >= len(data['coins']) or len(set(values)) < len(values):
                items.remove(item)
                continue
        # apply `skip_pool` marker
        for marker in item.iter_markers(name="skip_pool"):
            if params["pool_data"] in marker.args:
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `target_pool` marker
        for marker in item.iter_markers(name="target_pool"):
            if params["pool_data"] not in marker.args:
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `lending` marker
        for marker in item.iter_markers(name="lending"):
            deployer = getattr(project, data['swap_contract'])
            if "exchange_underlying" not in deployer.signatures:
                items.remove(item)
                break
    # hacky magic to ensure the correct number of tests is shown in collection report
    config.pluginmanager.get_plugin("terminalreporter")._numcollected = len(items)
def main():
    """Deploy the LP token, StableSwap pool, liquidity gauge and (optional) zap
    described by the pool's `pooldata.json`, and report gas spent.
    """
    project = get_loaded_projects()[0]
    balance = DEPLOYER.balance()
    # load data about the deployment from `pooldata.json`
    contracts_path = project._path.joinpath("contracts/pools")
    with contracts_path.joinpath(f"{POOL_NAME}/pooldata.json").open() as fp:
        pool_data = json.load(fp)
    swap_name = next(i.stem for i in contracts_path.glob(f"{POOL_NAME}/StableSwap*"))
    swap_deployer = getattr(project, swap_name)
    token_deployer = getattr(project, pool_data.get("lp_contract"))
    underlying_coins = [i["underlying_address"] for i in pool_data["coins"]]
    # wrapped address falls back to the underlying address for unwrapped coins
    wrapped_coins = [i.get("wrapped_address", i["underlying_address"]) for i in pool_data["coins"]]
    base_pool = None
    if "base_pool" in pool_data:
        # metapool: read the base pool's deployed swap address from its own pooldata
        with contracts_path.joinpath(f"{pool_data['base_pool']}/pooldata.json").open() as fp:
            base_pool_data = json.load(fp)
        base_pool = base_pool_data["swap_address"]
    # deploy the token
    token_args = pool_data["lp_constructor"]
    token = token_deployer.deploy(token_args["name"], token_args["symbol"], _tx_params())
    # deploy the pool
    # order constructor args according to the contract ABI, not the json layout
    abi = next(i["inputs"] for i in swap_deployer.abi if i["type"] == "constructor")
    args = pool_data["swap_constructor"]
    args.update(
        _coins=wrapped_coins,
        _underlying_coins=underlying_coins,
        _pool_token=token,
        _base_pool=base_pool,
        _owner=POOL_OWNER,
    )
    deployment_args = [args[i["name"]] for i in abi] + [_tx_params()]
    swap = swap_deployer.deploy(*deployment_args)
    # set the minter
    token.set_minter(swap, _tx_params())
    # deploy the liquidity gauge
    LiquidityGauge.deploy(token, MINTER, POOL_OWNER, _tx_params())
    # deploy the zap (only pools with a Deposit* contract have one)
    zap_name = next((i.stem for i in contracts_path.glob(f"{POOL_NAME}/Deposit*")), None)
    if zap_name is not None:
        zap_deployer = getattr(project, zap_name)
        abi = next(i["inputs"] for i in zap_deployer.abi if i["type"] == "constructor")
        args = {
            "_coins": wrapped_coins,
            "_underlying_coins": underlying_coins,
            "_token": token,
            "_pool": swap,
            "_curve": swap,
        }
        deployment_args = [args[i["name"]] for i in abi] + [_tx_params()]
        zap_deployer.deploy(*deployment_args)
    print(f"Gas used in deployment: {(balance - DEPLOYER.balance()) / 1e18:.4f} ETH")
def project():
    """Yield the active (first loaded) brownie project."""
    active = get_loaded_projects()[0]
    yield active
def pytest_collection_modifyitems(config, items):
    """Deselect collected tests that do not apply to the parametrized pool.

    Each marker loop breaks on removal, and the outer body re-checks membership
    before applying the next marker, so an item is never removed twice.
    """
    project = get_loaded_projects()[0]
    try:
        is_forked = "fork" in CONFIG.active_network["id"]
    except Exception:
        # no active network configured - treat as not forked
        is_forked = False
    for item in items.copy():
        try:
            params = item.callspec.params
            data = _pooldata[params["pool_data"]]
        except Exception:
            # item is not parametrized against a pool - leave it untouched
            continue
        # during forked tests, filter pools where pooldata does not contain deployment addresses
        if is_forked and next((i for i in data["coins"] if "underlying_address" not in i), False):
            items.remove(item)
            continue
        # remove excess `itercoins` parametrized tests
        for marker in item.iter_markers(name="itercoins"):
            n_coins = len(data["coins"])
            # for metapools, consider the base pool when calculating n_coins
            if marker.kwargs.get("underlying") and "base_pool" in data:
                n_coins = len(data["base_pool"]["coins"]) + 1
            values = [params[i] for i in marker.args]
            if max(values) >= n_coins or len(set(values)) < len(values):
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `skip_pool` marker
        for marker in item.iter_markers(name="skip_pool"):
            if params["pool_data"] in marker.args:
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `skip_pool_type` marker
        for marker in item.iter_markers(name="skip_pool_type"):
            if len(set(data.get("pool_types", [])) & set(marker.args)):
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `target_pool` marker
        for marker in item.iter_markers(name="target_pool"):
            if params["pool_data"] not in marker.args:
                items.remove(item)
                break
        if item not in items:
            continue
        # apply `lending` marker
        if next(item.iter_markers(name="lending"), False):
            deployer = getattr(project, data["swap_contract"])
            if "exchange_underlying" not in deployer.signatures:
                items.remove(item)
                continue
        # apply `zap` marker
        if next(item.iter_markers(name="zap"), False) and "zap_contract" not in data:
            items.remove(item)
            continue
    # hacky magic to ensure the correct number of tests is shown in collection report
    config.pluginmanager.get_plugin("terminalreporter")._numcollected = len(items)
def pytest_collection_modifyitems(config, items):
    """Deselect factory tests whose parametrization does not apply to the pool type."""
    project = get_loaded_projects()[0]
    for item in items.copy():
        path = Path(item.fspath).relative_to(project._path)
        # directory names between `tests/` and the file itself
        path_parts = path.parts[1:-1]
        try:
            params = item.callspec.params
            pool_size = params["plain_pool_size"]
            pool_type = params["pool_type"]
            return_type = params["return_type"]
            decimals = params["decimals"]
            meta_implementation_idx = params["meta_implementation_idx"]
        except Exception:
            # non-parametrized item: at the tests root, only keep the optimized pool type
            # NOTE(review): `pool_type` here carries the value from a PREVIOUS iteration
            # (or is unbound on the first one) - looks like a latent NameError; confirm
            if path_parts == ():
                if pool_type != 2:
                    items.remove(item)
            continue
        # optimized pool only supports return True/revert
        if pool_type == 2 and return_type != 0:
            items.remove(item)
            continue
        # optimized pool only supports precision == 18
        if pool_type == 2 and decimals != 18:
            items.remove(item)
            continue
        # meta pools we only test against 1 type no parameterization needed
        if pool_type in [4, 5, 6]:
            if decimals != 18:
                items.remove(item)
                continue
            if return_type != 0:
                items.remove(item)
                continue
            if pool_size > 2:
                items.remove(item)
                continue
        else:
            if meta_implementation_idx > 0:
                items.remove(item)
                continue
            if "zaps" in path_parts:
                # zap tests only apply to the meta implementations
                # and we only use the template zap DepositZap.vy
                # all the zaps are essentially copies of this with
                # constants set appropriately
                items.remove(item)
                continue
        if len(path_parts) > 1 and path_parts[1] == "rebase":
            if pool_type != 3:
                items.remove(item)
                continue
        # only allow meta pools in the meta directory
        if len(path_parts) > 1 and path_parts[1] == "meta":
            if pool_type not in [4, 5, 6]:
                items.remove(item)
                continue
        if pool_type != 6 and "test_sidechain_rewards.py" in path.parts:
            items.remove(item)
            continue
        if "test_factory.py" not in path.parts and len(path.parts) == 2:
            if not (pool_type == 2 and pool_size == 2):
                items.remove(item)
                continue
    # hacky magic to ensure the correct number of tests is shown in collection report
    config.pluginmanager.get_plugin("terminalreporter")._numcollected = len(items)
def project():
    """Return the active (first loaded) brownie project."""
    loaded = get_loaded_projects()
    return loaded[0]
def pytest_generate_tests(metafunc):
    """Parametrize the `token` fixture over every CurveToken deployer in the project."""
    active = get_loaded_projects()[0]
    if "token" not in metafunc.fixturenames:
        return
    deployers = [container for container in active if container._name.startswith("CurveToken")]
    metafunc.parametrize("token", deployers, indirect=True, scope="module")
def main(): project = get_loaded_projects()[0] # load data about the deployment from `pooldata.json` contracts_path = project._path.joinpath("contracts/pools") with contracts_path.joinpath(f"{POOL_NAME}/pooldata.json").open() as fp: pool_data = json.load(fp) swap_name = next(i.stem for i in contracts_path.glob(f"{POOL_NAME}/StableSwap*")) swap_deployer = getattr(project, swap_name) token_deployer = getattr(project, pool_data.get('lp_contract')) underlying_coins = [i['underlying_address'] for i in pool_data['coins']] wrapped_coins = [ i.get('wrapped_address', i['underlying_address']) for i in pool_data['coins'] ] base_pool = None if "base_pool" in pool_data: with contracts_path.joinpath( f"{pool_data['base_pool']}/pooldata.json").open() as fp: base_pool_data = json.load(fp) base_pool = base_pool_data['swap_address'] # deploy the token token_args = pool_data["lp_constructor"] token = token_deployer.deploy(token_args['name'], token_args['symbol'], 18, 0, _tx_params()) # deploy the pool abi = next(i['inputs'] for i in swap_deployer.abi if i['type'] == "constructor") args = pool_data["swap_constructor"] args.update( _coins=wrapped_coins, _underlying_coins=underlying_coins, _pool_token=token, _base_pool=base_pool, _owner=POOL_OWNER, ) deployment_args = [args[i['name']] for i in abi] + [_tx_params()] swap = swap_deployer.deploy(*deployment_args) # set the minter token.set_minter(swap, _tx_params()) # deploy the liquidity gauge LiquidityGauge.deploy(token, MINTER, _tx_params()) # deploy the zap zap_name = next( (i.stem for i in contracts_path.glob(f"{POOL_NAME}/Deposit*")), None) if zap_name is not None: zap_deployer = getattr(project, zap_name) abi = next(i['inputs'] for i in zap_deployer.abi if i['type'] == "constructor") args = { '_coins': wrapped_coins, '_underlying_coins': underlying_coins, '_token': token, '_pool': swap, '_curve': swap, } deployment_args = [args[i['name']] for i in abi] + [_tx_params()] zap_deployer.deploy(*deployment_args)