def setup(): """ Connect to badger system, and configure multisig for running transactions in local fork without access to accounts """ # Connect badger system from file badger = connect_badger("deploy-final.json") digg = connect_digg("deploy-final.json") # Sanity check file addresses expectedMultisig = "0xB65cef03b9B89f99517643226d76e286ee999e77" assert badger.devMultisig == expectedMultisig # Multisig wrapper multi = GnosisSafe(badger.devMultisig, testMode=True) manager = BadgerRewardsManager.at( "0x5B60952481Eb42B66bdfFC3E049025AC5b91c127") for key in strat_keys: print(key) strategy = badger.getStrategy(key) multi.execute( MultisigTxMetadata( description="Transfer Keeper for {}".format(key)), { "to": strategy.address, "data": strategy.setKeeper.encode_input(manager) }, ) return manager
def main():
    """Upgrade the BadgerTree proxy to pre-deployed logic, sanity-check its
    role assignments, then verify a pending rewards tree against the
    currently published one.
    """
    badger = connect_badger(badger_config.prod_json,
                            load_keeper=True,
                            load_guardian=True)

    # Content hash of the pending rewards root; the matching JSON file is
    # expected to exist locally.
    pendingContentHash = (
        "0x346ec98585b52d981d43584477e1b831ce32165cb8e0a06d14d236241b36328e")
    pendingFile = "rewards-1-" + str(pendingContentHash) + ".json"
    with open(pendingFile) as f:
        after_file = json.load(f)

    pendingRewards = after_file
    # pendingRewards = fetch_current_rewards_tree(badger)
    currentRewards = fetch_current_rewards_tree(badger)

    # Fund keeper + guardian so they can send transactions on a local fork.
    accounts[0].transfer(badger.keeper, Wei("5 ether"))
    accounts[0].transfer(badger.guardian, Wei("5 ether"))

    # Upgrade Rewards Tree: point the proxy at already-deployed new logic.
    multi = GnosisSafe(badger.devMultisig)
    newLogic = BadgerTree.at("0x0f81D3f48Fedb8E67a5b87A8a4De57766157f19B")

    multi.execute(
        MultisigTxMetadata(description="Upgrade Tree", ),
        {
            "to": badger.opsProxyAdmin.address,
            "data": badger.opsProxyAdmin.upgrade.encode_input(
                badger.badgerTree, newLogic),
        },
    )

    # Post-upgrade access-control checks: the expected holder has each role
    # and the member count is exactly one.
    assert (badger.badgerTree.hasRole(DEFAULT_ADMIN_ROLE,
                                      badger.devMultisig.address) == True)
    assert badger.badgerTree.getRoleMemberCount(DEFAULT_ADMIN_ROLE) == 1
    assert badger.badgerTree.hasRole(ROOT_PROPOSER_ROLE,
                                     badger.keeper.address) == True
    assert badger.badgerTree.getRoleMemberCount(ROOT_PROPOSER_ROLE) == 1
    # assert badger.badgerTree.hasRole(ROOT_VALIDATOR_ROLE, badger.guardian.address) == True
    # assert badger.badgerTree.getRoleMemberCount(ROOT_VALIDATOR_ROLE) == 1
    assert badger.badgerTree.hasRole(PAUSER_ROLE,
                                     badger.guardian.address) == True
    assert badger.badgerTree.getRoleMemberCount(PAUSER_ROLE) == 1
    assert badger.badgerTree.hasRole(UNPAUSER_ROLE,
                                     badger.devMultisig.address) == True
    assert badger.badgerTree.getRoleMemberCount(UNPAUSER_ROLE) == 1

    # Cross-check the pending tree against the current one over its block range.
    verify_rewards(
        badger,
        pendingRewards["startBlock"],
        pendingRewards["endBlock"],
        currentRewards,
        pendingRewards,
    )
def generate(self, badger, multi, key, distributions, start=0, duration=0, end=0):
    """Build geyser unlock-schedule tuples for the sett ``key``.

    For each non-zero ``(asset, amount)`` in ``distributions``, ensures the
    sett's geyser is an approved recipient of the rewards escrow (via the
    dev multisig), then collects a ``(geyser, token, amount, duration,
    start)`` tuple.

    Returns the list of collected tuples. ``end`` is accepted for interface
    compatibility but unused.
    """
    # Loop invariants hoisted: geyser and escrow depend only on `key`.
    # (The original re-fetched both, and rebuilt a GnosisSafe wrapper —
    # shadowing the `multi` argument — on every iteration, and also created
    # an unused opsMultisig wrapper.)
    geyser = badger.getGeyser(key)
    rewardsEscrow = badger.rewardsEscrow
    multi = GnosisSafe(badger.devMultisig)

    dists = []
    for asset, dist in distributions.items():
        if dist == 0:
            continue
        console.print(
            "===== Distributions for asset {} on {} =====".format(asset, key),
            style="bold yellow",
        )
        token = asset_to_address(asset)
        print(key, geyser, rewardsEscrow)

        # Approve the geyser as an escrow recipient once, if needed.
        # (A duplicate second approval block was removed: it could never
        # fire after the first approval succeeded.)
        if not rewardsEscrow.isApproved(geyser):
            multi.execute(
                MultisigTxMetadata(description="Approve Recipient"),
                {
                    "to": rewardsEscrow.address,
                    "data": rewardsEscrow.approveRecipient.encode_input(geyser),
                },
            )

        console.print(
            "Geyser Distribution for {}: {}".format(key, val(dist)),
            style="yellow",
        )
        dists.append((geyser, token, dist, duration, start))

    console.log(key, dists)
    return dists
def multisig_action(badger: BadgerSystem):
    """Set the farm pool id on the bDiggBtcb strategy via the ops multisig."""
    safe = GnosisSafe(badger.opsMultisig)
    sett_key = "native.bDiggBtcb"
    vault = badger.getSett(sett_key)
    strat = badger.getStrategy(sett_key)

    safe.execute(
        MultisigTxMetadata(description="Set PoolId"),
        {
            "to": strat.address,
            "data": strat.setWantPid.encode_input(104),
        },
    )

    # Confirm the change landed on-chain.
    assert strat.wantPid() == 104
def configure_router(self):
    """Register every strategy with the swap router via the admin multisig."""
    safe = GnosisSafe(self.admin)
    for strat in self.strategies.values():
        payload = {
            "to": strat.address,
            "data": self.router.addSwapStrategy.encode_input(strat.address),
        }
        safe.execute(
            MultisigTxMetadata(description="Add Swap Strategy {}".format(
                strat.address)),
            payload,
        )
def main():
    """Deploy fresh BadgerTree logic, upgrade the proxy through the dev
    multisig, verify role assignments, and (on a fork) test claims against
    the pending rewards tree.
    """
    badger = connect_badger(badger_config.prod_json,
                            load_keeper=True,
                            load_guardian=True)

    pendingContentHash = "0x07baafa95bf7c39ba948753dda64b2e82854b5a7597f6de9e15c23a96d0bfad3"
    pendingFile = "rewards-1-" + str(pendingContentHash) + ".json"
    with open(pendingFile) as f:
        after_file = json.load(f)

    pendingRewards = after_file
    # NOTE(review): the file contents loaded above are immediately
    # overwritten by the fetched tree on the next line — confirm which
    # source is actually intended.
    pendingRewards = fetch_current_rewards_tree(badger)
    currentRewards = fetch_current_rewards_tree(badger)

    # Fund keeper + guardian so they can send transactions on a local fork.
    accounts[0].transfer(badger.keeper, Wei("5 ether"))
    accounts[0].transfer(badger.guardian, Wei("5 ether"))

    # Upgrade Rewards Tree: deploy new logic from the keeper, then point the
    # proxy at it via the dev proxy admin.
    multi = GnosisSafe(badger.devMultisig)
    newLogic = BadgerTree.deploy({"from": badger.keeper})
    multi.execute(
        MultisigTxMetadata(description="Upgrade Tree", ),
        {
            "to": badger.devProxyAdmin.address,
            "data": badger.devProxyAdmin.upgrade.encode_input(
                badger.badgerTree, newLogic),
        },
    )

    # Post-upgrade access-control checks: expected holder per role, and
    # exactly one member each.
    assert badger.badgerTree.hasRole(DEFAULT_ADMIN_ROLE,
                                     badger.devMultisig.address) == True
    assert badger.badgerTree.getRoleMemberCount(DEFAULT_ADMIN_ROLE) == 1
    assert badger.badgerTree.hasRole(ROOT_PROPOSER_ROLE,
                                     badger.keeper.address) == True
    assert badger.badgerTree.getRoleMemberCount(ROOT_PROPOSER_ROLE) == 1
    assert badger.badgerTree.hasRole(ROOT_VALIDATOR_ROLE,
                                     badger.guardian.address) == True
    assert badger.badgerTree.getRoleMemberCount(ROOT_VALIDATOR_ROLE) == 1
    assert badger.badgerTree.hasRole(PAUSER_ROLE,
                                     badger.guardian.address) == True
    assert badger.badgerTree.getRoleMemberCount(PAUSER_ROLE) == 1
    assert badger.badgerTree.hasRole(UNPAUSER_ROLE,
                                     badger.devMultisig.address) == True
    assert badger.badgerTree.getRoleMemberCount(UNPAUSER_ROLE) == 1

    # verify_rewards(badger, pendingRewards["startBlock"], pendingRewards["endBlock"], currentRewards, pendingRewards)
    # push_rewards(badger, pendingContentHash)

    # Only exercise claims when running against a live (forked) RPC.
    if rpc.is_active():
        test_claims(badger, pendingRewards["startBlock"],
                    pendingRewards["endBlock"], currentRewards, pendingRewards)
def configure_strategies_grant_swapper_role(self, swapper):
    """Grant ``swapper`` the SWAPPER_ROLE on every strategy.

    Executed through the admin multisig, one grantRole tx per strategy.
    """
    admin = self.admin
    multi = GnosisSafe(admin)
    for strategy in self.strategies.values():
        multi.execute(
            MultisigTxMetadata(
                description="Add Swapper Role to {}".format(swapper)),
            {
                # BUG FIX: the tx target must be the strategy whose
                # grantRole calldata is encoded below — the original sent
                # the call to the swapper address, so the role was never
                # granted on the strategy.
                "to": strategy.address,
                "data": strategy.grantRole.encode_input(
                    strategy.SWAPPER_ROLE(), swapper),
            },
        )
def grant_token_locking_permission(badger: BadgerSystem, locker):
    """Give ``locker`` the token-locker role on each geyser in ``keys``."""
    safe = GnosisSafe(badger.devMultisig)
    for geyser_key in keys:
        geyser = badger.getGeyser(geyser_key)
        print(geyser_key, geyser)
        safe.execute(
            MultisigTxMetadata(
                description="Add Geyser permission for {} to {}".format(
                    geyser_key, locker)),
            {
                "to": geyser.address,
                "data": geyser.grantRole.encode_input(TOKEN_LOCKER_ROLE,
                                                      locker),
            },
        )
        # Verify the role actually landed on-chain.
        assert geyser.hasRole(TOKEN_LOCKER_ROLE, locker)
def main():
    """Read the current digg rebase timing parameters, shorten only the
    rebase window length to 6 hours via the dev multisig, then run two test
    rebases and print their call traces.
    """
    badger = connect_badger(badger_config.prod_json)
    deployer = badger.deployer  # NOTE: not used below
    multi = GnosisSafe(badger.devMultisig)

    # Snapshot existing timing parameters so only the window length changes.
    rebaseParams = {}
    rebaseParams[
        "minRebaseTimeIntervalSec"] = badger.digg.uFragmentsPolicy.minRebaseTimeIntervalSec(
    )
    rebaseParams[
        "rebaseWindowOffsetSec"] = badger.digg.uFragmentsPolicy.rebaseWindowOffsetSec(
    )
    rebaseParams[
        "rebaseWindowLengthSec"] = badger.digg.uFragmentsPolicy.rebaseWindowLengthSec(
    )

    console.print(rebaseParams)

    newWindowLength = hours(6)
    console.print(newWindowLength)

    # Re-submit the old interval/offset with the new window length.
    multi.execute(
        MultisigTxMetadata(description="Set Rebase Params"),
        {
            "to": badger.digg.uFragmentsPolicy.address,
            "data": badger.digg.uFragmentsPolicy.setRebaseTimingParameters.
            encode_input(
                rebaseParams["minRebaseTimeIntervalSec"],
                rebaseParams["rebaseWindowOffsetSec"],
                newWindowLength,
            ),
        },
    )

    # Exercise rebase twice and inspect the traces.
    chain.mine()
    tx = badger.digg.orchestrator.rebase({"from": badger.deployer})
    print(tx.call_trace())
    tx = badger.digg.orchestrator.rebase({"from": badger.deployer})
    print(tx.call_trace())
def test_signal_token_lock(badger: BadgerSystem, locker):
    """Signal BADGER + DIGG unlock schedules on each geyser in ``keys``
    through the ops multisig, then print the resulting schedules.
    """
    safe = GnosisSafe(badger.opsMultisig)
    for geyser_key in keys:
        geyser = badger.getGeyser(geyser_key)
        print(geyser_key, geyser)

        # One badger and one digg schedule per geyser, both starting now.
        schedules = [
            (geyser, badger.token, 1, 1, chain.time()),
            (geyser, badger.digg.token, 2, 2, chain.time()),
        ]
        safe.execute(
            MultisigTxMetadata(description="Test signal token lock"),
            {
                "to": locker.address,
                "data": locker.signalTokenLocks.encode_input(schedules),
            },
        )

        print(geyser.getUnlockSchedulesFor(badger.token))
        print(geyser.getUnlockSchedulesFor(badger.digg.token))
def setup_badger(badger: BadgerSystem):
    """Configure cake swap paths on the bDiggBtcb strategy via the ops multisig."""
    sett_key = "native.bDiggBtcb"
    sett = badger.getSett(sett_key)
    strategy = badger.getStrategy(sett_key)
    safe = GnosisSafe(badger.opsMultisig)

    # cake -> token0, routed through btcb.
    safe.execute(
        MultisigTxMetadata(description="Set path"),
        {
            "to": strategy.address,
            "data": strategy.setTokenSwapPath.encode_input(
                registry.pancake.cake,
                strategy.token0(),
                [registry.pancake.cake, registry.tokens.btcb,
                 strategy.token0()],
            ),
        },
    )

    # cake -> token1. NOTE(review): this path ends at btcb and does not
    # include token1 itself — presumably token1 *is* btcb here; confirm.
    safe.execute(
        MultisigTxMetadata(description="Set path"),
        {
            "to": strategy.address,
            "data": strategy.setTokenSwapPath.encode_input(
                registry.pancake.cake,
                strategy.token1(),
                [registry.pancake.cake, registry.tokens.btcb],
            ),
        },
    )
def transfer(self, token, amount, recipient):
    """Move ``amount`` of ``token`` from the rewards escrow to ``recipient``
    through the dev multisig, approving the recipient first if needed.
    """
    escrow = self.badger.rewardsEscrow
    safe = GnosisSafe(self.badger.devMultisig)

    # Approve Geyser as recipient if required
    if not escrow.isApproved(recipient):
        safe.execute(
            MultisigTxMetadata(
                description="Approve Recipient " + recipient.address),
            {
                "to": escrow.address,
                "data": escrow.approveRecipient.encode_input(recipient),
            },
        )

    before = token.balanceOf(recipient)

    # Top up Tree
    # TODO: Make the amount based on what we'll require for the next week
    tx_id = safe.addTx(
        MultisigTxMetadata(description="Send {} {} to {}".format(
            token, amount, recipient)),
        {
            "to": escrow.address,
            "data": escrow.transfer.encode_input(token, recipient, amount),
        },
    )
    tx = safe.executeTx(tx_id)
    print(tx.call_trace())

    after = token.balanceOf(recipient)
    console.print({"before": before, "after": after})
    # The recipient must have received exactly `amount`.
    assert after == before + amount
def main():
    """Push a hard-coded market price report to the digg median oracle
    through the centralized-oracle multisig.
    """
    badger = connect_badger()
    digg = badger.digg

    link_price = 0.9566301
    # The median oracle expects an 18-decimal fixed-point value.
    report_value = link_price * 10**18

    oracle_safe = GnosisSafe(digg.centralizedOracle)

    print("Raw Link")
    print(link_price)
    print("Formatted for Median Oracle")
    print(f"{report_value:.0f}")

    tx = oracle_safe.execute(
        MultisigTxMetadata(description="Set Market Data"),
        {
            "to": digg.marketMedianOracle.address,
            "data": digg.marketMedianOracle.pushReport.encode_input(
                report_value),
        },
    )
def testTransactions(self):
    """End-to-end dry run of the weekly emissions flow on a fork: top up the
    BadgerTree with BADGER and DIGG from the rewards escrow, top up the
    rewards manager with DIGG, build geyser distribution tuples, and signal
    unlock schedules in bulk via the ops multisig.
    """
    rewardsEscrow = self.badger.rewardsEscrow
    multi = GnosisSafe(self.badger.devMultisig)
    opsMulti = GnosisSafe(self.badger.opsMultisig)

    # Setup: fund the first multisig owner so it can pay gas on the fork.
    accounts[7].transfer(multi.get_first_owner(), Wei("2 ether"))
    print(
        "Supplied ETH",
        accounts.at(multi.get_first_owner(), force=True).balance(),
    )

    badger = self.badger
    tree = self.badger.badgerTree

    before = badger.token.balanceOf(tree)
    top_up = Wei("100000 ether")
    # "gwei" units — presumably because DIGG uses 9 decimals; confirm.
    top_up_digg = Wei("40 gwei")
    harvest_badger = Wei("30000 ether")  # NOTE: unused (see disabled block below)
    harvest_digg = Wei("40 gwei")

    # Top up Tree
    # TODO: Make the amount based on what we'll require for the next week
    id = multi.addTx(
        MultisigTxMetadata(description="Top up badger tree with Badger"),
        {
            "to": rewardsEscrow.address,
            "data": rewardsEscrow.transfer.encode_input(badger.token, tree,
                                                        top_up),
        },
    )
    tx = multi.executeTx(id)

    after = badger.token.balanceOf(tree)
    assert after == before + top_up

    before = badger.digg.token.balanceOf(tree)
    tx = multi.execute(
        MultisigTxMetadata(description="Top up badger tree with DIGG"),
        {
            "to": rewardsEscrow.address,
            "data": rewardsEscrow.transfer.encode_input(badger.digg.token,
                                                        tree, top_up_digg),
        },
    )
    print(tx.call_trace(), before, after)

    after = badger.digg.token.balanceOf(tree)
    assert after == before + top_up_digg

    # (BADGER top-up of the rewards manager is intentionally disabled here;
    # restore from history if needed.)

    multi.execute(
        MultisigTxMetadata(description="Top up rewards manager with DIGG"),
        {
            "to": rewardsEscrow.address,
            "data": rewardsEscrow.transfer.encode_input(
                badger.digg.token, badger.badgerRewardsManager, harvest_digg),
        },
    )

    # grant_token_locking_permission(self.badger, self.badger.unlockScheduler)

    # Collect geyser distribution tuples for every sett that has one.
    geyserDists = []
    for key, distribution in self.distributions.items():
        if distribution.hasGeyserDistribution() == True:
            print("has geyser distribution", key)
            dist = GeyserDistributor()
            dists = dist.generate(
                badger,
                multi,
                key,
                distributions=distribution.getGeyserDistributions(),
                start=self.start,
                duration=self.duration,
                end=self.end,
            )
            geyserDists.extend(dists)
            console.log("after " + key, geyserDists)

    # Add unlock schedeules inbulk
    console.log(geyserDists)
    tx = opsMulti.execute(
        MultisigTxMetadata(description="Signal unlock schedules"),
        {
            "to": badger.unlockScheduler.address,
            "data": badger.unlockScheduler.signalTokenLocks.encode_input(
                geyserDists),
        },
    )
    print(tx.call_trace())

    # Dump the resulting schedules for both reward tokens on every geyser.
    tokens = [self.badger.token.address, self.badger.digg.token.address]
    for key in geyser_keys:
        print(key)
        geyser = self.badger.getGeyser(key)
        for token in tokens:
            print(token)
            console.log(
                "{} schedules for {}".format(token, key),
                geyser.getUnlockSchedulesFor(token),
            )
def main():
    """Compute five days of BADGER and DIGG staking-rewards emissions from
    the active schedule, print them, and top up the rewards manager from
    the rewards escrow via the dev multisig.
    """
    badger = connect_badger(badger_config.prod_json)
    rewards = get_active_rewards_schedule(badger)

    # Five days of daily BADGER emissions per pool.
    b1 = rewards.getDistributions(
        "native.uniBadgerWbtc").getToStakingRewardsDaily("badger") * 5
    b2 = rewards.getDistributions(
        "native.sushiBadgerWbtc").getToStakingRewardsDaily("badger") * 5
    b3 = rewards.getDistributions("native.badger").getToStakingRewardsDaily(
        "badger") * 5
    total_badger = b1 + b2 + b3

    # DIGG schedules are stored in shares; convert to fragments.
    d1 = shares_to_fragments(
        rewards.getDistributions(
            "native.uniDiggWbtc").getToStakingRewardsDaily("digg")) * 5
    d2 = shares_to_fragments(
        rewards.getDistributions(
            "native.sushiDiggWbtc").getToStakingRewardsDaily("digg")) * 5
    # NOTE(review): native.digg is scaled by 6 while every other pool uses
    # 5 — confirm this is intentional.
    d3 = shares_to_fragments(
        rewards.getDistributions("native.digg").getToStakingRewardsDaily(
            "digg")) * 6
    total_digg = d1 + d2 + d3

    # Print BADGER totals.
    table = []
    table.append(["native.uniBadgerWbtc", val(b1)])
    table.append(["native.sushiBadgerWbtc", val(b2)])
    table.append(["native.badger", val(b3)])
    table.append(["total badger", val(total_badger)])
    print(tabulate(table, headers=["metric", "value"]))

    # Print DIGG totals (9-decimal token).
    table = []
    table.append(["native.uniDiggWbtc", val(d1, decimals=9)])
    table.append(["native.sushiDiggWbtc", val(d2, decimals=9)])
    table.append(["native.digg", val(d3, decimals=9)])
    table.append(["total digg", val(total_digg, decimals=9)])
    print(tabulate(table, headers=["metric", "value"]))

    rewards.printState("Geyser Emissions")

    # Generate Sufficient
    multi = GnosisSafe(badger.devMultisig)
    print(badger.badgerRewardsManager)

    multi.execute(
        MultisigTxMetadata(description="Transfer Remaining Weekly Badger"), {
            "to": badger.rewardsEscrow.address,
            "data": badger.rewardsEscrow.transfer.encode_input(
                badger.token, badger.badgerRewardsManager, total_badger)
        })
    assert badger.token.balanceOf(badger.badgerRewardsManager) >= total_badger

    multi.execute(
        # BUG FIX: this tx moves DIGG, but the description previously said
        # "Transfer Remaining Weekly Badger".
        MultisigTxMetadata(description="Transfer Remaining Weekly Digg"), {
            "to": badger.rewardsEscrow.address,
            "data": badger.rewardsEscrow.transfer.encode_input(
                badger.digg.token, badger.badgerRewardsManager, total_digg)
        })
    assert badger.digg.token.balanceOf(
        badger.badgerRewardsManager) >= total_digg
def migrate_strategies_via_migrator(badger):
    """Migrate the three native curve setts (ren/sbtc/tbtc) from their old
    strategies to the current ones through a MigrationAssistant contract,
    executed from the ops multisig, then re-earn and print balance
    snapshots before and after.
    """
    migrator = MigrationAssistant.at("0x8b459f4d8949f3748dc34430bc91441c954dc391")

    # Encode (want token, old strategy, new strategy) per sett.
    data = migrator.migrate.encode_input(
        badger.getController("native").address,
        [
            (
                registry.curve.pools.renCrv.token,
                "0x444B860128B7Bf8C0e864bDc3b7a36a940db7D88",
                badger.getStrategy("native.renCrv").address,
            ),
            (
                registry.curve.pools.sbtcCrv.token,
                "0x3Efc97A8e23f463e71Bf28Eb19690d097797eb17",
                badger.getStrategy("native.sbtcCrv").address,
            ),
            (
                registry.curve.pools.tbtcCrv.token,
                "0xE2fA197eAA5C726426003074147a08beaA59403B",
                badger.getStrategy("native.tbtcCrv").address,
            ),
        ],
    )

    # Setts under migration...
    a = badger.getSett("native.renCrv")
    b = badger.getSett("native.sbtcCrv")
    c = badger.getSett("native.tbtcCrv")
    # ...their current (destination) strategies...
    d = badger.getStrategy("native.renCrv")
    e = badger.getStrategy("native.sbtcCrv")
    f = badger.getStrategy("native.tbtcCrv")
    # ...and the strategies being migrated away from.
    old_ren = interface.IStrategy("0x444B860128B7Bf8C0e864bDc3b7a36a940db7D88")
    old_sbtc = interface.IStrategy("0x3Efc97A8e23f463e71Bf28Eb19690d097797eb17")
    old_tbtc = interface.IStrategy("0xE2fA197eAA5C726426003074147a08beaA59403B")

    # Pre-migration snapshot.
    console.print(
        {
            "ren": a.balance(),
            "sbtc": b.balance(),
            "tbtc": c.balance(),
            "ren_ppfs": a.getPricePerFullShare(),
            "sbtc_ppfs": b.getPricePerFullShare(),
            "tbtc_ppfs": c.getPricePerFullShare(),
            "ren_strat_pool": d.balanceOfPool(),
            "ren_strat_want": d.balanceOfWant(),
            "sbtc_strat_pool": e.balanceOfPool(),
            "sbtc_strat_want": e.balanceOfWant(),
            "tbtc_strat_pool": f.balanceOfPool(),
            "tbtc_strat_want": f.balanceOfWant(),
            "old_ren_strat_pool": old_ren.balanceOfPool(),
            "old_ren_strat_want": old_ren.balanceOfWant(),
            "old_sbtc_strat_pool": old_sbtc.balanceOfPool(),
            "old_sbtc_strat_want": old_sbtc.balanceOfWant(),
            "old_tbtc_strat_pool": old_tbtc.balanceOfPool(),
            "old_tbtc_strat_want": old_tbtc.balanceOfWant(),
        }
    )

    tokens = [
        interface.IERC20(registry.curve.pools.renCrv.token),
        interface.IERC20(registry.curve.pools.sbtcCrv.token),
        interface.IERC20(registry.curve.pools.tbtcCrv.token),
    ]

    entities = [
        a,
        b,
        c,
        d,
        e,
        f,
        badger.getController("native"),
        "0x444B860128B7Bf8C0e864bDc3b7a36a940db7D88",
        "0x3Efc97A8e23f463e71Bf28Eb19690d097797eb17",
        "0xE2fA197eAA5C726426003074147a08beaA59403B",
    ]

    # Tabulate raw want-token balances for every involved party.
    table = []
    for entity in entities:
        for token in tokens:
            table.append([entity, token.name(), token.balanceOf(entity)])

    print(tabulate(table, ["entity", "asset", "value"]))

    multi = GnosisSafe(badger.opsMultisig)

    # 'to': badger.getController("native").address,
    # operation 1 — presumably Gnosis Safe DELEGATECALL so the migrator
    # runs in the safe's context; confirm against the GnosisSafe wrapper.
    tx = multi.execute(
        MultisigTxMetadata(description="CRV Migration"),
        {"to": migrator.address, "data": data, "operation": 1},
    )

    # Push idle want from the setts into the new strategies.
    a.earn({"from": badger.deployer})
    b.earn({"from": badger.deployer})
    c.earn({"from": badger.deployer})

    # Post-migration snapshot for comparison with the one above.
    console.print(
        {
            "ren": a.balance(),
            "sbtc": b.balance(),
            "tbtc": c.balance(),
            "ren_ppfs": a.getPricePerFullShare(),
            "sbtc_ppfs": b.getPricePerFullShare(),
            "tbtc_ppfs": c.getPricePerFullShare(),
            "ren_strat_pool": d.balanceOfPool(),
            "ren_strat_want": d.balanceOfWant(),
            "sbtc_strat_pool": e.balanceOfPool(),
            "sbtc_strat_want": e.balanceOfWant(),
            "tbtc_strat_pool": f.balanceOfPool(),
            "tbtc_strat_want": f.balanceOfWant(),
            "old_ren_strat_pool": old_ren.balanceOfPool(),
            "old_ren_strat_want": old_ren.balanceOfWant(),
            "old_sbtc_strat_pool": old_sbtc.balanceOfPool(),
            "old_sbtc_strat_want": old_sbtc.balanceOfWant(),
            "old_tbtc_strat_pool": old_tbtc.balanceOfPool(),
            "old_tbtc_strat_want": old_tbtc.balanceOfWant(),
        }
    )
def main(): """ Connect to badger system, and configure multisig for running transactions in local fork without access to accounts """ # Connect badger system from file badger = connect_badger() digg = badger.digg # Sanity check file addresses expectedMultisig = "0xB65cef03b9B89f99517643226d76e286ee999e77" assert badger.devMultisig == expectedMultisig if rpc.is_active(): distribute_test_ether(badger.devMultisig, Wei("5 ether")) # Multisig wrapper # Get price data from sushiswap, uniswap, and coingecko digg_usd_coingecko = 41531.72 btc_usd_coingecko = 32601.13 digg_per_btc = digg_usd_coingecko / btc_usd_coingecko uniTWAP = get_average_daily_price("scripts/oracle/data/uni_digg_hour") sushiTWAP = get_average_daily_price("scripts/oracle/data/sushi_digg_hour") averageTWAP = Average([uniTWAP, sushiTWAP]) console.print({ "uniTWAP": uniTWAP, "sushiTWAP": sushiTWAP, "averageTWAP": averageTWAP }) supplyBefore = digg.token.totalSupply() print("spfBefore", digg.token._sharesPerFragment()) print("supplyBefore", digg.token.totalSupply()) marketValue = Wei(str(averageTWAP) + " ether") print(marketValue) print(int(marketValue * 10**18)) print("digg_per_btc", digg_per_btc, averageTWAP, marketValue) if rpc.is_active(): distribute_test_ether(digg.centralizedOracle, Wei("5 ether")) centralizedMulti = GnosisSafe(digg.centralizedOracle) print(digg.marketMedianOracle.providerReports(digg.centralizedOracle, 0)) print(digg.marketMedianOracle.providerReports(digg.centralizedOracle, 1)) print(digg.cpiMedianOracle.providerReports(digg.constantOracle, 0)) print(digg.cpiMedianOracle.providerReports(digg.constantOracle, 1)) print(digg.cpiMedianOracle.getData.call()) sushi = SushiswapSystem() pair = sushi.getPair(digg.token, registry.tokens.wbtc) uni = UniswapSystem() uniPair = uni.getPair(digg.token, registry.tokens.wbtc) print("pair before", pair.getReserves()) print("uniPair before", uniPair.getReserves()) tx = centralizedMulti.execute( MultisigTxMetadata(description="Set Market Data"), { 
"to": digg.marketMedianOracle.address, "data": digg.marketMedianOracle.pushReport.encode_input(marketValue), }, ) chain.mine() print(tx.call_trace()) print(tx.events) chain.sleep(hours(0.4)) chain.mine() in_rebase_window = digg.uFragmentsPolicy.inRebaseWindow() while not in_rebase_window: print("Not in rebase window...") chain.sleep(hours(0.1)) chain.mine() in_rebase_window = digg.uFragmentsPolicy.inRebaseWindow() tx = digg.orchestrator.rebase({"from": accounts[0]}) chain.mine() supplyAfter = digg.token.totalSupply() print("spfAfter", digg.token._sharesPerFragment()) print("supplyAfter", supplyAfter) print("supplyChange", supplyAfter / supplyBefore) print("supplyChangeOtherWay", supplyBefore / supplyAfter) print("pair after", pair.getReserves()) print("uniPair after", uniPair.getReserves())
def init_prod_digg(badger: BadgerSystem, user):
    """One-shot production initialization of the DIGG system: deploy the
    DiggSeeder behind a proxy, wire the uFragments policy/oracles, fund the
    seeder (WBTC + all DIGG + BADGER), grant it scheduling/admin roles,
    run preSeed/seed, and unpause the digg setts.

    NOTE(review): `user` and `digg_liquidity_amount` are not used in the
    live code below — confirm before removing.
    """
    deployer = badger.deployer
    digg = badger.digg
    multi = GnosisSafe(badger.devMultisig)
    digg_liquidity_amount = 1000000000
    wbtc_liquidity_amount = 100000000
    print("TOKEN_LOCKER_ROLE", TOKEN_LOCKER_ROLE)
    # Pre-computed role hash; presumably equals TOKEN_LOCKER_ROLE — confirm.
    locker_role = "0x4bf6f2cdcc8ad6c087a7a4fbecf46150b3686b71387234cac2b3e2e6dc70e345"

    # TODO: Have this as proxy in real deploy
    seederLogic = DiggSeeder.deploy({"from": deployer})
    seeder = deploy_proxy(
        "DiggSeeder",
        DiggSeeder.abi,
        seederLogic.address,
        badger.devProxyAdmin.address,
        seederLogic.initialize.encode_input(digg.diggDistributor),
        deployer,
    )

    # (Historical commented-out DAO-liquidity bootstrap code — Aragon
    # voting, timelock upgrade, crvRen->WBTC conversion — removed for
    # readability; restore from version control if needed.)

    # PROD: Configure DIGG — wire oracles, orchestrator and monetary policy.
    digg.uFragmentsPolicy.setCpiOracle(
        digg.cpiMedianOracle,
        {"from": deployer},
    )
    digg.uFragmentsPolicy.setMarketOracle(
        digg.marketMedianOracle,
        {"from": deployer},
    )
    digg.uFragmentsPolicy.setOrchestrator(
        digg.orchestrator,
        {"from": deployer},
    )
    digg.uFragments.setMonetaryPolicy(
        digg.uFragmentsPolicy,
        {"from": deployer},
    )

    wbtc = interface.IERC20(registry.tokens.wbtc)

    # ===== Move initial liquidity funds to Seeder =====
    multi.execute(
        MultisigTxMetadata(
            description="Transfer initial liquidity WBTC to the Seeder",
        ),
        {
            "to": wbtc.address,
            "data": wbtc.transfer.encode_input(seeder, 200000000),
        },
    )

    # ===== Move DIGG to Seeder =====
    # The deployer holds the full supply at this point.
    digg.token.transfer(seeder, digg.token.totalSupply(), {"from": deployer})

    # ===== Move Required Badger to Seeder from RewardsEscrow =====
    multi.execute(
        MultisigTxMetadata(
            description="Move Required Badger to Seeder from RewardsEscrow",
        ),
        {
            "to": badger.rewardsEscrow.address,
            "data": badger.rewardsEscrow.transfer.encode_input(
                badger.token, seeder, Wei("30000 ether")),
        },
    )

    # ===== Add DIGG token to all geyser distribution lists =====
    # (Also, add Seeder as approved schedule creator)
    geyser_keys = [
        "native.badger",
        "native.renCrv",
        "native.sbtcCrv",
        "native.tbtcCrv",
        "native.uniBadgerWbtc",
        "harvest.renCrv",
        "native.sushiWbtcEth",
        "native.sushiBadgerWbtc",
        "native.uniDiggWbtc",
        "native.sushiDiggWbtc",
    ]

    for key in geyser_keys:
        geyser = badger.getGeyser(key)
        print(key, geyser)
        id = multi.addTx(
            MultisigTxMetadata(
                description="Add DIGG token to distribution tokens on {} geyser"
                .format(key),
            ),
            {
                "to": geyser.address,
                "data": geyser.addDistributionToken.encode_input(digg.token),
            },
        )
        tx = multi.executeTx(id)
        assert geyser.hasRole(DEFAULT_ADMIN_ROLE, badger.devMultisig)

        multi.execute(
            MultisigTxMetadata(
                description="Allow Seeder to set unlock schedules on {} geyser"
                .format(key),
            ),
            {
                "to": geyser.address,
                "data": geyser.grantRole.encode_input(locker_role, seeder),
            },
        )
        assert geyser.hasRole(locker_role, seeder)

    # Seeder needs to have admin role to config Faucets. Remove role as part of seed.
    rewards_keys = [
        "native.digg",
        "native.uniDiggWbtc",
        "native.sushiDiggWbtc",
    ]

    # Hand faucet admin to seeder + multisig and drop the deployer's admin.
    for key in rewards_keys:
        rewards = badger.getSettRewards(key)
        rewards.grantRole(DEFAULT_ADMIN_ROLE, seeder, {"from": deployer})
        rewards.grantRole(DEFAULT_ADMIN_ROLE, badger.devMultisig,
                          {"from": deployer})
        rewards.renounceRole(DEFAULT_ADMIN_ROLE, deployer, {"from": deployer})

    # ===== Seed Prep =====
    # The seeder must hold the full DIGG supply and the WBTC liquidity.
    print("wbtc.balanceOf(seeder)", wbtc.balanceOf(seeder))
    assert digg.token.balanceOf(seeder) >= digg.token.totalSupply()
    assert wbtc.balanceOf(seeder) >= 200000000

    print(digg.diggDistributor.address)
    print("digg.diggDistributor", digg.diggDistributor.isOpen())

    # The seeder takes ownership of the airdrop distributor, then runs the
    # two-phase seeding.
    digg.diggDistributor.transferOwnership(seeder, {"from": deployer})

    print("prePreSeed", digg.token.balanceOf(seeder))
    seeder.preSeed({"from": deployer})
    print("postPreSeed", digg.token.balanceOf(seeder))
    seeder.seed({"from": deployer})

    # Unpause all Setts
    setts_to_unpause = [
        "native.digg",
        "native.uniDiggWbtc",
        "native.sushiDiggWbtc",
    ]

    for key in setts_to_unpause:
        sett = badger.getSett(key)
        id = multi.addTx(
            MultisigTxMetadata(description="Unpause Sett {}".format(key), ),
            {
                "to": sett.address,
                "data": sett.unpause.encode_input(),
            },
        )
        tx = multi.executeTx(id)
        assert sett.paused() == False
def main(): """ Connect to badger, distribute assets to specified test user, and keep ganache open. Ganache will run with your default brownie settings for mainnet-fork """ # The address to test with user = accounts.at(decouple.config("TEST_ACCOUNT"), force=True) badger = connect_badger("deploy-final.json", load_deployer=False, load_keeper=False, load_guardian=False) # TODO: After prod deployment, just connect instead. # claw = deploy_claw_minimal(badger.deployer, printToFile=True) # # Deploy claw setts # sushiswap = SushiswapSystem() # for (settId, empName) in [("native.sushiBClawUSDC", "bClaw"), ("native.sushiSClawUSDC", "sClaw")]: # params = sett_config.sushi.sushiClawUSDC.params # token = claw.emps[empName].tokenCurrency() # if sushiswap.hasPair(token, token_registry.wbtc): # params.want = sushiswap.getPair(token, token_registry.wbtc) # else: # params.want = sushiswap.createPair( # token, # token_registry.wbtc, # badger.deployer, # ) # want = params.want # params.badgerTree = badger.badgerTree # params.pid = sushiswap.add_chef_rewards(want) # strategist = badger.daoProxyAdmin # controller = badger.add_controller(settId) # badger.deploy_sett( # settId, # want, # controller, # governance=badger.daoProxyAdmin, # strategist=strategist, # keeper=badger.keeper, # guardian=badger.guardian, # ) # badger.deploy_strategy( # settId, # "StrategySushiLpOptimizer", # controller, # params, # governance=badger.daoProxyAdmin, # strategist=strategist, # keeper=badger.keeper, # guardian=badger.guardian, # ) # print_to_file(badger, "deploy-test.json") console.print( "[blue]=== 🦡 Test ENV for account {} 🦡 ===[/blue]".format(user)) tree = badger.badgerTree newLogic = BadgerTree.deploy({"from": badger.deployer}) multi = GnosisSafe(badger.opsMultisig) # Upgrade Tree multi.execute( MultisigTxMetadata(description="Upgrade Tree"), { "to": badger.opsProxyAdmin.address, "data": badger.opsProxyAdmin.upgrade.encode_input(tree, newLogic), }, ) # Publish test root publish_new_root(badger, 
params["root"], params["contentHash"]) console.print("[green]=== ✅ Test ENV Setup Complete ✅ ===[/green]") # Keep ganache open until closed time.sleep(days(365))
def setup_badger(badger: BadgerSystem): configs = { "native.pancakeBnbBtcb": { "want": registry.pancake.chefPairs.bnbBtcb, "token0": registry.tokens.btcb, "token1": registry.tokens.bnb, "performanceFeeStrategist": 1000, "performanceFeeGovernance": 1000, "withdrawalFee": 50, "wantPid": registry.pancake.chefPids.bnbBtcb, }, "native.bBadgerBtcb": { "want": registry.pancake.chefPairs.bBadgerBtcb, "token0": registry.tokens.bBadger, "token1": registry.tokens.btcb, "performanceFeeStrategist": 1000, "performanceFeeGovernance": 1000, "withdrawalFee": 50, "wantPid": registry.pancake.chefPids.bBadgerBtcb, }, "native.bDiggBtcb": { "want": registry.pancake.chefPairs.bDiggBtcb, "token0": registry.tokens.bDigg, "token1": registry.tokens.btcb, "performanceFeeStrategist": 1000, "performanceFeeGovernance": 1000, "withdrawalFee": 50, "wantPid": registry.pancake.chefPids.bDiggBtcb, }, } # Set paths key = "native.bDiggBtcb" sett = badger.getSett(key) strategy = badger.getStrategy(key) multi = GnosisSafe(badger.opsMultisig) multi.execute( MultisigTxMetadata(description="Set path"), { "to": strategy.address, "data": strategy.setTokenSwapPath.encode_input( registry.pancake.cake, strategy.token0(), [ registry.pancake.cake, registry.tokens.btcb, strategy.token0() ], ), }, ) multi.execute( MultisigTxMetadata(description="Set path"), { "to": strategy.address, "data": strategy.setTokenSwapPath.encode_input( registry.pancake.cake, strategy.token1(), [ registry.pancake.cake, registry.tokens.btcb, strategy.token1() ], ), }, )
def main():
    """
    Fork-test the 0xB1 OTC deal: execute the already-deployed OtcEscrow swap
    through the dev multisig, then verify the resulting TokenTimelock refuses
    an early release and pays out bBadger after the 182-day vesting period.

    Everything after the bare `return` is an alternate manual flow (deploy a
    fresh escrow and fund it) kept for reference; it is intentionally
    unreachable.

    Fixes vs. the previous revision: removed a duplicated `badger_total`
    assignment, and narrowed a bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) to `except Exception:`.
    """
    badger = connect_badger("deploy-final.json")

    # Fund a forked test account with gas + the tokens the flow needs.
    test_user = accounts.at(decouple.config("TEST_ACCOUNT"), force=True)
    distribute_test_ether(test_user, Wei("20 ether"))
    distribute_from_whales(test_user, assets=["bBadger", "badger", "usdc"])
    rest = get_active_rewards_schedule(badger)
    usdc = interface.IERC20(registry.tokens.usdc)

    # Deal terms: 25% discount on a $40.37 Badger price.
    usdc_per_badger = 40.37 * 0.75
    usdc_total = 13386240

    multi = GnosisSafe(badger.devMultisig)

    badger_total_scaled = usdc_total / usdc_per_badger
    badger_total = Wei(str(badger_total_scaled) + " ether")

    bBadger = badger.getSett("native.badger")
    ppfs = bBadger.getPricePerFullShare()

    # Convert the Badger amount into bBadger shares at the current
    # price-per-full-share.
    bBadger_total = int(badger_total / ppfs * 10**18)

    console.print({
        "TRADE": "BASED",
        "usdc_per_badger": usdc_per_badger,
        "usdc_total": usdc_total,
        "badger_total_scaled": badger_total_scaled,
        "badger_total": badger_total,
        "ppfs": ppfs,
        "bBadger_total": str(bBadger_total),
    })

    params = {
        "beneficiary": "0x3159b46a7829a0dbfa856888af768fe7146e7418",
        "duration": days(182),
        "usdcAmount": usdc_total * 10**6,
        "bBadgerAmount": bBadger_total,
        # "usdcAmount": 0,
        # "bBadgerAmount": 0,
    }

    console.print(params)

    # # Oxb1 Test
    beneficiary = accounts.at(params["beneficiary"], force=True)
    escrow = OtcEscrow.at("0x7163fB2fA38Ea3BBc1F8525F3d8D0417C0c9d903")

    # bBadger.transfer(badger.devMultisig, Wei("100000 ether"), {"from": test_user})

    # Snapshot balances before the swap so we can diff afterwards.
    pre = get_token_balances(
        [usdc, bBadger], [test_user, escrow, badger.devMultisig, beneficiary])
    pre.print()

    # assert usdc.balanceOf(params["beneficiary"]) >= params["usdcAmount"]
    # multi.execute(MultisigTxMetadata(description="Transfer to 0xb1"), {
    #     "to": bBadger.address,
    #     "data": bBadger.transfer.encode_input(escrow, bBadger_total + Wei("1000 ether"))
    # })
    # assert usdc.allowance(beneficiary, escrow) >= params["usdcAmount"]
    # usdc.approve(escrow, params["usdcAmount"], {"from": beneficiary})
    # tx = escrow.swap({"from": beneficiary})

    # Execute the swap from the dev multisig.
    tx = multi.execute(MultisigTxMetadata(description="Swap"), {
        "to": escrow.address,
        "data": escrow.swap.encode_input()
    }, print_output=False)
    chain.mine()

    print(tx.call_trace())

    # The swap deploys a TokenTimelock; pull its address from the event log.
    vesting = interface.ITokenTimelock(
        tx.events["VestingDeployed"][0]["vesting"])

    console.print({
        "token": vesting.token(),
        "beneficiary": vesting.beneficiary(),
        "releaseTime": to_utc_date(vesting.releaseTime()),
    })

    post = get_token_balances(
        [usdc, bBadger], [test_user, escrow, badger.devMultisig, beneficiary])
    diff_token_balances(pre, post)

    # Releasing before the timelock expires is expected to revert; narrow
    # except so ctrl-C / SystemExit are not swallowed.
    try:
        vesting.release({"from": test_user})
    except Exception:
        print("early vest failed!")

    # Fast-forward past the vesting period.
    chain.sleep(days(182))
    chain.mine()

    # End
    vesting.release({"from": test_user})

    post = get_token_balances(
        [usdc, bBadger], [test_user, escrow, badger.devMultisig, beneficiary])
    diff_token_balances(pre, post)

    return

    # --- Unreachable reference flow: deploy a fresh escrow and fund it ---
    escrow = OtcEscrow.deploy(
        params["beneficiary"],
        params["duration"],
        params["usdcAmount"],
        params["bBadgerAmount"],
        {"from": badger.deployer},
    )

    beneficiary = accounts.at(params["beneficiary"], force=True)
    usdc.transfer(beneficiary, params["usdcAmount"], {"from": test_user})
    usdc.transfer(beneficiary, 1500000000000, {"from": test_user})

    badger.token.transfer(badger.devMultisig, badger_total,
                          {"from": test_user})

    multi.execute(
        MultisigTxMetadata(description="Whitelist Multi"),
        {
            "to": bBadger.address,
            "data": bBadger.approveContractAccess.encode_input(
                badger.devMultisig),
        },
    )

    assert badger.token.balanceOf(badger.devMultisig) > Wei("100 ether")

    multi.execute(
        MultisigTxMetadata(description="Approve bBadger Contract"),
        {
            "to": badger.token.address,
            "data": badger.token.approve.encode_input(bBadger, badger_total),
        },
    )

    multi.execute(
        MultisigTxMetadata(description="Deposit"),
        {
            "to": bBadger.address,
            "data": bBadger.deposit.encode_input(badger_total)
        },
    )

    console.print(
        "bBadger.balanceOf(badger.devMultisig)",
        bBadger.balanceOf(badger.devMultisig), params["bBadgerAmount"],
        params["bBadgerAmount"] - bBadger.balanceOf(badger.devMultisig))

    assert bBadger.balanceOf(badger.devMultisig) >= params["bBadgerAmount"]

    chain.mine()
    chain.sleep(14)
    chain.mine()

    multi.execute(
        MultisigTxMetadata(description="Transfer"),
        {
            "to": bBadger.address,
            "data": bBadger.transfer.encode_input(escrow,
                                                  params["bBadgerAmount"]),
        },
    )

    assert bBadger.balanceOf(escrow) == params["bBadgerAmount"]

    multi.execute(
        MultisigTxMetadata(description="Revoke"),
        {
            "to": escrow.address,
            "data": escrow.revoke.encode_input()
        },
    )

    assert bBadger.balanceOf(escrow) == 0
    assert bBadger.balanceOf(badger.devMultisig) >= params["bBadgerAmount"]

    print(bBadger.balanceOf(badger.devMultisig))
    bBadger.transfer(escrow, params["bBadgerAmount"], {"from": test_user})

    pre = get_token_balances(
        [usdc, bBadger], [test_user, escrow, badger.devMultisig, beneficiary])
    console.print(pre)

    assert usdc.balanceOf(beneficiary) >= params["usdcAmount"]
    assert bBadger.balanceOf(escrow) == params["bBadgerAmount"]

    usdc.approve(escrow, params["usdcAmount"], {"from": beneficiary})
    tx = escrow.swap({"from": beneficiary})

    post = get_token_balances(
        [usdc, bBadger], [test_user, escrow, badger.devMultisig, beneficiary])

    console.print(tx.events)
    post.print()
    diff_token_balances(pre, post)

    vesting = interface.ITokenTimelock(
        tx.events["VestingDeployed"][0]["vesting"])

    console.print({
        "token": vesting.token(),
        "beneficiary": vesting.beneficiary(),
        "releaseTime": to_utc_date(vesting.releaseTime()),
    })

    chain.sleep(days(365))
    chain.mine()

    vesting.release({"from": test_user})
def setup():
    """
    Build the MEME honeypot fork-test fixture.

    Deploys the HoneypotMeme proxy, funds it with Badger from the rewards
    escrow via the dev multisig, mints the required MEME NFTs to the deployer,
    and hands partial NFT sets to two test accounts.

    Returns a dict of every actor and contract the tests need.
    """
    badger = connect_badger(badger_config.prod_json)
    deployer = accounts.load("badger_deployer")
    # distribute_from_whales(deployer)

    # Deploy Honeypot
    honeypotLogic = HoneypotMeme.deploy({"from": deployer})

    honeypot_params = DotMap(
        token=badger.token,
        amount=Wei("2500 ether"),
        nftIndicies=[97, 98, 99, 100, 101, 102],
        meme="0xe4605d46Fd0B3f8329d936a8b258D69276cBa264",
        badgerCollection="0x14dC10FA6E4878280F9CA0D9f32dDAEa8C7d4d45",
    )

    honeypot = deploy_proxy(
        "HoneypotMeme",
        HoneypotMeme.abi,
        honeypotLogic.address,
        badger.devProxyAdmin.address,
        honeypotLogic.initialize.encode_input(
            honeypot_params.token,
            honeypot_params.amount,
            honeypot_params.nftIndicies,
        ),
        deployer,
    )

    # honeypot = HoneypotMeme.deploy({"from": deployer})
    # honeypot.initialize(
    #     honeypot_params.token,
    #     honeypot_params.amount,
    #     honeypot_params.nftIndicies,
    #     {"from": deployer},
    # )

    # Transfer tokens to MEME
    safe = GnosisSafe(badger.devMultisig)

    # First tx: whitelist the honeypot as an escrow recipient.
    tx = safe.execute(
        {
            "to": badger.rewardsEscrow.address,
            "data": badger.rewardsEscrow.approveRecipient.encode_input(honeypot),
        },
    )
    assert badger.rewardsEscrow.isApproved(honeypot)
    assert multisig_success(tx)

    assert badger.token.balanceOf(
        badger.rewardsEscrow) >= honeypot_params.amount

    # Second tx: move the Badger prize from the escrow into the honeypot.
    tx = safe.execute(
        {
            "to": badger.rewardsEscrow.address,
            "data": badger.rewardsEscrow.transfer.encode_input(
                badger.token, honeypot, honeypot_params.amount),
        },
    )
    assert multisig_success(tx)
    assert badger.token.balanceOf(honeypot) == honeypot_params.amount

    # Mint MEME NFTs for deployer
    memeLtd = interface.IMemeLtd(honeypot_params.meme)
    badgerCollection = accounts.at(honeypot_params.badgerCollection,
                                   force=True)

    for nft_id in honeypot_params.nftIndicies:
        memeLtd.mint(deployer, nft_id, 5, "0x", {"from": badgerCollection})

    for nft_id in honeypot_params.nftIndicies:
        assert memeLtd.balanceOf(deployer, nft_id) > 0

    partialA = accounts[1]
    partialB = accounts[2]
    noCoiner = accounts[3]

    # Each "partial" holder gets an incomplete subset of the required indices.
    partial_a_indices = [97, 98, 99]
    partial_b_indices = [97, 98, 99, 100, 101]

    # MemeLtd is ERC-1155:
    # safeTransferFrom(from, to, id, amount, data)
    for holder, indices in ((partialA, partial_a_indices),
                            (partialB, partial_b_indices)):
        for nft_id in indices:
            memeLtd.safeTransferFrom(deployer, holder, nft_id, 1, "0x",
                                     {"from": deployer})
            assert memeLtd.balanceOf(holder, nft_id) > 0

    return {
        "badger": badger,
        "honeypot": honeypot,
        "deployer": deployer,
        "partialA": partialA,
        "partialB": partialB,
        "noCoiner": noCoiner,
        "memeLtd": memeLtd,
        "honeypot_params": honeypot_params,
    }