Example #1
    def run_test_split(self):
        votes, output = self.prepare_test_split(4000000)
        oldBalance, newBalance, oldDAORewards, newDAORewards = tokens_after_split(
            votes,
            self.token_amounts,
            self.dao_balance_after_rewards,
            self.dao_rewardToken_after_rewards
        )
        eval_test('split', output, {
            # default deposit; a simple way to test that the new DAO contract got created
            "newDAOProposalDeposit": 20,
            "oldDAOBalance": oldBalance,
            "newDAOBalance": newBalance,
            "oldDaoRewardTokens": oldDAORewards,
            "newDaoRewardTokens": newDAORewards
        })
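For context, `eval_test` is called the same way throughout these examples: a scenario name, the raw output of the JS scenario run, and a dict of expected values; Example #2 also uses its return value. A minimal sketch of such a checker, assuming the scenario prints its results as a single JSON object (an assumption for illustration, not the project's actual implementation), could look like this:

import json

def eval_test_sketch(name, output, expected):
    # assumption: the scenario script prints one JSON object with its results
    results = json.loads(output)
    for key, value in expected.items():
        if results.get(key) != value:
            raise AssertionError("{}: expected {}={!r}, got {!r}".format(
                name, key, value, results.get(key)
            ))
    print("Test '{}' PASSED".format(name))
    return results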
Example #2
    def run_test_rewards(self):
        if not self.prop_id:
            # run the proposal scenario first
            self.run_test_proposal()

        debate_secs = 15
        self.create_js_file(
            'rewards',
            {
                "dao_abi": self.dao_abi,
                "dao_address": self.dao_addr,
                "total_rewards": self.args.total_rewards,
                "proposal_deposit": self.args.proposal_deposit,
                "transaction_bytecode": '0x0',  # fallback function
                "debating_period": debate_secs,
                "prop_id": self.next_proposal_id()
            }
        )
        print(
            "Notice: Debate period is {} seconds, so the test will wait "
            "that long".format(debate_secs)
        )
        output = self.run_script('rewards.js')
        results = eval_test('rewards', output, {
            "provider_reward_portion": calculate_reward(
                self.token_amounts[0],
                self.total_supply,
                self.args.total_rewards)
        })
        self.dao_balance_after_rewards = results['DAO_balance']
        self.dao_rewardToken_after_rewards = results['DAO_rewardToken']
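The expected `provider_reward_portion` comes from `calculate_reward(tokens, total_supply, total_rewards)`. Assuming the reward is simply the holder's token-weighted share of the total rewards, with integer division as Solidity would do it (an assumption, not necessarily the suite's exact rounding), a sketch is:

def calculate_reward_sketch(tokens, total_supply, total_rewards):
    # assumption: each holder receives a token-weighted share of total_rewards
    return tokens * total_rewards // total_supply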
Example #3
    def run_test_proposal(self):
        if not self.token_amounts:
            # run the funding scenario first
            self.run_test_fund()

        debate_secs = 20
        minamount = 2  # determined by the total costs + the one-time costs
        amount = random.randint(minamount, sum(self.token_amounts))
        votes = create_votes_array(
            self.token_amounts,
            not self.args.proposal_fail
        )
        yay, nay = count_token_votes(self.token_amounts, votes)
        # self.create_proposal_js(amount, debate_secs, votes)
        self.create_js_file(
            'proposal',
            {
                "dao_abi": self.dao_abi,
                "dao_address": self.dao_addr,
                "offer_abi": self.offer_abi,
                "offer_address": self.offer_addr,
                "offer_amount": amount,
                "offer_desc": 'Test Proposal',
                "proposal_deposit": self.args.proposal_deposit,
                "transaction_bytecode": '0x2ca15122',  # solc --hashes SampleOffer.sol
                "debating_period": debate_secs,
                "votes": arr_str(votes)
            }
        )
        print(
            "Notice: Debate period is {} seconds, so the test will wait "
            "that long".format(debate_secs)
        )
        output = self.run_script('proposal.js')
        eval_test('proposal', output, {
            "dao_proposals_number": "1",
            "proposal_passed": True,
            "proposal_yay": yay,
            "proposal_nay": nay,
            "calculated_deposit": self.args.proposal_deposit,
            "onetime_costs": self.args.offer_onetime_costs,
            "deposit_returned": True,
            "offer_promise_valid": True
        })
        self.prop_id = 1
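`create_votes_array()` and `count_token_votes()` are helpers from the test utilities; judging only from how they are used here (one vote per token holder, token-weighted tallies), a plausible sketch, again an assumption rather than the original code, would be:

def create_votes_array_sketch(token_amounts, should_pass):
    # naive version: everyone votes yes if the proposal should pass and no
    # otherwise; the real helper is presumably randomized per holder
    return [should_pass] * len(token_amounts)

def count_token_votes_sketch(token_amounts, votes):
    # token-weighted yes/no tallies, one vote per token holder
    yay = sum(amount for amount, vote in zip(token_amounts, votes) if vote)
    nay = sum(amount for amount, vote in zip(token_amounts, votes) if not vote)
    return yay, nay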
Example #4
    def run_test_fund(self):
        # if deployment did not already happen, do it now
        if not self.dao_addr:
            self.run_test_deploy()
        else:
            print(
                "WARNING: Running the funding scenario with a pre-deployed "
                "DAO contract. Closing time is {} which is approximately {} "
                "seconds from now.".format(
                    datetime.fromtimestamp(self.closing_time).strftime(
                        '%Y-%m-%d %H:%M:%S'
                    ),
                    self.closing_time - ts_now()
                )
            )

        sale_secs = self.closing_time - ts_now()
        self.total_supply = self.min_value + random.randint(1, 100)
        self.token_amounts = constrained_sum_sample_pos(
            len(self.accounts), self.total_supply
        )
        self.create_js_file(
            'fund',
            {
                "dao_abi": self.dao_abi,
                "dao_address": self.dao_addr,
                "wait_ms": (sale_secs-3)*1000,
                "amounts": arr_str(self.token_amounts)
            }
        )
        print(
            "Notice: Funding period is {} seconds, so the test will wait "
            "that long".format(sale_secs)
        )
        output = self.run_script('fund.js')
        eval_test('fund', output, {
            "dao_funded": True,
            "total_supply": self.total_supply,
            "balances": self.token_amounts,
            "user0_after": self.token_amounts[0],
        })
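`constrained_sum_sample_pos(n, total)` has to produce `n` positive integers that sum to `total`, so that each account receives a random share of the token sale. A standard way to do this (a sketch under that assumption, not necessarily the helper's exact implementation) is to pick `n - 1` distinct cut points and use the gaps between them:

import random

def constrained_sum_sample_pos_sketch(n, total):
    # choose n - 1 distinct cut points inside (0, total) and return the gaps
    # between consecutive cuts; the gaps are positive and sum to total
    dividers = sorted(random.sample(range(1, total), n - 1))
    return [hi - lo for lo, hi in zip([0] + dividers, dividers + [total])]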
Example #5
    def run_test_split_insufficient_gas(self):
        """
        Test that splitting with insufficient gas will fail reliably and will
        not leave an empty contract in the state, burning away user tokens in
        the process.

        This should be the case with the latest Homestead changes:
        https://github.com/ethereum/EIPs/blob/master/EIPS/eip-2.mediawiki#specification
        """
        votes, output = self.prepare_test_split(1000)
        oldBalance, newBalance, oldDAORewards, newDAORewards = tokens_after_split(
            votes,
            self.token_amounts,
            self.dao_balance_after_rewards,
            self.dao_rewardToken_after_rewards
        )
        eval_test('split-insufficient-gas', output, {
            "newDAOProposalDeposit": 0,
            "oldDAOBalance": self.token_amounts,
            "newDAOBalance": [0] * len(self.token_amounts),
        })
Example #6
    def execute(self, expected):
        output = self.run_script('{}.js'.format(self.running_scenario()))
        return eval_test(self.running_scenario(), output, expected)
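A hypothetical call site for this helper (the surrounding names are illustrative, not taken from the repo) would pass the same kind of expected-values dict as in the scenarios above:

# hypothetical usage of execute(); 'scenario' and the expected keys are illustrative
results = scenario.execute(expected={
    "dao_funded": True,
    "total_supply": scenario.total_supply,
})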
Example #7
import logging
from config import Config
import torch
from model import build_model
from transformers import BertTokenizer
from utils import eval_test
from utils.helper import qa, dialogue, real_report

logging.basicConfig(level=logging.INFO)

if __name__ == '__main__':
    logging.info('*** Initializing Predict Mode ***')
    epoch = 0
    device = torch.device(Config.device)
    tokenizer = BertTokenizer.from_pretrained(
        Config.pretrained_model_name_or_path)
    model = build_model(Config).to(device)
    state_dict = torch.load(f'{Config.data_dir}/{Config.fn}.pth')
    model.load_state_dict(state_dict['model'])
    model.eval()

    text = eval_test(Config, qa, dialogue, tokenizer, model, device)
    print('qa: {}'.format(qa))
    print('dialogue: {}'.format(dialogue))
    print('report: {}'.format(text))
    print('real report: {}'.format(real_report))
    print("eval test finish")