# Code example #1 (excerpt — the enclosing function definition precedes this fragment)
                                     }
                                 }))
        # NOTE(review): the closing brackets above terminate a tree/dict literal
        # that begins before this excerpt; the enclosing loop header (binding
        # `my_dec` and `action`) is also not visible here.
        # Dynamics for this action: a single-leaf tree that sets the agent's
        # decision feature to the constant COOPERATED.
        tree = makeTree(setToConstantMatrix(my_dec, COOPERATED))
        world.setDynamics(my_dec, action, tree)

    # defines payoff matrices (equal to both agents)
    # get_reward_tree is given the agent's own decision feature first, the
    # opponent's second; weight 1 on the reward tree for each agent.
    agent1.setReward(get_reward_tree(agent1, agents_dec[0], agents_dec[1]), 1)
    agent2.setReward(get_reward_tree(agent2, agents_dec[1], agents_dec[0]), 1)

    # define order: a single set containing both names, i.e. the two agents
    # act simultaneously each step
    my_turn_order = [{agent1.name, agent2.name}]
    world.setOrder(my_turn_order)

    # add true mental model of the other to each agent
    # (each agent believes with probability 1 that the opponent uses its true model)
    world.setMentalModel(agent1.name, agent2.name,
                         Distribution({agent2.get_true_model(): 1}))
    world.setMentalModel(agent2.name, agent1.name,
                         Distribution({agent1.get_true_model(): 1}))

    # run the scenario once for every planning horizon from 0 up to MAX_HORIZON
    for h in range(MAX_HORIZON + 1):
        logging.info('====================================')
        logging.info(f'Horizon {h}')

        # set horizon (also to the true model!) and reset decisions
        for i in range(len(agents)):
            agents[i].setHorizon(h)
            agents[i].setHorizon(h, agents[i].get_true_model())
            world.setFeature(agents_dec[i], NOT_DECIDED, recurse=True)

        # NOTE(review): the body of this simulation loop continues beyond this excerpt
        for t in range(NUM_STEPS):
# Code example #2 (excerpt — the enclosing function definition precedes this fragment)
    # add mental model of the other for each agent
    # (each agent attributes a 'fake' model to its opponent with probability 1,
    # rather than the opponent's true model)
    world.setMentalModel(agent1.name, agent2.name,
                         Distribution({get_fake_model_name(agent2): 1}))
    world.setMentalModel(agent2.name, agent1.name,
                         Distribution({get_fake_model_name(agent1): 1}))

    # 'hides' right actions from models by setting them illegal
    # (therefore agents should always choose right because they think the other will choose left)
    set_illegal_action(agent1, rights[0], [get_fake_model_name(agent1)])
    set_illegal_action(agent2, rights[1], [get_fake_model_name(agent2)])

    # # ** unnecessary / just for illustration **: set left actions legal for both the agents and their models
    # set_legal_action(agent1, lefts[0], [agent1.get_true_model(), get_fake_model_name(agent1)])
    # set_legal_action(agent2, lefts[1], [agent2.get_true_model(), get_fake_model_name(agent2)])

    # reset beliefs of each agent's true model and of its fake model
    # (presumably so all models start from the current world state — confirm
    # against PsychSim's Agent.resetBelief semantics)
    agent1.resetBelief(model=agent1.get_true_model())
    agent1.resetBelief(model=get_fake_model_name(agent1))
    agent2.resetBelief(model=agent2.get_true_model())
    agent2.resetBelief(model=get_fake_model_name(agent2))

    # simulate NUM_STEPS steps, logging each agent's chosen side after each step
    for t in range(NUM_STEPS):
        # reset each agent's decision feature (to NOT_DECIDED) before stepping
        for a in range(len(agents)):
            world.setFeature(sides[a], NOT_DECIDED, recurse=True)

        logging.info('====================================')
        logging.info(f'Step {t}')
        step = world.step()
        # report the side each agent ended up choosing this step
        for a in range(len(agents)):
            logging.info(
                f'{agents[a].name}: {world.getFeature(sides[a], unique=True)}')