def run_univariate_function(name, symbol_fmt, func):
    print('\n*************************************')
    print('Testing {} function'.format(name))

    # PsychSim elements
    world = World()
    agent = Agent('The Agent')
    world.addAgent(agent)

    # gets samples from real non-linear function
    x_params, y_params, sample_values = \
        get_bivariate_samples(func, MIN_X, MAX_X, MIN_Y, MAX_Y, NUM_SAMPLES, NUM_SAMPLES)
    sample_mean = np.nanmean(sample_values)

    # create three features: two holding the function variables, one holding the (dependent) result
    var_x = world.defineState(agent.name, 'var_x', float, lo=MIN_X, hi=MAX_X)
    var_y = world.defineState(agent.name, 'var_y', float, lo=MIN_Y, hi=MAX_Y)
    result = world.defineState(agent.name,
                               'result',
                               float,
                               lo=np.min(sample_values),
                               hi=np.max(sample_values))
    world.setFeature(result, 0)

    # create an action that approximates the function, storing its output in the result feature
    action = agent.addAction({'verb': 'operation', 'action': name})
    tree = makeTree(
        tree_from_bivariate_samples(result, var_x, var_y, x_params, y_params,
                                    sample_values))
    world.setDynamics(result, action, tree)

    world.setOrder([agent.name])

    np.random.seed(SEED)
    values_original = []
    values_approx = []
    for i in range(NUM_TEST_SAMPLES):
        # gets random sample parameters
        x = MIN_X + np.random.rand() * (MAX_X - MIN_X)
        y = MIN_Y + np.random.rand() * (MAX_Y - MIN_Y)

        # set the variables and update the result
        world.setFeature(var_x, x)
        world.setFeature(var_y, y)
        world.step()

        real = func(x, y)
        psych = world.getValue(result)

        print('{:3}: {:30} | Expected: {:10.2f} | PsychSim: {:10.2f}'.format(
            i, symbol_fmt.format(x, y), real, psych))
        values_original.append(real)
        values_approx.append(psych)

    # gets error stats
    rmse = np.sqrt(np.mean((np.array(values_approx) - values_original)**2))
    print('=====================================')
    print('RMSE      = {:.3f}'.format(rmse))
    print('\nPress \'Enter\' to continue...')
    input()
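
A minimal sketch of how the tester above might be invoked; the specific functions and format strings are illustrative, and the module-level constants (MIN_X, MAX_X, MIN_Y, MAX_Y, NUM_SAMPLES, NUM_TEST_SAMPLES, SEED) are assumed to be defined elsewhere in the script:

if __name__ == '__main__':
    # illustrative two-argument functions; any numeric func(x, y) works here
    run_univariate_function('multiplication', '{:.2f} * {:.2f}', lambda x, y: x * y)
    run_univariate_function('power', '{:.2f} ^ {:.2f}', np.power)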
Example no. 2
class Negotiate:
    def __init__(self, turnOrder):

        self.maxRounds = 8
        self.world = World()
        totals = {'apple': 1, 'pear': 2}
        batna_prePref = totals['apple'] + totals['pear']
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,
                                   'appleOwned',
                                   int,
                                   lo=0,
                                   hi=totals['apple'])
            me.setState('appleOwned', 0)
            self.world.defineState(me.name,
                                   'appleOffered',
                                   int,
                                   lo=0,
                                   hi=totals['apple'])
            me.setState('appleOffered', 0)
            self.world.defineState(me.name,
                                   'pearOwned',
                                   int,
                                   lo=0,
                                   hi=totals['pear'])
            me.setState('pearOwned', 0)
            self.world.defineState(me.name,
                                   'pearOffered',
                                   int,
                                   lo=0,
                                   hi=totals['pear'])
            me.setState('pearOffered', 0)

            self.world.defineState(me.name, 'Batna', int, lo=0, hi=10)
            me.setState('Batna', batna_prePref)
            self.world.defineState(me.name, 'BatnaOwned', int, lo=0, hi=10)
            me.setState('BatnaOwned', 0)

            self.world.defineState(me.name, 'agree', bool)
            me.setState('agree', False)
            # Actions
            me.addAction({'verb': 'do nothing'})
            for amt in range(totals['apple'] + 1):
                tmp = me.addAction({
                    'verb': 'offerApple',
                    'object': other.name,
                    'amount': amt
                })
                me.setLegal(
                    tmp,
                    makeTree({
                        'if': trueRow(stateKey(None, 'agreement')),
                        False: {
                            'if': trueRow(stateKey(None,
                                                   'rejectedNegotiation')),
                            True: False,
                            False: True
                        },
                        True: False
                    }))

            for amt in range(totals['pear'] + 1):
                tmp = me.addAction({
                    'verb': 'offerPear',
                    'object': other.name,
                    'amount': amt
                })
                me.setLegal(
                    tmp,
                    makeTree({
                        'if': trueRow(stateKey(None, 'agreement')),
                        False: {
                            'if': trueRow(stateKey(None,
                                                   'rejectedNegotiation')),
                            True: False,
                            False: True
                        },
                        True: False
                    }))

            meReject = me.addAction({
                'verb': 'rejectNegotiation',
                'object': other.name
            })
            me.setLegal(
                meReject,
                makeTree({
                    'if': trueRow(stateKey(None, 'agreement')),
                    False: {
                        'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                        True: False,
                        False: True
                    },
                    True: False
                }))

            meAccept = me.addAction({
                'verb': 'accept offer',
                'object': other.name
            })
            me.setLegal(
                meAccept,
                makeTree({
                    'if': trueRow(stateKey(None, 'appleOffer')),
                    True: {
                        'if': trueRow(stateKey(None, 'pearOffer')),
                        True: {
                            'if': trueRow(stateKey(None, 'agreement')),
                            False: {
                                'if':
                                trueRow(stateKey(None, 'rejectedNegotiation')),
                                True:
                                False,
                                False:
                                True
                            },
                            True: False
                        },
                        False: False
                    },
                    False: False
                }))
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount', 0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        # Turn order: Uncomment the following if you want agents to act in parallel
        # world.setOrder([{agts[0].name,agts[1].name}])
        # Turn order: Uncomment the following if you want agents to act sequentially

        self.world.setOrder(turnOrder)

        # World state
        self.world.defineState(None, 'agreement', bool)
        self.world.setState(None, 'agreement', False)
        self.world.defineState(None, 'appleOffer', bool)
        self.world.setState(None, 'appleOffer', False)
        self.world.defineState(None, 'pearOffer', bool)
        self.world.setState(None, 'pearOffer', False)
        self.world.defineState(
            None,
            'round',
            int,
            description='The current round of the negotiation')
        self.world.setState(None, 'round', 0)
        self.world.defineState(
            None,
            'rejectedNegotiation',
            bool,
            description='Has one of the players walked out?')
        self.world.setState(None, 'rejectedNegotiation', False)

        # don't terminate, so the agents see the benefit of early agreement
        #    world.addTermination(makeTree({'if': trueRow(stateKey(None,'agreement')),
        #                                   True: True,
        #                                   False: False}))

        #    world.addTermination(makeTree({'if': trueRow(stateKey(None,'rejectedNegotiation')),
        #                                   True: True,
        #                                   False: False}))

        self.world.addTermination(
            makeTree({
                'if':
                thresholdRow(stateKey(None, 'round'), self.maxRounds),
                True:
                True,
                False:
                False
            }))

        # Dynamics of offers
        agents = [david.name, stacy.name]
        for i in range(2):
            for fruit in ['apple', 'pear']:
                atom = Action({
                    'subject': agents[i],
                    'verb': 'offer%s' % (fruit.capitalize()),
                    'object': agents[1 - i]
                })
                parties = [atom['subject'], atom['object']]
                for j in range(2):
                    # Set offer amount
                    offer = stateKey(parties[j], '%sOffered' % (fruit))
                    amount = actionKey('amount') if j == 1 else '%d-%s' % (
                        totals[fruit], actionKey('amount'))
                    tree = makeTree(setToConstantMatrix(offer, amount))
                    self.world.setDynamics(parties[j], '%sOffered' % (fruit),
                                           atom, tree)
                    # reset agree flags whenever an offer is made
                    agreeFlag = stateKey(parties[j], 'agree')
                    tree = makeTree(setFalseMatrix(agreeFlag))
                    self.world.setDynamics(parties[j], 'agree', atom, tree)
                # Offers set offer flag in world state
                tree = makeTree(
                    setTrueMatrix(stateKey(None, '%sOffer' % (fruit))))
                self.world.setDynamics(None, '%sOffer' % (fruit), atom, tree)

    # agents = [david.name,stacy.name]
    # Dynamics of agreements
        for i in range(2):
            atom = Action({
                'subject': agents[i],
                'verb': 'accept offer',
                'object': agents[1 - i]
            })

            # accept offer sets accept
            tree = makeTree(setTrueMatrix(stateKey(atom['subject'], 'agree')))
            self.world.setDynamics(atom['subject'], 'agree', atom, tree)

            # accept offer sets agreement if object has accepted
            tree = makeTree({
                'if': trueRow(stateKey(atom['object'], 'agree')),
                True: setTrueMatrix(stateKey(None, 'agreement')),
                False: noChangeMatrix(stateKey(None, 'agreement'))
            })
            self.world.setDynamics(None, 'agreement', atom, tree)

            # Accepting offer sets ownership
            parties = [atom['subject'], atom['object']]
            for fruit in ['apple', 'pear']:
                # atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})
                for j in range(2):
                    offer = stateKey(parties[j], '%sOffered' % (fruit))
                    owned = stateKey(parties[j], '%sOwned' % (fruit))
                    tree = makeTree({
                        'if':
                        trueRow(stateKey(atom['object'], 'agree')),
                        False:
                        noChangeMatrix(owned),
                        True:
                        setToFeatureMatrix(owned, offer)
                    })
                    self.world.setDynamics(parties[j], '%sOwned' % (fruit),
                                           atom, tree)
        # rejecting gives the agent its BATNA and ends the negotiation
            atom = Action({
                'subject': agents[i],
                'verb': 'rejectNegotiation',
                'object': agents[1 - i]
            })

            tree = makeTree(
                setToFeatureMatrix(stateKey(atom['subject'], 'BatnaOwned'),
                                   stateKey(atom['subject'], 'Batna')))
            self.world.setDynamics(atom['subject'], 'BatnaOwned', atom, tree)

            tree = makeTree(
                setToFeatureMatrix(stateKey(atom['object'], 'BatnaOwned'),
                                   stateKey(atom['object'], 'Batna')))
            self.world.setDynamics(atom['object'], 'BatnaOwned', atom, tree)

            tree = makeTree(
                setTrueMatrix(stateKey(None, 'rejectedNegotiation')))
            self.world.setDynamics(None, 'rejectedNegotiation', atom, tree)

        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None, 'round'), 1))
            self.world.setDynamics(None, 'round', action, tree)
        for agent in self.world.agents.values():
            agent.addModel('pearLover', R={}, level=2, rationality=0.01)
            agent.addModel('appleLover', R={}, level=2, rationality=0.01)

    def modeltest(self, trueModels, davidBeliefAboutStacy,
                  stacyBeliefAboutDavid, strongerBelief):
        for agent in self.world.agents.values():
            for model in agent.models.keys():
                if model is True:
                    name = trueModels[agent.name]
                else:
                    name = model
                if name == 'appleLover':
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'appleOwned')),
                        4.0, model)
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'pearOwned')),
                        1.0, model)
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'BatnaOwned')),
                        0.1, model)
                elif name == 'pearLover':
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'appleOwned')),
                        1.0, model)
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'pearOwned')),
                        4.0, model)
                    agent.setReward(
                        maximizeFeature(stateKey(agent.name, 'BatnaOwned')),
                        0.1, model)
        weakBelief = 1.0 - strongerBelief
        belief = {'pearLover': weakBelief, 'appleLover': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David', 'Stacy', belief)
        belief = {'pearLover': weakBelief, 'appleLover': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy', 'David', belief)

    def runit(self, Msg):
        print(Msg)
        for t in range(self.maxRounds + 1):
            self.world.explain(self.world.step(), level=1)
            # print self.world.step()
            self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break
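
A hedged sketch of how this Negotiate scenario might be driven; the turn order, true-model assignment, and belief strength below are illustrative assumptions rather than values taken from the original script:

turn_order = ['Stacy', 'David']                 # assumed sequential turn order
neg = Negotiate(turn_order)
true_models = {'Stacy': 'appleLover',           # assumed true preferences
               'David': 'pearLover'}
neg.modeltest(true_models, 'pearLover', 'appleLover', 0.75)
neg.runit('Negotiation: David believes Stacy is probably a pear lover')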
Example no. 3
                True: setToFeatureMatrix(owned, offer)
            })
            world.setDynamics(atom['object'], '%ssOwned' % (fruit), atom, tree)
            offer = stateKey(atom['subject'], '%ssOffered' % (fruit))
            owned = stateKey(atom['subject'], '%ssOwned' % (fruit))
            tree = makeTree({
                'if': trueRow('agreement'),  # this test shouldn't be necessary
                False: setToFeatureMatrix(owned, offer),
                True: setToFeatureMatrix(owned, offer)
            })
            world.setDynamics(atom['subject'], '%ssOwned' % (fruit), atom,
                              tree)

    # Save scenario to compressed XML file
    world.save('default.psy')

    # Create configuration file
    # config = SafeConfigParser()
    # f = open('default.cfg','w')
    # config.write(f)
    # f.close()

    # Test saved scenario
    world = World('default.psy')
    world.printState()

    for t in range(7):
        world.explain(world.step())
        world.state.select()
        world.printState()
class Centipede:
    def __init__(self, turnOrder, maxRounds, payoff):

        self.maxRounds = maxRounds
        self.payoff = payoff
        print(self.payoff)
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name, 'money', int)
            me.setState('money', 0)
            mePass = me.addAction({'verb': 'pass', 'object': other.name})
            meTake = me.addAction({'verb': 'take', 'object': other.name})
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount', 1.)
            # me.setParameter('discount',0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(turnOrder)
        # World state
        self.world.defineState(None,
                               'round',
                               int,
                               description='The current round')
        self.world.setState(None, 'round', 0)
        self.world.defineState(None,
                               'gameOver',
                               bool,
                               description='whether game is over')
        self.world.setState(None, 'gameOver', False)

        self.world.addTermination(
            makeTree({
                'if':
                thresholdRow(stateKey(None, 'round'), self.maxRounds),
                True:
                True,
                False: {
                    'if': trueRow(stateKey(None, 'gameOver')),
                    True: True,
                    False: False
                }
            }))

        # Dynamics
        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None, 'round'), 1))
            self.world.setDynamics(None, 'round', action, tree)
            if (action['verb'] == 'take'):
                tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
                self.world.setDynamics(None, 'gameOver', action, tree)
                agts = ['Stacy', 'David']
                for i in range(2):
                    key = stateKey(agts[i], 'money')
                    tree = makeTree(
                        self.buildPayoff(0, key, self.payoff[agts[i]]))
                    self.world.setDynamics(agts[i], 'money', action, tree)
            elif action['verb'] == 'pass':
                agts = ['Stacy', 'David']
                for i in range(2):
                    key = stateKey(agts[i], 'money')
                    tree = makeTree({
                        'if':
                        equalRow(stateKey(None, 'round'), self.maxRounds - 1),
                        True:
                        setToConstantMatrix(
                            key, self.payoff[agts[i]][self.maxRounds]),
                        False:
                        noChangeMatrix(key)
                    })
                    self.world.setDynamics(agts[i], 'money', action, tree)

# really need to ask david about these levels - if adding models with levels, can
# the true model point to these but have a different level?

        for agent in self.world.agents.values():
            agent.addModel('Christian',
                           R={},
                           level=2,
                           rationality=10.,
                           selection='distribution')
            agent.addModel('Capitalist',
                           R={},
                           level=2,
                           rationality=10.,
                           selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=0.01)
            # agent.addModel('Capitalist',R={},level=2,rationality=0.01)

    def buildPayoff(self, rnd, key, payoff):
        if (rnd == self.maxRounds - 1):
            return setToConstantMatrix(key, payoff[rnd])
        else:
            return {
                'if': equalRow(stateKey(None, 'round'), rnd),
                True: setToConstantMatrix(key, payoff[rnd]),
                False: self.buildPayoff(rnd + 1, key, payoff)
            }
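
    # For intuition: buildPayoff unrolls the per-round payoff list into a nested
    # decision tree keyed on the world's 'round' feature. With maxRounds = 3, the
    # call self.buildPayoff(0, key, payoff) expands to roughly:
    #   {'if': equalRow(stateKey(None, 'round'), 0),
    #    True: setToConstantMatrix(key, payoff[0]),
    #    False: {'if': equalRow(stateKey(None, 'round'), 1),
    #            True: setToConstantMatrix(key, payoff[1]),
    #            False: setToConstantMatrix(key, payoff[2])}}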

    def modeltest(self, trueModels, davidBeliefAboutStacy,
                  stacyBeliefAboutDavid, strongerBelief):
        agts = list(self.world.agents.values())
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            for model in me.models.keys():
                if model is True:
                    name = trueModels[me.name]
                else:
                    name = model
                if name == 'Capitalist':
                    me.setReward(maximizeFeature(stateKey(me.name, 'money')),
                                 1.0, model)
                elif name == 'Christian':
                    me.setReward(maximizeFeature(stateKey(me.name, 'money')),
                                 1.0, model)
                    me.setReward(
                        maximizeFeature(stateKey(other.name, 'money')), 1.0,
                        model)

        weakBelief = 1.0 - strongerBelief
        belief = {'Christian': weakBelief, 'Capitalist': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David', 'Stacy', belief)
        belief = {'Christian': weakBelief, 'Capitalist': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy', 'David', belief)

    def runit(self, Msg):
        print(Msg)
        for t in range(self.maxRounds + 1):
            self.world.explain(self.world.step(), level=2)
            # self.world.explain(self.world.step(),level=1)
            # print self.world.step()
            self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break
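
A possible driver for the Centipede game above; the round count, payoff schedules, and belief parameters are illustrative assumptions:

turn_order = ['Stacy', 'David']
max_rounds = 5
# assumed payoff schedules: one entry per round, plus a final entry awarded on the last 'pass'
payoff = {'Stacy': [1, 0, 3, 2, 5, 4],
          'David': [0, 2, 1, 4, 3, 6]}
game = Centipede(turn_order, max_rounds, payoff)
true_models = {'Stacy': 'Capitalist', 'David': 'Capitalist'}   # assumed true models
game.modeltest(true_models, 'Christian', 'Capitalist', 1.0)
game.runit('Centipede: both players modelled as pure money-maximizers')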
Example no. 5
    # define order
    my_turn_order = [{agent1.name, agent2.name}]
    world.setOrder(my_turn_order)

    # add true mental model of the other to each agent
    world.setMentalModel(agent1.name, agent2.name,
                         Distribution({agent2.get_true_model(): 1}))
    world.setMentalModel(agent2.name, agent1.name,
                         Distribution({agent1.get_true_model(): 1}))

    for h in range(MAX_HORIZON + 1):
        logging.info('====================================')
        logging.info(f'Horizon {h}')

        # set horizon (also to the true model!) and reset decisions
        for i in range(len(agents)):
            agents[i].setHorizon(h)
            agents[i].setHorizon(h, agents[i].get_true_model())
            world.setFeature(agents_dec[i], NOT_DECIDED, recurse=True)

        for t in range(NUM_STEPS):

            # decision per step (1 per agent): cooperate or defect?
            logging.info('---------------------')
            logging.info(f'Step {t}')
            step = world.step()
            for i in range(len(agents)):
                logging.info(
                    f'{agents[i].name}: {world.getFeature(agents_dec[i], unique=True)}'
                )
    # Stacy's models of David
    david.addModel('pearLover',R={appleGoalD: 1.0,pearGoalD: 4.0,BatnaGoalD: 6.0},level=2,rationality=0.01)
    david.addModel('appleLover',R={appleGoalD: 4.0,pearGoalD: 1.0,BatnaGoalD: 0.1},level=2,rationality=0.01)
    world.setMentalModel(stacy.name,david.name,{'pearLover': 0.5,'appleLover': 0.5})


    
    # Save scenario to compressed XML file
    world.save('default.psy')

    # Create configuration file
    # config = SafeConfigParser()
    # f = open('default.cfg','w')
    # config.write(f)
    # f.close()

    # Test saved scenario
    world = World('default.psy')
    # world.printState()
    
    for t in range(maxRounds + 1):
        world.explain(world.step())
        # print world.step()
        world.state.select()
        world.printState()
        if world.terminated():
            break



Example no. 7
class Ultimatum:

    def __init__(self):
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]
        totalAmt = 4
        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            self.world.defineState(me.name,'offered',int,lo=0,hi=totalAmt)
            self.world.defineState(me.name,'money',int,lo=0,hi=totalAmt)
            me.setState('offered',0)  
            me.setState('money',0)  
            if (me.name == 'Stacy'):
                for amt in range(totalAmt + 1):
                    me.addAction({'verb': 'offer','object': other.name,'amount': amt})
            else:
                mePass = me.addAction({'verb': 'accept','object': other.name})
                mePass = me.addAction({'verb': 'reject','object': other.name})
            # Parameters
            me.setHorizon(2)
            me.setParameter('discount',0.9)
            # me.setParameter('discount',1.0)
        
            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(['Stacy','David'])

        # World state
        self.world.defineState(None,'gameOver',bool,description='Whether the game is over')
        self.world.setState(None,'gameOver',False)

        self.world.addTermination(makeTree({'if': trueRow(stateKey(None,'gameOver')),
                                            True: True, False: False}))
        # offer dynamics
        atom = Action({'subject': 'Stacy','verb': 'offer', 'object': 'David'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j],'offered')
            amount = actionKey('amount') if j == 1 else '%d-%s' % (totalAmt,actionKey('amount'))
            tree = makeTree(setToConstantMatrix(offer,amount))
            self.world.setDynamics(parties[j],'offered',atom,tree)
        # accept dynamics
        atom = Action({'subject': 'David','verb': 'accept', 'object': 'Stacy'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j],'offered')
            money = stateKey(parties[j],'money')
            tree = makeTree(setToFeatureMatrix(money,offer))
            self.world.setDynamics(parties[j],'money',atom,tree)
        tree=makeTree(setTrueMatrix(stateKey(None,'gameOver')))
        self.world.setDynamics(None,'gameOver',atom,tree)
        # reject dynamics
        atom = Action({'subject': 'David','verb': 'reject', 'object': 'Stacy'})
        tree=makeTree(setTrueMatrix(stateKey(None,'gameOver')))
        self.world.setDynamics(None,'gameOver',atom,tree)

# really need to ask david about these levels - if adding models with levels, can
# the true model point to these but have a different level?
        for agent in self.world.agents.values():
            agent.addModel('Christian',R={},level=2,rationality=25.,selection='distribution')
            agent.addModel('Capitalist',R={},level=2,rationality=25.,selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='random')



    def modeltest(self,trueModels,davidBeliefAboutStacy,stacyBeliefAboutDavid,strongerBelief):
        agts = list(self.world.agents.values())
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            for model in me.models.keys():
                if model is True:
                    name = trueModels[me.name]
                else:
                    name = model
                if name == 'Capitalist':
                    me.setReward(maximizeFeature(stateKey(me.name,'money')),1.0,model)
                elif name == 'Christian':
                    me.setReward(maximizeFeature(stateKey(me.name,'money')),1.0,model)
                    me.setReward(maximizeFeature(stateKey(other.name,'money')),1.0,model)

        weakBelief = 1.0 - strongerBelief          
        print(weakBelief)
        belief = {'Christian': weakBelief,'Capitalist': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David','Stacy',belief)
        belief = {'Christian': weakBelief,'Capitalist': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy','David',belief)

    def runit(self,Msg):
        print(Msg)
        for t in range(2):
            self.world.explain(self.world.step(),level=2)
            # print self.world.step()
            self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break        
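
One hedged way to exercise this Ultimatum scenario; the true-model assignment and belief strength are assumptions made for illustration:

game = Ultimatum()
true_models = {'Stacy': 'Capitalist', 'David': 'Capitalist'}   # assumed true models
game.modeltest(true_models, 'Capitalist', 'Capitalist', 1.0)
game.runit('Ultimatum: both players modelled as money-maximizers')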
Example no. 8
class Ultimatum:
    def __init__(self):
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]
        totalAmt = 4
        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            self.world.defineState(me.name, 'offered', int, lo=0, hi=totalAmt)
            self.world.defineState(me.name, 'money', int, lo=0, hi=totalAmt)
            me.setState('offered', 0)
            me.setState('money', 0)
            if (me.name == 'Stacy'):
                for amt in range(totalAmt + 1):
                    me.addAction({
                        'verb': 'offer',
                        'object': other.name,
                        'amount': amt
                    })
            else:
                mePass = me.addAction({'verb': 'accept', 'object': other.name})
                mePass = me.addAction({'verb': 'reject', 'object': other.name})
            # Parameters
            me.setHorizon(2)
            me.setParameter('discount', 0.9)
            # me.setParameter('discount',1.0)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(['Stacy', 'David'])

        # World state
        self.world.defineState(None,
                               'gameOver',
                               bool,
                               description='Whether the game is over')
        self.world.setState(None, 'gameOver', False)

        self.world.addTermination(
            makeTree({
                'if': trueRow(stateKey(None, 'gameOver')),
                True: True,
                False: False
            }))
        # offer dynamics
        atom = Action({'subject': 'Stacy', 'verb': 'offer', 'object': 'David'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j], 'offered')
            amount = actionKey('amount') if j == 1 else '%d-%s' % (
                totalAmt, actionKey('amount'))
            tree = makeTree(setToConstantMatrix(offer, amount))
            self.world.setDynamics(parties[j], 'offered', atom, tree)
        # accept dynamics
        atom = Action({
            'subject': 'David',
            'verb': 'accept',
            'object': 'Stacy'
        })
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j], 'offered')
            money = stateKey(parties[j], 'money')
            tree = makeTree(setToFeatureMatrix(money, offer))
            self.world.setDynamics(parties[j], 'money', atom, tree)
        tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
        self.world.setDynamics(None, 'gameOver', atom, tree)
        # reject dynamics
        atom = Action({
            'subject': 'David',
            'verb': 'reject',
            'object': 'Stacy'
        })
        tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
        self.world.setDynamics(None, 'gameOver', atom, tree)

        # really need to ask david about these levels - if adding models with levels, can
        # the true model point to these but have a different level?
        for agent in self.world.agents.values():
            agent.addModel('Christian',
                           R={},
                           level=2,
                           rationality=25.,
                           selection='distribution')
            agent.addModel('Capitalist',
                           R={},
                           level=2,
                           rationality=25.,
                           selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='random')

    def modeltest(self, trueModels, davidBeliefAboutStacy,
                  stacyBeliefAboutDavid, strongerBelief):
        agts = list(self.world.agents.values())
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            for model in me.models.keys():
                if model is True:
                    name = trueModels[me.name]
                else:
                    name = model
                if name == 'Capitalist':
                    me.setReward(maximizeFeature(stateKey(me.name, 'money')),
                                 1.0, model)
                elif name == 'Christian':
                    me.setReward(maximizeFeature(stateKey(me.name, 'money')),
                                 1.0, model)
                    me.setReward(
                        maximizeFeature(stateKey(other.name, 'money')), 1.0,
                        model)

        weakBelief = 1.0 - strongerBelief
        print(weakBelief)
        belief = {'Christian': weakBelief, 'Capitalist': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David', 'Stacy', belief)
        belief = {'Christian': weakBelief, 'Capitalist': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy', 'David', belief)

    def runit(self, Msg):
        print(Msg)
        for t in range(2):
            self.world.explain(self.world.step(), level=2)
            # print self.world.step()
            self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break
class Negotiate:



    def __init__(self,turnOrder):

        self.maxRounds=8
        self.world = World()
        totals = {'apple':1,'pear':2} 
        batna_prePref = totals['apple'] + totals['pear']
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,'appleOwned',int,lo=0,hi=totals['apple'])
            me.setState('appleOwned',0)
            self.world.defineState(me.name,'appleOffered',int,lo=0,hi=totals['apple'])
            me.setState('appleOffered',0)  
            self.world.defineState(me.name,'pearOwned',int,lo=0,hi=totals['pear'])
            me.setState('pearOwned',0)
            self.world.defineState(me.name,'pearOffered',int,lo=0,hi=totals['pear'])
            me.setState('pearOffered',0)  

            self.world.defineState(me.name,'Batna',int,lo=0,hi=10)
            me.setState('Batna', batna_prePref)
            self.world.defineState(me.name,'BatnaOwned',int,lo=0,hi=10)
            me.setState('BatnaOwned',0)  

            self.world.defineState(me.name,'agree',bool)
            me.setState('agree',False)  
            # Actions
            me.addAction({'verb': 'do nothing'})
            for amt in range(totals['apple'] + 1):
                tmp = me.addAction({'verb': 'offerApple','object': other.name,'amount': amt})
                me.setLegal(tmp,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                          False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                  True: False,
                                                  False: True},
                                          True: False}))


            for amt in range(totals['pear'] + 1):
                tmp = me.addAction({'verb': 'offerPear','object': other.name,'amount': amt})
                me.setLegal(tmp,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                          False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                  True: False,
                                                  False: True},
                                          True: False}))

            meReject = me.addAction({'verb': 'rejectNegotiation','object': other.name})
            me.setLegal(meReject,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                           False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                   True: False,
                                                   False: True},
                                           True: False}))

            meAccept = me.addAction({'verb': 'accept offer','object': other.name})
            me.setLegal(meAccept,makeTree({'if': trueRow(stateKey(None, 'appleOffer')),
                                           True: {'if': trueRow(stateKey(None, 'pearOffer')),
                                                  True: {'if': trueRow(stateKey(None, 'agreement')),
                                                         False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                                 True: False,
                                                                 False: True},
                                                         True: False},
                                                  False: False},
                                           False: False}))
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount',0.9)
        
            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

            # Turn order: Uncomment the following if you want agents to act in parallel
            # world.setOrder([{agts[0].name,agts[1].name}])
            # Turn order: Uncomment the following if you want agents to act sequentially

        self.world.setOrder(turnOrder)

        # World state
        self.world.defineState(None,'agreement',bool)
        self.world.setState(None,'agreement',False)
        self.world.defineState(None,'appleOffer',bool)
        self.world.setState(None,'appleOffer',False)
        self.world.defineState(None,'pearOffer',bool)
        self.world.setState(None,'pearOffer',False)
        self.world.defineState(None,'round',int,description='The current round of the negotiation')
        self.world.setState(None,'round',0)
        self.world.defineState(None,'rejectedNegotiation',bool,
                          description='Has one of the players walked out?')
        self.world.setState(None, 'rejectedNegotiation', False)


# don't terminate, so the agents see the benefit of early agreement
#    world.addTermination(makeTree({'if': trueRow(stateKey(None,'agreement')),
#                                   True: True, 
#                                   False: False}))

#    world.addTermination(makeTree({'if': trueRow(stateKey(None,'rejectedNegotiation')),
#                                   True: True, 
#                                   False: False}))

        self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),self.maxRounds),
                                   True: True, False: False}))

    # Dynamics of offers
        agents = [david.name,stacy.name]
        for i in range(2):
            for fruit in ['apple','pear']:
                atom = Action({'subject': agents[i],'verb': 'offer%s' % (fruit.capitalize()),
                               'object': agents[1-i]})
                parties = [atom['subject'], atom['object']]
                for j in range(2):
                    # Set offer amount
                    offer = stateKey(parties[j],'%sOffered' % (fruit))
                    amount = actionKey('amount') if j == 1 else '%d-%s' % (totals[fruit],actionKey('amount'))
                    tree = makeTree(setToConstantMatrix(offer,amount))
                    self.world.setDynamics(parties[j],'%sOffered' % (fruit),atom,tree)
                    # reset agree flags whenever an offer is made
                    agreeFlag = stateKey(parties[j],'agree')
                    tree = makeTree(setFalseMatrix(agreeFlag))
                    self.world.setDynamics(parties[j],'agree',atom,tree)
                # Offers set offer flag in world state
                tree = makeTree(setTrueMatrix(stateKey(None,'%sOffer' % (fruit))))
                self.world.setDynamics(None,'%sOffer' % (fruit) ,atom,tree)
 

    # agents = [david.name,stacy.name]
    # Dynamics of agreements
        for i in range(2):
            atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})

            # accept offer sets accept
            tree = makeTree(setTrueMatrix(stateKey(atom['subject'],'agree')))
            self.world.setDynamics(atom['subject'],'agree',atom,tree)

            # accept offer sets agreement if object has accepted
            tree = makeTree({'if': trueRow(stateKey(atom['object'],'agree')),
                             True:  setTrueMatrix(stateKey(None,'agreement')),
                             False: noChangeMatrix(stateKey(None,'agreement'))})
            self.world.setDynamics(None,'agreement',atom,tree)

        # Accepting offer sets ownership
            parties = [atom['subject'], atom['object']]
            for fruit in ['apple','pear']:
                # atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})
                for j in range(2):
                    offer = stateKey(parties[j],'%sOffered' % (fruit))
                    owned = stateKey(parties[j],'%sOwned' % (fruit))
                    tree = makeTree({'if': trueRow(stateKey(atom['object'],'agree')),
                                     False: noChangeMatrix(owned),
                                     True: setToFeatureMatrix(owned,offer)})
                    self.world.setDynamics(parties[j],'%sOwned' % (fruit),atom,tree)
        # rejecting gives the agent its BATNA and ends the negotiation
            atom = Action({'subject': agents[i],'verb': 'rejectNegotiation',
                           'object': agents[1-i]})

            tree = makeTree(setToFeatureMatrix(stateKey(atom['subject'],'BatnaOwned') ,stateKey(atom['subject'], 'Batna')))
            self.world.setDynamics(atom['subject'],'BatnaOwned' ,atom,tree)

            tree = makeTree(setToFeatureMatrix(stateKey(atom['object'],'BatnaOwned') ,stateKey(atom['object'], 'Batna')))
            self.world.setDynamics(atom['object'],'BatnaOwned' ,atom,tree)

            tree = makeTree(setTrueMatrix(stateKey(None,'rejectedNegotiation')))
            self.world.setDynamics(None,'rejectedNegotiation' ,atom,tree)
 

        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None,'round'),1))
            self.world.setDynamics(None,'round',action,tree)
        for agent in self.world.agents.values():
            agent.addModel('pearLover',R={},level=2,rationality=0.01)
            agent.addModel('appleLover',R={},level=2,rationality=0.01)




    def modeltest(self,trueModels,davidBeliefAboutStacy,stacyBeliefAboutDavid,strongerBelief):
        for agent in self.world.agents.values():
            for model in agent.models.keys():
                if model is True:
                    name = trueModels[agent.name]
                else:
                    name = model
                if name == 'appleLover':
                    agent.setReward(maximizeFeature(stateKey(agent.name,'appleOwned')),4.0,model)
                    agent.setReward(maximizeFeature(stateKey(agent.name,'pearOwned')),1.0,model)
                    agent.setReward(maximizeFeature(stateKey(agent.name,'BatnaOwned')),0.1,model)
                elif name == 'pearLover':
                    agent.setReward(maximizeFeature(stateKey(agent.name,'appleOwned')),1.0,model)
                    agent.setReward(maximizeFeature(stateKey(agent.name,'pearOwned')),4.0,model)
                    agent.setReward(maximizeFeature(stateKey(agent.name,'BatnaOwned')),0.1,model)
        weakBelief = 1.0 - strongerBelief          
        belief = {'pearLover': weakBelief,'appleLover': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David','Stacy',belief)
        belief = {'pearLover': weakBelief,'appleLover': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy','David',belief)

    def runit(self,Msg):
        print(Msg)
        for t in range(self.maxRounds + 1):
            self.world.explain(self.world.step(),level=1)
            # print self.world.step()
            self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break        
    # add single action that discretizes the feature
    action = agent.addAction({'verb': '', 'action': 'discretize'})
    tree = makeTree(discretization_tree(world, feat, NUM_BINS))
    world.setDynamics(feat, action, tree)

    world.setOrder([{agent.name}])

    print('====================================')
    print('High:\t{}'.format(HIGH))
    print('Low:\t{}'.format(LOW))
    print('Bins:\t{}'.format(NUM_BINS))

    print('\nSamples/steps:')
    values_original = []
    values_discrete = []
    for i in range(NUM_SAMPLES):
        num = np.random.uniform(LOW, HIGH)
        world.setFeature(feat, num)

        before = world.getValue(feat)
        world.step()
        after = world.getValue(feat)

        print('{:.3f}\t-> {}'.format(before, after))
        values_original.append(before)
        values_discrete.append(after)

    # calculates RMSE
    rmse = np.sqrt(np.mean((np.array(values_discrete) - values_original)**2))
    print('\nRMSE: {:.3f}'.format(rmse))
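
# The discretization_tree helper used above is assumed to snap a continuous value to
# the nearest of NUM_BINS evenly spaced points between LOW and HIGH; a plain NumPy
# illustration of that mapping (for reference only, not the PsychSim implementation):
def discretize(value, low, high, num_bins):
    # snap value to the nearest of num_bins evenly spaced bin centers in [low, high]
    bins = np.linspace(low, high, num_bins)
    return float(bins[np.argmin(np.abs(bins - value))])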
class Centipede:



    def __init__(self,turnOrder,maxRounds,payoff):

        self.maxRounds=maxRounds
        self.payoff = payoff
        print(self.payoff)
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,'money',int)
            me.setState('money',0)
            mePass = me.addAction({'verb': 'pass','object': other.name})
            meTake = me.addAction({'verb': 'take','object': other.name})
            # Parameters
            me.setHorizon(6)
            me.setAttribute('discount',1.)
            # me.setAttribute('discount',0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(turnOrder)
        # World state
        self.world.defineState(None,'round',int,description='The current round')
        self.world.setState(None,'round',0)
        self.world.defineState(None,'gameOver',bool,description='whether game is over')
        self.world.setState(None,'gameOver',False)

        self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),self.maxRounds),
                                            True: True,
                                            False: {'if': trueRow(stateKey(None,'gameOver')),
                                                    True: True,
                                                    False: False}}))

        # Dynamics
        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None,'round'),1))
            self.world.setDynamics(stateKey(None,'round'),action,tree)
            if (action['verb'] == 'take'):
                tree = makeTree(setTrueMatrix(stateKey(None,'gameOver')))
                self.world.setDynamics(stateKey(None,'gameOver'),action,tree)
                agts = ['Stacy','David']
                for i in range(2):
                    key = stateKey(agts[i],'money')
                    tree = makeTree(self.buildPayoff(0, key, self.payoff[agts[i]]))
                    self.world.setDynamics(stateKey(agts[i],'money'),action,tree)
            elif action['verb'] == 'pass':
                agts = ['Stacy','David']
                for i in range(2):
                    key = stateKey(agts[i],'money')
                    tree = makeTree({'if': equalRow(stateKey(None,'round'),self.maxRounds-1),
                                     True: setToConstantMatrix(key,self.payoff[agts[i]][self.maxRounds]),
                                     False: noChangeMatrix(key)})
                    self.world.setDynamics(stateKey(agts[i],'money'),action,tree)


# really need to ask david about these levels - if adding models with levels, can
# the true model point to these but have a different level?

        for agent in self.world.agents.values():
            agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='distribution')
            # agent.addModel('Christian',R={},level=2,rationality=0.01)
            # agent.addModel('Capitalist',R={},level=2,rationality=0.01)

    def buildPayoff(self,rnd,key,payoff):
        if (rnd == self.maxRounds - 1):
            return setToConstantMatrix(key,payoff[rnd])
        else:
            return {'if': equalRow(stateKey(None,'round'),rnd),
                    True: setToConstantMatrix(key,payoff[rnd]),
                    False: self.buildPayoff(rnd+1,key,payoff)}


    def modeltest(self,trueModels,davidBeliefAboutStacy,stacyBeliefAboutDavid,strongerBelief):
        agts = list(self.world.agents.values())
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            for model in me.models.keys():
                if model is True:
                    name = trueModels[me.name]
                else:
                    name = model
                if name == 'Capitalist':
                    me.setReward(maximizeFeature(stateKey(me.name,'money')),1.0,model)
                elif name == 'Christian':
                    me.setReward(maximizeFeature(stateKey(me.name,'money')),1.0,model)
                    me.setReward(maximizeFeature(stateKey(other.name,'money')),1.0,model)

        weakBelief = 1.0 - strongerBelief
        belief = {'Christian': weakBelief,'Capitalist': weakBelief}
        belief[davidBeliefAboutStacy] = strongerBelief
        self.world.setMentalModel('David','Stacy',belief)
        belief = {'Christian': weakBelief,'Capitalist': weakBelief}
        belief[stacyBeliefAboutDavid] = strongerBelief
        self.world.setMentalModel('Stacy','David',belief)

    def runit(self,Msg):
        print(Msg)
        for t in range(self.maxRounds + 1):
            self.world.explain(self.world.step(),level=2)
            # self.world.explain(self.world.step(),level=1)
            # print self.world.step()
            #self.world.state.select()
            # self.world.printState()
            if self.world.terminated():
                break