Example #1
 def getActions(self,vector):
     targets = []
     resources = vector[stateKey(self.name,self.resourceName)]
     for obj in self.legalObjects(vector):
         targets.append(obj)
     actions = self.getCombos(targets,resources)
     return Agent.getActions(self,vector,actions).union(Agent.getActions(self,vector))
Example #2
def run_univariate_function(name, symbol_fmt, func):
    print('\n*************************************')
    print('Testing {} function'.format(name))

    # PsychSim elements
    world = World()
    agent = Agent('The Agent')
    world.addAgent(agent)

    # gets samples from real non-linear function
    x_params, y_params, sample_values = \
        get_bivariate_samples(func, MIN_X, MAX_X, MIN_Y, MAX_Y, NUM_SAMPLES, NUM_SAMPLES)
    sample_mean = np.nanmean(sample_values)

    # create three features: two holding the variables, a third holding the (dependent) result
    var_x = world.defineState(agent.name, 'var_x', float, lo=MIN_X, hi=MAX_X)
    var_y = world.defineState(agent.name, 'var_y', float, lo=MIN_Y, hi=MAX_Y)
    result = world.defineState(agent.name,
                               'result',
                               float,
                               lo=np.min(sample_values),
                               hi=np.max(sample_values))
    world.setFeature(result, 0)

    # create an action that approximates the function, storing the outcome in the result feature
    action = agent.addAction({'verb': 'operation', 'action': name})
    tree = makeTree(
        tree_from_bivariate_samples(result, var_x, var_y, x_params, y_params,
                                    sample_values))
    world.setDynamics(result, action, tree)

    world.setOrder([agent.name])

    np.random.seed(SEED)
    values_original = []
    values_approx = []
    for i in range(NUM_TEST_SAMPLES):
        # gets random sample parameters
        x = MIN_X + np.random.rand() * (MAX_X - MIN_X)
        y = MIN_Y + np.random.rand() * (MAX_Y - MIN_Y)

        # sets variable and updates result
        world.setFeature(var_x, x)
        world.setFeature(var_y, y)
        world.step()

        real = func(x, y)
        psych = world.getValue(result)

        print('{:3}: {:30} | Expected: {:10.2f} | PsychSim: {:10.2f}'.format(
            i, symbol_fmt.format(x, y), real, psych))
        values_original.append(real)
        values_approx.append(psych)

    # gets error stats
    rmse = np.sqrt(np.mean((np.array(values_approx) - values_original)**2))
    print('=====================================')
    print('RMSE      = {:.3f}'.format(rmse))
    print('\nPress \'Enter\' to continue...')
    input()
Example #3
 def setUp(self):
     # Create world
     self.world = World()
     # Create agents
     self.tom = Agent('Tom')
     self.world.addAgent(self.tom)
     self.jerry = Agent('Jerry')
     self.world.addAgent(self.jerry)
Example #4
 def setUp(self):
     # Create world
     self.world = World()
     # Create agents
     self.tom = Agent('Tom')
     self.world.addAgent(self.tom)
     self.jerry = Agent('Jerry')
     self.world.addAgent(self.jerry)
Example #5
 def __init__(self,name,resource=None,verb=None,objects=None):
     Agent.__init__(self,name)
     if resource is not None:
         self.resourceName = resource
         self.verbName = verb
         self.objects = objects
         self.objectLegality = {}
         for obj in objects:
             self.objectLegality[obj] = makeTree(True)
     self.allocateAll = False
Example #6
 def parse(self,element):
     Agent.parse(self,element)
     self.resourceName = str(element.getAttribute('resource'))
     self.verbName = str(element.getAttribute('verb'))
     self.objects = []
     self.objectLegality = {}
     node = element.firstChild
     while node:
         if node.nodeType == node.ELEMENT_NODE:
             if node.tagName == 'object':
                 self.objects.append(str(node.firstChild.data).strip())
             elif node.tagName == 'objectlegal':
                 obj = str(node.getAttribute('object'))
                 subnode = node.firstChild
                 while subnode:
                     if subnode.nodeType == subnode.ELEMENT_NODE:
                         tree = KeyedTree(subnode)
                         self.objectLegality[obj] = tree
                         break
                     subnode = subnode.nextSibling
         node = node.nextSibling
Example #7
 def __xml__(self):
     doc = Agent.__xml__(self)
     doc.documentElement.setAttribute('resource',self.resourceName)
     doc.documentElement.setAttribute('verb',self.verbName)
     for obj in self.objects:
         node = doc.createElement('object')
         node.appendChild(doc.createTextNode(obj))
         doc.documentElement.appendChild(node)
     for obj,tree in self.objectLegality.items():
         node = doc.createElement('objectlegal')
         node.setAttribute('object',obj)
         node.appendChild(tree.__xml__().documentElement)
         doc.documentElement.appendChild(node)
     return doc
Example #8
                  'This method can be used to discretize features\' values at each timestep by having a "dummy" agent ' \
                  'that has a single action, i.e., the discretization action. The agent can be placed in the turn ' \
                  'order such that it acts *after* all other agents. ' \
                  'A similar method can be used to have discretized rewards, i.e., to create a reward function whose ' \
                  'value is the discretized value of a feature.'

HIGH = 100
LOW = 50
NUM_BINS = 11
NUM_SAMPLES = 100

if __name__ == '__main__':

    # create world and add agent
    world = World()
    agent = Agent('Agent')
    world.addAgent(agent)

    # add variable
    feat = world.defineState(agent.name, 'x', float, lo=LOW, hi=HIGH)

    # add single action that discretizes the feature
    action = agent.addAction({'verb': '', 'action': 'discretize'})
    tree = makeTree(discretization_tree(world, feat, NUM_BINS))
    world.setDynamics(feat, action, tree)

    world.setOrder([{agent.name}])

    print('====================================')
    print('High:\t{}'.format(HIGH))
    print('Low:\t{}'.format(LOW))
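
The description above also notes that the discretizing "dummy" agent can be placed in the turn order so that it acts *after* all other agents each timestep. A minimal sketch of what that ordering might look like, assuming a scenario with extra (hypothetical) agents beyond the single agent in this example:

    # hypothetical extra agents: ordinary agents act first (in parallel),
    # the discretizing agent acts last in every timestep
    worker1 = Agent('Worker1')
    worker2 = Agent('Worker2')
    world.addAgent(worker1)
    world.addAgent(worker2)
    world.setOrder([{worker1.name, worker2.name}, agent.name])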
Example #9
    def __init__(self, turnOrder):

        self.maxRounds = 8
        self.world = World()
        totals = {'apple': 1, 'pear': 2}
        batna_prePref = totals['apple'] + totals['pear']
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,
                                   'appleOwned',
                                   int,
                                   lo=0,
                                   hi=totals['apple'])
            me.setState('appleOwned', 0)
            self.world.defineState(me.name,
                                   'appleOffered',
                                   int,
                                   lo=0,
                                   hi=totals['apple'])
            me.setState('appleOffered', 0)
            self.world.defineState(me.name,
                                   'pearOwned',
                                   int,
                                   lo=0,
                                   hi=totals['pear'])
            me.setState('pearOwned', 0)
            self.world.defineState(me.name,
                                   'pearOffered',
                                   int,
                                   lo=0,
                                   hi=totals['pear'])
            me.setState('pearOffered', 0)

            self.world.defineState(me.name, 'Batna', int, lo=0, hi=10)
            me.setState('Batna', batna_prePref)
            self.world.defineState(me.name, 'BatnaOwned', int, lo=0, hi=10)
            me.setState('BatnaOwned', 0)

            self.world.defineState(me.name, 'agree', bool)
            me.setState('agree', False)
            # Actions
            me.addAction({'verb': 'do nothing'})
            for amt in range(totals['apple'] + 1):
                tmp = me.addAction({
                    'verb': 'offerApple',
                    'object': other.name,
                    'amount': amt
                })
                me.setLegal(
                    tmp,
                    makeTree({
                        'if': trueRow(stateKey(None, 'agreement')),
                        False: {
                            'if': trueRow(stateKey(None,
                                                   'rejectedNegotiation')),
                            True: False,
                            False: True
                        },
                        True: False
                    }))

            for amt in range(totals['pear'] + 1):
                tmp = me.addAction({
                    'verb': 'offerPear',
                    'object': other.name,
                    'amount': amt
                })
                me.setLegal(
                    tmp,
                    makeTree({
                        'if': trueRow(stateKey(None, 'agreement')),
                        False: {
                            'if': trueRow(stateKey(None,
                                                   'rejectedNegotiation')),
                            True: False,
                            False: True
                        },
                        True: False
                    }))

            meReject = me.addAction({
                'verb': 'rejectNegotiation',
                'object': other.name
            })
            me.setLegal(
                meReject,
                makeTree({
                    'if': trueRow(stateKey(None, 'agreement')),
                    False: {
                        'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                        True: False,
                        False: True
                    },
                    True: False
                }))

            meAccept = me.addAction({
                'verb': 'accept offer',
                'object': other.name
            })
            me.setLegal(
                meAccept,
                makeTree({
                    'if': trueRow(stateKey(None, 'appleOffer')),
                    True: {
                        'if': trueRow(stateKey(None, 'pearOffer')),
                        True: {
                            'if': trueRow(stateKey(None, 'agreement')),
                            False: {
                                'if':
                                trueRow(stateKey(None, 'rejectedNegotiation')),
                                True:
                                False,
                                False:
                                True
                            },
                            True: False
                        },
                        False: False
                    },
                    False: False
                }))
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount', 0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        # Turn order: Uncomment the following if you want agents to act in parallel
        # self.world.setOrder([{agts[0].name,agts[1].name}])
        # Turn order: Uncomment the following if you want agents to act sequentially

        self.world.setOrder(turnOrder)

        # World state
        self.world.defineState(None, 'agreement', bool)
        self.world.setState(None, 'agreement', False)
        self.world.defineState(None, 'appleOffer', bool)
        self.world.setState(None, 'appleOffer', False)
        self.world.defineState(None, 'pearOffer', bool)
        self.world.setState(None, 'pearOffer', False)
        self.world.defineState(
            None,
            'round',
            int,
            description='The current round of the negotiation')
        self.world.setState(None, 'round', 0)
        self.world.defineState(
            None,
            'rejectedNegotiation',
            bool,
            description='Has one of the players walked out?')
        self.world.setState(None, 'rejectedNegotiation', False)

        # don't terminate, so the agent sees the benefit of early agreement
        #    world.addTermination(makeTree({'if': trueRow(stateKey(None,'agreement')),
        #                                   True: True,
        #                                   False: False}))

        #    world.addTermination(makeTree({'if': trueRow(stateKey(None,'rejectedNegotiation')),
        #                                   True: True,
        #                                   False: False}))

        self.world.addTermination(
            makeTree({
                'if':
                thresholdRow(stateKey(None, 'round'), self.maxRounds),
                True:
                True,
                False:
                False
            }))

        # Dynamics of offers
        agents = [david.name, stacy.name]
        for i in range(2):
            for fruit in ['apple', 'pear']:
                atom = Action({
                    'subject': agents[i],
                    'verb': 'offer%s' % (fruit.capitalize()),
                    'object': agents[1 - i]
                })
                parties = [atom['subject'], atom['object']]
                for j in range(2):
                    # Set offer amount
                    offer = stateKey(parties[j], '%sOffered' % (fruit))
                    amount = actionKey('amount') if j == 1 else '%d-%s' % (
                        totals[fruit], actionKey('amount'))
                    tree = makeTree(setToConstantMatrix(offer, amount))
                    self.world.setDynamics(parties[j], '%sOffered' % (fruit),
                                           atom, tree)
                    # reset agree flags whenever an offer is made
                    agreeFlag = stateKey(parties[j], 'agree')
                    tree = makeTree(setFalseMatrix(agreeFlag))
                    self.world.setDynamics(parties[j], 'agree', atom, tree)
                # Offers set offer flag in world state
                tree = makeTree(
                    setTrueMatrix(stateKey(None, '%sOffer' % (fruit))))
                self.world.setDynamics(None, '%sOffer' % (fruit), atom, tree)

        # agents = [david.name,stacy.name]
        # Dynamics of agreements
        for i in range(2):
            atom = Action({
                'subject': agents[i],
                'verb': 'accept offer',
                'object': agents[1 - i]
            })

            # accept offer sets accept
            tree = makeTree(setTrueMatrix(stateKey(atom['subject'], 'agree')))
            self.world.setDynamics(atom['subject'], 'agree', atom, tree)

            # accept offer sets agreement if object has accepted
            tree = makeTree({
                'if': trueRow(stateKey(atom['object'], 'agree')),
                True: setTrueMatrix(stateKey(None, 'agreement')),
                False: noChangeMatrix(stateKey(None, 'agreement'))
            })
            self.world.setDynamics(None, 'agreement', atom, tree)

            # Accepting offer sets ownership
            parties = [atom['subject'], atom['object']]
            for fruit in ['apple', 'pear']:
                # atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})
                for j in range(2):
                    offer = stateKey(parties[j], '%sOffered' % (fruit))
                    owned = stateKey(parties[j], '%sOwned' % (fruit))
                    tree = makeTree({
                        'if':
                        trueRow(stateKey(atom['object'], 'agree')),
                        False:
                        noChangeMatrix(owned),
                        True:
                        setToFeatureMatrix(owned, offer)
                    })
                    self.world.setDynamics(parties[j], '%sOwned' % (fruit),
                                           atom, tree)
            # rejecting gives us the BATNA and ends the negotiation
            atom = Action({
                'subject': agents[i],
                'verb': 'rejectNegotiation',
                'object': agents[1 - i]
            })

            tree = makeTree(
                setToFeatureMatrix(stateKey(atom['subject'], 'BatnaOwned'),
                                   stateKey(atom['subject'], 'Batna')))
            self.world.setDynamics(atom['subject'], 'BatnaOwned', atom, tree)

            tree = makeTree(
                setToFeatureMatrix(stateKey(atom['object'], 'BatnaOwned'),
                                   stateKey(atom['object'], 'Batna')))
            self.world.setDynamics(atom['object'], 'BatnaOwned', atom, tree)

            tree = makeTree(
                setTrueMatrix(stateKey(None, 'rejectedNegotiation')))
            self.world.setDynamics(None, 'rejectedNegotiation', atom, tree)

        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None, 'round'), 1))
            self.world.setDynamics(None, 'round', action, tree)
        for agent in self.world.agents.values():
            agent.addModel('pearLover', R={}, level=2, rationality=0.01)
            agent.addModel('appleLover', R={}, level=2, rationality=0.01)
Example #10
def setup():
    global args

    np.random.seed(args.seed)
    # create world and add agents
    world = World()
    world.memory = False
    world.parallel = args.parallel
    agents = []
    agent_features = {}
    for ag in range(args.agents):
        agent = Agent('Agent' + str(ag))
        world.addAgent(agent)
        agents.append(agent)

        # set agent's params
        agent.setAttribute('discount', 1)
        agent.setHorizon(args.horizon)

        # add features, initialize at random
        features = []
        agent_features[agent] = features
        for f in range(args.features_agent):
            feat = world.defineState(agent.name, 'Feature{}'.format(f), int, lo=0, hi=1000)
            world.setFeature(feat, np.random.randint(0, MAX_FEATURE_VALUE))
            features.append(feat)

        # set random reward function
        agent.setReward(maximizeFeature(np.random.choice(features), agent.name), 1)

        # add mental copy of true model and make it static (we do not have beliefs in the models)
        agent.addModel(get_fake_model_name(agent), parent=get_true_model_name(agent))
        agent.setAttribute('static', True, get_fake_model_name(agent))

        # add actions
        for ac in range(args.actions):
            action = agent.addAction({'verb': '', 'action': 'Action{}'.format(ac)})
            i = ac
            while i + args.features_action < args.features_agent:

                weights = {}
                for j in range(args.features_action):
                    weights[features[i + j + 1]] = 1
                tree = makeTree(multi_set_matrix(features[i], weights))
                world.setDynamics(features[i], action, tree)

                i += args.features_action

    # define order
    world.setOrder([set(ag.name for ag in agents)])

    for agent in agents:
        # test belief update:
        # - set a belief in one feature to the actual initial value (should not change outcomes)
        # world.setModel(agent.name, Distribution({True: 1.0}))
        rand_feat = np.random.choice(agent_features[agent])
        agent.setBelief(rand_feat, world.getValue(rand_feat))
        print('{} will always observe {}={}'.format(agent.name, rand_feat, world.getValue(rand_feat)))

    # set mental model of each agent in all other agents
    for i in range(args.agents):
        for j in range(i + 1, args.agents):
            world.setMentalModel(agents[i].name, agents[j].name, Distribution({get_fake_model_name(agents[j]): 1}))
            world.setMentalModel(agents[j].name, agents[i].name, Distribution({get_fake_model_name(agents[i]): 1}))

    return world
Example #11
__email__ = '*****@*****.**'
__description__ = 'Example of how to discretize a feature\'s value in-place using a helper function. ' \
                  'Discretization occurs by directly approximating the value of the feature to the closest bin from ' \
                  'a discrete set of bins. ' \
                  'This method can be used to discretize features\' values at each timestep after world.update().'

HIGH = 100
LOW = 50
NUM_BINS = 11
NUM_SAMPLES = 100

if __name__ == '__main__':

    # create world and add agent
    world = World()
    agent = Agent('Agent')
    world.addAgent(agent)

    # add feature to world
    feat = world.defineState(agent.name, 'x', float, lo=LOW, hi=HIGH)

    print('====================================')
    print('High:\t{}'.format(HIGH))
    print('Low:\t{}'.format(LOW))
    print('Bins:\t{}'.format(NUM_BINS))

    print('\nSamples:')
    values_original = []
    values_discrete = []
    for i in range(NUM_SAMPLES):
        num = np.random.uniform(LOW, HIGH)
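
The snippet is cut off here by the example viewer. A hedged sketch of how the loop body might continue, using plain nearest-bin rounding in place of whatever helper the description refers to (the rounding below is an assumption, not the library's helper):

        # assumed continuation: snap the sample to the nearest of NUM_BINS
        # equally spaced bins in [LOW, HIGH], then store it back in the feature
        bin_width = (HIGH - LOW) / (NUM_BINS - 1)
        discrete = LOW + round((num - LOW) / bin_width) * bin_width
        world.setFeature(feat, discrete)
        values_original.append(num)
        values_discrete.append(discrete)
        print('{:3}: {:8.2f} -> {:8.2f}'.format(i, num, discrete))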
Example #12
def createWorld(username='******',
                level=0,
                ability='good',
                explanation='none',
                embodiment='robot',
                acknowledgment='no',
                sequence=False,
                root='.',
                ext='xml',
                beliefs=True):
    """
    Creates the initial PsychSim scenario and saves it
    @param username: name of user ID to use in filenames
    @param level: robot mission level to use as template
    @param ability: the level of the robot's ability
      - good or C{True}: perfect sensors and sensor model
      - badSensor or C{False}: noisy sensors, but perfect model of noisy sensors
      - badModel: perfect sensors, but imperfect model of sensors
    @type ability: bool
    @param explanation: the type of explanation to use
      - none: No explanations
      - ability: Explanation based on robot ability provided.
      - abilitybenevolence: Explanation based on both robot's ability and benevolence provided.
    @type explanation: str
    @param embodiment: the robot's embodiment
      - robot: The robot looks like a robot
      - dog: The robot looks like a dog
    @type embodiment: str
    @param acknowledgment: the robot's behavior regarding the acknowledgment of errors
      - no: The robot does not acknowledge its errors
      - yes: The robot acknowledges its errors
    @type acknowledgment: str
    @param root: the root directory to use for files (default is current working directory)
    @param ext: the file extension for the PsychSim scenario file
      - xml: Save as uncompressed XML
      - psy: Save as bzipped XML
    @type ext: str
    @param beliefs: if C{True}, store robot's uncertain beliefs in scenario file, rather than compute them on the fly. Storing in scenario file makes the scenario a more complete model, but greatly increases file size. Default is C{True}
    @type beliefs: bool
    """

    print('**************************createWorld***********************')
    print('Username:\t%s\nLevel:\t\t%s' % (username, level + 1))
    print('Ability:\t%s\nExplanation:\t%s\nEmbodiment:\t%s\nAcknowledge:\t%s' %
          (ability, explanation, embodiment, acknowledgment))

    # Pre-compute symbols for this level's waypoints
    for point in WAYPOINTS[level]:
        if 'symbol' not in point:
            point['symbol'] = point['name'].replace(' ', '')

    world = World()

    world.defineState(
        None,
        'level',
        int,
        lo=0,
        hi=len(WAYPOINTS) - 1,
        description='Static variable indicating what mission level')
    world.setState(None, 'level', level)

    world.defineState(None, 'time', float)
    world.setState(None, 'time', 0.)

    world.defineState(None, 'complete', bool)
    world.setState(None, 'complete', False)
    world.addTermination(
        makeTree({
            'if': trueRow('complete'),
            True: True,
            False: False
        }))

    # Buildings
    threats = ['none', 'NBC', 'armed']
    for waypoint in WAYPOINTS[level]:
        if 'symbol' not in waypoint:
            waypoint['symbol'] = waypoint['name'].replace(' ', '')
        world.addAgent(Agent(waypoint['symbol']))
        # Have we visited this waypoint?
        key = world.defineState(waypoint['symbol'], 'visited', bool)
        world.setFeature(key, False)
        # Are there dangerous chemicals or armed people here?
        key = world.defineState(waypoint['symbol'], 'danger', list, threats[:])
        if waypoint.get('NBC'):
            world.setFeature(key, 'NBC')
        elif waypoint.get('armed'):
            world.setFeature(key, 'armed')
        else:
            world.setFeature(key, 'none')
        key = world.defineState(waypoint['symbol'], 'recommendation', list,
                                ['none', 'protected', 'unprotected'])
        world.setFeature(key, 'none')

    # Human
    human = Agent('human')
    world.addAgent(human)

    world.defineState(human.name, 'alive', bool)
    human.setState('alive', True)
    world.defineState(human.name, 'deaths', int)
    human.setState('deaths', 0)

    # Robot
    robot = Agent('robot')
    world.addAgent(robot)

    # Robot states
    world.defineState(robot.name, 'waypoint', list,
                      [point['symbol'] for point in WAYPOINTS[level]])
    robot.setState('waypoint', WAYPOINTS[level][getStart(level)]['symbol'])

    world.defineState(robot.name, 'explanation', list, [
        'none', 'ability', 'abilitybenevolence', 'abilityconfidence',
        'confidence'
    ])
    robot.setState('explanation', explanation)

    world.defineState(robot.name, 'embodiment', list, ['robot', 'dog'])
    robot.setState('embodiment', embodiment)

    world.defineState(robot.name, 'acknowledgment', list, ['no', 'yes'])
    robot.setState('acknowledgment', acknowledgment)

    world.defineState(robot.name, 'ability', list,
                      ['badSensor', 'badModel', 'good'])
    if ability is True:
        # Backward compatibility with boolean ability
        ability = 'good'
    elif ability is False:
        ability = 'badSensor'
    robot.setState('ability', ability)

    # State of the robot's sensors
    world.defineState(robot.name, 'sensorModel', list, ['good', 'bad'])
    robot.setState('sensorModel', 'good')

    world.defineState(robot.name, 'command', list, ['none'] +
                      [point['symbol'] for point in WAYPOINTS[level]])
    robot.setState('command', 'none')

    # Actions
    for end in range(len(WAYPOINTS[level])):
        symbol = WAYPOINTS[level][end]['symbol']
        # Robot movement
        action = robot.addAction({'verb': 'moveto', 'object': symbol})
        # Legal if no contradictory command
        tree = makeTree({
            'if': equalRow(stateKey(robot.name, 'command'), 'none'),
            True: True,
            False: {
                'if': equalRow(stateKey(robot.name, 'command'), symbol),
                True: True,
                False: False
            }
        })
        robot.setLegal(action, tree)
        # Dynamics of robot's location
        tree = makeTree(
            setToConstantMatrix(stateKey(action['subject'], 'waypoint'),
                                symbol))
        world.setDynamics(stateKey(action['subject'], 'waypoint'), action,
                          tree)
        # Dynamics of visited flag
        key = stateKey(symbol, 'visited')
        tree = makeTree(setTrueMatrix(key))
        world.setDynamics(key, action, tree)
        # Dynamics of time
        key = stateKey(None, 'time')
        tree = setToConstantMatrix(key, 0.)
        for start in range(len(WAYPOINTS[level])):
            if start != end:
                startsymbol = WAYPOINTS[level][start]['symbol']
                if sequence:
                    # Distance is measured by level sequence
                    distance = abs(start - end) * 50
                else:
                    try:
                        distance = DISTANCES[WAYPOINTS[level][start]['name']][
                            WAYPOINTS[level][end]['name']]
                    except KeyError:
                        try:
                            distance = DISTANCES[WAYPOINTS[level][end][
                                'name']][WAYPOINTS[level][start]['name']]
                        except KeyError:
                            distance = 250
                tree = {
                    'if':
                    equalRow(stateKey(action['subject'], 'waypoint'),
                             startsymbol),
                    True:
                    setToConstantMatrix(key,
                                        float(distance) / 1000.),
                    False:
                    tree
                }
        world.setDynamics(key, action, makeTree(tree))
        # Human entry: Dead or alive if unprotected?
        key = stateKey(human.name, 'alive')
        action = robot.addAction({
            'verb': 'recommend unprotected',
            'object': symbol
        })
        tree = makeTree({
            'if': equalRow(stateKey(symbol, 'danger'), 'none'),
            True: setTrueMatrix(key),
            False: setFalseMatrix(key)
        })
        world.setDynamics(key, action, tree)
        robot.setLegal(action, makeTree(False))
        # Human entry: How much "time" if protected?
        action = robot.addAction({
            'verb': 'recommend protected',
            'object': symbol
        })
        key = stateKey(None, 'time')
        world.setDynamics(key, action, makeTree(setToConstantMatrix(key,
                                                                    0.25)))
        robot.setLegal(action, makeTree(False))

    # Robot goals
    goal = minimizeFeature(stateKey(None, 'time'))
    robot.setReward(goal, 2.)

    goal = maximizeFeature(stateKey(human.name, 'alive'))
    robot.setReward(goal, 1.)

    for point in WAYPOINTS[level]:
        robot.setReward(maximizeFeature(stateKey(point['symbol'], 'visited')),
                        1.)

    if beliefs:
        #        omega = 'danger'
        world.defineVariable(robot.name, ActionSet)
        # Robot beliefs
        world.setModel(robot.name, True)
        value = 1. / float(len(WAYPOINTS[level]))
        #        tree = KeyedVector({CONSTANT: world.value2float(omega,'none')})
        for index in range(len(WAYPOINTS[level])):
            waypoint = WAYPOINTS[level][index]
            key = stateKey(waypoint['symbol'], 'danger')
            #        if index > 0:
            # Starting state is safe
            robot.setBelief(
                key,
                psychsim.probability.Distribution({
                    'NBC': value / 2.,
                    'armed': value / 2.,
                    'none': 1. - value
                }))
            # Observation function


#            tree = {'if': equalRow(stateKey(robot.name,'waypoint'),waypoint['symbol']),
#                    True: generateO(world,key),
#                    False: tree}
#        robot.defineObservation(omega,makeTree(tree),domain=list,lo=['none','NBC','armed'])
        robot.defineObservation('microphone',
                                makeTree(None),
                                None,
                                domain=list,
                                lo=['nobody', 'friendly', 'suspicious'])
        robot.defineObservation('NBCsensor', makeTree(None), None, domain=bool)
        robot.defineObservation('camera', makeTree(None), None, domain=bool)
    else:
        robot.defineObservation('microphone',
                                makeTree(None),
                                None,
                                domain=list,
                                lo=['nobody', 'friendly', 'suspicious'])
        robot.defineObservation('NBCsensor', makeTree(None), None, domain=bool)
        robot.defineObservation('camera', makeTree(None), None, domain=bool)
    robot.setAttribute('horizon', 1)

    world.setOrder([robot.name])

    filename = getFilename(username, level, ext, root)

    world.save(filename, ext == 'psy')
    WriteLogData('%s user %s, level %d, ability %s, explanation %s' % \
                     (CREATE_TAG,username,level,ability,explanation),username,level,root=root)
    return world
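
For reference, a call exercising the options documented in the docstring might look like the following; the username is an illustrative placeholder, and the call assumes WAYPOINTS defines the chosen level:

world = createWorld(username='user01', level=0, ability='badSensor',
                    explanation='ability', embodiment='dog',
                    acknowledgment='yes', ext='psy')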
Example #13
    def __init__(self, turnOrder, maxRounds, payoff):

        self.maxRounds = maxRounds
        self.payoff = payoff
        print(self.payoff)
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name, 'money', int)
            me.setState('money', 0)
            mePass = me.addAction({'verb': 'pass', 'object': other.name})
            meTake = me.addAction({'verb': 'take', 'object': other.name})
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount', 1.)
            # me.setParameter('discount',0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(turnOrder)
        # World state
        self.world.defineState(None,
                               'round',
                               int,
                               description='The current round')
        self.world.setState(None, 'round', 0)
        self.world.defineState(None,
                               'gameOver',
                               bool,
                               description='whether game is over')
        self.world.setState(None, 'gameOver', False)

        self.world.addTermination(
            makeTree({
                'if':
                thresholdRow(stateKey(None, 'round'), self.maxRounds),
                True:
                True,
                False: {
                    'if': trueRow(stateKey(None, 'gameOver')),
                    True: True,
                    False: False
                }
            }))

        # Dynamics
        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None, 'round'), 1))
            self.world.setDynamics(None, 'round', action, tree)
            if (action['verb'] == 'take'):
                tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
                self.world.setDynamics(None, 'gameOver', action, tree)
                agts = ['Stacy', 'David']
                for i in range(2):
                    key = stateKey(agts[i], 'money')
                    tree = makeTree(
                        self.buildPayoff(0, key, self.payoff[agts[i]]))
                    self.world.setDynamics(agts[i], 'money', action, tree)
            elif action['verb'] == 'pass':
                agts = ['Stacy', 'David']
                for i in range(2):
                    key = stateKey(agts[i], 'money')
                    tree = makeTree({
                        'if':
                        equalRow(stateKey(None, 'round'), self.maxRounds - 1),
                        True:
                        setToConstantMatrix(
                            key, self.payoff[agts[i]][self.maxRounds]),
                        False:
                        noChangeMatrix(key)
                    })
                    self.world.setDynamics(agts[i], 'money', action, tree)

# really need to ask david about these levels - if adding models with levels, can
# the true model point to these but have a different level

        for agent in self.world.agents.values():
            agent.addModel('Christian',
                           R={},
                           level=2,
                           rationality=10.,
                           selection='distribution')
            agent.addModel('Capitalist',
                           R={},
                           level=2,
                           rationality=10.,
                           selection='distribution')
Example #14
def scenarioCreationUseCase(enemy='Sylvania',model='powell',web=False,
                            fCollapse=None,sCollapse=None,maxRounds=15):
    """
    An example of how to create a scenario
    @param enemy: the name of the agent-controlled side, i.e., Freedonia's opponent (default: Sylvania)
    @type enemy: str
    @param model: which model do we use (default is "powell")
    @type model: powell or slantchev
    @param web: if C{True}, then create the web-based experiment scenario (default: C{False})
    @type web: bool
    @param fCollapse: the probability that Freedonia collapses (under powell, default: 0.1) or loses battle (under slantchev, default: 0.7)
    @type fCollapse: float
    @param sCollapse: the probability that Sylvania collapses, under powell (default: 0.1)
    @type sCollapse: float
    @param maxRounds: the maximum number of game rounds (default: 15)
    @type maxRounds: int
    @return: the scenario created
    @rtype: L{World}
    """
    # Handle defaults for battle probabilities, under each model
    posLo = 0
    posHi = 10
    if fCollapse is None:
        if model == 'powell':
            fCollapse = 0.1
        elif model == 'slantchev':
            fCollapse = 0.7
    if sCollapse is None:
        sCollapse = 0.1

    # Create scenario
    world = World()

    # Agents
    free = Agent('Freedonia')
    world.addAgent(free)
    sylv = Agent(enemy)
    world.addAgent(sylv)

    # User state
    world.defineState(free.name,'troops',int,lo=0,hi=50000,
                      description='Number of troops you have left')
    free.setState('troops',40000)
    world.defineState(free.name,'territory',int,lo=0,hi=100,
                      description='Percentage of disputed territory owned by you')
    free.setState('territory',15)
    world.defineState(free.name,'cost',int,lo=0,hi=50000,
                      description='Number of troops %s loses in an attack' % (free.name))
    free.setState('cost',2000)
    world.defineState(free.name,'position',int,lo=posLo,hi=posHi,
                      description='Current status of war (%d=%s is winner, %d=you are winner)' % (posLo,sylv.name,posHi))
    free.setState('position',5)
    world.defineState(free.name,'offered',int,lo=0,hi=100,
                      description='Percentage of disputed territory that %s last offered to you' % (sylv.name))
    free.setState('offered',0)
    if model == 'slantchev':
        # Compute new value for territory only *after* computing new value for position
        world.addDependency(stateKey(free.name,'territory'),stateKey(free.name,'position'))

    # Agent state
    world.defineState(sylv.name,'troops',int,lo=0,hi=500000,
                      description='Number of troops %s has left' % (sylv.name))
    sylv.setState('troops',30000)
    world.defineState(sylv.name,'cost',int,lo=0,hi=50000,
                      description='Number of troops %s loses in an attack' % (sylv.name))
    sylv.setState('cost',2000)
    world.defineState(sylv.name,'offered',int,lo=0,hi=100,
                      description='Percentage of disputed territory that %s last offered to %s' % (free.name,sylv.name))
    sylv.setState('offered',0)

    # World state
    world.defineState(None,'treaty',bool,
                      description='Have the two sides reached an agreement?')
    world.setState(None,'treaty',False)
    # Stage of negotiation, illustrating the use of an enumerated state feature
    world.defineState(None,'phase',list,['offer','respond','rejection','end','paused','engagement'],
                      description='The current stage of the negotiation game')
    world.setState(None,'phase','paused')
    # Game model, static descriptor
    world.defineState(None,'model',list,['powell','slantchev'],
                      description='The model underlying the negotiation game')
    world.setState(None,'model',model)
    # Round of negotiation
    world.defineState(None,'round',int,description='The current round of the negotiation')
    world.setState(None,'round',0)

    if not web:
        # Relationship value
        key = world.defineRelation(free.name,sylv.name,'trusts')
        world.setFeature(key,0.)
    # Game over if there is a treaty
    world.addTermination(makeTree({'if': trueRow(stateKey(None,'treaty')),
                                   True: True, False: False}))
    # Game over if Freedonia has no territory
    world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),1),
                                   True: False, False: True}) )
    # Game over if Freedonia has all the territory
    world.addTermination(makeTree({'if': thresholdRow(stateKey(free.name,'territory'),99),
                                   True: True, False: False})) 
    # Game over if number of rounds exceeds limit
    world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),maxRounds),
                                   True: True, False: False}))

    # Turn order: Uncomment the following if you want agents to act in parallel
#    world.setOrder([set(world.agents.keys())])
    # Turn order: Uncomment the following if you want agents to act sequentially
    world.setOrder([free.name,sylv.name])

    # User actions
    freeBattle = free.addAction({'verb': 'attack','object': sylv.name})
    for amount in range(20,100,20):
        free.addAction({'verb': 'offer','object': sylv.name,'amount': amount})
    if model == 'powell':
        # Powell has null stages
        freeNOP = free.addAction({'verb': 'continue'})
    elif model == 'slantchev':
        # Slantchev has both sides receiving offers
        free.addAction({'verb': 'accept offer','object': sylv.name})
        free.addAction({'verb': 'reject offer','object': sylv.name})

    # Agent actions
    sylvBattle = sylv.addAction({'verb': 'attack','object': free.name})
    sylvAccept = sylv.addAction({'verb': 'accept offer','object': free.name})
    sylvReject = sylv.addAction({'verb': 'reject offer','object': free.name})
    if model == 'powell':
        # Powell has null stages
        sylvNOP = sylv.addAction({'verb': 'continue'})
    elif model == 'slantchev':
        # Slantchev has both sides making offers
        for amount in range(10,100,10):
            sylv.addAction({'verb': 'offer','object': free.name,'amount': amount})

    # Restrictions on when actions are legal, based on phase of game
    for action in filterActions({'verb': 'offer'},free.actions | sylv.actions):
        agent = world.agents[action['subject']]
        agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'offer'),
                                        True: True,     # Offers are legal in the offer phase
                                        False: False})) # Offers are illegal in all other phases
    if model == 'powell':
        # Powell has a special rejection phase
        for action in [freeNOP,freeBattle]:
            free.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'rejection'),
                                           True: True,     # Attacking and doing nothing are legal only in rejection phase
                                           False: False})) # Attacking and doing nothing are illegal in all other phases

    # Once offered, agent can respond
    if model == 'powell':
        # Under Powell, only Sylvania has to respond, and it can attack
        responses = [sylvBattle,sylvAccept,sylvReject]
    elif model == 'slantchev':
        # Under Slantchev, only accept/reject
        responses = filterActions({'verb': 'accept offer'},free.actions | sylv.actions)
        responses += filterActions({'verb': 'reject offer'},free.actions | sylv.actions)
    for action in responses:
        agent = world.agents[action['subject']]
        agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'respond'),
                                        True: True,     # Offeree must act in the response phase
                                        False: False})) # Offeree cannot act in any other phase

    if model == 'powell':
        # NOP is legal in exactly opposite situations to all other actions
        sylv.setLegal(sylvNOP,makeTree({'if': equalRow(stateKey(None,'phase'),'end'),
                                        True: True,     # Sylvania does not do anything in the null phase after Freedonia responds to rejection
                                        False: False})) # Sylvania must act in its other phases
    if model == 'slantchev':
        # Attacking legal only under engagement phase
        for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
            agent = world.agents[action['subject']]
            agent.setLegal(action,makeTree({'if': equalRow(stateKey(None,'phase'),'engagement'),
                                            True: True,     # Attacking legal only in engagement
                                            False: False})) # Attacking illegal in all other phases

    # Goals for Freedonia
    goalFTroops = maximizeFeature(stateKey(free.name,'troops'))
    free.setReward(goalFTroops,1.)
    goalFTerritory = maximizeFeature(stateKey(free.name,'territory'))
    free.setReward(goalFTerritory,1.)

    # Goals for Sylvania
    goalSTroops = maximizeFeature(stateKey(sylv.name,'troops'))
    sylv.setReward(goalSTroops,1.)
    goalSTerritory = minimizeFeature(stateKey(free.name,'territory'))
    sylv.setReward(goalSTerritory,1.)

    # Possible goals applicable to both
    goalAgreement = maximizeFeature(stateKey(None,'treaty'))

    # Silly goal, provided as an example of an achievement goal
    goalAchieve = achieveFeatureValue(stateKey(None,'phase'),'respond')

    # Horizons
    if model == 'powell':
        free.setAttribute('horizon',4)
        sylv.setAttribute('horizon',4)
    elif model == 'slantchev':
        free.setAttribute('horizon',6)
        sylv.setAttribute('horizon',6)

    # Discount factors
    free.setAttribute('discount',-1)
    sylv.setAttribute('discount',-1)

    # Levels of belief
    free.setRecursiveLevel(2)
    sylv.setRecursiveLevel(2)

    # Dynamics of battle
    freeTroops = stateKey(free.name,'troops')
    freeTerr = stateKey(free.name,'territory')
    sylvTroops = stateKey(sylv.name,'troops')
    # Effect of fighting
    for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
        # Effect on troops (cost of battle)
        tree = makeTree(addFeatureMatrix(freeTroops,stateKey(free.name,'cost'),-1.))
        world.setDynamics(freeTroops,action,tree,enforceMin=not web)
        tree = makeTree(addFeatureMatrix(sylvTroops,stateKey(sylv.name,'cost'),-1.))
        world.setDynamics(sylvTroops,action,tree,enforceMin=not web)
        if model == 'powell':
            # Effect on territory (probability of collapse)
            tree = makeTree({'distribution': [
                        ({'distribution': [(setToConstantMatrix(freeTerr,100),1.-fCollapse), # Sylvania collapses, Freedonia does not
                                           (noChangeMatrix(freeTerr),         fCollapse)]},  # Both collapse
                         sCollapse),
                        ({'distribution': [(setToConstantMatrix(freeTerr,0),fCollapse),      # Freedonia collapses, Sylvania does not
                                           (noChangeMatrix(freeTerr),       1.-fCollapse)]}, # Neither collapses
                         1.-sCollapse)]})
            world.setDynamics(freeTerr,action,tree)
        elif model == 'slantchev':
            # Effect on position
            pos = stateKey(free.name,'position')
            tree = makeTree({'distribution': [(incrementMatrix(pos,1),1.-fCollapse), # Freedonia wins battle
                                              (incrementMatrix(pos,-1),fCollapse)]}) # Freedonia loses battle
            world.setDynamics(pos,action,tree)
            # Effect on territory
            tree = makeTree({'if': thresholdRow(pos,posHi-.5), 
                             True: setToConstantMatrix(freeTerr,100),          # Freedonia won
                             False: {'if': thresholdRow(pos,posLo+.5),
                                     True: noChangeMatrix(freeTerr),
                                     False: setToConstantMatrix(freeTerr,0)}}) # Freedonia lost
            world.setDynamics(freeTerr,action,tree)

    # Dynamics of offers
    for index in range(2):
        atom = Action({'subject': list(world.agents.keys())[index],'verb': 'offer',
                       'object': list(world.agents.keys())[1-index]})
        if atom['subject'] == free.name or model != 'powell':
            offer = stateKey(atom['object'],'offered')
            amount = actionKey('amount')
            tree = makeTree({'if': trueRow(stateKey(None,'treaty')),
                             True: noChangeMatrix(offer),
                             False: setToConstantMatrix(offer,amount)})
            world.setDynamics(offer,atom,tree,enforceMax=not web)

    # Dynamics of treaties
    for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
        # Accepting an offer means that there is now a treaty
        key = stateKey(None,'treaty')
        tree = makeTree(setTrueMatrix(key))
        world.setDynamics(key,action,tree)
        # Accepting offer sets territory
        offer = stateKey(action['subject'],'offered')
        territory = stateKey(free.name,'territory')
        if action['subject'] == free.name:
            # Freedonia accepts sets territory to last offer
            tree = makeTree(setToFeatureMatrix(territory,offer))
            world.setDynamics(freeTerr,action,tree)
        else:
            # Sylvania accepts sets territory to 1-last offer
            tree = makeTree(setToFeatureMatrix(territory,offer,pct=-1.,shift=100.))
            world.setDynamics(freeTerr,action,tree)

    # Dynamics of phase
    phase = stateKey(None,'phase')
    roundKey = stateKey(None,'round')
    # OFFER -> RESPOND
    for index in range(2):
        action = Action({'subject': list(world.agents.keys())[index],'verb': 'offer',
                         'object': list(world.agents.keys())[1-index]})
        if action['subject'] == free.name or model != 'powell':
            tree = makeTree(setToConstantMatrix(phase,'respond'))
            world.setDynamics(phase,action,tree)
    # RESPOND -> REJECTION or ENGAGEMENT
    for action in filterActions({'verb': 'reject offer'},free.actions | sylv.actions):
        if model == 'powell':
            tree = makeTree(setToConstantMatrix(phase,'rejection'))
        elif model == 'slantchev':
            tree = makeTree(setToConstantMatrix(phase,'engagement'))
        world.setDynamics(phase,action,tree)
    # accepting -> OFFER
    for action in filterActions({'verb': 'accept offer'},free.actions | sylv.actions):
        tree = makeTree(setToConstantMatrix(phase,'offer'))
        world.setDynamics(phase,action,tree)
    # attacking -> OFFER
    for action in filterActions({'verb': 'attack'},free.actions | sylv.actions):
        tree = makeTree(setToConstantMatrix(phase,'offer'))
        world.setDynamics(phase,action,tree)
        if action['subject'] == sylv.name or model == 'slantchev':
            tree = makeTree(incrementMatrix(roundKey,1))
            world.setDynamics(roundKey,action,tree)
    if model == 'powell':
        # REJECTION -> END
        for atom in [freeNOP,freeBattle]:
            tree = makeTree(setToConstantMatrix(phase,'end'))
            world.setDynamics(phase,atom,tree)
        # END -> OFFER
        atom =  Action({'subject': sylv.name,'verb': 'continue'})
        tree = makeTree(setToConstantMatrix(phase,'offer'))
        world.setDynamics(phase,atom,tree)
        tree = makeTree(incrementMatrix(roundKey,1))
        world.setDynamics(roundKey,atom,tree)


    if not web:
        # Relationship dynamics: attacking is bad for trust
        atom =  Action({'subject': sylv.name,'verb': 'attack','object': free.name})
        key = binaryKey(free.name,sylv.name,'trusts')
        tree = makeTree(approachMatrix(key,0.1,-1.))
        world.setDynamics(key,atom,tree)
    # Handcrafted policy for Freedonia
#    free.setPolicy(makeTree({'if': equalRow('phase','respond'),
#                             # Accept an offer greater than 50
#                             True: {'if': thresholdRow(stateKey(free.name,'offered'),50),
#                                    True: Action({'subject': free.name,'verb': 'accept offer','object': sylv.name}),
#                                    False: Action({'subject': free.name,'verb': 'reject offer','object': sylv.name})},
#                             False: {'if': equalRow('phase','engagement'),
#                             # Attack during engagement phase
#                                     True: Action({'subject': free.name,'verb': 'attack','object': sylv.name}),
#                             # Agent decides what to do otherwise
#                                     False: False}}))
        # Mental models of enemy
        # Example of creating a model with incorrect reward all at once (a version of Freedonia who cares about reaching agreement as well)
        # sylv.addModel('false',R={goalSTroops: 10.,goalSTerritory: 1.,goalAgreement: 1.},
        #              rationality=1.,selection='distribution',parent=True)
        # Example of creating a model with incorrect beliefs
        sylv.addModel('false',rationality=10.,selection='distribution',parent=True)
        key = stateKey(free.name,'position')
        # Sylvania believes position to be fixed at 3
        sylv.setBelief(key,3,'false')

        # Freedonia is truly unsure about position (50% chance of being 7, 50% of being 3)
        world.setModel(free.name,True)
        free.setBelief(key,Distribution({7: 0.5,3: 0.5}),True)
        # Observations about military position
        tree = makeTree({'if': thresholdRow(key,1),
                         True: {'if': thresholdRow(key,9),
                                True: {'distribution': [(KeyedVector({key: 1}),0.9),
                                                        (KeyedVector({key: 1,CONSTANT: -1}),0.1)]},
                                False: {'distribution': [(KeyedVector({key: 1}),0.8),
                                                         (KeyedVector({key: 1,CONSTANT: -1}),0.1),
                                                         (KeyedVector({key: 1,CONSTANT: 1}),0.1)]}},
                         False: {'distribution': [(KeyedVector({key: 1}),0.9),
                                                  (KeyedVector({key: 1,CONSTANT: 1}),0.1)]}})
        free.defineObservation(key,tree)

        # Example of setting model parameters separately
        sylv.addModel('true',parent=True)
        sylv.setAttribute('rationality',10.,'true') # Override real agent's rationality with this value
        sylv.setAttribute('selection','distribution','true')
        world.setMentalModel(free.name,sylv.name,{'false': 0.9,'true': 0.1})
        
        # Goal of fooling Sylvania
        goalDeception = achieveFeatureValue(modelKey(sylv.name),sylv.model2index('false'))
    return world
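
For reference, a call that selects the Slantchev variant with its documented defaults (fCollapse 0.7, sCollapse 0.1, 15 rounds) might look like this; the call is illustrative only:

world = scenarioCreationUseCase(model='slantchev')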
Example #15
import sys
from configparser import ConfigParser
from optparse import OptionParser

from psychsim.pwl import *
from psychsim.action import Action, ActionSet
from psychsim.world import World, stateKey, actionKey
from psychsim.agent import Agent

if __name__ == '__main__':

    # Create scenario
    maxRounds = 8
    world = World()
    totals = {'scotch': 1, 'tequila': 1}  #  1 and 1
    stacy = Agent('Stacy')
    david = Agent('David')
    agts = [stacy, david]

    # Player state, actions and parameters common to both players
    for i in range(2):
        me = agts[i]
        other = agts[1 - i]
        world.addAgent(me)
        # State
        world.defineState(me.name,
                          'scotchOwned',
                          int,
                          lo=0,
                          hi=totals['scotch'])
        me.setState('scotchOwned', 0)
Example #16
    def __init__(self):
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]
        totalAmt = 4
        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1 - i]
            self.world.addAgent(me)
            self.world.defineState(me.name, 'offered', int, lo=0, hi=totalAmt)
            self.world.defineState(me.name, 'money', int, lo=0, hi=totalAmt)
            me.setState('offered', 0)
            me.setState('money', 0)
            if (me.name == 'Stacy'):
                for amt in range(totalAmt + 1):
                    me.addAction({
                        'verb': 'offer',
                        'object': other.name,
                        'amount': amt
                    })
            else:
                mePass = me.addAction({'verb': 'accept', 'object': other.name})
                mePass = me.addAction({'verb': 'reject', 'object': other.name})
            # Parameters
            me.setHorizon(2)
            me.setParameter('discount', 0.9)
            # me.setParameter('discount',1.0)

        # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(['Stacy', 'David'])

        # World state
        self.world.defineState(None,
                               'gameOver',
                               bool,
                               description='Has the game ended?')
        self.world.setState(None, 'gameOver', False)

        self.world.addTermination(
            makeTree({
                'if': trueRow(stateKey(None, 'gameOver')),
                True: True,
                False: False
            }))
        # offer dynamics
        atom = Action({'subject': 'Stacy', 'verb': 'offer', 'object': 'David'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j], 'offered')
            amount = actionKey('amount') if j == 1 else '%d-%s' % (
                totalAmt, actionKey('amount'))
            tree = makeTree(setToConstantMatrix(offer, amount))
            self.world.setDynamics(parties[j], 'offered', atom, tree)
        # accept dynamics
        atom = Action({
            'subject': 'David',
            'verb': 'accept',
            'object': 'Stacy'
        })
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j], 'offered')
            money = stateKey(parties[j], 'money')
            tree = makeTree(setToFeatureMatrix(money, offer))
            self.world.setDynamics(parties[j], 'money', atom, tree)
        tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
        self.world.setDynamics(None, 'gameOver', atom, tree)
        # reject dynamics
        atom = Action({
            'subject': 'David',
            'verb': 'reject',
            'object': 'Stacy'
        })
        tree = makeTree(setTrueMatrix(stateKey(None, 'gameOver')))
        self.world.setDynamics(None, 'gameOver', atom, tree)

        # TODO (check with David): if adding models with levels, can the true
        # model point to these but have a different level?
        for agent in self.world.agents.values():
            agent.addModel('Christian',
                           R={},
                           level=2,
                           rationality=25.,
                           selection='distribution')
            agent.addModel('Capitalist',
                           R={},
                           level=2,
                           rationality=25.,
                           selection='distribution')
    def __init__(self,turnOrder):

        self.maxRounds=8
        self.world = World()
        totals = {'apple':1,'pear':2} 
        batna_prePref = totals['apple'] + totals['pear']
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,'appleOwned',int,lo=0,hi=totals['apple'])
            me.setState('appleOwned',0)
            self.world.defineState(me.name,'appleOffered',int,lo=0,hi=totals['apple'])
            me.setState('appleOffered',0)  
            self.world.defineState(me.name,'pearOwned',int,lo=0,hi=totals['pear'])
            me.setState('pearOwned',0)
            self.world.defineState(me.name,'pearOffered',int,lo=0,hi=totals['pear'])
            me.setState('pearOffered',0)  

            self.world.defineState(me.name,'Batna',int,lo=0,hi=10)
            me.setState('Batna', batna_prePref)
            self.world.defineState(me.name,'BatnaOwned',int,lo=0,hi=10)
            me.setState('BatnaOwned',0)  

            self.world.defineState(me.name,'agree',bool)
            me.setState('agree',False)  
            # Actions
            me.addAction({'verb': 'do nothing'})
            for amt in range(totals['apple'] + 1):
                tmp = me.addAction({'verb': 'offerApple','object': other.name,'amount': amt})
                me.setLegal(tmp,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                          False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                  True: False,
                                                  False: True},
                                          True: False}))


            for amt in range(totals['pear'] + 1):
                tmp = me.addAction({'verb': 'offerPear','object': other.name,'amount': amt})
                me.setLegal(tmp,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                          False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                  True: False,
                                                  False: True},
                                          True: False}))

            meReject = me.addAction({'verb': 'rejectNegotiation','object': other.name})
            me.setLegal(meReject,makeTree({'if': trueRow(stateKey(None, 'agreement')),
                                           False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                   True: False,
                                                   False: True},
                                           True: False}))

            meAccept = me.addAction({'verb': 'accept offer','object': other.name})
            me.setLegal(meAccept,makeTree({'if': trueRow(stateKey(None, 'appleOffer')),
                                           True: {'if': trueRow(stateKey(None, 'pearOffer')),
                                                  True: {'if': trueRow(stateKey(None, 'agreement')),
                                                         False: {'if': trueRow(stateKey(None, 'rejectedNegotiation')),
                                                                 True: False,
                                                                 False: True},
                                                         True: False},
                                                  False: False},
                                           False: False}))
            # Parameters
            me.setHorizon(6)
            me.setParameter('discount',0.9)
        
        # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        # Turn order: Uncomment the following if you want agents to act in parallel
        # self.world.setOrder([{agts[0].name,agts[1].name}])
        # Turn order: Uncomment the following if you want agents to act sequentially

        self.world.setOrder(turnOrder)

        # World state
        self.world.defineState(None,'agreement',bool)
        self.world.setState(None,'agreement',False)
        self.world.defineState(None,'appleOffer',bool)
        self.world.setState(None,'appleOffer',False)
        self.world.defineState(None,'pearOffer',bool)
        self.world.setState(None,'pearOffer',False)
        self.world.defineState(None,'round',int,description='The current round of the negotiation')
        self.world.setState(None,'round',0)
        self.world.defineState(None,'rejectedNegotiation',bool,
                          description='Has one of the players walked out?')
        self.world.setState(None, 'rejectedNegotiation', False)


        # don't terminate, so the agent sees the benefit of early agreement
        # self.world.addTermination(makeTree({'if': trueRow(stateKey(None,'agreement')),
        #                                     True: True,
        #                                     False: False}))

        # self.world.addTermination(makeTree({'if': trueRow(stateKey(None,'rejectedNegotiation')),
        #                                     True: True,
        #                                     False: False}))

        self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),self.maxRounds),
                                   True: True, False: False}))

        # Dynamics of offers
        agents = [david.name,stacy.name]
        for i in range(2):
            for fruit in ['apple','pear']:
                atom = Action({'subject': agents[i],'verb': 'offer%s' % (fruit.capitalize()),
                               'object': agents[1-i]})
                parties = [atom['subject'], atom['object']]
                for j in range(2):
                    # Set offer amount
                    offer = stateKey(parties[j],'%sOffered' % (fruit))
                    amount = actionKey('amount') if j == 1 else '%d-%s' % (totals[fruit],actionKey('amount'))
                    tree = makeTree(setToConstantMatrix(offer,amount))
                    self.world.setDynamics(parties[j],'%sOffered' % (fruit),atom,tree)
                    # reset agree flags whenever an offer is made
                    agreeFlag = stateKey(parties[j],'agree')
                    tree = makeTree(setFalseMatrix(agreeFlag))
                    self.world.setDynamics(parties[j],'agree',atom,tree)
                # Offers set offer flag in world state
                tree = makeTree(setTrueMatrix(stateKey(None,'%sOffer' % (fruit))))
                self.world.setDynamics(None,'%sOffer' % (fruit) ,atom,tree)
 

        # agents = [david.name,stacy.name]
        # Dynamics of agreements
        for i in range(2):
            atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})

            # accept offer sets accept
            tree = makeTree(setTrueMatrix(stateKey(atom['subject'],'agree')))
            self.world.setDynamics(atom['subject'],'agree',atom,tree)

            # accept offer sets agreement if object has accepted
            tree = makeTree({'if': trueRow(stateKey(atom['object'],'agree')),
                             True:  setTrueMatrix(stateKey(None,'agreement')),
                             False: noChangeMatrix(stateKey(None,'agreement'))})
            self.world.setDynamics(None,'agreement',atom,tree)

        # Accepting offer sets ownership
            parties = [atom['subject'], atom['object']]
            for fruit in ['apple','pear']:
                # atom = Action({'subject': agents[i],'verb': 'accept offer', 'object': agents[1-i]})
                for j in range(2):
                    offer = stateKey(parties[j],'%sOffered' % (fruit))
                    owned = stateKey(parties[j],'%sOwned' % (fruit))
                    tree = makeTree({'if': trueRow(stateKey(atom['object'],'agree')),
                                     False: noChangeMatrix(owned),
                                     True: setToFeatureMatrix(owned,offer)})
                    self.world.setDynamics(parties[j],'%sOwned' % (fruit),atom,tree)
        # rejecting gives the agent its BATNA and ends the negotiation
            atom = Action({'subject': agents[i],'verb': 'rejectNegotiation',
                           'object': agents[1-i]})

            tree = makeTree(setToFeatureMatrix(stateKey(atom['subject'],'BatnaOwned') ,stateKey(atom['subject'], 'Batna')))
            self.world.setDynamics(atom['subject'],'BatnaOwned' ,atom,tree)

            tree = makeTree(setToFeatureMatrix(stateKey(atom['object'],'BatnaOwned') ,stateKey(atom['object'], 'Batna')))
            self.world.setDynamics(atom['object'],'BatnaOwned' ,atom,tree)

            tree = makeTree(setTrueMatrix(stateKey(None,'rejectedNegotiation')))
            self.world.setDynamics(None,'rejectedNegotiation' ,atom,tree)
 

        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None,'round'),1))
            self.world.setDynamics(None,'round',action,tree)
        for agent in self.world.agents.values():
            agent.addModel('pearLover',R={},level=2,rationality=0.01)
            agent.addModel('appleLover',R={},level=2,rationality=0.01)
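
        # Hedged sketch, not part of the original constructor: the type models
        # defined above could be attributed to each opponent with a uniform
        # prior, e.g.:
        # self.world.setMentalModel(stacy.name, david.name,
        #                           {'pearLover': 0.5, 'appleLover': 0.5})
        # self.world.setMentalModel(david.name, stacy.name,
        #                           {'pearLover': 0.5, 'appleLover': 0.5})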
Example #18
AGENT1_ID = 'Agent 1'
AGENT2_ID = 'Agent 2'

if __name__ == '__main__':

    # turn orders
    turn_orders = {
        'First increment, then copy': [AGENT1_ID, AGENT2_ID],
        'First copy, then increment': [AGENT2_ID, AGENT1_ID],
        'Simult. increment and copy': [{AGENT1_ID, AGENT2_ID}]
    }

    for label, turn_order in turn_orders.items():

        agent1 = Agent(AGENT1_ID)
        agent2 = Agent(AGENT2_ID)

        # create world and add agents
        world = World()
        world.addAgent(agent1)
        world.addAgent(agent2)

        # add variables
        var_counter = world.defineState(agent1.name,
                                        'counter',
                                        int,
                                        lo=0,
                                        hi=3)
        var_copy = world.defineState(agent2.name,
                                     'counter_copy',
Example #19
from psychsim.pwl import *
from psychsim.action import Action,ActionSet
from psychsim.world import World,stateKey,actionKey
from psychsim.agent import Agent



if __name__ == '__main__':

    # Create scenario
    maxRounds=8
    world = World()
    totals = {'apple':1,'pear':2} 
    batna_prePref = totals['apple'] + totals['pear']
    stacy = Agent('Stacy')
    david = Agent('David')
    agts = [stacy, david]

    # Player state, actions and parameters common to both players
    for i in range(2):
        me = agts[i]
        other = agts[1-i]
        world.addAgent(me)
        # State
        world.defineState(me.name,'appleOwned',int,lo=0,hi=totals['apple'])
        me.setState('appleOwned',0)
        world.defineState(me.name,'appleOffered',int,lo=0,hi=totals['apple'])
        me.setState('appleOffered',0)  
        world.defineState(me.name,'pearOwned',int,lo=0,hi=totals['pear'])
        me.setState('pearOwned',0)
from optparse import OptionParser

from psychsim.pwl import *
from psychsim.action import Action,ActionSet
from psychsim.world import World,stateKey,actionKey
from psychsim.agent import Agent



if __name__ == '__main__':

    # Create scenario
    world = World()
    totals = {'apple':3,'pear':2}
    # Stacy state
    stacy = Agent('Stacy')
    world.addAgent(stacy)
    world.defineState(stacy.name,'applesOwned',int,lo=0,hi=totals['apple'])
    stacy.setState('applesOwned',0)
    world.defineState(stacy.name,'applesOffered',int,lo=0,hi=totals['apple'])
    stacy.setState('applesOffered',0)  
    world.defineState(stacy.name,'pearsOwned',int,lo=0,hi=totals['pear'])
    stacy.setState('pearsOwned',0)
    world.defineState(stacy.name,'pearsOffered',int,lo=0,hi=totals['pear'])
    stacy.setState('pearsOffered',0)  


    # David state
    david = Agent('David')
    world.addAgent(david)
Example #21
def createWorld(username='******',level=0,ability='good',explanation='none',
                embodiment='robot',acknowledgment='no',sequence=False,
                root='.',ext='xml',beliefs=True):
    """
    Creates the initial PsychSim scenario and saves it
    @param username: name of user ID to use in filenames
    @param level: robot mission level to use as template
    @param ability: the level of the robot's ability
      - good or C{True}: perfect sensors and sensor model
      - badSensor or C{False}: noisy sensors, but perfect model of noisy sensors
      - badModel: perfect sensors, but imperfect model of sensors
    @type ability: bool
    @param explanation: the type of explanation to use
      - none: No explanations
      - ability: Explanation based on robot ability provided.
      - abilitybenevolence: Explanation based on both robot's ability and benevolence provided.
    @type explanation: str
    @param embodiment: the robot's embodiment
      - robot: The robot looks like a robot
      - dog: The robot looks like a dog
    @type embodiment: str
    @param acknowledgment: the robot's behavior regarding the acknowledgment of errors
      - no: The robot does not acknowledge its errors
      - yes: The robot acknowledges its errors
    @type acknowledgment: str
    @param root: the root directory to use for files (default is current working directory)
    @param ext: the file extension for the PsychSim scenario file
      - xml: Save as uncompressed XML
      - psy: Save as bzipped XML
    @type ext: str
    @param beliefs: if C{True}, store the robot's uncertain beliefs in the scenario file, rather than compute them on the fly. Storing beliefs in the scenario file makes it a more complete model, but greatly increases file size. Default is C{True}
    @type beliefs: bool
    """

    print "**************************createWorld***********************"
    print 'Username:\t%s\nLevel:\t\t%s' % (username,level+1)
    print 'Ability\t\t%s\nExplanation:\t%s\nEmbodiment:\t%s\nAcknowledge:\t%s' % \
        (ability,explanation,embodiment,acknowledgment)

    # Pre-compute symbols for this level's waypoints
    for point in WAYPOINTS[level]:
        if not point.has_key('symbol'):
            point['symbol'] = point['name'].replace(' ','')

    world = World()

    world.defineState(None,'level',int,lo=0,hi=len(WAYPOINTS)-1,
                      description='Static variable indicating which mission level to use')
    world.setState(None,'level',level)

    world.defineState(None,'time',float)
    world.setState(None,'time',0.)

    world.defineState(None,'complete',bool)
    world.setState(None,'complete',False)
    world.addTermination(makeTree({'if': trueRow('complete'), True: True, False: False}))

    # Buildings
    threats = ['none','NBC','armed']
    for waypoint in WAYPOINTS[level]:
        if not waypoint.has_key('symbol'):
            waypoint['symbol'] = waypoint['name'].replace(' ','')
        world.addAgent(Agent(waypoint['symbol']))
        # Have we visited this waypoint?
        key = world.defineState(waypoint['symbol'],'visited',bool)
        world.setFeature(key,False)
        # Are there dangerous chemicals or armed people here?
        key = world.defineState(waypoint['symbol'],'danger',list,threats[:])
        if waypoint.has_key('NBC') and waypoint['NBC']:
            world.setFeature(key,'NBC')
        elif waypoint.has_key('armed') and waypoint['armed']:
            world.setFeature(key,'armed')
        else:
            world.setFeature(key,'none')
        key = world.defineState(waypoint['symbol'],'recommendation',list,
                                ['none','protected','unprotected'])
        world.setFeature(key,'none')

    # Human
    human = Agent('human')
    world.addAgent(human)

    world.defineState(human.name,'alive',bool)
    human.setState('alive',True)
    world.defineState(human.name,'deaths',int)
    human.setState('deaths',0)

    # Robot
    robot = Agent('robot')
    world.addAgent(robot)

    # Robot states
    world.defineState(robot.name,'waypoint',list,[point['symbol'] for point in WAYPOINTS[level]])
    robot.setState('waypoint',WAYPOINTS[level][getStart(level)]['symbol'])

    world.defineState(robot.name,'explanation',list,['none','ability','abilitybenevolence','abilityconfidence','confidence'])
    robot.setState('explanation',explanation)

    world.defineState(robot.name,'embodiment',list,['robot','dog'])
    robot.setState('embodiment',embodiment)

    world.defineState(robot.name,'acknowledgment',list,['no','yes'])
    robot.setState('acknowledgment',acknowledgment)

    world.defineState(robot.name,'ability',list,['badSensor','badModel','good'])
    if ability is True:
        # Backward compatibility with boolean ability
        ability = 'good'
    elif ability is False:
        ability = 'badSensor'
    robot.setState('ability',ability)

    # State of the robot's sensors
    world.defineState(robot.name,'sensorModel',list,['good','bad'])
    robot.setState('sensorModel','good')
    
    world.defineState(robot.name,'command',list,['none']+[point['symbol'] for point in WAYPOINTS[level]])
    robot.setState('command','none')

    # Actions
    for end in range(len(WAYPOINTS[level])):
        symbol = WAYPOINTS[level][end]['symbol']
        # Robot movement
        action = robot.addAction({'verb': 'moveto','object': symbol})
        # Legal if no contradictory command
        tree = makeTree({'if': equalRow(stateKey(robot.name,'command'),'none'),
                         True: True,
                         False: {'if': equalRow(stateKey(robot.name,'command'),symbol),
                                 True: True, False: False}})
        robot.setLegal(action,tree)
        # Dynamics of robot's location
        tree = makeTree(setToConstantMatrix(stateKey(action['subject'],'waypoint'),symbol))
        world.setDynamics(stateKey(action['subject'],'waypoint'),action,tree)
        # Dynamics of visited flag
        key = stateKey(symbol,'visited')
        tree = makeTree(setTrueMatrix(key))
        world.setDynamics(key,action,tree)
        # Dynamics of time
        key = stateKey(None,'time')
        tree = setToConstantMatrix(key,0.)
        for start in range(len(WAYPOINTS[level])):
            if start != end:
                startsymbol = WAYPOINTS[level][start]['symbol']
                if sequence:
                    # Distance is measured by level sequence
                    distance = abs(start-end)*50
                else:
                    try:
                        distance = DISTANCES[WAYPOINTS[level][start]['name']][WAYPOINTS[level][end]['name']]
                    except KeyError:
                        try:
                            distance = DISTANCES[WAYPOINTS[level][end]['name']][WAYPOINTS[level][start]['name']]
                        except KeyError:
                            distance = 250
                tree = {'if': equalRow(stateKey(action['subject'],'waypoint'),startsymbol),
                        True: setToConstantMatrix(key,float(distance)/1000.),
                        False: tree}
        world.setDynamics(key,action,makeTree(tree))
        # Human entry: Dead or alive if unprotected?
        key = stateKey(human.name,'alive')
        action = robot.addAction({'verb': 'recommend unprotected','object': symbol})
        tree = makeTree({'if': equalRow(stateKey(symbol,'danger'),'none'),
                         True: setTrueMatrix(key), False: setFalseMatrix(key)})
        world.setDynamics(key,action,tree)
        robot.setLegal(action,makeTree(False))
        # Human entry: How much "time" if protected?
        action = robot.addAction({'verb': 'recommend protected','object': symbol})
        key = stateKey(None,'time')
        world.setDynamics(key,action,makeTree(setToConstantMatrix(key,0.25)))
        robot.setLegal(action,makeTree(False))

    # Robot goals
    goal = minimizeFeature(stateKey(None,'time'))
    robot.setReward(goal,2.)

    goal = maximizeFeature(stateKey(human.name,'alive'))
    robot.setReward(goal,1.)

    for point in WAYPOINTS[level]:
        robot.setReward(maximizeFeature(stateKey(point['symbol'],'visited')),1.)

    if beliefs:
#        omega = 'danger'
        world.defineVariable(robot.name,ActionSet)
        # Robot beliefs
        world.setModel(robot.name,True)
        value = 1./float(len(WAYPOINTS[level]))
#        tree = KeyedVector({CONSTANT: world.value2float(omega,'none')})
        for index in range(len(WAYPOINTS[level])):
            waypoint = WAYPOINTS[level][index]
            key = stateKey(waypoint['symbol'],'danger')
            # if index > 0:
            #     # Starting state is safe
            robot.setBelief(key,psychsim.probability.Distribution({'NBC': value/2., 'armed': value/2.,'none': 1.-value}))
            # Observation function
#            tree = {'if': equalRow(stateKey(robot.name,'waypoint'),waypoint['symbol']),
#                    True: generateO(world,key),
#                    False: tree}
#        robot.defineObservation(omega,makeTree(tree),domain=list,lo=['none','NBC','armed'])
        robot.defineObservation('microphone',makeTree(None),None,domain=list,
                                lo=['nobody','friendly','suspicious'])
        robot.defineObservation('NBCsensor',makeTree(None),None,domain=bool)
        robot.defineObservation('camera',makeTree(None),None,domain=bool)
    else:
        robot.defineObservation('microphone',makeTree(None),None,domain=list,
                                lo=['nobody','friendly','suspicious'])
        robot.defineObservation('NBCsensor',makeTree(None),None,domain=bool)
        robot.defineObservation('camera',makeTree(None),None,domain=bool)
    robot.setAttribute('horizon',1)

    world.setOrder([robot.name])

    filename = getFilename(username,level,ext,root)

    world.save(filename,ext=='psy')
    WriteLogData('%s user %s, level %d, ability %s, explanation %s' % \
                     (CREATE_TAG,username,level,ability,explanation),username,level,root=root)
    return world
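
# Hypothetical usage sketch (not part of the original module): createWorld also
# relies on module-level data such as WAYPOINTS, DISTANCES, getStart and the
# logging helpers, so it can only be called where those are defined, e.g.:
# world = createWorld(username='tester', level=0, ability='good',
#                     explanation='ability', embodiment='robot',
#                     acknowledgment='yes', root='/tmp')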
#         prob = noisyOr(tree[True],.75,.1)
#         return {'distribution': [(setTrueMatrix(key),prob),(setFalseMatrix(key),1.-prob)]}
#
#
#
# def noisyOr(onCount,onProb,leak=0.):
#     return 1.- (1.-leak)*pow(1.-onProb,onCount)

if __name__ == '__main__':
    # State

    world = World()
    world.diagram = diagram.Diagram()
    world.diagram.setColor(None, 'ivory')

    resident = Agent('resident')
    world.diagram.setColor(resident.name, 'palegreen')
    world.addAgent(resident)
    world.defineState(resident.name, 'sea_level_house', list,
                      ['low', 'med', 'hi'])
    world.defineState(resident.name, 'home_structure', list,
                      ['mobile', 'other'])
    world.defineState(resident.name, 'storm_category', list,
                      ['1', '2', '3', '4', '5'])
    world.defineState(resident.name, 'gov_evac', list, ['strong', 'none'])
    world.defineState(resident.name, 'loc_evac_phone', bool)
    world.defineState(resident.name, 'peer_evac_info', bool)
    world.defineState(resident.name, 'expect_prop_risk', bool)
    world.defineState(resident.name, 'expect_personal_risk', bool)

    bel_seq = [
Example #23
__author__ = 'Pedro Sequeira'
__email__ = '*****@*****.**'
__description__ = 'Example that models one agent navigating left or right along one dimension (one position feature). ' \
                  'We give the agent a belief in an incorrect position and watch it act based on that belief, ' \
                  'i.e., in its mind the position is changing, but it is not aligned with the real/true position.'

# parameters
HORIZON = 3
DISCOUNT = 1
MAX_STEPS = 3

if __name__ == '__main__':

    # create world and add agent
    world = World()
    agent = Agent('Agent')
    world.addAgent(agent)

    # set parameters
    agent.setAttribute('discount', DISCOUNT)
    agent.setHorizon(HORIZON)

    # add position variable
    pos = world.defineState(agent.name, 'position', int, lo=-100, hi=100)
    world.setFeature(pos, 0)

    # define agents' actions (stay 0, left -1 and right +1)
    action = agent.addAction({'verb': 'move', 'action': 'nowhere'})
    tree = makeTree(setToFeatureMatrix(pos, pos))
    world.setDynamics(pos, action, tree)
    action = agent.addAction({'verb': 'move', 'action': 'left'})
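    # --- Hedged sketch of how this truncated example plausibly continues ---
    # (not the original source): 'left'/'right' movement dynamics, turn order,
    # and an incorrect belief about the starting position.
    tree = makeTree(incrementMatrix(pos, -1))
    world.setDynamics(pos, action, tree)
    action = agent.addAction({'verb': 'move', 'action': 'right'})
    tree = makeTree(incrementMatrix(pos, 1))
    world.setDynamics(pos, action, tree)

    world.setOrder([agent.name])

    # the agent believes it starts at 10, although the true position is 0
    world.setModel(agent.name, True)
    agent.setBelief(pos, 10, True)

    for _ in range(MAX_STEPS):
        world.step()
        print('Real position: ' + str(world.getValue(pos)))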
Example #24
            }
        }
    })


if __name__ == '__main__':

    random.seed(0)

    # sets up log to screen
    logging.basicConfig(format='%(message)s',
                        level=logging.DEBUG if DEBUG else logging.INFO)

    # create world and add agent
    world = World()
    agent1 = Agent('Agent 1')
    world.addAgent(agent1)
    agent2 = Agent('Agent 2')
    world.addAgent(agent2)

    agents_dec = []
    agents = [agent1, agent2]
    for agent in agents:
        # set agent's params
        agent.setAttribute('discount', 1)
        agent.setAttribute('selection', TIEBREAK)
        # agent.setRecursiveLevel(1)

        # add "decision" variable (0 = didn't decide, 1 = Defected, 2 = Cooperated)
        dec = world.defineState(agent.name, 'decision', list,
                                [NOT_DECIDED, DEFECTED, COOPERATED])
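
        # Hedged sketch (not the original continuation; assumes the psychsim.pwl
        # helpers imported at the truncated top of this file): defect/cooperate
        # actions whose dynamics write the chosen value into the decision
        # feature, mirroring the setToConstantMatrix pattern used elsewhere in
        # these examples.
        action = agent.addAction({'verb': 'defect'})
        world.setDynamics(dec, action, makeTree(setToConstantMatrix(dec, DEFECTED)))
        action = agent.addAction({'verb': 'cooperate'})
        world.setDynamics(dec, action, makeTree(setToConstantMatrix(dec, COOPERATED)))
        agents_dec.append(dec)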
Example #25
from ConfigParser import SafeConfigParser
from optparse import OptionParser

from psychsim.pwl import *
from psychsim.action import Action,ActionSet
from psychsim.world import World,stateKey,actionKey
from psychsim.agent import Agent



if __name__ == '__main__':

    # Create scenario
    world = World()
    totals = {'scotch':1,'tequila':1} #  1 and 1
    stacy = Agent('Stacy')
    david = Agent('David')
    agts = [stacy, david]

    # Player state, actions and parameters common to both players
    for i in range(2):
        me = agts[i]
        other = agts[1-i]
        world.addAgent(me)
        # State
        world.defineState(me.name,'scotchOwned',int,lo=0,hi=totals['scotch'])
        me.setState('scotchOwned',0)
        world.defineState(me.name,'scotchOffered',int,lo=0,hi=totals['scotch'])
        me.setState('scotchOffered',0)  
        world.defineState(me.name,'tequilaOwned',int,lo=0,hi=totals['tequila'])
        me.setState('tequilaOwned',0)
Example #26
def scenarioCreationUseCase(enemy='Sylvania',
                            model='powell',
                            web=False,
                            fCollapse=None,
                            sCollapse=None,
                            maxRounds=15):
    """
    An example of how to create a scenario
    @param enemy: the name of the agent-controlled side, i.e., Freedonia's opponent (default: Sylvania)
    @type enemy: str
    @param model: which model do we use (default is "powell")
    @type model: powell or slantchev
    @param web: if C{True}, then create the web-based experiment scenario (default: C{False})
    @type web: bool
    @param fCollapse: the probability that Freedonia collapses (under powell, default: 0.1) or loses battle (under slantchev, default: 0.7)
    @type fCollapse: float
    @param sCollapse: the probability that Sylvania collapses, under powell (default: 0.1)
    @type sCollapse: float
    @param maxRounds: the maximum number of game rounds (default: 15)
    @type maxRounds: int
    @return: the scenario created
    @rtype: L{World}
    """
    # Handle defaults for battle probabilities, under each model
    posLo = 0
    posHi = 10
    if fCollapse is None:
        if model == 'powell':
            fCollapse = 0.1
        elif model == 'slantchev':
            fCollapse = 0.7
    if sCollapse is None:
        sCollapse = 0.1

    # Create scenario
    world = World()

    # Agents
    free = Agent('Freedonia')
    world.addAgent(free)
    sylv = Agent(enemy)
    world.addAgent(sylv)

    # User state
    world.defineState(free.name,
                      'troops',
                      int,
                      lo=0,
                      hi=50000,
                      description='Number of troops you have left')
    free.setState('troops', 40000)
    world.defineState(
        free.name,
        'territory',
        int,
        lo=0,
        hi=100,
        description='Percentage of disputed territory owned by you')
    free.setState('territory', 15)
    world.defineState(free.name,
                      'cost',
                      int,
                      lo=0,
                      hi=50000,
                      description='Number of troops %s loses in an attack' %
                      (free.name))
    free.setState('cost', 2000)
    world.defineState(
        free.name,
        'position',
        int,
        lo=posLo,
        hi=posHi,
        description='Current status of war (%d=%s is winner, %d=you are winner)'
        % (posLo, sylv.name, posHi))
    free.setState('position', 5)
    world.defineState(
        free.name,
        'offered',
        int,
        lo=0,
        hi=100,
        description=
        'Percentage of disputed territory that %s last offered to you' %
        (sylv.name))
    free.setState('offered', 0)
    if model == 'slantchev':
        # Compute new value for territory only *after* computing new value for position
        world.addDependency(stateKey(free.name, 'territory'),
                            stateKey(free.name, 'position'))

    # Agent state
    world.defineState(sylv.name,
                      'troops',
                      int,
                      lo=0,
                      hi=500000,
                      description='Number of troops %s has left' % (sylv.name))
    sylv.setState('troops', 30000)
    world.defineState(sylv.name,
                      'cost',
                      int,
                      lo=0,
                      hi=50000,
                      description='Number of troops %s loses in an attack' %
                      (sylv.name))
    sylv.setState('cost', 2000)
    world.defineState(
        sylv.name,
        'offered',
        int,
        lo=0,
        hi=100,
        description=
        'Percentage of disputed territory that %s last offered to %s' %
        (free.name, sylv.name))
    sylv.setState('offered', 0)

    # World state
    world.defineState(None,
                      'treaty',
                      bool,
                      description='Have the two sides reached an agreement?')
    world.setState(None, 'treaty', False)
    # Stage of negotiation, illustrating the use of an enumerated state feature
    world.defineState(
        None,
        'phase',
        list, ['offer', 'respond', 'rejection', 'end', 'paused', 'engagement'],
        description='The current stage of the negotiation game')
    world.setState(None, 'phase', 'paused')
    # Game model, static descriptor
    world.defineState(None,
                      'model',
                      list, ['powell', 'slantchev'],
                      description='The model underlying the negotiation game')
    world.setState(None, 'model', model)
    # Round of negotiation
    world.defineState(None,
                      'round',
                      int,
                      description='The current round of the negotiation')
    world.setState(None, 'round', 0)

    if not web:
        # Relationship value
        key = world.defineRelation(free.name, sylv.name, 'trusts')
        world.setFeature(key, 0.)
    # Game over if there is a treaty
    world.addTermination(
        makeTree({
            'if': trueRow(stateKey(None, 'treaty')),
            True: True,
            False: False
        }))
    # Game over if Freedonia has no territory
    world.addTermination(
        makeTree({
            'if': thresholdRow(stateKey(free.name, 'territory'), 1),
            True: False,
            False: True
        }))
    # Game over if Freedonia has all the territory
    world.addTermination(
        makeTree({
            'if': thresholdRow(stateKey(free.name, 'territory'), 99),
            True: True,
            False: False
        }))
    # Game over if number of rounds exceeds limit
    world.addTermination(
        makeTree({
            'if': thresholdRow(stateKey(None, 'round'), maxRounds),
            True: True,
            False: False
        }))

    # Turn order: Uncomment the following if you want agents to act in parallel
    #    world.setOrder([set(world.agents.keys())])
    # Turn order: Uncomment the following if you want agents to act sequentially
    world.setOrder([free.name, sylv.name])

    # User actions
    freeBattle = free.addAction({'verb': 'attack', 'object': sylv.name})
    for amount in range(20, 100, 20):
        free.addAction({
            'verb': 'offer',
            'object': sylv.name,
            'amount': amount
        })
    if model == 'powell':
        # Powell has null stages
        freeNOP = free.addAction({'verb': 'continue'})
    elif model == 'slantchev':
        # Slantchev has both sides receiving offers
        free.addAction({'verb': 'accept offer', 'object': sylv.name})
        free.addAction({'verb': 'reject offer', 'object': sylv.name})

    # Agent actions
    sylvBattle = sylv.addAction({'verb': 'attack', 'object': free.name})
    sylvAccept = sylv.addAction({'verb': 'accept offer', 'object': free.name})
    sylvReject = sylv.addAction({'verb': 'reject offer', 'object': free.name})
    if model == 'powell':
        # Powell has null stages
        sylvNOP = sylv.addAction({'verb': 'continue'})
    elif model == 'slantchev':
        # Slantchev has both sides making offers
        for amount in range(10, 100, 10):
            sylv.addAction({
                'verb': 'offer',
                'object': free.name,
                'amount': amount
            })

    # Restrictions on when actions are legal, based on phase of game
    for action in filterActions({'verb': 'offer'},
                                free.actions | sylv.actions):
        agent = world.agents[action['subject']]
        agent.setLegal(
            action,
            makeTree({
                'if': equalRow(stateKey(None, 'phase'), 'offer'),
                True: True,  # Offers are legal in the offer phase
                False: False
            }))  # Offers are illegal in all other phases
    if model == 'powell':
        # Powell has a special rejection phase
        for action in [freeNOP, freeBattle]:
            free.setLegal(
                action,
                makeTree({
                    'if': equalRow(stateKey(None, 'phase'), 'rejection'),
                    True:
                    True,  # Attacking and doing nothing are legal only in rejection phase
                    False: False
                })
            )  # Attacking and doing nothing are illegal in all other phases

    # Once offered, agent can respond
    if model == 'powell':
        # Under Powell, only Sylvania has to respond, and it can attack
        responses = [sylvBattle, sylvAccept, sylvReject]
    elif model == 'slantchev':
        # Under Slantchev, only accept/reject
        responses = filterActions({'verb': 'accept offer'},
                                  free.actions | sylv.actions)
        responses += filterActions({'verb': 'reject offer'},
                                   free.actions | sylv.actions)
    for action in responses:
        agent = world.agents[action['subject']]
        agent.setLegal(
            action,
            makeTree({
                'if': equalRow(stateKey(None, 'phase'), 'respond'),
                True: True,  # Offeree must act in the response phase
                False: False
            }))  # Offeree cannot act in any other phase

    if model == 'powell':
        # NOP is legal in exactly opposite situations to all other actions
        sylv.setLegal(
            sylvNOP,
            makeTree({
                'if': equalRow(stateKey(None, 'phase'), 'end'),
                True:
                True,  # Sylvania does not do anything in the null phase after Freedonia responds to rejection
                False: False
            }))  # Sylvania must act in its other phases
    if model == 'slantchev':
        # Attacking legal only under engagement phase
        for action in filterActions({'verb': 'attack'},
                                    free.actions | sylv.actions):
            agent = world.agents[action['subject']]
            agent.setLegal(
                action,
                makeTree({
                    'if': equalRow(stateKey(None, 'phase'), 'engagement'),
                    True: True,  # Attacking legal only in engagement
                    False: False
                }))  # Attacking is illegal in every other phase

    # Goals for Freedonia
    goalFTroops = maximizeFeature(stateKey(free.name, 'troops'))
    free.setReward(goalFTroops, 1.)
    goalFTerritory = maximizeFeature(stateKey(free.name, 'territory'))
    free.setReward(goalFTerritory, 1.)

    # Goals for Sylvania
    goalSTroops = maximizeFeature(stateKey(sylv.name, 'troops'))
    sylv.setReward(goalSTroops, 1.)
    goalSTerritory = minimizeFeature(stateKey(free.name, 'territory'))
    sylv.setReward(goalSTerritory, 1.)

    # Possible goals applicable to both
    goalAgreement = maximizeFeature(stateKey(None, 'treaty'))

    # Silly goal, provided as an example of an achievement goal
    goalAchieve = achieveFeatureValue(stateKey(None, 'phase'), 'respond')

    # Horizons
    if model == 'powell':
        free.setAttribute('horizon', 4)
        sylv.setAttribute('horizon', 4)
    elif model == 'slantchev':
        free.setAttribute('horizon', 6)
        sylv.setAttribute('horizon', 6)

    # Discount factors
    free.setAttribute('discount', -1)
    sylv.setAttribute('discount', -1)

    # Levels of belief
    free.setRecursiveLevel(2)
    sylv.setRecursiveLevel(2)

    # Dynamics of battle
    freeTroops = stateKey(free.name, 'troops')
    freeTerr = stateKey(free.name, 'territory')
    sylvTroops = stateKey(sylv.name, 'troops')
    # Effect of fighting
    for action in filterActions({'verb': 'attack'},
                                free.actions | sylv.actions):
        # Effect on troops (cost of battle)
        tree = makeTree(
            addFeatureMatrix(freeTroops, stateKey(free.name, 'cost'), -1.))
        world.setDynamics(freeTroops, action, tree, enforceMin=not web)
        tree = makeTree(
            addFeatureMatrix(sylvTroops, stateKey(sylv.name, 'cost'), -1.))
        world.setDynamics(sylvTroops, action, tree, enforceMin=not web)
        if model == 'powell':
            # Effect on territory (probability of collapse)
            tree = makeTree({
                'distribution': [
                    (
                        {
                            'distribution': [
                                (setToConstantMatrix(freeTerr,
                                                     100), 1. - fCollapse
                                 ),  # Sylvania collapses, Freedonia does not
                                (noChangeMatrix(freeTerr), fCollapse)
                            ]
                        },  # Both collapse
                        sCollapse),
                    (
                        {
                            'distribution': [
                                (setToConstantMatrix(freeTerr, 0), fCollapse
                                 ),  # Freedonia collapses, Sylvania does not
                                (noChangeMatrix(freeTerr), 1. - fCollapse)
                            ]
                        },  # Neither collapses
                        1. - sCollapse)
                ]
            })
            world.setDynamics(freeTerr, action, tree)
        elif model == 'slantchev':
            # Effect on position
            pos = stateKey(free.name, 'position')
            tree = makeTree({
                'distribution': [
                    (incrementMatrix(pos, 1),
                     1. - fCollapse),  # Freedonia wins battle
                    (incrementMatrix(pos, -1), fCollapse)
                ]
            })  # Freedonia loses battle
            world.setDynamics(pos, action, tree)
            # Effect on territory
            tree = makeTree({
                'if': thresholdRow(pos, posHi - .5),
                True: setToConstantMatrix(freeTerr, 100),  # Freedonia won
                False: {
                    'if': thresholdRow(pos, posLo + .5),
                    True: noChangeMatrix(freeTerr),
                    False: setToConstantMatrix(freeTerr, 0)
                }
            })  # Freedonia lost
            world.setDynamics(freeTerr, action, tree)

    # Dynamics of offers
    for index in range(2):
        atom = Action({
            'subject': list(world.agents.keys())[index],
            'verb': 'offer',
            'object': list(world.agents.keys())[1 - index]
        })
        if atom['subject'] == free.name or model != 'powell':
            offer = stateKey(atom['object'], 'offered')
            amount = actionKey('amount')
            tree = makeTree({
                'if': trueRow(stateKey(None, 'treaty')),
                True: noChangeMatrix(offer),
                False: setToConstantMatrix(offer, amount)
            })
            world.setDynamics(offer, atom, tree, enforceMax=not web)

    # Dynamics of treaties
    for action in filterActions({'verb': 'accept offer'},
                                free.actions | sylv.actions):
        # Accepting an offer means that there is now a treaty
        key = stateKey(None, 'treaty')
        tree = makeTree(setTrueMatrix(key))
        world.setDynamics(key, action, tree)
        # Accepting offer sets territory
        offer = stateKey(action['subject'], 'offered')
        territory = stateKey(free.name, 'territory')
        if action['subject'] == free.name:
            # Freedonia accepts sets territory to last offer
            tree = makeTree(setToFeatureMatrix(territory, offer))
            world.setDynamics(freeTerr, action, tree)
        else:
            # Sylvania accepts sets territory to 1-last offer
            tree = makeTree(
                setToFeatureMatrix(territory, offer, pct=-1., shift=100.))
            world.setDynamics(freeTerr, action, tree)

    # Dynamics of phase
    phase = stateKey(None, 'phase')
    roundKey = stateKey(None, 'round')
    # OFFER -> RESPOND
    for index in range(2):
        action = Action({
            'subject': list(world.agents.keys())[index],
            'verb': 'offer',
            'object': list(world.agents.keys())[1 - index]
        })
        if action['subject'] == free.name or model != 'powell':
            tree = makeTree(setToConstantMatrix(phase, 'respond'))
            world.setDynamics(phase, action, tree)
    # RESPOND -> REJECTION or ENGAGEMENT
    for action in filterActions({'verb': 'reject offer'},
                                free.actions | sylv.actions):
        if model == 'powell':
            tree = makeTree(setToConstantMatrix(phase, 'rejection'))
        elif model == 'slantchev':
            tree = makeTree(setToConstantMatrix(phase, 'engagement'))
        world.setDynamics(phase, action, tree)
    # accepting -> OFFER
    for action in filterActions({'verb': 'accept offer'},
                                free.actions | sylv.actions):
        tree = makeTree(setToConstantMatrix(phase, 'offer'))
        world.setDynamics(phase, action, tree)
    # attacking -> OFFER
    for action in filterActions({'verb': 'attack'},
                                free.actions | sylv.actions):
        tree = makeTree(setToConstantMatrix(phase, 'offer'))
        world.setDynamics(phase, action, tree)
        if action['subject'] == sylv.name or model == 'slantchev':
            tree = makeTree(incrementMatrix(roundKey, 1))
            world.setDynamics(roundKey, action, tree)
    if model == 'powell':
        # REJECTION -> END
        for atom in [freeNOP, freeBattle]:
            tree = makeTree(setToConstantMatrix(phase, 'end'))
            world.setDynamics(phase, atom, tree)
        # END -> OFFER
        atom = Action({'subject': sylv.name, 'verb': 'continue'})
        tree = makeTree(setToConstantMatrix(phase, 'offer'))
        world.setDynamics(phase, atom, tree)
        tree = makeTree(incrementMatrix(roundKey, 1))
        world.setDynamics(roundKey, atom, tree)

    if not web:
        # Relationship dynamics: attacking is bad for trust
        atom = Action({
            'subject': sylv.name,
            'verb': 'attack',
            'object': free.name
        })
        key = binaryKey(free.name, sylv.name, 'trusts')
        tree = makeTree(approachMatrix(key, 0.1, -1.))
        world.setDynamics(key, atom, tree)
        # Handcrafted policy for Freedonia
        #    free.setPolicy(makeTree({'if': equalRow('phase','respond'),
        #                             # Accept an offer greater than 50
        #                             True: {'if': thresholdRow(stateKey(free.name,'offered'),50),
        #                                    True: Action({'subject': free.name,'verb': 'accept offer','object': sylv.name}),
        #                                    False: Action({'subject': free.name,'verb': 'reject offer','object': sylv.name})},
        #                             False: {'if': equalRow('phase','engagement'),
        #                             # Attack during engagement phase
        #                                     True: Action({'subject': free.name,'verb': 'attack','object': sylv.name}),
        #                             # Agent decides how what to do otherwise
        #                                     False: False}}))
        # Mental models of enemy
        # Example of creating a model with an incorrect reward, all at once (a version of Sylvania that cares about reaching agreement as well)
        # sylv.addModel('false',R={goalSTroops: 10.,goalSTerritory: 1.,goalAgreement: 1.},
        #              rationality=1.,selection='distribution',parent=True)
        # Example of creating a model with incorrect beliefs
        sylv.addModel('false',
                      rationality=10.,
                      selection='distribution',
                      parent=True)
        key = stateKey(free.name, 'position')
        # Sylvania believes position to be fixed at 3
        sylv.setBelief(key, 3, 'false')

        # Freedonia is truly unsure about position (50% chance of being 7, 50% of being 3)
        world.setModel(free.name, True)
        free.setBelief(key, Distribution({7: 0.5, 3: 0.5}), True)
        # Observations about military position
        tree = makeTree({
            'if': thresholdRow(key, 1),
            True: {
                'if': thresholdRow(key, 9),
                True: {
                    'distribution': [(KeyedVector({key: 1}), 0.9),
                                     (KeyedVector({
                                         key: 1,
                                         CONSTANT: -1
                                     }), 0.1)]
                },
                False: {
                    'distribution': [(KeyedVector({key: 1}), 0.8),
                                     (KeyedVector({
                                         key: 1,
                                         CONSTANT: -1
                                     }), 0.1),
                                     (KeyedVector({
                                         key: 1,
                                         CONSTANT: 1
                                     }), 0.1)]
                }
            },
            False: {
                'distribution': [(KeyedVector({key: 1}), 0.9),
                                 (KeyedVector({
                                     key: 1,
                                     CONSTANT: 1
                                 }), 0.1)]
            }
        })
        free.defineObservation(key, tree)

        # Example of setting model parameters separately
        sylv.addModel('true', parent=True)
        sylv.setAttribute(
            'rationality', 10.,
            'true')  # Override real agent's rationality with this value
        sylv.setAttribute('selection', 'distribution', 'true')
        world.setMentalModel(free.name, sylv.name, {'false': 0.9, 'true': 0.1})

        # Goal of fooling Sylvania
        goalDeception = achieveFeatureValue(modelKey(sylv.name),
                                            sylv.model2index('false'))
    return world
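
# A minimal usage sketch (not part of the original file; assumes the psychsim
# imports used throughout these examples are in place):
if __name__ == '__main__':
    world = scenarioCreationUseCase(model='powell', maxRounds=5)
    # leave the initial 'paused' phase so that offers become legal
    world.setState(None, 'phase', 'offer')
    world.step()
    print('Round: ' + str(world.getValue(stateKey(None, 'round'))))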
Example #27
        return {'if': tree['if'],
                True: leaf2matrix(tree[True],key),
                False: leaf2matrix(tree[False],key)}
    else:
        prob = noisyOr(tree[True],.75,.1)
        return {'distribution': [(setTrueMatrix(key),prob),(setFalseMatrix(key),1.-prob)]}

def noisyOr(onCount,onProb,leak=0.):
    return 1.- (1.-leak)*pow(1.-onProb,onCount)
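
# Worked example (illustrative): noisyOr(2, 0.75, leak=0.1)
#   = 1 - (1 - 0.1) * (1 - 0.75)**2 = 1 - 0.9 * 0.0625 = 0.94375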

if __name__ == '__main__':
    world = World()
    world.diagram = diagram.Diagram()
    world.diagram.setColor(None,'ivory')

    resident = Agent('resident')
    world.diagram.setColor(resident.name,'palegreen')
    world.addAgent(resident)
#    family = Agent('family')
#    world.diagram.setColor(family.name,'mediumseagreen')
#    world.addAgent(family)
    gov = Agent('government')
    world.diagram.setColor(gov.name,'cornflowerblue')
    world.addAgent(gov)

    world.setOrder([resident.name])

    # Keep track of orthogonal dimensions of decision-making
    phase = world.defineState(None,'phase',list,['where','how'],
                              description='What is being decided at this stage of the simulation')
    world.setState(None,'phase','where')
Example #28
import sys
from ConfigParser import SafeConfigParser
from optparse import OptionParser

from psychsim.pwl import *
from psychsim.action import Action, ActionSet
from psychsim.world import World, stateKey, actionKey
from psychsim.agent import Agent

if __name__ == '__main__':

    # Create scenario
    world = World()
    totals = {'apple': 3, 'pear': 2}
    # Stacy state
    stacy = Agent('Stacy')
    world.addAgent(stacy)
    world.defineState(stacy.name, 'applesOwned', int, lo=0, hi=totals['apple'])
    stacy.setState('applesOwned', 0)
    world.defineState(stacy.name,
                      'applesOffered',
                      int,
                      lo=0,
                      hi=totals['apple'])
    stacy.setState('applesOffered', 0)
    world.defineState(stacy.name, 'pearsOwned', int, lo=0, hi=totals['pear'])
    stacy.setState('pearsOwned', 0)
    world.defineState(stacy.name, 'pearsOffered', int, lo=0, hi=totals['pear'])
    stacy.setState('pearsOffered', 0)

    # David state
Example #29

if __name__ == '__main__':

    # Create scenario
    world = World()
    totals = {'exiter':3,'follower':4, 'avoider':3} # 

    # There is a mix of agent types with different reward preferences for heading
    # toward the door, following whoever is closest, or avoiding the fire.
    rewardWeights = {'exiter':   {'fire': .4, 'door': .5, 'follow': .1},
                     'follower': {'fire': .2, 'door': .2, 'follow': .6},
                     'avoider':  {'fire': .6, 'door': .3, 'follow': .1}}


    # the fire and door are modeled as agents with no actions - they only have a fixed location

    me = Agent('door')
    world.addAgent(me)
    world.defineState(me.name,'x',float)
    world.setState(me.name,'x',5)
    world.defineState(me.name,'y',float)
    world.setState(me.name,'y',5)
    me.setHorizon(0)

    me = Agent('fire')
    world.addAgent(me)
    world.defineState(me.name,'x',float)
    world.setState(me.name,'x',1)
    world.defineState(me.name,'y',float)
    world.setState(me.name,'y',1)
    me.setHorizon(0)
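
    # Hedged sketch (not the original continuation; assumes the usual psychsim
    # imports at the truncated top of this file): add one evacuee of type
    # 'exiter', reusing the reward weights above. The distance features are
    # hypothetical placeholders for whatever the full example actually uses.
    me = Agent('exiter1')
    world.addAgent(me)
    world.defineState(me.name, 'distToDoor', float)
    world.setState(me.name, 'distToDoor', 4.)
    world.defineState(me.name, 'distToFire', float)
    world.setState(me.name, 'distToFire', 4.)
    weights = rewardWeights['exiter']
    me.setReward(minimizeFeature(stateKey(me.name, 'distToDoor')), weights['door'])
    me.setReward(maximizeFeature(stateKey(me.name, 'distToFire')), weights['fire'])
    me.setHorizon(2)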
Example #30
def parseGDELT(fname, targets=[]):
    """
    Extracts events from a single GDELT CSV file
    """
    # Parse filename
    root, ext = os.path.splitext(os.path.basename(fname))
    assert ext == '.csv', 'CSV file expected instead of %s' % (fname)
    if len(root) == 4:
        year = int(root)
        month = 0
    else:
        assert len(
            root) == 6, 'Filename not in YYYY.csv or YYYYMM.csv format: %s' % (
                fname)
        year = int(root[:4])
        month = int(root[4:])
    # Initialize storage
    result = {
        'month': month,
        'year': year,
        'agents': {},
        'matrix': {},
        'calendar': {},
    }
    today = None
    start = time.time()
    lines = 0
    for line in fileinput.input(fname):
        lines += 1
        # Extract the event fields
        elements = map(lambda x: x.strip(), line.split('\t'))
        event = {}
        for index in range(len(elements)):
            if len(elements[index]) == 0:
                event[headings[index]] = None
            elif headings[index] in intHeadings:
                event[headings[index]] = int(elements[index])
            elif headings[index] in floatHeadings:
                event[headings[index]] = float(elements[index])
            else:
                event[headings[index]] = elements[index].strip()
        if event['SQLDATE'] != today:
            today = event['SQLDATE']
            events = []
            result['calendar'][event['SQLDATE']] = events
            print >> sys.stderr, today
        if event['Actor1Code'] is None:
            # No actor?
            event['Actor1Code'] = 'Unknown'
        if lines % 10000 == 0 and events:
            print >> sys.stderr,'\t%dK (%d events,%d agents)' % \
                (lines/1000,len(events),len(result['agents']))
        if matchActor(event['Actor1Code'],targets) or \
                matchActor(event['Actor2Code'],targets):
            # Event matching our target
            events.append(event)
            if not result['agents'].has_key(event['Actor1Code']):
                agent = Agent(event['Actor1Code'])
                result['agents'][agent.name] = agent
            event['action'] = Action({
                'subject': event['Actor1Code'],
                'verb': cameo[event['EventCode']]
            })
            if event['Actor2Code']:
                if not result['agents'].has_key(event['Actor2Code']):
                    agent = Agent(event['Actor2Code'])
                    result['agents'][agent.name] = agent
                event['action']['object'] = event['Actor2Code']
            # Update relationship matrix
            if event['Actor1Code'] < event['Actor2Code']:
                key = '%s,%s' % (event['Actor1Code'], event['Actor2Code'])
            else:
                key = '%s,%s' % (event['Actor2Code'], event['Actor1Code'])
            try:
                result['matrix'][key].append(event)
            except KeyError:
                result['matrix'][key] = [event]
    return result
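
# Usage sketch (assumption): parseGDELT relies on module-level headings,
# intHeadings, floatHeadings and cameo tables plus a matchActor helper that
# are not shown in this excerpt; the file name and actor codes below are
# illustrative only.
if __name__ == '__main__':
    data = parseGDELT('201301.csv', targets=['USA'])
    print('%d days, %d agents, %d dyads' % (len(data['calendar']),
                                            len(data['agents']),
                                            len(data['matrix'])))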
Example #31
0
 def isXML(element):
     if not Agent.isXML(element):
         return False
     return len(element.getAttribute('resource')) > 0
Example #32
0
class TestAgents(unittest.TestCase):

    def setUp(self):
        # Create world
        self.world = World()
        # Create agents
        self.tom = Agent('Tom')
        self.world.addAgent(self.tom)
        self.jerry = Agent('Jerry')
        self.world.addAgent(self.jerry)

    def addStates(self):
        """Create state features"""
        self.world.defineState(self.tom.name,'health',int,lo=0,hi=100,
                               description='%s\'s wellbeing' % (self.tom.name))
        self.world.setState(self.tom.name,'health',50)
        self.world.defineState(self.jerry.name,'health',int,lo=0,hi=100,
                               description='%s\'s wellbeing' % (self.jerry.name))
        self.world.setState(self.jerry.name,'health',50)

    def addActions(self):
        """Create actions"""
        self.chase = self.tom.addAction({'verb': 'chase','object': self.jerry.name})
        self.hit = self.tom.addAction({'verb': 'hit','object': self.jerry.name})
        self.run = self.jerry.addAction({'verb': 'run away'})
        self.trick = self.jerry.addAction({'verb': 'trick','object': self.tom.name})

    def addDynamics(self):
        """Create dynamics"""
        tree = makeTree(incrementMatrix(stateKey(self.jerry.name,'health'),-10))
        self.world.setDynamics(stateKey(self.jerry.name,'health'),self.hit,tree,enforceMin=True)

    def addModels(self,rationality=1.):
        self.tom.addModel('friend',rationality=rationality,parent=True)
        self.tom.setReward(maximizeFeature(stateKey(self.jerry.name,'health')),1.,'friend')
        self.tom.addModel('foe',rationality=rationality,parent=True)
        self.tom.setReward(minimizeFeature(stateKey(self.jerry.name,'health')),1.,'foe')

    def saveload(self):
        """Write scenario to file and then load from scratch"""
        self.world.save('/tmp/psychsim_test.psy')
        self.world = World('/tmp/psychsim_test.psy')
        self.tom = self.world.agents[self.tom.name]
        self.jerry = self.world.agents[self.jerry.name]

    def testEnumeratedState(self):
        self.addActions()
        self.world.defineVariable(self.tom.name,ActionSet)
        self.world.defineState(self.tom.name,'status',list,['dead','injured','healthy'])
        self.world.setState(self.tom.name,'status','healthy')
        goal = achieveFeatureValue(stateKey(self.tom.name,'status'),'healthy')
        self.tom.setReward(goal,1.)
        goal = achieveFeatureValue(stateKey(self.tom.name,'status'),'injured')
        self.jerry.setReward(goal,1.)
        self.saveload()
        self.assertEqual(len(self.world.state[None]),1)
        vector = self.world.state[None].domain()[0]
        tVal = self.tom.reward(vector)
        self.assertAlmostEqual(tVal,1.,8)
        jVal = self.jerry.reward(vector)
        self.assertAlmostEqual(jVal,0.,8)
        for action in self.tom.actions:
            encoding = self.world.value2float(self.tom.name,action)
            self.assertEqual(action,self.world.float2value(self.tom.name,encoding))

    def testBeliefModels(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        self.tom.addModel('optimist')
        self.tom.setBelief(stateKey(self.jerry.name,'health'),20,'optimist')
        self.tom.addModel('pessimist')
        self.world.setModel(self.jerry.name,True)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'optimist': 0.5,'pessimist': 0.5})
        actions = {self.tom.name: self.hit}
        self.world.step(actions)
        vector = self.world.state[None].domain()[0]
        beliefs = self.jerry.getAttribute('beliefs',self.world.getModel(self.jerry.name,vector))
        for belief in beliefs.domain():
            model = self.world.getModel(self.tom.name,belief)
            if self.tom.models[model].has_key('beliefs'):
                nested = self.tom.models[model]['beliefs']
                self.assertEqual(len(nested),1)
                nested = nested.domain()[0]
                self.assertEqual(len(nested),1)
                self.assertAlmostEqual(nested[stateKey(self.jerry.name,'health')],10.,8)

    def testObservation(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        self.world.setModel(self.jerry.name,True)
        key = stateKey(self.jerry.name,'health')
        self.jerry.setBelief(key,Distribution({20: 0.5, 50: 0.5}))
        tree = makeTree({'if': thresholdRow(key,40),
                         True: {'distribution': [(KeyedVector({CONSTANT: 50}),.8),
                                                 (KeyedVector({CONSTANT: 20}),.2)]},
                         False: {'distribution': [(KeyedVector({CONSTANT: 50}),.2),
                                                  (KeyedVector({CONSTANT: 20}),.8)]}})
        self.jerry.defineObservation(key,tree)
        actions = {self.tom.name: self.hit}
        vector = self.world.state[None].domain()[0]
        omegaDist = self.jerry.observe(vector,actions)
        for omega in omegaDist.domain():
            new = KeyedVector(vector)
            model = self.jerry.index2model(self.jerry.stateEstimator(vector,new,omega))
            beliefs = self.jerry.models[model]['beliefs']
            if omega[key] > 30:
                # We observed a high value, so we should have a stronger belief in the higher value
                # which is now 40 after the hit
                for belief in beliefs.domain():
                    if beliefs[belief] > 0.5:
                        self.assertAlmostEqual(belief[key],40,8)
                    else:
                        self.assertAlmostEqual(belief[key],10,8)
            else:
                # We observed a low value, so we should have a stronger belief in the lower value
                # which is now 10 after the hit
                for belief in beliefs.domain():
                    if beliefs[belief] < 0.5:
                        self.assertAlmostEqual(belief[key],40,8)
                    else:
                        self.assertAlmostEqual(belief[key],10,8)

    def testUnobservedAction(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setOrder([self.tom.name])
        self.world.setModel(self.jerry.name,True)
        self.jerry.setBelief(stateKey(self.jerry.name,'health'),50)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        tree = makeTree(True)
        self.jerry.defineObservation(self.tom.name,tree,self.hit,domain=ActionSet)
        tree = makeTree({'distribution': [(True,0.25),(False,0.75)]})
        self.jerry.defineObservation(self.tom.name,tree,self.chase,domain=ActionSet)
        vector = self.world.state[None].domain()[0]
        self.saveload()
        self.world.step({self.tom.name: self.hit})
        vector = self.world.state[None].domain()[0]

    def testRewardModels(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setOrder([self.tom.name])
        # Add Jerry's model to the world (so that it gets updated)
        self.world.setModel(self.jerry.name,True)
        # Give Jerry uncertainty about Tom
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.saveload()
        # Hitting should make Jerry think Tom is more of a foe
        actions = {self.tom.name: self.hit}
        self.world.step(actions)
        vector = self.world.state[None].domain()[0]
        belief01 = self.jerry.getAttribute('beliefs',self.world.getModel(self.jerry.name,vector))
        key = modelKey(self.tom.name)
        for belief in belief01.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob01 = belief01[belief]
                break
        self.assertGreater(prob01,0.5)
        # If we think of Tom as even more of an optimizer, then our update should be stronger
        self.tom.setAttribute('rationality',10.,'foe')
        self.tom.setAttribute('rationality',10.,'friend')
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.world.step(actions)
        vector = self.world.state[None].domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief10 = self.jerry.getAttribute('beliefs',model)
        key = modelKey(self.tom.name)
        for belief in belief10.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob10 = belief10[belief]
                break
        self.assertGreater(prob10,prob01)
        # If we keep the same models, but get another observation, we should update even more
        self.world.step(actions)
        vector = self.world.state[None].domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief1010 = self.jerry.getAttribute('beliefs',model)
        key = modelKey(self.tom.name)
        for belief in belief1010.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob1010 = belief1010[belief]
                break
        self.assertGreater(prob1010,prob10)

    def testDynamics(self):
        self.world.setOrder([self.tom.name])
        self.addStates()
        self.addActions()
        self.addDynamics()
        key = stateKey(self.jerry.name,'health')
        self.assertEqual(len(self.world.state[None]),1)
        vector = self.world.state[None].domain()[0]
        self.assertTrue(vector.has_key(stateKey(self.tom.name,'health')))
        self.assertTrue(vector.has_key(turnKey(self.tom.name)))
        self.assertTrue(vector.has_key(key))
        self.assertTrue(vector.has_key(CONSTANT))
        self.assertEqual(len(vector),4)
        self.assertEqual(vector[stateKey(self.tom.name,'health')],50)
        self.assertEqual(vector[key],50)
        outcome = self.world.step({self.tom.name: self.chase})
        for i in range(7):
            self.assertEqual(len(self.world.state[None]),1)
            vector = self.world.state[None].domain()[0]
            self.assertTrue(vector.has_key(stateKey(self.tom.name,'health')))
            self.assertTrue(vector.has_key(turnKey(self.tom.name)))
            self.assertTrue(vector.has_key(key))
            self.assertTrue(vector.has_key(CONSTANT))
            self.assertEqual(len(vector),4)
            self.assertEqual(vector[stateKey(self.tom.name,'health')],50)
            self.assertEqual(vector[key],max(50-10*i,0))
            outcome = self.world.step({self.tom.name: self.hit})
            self.saveload()

    def testRewardOnOthers(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        vector = self.world.state[None].domain()[0]
        # Create Jerry's goals
        goal = maximizeFeature(stateKey(self.jerry.name,'health'))
        self.jerry.setReward(goal,1.)
        jVal = -self.jerry.reward(vector)
        # Create Tom's goals from scratch
        minGoal = minimizeFeature(stateKey(self.jerry.name,'health'))
        self.tom.setReward(minGoal,1.)
        self.saveload()
        tRawVal = self.tom.reward(vector)
        self.assertAlmostEqual(jVal,tRawVal,8)
        # Create Tom's goals as a function of Jerry's
        self.tom.models[True]['R'].clear()
        self.tom.setReward(self.jerry.name,-1.)
        self.saveload()
        tFuncVal = self.tom.reward(vector)
        self.assertAlmostEqual(tRawVal,tFuncVal,8)
        # Test effect of functional reward on value function
        self.tom.setHorizon(1)
        self.saveload()
        vHit = self.tom.value(vector,self.hit)['V']
        vChase = self.tom.value(vector,self.chase)['V']
        self.assertAlmostEqual(vHit,vChase+.1,8)

    def testReward(self):
        self.addStates()
        key = stateKey(self.jerry.name,'health')
        goal = makeTree({'if': thresholdRow(key,5),
                         True: KeyedVector({key: -2}),
                         False: KeyedVector({key: -1})})
        goal = goal.desymbolize(self.world.symbols)
        self.jerry.setReward(goal,1.)
        R = self.jerry.models[True]['R']
        self.assertEqual(len(R),1)
        newGoal = R.keys()[0]
        self.assertEqual(newGoal,goal)
        self.assertAlmostEqual(R[goal],1.,8)
        self.jerry.setReward(goal,2.)
        self.assertEqual(len(R),1)
        self.assertEqual(R.keys()[0],goal)
        self.assertAlmostEqual(R[goal],2.,8)

    def testTurnDynamics(self):
        self.addStates()
        self.addActions()
        self.world.setOrder([self.tom.name,self.jerry.name])
        self.assertEqual(self.world.maxTurn,1)
        self.saveload()
        vector = self.world.state[None].domain()[0]
        jTurn = turnKey(self.jerry.name)
        tTurn = turnKey(self.tom.name)
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        self.world.step()
        vector = self.world.state[None].domain()[0]
        self.assertEqual(self.world.next(),[self.jerry.name])
        self.assertEqual(vector[tTurn],1)
        self.assertEqual(vector[jTurn],0)
        self.world.step()
        vector = self.world.state[None].domain()[0]
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        # Try some custom dynamics
        self.world.setTurnDynamics(self.tom.name,self.hit,makeTree(noChangeMatrix(tTurn)))
        self.world.setTurnDynamics(self.jerry.name,self.hit,makeTree(noChangeMatrix(tTurn)))
        self.world.step()
        vector = self.world.state[None].domain()[0]
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        self.world.step({self.tom.name: self.chase})
        vector = self.world.state[None].domain()[0]
        self.assertEqual(self.world.next(),[self.jerry.name])
        self.assertEqual(vector[tTurn],1)
        self.assertEqual(vector[jTurn],0)

    def testStatic(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setModel(self.jerry.name,True)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.world.setOrder([self.tom.name])
        vector = self.world.state[None].domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief0 = self.jerry.models[model]['beliefs']
        result = self.world.step({self.tom.name: self.hit})
        vector = self.world.state[None].domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief1 = self.jerry.models[model]['beliefs']
        key = modelKey(self.tom.name)
        for vector in belief0.domain():
            if self.tom.index2model(vector[key]) == 'friend':
                self.assertGreater(belief0[vector],belief1[vector])
            else:
                self.assertGreater(belief1[vector],belief0[vector])
        # Now with the static beliefs
        self.jerry.setAttribute('static',True,model)
        self.saveload()
        self.world.step()
        vector = self.world.state[None].domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief2 = self.jerry.models[model]['beliefs']
        for vector in belief1.domain():
            self.assertAlmostEqual(belief1[vector],belief2[vector],8)
Example #33
0
    def __init__(self,turnOrder,maxRounds,payoff):

        self.maxRounds=maxRounds
        self.payoff = payoff
        print self.payoff
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]

        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            # State
            self.world.defineState(me.name,'money',int)
            me.setState('money',0)
            mePass = me.addAction({'verb': 'pass','object': other.name})
            meTake = me.addAction({'verb': 'take','object': other.name})
            # Parameters
            me.setHorizon(6)
            me.setAttribute('discount',1.)
            # me.setAttribute('discount',0.9)

            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(turnOrder)
        # World state
        self.world.defineState(None,'round',int,description='The current round')
        self.world.setState(None,'round',0)
        self.world.defineState(None,'gameOver',bool,description='whether game is over')
        self.world.setState(None,'gameOver',False)

        self.world.addTermination(makeTree({'if': thresholdRow(stateKey(None,'round'),self.maxRounds),
                                            True: True,
                                            False: {'if': trueRow(stateKey(None,'gameOver')),
                                                    True: True,
                                                    False: False}}))

        # Dynamics
        for action in stacy.actions | david.actions:
            tree = makeTree(incrementMatrix(stateKey(None,'round'),1))
            self.world.setDynamics(stateKey(None,'round'),action,tree)
            if (action['verb'] == 'take'):
                tree = makeTree(setTrueMatrix(stateKey(None,'gameOver')))
                self.world.setDynamics(stateKey(None,'gameOver'),action,tree)
                agts = ['Stacy','David']
                for i in range(2):
                    key = stateKey(agts[i],'money')
                    tree = makeTree(self.buildPayoff(0, key, self.payoff[agts[i]]))
                    self.world.setDynamics(stateKey(agts[i],'money'),action,tree)
            elif action['verb'] == 'pass':
                agts = ['Stacy','David']
                for i in range(2):
                    key = stateKey(agts[i],'money')
                    tree = makeTree({'if': equalRow(stateKey(None,'round'),self.maxRounds-1),
                                     True: setToConstantMatrix(key,self.payoff[agts[i]][self.maxRounds]),
                                     False: noChangeMatrix(key)})
                    self.world.setDynamics(stateKey(agts[i],'money'),action,tree)


# really need to ask David about these levels - if adding models with levels, can
# the true model point to these but have a different level

        for agent in self.world.agents.values():
            agent.addModel('Christian',R={},level=2,rationality=10.,selection='distribution')
            agent.addModel('Capitalist',R={},level=2,rationality=10.,selection='distribution')
Example #34
0
class TestAgents(unittest.TestCase):

    def setUp(self):
        # Create world
        self.world = World()
        # Create agents
        self.tom = Agent('Tom')
        self.world.addAgent(self.tom)
        self.jerry = Agent('Jerry')
        self.world.addAgent(self.jerry)

    def addStates(self):
        """Create state features"""
        self.world.defineState(self.tom.name,'health',int,lo=0,hi=100,
                               description='%s\'s wellbeing' % (self.tom.name))
        self.world.setState(self.tom.name,'health',50)
        self.world.defineState(self.jerry.name,'health',int,lo=0,hi=100,
                               description='%s\'s wellbeing' % (self.jerry.name))
        self.world.setState(self.jerry.name,'health',50)

    def addActions(self):
        """Create actions"""
        self.chase = self.tom.addAction({'verb': 'chase','object': self.jerry.name})
        self.hit = self.tom.addAction({'verb': 'hit','object': self.jerry.name})
        self.run = self.jerry.addAction({'verb': 'run away'})
        self.trick = self.jerry.addAction({'verb': 'trick','object': self.tom.name})

    def addDynamics(self):
        """Create dynamics"""
        tree = makeTree(incrementMatrix(stateKey(self.jerry.name,'health'),-10))
        self.world.setDynamics(stateKey(self.jerry.name,'health'),self.hit,tree,enforceMin=True)

    def addModels(self,rationality=1.):
        self.tom.addModel('friend',rationality=rationality,parent=True)
        self.tom.setReward(maximizeFeature(stateKey(self.jerry.name,'health')),1.,'friend')
        self.tom.addModel('foe',rationality=rationality,parent=True)
        self.tom.setReward(minimizeFeature(stateKey(self.jerry.name,'health')),1.,'foe')

    def saveload(self):
        """Write scenario to file and then load from scratch"""
        self.world.save('/tmp/psychsim_test.psy')
        self.world = World('/tmp/psychsim_test.psy')
        self.tom = self.world.agents[self.tom.name]
        self.jerry = self.world.agents[self.jerry.name]

    def testEnumeratedState(self):
        self.addActions()
        self.world.defineVariable(self.tom.name,ActionSet)
        self.world.defineState(self.tom.name,'status',list,['dead','injured','healthy'])
        self.world.setState(self.tom.name,'status','healthy')
        goal = achieveFeatureValue(stateKey(self.tom.name,'status'),'healthy')
        self.tom.setReward(goal,1.)
        goal = achieveFeatureValue(stateKey(self.tom.name,'status'),'injured')
        self.jerry.setReward(goal,1.)
        self.saveload()
        self.assertEqual(len(self.world.state),1)
        vector = self.world.state.domain()[0]
        tVal = self.tom.reward(vector)
        self.assertAlmostEqual(tVal,1.,8)
        jVal = self.jerry.reward(vector)
        self.assertAlmostEqual(jVal,0.,8)
        for action in self.tom.actions:
            encoding = self.world.value2float(self.tom.name,action)
            self.assertEqual(action,self.world.float2value(self.tom.name,encoding))

    def testBeliefModels(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        self.tom.addModel('optimist')
        self.tom.setBelief(stateKey(self.jerry.name,'health'),20,'optimist')
        self.tom.addModel('pessimist')
        self.world.setModel(self.jerry.name,True)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'optimist': 0.5,'pessimist': 0.5})
        actions = {self.tom.name: self.hit}
        self.world.step(actions)
        vector = self.world.state.domain()[0]
        beliefs = self.jerry.getAttribute('beliefs',self.world.getModel(self.jerry.name,vector))
        for belief in beliefs.domain():
            model = self.world.getModel(self.tom.name,belief)
            if self.tom.models[model].has_key('beliefs'):
                nested = self.tom.models[model]['beliefs']
                self.assertEqual(len(nested),1)
                nested = nested.domain()[0]
                self.assertEqual(len(nested),1)
                self.assertAlmostEqual(nested[stateKey(self.jerry.name,'health')],10.,8)

    def testObservation(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        self.world.setModel(self.jerry.name,True)
        key = stateKey(self.jerry.name,'health')
        self.jerry.setBelief(key,Distribution({20: 0.5, 50: 0.5}))
        tree = makeTree({'if': thresholdRow(key,40),
                         True: {'distribution': [(KeyedVector({CONSTANT: 50}),.8),
                                                 (KeyedVector({CONSTANT: 20}),.2)]},
                         False: {'distribution': [(KeyedVector({CONSTANT: 50}),.2),
                                                  (KeyedVector({CONSTANT: 20}),.8)]}})
        self.jerry.defineObservation(key,tree)
        actions = {self.tom.name: self.hit}
        vector = self.world.state.domain()[0]
        omegaDist = self.jerry.observe(vector,actions)
        for omega in omegaDist.domain():
            new = KeyedVector(vector)
            model = self.jerry.index2model(self.jerry.stateEstimator(vector,new,omega))
            beliefs = self.jerry.models[model]['beliefs']
            if omega[key] > 30:
                # We observed a high value, so we should have a stronger belief in the higher value
                # which is now 40 after the hit
                for belief in beliefs.domain():
                    if beliefs[belief] > 0.5:
                        self.assertAlmostEqual(belief[key],40,8)
                    else:
                        self.assertAlmostEqual(belief[key],10,8)
            else:
                # We observed a low value, so we should have a stronger belief in the lower value
                # which is now 10 after the hit
                for belief in beliefs.domain():
                    if beliefs[belief] < 0.5:
                        self.assertAlmostEqual(belief[key],40,8)
                    else:
                        self.assertAlmostEqual(belief[key],10,8)

    def testUnobservedAction(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setOrder([self.tom.name])
        self.world.setModel(self.jerry.name,True)
        self.jerry.setBelief(stateKey(self.jerry.name,'health'),50)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        tree = makeTree(True)
        self.jerry.defineObservation(self.tom.name,tree,self.hit,domain=ActionSet)
        tree = makeTree({'distribution': [(True,0.25),(False,0.75)]})
        self.jerry.defineObservation(self.tom.name,tree,self.chase,domain=ActionSet)
        vector = self.world.state.domain()[0]
        self.saveload()
        self.world.step({self.tom.name: self.hit})
        vector = self.world.state.domain()[0]

    def testRewardModels(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setOrder([self.tom.name])
        # Add Jerry's model to the world (so that it gets updated)
        self.world.setModel(self.jerry.name,True)
        # Give Jerry uncertainty about Tom
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.saveload()
        # Hitting should make Jerry think Tom is more of a foe
        actions = {self.tom.name: self.hit}
        self.world.step(actions)
        vector = self.world.state.domain()[0]
        belief01 = self.jerry.getAttribute('beliefs',self.world.getModel(self.jerry.name,vector))
        key = modelKey(self.tom.name)
        for belief in belief01.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob01 = belief01[belief]
                break
        self.assertGreater(prob01,0.5)
        # If we think of Tom as even more of an optimizer, then our update should be stronger
        self.tom.setAttribute('rationality',10.,'foe')
        self.tom.setAttribute('rationality',10.,'friend')
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.world.step(actions)
        vector = self.world.state.domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief10 = self.jerry.getAttribute('beliefs',model)
        key = modelKey(self.tom.name)
        for belief in belief10.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob10 = belief10[belief]
                break
        self.assertGreater(prob10,prob01)
        # If we keep the same models, but get another observation, we should update even more
        self.world.step(actions)
        vector = self.world.state.domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief1010 = self.jerry.getAttribute('beliefs',model)
        key = modelKey(self.tom.name)
        for belief in belief1010.domain():
            if self.tom.index2model(belief[key]) == 'foe':
                prob1010 = belief1010[belief]
                break
        self.assertGreater(prob1010,prob10)

    def testDynamics(self):
        self.world.setOrder([self.tom.name])
        self.addStates()
        self.addActions()
        self.addDynamics()
        key = stateKey(self.jerry.name,'health')
        self.assertEqual(len(self.world.state),1)
        vector = self.world.state.domain()[0]
        self.assertTrue(vector.has_key(stateKey(self.tom.name,'health')))
        self.assertTrue(vector.has_key(turnKey(self.tom.name)))
        self.assertTrue(vector.has_key(key))
        self.assertTrue(vector.has_key(CONSTANT))
        self.assertEqual(len(vector),4)
        self.assertEqual(vector[stateKey(self.tom.name,'health')],50)
        self.assertEqual(vector[key],50)
        outcome = self.world.step({self.tom.name: self.chase})
        for i in range(7):
            self.assertEqual(len(self.world.state),1)
            vector = self.world.state.domain()[0]
            self.assertTrue(vector.has_key(stateKey(self.tom.name,'health')))
            self.assertTrue(vector.has_key(turnKey(self.tom.name)))
            self.assertTrue(vector.has_key(key))
            self.assertTrue(vector.has_key(CONSTANT))
            self.assertEqual(len(vector),4)
            self.assertEqual(vector[stateKey(self.tom.name,'health')],50)
            self.assertEqual(vector[key],max(50-10*i,0))
            outcome = self.world.step({self.tom.name: self.hit})
            self.saveload()

    def testRewardOnOthers(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.world.setOrder([self.tom.name])
        vector = self.world.state.domain()[0]
        # Create Jerry's goals
        goal = maximizeFeature(stateKey(self.jerry.name,'health'))
        self.jerry.setReward(goal,1.)
        jVal = -self.jerry.reward(vector)
        # Create Tom's goals from scratch
        minGoal = minimizeFeature(stateKey(self.jerry.name,'health'))
        self.tom.setReward(minGoal,1.)
        self.saveload()
        tRawVal = self.tom.reward(vector)
        self.assertAlmostEqual(jVal,tRawVal,8)
        # Create Tom's goals as a function of Jerry's
        self.tom.models[True]['R'].clear()
        self.tom.setReward(self.jerry.name,-1.)
        self.saveload()
        tFuncVal = self.tom.reward(vector)
        self.assertAlmostEqual(tRawVal,tFuncVal,8)
        # Test effect of functional reward on value function
        self.tom.setHorizon(1)
        self.saveload()
        vHit = self.tom.value(vector,self.hit)['V']
        vChase = self.tom.value(vector,self.chase)['V']
        self.assertAlmostEqual(vHit,vChase+.1,8)

    def testReward(self):
        self.addStates()
        key = stateKey(self.jerry.name,'health')
        goal = makeTree({'if': thresholdRow(key,5),
                         True: KeyedVector({key: -2}),
                         False: KeyedVector({key: -1})})
        self.jerry.setReward(goal,1.)
        R = self.jerry.models[True]['R']
        self.assertEqual(len(R),1)
        self.assertEqual(R.keys()[0],goal)
        self.assertAlmostEqual(R[goal],1.,8)
        self.jerry.setReward(goal,2.)
        self.assertEqual(len(R),1)
        self.assertEqual(R.keys()[0],goal)
        self.assertAlmostEqual(R[goal],2.,8)

    def testTurnDynamics(self):
        self.addStates()
        self.addActions()
        self.world.setOrder([self.tom.name,self.jerry.name])
        self.assertEqual(self.world.maxTurn,1)
        self.saveload()
        vector = self.world.state.domain()[0]
        jTurn = turnKey(self.jerry.name)
        tTurn = turnKey(self.tom.name)
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        self.world.step()
        vector = self.world.state.domain()[0]
        self.assertEqual(self.world.next(),[self.jerry.name])
        self.assertEqual(vector[tTurn],1)
        self.assertEqual(vector[jTurn],0)
        self.world.step()
        vector = self.world.state.domain()[0]
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        # Try some custom dynamics
        self.world.setTurnDynamics(self.tom.name,self.hit,makeTree(noChangeMatrix(tTurn)))
        self.world.setTurnDynamics(self.jerry.name,self.hit,makeTree(noChangeMatrix(tTurn)))
        self.world.step()
        vector = self.world.state.domain()[0]
        self.assertEqual(self.world.next(),[self.tom.name])
        self.assertEqual(vector[tTurn],0)
        self.assertEqual(vector[jTurn],1)
        self.world.step({self.tom.name: self.chase})
        vector = self.world.state.domain()[0]
        self.assertEqual(self.world.next(),[self.jerry.name])
        self.assertEqual(vector[tTurn],1)
        self.assertEqual(vector[jTurn],0)

    def testStatic(self):
        self.addStates()
        self.addActions()
        self.addDynamics()
        self.addModels()
        self.world.setModel(self.jerry.name,True)
        self.world.setMentalModel(self.jerry.name,self.tom.name,{'friend': 0.5,'foe': 0.5})
        self.world.setOrder([self.tom.name])
        vector = self.world.state.domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief0 = self.jerry.models[model]['beliefs']
        self.world.step()
        vector = self.world.state.domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief1 = self.jerry.models[model]['beliefs']
        key = modelKey(self.tom.name)
        for vector in belief0.domain():
            if self.tom.index2model(vector[key]) == 'friend':
                self.assertGreater(belief0[vector],belief1[vector])
            else:
                self.assertGreater(belief1[vector],belief0[vector])
        # Now with the static beliefs
        self.jerry.setAttribute('static',True,model)
        self.saveload()
        self.world.step()
        vector = self.world.state.domain()[0]
        model = self.world.getModel(self.jerry.name,vector)
        belief2 = self.jerry.models[model]['beliefs']
        for vector in belief1.domain():
            self.assertAlmostEqual(belief1[vector],belief2[vector],8)
Example #35
0
from psychsim.pwl import *
from psychsim.action import Action,ActionSet
from psychsim.world import World,stateKey,actionKey
from psychsim.agent import Agent



if __name__ == '__main__':

    # Create scenario
    maxRounds=8
    world = World()
    totals = {'apple':1,'pear':2} 
    batna_prePref = totals['apple'] + totals['pear']
    stacy = Agent('Stacy')
    david = Agent('David')
    agts = [stacy, david]

    # Player state, actions and parameters common to both players
    for i in range(2):
        me = agts[i]
        other = agts[1-i]
        world.addAgent(me)
        # State
        world.defineState(me.name,'appleOwned',int,lo=0,hi=totals['apple'])
        me.setState('appleOwned',0)
        world.defineState(me.name,'appleOffered',int,lo=0,hi=totals['apple'])
        me.setState('appleOffered',0)  
        world.defineState(me.name,'pearOwned',int,lo=0,hi=totals['pear'])
        me.setState('pearOwned',0)
Example #36
0
 def hasAction(self,atom):
     if atom['subject'] == self.name and atom['verb'] == self.verbName and \
             atom['object'] in self.objects:
         return True
     else:
         return Agent.hasAction(self,atom)
Example #37
0
        return {
            'distribution': [(setTrueMatrix(key), prob),
                             (setFalseMatrix(key), 1. - prob)]
        }


def noisyOr(onCount, onProb, leak=0.):
    return 1. - (1. - leak) * pow(1. - onProb, onCount)
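
# Quick sanity check of the noisy-OR combination above (illustrative values):
# with no active causes only the leak can fire, and each additional active
# cause multiplies the failure probability by (1 - onProb).
assert abs(noisyOr(0, 0.3, leak=0.1) - 0.1) < 1e-9    # leak only
assert abs(noisyOr(1, 0.3) - 0.3) < 1e-9              # single cause
assert abs(noisyOr(2, 0.3) - 0.51) < 1e-9             # 1 - 0.7**2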


if __name__ == '__main__':
    world = World()
    world.diagram = diagram.Diagram()
    world.diagram.setColor(None, 'ivory')

    resident = Agent('resident')
    world.diagram.setColor(resident.name, 'palegreen')
    world.addAgent(resident)
    #    family = Agent('family')
    #    world.diagram.setColor(family.name,'mediumseagreen')
    #    world.addAgent(family)
    gov = Agent('government')
    world.diagram.setColor(gov.name, 'cornflowerblue')
    world.addAgent(gov)

    world.setOrder([resident.name])

    # Keep track of orthogonal dimensions of decision-making
    phase = world.defineState(
        None,
        'phase',
Example #38
0
DEBUG = False


def get_fake_model_name(agent):
    return 'fake {} model'.format(agent.name)


if __name__ == '__main__':

    # sets up log to screen
    logging.basicConfig(format='%(message)s',
                        level=logging.DEBUG if DEBUG else logging.INFO)

    # create world and add agents
    world = World()
    ag_producer = Agent('Producer')
    world.addAgent(ag_producer)
    ag_consumer = Agent('Consumer')
    world.addAgent(ag_consumer)
    agents = [ag_producer, ag_consumer]

    # agent settings
    ag_producer.setAttribute('discount', 1)
    ag_producer.setHorizon(HORIZON)
    ag_consumer.setAttribute('discount', 1)
    ag_consumer.setHorizon(HORIZON)

    # add variables (capacity and asked/received amounts)
    var_half_cap = world.defineState(ag_producer.name, 'half capacity', bool)
    world.setFeature(var_half_cap, False)
    var_ask_amnt = world.defineState(ag_producer.name,
Example #39
0
    def __init__(self):
        self.world = World()
        stacy = Agent('Stacy')
        david = Agent('David')
        agts = [stacy, david]
        totalAmt = 4
        # Player state, actions and parameters common to both players
        for i in range(2):
            me = agts[i]
            other = agts[1-i]
            self.world.addAgent(me)
            self.world.defineState(me.name,'offered',int,lo=0,hi=totalAmt)
            self.world.defineState(me.name,'money',int,lo=0,hi=totalAmt)
            me.setState('offered',0)  
            me.setState('money',0)  
            if (me.name == 'Stacy'):
                for amt in range(totalAmt + 1):
                    me.addAction({'verb': 'offer','object': other.name,'amount': amt})
            else:
                mePass = me.addAction({'verb': 'accept','object': other.name})
                mePass = me.addAction({'verb': 'reject','object': other.name})
            # Parameters
            me.setHorizon(2)
            me.setParameter('discount',0.9)
            # me.setParameter('discount',1.0)
        
            # Levels of belief
        david.setRecursiveLevel(3)
        stacy.setRecursiveLevel(3)

        self.world.setOrder(['Stacy','David'])

        # World state
        self.world.defineState(None,'gameOver',bool,description='whether game is over')
        self.world.setState(None,'gameOver',False)

        self.world.addTermination(makeTree({'if': trueRow(stateKey(None,'gameOver')),
                                            True: True, False: False}))
        # offer dynamics
        atom = Action({'subject': 'Stacy','verb': 'offer', 'object': 'David'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j],'offered')
            amount = actionKey('amount') if j == 1 else '%d-%s' % (totalAmt,actionKey('amount'))
            tree = makeTree(setToConstantMatrix(offer,amount))
            self.world.setDynamics(parties[j],'offered',atom,tree)
        # accept dynamics
        atom = Action({'subject': 'David','verb': 'accept', 'object': 'Stacy'})
        parties = [atom['subject'], atom['object']]
        for j in range(2):
            offer = stateKey(parties[j],'offered')
            money = stateKey(parties[j],'money')
            tree = makeTree(setToFeatureMatrix(money,offer))
            self.world.setDynamics(parties[j],'money',atom,tree)
        tree=makeTree(setTrueMatrix(stateKey(None,'gameOver')))
        self.world.setDynamics(None,'gameOver',atom,tree)
        # reject dynamics
        atom = Action({'subject': 'David','verb': 'reject', 'object': 'Stacy'})
        tree=makeTree(setTrueMatrix(stateKey(None,'gameOver')))
        self.world.setDynamics(None,'gameOver',atom,tree)

# really need to ask David about these levels - if adding models with levels, can
# the true model point to these but have a different level
        for agent in self.world.agents.values():
            agent.addModel('Christian',R={},level=2,rationality=25.,selection='distribution')
            agent.addModel('Capitalist',R={},level=2,rationality=25.,selection='distribution')
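        # Sketch (assumption; the original excerpt stops here): give each
        # player an uncertain mental model of the other over the two
        # alternative reward models just defined.
        self.world.setMentalModel('David', 'Stacy', {'Christian': 0.5, 'Capitalist': 0.5})
        self.world.setMentalModel('Stacy', 'David', {'Christian': 0.5, 'Capitalist': 0.5})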
Example #40
0
def createWorld(numPlayers,regionTable,starts,generation='additive',maxResources=32):
    """
    @param numPlayers: number of players in the game
    @type numPlayers: int
    @param regionTable: a table of regions, indexed by name
    @param starts: a list of starting regions, one for each player
    @param maxResources: the maximum number of resources a player may have
    @type maxResources: int
    """
    world = ResourceWorld(allocateVerb='allocate',allocationState='invaders',winnerState='invader')

    # Create regions
    regions = set()
    for name,table in regionTable.items():
        region = Agent(name)
        world.addAgent(region)
        regions.add(region)

        world.defineState(name,'occupants',int,lo=0,hi=maxResources,
                          description='Number of resources in %s' % (region))
        region.setState('occupants',table['occupants'] if table.has_key('occupants') else table['value'])

        world.defineState(name,'value',int,lo=0,hi=maxResources,
                          description='Number of resources generated by %s' % (region))
        region.setState('value',table['value'])

        world.defineState(name,'invaders',int,lo=0,hi=numPlayers*maxResources,
                         description='Number of resources invading %s' % (region))
        region.setState('invaders',0)
        world.dynamics[stateKey(region.name,'invaders')] = True

    # Create agents for human players
    players = []
    for player in range(numPlayers):
        players.append(ResourceAgent('Player%d' % (player+1),'resources','allocate',
                                     [region.name for region in regions]))
        world.addAgent(players[player])
        players[player].allocateAll = True

        world.defineState(players[player].name,'resources',int,lo=0,hi=maxResources,
                          description='Number of total resources owned by %s' % (players[player].name),
                          combinator='*')
        players[player].setState('resources',0)

    # Create agent for "enemy"
    enemy = Agent('Enemy')
    world.addAgent(enemy)

    owners = world.agents.keys()

    for region in regions:
        world.defineState(region.name,'owner',set,set(owners),
                          description='Name of owner of %s' % (region))
        region.setState('owner',enemy.name)

        world.defineState(region.name,'invader',set,set(owners)-{enemy.name}|{world.nullAgent},
                          description='Name of invader who will own %s if successful' % (region))
        try:
            index = starts.index(region.name)
            region.setState('invader','Player%d' % (index+1))
        except ValueError:
            region.setState('invader',world.nullAgent)
        world.dynamics[stateKey(region.name,'invader')] = True

    # Set players' initial territories
    for index in range(numPlayers):
        region = world.agents[starts[index]]
        region.setState('owner',players[index].name)
        # Players can invade only if enemy owns it and they (or teammate) own a neighboring country
        for region in regions:
            tree = False
            for neighbor in regionTable[region.name]['neighbors']:
                tree = {'if': equalRow(stateKey(neighbor,'owner'),enemy.name),
                        True: tree,
                        False: True}
            tree = makeTree({'if': equalRow(stateKey(region.name,'owner'),enemy.name),
                             True: tree,
                             False: False})
            players[index].objectLegality[region.name] = tree.desymbolize(world.symbols)

    # Create region "action"
    for region in regions:
        region.addAction({'verb': 'generate'})
    
    # Set order of play
    world.setOrder([set([region.name for region in regions]),set([player.name for player in players])])

    # Winner determination
    for region in regions:
        # Determine the owner after determining who's invading
        world.addDependency(stateKey(region.name,'owner'),stateKey(region.name,'invader'))
        # Determine the winner of the invasion
        owner = stateKey(region.name,'owner')
        world.dynamics[owner] = True
        invader = stateKey(region.name,'invader')
        defenders = stateKey(region.name,'occupants')
        invaders = stateKey(region.name,'invaders')
        value = stateKey(region.name,'value')
        for player in players:
            resources = stateKey(player.name,'resources')
            # Determine how many resources lost
            action = Action({'subject': player.name,'verb': 'allocate','object': region.name})
            world.addDependency(resources,invader)
            if generation == 'additive': # or generation == 'none':
                # Lose only those resources allocated
                tree = makeTree(incrementMatrix(resources,'-%s' % (actionKey('amount'))))
            else:
                # Lose all resources
                tree = makeTree(setToConstantMatrix(resources,0))
            world.setDynamics(resources,action,tree)
            # Regain resources from owned territories
            action = Action({'subject': region.name,'verb': 'generate'})
            if generation == 'additive' or generation == 'restorative':
                tree = makeTree({'if': equalRow(owner,player.name),
                                 True: addFeatureMatrix(resources,value),
                                 False: None})
            elif generation == 'minimal':
                if region is world.agents[starts[int(player.name[-1])-1]]:
                    # Get resources from home base (repeated)
                    tree = makeTree({'if': equalRow(owner,player.name),
                                     True: addFeatureMatrix(resources,value),
                                     False: None})
                else:
                    # And any new winnings (one-time)
                    tree = makeTree({'if': equalRow(owner,player.name),
                                     True: {'if': equalFeatureRow(owner,invader),
                                            True: addFeatureMatrix(resources,value),
                                            False: None},
                                     False: None})
            elif generation == 'none':
                if region is world.agents[starts[int(player.name[-1])-1]]:
                    # Get resources from home base if below threshold
                    tree = makeTree({'if': greaterThanRow(resources,value),
                                     True: None,
                                     False: setToFeatureMatrix(resources,value)})
                else:
                    tree = makeTree({'if': equalRow(owner,player.name),
                                     True: {'if': equalFeatureRow(owner,invader),
                                            True: addFeatureMatrix(resources,value),
                                            False: None},
                                     False: None})
            world.setDynamics(resources,action,tree)
    # The game has two phases: generating resources and allocating resources
    world.defineState(None,'phase',list,['generate','allocate'],combinator='*',
                      description='The current phase of the game')
    world.setState(None,'phase','generate')
    key = stateKey(None,'phase')
    # If we generate, then the phase becomes allocate
    action = Action({'subject': list(regions)[0].name,'verb': 'generate'})
    tree = makeTree(setToConstantMatrix(key,'allocate'))
    world.setDynamics(key,action,tree)
    # If we allocate, then the phase becomes generate
    tree = makeTree(setToConstantMatrix(key,'generate'))
    for region in regions:
        for player in players:
            action = Action({'subject': player.name,'verb': 'allocate','object': region.name})
            world.setDynamics(key,action,tree)

    # Game ends when territory is all won
    tree = {'if': equalRow(key,'allocate'),
            True: True,
            False: False}
    for region in regions:
        tree = {'if': equalRow(stateKey(region.name,'owner'),enemy.name),
                True: False,
                False: tree}
    world.addTermination(makeTree(tree))
    # Or if nobody has any resources
    vector = KeyedVector()
    for player in players:
        vector[stateKey(player.name,'resources')] = 1.
    tree = {'if': equalRow(stateKey(None,'phase'),'allocate'),
            True: {'if': KeyedPlane(vector,0.5),
                   True: False, False: True},
            False: False}
    world.addTermination(makeTree(tree))

    # Keep track of which round it is
    world.defineState(None,'round',int,description='The current round of the game')
    world.setState(None,'round',0)
    action = Action({'subject': list(regions)[0].name,
                     'verb': 'generate'})
    key = stateKey(None,'round')
    world.setDynamics(key,action,makeTree(incrementMatrix(key,1)))
    return world
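
# Usage sketch (assumption): the ResourceWorld/ResourceAgent classes and PWL
# helpers that createWorld depends on are defined elsewhere in the original
# module. The region table and starting regions below are illustrative only;
# each region entry needs a 'value' and a 'neighbors' list ('occupants' is
# optional and defaults to 'value' above).
if __name__ == '__main__':
    regionTable = {
        'Alpha': {'value': 2, 'neighbors': ['Beta']},
        'Beta': {'value': 3, 'neighbors': ['Alpha', 'Gamma']},
        'Gamma': {'value': 2, 'neighbors': ['Beta']},
    }
    world = createWorld(2, regionTable, starts=['Alpha', 'Gamma'])
    print(world.next())   # the regions act first, then the players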
Example #41
0
            }
        }
    })


if __name__ == '__main__':

    random.seed(0)

    # sets up log to screen
    logging.basicConfig(format='%(message)s',
                        level=logging.DEBUG if DEBUG else logging.INFO)

    # create world and add agents
    world = World()
    agent1 = Agent('Agent 1')
    world.addAgent(agent1)
    agent2 = Agent('Agent 2')
    world.addAgent(agent2)

    sides = []
    rights = []
    lefts = []

    agents = [agent1, agent2]
    for agent in agents:
        # set agent's params
        agent.setAttribute('discount', 1)
        agent.setHorizon(1)
        agent.setAttribute('selection', TIEBREAK)