Example #1
 def pre_replay(self, logger=logging):
     # Create PsychSim model
     logger.info('Creating world with "{}" map'.format(self.map_table.name))
     try:
         self.world, self.triage_agent, self.observer, self.victims, self.world_map = \
             make_single_player_world(self.parser.player_name(), self.map_table.init_loc,
                                      self.map_table.adjacency, self.map_table.victims, False, True, {},
                                      self.create_observer, logger.getChild('make_single_player_world'))
     except Exception:
         logger.error(traceback.format_exc())
         logger.error('Unable to create world')
         return False
     # Last-minute filling in of models. Would do it earlier if we extracted triage_agent's name
     features = None
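     # enumerate every combination of model parameter values across the configured dimensions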
     self.model_list = [{dimension: value[index] for index, dimension in enumerate(self.models)}
                        for value in itertools.product(*self.models.values()) if len(value) > 0]
     for index, model in enumerate(self.model_list):
         if 'name' not in model:
             model['name'] = '{}_{}'.format(self.triage_agent.name,
                                            '_'.join([model[dimension] for dimension in self.models]))
             for dimension in self.models:
                 model[dimension] = self.models[dimension][model[dimension]]
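                 # convert a raw reward weight vector into a {feature: weight} mapping over the linear reward features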
                 if dimension == 'reward':
                     if not isinstance(model[dimension], dict):
                         if features is None:
                             import atomic.model_learning.linear.rewards as rewards
                             features = rewards.create_reward_vector(
                                 self.triage_agent, self.world_map.all_locations,
                                 self.world_map.moveActions[self.triage_agent.name])
                         model[dimension] = {feature: model[dimension][i] for i, feature in enumerate(features)}
     if len(self.model_list) > 0:
         set_player_models(self.world, self.observer.name, self.triage_agent.name, self.victims, self.model_list)
     #        self.parser.victimsObj = self.victims
     return True
Example #2
    def pre_replay(self, logger=logging):
        # Create PsychSim model
        logger.info('Creating world')

        try:
            self.parser.startProcessing(self.derived_features, self.msg_types)
        except Exception:
            logger.error('Unable to start parser')
            logger.error(traceback.format_exc())
            return False

        if self.feature_output:
            # processes data to extract features depending on type of count
            features = {'File': os.path.splitext(os.path.basename(self.file_name))[0]}
            if self.condition_fields is None:
                self.condition_fields = list(filename_to_condition(features['File']).keys())
            for feature in self.derived_features:
                features.update(_get_feature_values(feature))
            self.feature_data.append(features)

        try:
            if self.rddl_file:
                # Team mission
                self.rddl_converter = Converter()
                self.rddl_converter.convert_file(self.rddl_file)
                self.world = self.rddl_converter.world
                players = set(self.parser.agentToPlayer.keys())
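                # create a zero-level model of each player for the other players to use when reasoning about them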
                zero_models = {name: self.world.agents[name].zero_level() for name in players}
                for name in players:
                    agent = self.world.agents[name]
                    agent.create_belief_state()
                    agent.create_belief_state(model=zero_models[name])
                    agent.setAttribute('selection', 'distribution', zero_models[name])
                    for other_name in players-{name}:
                        other_agent = self.world.agents[other_name]
                        self.world.setModel(name, zero_models[name], other_name, other_agent.get_true_model())
                    agent.set_observations()
            else:
                # Solo mission
                self.world, self.triage_agent, _, self.victims, self.world_map = \
                    make_single_player_world(self.parser.player_name(), self.map_table.init_loc,
                                             self.map_table.adjacency, self.map_table.victims, False, True,
                                             False, logger.getChild('make_single_player_world'))
        except Exception:
            logger.error('Unable to create world')
            logger.error(traceback.format_exc())
            return False
        return True
Example #3
fname = os.path.join(os.path.dirname(__file__), '../data',
                     'processed_20200728_Participant3_Cond1.csv')
parser = ProcessCSV(fname, maxDist, logging)

######### Get Map Data
mapName = 'FalconEasy'
DEFAULT_MAPS = get_default_maps()
SandRLocs = DEFAULT_MAPS[mapName].adjacency
SandRVics = DEFAULT_MAPS[mapName].victims

## Fabricate a light switch map that maps each room with a switch to a list of rooms affected by the switch
shared = {'lh': 8, 'rh': 9, 'mb': 5, 'wb': 5}
lightMap = {k: [k] for k in SandRLocs if not any(k.startswith(shr) for shr in shared)}
for shr, num in shared.items():
    lightMap[shr + '1'] = [shr + str(i) for i in range(1, num + 1)]


# make_single_player_world(..., use_unobserved=True, full_obs=False, logger=logging)
world, triageAgent, agent, victimsObj, world_map = make_single_player_world(
    parser.player_name(), None, SandRLocs, SandRVics, False, True)


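# replay parameters: cap on parsed events and the window of game turns to step through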
maxNumEvents = 350
runStartsAt = 0
runEndsAt = 20
fastFwdTo = 9999
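# convert the parsed log into PsychSim actions/events and replay them in the world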
parser.getActionsAndEvents(victimsObj, world_map, maxNumEvents)
parser.runTimeless(world, runStartsAt, runEndsAt, fastFwdTo)
Example #4
        plot_bar(OrderedDict(zip(subject_ids, lengths)), 'Trajectory Lengths',
                 os.path.join(args.output, 'parse-lengths.pdf'))

    # generate trajectories
    default_maps = get_default_maps()
    if args.trajectories == 0 or args.map_name not in default_maps:
        msg = 'Skipping generation benchmark. '
        if args.map_name not in default_maps:
            msg += 'Map name {} not in default maps.'.format(args.map_name)
        logging.info(msg)

    else:
        # create world, agent and observer
        map_table = default_maps[args.map_name]
        world, agent, observer, victims, world_map = make_single_player_world(
            PLAYER_NAME, map_table.init_loc, map_table.adjacency,
            map_table.victims, False, FULL_OBS, False)

        # agent params
        agent.setAttribute('rationality', args.rationality)
        agent.setAttribute('selection', args.selection)
        agent.setAttribute('horizon', args.horizon)

        # set agent rwd function
        rwd_vector = create_reward_vector(agent, map_table.rooms_list,
                                          world_map.moveActions[agent.name])
        rwd_vector.set_rewards(agent, REWARD_WEIGHTS)
        logging.info('Set reward vector: {}'.format(
            dict(zip(rwd_vector.names, REWARD_WEIGHTS))))

        # generate trajectories
Example #5
if __name__ == '__main__':
    # create output
    create_clear_dir(OUTPUT_DIR)

    # sets up log to file
    change_log_handler(os.path.join(OUTPUT_DIR, 'inference.log'), 2 if DEBUG else 1)

    maps = get_default_maps()
    if EXPT not in maps:
        raise NameError(f'Experiment "{EXPT}" is not implemented yet')

    # create world, agent and observer
    map_data = maps[EXPT]
    world, agent, observer, victims, world_map = \
        make_single_player_world(AGENT_NAME, map_data.init_loc, map_data.adjacency, map_data.victims, False, FULL_OBS)
    agent.setAttribute('horizon', HORIZON)
    agent.setAttribute('selection', AGENT_SELECTION)
    agent.resetBelief(ignore={modelKey(observer.name)})

    model_names = create_mental_models(world, agent, observer, victims)

    # generates trajectory
    logging.info('Generating trajectory of length {}...'.format(NUM_STEPS))
    trajectory = generate_trajectory(agent, NUM_STEPS)
    save_object(trajectory, os.path.join(OUTPUT_DIR, 'trajectory.pkl.gz'), True)

    # gets evolution of inference over reward models of the agent
    probs = track_reward_model_inference(
        trajectory, model_names, agent, observer, [stateKey(agent.name, 'loc')], verbose=False)
Example #6
    replayer = Replayer([DATA_FILE])
    map_info = replayer.maps[MAP]
    neighbors = map_info.adjacency
    locations = list(map_info.rooms)
    victims_locs = map_info.victims
    coords = map_info.coordinates

    logging.info('Parsing data file {}...'.format(DATA_FILE))
    parser = TrajectoryParser(DATA_FILE)
    player_name = parser.player_name()
    logging.info('Got {} events for player "{}"'.format(
        parser.data.shape[0], player_name))

    # create world, agent and observer
    world, agent, observer, victims, world_map = \
        make_single_player_world(player_name, map_info.init_loc, neighbors, victims_locs, False, FULL_OBS)

    plot_environment(world, locations, neighbors,
                     os.path.join(OUTPUT_DIR, 'map.pdf'), coords)

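    # candidate reward models for the player, differing in the value placed on green vs. yellow victims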
    model_list = [{
        'name': PREFER_NONE_MODEL,
        'reward': {
            GREEN_VICTIM: MEAN_VAL,
            YELLOW_VICTIM: MEAN_VAL
        },
        'rationality': MODEL_RATIONALITY,
        'selection': MODEL_SELECTION
    }, {
        'name': PREFER_GREEN_MODEL,
        'reward': {
Example #7
    # loads clusters
    cluster_weights = load_cluster_reward_weights(CLUSTERS_FILE)
    logging.info('Loaded {} clusters from {}:'.format(len(cluster_weights),
                                                      CLUSTERS_FILE))
    for cluster in sorted(cluster_weights):
        logging.info('\tCluster {}: {}'.format(cluster,
                                               cluster_weights[cluster]))

    # create world and agent
    loc_neighbors = MAP_TABLE.adjacency
    locations = MAP_TABLE.rooms_list
    coords = MAP_TABLE.coordinates

    world, agent, observer, victims, world_map = \
        make_single_player_world(AGENT_NAME, MAP_TABLE.init_loc, loc_neighbors, MAP_TABLE.victims, False, FULL_OBS)
    plot_environment(world, locations, loc_neighbors,
                     os.path.join(OUTPUT_DIR, 'env.pdf'), coords)

    # set agent params
    agent.setAttribute('horizon', HORIZON)
    agent.setAttribute('selection', SELECTION)
    agent.setAttribute('rationality', RATIONALITY)

    # set agent rwd function
    rwd_vector = create_reward_vector(agent, locations,
                                      world_map.moveActions[agent.name])
    # pick one cluster's reward weights at random
    rwd_weights = random.choice(list(cluster_weights.values()))
    rwd_vector.set_rewards(agent, rwd_weights)
    logging.info('Set reward vector: {}'.format(
        dict(zip(rwd_vector.names, rwd_weights))))