Beispiel #1
0
 def new_game(self, game):
     """Reset per-game state and build the Scenario for *game*."""
     # Assign a random agent id once; subsequent games reuse the same id.
     # (try/except AttributeError is equivalent to the hasattr check.)
     try:
         self.agent_id
     except AttributeError:
         self.agent_id = agent.random_agent_name()
     self.scenario = Scenario.from_dict(self.schema, self.kb_dict(game))
     self.session = None
     self.time = 0
     self.outcomes = [None, None]
Beispiel #2
0
    def create_from_scenarios(self, scenarios):
        """Combine several scenarios into a single one on the model.

        The combined scenario joins all names with ', ', unions the
        required modules, and concatenates the scenario data in order.
        """
        scenarios = list(scenarios)  # allow multiple passes over the input
        combined_name = ', '.join(sc.name for sc in scenarios)
        combined_modules = set()
        for sc in scenarios:
            combined_modules.update(sc.required_modules)
        combined_data = [entry for sc in scenarios for entry in sc.scenario_data]
        self.model.scenario = Scenario(combined_name,
                                       required_modules=combined_modules,
                                       scenario_data=combined_data)
def generate_scenario(schema, base_price, price_unit, discounts, listings):
    """Yield negotiation Scenarios built from raw listings.

    Listings that fail processing, or whose price is below *price_unit*,
    are skipped.  For every price range produced for a listing, a fresh
    pair of KBs is generated and each side's 'personal' facts are updated
    with its half of the range.

    NOTE(review): *base_price* is overwritten by each listing's own price
    before use -- confirm callers rely only on the per-listing value.
    """
    for raw_listing in listings:
        listing = process_listing(raw_listing)
        if not listing:
            continue
        base_price = int(listing['price'])
        if base_price < price_unit:
            continue
        for ranges in generate_price_range(base_price, price_unit,
                                           discounts):
            kbs = generate_kbs(schema, listing)
            kbs[BUYER].facts['personal'].update(ranges[BUYER])
            kbs[SELLER].facts['personal'].update(ranges[SELLER])
            yield Scenario(generate_uuid('S'), listing['post_id'],
                           listing['category'], listing['images'],
                           schema.attributes, kbs)
Beispiel #4
0
import random

from core.price_history import PriceHistory
from core.scenario import Scenario

if __name__ == '__main__':
    # Fixed seed so simulation runs are reproducible.
    random.seed(44)

    # Load price data first; the scenario run presumably reads from
    # PriceHistory -- TODO confirm against core.price_history.
    PriceHistory.initialize("data/prices.csv")
    scenario = Scenario("data/trades_1000.csv")
    scenario.run()

    #scenario.trade_graph.write_dot("data/graph.dot")
Beispiel #5
0
        type=bool)
    config_specification.add_argument(
        '--showAgeLabel',
        default=False,
        help=
        "Optional: decide whether to show the current age of the track the text annotation.",
        type=bool)

    parsed_config_specification = vars(config_specification.parse_args())
    return parsed_config_specification


if __name__ == '__main__':
    config = create_args()

    # Scenario configs are stored as <config_dir>/<scenario>.json.
    scenario = Scenario.load(get_scenario_config_dir() + config['scenario'] +
                             '.json')
    episode = scenario.load_episode(config["episode"])

    # Episode samples exported as <scenario>_e<episode>.csv in the data dir.
    episode_dataset = pd.read_csv(
        get_data_dir() +
        '{}_e{}.csv'.format(config['scenario'], config["episode"]))

    # Registry mapping CLI option values to goal-recogniser classes.
    goal_recognisers = {
        'prior': PriorBaseline,
        'trained_trees': TrainedDecisionTrees,
        'handcrafted_trees': HandcraftedGoalTrees
    }

    if config['goal_recogniser'] is not None:
        goal_recogniser = goal_recognisers[config['goal_recogniser']].load(
            config['scenario'])
import argparse
from cocoa.core.util import read_json, write_json
from cocoa.core.scenario_db import ScenarioDB
from cocoa.core.schema import Schema
from core.scenario import Scenario

# Rebuild Scenario objects from saved chat transcripts and persist them
# as a ScenarioDB JSON file.
parser = argparse.ArgumentParser()
parser.add_argument('--chats')
parser.add_argument('--scenarios')
parser.add_argument('--max', type=int)
args = parser.parse_args()

chats = read_json(args.chats)
limit = args.max or len(chats)
scenarios = [Scenario.from_dict(None, chat['scenario']) for chat in chats[:limit]]
scenario_db = ScenarioDB(scenarios)
write_json(scenario_db.to_dict(), args.scenarios)
def main():
    """Evaluate goal-recognition models and plot per-scenario results.

    For each scenario (all four defaults, or one chosen via --scenario)
    this loads the 'test' dataset, scores every model in ``models``,
    prints summary tables, and shows accuracy/entropy curves against the
    fraction of trajectory observed.
    """
    plt.style.use('ggplot')

    parser = argparse.ArgumentParser(
        description='Train decision trees for goal recognition')
    parser.add_argument('--scenario',
                        type=str,
                        help='Name of scenario to validate',
                        default=None)
    args = parser.parse_args()

    if args.scenario is None:
        scenario_names = ['heckstrasse', 'bendplatz', 'frankenberg', 'round']
    else:
        scenario_names = [args.scenario]

    # print('loading episodes')
    # episodes = scenario.load_episodes()

    # Model registry: display name -> recogniser class (loaded per scenario).
    models = {
        'prior_baseline': PriorBaseline,
        #'handcrafted_trees': HandcraftedGoalTrees,
        'trained_trees': TrainedDecisionTrees
    }

    # One summary table per metric: rows are models, columns are scenarios.
    accuracies = pd.DataFrame(index=models.keys(), columns=scenario_names)
    accuracies_sem = pd.DataFrame(index=models.keys(), columns=scenario_names)
    cross_entropies = pd.DataFrame(index=models.keys(), columns=scenario_names)
    entropies = pd.DataFrame(index=models.keys(), columns=scenario_names)
    norm_entropies = pd.DataFrame(index=models.keys(), columns=scenario_names)
    avg_max_prob = pd.DataFrame(index=models.keys(), columns=scenario_names)
    avg_min_prob = pd.DataFrame(index=models.keys(), columns=scenario_names)

    # predictions[scenario_name][model_name] -> per-sample results frame.
    predictions = {}
    dataset_name = 'test'

    for scenario_name in scenario_names:
        dataset = get_dataset(scenario_name, dataset_name)
        scenario = Scenario.load(get_scenario_config_dir() + scenario_name +
                                 '.json')
        dataset_predictions = {}
        num_goals = len(scenario.config.goals)
        targets = dataset.true_goal.to_numpy()

        for model_name, model in models.items():
            model = model.load(scenario_name)
            unique_samples = model.batch_goal_probabilities(dataset)
            unique_samples['model_correct'] = (
                unique_samples['model_prediction'] ==
                unique_samples['true_goal'])
            # Mean negative log-probability of the true goal; rows where
            # the model assigned zero probability are excluded to avoid
            # -inf from the log.
            cross_entropy = -np.mean(
                np.log(unique_samples.loc[unique_samples.model_probs != 0,
                                          'model_probs']))
            accuracy = unique_samples.model_correct.mean()
            accuracies_sem.loc[
                model_name,
                scenario_name] = unique_samples.model_correct.sem()
            accuracies.loc[model_name, scenario_name] = accuracy
            cross_entropies.loc[model_name, scenario_name] = cross_entropy
            entropies.loc[model_name,
                          scenario_name] = unique_samples.model_entropy.mean()
            norm_entropies.loc[
                model_name,
                scenario_name] = unique_samples.model_entropy_norm.mean()
            avg_max_prob.loc[model_name,
                             scenario_name] = unique_samples.max_probs.mean()
            avg_min_prob.loc[model_name,
                             scenario_name] = unique_samples.min_probs.mean()
            dataset_predictions[model_name] = unique_samples
            print('{} accuracy: {:.3f}'.format(model_name, accuracy))
            print('{} cross entropy: {:.3f}'.format(model_name, cross_entropy))

        predictions[scenario_name] = dataset_predictions

    print('accuracy:')
    print(accuracies)
    print('accuracy sem:')
    print(accuracies_sem)
    print('\ncross entropy:')
    print(cross_entropies)
    print('\nentropy:')
    print(entropies)
    print('\nnormalised entropy:')
    print(norm_entropies)
    print('\naverage max probability:')
    print(avg_max_prob)
    print('\naverage min probability:')
    print(avg_min_prob)

    # Per-scenario plots: accuracy vs fraction of trajectory observed,
    # with a +/- SEM band around each model's curve.
    for scenario_name in scenario_names:

        fig, ax = plt.subplots()
        for model_name, model in models.items():
            unique_samples = predictions[scenario_name][model_name]
            fraction_observed_grouped = unique_samples[[
                'model_correct', 'fraction_observed'
            ]].groupby('fraction_observed')
            accuracy = fraction_observed_grouped.mean()
            # Standard error of the mean per group: std / sqrt(count).
            accuracy_sem = fraction_observed_grouped.std() / np.sqrt(
                fraction_observed_grouped.count())
            accuracy.rename(columns={'model_correct': model_name}).plot(ax=ax)
            plt.fill_between(
                accuracy_sem.index,
                (accuracy + accuracy_sem).model_correct.to_numpy(),
                (accuracy - accuracy_sem).model_correct.to_numpy(),
                alpha=0.2)
        plt.xlabel('fraction of trajectory observed')
        plt.title('Accuracy ({})'.format(scenario_name))
        plt.ylim([0, 1])
        plt.show()

        # Same style of plot for the model entropy metric.
        fig, ax = plt.subplots()
        for model_name, model in models.items():
            unique_samples = predictions[scenario_name][model_name]
            fraction_observed_grouped = unique_samples[[
                'model_entropy', 'fraction_observed'
            ]].groupby('fraction_observed')
            entropy_norm = fraction_observed_grouped.mean()
            entropy_norm_sem = fraction_observed_grouped.std() / np.sqrt(
                fraction_observed_grouped.count())
            entropy_norm.rename(columns={
                'model_entropy': model_name
            }).plot(ax=ax)
            plt.fill_between(
                entropy_norm_sem.index,
                (entropy_norm + entropy_norm_sem).model_entropy.to_numpy(),
                (entropy_norm - entropy_norm_sem).model_entropy.to_numpy(),
                alpha=0.2)
        plt.xlabel('fraction of trajectory observed')
        plt.title('Normalised Entropy ({})'.format(scenario_name))
        plt.ylim([0, 1])
        plt.show()
Beispiel #8
0
 def get_scenario(cls, chat):
     """Reconstruct the Scenario embedded in a chat transcript dict."""
     return Scenario.from_dict(None, chat['scenario'])
Beispiel #9
0
 def __init__(self):
     """Initialise the base scenario, load settings, and place the actors."""
     Scenario.__init__(self)
     self.load_settings()
     # Spawn positions come from the settings file via parse_tuple;
     # the lamp is added first, then the cockroach (original order kept).
     for actor_cls, settings_key in ((Lamp, 'lamp'), (Cockroach, 'cockroach')):
         self.add(actor_cls(self, self.parse_tuple(settings_key)))
Beispiel #10
0
import numpy as np
import pandas as pd

from core.base import get_scenario_config_dir, get_data_dir
from core.data_processing import get_dataset
from decisiontree.dt_goal_recogniser import HandcraftedGoalTrees, TrainedDecisionTrees
from goalrecognition.goal_recognition import PriorBaseline
from core.scenario import Scenario

# Evaluate all recogniser models on the train/valid/test splits of a
# single scenario.
scenario_name = 'heckstrasse'
scenario = Scenario.load(get_scenario_config_dir() + scenario_name + '.json')
print('loading episodes')
episodes = scenario.load_episodes()

# Display name -> recogniser class.
models = {
    'prior_baseline': PriorBaseline,
    'handcrafted_trees': HandcraftedGoalTrees,
    'trained_trees': TrainedDecisionTrees
}

dataset_names = ['train', 'valid', 'test']

# Summary tables: rows are models, columns are dataset splits.
accuracies = pd.DataFrame(index=models.keys(), columns=dataset_names)
cross_entropies = pd.DataFrame(index=models.keys(), columns=dataset_names)

for dataset_name in dataset_names:
    dataset = get_dataset(scenario_name, dataset_name, features=False)
    predictions = {}
    num_goals = len(scenario.config.goals)
    targets = dataset.true_goal.to_numpy()
Beispiel #11
0
 def create_new_scenario(self):
     """Install a fresh, untitled Scenario on the model."""
     # NOTE(review): 'tranlator' is kept as spelled -- it must match the
     # attribute defined elsewhere in this class; confirm before renaming.
     untitled = self.tranlator.translate('UNTITLED_TEXT')
     self.model.scenario = Scenario(f'<{untitled}>')
Beispiel #12
0
 def load(cls, scenario_name):
     """Build an instance from saved priors plus the scenario config file."""
     priors = cls.load_priors(scenario_name)
     config_path = get_scenario_config_dir() + '{}.json'.format(scenario_name)
     scenario = Scenario.load(config_path)
     return cls(priors, scenario)
Beispiel #13
0
parser.add_argument('--schema-path')
parser.add_argument(
    '--scenario-ints-file',
    help=
    'Path to the file containing 6 integers per line that describes the scenario'
)
parser.add_argument('--output', help='Path to the output JSON scenario file')
args = parser.parse_args()

schema = Schema(args.schema_path)

# Each input line holds one agent's integers; consecutive pairs of lines
# form one scenario.  A pair is only valid when both agents agree on the
# item counts; otherwise the first line is dropped and the second starts
# a new pair.  (The previous `assert` re-checking equality right after
# the explicit inequality check was unreachable-false and was removed.)
scenarios = []
with open(args.scenario_ints_file) as fin:
    kbs = []
    names = ['book', 'hat', 'ball']
    for line in fin:
        ints = [int(x) for x in line.strip().split()]
        kbs.append(KB.from_ints(schema.attributes, names, ints))
        if len(kbs) == 2:
            if kbs[0].item_counts != kbs[1].item_counts:
                del kbs[0]
                continue
            scenarios.append(
                Scenario(generate_uuid("FB"), schema.attributes, kbs))
            kbs = []

scenario_db = ScenarioDB(scenarios)
write_json(scenario_db.to_dict(), args.output)