Beispiel #1
0
def generate_new_game():
    """Build a single MazeGame from the YAML config named by the
    module-level ``filename`` (looked up under
    mazebasev2/options/knowledge_planner/).

    Returns:
        games.MazeGame: wrapper around one BasicKnowledgeGame using a
        grid featurizer.
    """
    yaml_file = 'mazebasev2/options/knowledge_planner/' + filename

    with open(yaml_file, 'r') as handle:
        # safe_load: the config is plain data; yaml.load without an explicit
        # Loader is deprecated and can execute arbitrary constructors.
        options = yaml.safe_load(handle)

    # Sub-option groups from the config. method/log opts are unused here
    # but read to match the config schema.
    method_opt = options['method']
    env_opt = options['env']
    log_opt = options['logs']

    # Load the world-knowledge JSON referenced by the env options.
    knowledge_root = env_opt['knowledge_root']
    world_knowledge_file = os.path.join(
        'mazebasev2', knowledge_root, env_opt['world_knowledge']['train'])
    with open(world_knowledge_file) as f:
        world_knowledge = json.load(f)

    # Map size tuple repeats w and h — presumably (min_w, max_w, min_h,
    # max_h) fixed to an exact size; confirm against BasicKnowledgeGame.
    map_size = (env_opt['state_rep']['w'], env_opt['state_rep']['w'],
                env_opt['state_rep']['h'], env_opt['state_rep']['h'])
    all_games = [games.BasicKnowledgeGame(world_knowledge=world_knowledge,
                                          proposed_knowledge=[],
                                          options=env_opt,
                                          load_items=None,
                                          map_size=map_size)]

    # Game wrapper with grid-based observations.
    game = games.MazeGame(
        all_games,
        featurizer=featurizers.GridFeaturizer()
    )

    return game
    def reset(self):
        """Reset the environment for a new episode.

        Rebuilds the game wrapper, takes the initial observation, embeds
        its components with GloVe, and caches the flat state vector.

        Returns:
            np.ndarray: 1-D concatenation of (step count, grid embedding,
            one-hot grid, goal embedding, inventory embedding).
        """
        self.count = 0

        # Fresh game wrapper each episode.
        self.game = games.MazeGame(self.all_games,
                                   featurizer=featurizers.GridFeaturizer())

        # Initial observation; side_info is unused here.
        config = self.game.observe()
        grid_obs, side_info = config['observation']

        inventory = self.game.game.inventory
        goal = self.game.game.goal

        # Embed each observation component with GloVe.
        states_embedding = get_grid_embedding(grid_obs, self.glove,
                                              self.embed_size)
        states_onehot = one_hot_grid(grid_obs, self.glove, self.embed_size)
        goal = get_goal_embedding(goal, self.glove, self.embed_size)
        inventory = get_inventory_embedding(inventory, self.glove,
                                            self.embed_size)
        counts = np.array([self.game.game.count])
        self.state = np.concatenate(
            (counts.flatten(), states_embedding.flatten(),
             states_onehot.flatten(), goal.flatten(), inventory.flatten()))

        return self.state
Beispiel #3
0
def generate_new_game():
    """Build a single MazeGame from the unseen-tasks YAML config.

    Returns:
        games.MazeGame: wrapper around one BasicKnowledgeGame using a
        grid featurizer.
    """
    # Task selection is hard-coded to the unseen-tasks config; other task
    # files live alongside it in knowledge_planner/ if a different split
    # is needed.
    yaml_file = 'mazebasev2/options/knowledge_planner/unseen_tasks.yaml'

    with open(yaml_file, 'r') as handle:
        # safe_load: the config is plain data; yaml.load without an explicit
        # Loader is deprecated and can execute arbitrary constructors.
        options = yaml.safe_load(handle)

    # Sub-option groups from the config. method/log opts are unused here
    # but read to match the config schema.
    method_opt = options['method']
    env_opt = options['env']
    log_opt = options['logs']

    # Load the world-knowledge JSON referenced by the env options.
    knowledge_root = env_opt['knowledge_root']
    world_knowledge_file = os.path.join(
        'mazebasev2', knowledge_root, env_opt['world_knowledge']['train'])
    with open(world_knowledge_file) as f:
        world_knowledge = json.load(f)

    # Map size tuple repeats w and h — presumably (min_w, max_w, min_h,
    # max_h) fixed to an exact size; confirm against BasicKnowledgeGame.
    map_size = (env_opt['state_rep']['w'], env_opt['state_rep']['w'],
                env_opt['state_rep']['h'], env_opt['state_rep']['h'])
    all_games = [games.BasicKnowledgeGame(world_knowledge=world_knowledge,
                                          proposed_knowledge=[],
                                          options=env_opt,
                                          load_items=None,
                                          map_size=map_size)]

    # Game wrapper with grid-based observations.
    game = games.MazeGame(
        all_games,
        featurizer=featurizers.GridFeaturizer()
    )

    return game
Beispiel #4
0
    def reset(self):
        """Reset the environment for a new episode.

        Rebuilds the game wrapper, clears the frame queue, embeds the
        initial observation, and returns the state stacked three times
        (a frame-stack of identical initial frames).

        Returns:
            np.ndarray: 1-D array — the per-frame state vector (count,
            grid embedding, one-hot grid, goal embedding, inventory
            embedding) repeated 3x.
        """
        self.count = 0

        self.queue = []

        # Fresh game wrapper each episode.
        self.game = games.MazeGame(self.all_games,
                                   featurizer=featurizers.GridFeaturizer())

        # Initial observation; side_info is unused here.
        config = self.game.observe()
        grid_obs, side_info = config['observation']

        inventory = self.game.game.inventory
        goal = self.game.game.goal

        # Embed each observation component with GloVe.
        states_embedding = get_grid_embedding(grid_obs, self.glove,
                                              self.embed_size)
        states_onehot = one_hot_grid(grid_obs, self.glove, self.embed_size)
        goal = get_goal_embedding(goal, self.glove, self.embed_size)
        inventory = get_inventory_embedding(inventory, self.glove,
                                            self.embed_size)
        counts = np.array([self.game.game.count])
        self.state = np.concatenate(
            (counts.flatten(), states_embedding.flatten(),
             states_onehot.flatten(), goal.flatten(), inventory.flatten()))

        # Frame stack: at reset, all three frames are the initial state.
        self.combined_states = np.concatenate(
            (self.state, self.state, self.state))

        return self.combined_states
Beispiel #5
0
def start_game():
    """Flask handler: start (or resume) a maze game for a player.

    Reads ``a`` (player id) and ``b`` (entrance code) from the JSON body,
    chooses a short or long task YAML based on how many games this
    (player, entrance_code) pair has already played, builds the game,
    logs it, and returns the rendered board state as JSON.
    """
    json_data = request.get_json(force=True)
    player1 = json_data["a"]
    entrance_code = json_data["b"]

    # BUG FIX: `completed` used to be reset to False *after* the branch
    # below set it to True, so the response always reported
    # already_complete=False. Initialize it before branching instead.
    completed = False

    if (player1, entrance_code) in game_count:
        # Pair has already played both games.
        if game_count[(player1, entrance_code)] == 2:
            completed = True

        # otherwise, they have already done one. give the short task.
        yaml_file = 'mazebasev2/options/knowledge_planner/short_task.yaml'
        game_count[(player1, entrance_code)] = 2

    elif player1 not in users:
        # Brand-new player: always start with the short task.
        users[player1] = 1
        game_count[(player1, entrance_code)] = 1
        yaml_file = 'mazebasev2/options/knowledge_planner/short_task.yaml'

    else:
        # Known player, new entrance code: 60% long task, 40% short,
        # updating game_count accordingly.
        if random.uniform(0, 1) > 0.4:
            yaml_file = 'mazebasev2/options/knowledge_planner/long_task.yaml'
            game_count[(player1, entrance_code)] = 2
        else:
            yaml_file = 'mazebasev2/options/knowledge_planner/short_task.yaml'
            game_count[(player1, entrance_code)] = 1

    with open(yaml_file, 'r') as handle:
        # safe_load: the config is plain data; yaml.load without an explicit
        # Loader is deprecated and can execute arbitrary constructors.
        options = yaml.safe_load(handle)

    # Sub-option groups from the config. method/log opts are unused here
    # but read to match the config schema.
    method_opt = options['method']
    env_opt = options['env']
    log_opt = options['logs']

    # Load the world-knowledge JSON referenced by the env options.
    knowledge_root = env_opt['knowledge_root']
    world_knowledge_file = os.path.join('mazebasev2', knowledge_root,
                                        env_opt['world_knowledge']['train'])
    with open(world_knowledge_file) as f:
        world_knowledge = json.load(f)

    # Map size tuple repeats w and h — presumably (min_w, max_w, min_h,
    # max_h) fixed to an exact size; confirm against BasicKnowledgeGame.
    map_size = (env_opt['state_rep']['w'], env_opt['state_rep']['w'],
                env_opt['state_rep']['h'], env_opt['state_rep']['h'])
    all_games = [
        games.BasicKnowledgeGame(world_knowledge=world_knowledge,
                                 proposed_knowledge=[],
                                 options=env_opt,
                                 load_items=None,
                                 map_size=map_size)
    ]

    # Game wrapper with grid-based observations.
    game = games.MazeGame(all_games, featurizer=featurizers.GridFeaturizer())

    # Track the live game for this (player, code) pair.
    current_games[(player1, entrance_code)] = game

    # Snapshot the full observable state for logging.
    game_observe = game.observe()
    game_observe["extra_items"] = game.game.extra_items
    game_observe["inventory"] = game.game.inventory
    game_observe["goal"] = game.game.goal
    game_observe["recipe"] = game.game.recipe

    add_to_file(player1, entrance_code, game_observe, '1', str(time.time()))

    # Render the board and derive the per-cell display maps.
    temp = game.display()
    symbol_map, color_map, agent_loc = create_symbol_color_maps(temp)

    return jsonify(result=symbol_map,
                   color=color_map,
                   goal=game.game.goal,
                   inventory=game.game.inventory,
                   recipe=game.game.recipe,
                   agent=agent_loc,
                   links=link_lookup,
                   already_complete=completed)