def generate_sentences(self):
    """Randomly compose 200 '<action> <adjective> <name>' sentences unseen in training.

    Sentences are built from the environment's admissible actions, its
    adjective attributes extended with the wildcard 'any', and its name
    attributes (types and categories).  Sampling only happens when 'Grasp'
    or 'Grow' is an admissible action; otherwise an empty tuple is returned.

    Returns
    -------
    tuple of str
        The generated sentences (duplicates are possible, as in the original
        sampling scheme).  Also stored in ``self.new_sentence_generate``.

    NOTE(review): the original in-line comment claimed sentences were also
    filtered against test descriptions, but the code only checks
    ``self.train_descriptions`` — behavior kept as coded.
    """
    env_params = get_env_params()
    # Admissible attributes, split into name attributes (types/categories)
    # and adjective attributes; 'any' is appended as a wildcard adjective.
    name_attributes = env_params['name_attributes']
    adjective_attributes = tuple(env_params['adjective_attributes']) + ('any',)
    actions = env_params['admissible_actions']

    # Initialized unconditionally so the method cannot hit a NameError when
    # neither 'Grasp' nor 'Grow' is admissible (the original left the list
    # unbound in that branch).
    new_sentences = []
    if 'Grasp' in actions or 'Grow' in actions:
        # Keep sampling until 200 sentences absent from the training set are
        # collected.  NOTE(review): assumes enough unseen combinations exist;
        # otherwise this loop never terminates (same as the original).
        while len(new_sentences) < 200:
            sentence = ' '.join([str(random.choice(actions)),
                                 str(random.choice(adjective_attributes)),
                                 str(random.choice(name_attributes))])
            # New sentences are those not present in the training descriptions.
            if sentence not in self.train_descriptions:
                new_sentences.append(sentence)
    self.new_sentence_generate = tuple(new_sentences)
    return tuple(new_sentences)
# NOTE(review): the first part of this chunk is the tail of a function whose
# `def` line lies outside this view; it mirrors generate_sentences above but
# filters against the module-level `train_descriptions`.  Left byte-identical
# because the enclosing definition (and its exact indentation) is not visible.
# NOTE(review): the trailing `if __name__ == '__main__':` script rebuilds the
# train/test description split, extracts the admissible attributes (appending
# the 'any' wildcard adjective), and instantiates
# `simple_conjuction_based_heuristic` — "conjuction" is a typo for
# "conjunction", but it is the name of the project API, so it must not be
# renamed here.
# new sentences are those which are not in train_description and also not in test_descriptions if sentence in train_descriptions: pass else: new_sentence.append(sentence) self.new_sentence_generate = tuple(new_sentence) return tuple(new_sentence) # new sentence will be generated randomly from enviroment directly if __name__ == '__main__': from src.playground_env.descriptions import generate_all_descriptions env_params = get_env_params() train_descriptions, test_descriptions, extra_descriptions = generate_all_descriptions( env_params) p = env_params.copy() # Get the list of admissible attributes and split them by name attributes (type and categories) and adjective attributes. name_attributes = env_params['name_attributes'] adjective_attributes = env_params['adjective_attributes'] adj_list = list(adjective_attributes) adj_list.append('any') adjective_attributes = tuple(adj_list) action = env_params['admissible_actions'] generator = simple_conjuction_based_heuristic(train_descriptions, test_descriptions, None, method='SCBH')
from src.playground_env.env_params import get_env_params from src.playground_env.descriptions import generate_all_descriptions train_descriptions, test_descriptions, extra_descriptions = generate_all_descriptions( get_env_params()) def get_move_descriptions(get_agent_position_attributes, current_state): """ Get all move descriptions from the current state (if any). Parameters ---------- get_agent_position_attributes: function Function that extracts the absolute position of the agent from the state. current_state: nd.array Current state of the environment. Returns ------- descr: list of str List of Move descriptions satisfied by the current state. """ move_descriptions = [] position_attributes = get_agent_position_attributes(current_state) for pos_att in position_attributes: move_descriptions.append('Go ' + pos_att) return move_descriptions.copy() def get_grasp_descriptions(get_grasped_ids, current_state, sort_attributes, obj_attributes, params, check_if_relative,
def __init__(self, max_timesteps=50, random_init=False, human=False, reward_screen=False,
             viz_data_collection=False, display=True, agent_step_size=0.15,
             agent_initial_pos=(0, 0), agent_initial_pos_range=0.6,
             max_nb_objects=3,  # number of objects in the scene
             random_nb_obj=False,
             admissible_actions=('Move', 'Grasp', 'Grow'),  # which types of actions are admissible
             admissible_attributes=('colors', 'categories', 'types'),
             # (other possible attributes: 'relative_sizes', 'shades', 'relative_shades',
             #  'sizes', 'relative_positions')
             min_max_sizes=((0.2, 0.25), (0.25, 0.3)),  # size ranges of objects (small and large ones)
             agent_size=0.05,  # size of the agent
             epsilon_initial_pos=0.3,  # epsilon to sample initial positions
             screen_size=800,  # size of the visualization screen
             next_to_epsilon=0.3,  # area qualifying an object as 'next to' another
             attribute_combinations=False,
             obj_size_update=0.04,
             render_mode=False
             ):
    """Set up environment parameters, action/observation spaces, agent settings and rendering."""
    # All environment parameters are centralised in get_env_params; the
    # attributes below are read back from the resulting dict.
    self.params = get_env_params(max_nb_objects=max_nb_objects,
                                 admissible_actions=admissible_actions,
                                 admissible_attributes=admissible_attributes,
                                 min_max_sizes=min_max_sizes,
                                 agent_size=agent_size,
                                 epsilon_initial_pos=epsilon_initial_pos,
                                 screen_size=screen_size,
                                 next_to_epsilon=next_to_epsilon,
                                 attribute_combinations=attribute_combinations,
                                 obj_size_update=obj_size_update,
                                 render_mode=render_mode
                                 )
    self.adm_attributes = self.params['admissible_attributes']
    # Absolute attributes: the admissible ones whose name has no 'relative'.
    self.adm_abs_attributes = [att for att in self.adm_attributes if 'relative' not in att]
    self.attributes = self.params['attributes']
    self.categories = self.params['categories']
    self.screen_size = self.params['screen_size']

    # Visualization / feedback flags.
    self.viz_data_collection = viz_data_collection
    self.show_imagination_bubble = False
    self.reward_screen = reward_screen
    self.first_action = False
    self.SP_feedback = False
    self.known_goals_update = False
    self.known_goals_descr = []
    self.display = display
    self.circles = [[idx * 3, 200, idx * 4] for idx in range(50)]
    self.random_init = random_init
    self.max_timesteps = max_timesteps

    # Dimensions of action and observations spaces.
    self.dim_act = 3
    self.max_nb_objects = self.params['max_nb_objects']
    self.random_nb_obj = random_nb_obj
    self.nb_obj = self.params['max_nb_objects']
    self.dim_obj = self.params['dim_obj_features']
    self.dim_body = self.params['dim_body_features']
    # Index ranges of each object's feature slice inside the observation vector.
    self.inds_objs = [np.arange(self.dim_body + self.dim_obj * i_obj,
                                self.dim_body + self.dim_obj * (i_obj + 1))
                      for i_obj in range(self.nb_obj)]
    self.half_dim_obs = self.max_nb_objects * self.dim_obj + self.dim_body
    # The full observation is twice the body+objects feature size.
    self.dim_obs = int(2 * self.half_dim_obs)

    # We define the spaces: symmetric boxes in [-1, 1].
    self.action_space = spaces.Box(low=-np.ones(self.dim_act),
                                   high=np.ones(self.dim_act),
                                   dtype=np.float32)
    self.observation_space = spaces.Box(low=-np.ones(self.dim_obs),
                                        high=np.ones(self.dim_obs),
                                        dtype=np.float32)

    # Agent parameters.
    self.agent_step_size = agent_step_size
    self.agent_initial_pos = agent_initial_pos
    if self.display:
        self.agent_initial_pos_range = agent_initial_pos_range
    else:
        # Fixed, wider initial-position range when not displaying.
        self.agent_initial_pos_range = 0.8

    # Rendering.
    self.human = human
    self.render_mode = render_mode
    # NOTE(review): this is a generator expression, not a tuple — it can be
    # iterated only once; confirm a tuple was not intended.
    self.logits_concat = (0 for _ in range(self.nb_obj))
    if self.render_mode:
        pygame.init()
        # A real window when displaying, an off-screen surface otherwise;
        # the surface is 300 px wider when the reward screen is enabled.
        # NOTE(review): viewer creation reconstructed as nested under
        # render_mode — confirm against the original indentation.
        if self.display:
            if self.reward_screen:
                self.viewer = pygame.display.set_mode((self.screen_size + 300, self.screen_size))
            else:
                self.viewer = pygame.display.set_mode((self.screen_size, self.screen_size))
        else:
            if self.reward_screen:
                self.viewer = pygame.Surface((self.screen_size + 300, self.screen_size))
            else:
                self.viewer = pygame.Surface((self.screen_size, self.screen_size))
    self.viewer_started = False
    self.background = None

    self.reset()

    # We set to None to rush error if reset not called
    # NOTE(review): reset() was just called above, so these assignments
    # overwrite whatever reset() set — confirm this ordering is intended.
    self.observation = None
    self.initial_observation = None
    self.done = None