Exemplo n.º 1
0
 def testTerminateBonus(self, successes, reward_aggregator, terminate_bonus,
                        reward):
     """Checks that the meta-task's terminate bonus yields the expected reward."""
     meta_task = tasks.MetaAggregated(
         self.subtasks,
         reward_aggregator=reward_aggregator,
         terminate_bonus=terminate_bonus)
     sprites, _ = self._get_sprites_and_reward_list(successes)
     self.assertAlmostEqual(meta_task.reward(sprites), reward, delta=0.1)
Exemplo n.º 2
0
 def testSum(self, termination_criterion, successes, success):
     """Checks 'sum' aggregation reward and the task's success flag."""
     meta_task = tasks.MetaAggregated(
         self.subtasks,
         reward_aggregator='sum',
         termination_criterion=termination_criterion)
     sprites, reward_list = self._get_sprites_and_reward_list(successes)
     expected_reward = sum(reward_list)
     self.assertAlmostEqual(meta_task.reward(sprites),
                            expected_reward,
                            delta=0.1)
     self.assertEqual(meta_task.success(sprites), success)
Exemplo n.º 3
0
def get_config(mode='train'):
    """Generate environment config.

  Args:
    mode: 'train' or 'test'. Controls the train/test split of the
      goal-finding object scales and the clustering object colors.

  Returns:
    config: Dictionary defining task/environment configuration. Can be fed as
      kwargs to environment.Environment.

  Raises:
    ValueError: If mode is neither 'train' nor 'test'.
  """

    # Factor distributions common to all objects.
    common_factors = distribs.Product([
        distribs.Continuous('x', 0.1, 0.9),
        distribs.Continuous('y', 0.1, 0.9),
        distribs.Continuous('angle', 0, 360, dtype='int32'),
    ])

    # train/test split for goal-finding object scales and clustering object colors
    goal_finding_scale_test = distribs.Continuous('scale', 0.08, 0.12)
    green_blue_colors = distribs.Product([
        distribs.Continuous('c1', 64, 256, dtype='int32'),
        distribs.Continuous('c2', 64, 256, dtype='int32'),
    ])
    if mode == 'train':
        # Train scales are the full range minus the held-out test band.
        goal_finding_scale = distribs.SetMinus(
            distribs.Continuous('scale', 0.05, 0.15),
            goal_finding_scale_test,
        )
        # Train clusters use the upper half of the c0 channel; test (below)
        # uses the lower half, so color splits are disjoint across modes.
        cluster_colors = distribs.Product([
            distribs.Continuous('c0', 128, 256, dtype='int32'),
            green_blue_colors
        ])
    elif mode == 'test':
        goal_finding_scale = goal_finding_scale_test
        cluster_colors = distribs.Product([
            distribs.Continuous('c0', 0, 128, dtype='int32'), green_blue_colors
        ])
    else:
        raise ValueError(
            'Invalid mode {}. Mode must be "train" or "test".'.format(mode))

    # Create clustering sprite generators: one generator per cluster shape,
    # each producing 2 sprites of that shape.
    sprite_gen_list = []
    cluster_shapes = [
        distribs.Discrete('shape', [s])
        for s in ['triangle', 'square', 'pentagon']
    ]
    for shape in cluster_shapes:
        factors = distribs.Product([
            common_factors,
            cluster_colors,
            shape,
            distribs.Continuous('scale', 0.08, 0.12),
        ])
        sprite_gen_list.append(
            sprite_generators.generate_sprites(factors, num_sprites=2))

    # Create goal-finding sprite generators. Two color families (reddish and
    # greenish) each map to their own goal position below.
    goal_finding_colors = [
        distribs.Product([
            distribs.Continuous('c0', 192, 256, dtype='int32'),
            distribs.Continuous('c1', 0, 128, dtype='int32'),
            distribs.Continuous('c2', 64, 128, dtype='int32'),
        ]),
        distribs.Product([
            distribs.Continuous('c0', 0, 128, dtype='int32'),
            distribs.Continuous('c1', 192, 256, dtype='int32'),
            distribs.Continuous('c2', 64, 128, dtype='int32'),
        ])
    ]
    # Goal positions corresponding to the colors in goal_finding_colors
    goal_finding_positions = [(0., 0.5), (1., 0.5)]
    goal_finding_shapes = distribs.Discrete('shape', ['spoke_4', 'star_4'])
    for colors in goal_finding_colors:
        factors = distribs.Product([
            common_factors,
            goal_finding_scale,
            goal_finding_shapes,
            colors,
        ])
        # 1 or 2 goal-finding sprites per color family, sampled per episode.
        sprite_gen_list.append(
            sprite_generators.generate_sprites(
                factors, num_sprites=lambda: np.random.randint(1, 3)))

    # Create distractor sprite generator: 0-2 circles that belong to no task.
    distractor_factors = distribs.Product([
        common_factors,
        distribs.Discrete('shape', ['circle']),
        distribs.Continuous('c0', 64, 256, dtype='uint8'),
        distribs.Continuous('c1', 64, 256, dtype='uint8'),
        distribs.Continuous('c2', 64, 256, dtype='uint8'),
        distribs.Continuous('scale', 0.08, 0.12),
    ])
    sprite_gen_list.append(
        sprite_generators.generate_sprites(
            distractor_factors, num_sprites=lambda: np.random.randint(0, 3)))

    # Concat clusters into single scene to generate
    sprite_gen = sprite_generators.chain_generators(*sprite_gen_list)
    # Randomize sprite ordering to eliminate any task information from occlusions
    sprite_gen = sprite_generators.shuffle(sprite_gen)

    # Create the combined task of goal-finding and clustering
    task_list = []
    task_list.append(
        tasks.Clustering(cluster_shapes, terminate_bonus=0., reward_range=10.))
    for colors, goal_pos in zip(goal_finding_colors, goal_finding_positions):
        goal_finding_task = tasks.FindGoalPosition(distribs.Product(
            [colors, goal_finding_shapes]),
                                                   goal_position=goal_pos,
                                                   weights_dimensions=(1, 0),
                                                   terminate_distance=0.15,
                                                   raw_reward_multiplier=30)
        task_list.append(goal_finding_task)
    # Rewards are summed across subtasks; episode ends when all terminate.
    task = tasks.MetaAggregated(task_list,
                                reward_aggregator='sum',
                                termination_criterion='all')

    renderers = {
        'image':
        spriteworld_renderers.PILRenderer(image_size=(64, 64), anti_aliasing=5)
    }

    config = {
        'task': task,
        'action_space': action_spaces.SelectMove(scale=0.5),
        'renderers': renderers,
        'init_sprites': sprite_gen,
        'max_episode_length': 50,
        'metadata': {
            'name': os.path.basename(__file__),
            'mode': mode
        }
    }
    return config
Exemplo n.º 4
0
def get_config(mode='train'):
    """Generate environment config.

  Args:
    mode: 'train' or 'test'. Training samples from all subtask combinations
      except one held-out combination; testing uses only the held-out one.

  Returns:
    config: Dictionary defining task/environment configuration. Can be fed as
      kwargs to environment.Environment.

  Raises:
    ValueError: If mode is neither 'train' nor 'test'.
  """

    # Create the subtasks and their corresponding sprite generators
    subtasks = []
    sprite_gen_per_subtask = []
    for subtask in SUBTASKS:
        subtasks.append(
            tasks.FindGoalPosition(
                filter_distrib=subtask['distrib'],
                goal_position=subtask['goal_position'],
                terminate_distance=TERMINATE_DISTANCE,
                raw_reward_multiplier=RAW_REWARD_MULTIPLIER))
        factors = distribs.Product((
            subtask['distrib'],
            distribs.Continuous('x', 0.1, 0.9),
            distribs.Continuous('y', 0.1, 0.9),
            distribs.Discrete('shape', ['square', 'triangle', 'circle']),
            distribs.Discrete('scale', [0.13]),
            distribs.Continuous('c1', 0.3, 1.),
            distribs.Continuous('c2', 0.9, 1.),
        ))
        # One sprite per subtask; the subtask's filter_distrib selects it.
        sprite_gen_per_subtask.append(
            sprite_generators.generate_sprites(factors, num_sprites=1))

    # Consider all combinations of subtasks
    subtask_combos = list(
        itertools.combinations(np.arange(len(SUBTASKS)), NUM_TARGETS))
    if mode == 'train':
        # Randomly sample a combination of subtasks, holding one combination out
        sprite_gen = sprite_generators.sample_generator([
            sprite_generators.chain_generators(
                *[sprite_gen_per_subtask[i] for i in c])
            for c in subtask_combos[1:]
        ])

    elif mode == 'test':
        # Use the held-out subtask combination for testing
        sprite_gen = sprite_generators.chain_generators(
            *[sprite_gen_per_subtask[i] for i in subtask_combos[0]])
    else:
        # Fixed typo ('Invalide') and aligned the message with the sibling
        # get_config implementations in this codebase.
        raise ValueError(
            'Invalid mode {}. Mode must be "train" or "test".'.format(mode))

    # Randomize sprite ordering to eliminate any task information from occlusions
    sprite_gen = sprite_generators.shuffle(sprite_gen)

    # Rewards are summed across subtasks; episode ends when all terminate.
    task = tasks.MetaAggregated(subtasks,
                                reward_aggregator='sum',
                                termination_criterion='all')

    config = {
        'task': task,
        'action_space': common.action_space(),
        'renderers': common.renderers(),
        'init_sprites': sprite_gen,
        'max_episode_length': MAX_EPISODE_LENGTH,
        'metadata': {
            'name': os.path.basename(__file__),
            'mode': mode
        }
    }

    return config
Exemplo n.º 5
0
 def testMean(self, successes):
     """Checks 'mean' aggregation averages the subtask rewards."""
     meta_task = tasks.MetaAggregated(self.subtasks, reward_aggregator='mean')
     sprites, reward_list = self._get_sprites_and_reward_list(successes)
     expected_reward = np.mean(reward_list)
     self.assertAlmostEqual(meta_task.reward(sprites),
                            expected_reward,
                            delta=0.1)