Code Example #1
File: tutorial_six.py Project: thias15/CausalWorld
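
A note on imports: the tutorial snippets in this listing (Code Examples #1 through #7) are shown without their import headers. The preamble below is a minimal sketch that should make them self-contained; the exact module paths are assumptions based on the CausalWorld package layout, not lines taken from the original files.

import numpy as np

# Assumed CausalWorld entry points used throughout the tutorial snippets.
from causal_world.envs import CausalWorld
from causal_world.task_generators import generate_task
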
def example():
    task = generate_task(task_generator_id='pick_and_place')
    env = CausalWorld(task=task, enable_visualization=True)
    env.reset()
    intervention_space = env.get_variable_space_used()
    for _ in range(100):
        for i in range(200):
            obs, reward, done, info = env.step(env.action_space.low)
        intervention = {
            'tool_block': {
                'size':
                np.random.uniform(intervention_space['tool_block']['size'][0],
                                  intervention_space['tool_block']['size'][1])
            }
        }
        env.do_intervention(intervention)
    env.close()
Code Example #2
def privileged_information():
    task = generate_task(task_generator_id='pushing')
    env = CausalWorld(task=task, enable_visualization=True)
    env.expose_potential_partial_solution()
    env.reset()
    for _ in range(10):
        goal_intervention_dict = env.sample_new_goal()
        success_signal, obs = env.do_intervention(goal_intervention_dict)
        print("Goal Intervention success signal", success_signal)
        for i in range(1000):
            obs, reward, done, info = env.step(env.action_space.low)
        print("now we solve it with privileged info")
        success_signal, obs = env.do_intervention(
            info['possible_solution_intervention'], check_bounds=False)
        print("Partial Solution Setting Intervention Succes Signal",
              success_signal)
        for i in range(500):
            obs, reward, done, info = env.step(env.action_space.low)
    env.close()
Code Example #3
File: tutorial_three.py Project: thias15/CausalWorld
def goal_interventions():
    task = generate_task(task_generator_id='stacked_blocks')
    env = CausalWorld(task=task, enable_visualization=True)
    env.reset()
    for _ in range(10):
        for i in range(200):
            obs, reward, done, info = env.step(env.action_space.sample())
        goal_intervention_dict = env.sample_new_goal()
        print("new goal chosen: ", goal_intervention_dict)
        success_signal, obs = env.do_intervention(goal_intervention_dict)
        print("Goal Intervention success signal", success_signal)
    env.close()
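
Each tutorial file is meant to be run directly as a script. A typical entry point, shown here only as an assumption mirroring how such tutorial scripts are usually invoked rather than a line from the original file, would be:

if __name__ == '__main__':
    goal_interventions()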
Code Example #4
def without_intervention_split():
    task = generate_task(task_generator_id='pushing')
    env = CausalWorld(task=task, enable_visualization=True)
    env.reset()
    for _ in range(2):
        for i in range(200):
            obs, reward, done, info = env.step(env.action_space.sample())
        success_signal, obs = env.do_intervention(
            {'stage_color': np.random.uniform(0, 1, [3])})
        print("Intervention success signal", success_signal)
    env.close()
Code Example #5
File: tutorial_one.py Project: thias15/CausalWorld
def example():
    task = generate_task(task_generator_id='pushing')
    env = CausalWorld(task=task, enable_visualization=True)
    env.reset()
    counter = 0
    for _ in range(1):
        for i in range(210):
            obs, reward, done, info = env.step(env.action_space.low)
            if i % 50 == 0 and i > 0:
                print(i)
                intervention = {
                    'goal_block': {
                        'cartesian_position': [0, -0.08 + 0.04 * counter, 0.0325],
                        'color': [0, 0, 1]
                    }
                }
                env.do_intervention(intervention, check_bounds=False)
                counter += 1
                print("intervention")
            if i == 201:
                intervention = {
                    'goal_block': {
                        'cartesian_position': [0, 0.08, 0.0325],
                        'color': [0, 1, 0]
                    }
                }
                env.do_intervention(intervention, check_bounds=False)
    env.close()
Code Example #6
def end_effector_pos():
    task = generate_task(task_generator_id='reaching')
    env = CausalWorld(task=task,
                      enable_visualization=True,
                      action_mode="joint_positions",
                      normalize_actions=False,
                      normalize_observations=False)
    obs = env.reset()
    for _ in range(100):
        goal_dict = env.sample_new_goal()
        success_signal, obs = env.do_intervention(goal_dict)
        obs, reward, done, info = env.step(control_policy(env, obs))
        for _ in range(250):
            obs, reward, done, info = env.step(control_policy(env, obs))
    env.close()
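
Code Example #6 above calls a control_policy helper that is defined elsewhere in the original tutorial file and is not reproduced in this listing. The stand-in below is a hypothetical placeholder, included only so the snippet can run: it assumes the unnormalized structured observation stores the nine joint positions at indices 1:10 (consistent with the 19:28 end-effector slice used in the test code further down) and simply re-commands the current joint configuration, so the arm holds its pose instead of actually tracking the goal.

def control_policy(env, obs):
    # Hypothetical placeholder, not the tutorial's real controller:
    # hold the current joint configuration (assumed to be obs[1:10]).
    return np.array(obs[1:10])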
Code Example #7
def with_intervention_split_2():
    task = generate_task(task_generator_id='pushing',
                         variables_space='space_b')
    env = CausalWorld(task=task, enable_visualization=False)
    interventions_space = task.get_intervention_space_a()
    env.reset()
    for _ in range(2):
        for i in range(200):
            obs, reward, done, info = env.step(env.action_space.sample())
        success_signal, obs = env.do_intervention({
            'stage_color':
                np.random.uniform(interventions_space['stage_color'][0],
                                  interventions_space['stage_color'][1])
        })
        print("Intervention success signal", success_signal)
    env.close()
Code Example #8
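
The unittest-based examples that follow (Code Examples #8 through #11) also omit their imports. The preamble below is a sketch; in particular, the module paths for the cyl2cart and get_suggested_grip_locations helpers are assumptions about the CausalWorld package layout, not lines from the original test files. With such a preamble in place, the test classes can be run with python -m unittest.

import unittest

import numpy as np

# Assumed import paths; adjust if the actual package layout differs.
from causal_world.envs import CausalWorld
from causal_world.task_generators import generate_task
from causal_world.utils.rotation_utils import cyl2cart
from causal_world.utils.state_utils import get_suggested_grip_locations
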
class TestPicking(unittest.TestCase):
    def setUp(self):
        self.task = generate_task(task_generator_id="picking")
        self.env = CausalWorld(task=self.task,
                               enable_visualization=False,
                               skip_frame=1,
                               action_mode="end_effector_positions",
                               normalize_actions=False,
                               normalize_observations=False)
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        self.env.set_action_mode('joint_positions')
        observations_1 = []
        rewards_1 = []
        horizon = 2000
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(observations_1[i] - observations_2[i])
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def lift_last_finger_first(self, current_obs):
        desired_action = current_obs[19:19 + 9]
        desired_action[6:] = [-0, -0.08, 0.4]
        for _ in range(250):
            obs, reward, done, info = self.env.step(desired_action)
        return desired_action

    def move_first_two_fingers(self, current_obs):
        desired_action = current_obs[19:19 + 9]
        desired_action[:6] = [
            0., 0.15313708, 0.05586292, 0.13262061, -0.07656854, 0.05586292
        ]
        for _ in range(250):
            obs, reward, done, info = self.env.step(desired_action)
        return obs

    def grip_block(self):
        grip_locations = get_suggested_grip_locations(
            self.env._task._stage.get_object('tool_block').get_size(),
            self.env._task._stage.get_object(
                'tool_block').world_to_cube_r_matrix())
        desired_action = np.zeros(9)
        desired_action[6:] = [-0, -0.08, 0.4]
        desired_action[:3] = grip_locations[0]
        desired_action[3:6] = grip_locations[1]
        # grasp the block now
        for _ in range(250):
            obs, reward, done, info = self.env.step(desired_action)
        return desired_action

    def lift_block(self, desired_grip):
        desired_action = desired_grip
        for _ in range(40):
            desired_action[2] += 0.005
            desired_action[5] += 0.005
            for _ in range(10):
                obs, reward, done, info = self.env.step(desired_action)
        return obs

    def test_02_mass(self):
        self.env.set_action_mode('end_effector_positions')
        intervention = {'tool_block': {'mass': 0.02}}
        self.env.do_intervention(interventions_dict=intervention)
        for _ in range(1):
            obs = self.env.reset()
            obs = self.move_first_two_fingers(obs)
            self.lift_last_finger_first(obs)
            desired_grip = self.grip_block()
            self.assertEqual(self.env.get_robot().get_tip_contact_states(),
                             [1, 1, 0], "contact states are not closed")
            final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2,
                               "the block didn't get lifted")

    def test_08_mass(self):
        self.env.set_action_mode('end_effector_positions')
        intervention = {'tool_block': {'mass': 0.08}}
        self.env.do_intervention(interventions_dict=intervention)
        for _ in range(1):
            obs = self.env.reset()
            obs = self.move_first_two_fingers(obs)
            self.lift_last_finger_first(obs)
            desired_grip = self.grip_block()
            self.assertEqual(self.env.get_robot().get_tip_contact_states(),
                             [1, 1, 0], "contact states are not closed")
            final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2,
                               "the block didn't get lifted")

    def test_1_mass(self):
        self.env.set_action_mode('end_effector_positions')
        intervention = {'tool_block': {'mass': 0.1}}
        self.env.do_intervention(interventions_dict=intervention)
        for _ in range(1):
            obs = self.env.reset()
            obs = self.move_first_two_fingers(obs)
            self.lift_last_finger_first(obs)
            desired_grip = self.grip_block()
            self.assertEqual(self.env.get_robot().get_tip_contact_states(),
                             [1, 1, 0], "contact states are not closed")
            final_obs = self.lift_block(desired_grip)
            self.assertGreater(final_obs[-22], 0.2,
                               "the block didn't get lifted")

    def test_determinism_w_interventions(self):
        self.env.set_action_mode('joint_positions')
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        self.env.set_action_mode('joint_positions')
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        # now we will restart again and perform an in-episode intervention
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention({
                    'tool_block': {
                        'cylindrical_position': [0.1, np.pi / 2, 0.0325]
                    }
                })
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            if not np.array_equal(observations_1[i], observations_2[i]):
                print(observations_1[i] - observations_2[i])
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2

    def test_goal_intervention(self):
        task = generate_task(task_generator_id='picking')
        env = CausalWorld(task=task,
                          enable_visualization=False,
                          normalize_observations=False)
        for _ in range(10):
            invalid_interventions_before = \
                env.get_tracker().invalid_intervention_steps
            new_goal = env.sample_new_goal()
            env.set_starting_state(interventions_dict=new_goal)
            invalid_interventions_after = \
                env.get_tracker().invalid_intervention_steps
            for _ in range(2):
                for _ in range(100):
                    obs, reward, done, info = env.step(env.action_space.low)
                    # TODO: this shouldn't be the case when the benchmark is complete;
                    # it's a hack for now
                    if invalid_interventions_before == invalid_interventions_after:
                        assert np.array_equal(
                            cyl2cart(new_goal['goal_block']
                                     ['cylindrical_position']), obs[-7:-4])
                env.reset()

        env.close()
Code Example #9
class TestCreativeStackedBlocks(unittest.TestCase):

    def setUp(self):
        self.task = generate_task(task_generator_id="creative_stacked_blocks")
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        # now we will restart again and perform an in-episode intervention
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention(
                    {'tool_level_0_num_1': {
                        'cylindrical_position': [0, 0, 0.2]
                    }})
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2
Code Example #10
File: test_reaching.py Project: thias15/CausalWorld
class TestReaching(unittest.TestCase):

    def setUp(self):
        self.task = generate_task(task_generator_id="reaching")
        self.env = CausalWorld(task=self.task,
                               enable_visualization=False,
                               action_mode='joint_positions',
                               normalize_observations=False,
                               normalize_actions=False)
        return

    def tearDown(self):
        self.env.close()
        return

    # def test_forward_kinematics(self):
    #     horizon = 100
    #     obs = self.env.reset()
    #     desired_goal = obs[-9:]
    #     for i in range(horizon):
    #         obs, reward, done, info = self.env.step(desired_goal)

    # print(obs[19:28])

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        # now we will restart again and perform an in-episode intervention
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                new_goal = self.env.sample_new_goal()
                success_signal = self.env.do_intervention(new_goal)
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2
Code Example #11
File: test_pushing.py Project: thias15/CausalWorld
class TestPushing(unittest.TestCase):
    def setUp(self):
        self.task = generate_task(task_generator_id="pushing")
        self.env = CausalWorld(task=self.task, enable_visualization=False)
        return

    def tearDown(self):
        self.env.close()
        return

    def test_determinism(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        obs = self.env.reset()
        observations_1.append(obs)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            obs = self.env.reset()
            observations_2.append(obs)
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions_1(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        new_goal = self.env.sample_new_goal()
        self.env.set_starting_state(interventions_dict=new_goal)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(i)
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_interventions_2(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        intervention = {
            'tool_block': {
                'cylindrical_position': [0, 0.3, 0.0325]
            }
        }
        self.env.set_starting_state(interventions_dict=intervention)
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)

        for _ in range(10):
            observations_2 = []
            rewards_2 = []
            self.env.reset()
            for i in range(horizon):
                obs, reward, done, info = self.env.step(actions[i])
                observations_2.append(obs)
                rewards_2.append(reward)
                if not np.array_equal(observations_1[i], observations_2[i]):
                    print(i)
                    print(
                        np.array(observations_1[i]) -
                        np.array(observations_2[i]))
                assert np.array_equal(observations_1[i], observations_2[i])
            assert rewards_1 == rewards_2

    def test_determinism_w_in_episode_interventions(self):
        observations_1 = []
        rewards_1 = []
        horizon = 100
        actions = [self.env.action_space.sample() for _ in range(horizon)]
        actions = np.array(actions)
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_1.append(obs)
            rewards_1.append(reward)
        # now we will restart again and perform an in-episode intervention
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            if i == 50:
                success_signal = self.env.do_intervention({
                    'tool_block': {
                        'cylindrical_position': [0.1, np.pi / 2, 0.0325]
                    }
                })
        observations_2 = []
        rewards_2 = []
        self.env.reset()
        for i in range(horizon):
            obs, reward, done, info = self.env.step(actions[i])
            observations_2.append(obs)
            rewards_2.append(reward)
            assert np.array_equal(observations_1[i], observations_2[i])
        assert rewards_1 == rewards_2

    def test_goal_intervention(self):
        task = generate_task(task_generator_id='pushing')
        env = CausalWorld(task=task,
                          enable_visualization=False,
                          normalize_observations=False)
        for _ in range(10):
            invalid_interventions_before = \
                env.get_tracker().invalid_intervention_steps
            new_goal = env.sample_new_goal()
            env.set_starting_state(interventions_dict=new_goal)
            invalid_interventions_after = \
                env.get_tracker().invalid_intervention_steps
            for _ in range(2):
                for _ in range(100):
                    obs, reward, done, info = env.step(env.action_space.low)
                    # TODO: this shouldn't be the case when the benchmark is complete;
                    # it's a hack for now
                    if invalid_interventions_before == invalid_interventions_after:
                        assert np.array_equal(
                            cyl2cart(new_goal['goal_block']
                                     ['cylindrical_position']), obs[-7:-4])
                env.reset()

        env.close()