Example #1
0
def test_planner_to_goal():
    """Smoke test: plan from the env's initial state to a fixed block pose."""
    env, _ = make_block_push_env(two_d=True)
    serialized_start = env.get_pillar_state()[0]
    start = State.create_from_serialized_string(serialized_start)
    goal = State.create_from_serialized_string(serialized_start)
    # Goal: move the block to a hard-coded target in the x/z plane.
    target = np.array(start.get_values_as_vec([block_pos_fqn]))
    target[0], target[2] = 0.2, 0.04
    goal.set_values_from_vec([block_pos_fqn], target.tolist())
    Planner().plan(start.get_serialized_string(),
                   goal.get_serialized_string())
Example #2
0
def test_one_push_planner():
    """Plan a single short push and feed the plan to an agent for data collection."""
    env, _ = make_block_push_env(two_d=True)
    serialized_start = env.get_pillar_state()[0]
    start = State.create_from_serialized_string(serialized_start)
    goal = State.create_from_serialized_string(serialized_start)
    # Goal: shift the block 5 cm along the first axis.
    target = np.array(start.get_values_as_vec([block_pos_fqn]))
    target[0] -= 0.05
    goal.set_values_from_vec([block_pos_fqn], target.tolist())
    plan = Planner(env.cfg).plan(start.get_serialized_string(),
                                 goal.get_serialized_string())
    agent = Agent("test_planner")
    agent.collect_transition_data(env, [plan])
Example #3
0
 def __init__(self, state_str, op=None, parent=None):
     """Search node wrapping a serialized pillar state.

     Root nodes (parent is None) carry zero cost; child nodes accumulate
     the parent's cost plus the cost of the op that produced them.
     """
     self.state_str = state_str
     self.state = State.create_from_serialized_string(state_str)
     self.values = np.array(
         self.state.get_values_as_vec([block_pos_fqn, block_on_color_fqn]))
     self.parent = parent
     if parent is not None:
         self.op = op  # op taken to reach this state
         self.cost = parent.cost + op.cost()
     else:
         self.op = None
         self.cost = 0
     # No heuristic term: f is just the path cost so far.
     self.f = self.cost
    def predict(self, state_str, action):
        """Predict the block pose after pushing it `amount` along side `dir`.

        Args:
            state_str: serialized pillar State to start from.
            action: either a raw ``[dir, amount, T]`` list or an op object
                exposing ``sidenum``, ``amount`` and ``T``.

        Returns:
            Serialized State with only the block position updated; the
            robot pose is left untouched.
        """
        state = State.create_from_serialized_string(state_str)
        new_state = State.create_from_serialized_string(state_str)
        # Accept both action encodings; T (duration) does not affect the
        # predicted pose, so it is deliberately ignored here.
        if isinstance(action, list):
            side, amount, _ = action
        else:
            side = action.sidenum
            amount = action.amount
        block_pos_fqn = "frame:block:pose/position"

        # Precondition — robot already at the matching side — is enforced at
        # another layer of abstraction, so only the block moves here.
        yaw = self.dir_to_rpy[side][2]
        current_block_state = state.get_values_as_vec([block_pos_fqn])
        next_block_state_np = np.array(
            [current_block_state[0] - amount * np.sin(yaw),
             current_block_state[1],
             current_block_state[2] - amount * np.cos(yaw)])
        new_state.set_values_from_vec([block_pos_fqn],
                                      next_block_state_np.tolist())
        return new_state.get_serialized_string()
 def predict(self, state_str, action):
     """Predict the robot pose after moving to the block side named by the action.

     Only the robot position and orientation change; the block stays put.
     """
     src = State.create_from_serialized_string(state_str)
     out = State.create_from_serialized_string(state_str)
     # Accept either a raw [dir, ...] list or an op object.
     side = action[0] if isinstance(action, list) else action.sidenum
     robot_pos_fqn = "frame:pose/position"
     robot_orn_fqn = "frame:pose/quaternion"
     block_pos_fqn = "frame:block:pose/position"
     des_quat = quat_to_np(rpy_to_quat(np.array(self.dir_to_rpy[side])),
                           format="wxyz")
     # Stand-off distance: small clearance plus half the block width.
     amount = 0.02 + self.cfg["block"]["dims"]["width"] / 2
     # Precondition (robot reachable at that side) is handled at another
     # layer of abstraction; "below" vs other sides works out the same.
     block_pos = src.get_values_as_vec([block_pos_fqn])
     yaw = self.dir_to_rpy[side][2]
     robot_target = np.array([block_pos[0] + amount * np.sin(yaw),
                              block_pos[1],
                              block_pos[2] + amount * np.cos(yaw)])
     out.set_values_from_vec([robot_pos_fqn], robot_target.tolist())
     out.set_values_from_vec([robot_orn_fqn], des_quat.tolist())
     return out.get_serialized_string()
Example #6
0
 def precond(self, state_str):
     """Return True iff the robot sits at this op's block side in the right orientation."""
     state = State.create_from_serialized_string(state_str)
     robot_pos = state.get_values_as_vec([robot_pos_fqn])
     robot_orn = np.array(state.get_values_as_vec([robot_orn_fqn]))
     block_pos = state.get_values_as_vec([block_pos_fqn])
     half_width = state.get_values_as_vec(["constants/block_width"])[0] / 2
     yaw = dir_to_rpy[self.sidenum][2]
     # Desired gripper position: half a block width off the chosen side.
     desired_pos = np.array([block_pos[0] + half_width * np.sin(yaw),
                             block_pos[1],
                             block_pos[2] + half_width * np.cos(yaw)])
     pos_close = np.linalg.norm(desired_pos - robot_pos) < 0.03
     desired_quat = quat_to_np(rpy_to_quat(np.array(dir_to_rpy[self.sidenum])),
                               format="wxyz")
     orn_dist = Quaternion.absolute_distance(Quaternion(desired_quat),
                                             Quaternion(robot_orn))
     return pos_close and orn_dist < 0.01
Example #7
0
def test_precond():
    """Check PushInDir.precond: false at start, true only after moving to side 1."""
    env, _ = make_block_push_env(two_d=True)
    serialized_start = env.get_pillar_state()[0]
    State.create_from_serialized_string(serialized_start)
    root = Node(serialized_start)
    side1_op = GoToSide(1)
    side2_op = GoToSide(2)
    at_side1 = Node(side1_op.transition_model(
        root.state.get_serialized_string(), side1_op),
                    parent=root,
                    op=side1_op)
    at_side2 = Node(side2_op.transition_model(
        root.state.get_serialized_string(), side2_op),
                    parent=root,
                    op=side2_op)
    push_op = PushInDir(1, 0.05, 50, cfg=env.cfg)
    # Precond must hold only when the robot is at the matching side (1).
    assert not push_op.precond(serialized_start)
    assert push_op.precond(at_side1.state.get_serialized_string())
    assert not push_op.precond(at_side2.state.get_serialized_string())
    print("Precond test passed")
Example #8
0
 def __init__(self, cfg):
     """Set up the block-push environment.

     Wires a per-step render callback into the parent environment, starts a
     ZMQ state server, and creates one pillar State per simulated env.

     Args:
         cfg: configuration mapping; reads cfg["env"]["n_inter_steps"] and
             cfg["scene"]["n_envs"].
     """
     # NOTE(review): `custom_draws` is not defined in this scope — calling
     # render_func will raise NameError unless a global or closure supplies
     # it. Confirm where custom_draws is meant to come from.
     render_func = lambda x, y, z: self.render(custom_draws=custom_draws)
     self.max_steps_per_movement = 400
     self.cfg = cfg
     super().__init__(cfg,
                      n_inter_steps=cfg["env"]["n_inter_steps"],
                      inter_step_cb=render_func,
                      auto_reset_after_done=False)
     #urdf_fn = "/home/lagrassa/git/carbongym/assets/urdf/franka_description/robots/franka_panda.urdf"
     #urdf_fn="/home/lagrassa/git/planorparam/models/robots/model.urdf"
     #self.franka_chain = ikpy.chain.Chain.from_urdf_file(urdf_fn)
     # Side index (0-3) -> roll/pitch/yaw; sides differ only in yaw,
     # stepping by pi/2 around the block.
     self.dir_to_rpy = {
         0: [-np.pi / 2, np.pi / 2, 0],
         1: [-np.pi / 2, np.pi / 2, np.pi / 2],
         2: [-np.pi / 2, np.pi / 2, np.pi],
         3: [-np.pi / 2, np.pi / 2, 1.5 * np.pi]
     }
     # Local ZMQ endpoint used to serve pillar-state requests.
     self.ip = "127.0.0.1"
     self.port = "5555"
     self.state_server = SimpleZMQServer(self.ip, self.port)
     # One pillar State per env, all loaded from the same YAML template.
     self.pillar_states = [
         State.create_from_yaml_file("cfg/push_state.yaml")
         for _ in range(cfg["scene"]["n_envs"])
     ]
Example #9
0
 def pillar_state_to_feature(self, pillar_state_str):
     """Flatten block position and on-color values into a 1-D feature vector."""
     pillar_state = State.create_from_serialized_string(pillar_state_str)
     raw_values = pillar_state.get_values_as_vec(
         [block_pos_fqn, block_on_color_fqn])
     return np.array(raw_values).flatten()