Example #1
 def _is_close_to_goal(self):
     body_pose = self.robot_body.pose()
     parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
     self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
                      body_pose.xyz()[2])  # torso z is more informative than mean z
     dist_to_goal = np.linalg.norm([self.body_xyz[0] - self.target_pos[0], self.body_xyz[1] - self.target_pos[1]])
     return dist_to_goal < 2
Example #2
	def calc_state(self):
		j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
		# even elements [0::2] position, scaled to -1..+1 between limits
		# odd elements  [1::2] angular speed, scaled to show -1..+1
		self.joint_speeds = j[1::2]
		self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)

		body_pose = self.robot_body.pose()
		parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
		self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
		                 body_pose.xyz()[2])  # torso z is more informative than mean z
		self.body_rpy = body_pose.rpy()
		z = self.body_xyz[2]
		if self.initial_z is None:
			self.initial_z = z
		r, p, yaw = self.body_rpy
		self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
											self.walk_target_x - self.body_xyz[0])
		self.walk_target_dist = np.linalg.norm(
			[self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
		angle_to_target = self.walk_target_theta - yaw

		rot_speed = np.array(
			[[np.cos(-yaw), -np.sin(-yaw), 0],
			 [np.sin(-yaw), np.cos(-yaw), 0],
			 [		0,			 0, 1]]
		)
		vx, vy, vz = np.dot(rot_speed, self.robot_body.speed())  # rotate speed back to body point of view

		more = np.array([ z-self.initial_z,
			np.sin(angle_to_target), np.cos(angle_to_target),
			0.3* vx , 0.3* vy , 0.3* vz ,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
			r, p], dtype=np.float32)
		return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
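A note on the velocity rotation in the snippet above: rot_speed is a yaw-only rotation matrix, so multiplying the world-frame robot_body.speed() by it re-expresses the velocity in a frame aligned with the robot's heading (roll and pitch are ignored). Below is a minimal, self-contained sketch of that step; the world_to_body_speed helper and the sample numbers are made up for illustration, and only NumPy is assumed.

import numpy as np

def world_to_body_speed(world_speed, yaw):
    # Yaw-only rotation: re-express a world-frame velocity in the robot's
    # heading frame. Roll and pitch are deliberately ignored, as in the
    # calc_state() snippet above.
    rot = np.array([[np.cos(-yaw), -np.sin(-yaw), 0],
                    [np.sin(-yaw),  np.cos(-yaw), 0],
                    [0,             0,            1]])
    return rot @ np.asarray(world_speed)

# A robot with yaw = pi/2 (facing world +y) moving along world +y is moving
# straight "forward" (+x) in its own frame.
vx, vy, vz = world_to_body_speed([0.0, 1.0, 0.0], np.pi / 2)
print(round(vx, 3), round(vy, 3), round(vz, 3))  # 1.0 0.0 0.0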
Example #3
	def calc_state(self):
		j = np.array([j.current_relative_position() for j in self.ordered_joints], dtype=np.float32).flatten()
		# even elements [0::2] position, scaled to -1..+1 between limits
		# odd elements  [1::2] angular speed, scaled to show -1..+1
		self.joint_speeds = j[1::2]
		self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)

		body_pose = self.robot_body.pose()
		parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
		self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
		                 body_pose.xyz()[2])  # torso z is more informative than mean z
		self.body_rpy = body_pose.rpy()
		z = self.body_xyz[2]
		if self.initial_z is None:
			self.initial_z = z
		r, p, yaw = self.body_rpy
		self.walk_target_theta = np.arctan2(self.walk_target_y - self.body_xyz[1],
											self.walk_target_x - self.body_xyz[0])
		self.walk_target_dist = np.linalg.norm(
			[self.walk_target_y - self.body_xyz[1], self.walk_target_x - self.body_xyz[0]])
		angle_to_target = self.walk_target_theta - yaw

		rot_speed = np.array(
			[[np.cos(-yaw), -np.sin(-yaw), 0],
			 [np.sin(-yaw), np.cos(-yaw), 0],
			 [		0,			 0, 1]]
		)
		vx, vy, vz = np.dot(rot_speed, self.robot_body.speed())  # rotate speed back to body point of view

		more = np.array([ z-self.initial_z,
			np.sin(angle_to_target), np.cos(angle_to_target),
			0.3* vx , 0.3* vy , 0.3* vz ,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
			r, p], dtype=np.float32)
		return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
Example #4
    def calc_state(self):
        j = np.array([j.get_joint_relative_state() for j in self.ordered_joints], dtype=np.float32).flatten()
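        # even elements [0::2] position, scaled to -1..+1 between limits
        # odd elements  [1::2] angular speed, scaled to show -1..+1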
        self.joint_speeds = j[1::2]
        self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)

        body_pose = self.robot_body.pose()
        parts_xyz = np.array([p.pose().xyz() for p in self.parts.values()]).flatten()
        self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
                         body_pose.xyz()[2])  # torso z is more informative than mean z
        self.body_rpy = body_pose.rpy()
        z = self.body_xyz[2]
        if self.initial_z is None:
            self.initial_z = z
        r, p, yaw = self.body_rpy
        self.walk_target_theta = np.arctan2(self.target_pos[1] - self.body_xyz[1],
                                            self.target_pos[0] - self.body_xyz[0])
        self.walk_target_dist = np.linalg.norm(
            [self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0]])
        self.walk_target_dist_xyz = np.linalg.norm(
            [self.target_pos[2] - self.body_xyz[2], self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0]])
        angle_to_target = self.walk_target_theta - yaw
        self.angle_to_target = angle_to_target

        self.walk_height_diff = np.abs(self.target_pos[2] - self.body_xyz[2])

        self.dist_to_start = np.linalg.norm(np.array(self.body_xyz) - np.array(self.initial_pos))

        debugmode = 0
        if debugmode:
            print("Robot debug mode: walk_height_diff", self.walk_height_diff)
            print("Robot debug mode: walk_target_z", self.target_pos[2])
            print("Robot debug mode: body_xyz", self.body_xyz[2])

        rot_speed = np.array(
            [[np.cos(-yaw), -np.sin(-yaw), 0],
             [np.sin(-yaw), np.cos(-yaw), 0],
             [        0,             0, 1]]
        )
        vx, vy, vz = np.dot(rot_speed, self.robot_body.speed())  # rotate speed back to body point of view

        debugmode = 0
        if debugmode:
            print("Robot state", self.target_pos[1] - self.body_xyz[1], self.target_pos[0] - self.body_xyz[0])

        more = np.array([ z-self.initial_z,
            np.sin(angle_to_target), np.cos(angle_to_target),
            0.3* vx , 0.3* vy , 0.3* vz ,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
            r, p], dtype=np.float32)

        if debugmode:
            print("Robot more", more)


        if 'nonviz_sensor' not in self.env.config["output"]:
            j.fill(0)
            more.fill(0)

        return np.clip( np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
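One detail specific to this variant: when 'nonviz_sensor' is not listed in the configured outputs, the joint and body features are zeroed in place rather than dropped, so the observation keeps a fixed length and only the foot-contact flags carry information. A small standalone sketch of that assembly and the final clipping, with made-up array sizes (the 6-joint, 2-foot layout is purely illustrative), assuming only NumPy:

import numpy as np

more = np.random.uniform(-2, 2, size=8)    # body features: height, heading sin/cos, scaled speeds, roll, pitch
j = np.random.uniform(-1, 1, size=2 * 6)   # per-joint (position, speed) pairs; 6 joints here, purely illustrative
feet_contact = np.array([1.0, 0.0])        # binary contact flags; 2 feet here, purely illustrative

has_nonviz_sensor = False                  # stand-in for 'nonviz_sensor' in self.env.config["output"]
if not has_nonviz_sensor:
    j.fill(0)                              # zeroed in place: the observation shape stays the same
    more.fill(0)

obs = np.clip(np.concatenate([more, j, feet_contact]), -5, +5)
print(obs.shape)  # (22,)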
Example #5
 def is_close_to_goal(self):
     body_pose = self.robot_body.pose()
     parts_xyz = np.array([p.pose().xyz()
                           for p in self.parts.values()]).flatten()
     self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
                      body_pose.xyz()[2]
                      )  # torso z is more informative than mean z
     dist_to_goal = np.linalg.norm([
         self.body_xyz[0] - self.walk_target_x,
         self.body_xyz[1] - self.walk_target_y,
         self.body_xyz[2] - self.walk_target_z
     ])
     debugmode = 0
     if debugmode:
         print(
             np.linalg.norm([
                 self.body_xyz[0] - self.walk_target_x,
                 self.body_xyz[1] - self.walk_target_y,
                 self.body_xyz[2] - self.walk_target_z
             ]), [self.body_xyz[0], self.body_xyz[1], self.body_xyz[2]],
             [self.walk_target_x, self.walk_target_y, self.walk_target_z])
     return dist_to_goal < 0.5
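Note the contrast with Example #1: that check uses only the planar (x, y) distance to the target with a loose threshold of 2, while this one uses the full 3D distance with a tighter threshold of 0.5. A minimal side-by-side sketch of the two distance checks, with made-up coordinates, assuming only NumPy:

import numpy as np

body_xyz = np.array([1.0, 2.0, 0.8])       # robot body position (illustrative)
target_xyz = np.array([1.5, 2.5, 0.3])     # goal position (illustrative)

planar_dist = np.linalg.norm(body_xyz[:2] - target_xyz[:2])  # Example #1 style
full_dist = np.linalg.norm(body_xyz - target_xyz)            # Example #5 style

print(planar_dist < 2, full_dist < 0.5)  # True False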
Example #6
    def calc_state(self):
        j = np.array(
            [j.get_joint_relative_state() for j in self.ordered_joints],
            dtype=np.float32).flatten()
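        # even elements [0::2] position, scaled to -1..+1 between limits
        # odd elements  [1::2] angular speed, scaled to show -1..+1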
        self.joint_speeds = j[1::2]
        self.joints_at_limit = np.count_nonzero(np.abs(j[0::2]) > 0.99)

        body_pose = self.robot_body.pose()
        parts_xyz = np.array([p.pose().xyz()
                              for p in self.parts.values()]).flatten()
        self.body_xyz = (parts_xyz[0::3].mean(), parts_xyz[1::3].mean(),
                         body_pose.xyz()[2]
                         )  # torso z is more informative than mean z
        self.body_rpy = body_pose.rpy()
        z = self.body_xyz[2]
        if self.initial_z is None:
            self.initial_z = z
        r, p, yaw = self.body_rpy
        robot_orn = self.get_orientation()

        self.walk_target_theta = np.arctan2(
            self.target_pos[1] - self.body_xyz[1],
            self.target_pos[0] - self.body_xyz[0])

        self.walk_target_dist = np.linalg.norm([
            self.target_pos[1] - self.body_xyz[1],
            self.target_pos[0] - self.body_xyz[0]
        ])

        self.walk_target_dist_xyz = np.linalg.norm([
            self.target_pos[2] - self.body_xyz[2],
            self.target_pos[1] - self.body_xyz[1],
            self.target_pos[0] - self.body_xyz[0]
        ])

        self.angle_to_target = self.walk_target_theta - yaw

        if self.angle_to_target > np.pi:
            self.angle_to_target -= 2 * np.pi
        elif self.angle_to_target < -np.pi:
            self.angle_to_target += 2 * np.pi

        debug = 0
        if debug:
            print("Walk Target Theta: {:.3g}, Yaw: {:.3g}".format(
                self.walk_target_theta, yaw))
            print("Angle Target: {:.3g}".format(self.angle_to_target))

        self.walk_height_diff = np.abs(self.target_pos[2] - self.body_xyz[2])

        self.dist_to_start = np.linalg.norm(
            np.array(self.body_xyz) - np.array(self.initial_pos))

        debugmode = 0
        if debugmode:
            print("Robot dsebug mode: walk_height_diff", self.walk_height_diff)
            print("Robot dsebug mode: walk_target_z", self.target_pos[2])
            print("Robot dsebug mode: body_xyz", self.body_xyz[2])

        rot_speed = np.array([[np.cos(-yaw), -np.sin(-yaw), 0],
                              [np.sin(-yaw), np.cos(-yaw), 0], [0, 0, 1]])
        vx, vy, vz = np.dot(
            rot_speed,
            self.robot_body.speed())  # rotate speed back to body point of view

        debugmode = 0
        if debugmode:
            print("Robot state y:%.3f x:%.3f" %
                  (self.target_pos[1] - self.body_xyz[1],
                   self.target_pos[0] - self.body_xyz[0]))

        more = np.array(
            [
                z - self.initial_z,
                np.sin(self.angle_to_target),
                np.cos(self.angle_to_target),
                0.3 * vx,
                0.3 * vy,
                0.3 * vz,  # 0.3 is just scaling typical speed into -1..+1, no physical sense here
                r,
                p
            ],
            dtype=np.float32)

        #more = np.array([np.sin(self.angle_to_target), np.cos(self.angle_to_target),
        #                 0.3 * vx, 0.3 * vy, 0.3 * vz,
        #                 # 0.3 is just scaling typical speed into -1..+1, no physical sense here
        #                 r, p], dtype=np.float32)

        debugmode = 0
        if debugmode:
            print("Robot more", more)

        if 'nonviz_sensor' not in self.env.config["output"]:
            j.fill(0)
            more.fill(0)

        d = self.dist_to_target()
        '''if self.env.config["waypoint_active"]:
            if (d <= 0.2):  # Pass next waypoint
                self.point += 1
                # self.point = np.clip(self.point, 0, 4)  # valid for Aloha_way_custom
                self.point = np.clip(self.point, 0, 1)
                self.set_target_position(self.way_target[self.point])
                print("Reached Waypoint %i" % self.point)
        '''

        return np.clip(np.concatenate([more] + [j] + [self.feet_contact]), -5, +5)
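A closing note on the angle handling in this last example: the if/elif wrap keeps self.angle_to_target in [-pi, pi], which matters for anything that consumes the raw angle (for instance a turning penalty in a reward term); the sin/cos entries in the observation are unaffected by the wrap. An equivalent branch-free formulation, shown here as a standalone sketch assuming only NumPy:

import numpy as np

def wrap_to_pi(angle):
    # arctan2 of the angle's sine and cosine always lands in [-pi, pi],
    # which is what the if/elif wrap in calc_state() achieves.
    return np.arctan2(np.sin(angle), np.cos(angle))

print(wrap_to_pi(3.5), wrap_to_pi(-3.5))  # roughly -2.783 and 2.783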