Example no. 1
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):

        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array([pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(timestamp - self.last_timestamp, 1e-03)  # prevent divide by zero
        state = np.concatenate([position, orientation, velocity])  # combined state vector
        self.last_timestamp = timestamp
        self.last_position = position

        error_position = np.linalg.norm(self.target_position - state[0:3])  # Euclidean distance from target position vector
        error_orientation = np.linalg.norm(self.target_orientation - state[3:7])  # Euclidean distance from target orientation quaternion (a better comparison may be needed)
        error_velocity = np.linalg.norm(self.target_velocity - state[7:10])  # Euclidean distance from target velocity vector
        
        # Compute reward / penalty and check if this episode is complete
        done = False
        reward = -(self.weight_position * error_position + self.weight_orientation * error_orientation + self.weight_velocity * error_velocity)

        if (timestamp > self.max_duration):
            reward += 50.0
            done = True
        
        if (error_position > self.max_error_position):
            reward -= 200.0
            done = True

        if (pose.position.z <= self.target_position[2] / 2):
            reward -= 25.0 

        if (error_position <= self.min_error_position and pose.position.z >= self.target_position[2]/2 and velocity[2] >= 0):
            reward += 100.0
       
        if (np.abs(pose.position.z - self.target_position[2]) <= self.target_position[2]/3):
            reward += 25.0 

        if (error_velocity > self.max_error_velocity):
            reward -= 15.0

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward, done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low, self.action_space.high)  # flatten, clamp to action space limits
            return Wrench(
                    force=Vector3(action[0], action[1], action[2]),
                    torque=Vector3(action[3], action[4], action[5])
                ), done
        else:
            return Wrench(), done
Example no. 2
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array(
            [pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(
                timestamp - self.last_timestamp,
                1e-03)  # prevent divide by zero
        state = np.concatenate([position, orientation,
                                velocity])  # combined state vector
        self.last_timestamp = timestamp
        self.last_position = position
        # print("angular_velocity: {}, linear_acceleration: {}".format(angular_velocity, linear_acceleration))

        error_position = np.linalg.norm(
            self.target_position -
            state[0:3])  # Euclidean distance from target position vector
        # error_orientation = np.linalg.norm(self.target_orientation - state[3:7]) # Euclidean distance from target orientation quaternion
        error_orientation = 0  # factor out orientation
        error_velocity = np.linalg.norm(self.target_velocity - state[7:10])**2
        # Compute reward / penalty and check if this episode is complete
        done = False
        reward = -(self.weight_position * error_position +
                   self.weight_orientation * error_orientation +
                   self.weight_velocity * error_velocity)
        if error_position > self.max_error_position:
            reward -= 50.0  # extra penalty, agent strayed too far
            done = True
        elif timestamp > self.max_duration:  # agent has run out of time
            reward += 50.0  # extra reward, agent made it to the end
            done = True
            print("postition: {}...".format(pose.position.z))

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
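Example no. 3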
 def __init__(self):
     '''Parameter initialization'''
     self.rospy = rospy
     self.frc_topic = self.rospy.get_param("frc_topic", "/frc")
     self.frc_left_topic = self.rospy.get_param("frc_left", "/frc_left")
     self.frc_right_topic = self.rospy.get_param("frc_right", "/frc_right")
     self.wflc_params = {
         "M": self.rospy.get_param("wflc_order", 1),
         "mu": self.rospy.get_param("wflc_amplitude_a_g", 0.05),
         "mu0": self.rospy.get_param("wflc_frequency_a_g", 0.005),
         "mub": self.rospy.get_param("wflc_bias_a_g", 0.05),
         "sum_w0": 0,
         "w0": np.random.rand(1),
         "X": [],
         "W": [],
         "Wb": 0,
         "fs": 100
     }
     self.r_flc_params = {
         "M": self.rospy.get_param("r_flc_order", 1),
         "mu": self.rospy.get_param("r_flc_amplitude_a_g", 0.008),
         "X": [],
         "W": [],
         "w0": 2 * np.pi * np.random.rand(1),
         "k": 1,
         "fs": 100
     }
     self.l_flc_params = {
         "M": self.rospy.get_param("l_flc_order", 1),
         "mu": self.rospy.get_param("l_flc_amplitude_a_g", 0.008),
         "X": [],
         "W": [],
         "w0": 2 * np.pi * np.random.rand(1),
         "k": 1,
         "fs": 100
     }
     '''Subscribers'''
     #self.sub_frc = self.rospy.Subscriber(self.frc_topic,Wrench,self.callback_frc)
     self.pub_left = self.rospy.Subscriber(self.frc_left_topic, Wrench,
                                           self.callback_frc_left)
     self.pub_right = self.rospy.Subscriber(self.frc_right_topic, Wrench,
                                            self.callback_frc_right)
     '''Publishers'''
     self.pub_frc = rospy.Publisher(self.frc_topic, Wrench, queue_size=10)
     '''Node Configuration'''
     self.rospy.init_node("FLC_Filters", anonymous=True)
     self.frc = Wrench()
     self.frc_left = Wrench()
     self.frc_right = Wrench()
     self.change = {"left": False, "right": False}
     self.wflc_params["W"] = np.zeros(self.wflc_params["M"] * 2)
     self.l_flc_params["W"] = np.zeros(self.l_flc_params["M"] * 2)
     self.r_flc_params["W"] = np.zeros(self.r_flc_params["M"] * 2)
     self.rospy.spin()
Example no. 4
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        if self.last_pos is None:
            vel = 0.0
        else:
            vel = (pose.position.z - self.last_pos) / max(
                timestamp - self.last_timestamp, 1e-4)
        state = np.array([
            pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w, self.target_z, vel
        ])

        # Compute reward / penalty and check if this episode is complete
        done = False

        reward = (15.0 - pose.position.z) / 15.0
        if pose.position.z > 3.0 and np.abs(vel) > pose.position.z:
            reward -= np.abs(vel) / 5.0

        if timestamp > self.max_duration:
            reward -= 1.0
            done = True
        elif pose.position.z > 20.0:
            reward -= 5.0
            done = True
        elif pose.position.z < 0.4 and vel > 0.6:
            reward -= 5.5
            done = True
        elif pose.position.z < 0.2 and vel < 0.1:
            reward += 1.0
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        self.last_timestamp = timestamp
        self.last_action = action
        self.last_pos = pose.position.z
        self.last_vel = vel

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Example no. 5
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array([pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w])
        # Calculate velocity
        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(timestamp - self.last_timestamp, 1e-03)
        
        # Create state space and update lag variables
        state = np.concatenate([position, orientation, velocity])
        self.last_timestamp = timestamp
        self.last_position = position
        
        # Compute reward / penalty and check if this episode is complete
        done = False
        error_position = np.linalg.norm(self.target_position - state[0:3])
        error_orientation = np.linalg.norm(self.target_orientation - state[3:7])
        error_velocity = np.linalg.norm(self.target_velocity - state[7:10])
        reward = - (self.position_weight * error_position + 
                    self.orientation_weight * error_orientation +
                    self.velocity_weight * error_velocity)
        
        if position[2] == 0:
            reward += 50.0
            done = True
        elif position[2] >= self.start_z + 2.5:
            reward -= 50.0
            done = True
        elif abs(velocity[2]) >= 10.0 and position[2] <= 5.0:
            reward -= 50.0
            done = True
        elif abs(velocity[2]) >= 5.0 and position[2] <= 2.0:
            reward -= 50.0
            done = True
        elif timestamp > self.max_duration:
            reward -= 50.0
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward, done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low, self.action_space.high)  # flatten, clamp to action space limits
            return Wrench(
                    force=Vector3(action[0], action[1], action[2]),
                    torque=Vector3(action[3], action[4], action[5])
                ), done
        else:
            return Wrench(), done
Example no. 6
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        position = np.array(
            [pose.position.x, pose.position.y, pose.position.z])
        orientation = np.array([
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])
        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(
                timestamp - self.last_timestamp,
                1e-03)  # prevent divide by zero
        state = np.concatenate([position, orientation,
                                velocity])  # combined state vector
        self.last_timestamp = timestamp
        self.last_position = position
        # print("angular_velocity: {}, linear_acceleration: {}".format(angular_velocity, linear_acceleration))

        # Compute reward / penalty and check if this episode is complete
        done = False
        error_position = abs(self.target_z - pose.position.z)
        error_velocity = (self.target_velocity_z - state[9])**2
        reward = -min(
            self.weight_position * error_position +
            self.weight_velocity * error_velocity, 20.0
        )  # reward = zero for matching target z, -ve as you go farther, up to -20
        if pose.position.z >= self.target_z:  # agent has crossed the target height
            reward += 10.0  # bonus reward
            done = True
        elif timestamp > self.max_duration:  # agent has run out of time
            reward -= 10.0  # extra penalty
            done = True
            print("Last action: {}...".format(self.action))

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            self.action = action[2]
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Example no. 7
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        state = np.array([
            pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])

        # get velocity
        if self.last_timestamp is None:
            velocity = 0.0
        else:
            velocity = abs(pose.position.z - self.last_position) / \
                        max(timestamp - self.last_timestamp, 1e-03)  # prevent divide by zero

        # scale elements to [0,1]
        scaled_z = pose.position.z / self.observation_space.high[2]
        max_v = (self.observation_space.high[2] -
                 self.observation_space.low[2]) / 1e-03
        scaled_v = velocity / max_v

        state = np.array([scaled_z, scaled_v])

        # Compute reward / penalty and check if this episode is complete
        done = False
        reward = -min(
            abs(self.target_z - pose.position.z), 20.0
        )  # reward = zero for matching target z, -ve as you go farther, up to -20
        if pose.position.z >= self.target_z:  # agent has crossed the target height
            reward += 10.0  # bonus reward
            done = True
        elif timestamp > self.max_duration:  # agent has run out of time
            reward -= 10.0  # extra penalty
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Example no. 8
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        position = np.array(
            [pose.position.x, pose.position.y, pose.position.z])

        if self.last_timestamp is None:
            velocity = np.array([0.0, 0.0, 0.0])
        else:
            velocity = (position - self.last_position) / max(
                timestamp - self.last_timestamp, 1e-03)
        state = np.concatenate([position, velocity])

        # Compute reward / penalty and check if this episode is complete
        done = False

        error_position = np.linalg.norm(
            self.target_position -
            state[0:3])  # Euclidean distance from target position vector
        error_velocity = np.linalg.norm(
            self.target_velocity -
            state[3:6])  # Euclidean distance from target velocity vector
        reward = -(self.weight_position * error_position +
                   self.weight_velocity * error_velocity)

        self.last_timestamp = timestamp
        self.last_position = position

        if error_position > self.max_error_position:
            reward -= 50.0
            print('Lasted for {} s'.format(timestamp))
            done = True
        elif timestamp > self.max_duration:
            reward += 50.0  # extra reward, agent made it to the end
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            self.last_action = action
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Example no. 9
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # if np.random.rand() > 0.99:
        #     self.target_z = np.random.rand() * 15 + 5

        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        if self.last_pos is None:
            vel = 0.0
        else:
            vel = (pose.position.z - self.last_pos) / max(timestamp - self.last_timestamp, 1e-4)
        self.last_pos = pose.position.z
        state = np.array([
                pose.position.x, pose.position.y, pose.position.z,
                pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w,
                self.target_z, vel])

        # Compute reward / penalty and check if this episode is complete
        done = False
        zdist = abs(self.target_z - pose.position.z)

        reward = -(zdist ** 2 + 0.01 * (np.sum(self.last_action) ** 2)) / 9000

        # reward = 10.0 - zdist
        if zdist < 0.5:
            reward += 0.5
        if timestamp > self.max_duration:
            done = True
        elif pose.position.z > self.z_oob:
            reward -= 1.0
            done = True
        elif pose.position.z < 0.2:
            reward -= 1.0
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward, done)  # note: action = <force; torque> vector

        self.last_timestamp = timestamp
        self.last_action = action

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low, self.action_space.high)  # flatten, clamp to action space limits
            return Wrench(
                    force=Vector3(action[0], action[1], action[2]),
                    torque=Vector3(action[3], action[4], action[5])
                ), done
        else:
            return Wrench(), done
Example no. 10
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        # Prepare state vector (pose only; ignore angular_velocity, linear_acceleration)
        state = np.array([
            pose.position.x, pose.position.y, pose.position.z,
            pose.orientation.x, pose.orientation.y, pose.orientation.z,
            pose.orientation.w
        ])

        # Compute reward / penalty and check if this episode is complete
        done = False
        reward = 0

        if self.agent is not None and isinstance(self.agent, DQN_Agent):
            if self.mode == -1:
                self.agent.set_takeoff_mode()
                self.mode = 0
            if self.mode == 0 and pose.position.z >= self.target_z:
                self.agent.set_hover_mode()
                self.hover_poss = pose.position.z
                self.mode = 1
            if self.mode == 1 and timestamp >= self.time_per_step * 2:
                self.agent.set_land_mode()
                self.mode = 2

        if self.mode == 0:  # reward up movement when taking off
            reward += pose.position.z / 10.0
        if self.mode == 1:  # reward staying calm when hovering
            reward -= abs(pose.position.z - self.hover_poss) / 10.0
        if self.mode == 2:  # reward moving down when landing
            reward -= pose.position.z / 10.0

        if timestamp > self.max_duration:  # agent has run out of time
            done = True

        # Take one RL step, passing in current state and reward, and obtain action
        # Note: The reward passed in here is the result of past action(s)
        action = self.agent.step(state, reward,
                                 done)  # note: action = <force; torque> vector

        # Convert to proper force command (a Wrench object) and return it
        if action is not None:
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
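Example no. 11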
	def __init__(self):
		self.pub_left = rospy.Publisher('frc_left', Wrench, queue_size = 10)
		self.pub_right = rospy.Publisher('frc_right', Wrench, queue_size = 10)
		self.pub_trq = rospy.Publisher('torque',Wrench, queue_size = 10)
		self.frc_left = Wrench()
		self.frc_right = Wrench()
		self.trq = Wrench()
		self.serialPort = serial.Serial("/dev/ttyACM0")
		self.serialChar = 'x'
		self.exitFlag = False
		self.fs = 10
		self.rospy = rospy
		self.rospy.init_node('frc_acquisition', anonymous = True)
		self.rate = self.rospy.Rate(self.fs)
		self.dataExport = [False, 'data.txt']
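Example no. 12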
    def place_panel(self, target_payload, goal=None):
        self._begin_step(goal)

        def done_cb(status, result):
            rospy.loginfo("ibvs placement generated success")
            if (goal is not None):
                if status == actionlib.GoalStatus.SUCCEEDED:
                    self._step_complete(goal)
                else:
                    with self._goal_handle_lock:
                        if self._goal_handle == goal:
                            self._goal_handle = None
                    res = ProcessStepResult()
                    res.state = self.state
                    res.target = self.current_target if self.current_target is not None else ""
                    res.payload = self.current_payload if self.current_payload is not None else ""
                    res.error_msg = str(result.error_msg)
                    goal.set_aborted(result=res)
                    rospy.loginfo("pbvs placement generated: %s",
                                  result.error_msg)

        self.process_index = 7
        self.process_starts[self.process_states[
            self.process_index]] = self.get_current_pose()
        with self._goal_handle_lock:

            placement_goal = PBVSPlacementGoal()
            placement_goal.desired_transform = self.load_placement_target_config(
                target_payload)
            placement_goal.stage1_kp = np.array([0.9] * 6)
            placement_goal.stage2_kp = np.array([0.9] * 6)
            placement_goal.stage3_kp = np.array([0.5] * 6)
            placement_goal.stage1_tol_p = 0.05
            placement_goal.stage1_tol_r = np.deg2rad(1)
            placement_goal.stage2_tol_p = 0.05
            placement_goal.stage2_tol_r = np.deg2rad(1)
            placement_goal.stage3_tol_p = 0.001
            placement_goal.stage3_tol_r = np.deg2rad(0.2)
            placement_goal.stage2_z_offset = 0.05

            placement_goal.abort_force = Wrench(Vector3(500, 500, 500),
                                                Vector3(100, 100, 100))
            placement_goal.placement_force = Wrench(Vector3(0, 0, 300),
                                                    Vector3(0, 0, 0))
            placement_goal.force_ki = np.array([1e-6] * 6)

            client_handle = self.placement_client.send_goal(placement_goal,
                                                            done_cb=done_cb)
Example no. 13
    def update(self, timestamp, pose, angular_velocity, linear_acceleration):
        self.count += 1
        pos = np.array([pose.position.x, pose.position.y, pose.position.z])
        distance = np.linalg.norm(self.target - pos)
        rescaled_pos = self.state_rescale(pos)
        rescaled_tar = self.state_rescale(self.target)
        pos_target_vec = rescaled_tar - rescaled_pos
        delta_pos = (pos - self.last_pos)
        self.last_pos = pos
        state = np.concatenate(
            (rescaled_pos, delta_pos * 10., pos_target_vec * 5.0), axis=-1)
        state = state[None, :]
        accel = np.array([
            linear_acceleration.x, linear_acceleration.y, linear_acceleration.z
        ])
        sum_accel = np.linalg.norm(accel)
        done = False
        reward = (5. - distance) * 0.3 - sum_accel * 0.05

        if distance < 0.1 and self.target[2] == 10.0:
            self.hovered = True
        if timestamp > self.landing_start_time and self.hovered:
            target_z = max(
                (self.initial_target_z / self.landing_time) *
                ((self.landing_start_time + self.landing_time) - timestamp),
                0.0)
            self.target = np.array([0.0, 0.0, target_z])

        print('re={:.3} pt={:.3} tar={:.3}'.format(reward, pos_target_vec[2],
                                                   self.target[2]),
              end='\r')

        if (timestamp > self.max_duration):
            done = True

        self.action = self.agent.step(state, reward, done)
        #print('action={:.3} reward={:.3}'.format(self.action[2],reward),end='\r')

        if self.action is not None:
            action = self.action * self.action_space_range[2] * 0.5
            action = np.clip(action.flatten(), self.action_space.low,
                             self.action_space.high
                             )  # flatten, clamp to action space limits
            return Wrench(force=Vector3(action[0], action[1], action[2]),
                          torque=Vector3(action[3], action[4],
                                         action[5])), done
        else:
            return Wrench(), done
Example no. 14
    def __init__(self, search_target, search_area):
        State.__init__(self,
                       outcomes=['succeeded', 'aborted', 'preempted'],
                       input_keys=[
                           'px_input', 'fx_input', 'search_input',
                           'search_confidence_input'
                       ])

        # initialize controller
        self.CameraPID = CameraPID()
        self.pub_thrust = rospy.Publisher('/manta/thruster_manager/input',
                                          Wrench,
                                          queue_size=1)

        # thrust message
        self.thrust_msg = Wrench()

        # my current pose
        self.vehicle_odom = Odometry()
        self.sub_pose = rospy.Subscriber('/odometry/filtered',
                                         Odometry,
                                         self.positionCallback,
                                         queue_size=1)

        # set init_flag
        self.init_flag = True

        # straight-line path segment
        self.search_target = search_target
        self.search_y = search_area.y
        self.search_x = search_area.x
Example no. 15
def callback(joy):
    global closing

    def f(x):
        return sign(x) * 70 * abs(x)**2

    w = Wrench()
    w.force.x = f(joy.axes[0])
    w.force.y = f(joy.axes[1])
    w.force.z = f(joy.axes[2])
    w.torque.x = f(joy.axes[3])
    w.torque.y = f(joy.axes[4])
    w.torque.z = f(joy.axes[5])
    pub.publish(w)

    if joy.buttons[1] > 0:
        closing = False
        gripper.publish(Float64(5))
    elif joy.buttons[0] > 0:
        closing = True
        gripper.publish(Float64(-5))
    elif closing:
        gripper.publish(Float64(-5))
    else:
        gripper.publish(Float64(0))
Example no. 16
def push2():
    try:
        service = rospy.ServiceProxy('/gazebo/apply_body_wrench', ApplyBodyWrench)
        
        body_name = 'robot1::wrist_roll_link'
        reference_frame = 'robot1::wrist_roll_link'
        point = Point()
        point.x = 0
        point.y = 0
        point.z = 0
        wrench = Wrench()
        wrench.force.x = 35
        wrench.force.y = 0
        wrench.force.z = 0
        wrench.torque.x = 0
        wrench.torque.y = 0
        wrench.torque.z = 0
        start_time = rospy.Time.now()
        duration = rospy.Duration(10)

        success = service(body_name, reference_frame, point, wrench, start_time, duration)

        print('service call %s' % success)
    except rospy.ServiceException as e:
        rospy.loginfo('service call failed %s', e)
Example no. 17
    def activate_launcher(self, _ball, robot_euler_angles, force_n):
        #print(robot_twist)
        # Ball launcher angle
        angle_deg = self.launcher_angle
        angle_rad = radians(angle_deg)
        # Ball Launch relative to robot
        robot_x = force_n * cos(angle_rad)
        robot_z = force_n * sin(angle_rad)
        robot_y = force_n * 0
        # Ball Launch relative to Gazebo world
        ball_launch_vector = [robot_x, robot_y, robot_z]
        [x, y,
         z] = self.transform_robot_to_world_frame(robot_euler_angles,
                                                  ball_launch_vector)

        wrench = Wrench()
        wrench.force.x = x
        wrench.force.y = y
        wrench.force.z = z
        wrench.torque.x = 0
        wrench.torque.y = 0
        wrench.torque.z = 0
        # You can also define the start time if necessary...
        #print("wrench = " + wrench)
        self.apply_force(
            body_name=_ball.name_for_force,
            wrench=wrench,
            duration=rospy.Duration(0.1)
        )  # try to apply that as an instance...? Looks like it defaults to frame step
        if (debug_launcher): rospy.loginfo('Ball force Applied')
Example no. 18
    def __init__(self, target):
        State.__init__(self,
                       outcomes=['found', 'unseen', 'passed', 'missed'],
                       output_keys=[
                           'px_output', 'fx_output', 'search_output',
                           'search_confidence_output'
                       ])
        self.target = target
        self.search_timeout = 30.0
        self.sampling_time = 0.2
        self.timer = 0.0
        self.task_status = 'missed'
        self.CameraPID = CameraPID()

        self.sub_object = rospy.Subscriber('/pole_midpoint',
                                           CameraObjectInfo,
                                           self.objectDetectionCallback,
                                           queue_size=1)
        self.sub_pose = rospy.Subscriber('/odometry/filtered',
                                         Odometry,
                                         self.positionCallback,
                                         queue_size=1)
        print(self.sub_pose)
        self.pub_thrust = rospy.Publisher('/manta/thruster_manager/input',
                                          Wrench,
                                          queue_size=1)
        self.thrust_msg = Wrench()
Example no. 19
def callbacallback(twist_msg):
    m3 = twist_msg.linear.x
    m0 = twist_msg.linear.y
    m1 = twist_msg.linear.z
    m2 = twist_msg.angular.z
    # PWM
    M = np.array([m0, m1, m2, m3])

    # Motor saturation
    M[M > pwm_max] = pwm_max
    # M[M<=pwm_min] = 0  # Real robot does not generate forces with less than this

    # pwm to forces (in grams)
    MF = np.power((M / pwm_max - c1) / c2, 2) - c3
    # forces in kilograms
    MF /= 1000
    MF[MF < 0] = 0.

    # compute forces
    [fx, fy, mz] = np.dot(A, MF)

    # publish wrench
    msg = Wrench()
    force = Vector3()
    torque = Vector3()
    force.x = fx
    force.y = fy
    torque.z = mz

    msg.force = force
    msg.torque = torque
    pub1.publish(msg)
Example no. 20
def apply_body_wrench(name, wrench, duration):
    # Rotate a body-frame wrench into the world frame using the body's current orientation
    pose = get_pose(name)
    q = np.array([pose.orientation.x, pose.orientation.y, 
                  pose.orientation.z, pose.orientation.w])
    H = quaternion_matrix(q)
    A = H[0:3,0:3]
    #sk = np.array(skew_symmetric(-pose.position.x, -pose.position.y, -pose.position.z))
    #T = np.block([
    #    [A, -np.dot(A,sk)],
    #    [np.zeros((3,3)), A]
    #])
    T = np.block([
        [A, np.zeros((3,3))],
        [np.zeros((3,3)), A]
    ])
    w = np.zeros((6,1))
    w[0] = wrench.force.x
    w[1] = wrench.force.y
    w[2] = wrench.force.z
    w[3] = wrench.torque.x
    w[4] = wrench.torque.y
    w[5] = wrench.torque.z
    gw = np.dot(T,w)
    global_wrench = Wrench()
    global_wrench.force.x = gw[0]
    global_wrench.force.y = gw[1]
    global_wrench.force.z = gw[2]
    global_wrench.torque.x = gw[3]
    global_wrench.torque.y = gw[4]
    global_wrench.torque.z = gw[5]
    apply_body_wrench_global(name, global_wrench, duration)
Example no. 21
    def applyForces(self, lin_z, ang_x, ang_y, ang_z, pose):

        # heuristic drag. 
        drag_x = 0.1*self.x_vel_world**2
        drag_y = 0.1*self.y_vel_world**2
        drag_z = 0.3*self.z_vel_world**2

        # Apply forces
        msg = Wrench()

        # Extract the rotation matrix from quaternion
        q = pose.orientation
        rotation = quaternion_matrix([q.x,q.y,q.z,q.w])

        # Apply linear forces in world frame
        linear_forces_body = np.array([[0],[0],[lin_z],[1]])
        linear_force_world = np.dot(rotation, linear_forces_body)
        msg.force.x = linear_force_world[0]
        msg.force.y = linear_force_world[1]
        msg.force.z = linear_force_world[2]

        # Apply torques in the world frame
        torques_body = np.array([[ang_x],[ang_y],[ang_z],[1]])
        torques_world = np.dot(rotation,torques_body)
        msg.torque.x = torques_world[0]
        msg.torque.y = torques_world[1]
        msg.torque.z = torques_world[2]

        # Publish the msg
        self.controller_output_pub.publish(msg)
Example no. 22
    def reference_model_data_callback(self, msg):
        """
		Handle guidance data whenever it is published by calculating
		a control vector based on the given data.

		Args:
			msg:	The guidance data message
		"""

        # Control forces
        tau_d = self.Backstepping.regulate(msg.u, msg.u_dot, msg.u_d,
                                           msg.u_d_dot, msg.v, msg.psi,
                                           msg.psi_d, msg.r, msg.r_d,
                                           msg.r_d_dot)

        tau_depth_hold = self.PID.depthController(msg.z_d, msg.z, msg.t)

        # add speed controllers here

        thrust_msg = Wrench()

        # Thrust message forces and torque
        if tau_d[0] > 0.0:
            thrust_msg.force.x = tau_d[0]

        thrust_msg.force.y = tau_d[1]
        thrust_msg.force.z = tau_depth_hold

        thrust_msg.torque.z = tau_d[2]

        # Publish the thrust message to /auv/thruster_manager/input
        self.pub_thrust.publish(thrust_msg)
Example no. 23
 def point_callback(self, msg):
     x = msg.x
     y = msg.y
     z = msg.z
     curr_point = np.array([x,y,z]) #cm
     p_err = (curr_point - self.ref_point) / 100.0  # convert cm to m
     if self.prev_err is None:
         self.prev_err = p_err
     d_err = (p_err - self.prev_err) #m
     if (np.greater(p_err, (self.ref_point - self.ref_error)).all() and
         np.less(p_err, (self.ref_point + self.ref_error)).all() and 
         not self.has_reached_point
     ):
         self.ref_point = curr_point
         self.has_reached_point = True
     else:
         force = self.Kp*p_err + self.Kd*d_err
         force_vector = Vector3()
         force_vector.x = force[0]
         force_vector.y = force[1]
         force_vector.z = force[2]
         wrench_msg = Wrench()
         wrench_msg.force = force_vector
         self.gazebo_force_pub.publish(wrench_msg)
     self.prev_err = p_err
Example no. 24
 def get_thrust_command(self):
     cmd = Wrench()
     cmd.force.x = self.data.axes[X_AXIS]
     cmd.force.y = self.data.axes[Y_AXIS]
     cmd.force.z = self.data.axes[Z_AXIS]
     cmd.torque.z = self.data.axes[YAW_AXIS]
     return cmd
Example no. 25
    def control_callback(self, odom):
        # position
        position = odom.pose.pose.position

        # orientation
        orientation = odom.pose.pose.orientation
        euler = tf.transformations.euler_from_quaternion(
            (orientation.x, orientation.y, orientation.z, orientation.w))
        pitch = euler[1]
        yaw = euler[2]

        # rotation rate
        rates = odom.twist.twist.angular
        vel = odom.twist.twist.linear
        yaw_rate = rates.z

        # publish frame for the transform tree
        br = tf.TransformBroadcaster()
        br.sendTransform(
            (position.x, position.y, position.z),
            (orientation.x, orientation.y, orientation.z, orientation.w),
            rospy.Time.now(), "base_link", "map")

        # fin1
        msg = Wrench()
        msg.force.x = 0
        msg.force.y = 0
        msg.force.z = 0
        msg.torque.x = 10
        msg.torque.y = 0
        msg.torque.z = 0
        self.pub_fin1.publish(msg)
Example no. 26
    def __init__(self, target):
        State.__init__(self,
                       outcomes=['found', 'unseen', 'passed', 'missed'],
                       output_keys=[
                           'px_output', 'fx_output', 'search_output',
                           'search_confidence_output'
                       ])

        self.target = target
        self.search_timeout = 25.0
        self.sampling_time = 0.2
        self.timer = 0.0
        self.task_status = 'missed'

        # search for object
        if target == 'gate':
            self.sub_object = rospy.Subscriber('/gate_midpoint',
                                               CameraObjectInfo,
                                               self.objectDetectionCallback,
                                               queue_size=1)
        else:
            self.sub_object = rospy.Subscriber('/pole_midpoint',
                                               CameraObjectInfo,
                                               self.objectDetectionCallback,
                                               queue_size=1)

        self.pub_thrust = rospy.Publisher('/manta/thruster_manager/input',
                                          Wrench,
                                          queue_size=1)

        # thrust message
        self.thrust_msg = Wrench()
Example no. 27
    def __init__(self):
        rospy.init_node('skibot_node')
        rospy.Subscriber('thrust', Wrench, self.wrench_callback)
        rospy.Subscriber('target_pose', Pose, self.target_pose_callback)
        rospy.Subscriber('target_point', Point, self.target_point_callback)
        self.target_pose = None
        self.target_point = None
        self.loc_pub = rospy.Publisher('pose', Pose, queue_size=10)
        self.pub_rate = 35.0
        rospy.Service('teleport', Teleport, self.handle_teleport_service)

        # Start pygame...
        pygame.init()
        self.screen = pygame.display.set_mode(
            (SCREEN_WIDTH_PX, SCREEN_HEIGHT_PX))
        self.refresh_rate = 100
        pygame.display.set_caption('Skibot 354')
        self.rocket = Skibot(self.screen,
                             (SCREEN_WIDTH_M / 2, SCREEN_HEIGHT_M / 2), 0.0)

        # load and prep arrow image.
        arrow_file = roslib.packages.resource_file('skibot', 'images',
                                                   'arrow.png')
        self.arrow_img = pygame.image.load(arrow_file)
        self.arrow_img = pygame.transform.smoothscale(self.arrow_img, (38, 8))
        square = pygame.Surface((38, 38), flags=SRCALPHA)
        square.fill((255, 255, 255, 0))
        square.blit(self.arrow_img, (0, 15))
        self.arrow_img = square
        self.arrow_img.convert()

        self.cur_wrench = Wrench()
        self.thrust_start = 0
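Example no. 28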
    def activate(self, config, ff_wrench=Wrench()):
        # Check that the requested parameter config exists
        configs = rospy.get_param("/hsrb/impedance_control/config_names")
        if config not in configs:
            rospy.logerr(config + " is an invalid name.")
            return False

        # Build a very long (one-day) trajectory that holds the current posture
        arm_joint, arm_value = utils.extract_joint_positions(
            self._joint_states_cache.data, _ARM_JOINT_NAMES)
        odom_joint, odom_value = utils.extract_odom_positions(
            self._odom_states_cache.data, _ODOM_JOINT_NAMES)

        trajectory = JointTrajectory()
        trajectory.header.stamp = rospy.get_rostime()
        trajectory.joint_names = arm_joint + odom_joint

        trajectory_point = JointTrajectoryPoint()
        trajectory_point.time_from_start = rospy.Duration(86400.0)
        trajectory_point.positions = arm_value + odom_value
        trajectory.points = [trajectory_point]

        # Send the trajectory
        goal = FollowJointTrajectoryGoal()
        goal.trajectory = trajectory
        goal.preset_parameters_name = config
        goal.feed_forward_wrench.wrench = ff_wrench
        self._client.send_goal(goal)
        return True
Example no. 29
    def apply_body_wrench(self,
                          model_name,
                          link_name,
                          force,
                          torque,
                          start_time=0,
                          duration=-1,
                          reference_point=[0, 0, 0],
                          reference_frame=None):
        assert 'apply_body_wrench' in self._services
        assert model_name in self.get_model_names(
        ), 'Model {} does not exist'.format(model_name)

        if reference_frame is None:
            reference_frame = '{}::{}'.format(model_name, link_name)

        # Set reference point
        point = Point(*reference_point)
        # Set wrench
        wrench = Wrench()
        wrench.force.x = force[0]
        wrench.force.y = force[1]
        wrench.force.z = force[2]

        wrench.torque.x = torque[0]
        wrench.torque.y = torque[1]
        wrench.torque.z = torque[2]

        output = self._services['apply_body_wrench']('{}::{}'.format(
            model_name, link_name), reference_frame, point, wrench,
                                                     rospy.Time(start_time),
                                                     rospy.Duration(duration))
        return output.success
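Example no. 30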
 def run(self):
     while not rospy.is_shutdown():
         line = self.srl.readline()
         if len(line) > 0:
             w = Wrench()
             w.force.x = float(line)
             self.publ.publish(w)