def __init__(self):
    rospy.logwarn("Init line Follower")
    self.bridge_object = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                      self.camera_callback)
    self.movekobuki_object = MoveKobuki()
    # We set initial values to the ideal case, where the line is detected straight ahead
    setPoint_value = 0.0
    state_value = 0.0
    self.pid_object = PID(init_setPoint_value=setPoint_value,
                          init_state=state_value)
    def __init__(self, robot, kp, kd, ki, axes, error_thres, pose_tracker):
        # Initialise the PID controller.
        # axes: list of size 3 (x, y, z); a 1 marks an axis along which control is required, e.g. [0, 1, 0]

        self.robot = robot
        self.pid = PID(kp, kd, ki)
        self.pid_param = PidParam(kp, ki, kd)
        self.pose_tracker = pose_tracker
        self.axes = axes
        self.error_thresh = error_thres
        self.jac_step = 0.002
        self.pid_update_pub = rospy.Publisher('/arm_tracking/pid_update_log',
                                              PidUpdate,
                                              queue_size=10)
Example #3
import time

import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import make_interp_spline

# from PID import PID  # assumption: import path of the PID class under test


def test_pid(P=0.2, I=0.0, D=0.0, L=100):
    """Self-test PID class

    .. note::
        ...
        for i in range(1, END):
            pid.update(feedback)
            output = pid.output
            if pid.SetPoint > 0:
                feedback += (output - (1/i))
            if i>9:
                pid.SetPoint = 1
            time.sleep(0.02)
        ---
    """
    pid = PID(P, I, D)

    pid.SetPoint = 0.0
    pid.setSampleTime(0.01)

    END = L
    feedback = 0

    feedback_list = []
    time_list = []
    setpoint_list = []

    for i in range(1, END):
        pid.update(feedback)
        output = pid.output
        print(output)
        if pid.SetPoint > 0:
            feedback += output  # (output - (1/i)): the controlled system's function
        if i > 9:
            pid.SetPoint = 10
        time.sleep(0.01)

        feedback_list.append(feedback)
        setpoint_list.append(pid.SetPoint)
        time_list.append(i)

    time_sm = np.array(time_list)
    time_smooth = np.linspace(time_sm.min(), time_sm.max(), 300)
    # scipy.interpolate.spline was removed from SciPy; use a B-spline interpolator instead
    feedback_smooth = make_interp_spline(time_list, feedback_list)(time_smooth)
    plt.figure(0)
    plt.plot(time_smooth, feedback_smooth)
    plt.plot(time_list, setpoint_list)
    plt.xlim((0, L))
    plt.ylim((min(feedback_list) - 0.5, max(feedback_list) + 0.5))
    plt.xlabel('time (s)')
    plt.ylabel('PID (PV)')
    plt.title('TEST PID')

    plt.ylim((1 - 10.5, 1 + 10.5))  # fixed window; overrides the data-driven limits above
    # plt.ylim((1-0.5, 1+0.5))
    plt.grid(True)
    plt.show()
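
# Typical invocation of the self-test above (illustrative gains, not values
# taken from the original source):
#   test_pid(P=1.2, I=1.0, D=0.001, L=50)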
Example #4
import time
from threading import Thread, Lock

import kinematic
import simulator
from pid_control import PID

# import joy  # assumption: local joystick module providing loop() and joy_axis

# Joystick process
# joy.setup()
stop_threading = 0
rTime = time.time()
mutex = Lock()

# JOYSTICK
thread_joy = Thread(target=joy.loop, args=(lambda: stop_threading, mutex))
thread_joy.start()

pitch_PID = PID(Kp=0.8, Ki=0.1, Kd=0.2, iMin=-0.1, iMax=0.1, anti_windup=True)
roll_PID = PID(Kp=0.8, Ki=0.1, Kd=0.2, iMin=-0.1, iMax=0.1, anti_windup=True)
yaw_PID = PID(Kp=0.8, Ki=0.1, Kd=0.2, iMin=-0.1, iMax=0.1, anti_windup=True)

def update_to_simulator(stop):
    while not stop():
        simulator.ROLL = roll_PID.control(joy.joy_axis[0], reference=0.0)
        simulator.PITCH = pitch_PID.control(joy.joy_axis[1], reference=0.0)
        simulator.YAW = yaw_PID.control(joy.joy_axis[2], reference=0.0)
        print(simulator.ROLL, joy.joy_axis[0])

        # simulator.ROLL = joy.joy_axis[0]
        # simulator.PITCH = joy.joy_axis[1]
        # simulator.YAW = joy.joy_axis[2]
        time.sleep(0.01)
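
# A hedged sketch, not part of the original fragment: update_to_simulator is
# presumably run on its own thread, mirroring the joystick thread above.
# thread_sim = Thread(target=update_to_simulator, args=(lambda: stop_threading,))
# thread_sim.start()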
Example #5
import json
import signal

# Attach the Ctrl+C signal interrupt
signal.signal(signal.SIGINT, ctrlC)

# Setup encoder
encoder = Encoder()
encoder.initEncoders()

# Setup motor control
motorControl = MotorControl(encoder)

orientation = Orientation(encoder, motorControl)

tof = TOF()

pid = PID(0.5, -6, 6)

try:
    with open('calibratedSpeeds.json') as json_file:
        motorControl.speedMap = json.load(json_file)

except IOError:
    input("You must calibrate speeds first. Press enter to continue")
    motorControl.calibrateSpeeds()

while True:
    print("\n(1) Calibrate speeds")
    print("(2) Test distance sensors")
    print("(3) Move forward until 12\" from wall")
    print("(4) Follow center between walls")
    print("(5) Follow wall")
Example #6
import b0RemoteApi
import math
import time
from kalmanfilter import AccelGyro
from pid_control import PID

accel_gyro = AccelGyro()
pid = PID()

with b0RemoteApi.RemoteApiClient('b0RemoteApi_pythonClient',
                                 'b0RemoteApi') as client:

    # synchronous functions
    client.doNextStep = True
    client.gyro_signal = 0.0
    client.accel_signal = [0.0, 0.0]
    client.tilt_angle = 0.0
    client.list_gyro_signal = list()
    client.list_accel_signal = list()
    client.u = 0.0

    def simulationStepStarted(msg):
        simTime = msg[1][b'simulationTime']

    def simulationStepDone(msg):
        simTime = msg[1][b'simulationTime']
        client.doNextStep = True

    def gyro_signal_callback(msg):
        # Decode the string payload if it arrived as raw bytes
        if isinstance(msg[1], bytes):
            msg[1] = msg[1].decode('ascii')
class ArmPID(object):
    def __init__(self, robot, kp, kd, ki, axes, error_thres, pose_tracker):
        # Initialise the PID controller.
        # axes: list of size 3 (x, y, z); a 1 marks an axis along which control is required, e.g. [0, 1, 0]

        self.robot = robot
        self.pid = PID(kp, kd, ki)
        self.pid_param = PidParam(kp, ki, kd)
        self.pose_tracker = pose_tracker
        self.axes = axes
        self.error_thresh = error_thres
        self.jac_step = 0.002
        self.pid_update_pub = rospy.Publisher('/arm_tracking/pid_update_log',
                                              PidUpdate,
                                              queue_size=10)

    # Older trajectory-level PID implementation, kept commented out for reference:

#    def trajectory_pid(self,target_trajectory):
#        for target in target_trajectory:
#            waypoints=[]
##            waypoints.append((self.robot.group.get_current_pose().pose))
#            waypoints.append((self.robot.get_end_effector_pose()))
#            waypoints.append(target)
#            #first move from current pos to the target pos along a straight line
#            pre_plan = self.robot.get_plan_move_along_line(waypoints)
#            self.robot.execute_trajectory(pre_plan)
#            #execute pid control to exactly reach target
##            self.point_pid_jacobian(target)
#            self.point_pid_cartesian(target)

    # Point-wise trajectory PID:
    #   move to point 1, perform PID at point 1,
    #   then repeat:
    #     find (dx, dy, dz) = point (j+1) - point j
    #     move current_pos + (dx, dy, dz)
    #     do PID for point (j+1)

    def trajectory_point_wise_pid(self, target_trajectory,
                                  end_effector_orientation,
                                  first_point_rframe):

        #first move from current pos to the first point along a straight line
        target = target_trajectory[0]
        rospy.loginfo(target)

        target_pose = list(first_point_rframe) + end_effector_orientation
        plan = self.robot.get_plan_move_to_goal(target_pose)
        self.robot.execute_trajectory(plan)
        time.sleep(1)
        #do pid control to reach the first point
        self.point_pid_cartesian(target)

        prev_target = target

        for target in target_trajectory[1:]:

            rospy.loginfo(target)
            delta_trans = target - prev_target
            current_pose = deepcopy(self.robot.get_end_effector_pose())
            target_pose = deepcopy(current_pose)
            target_pose.position.x += delta_trans[0]
            target_pose.position.y += delta_trans[1]
            target_pose.position.z += delta_trans[2]

            for i in range(1):
                current_pose = deepcopy(self.robot.get_end_effector_pose())
                pre_plan = self.robot.get_plan_move_along_line(
                    [current_pose, target_pose])
                self.robot.execute_trajectory(pre_plan)
                time.sleep(1)
            #execute pid control to exactly reach target
#            self.point_pid_jacobian(target)
            self.point_pid_cartesian(target)
            prev_target = target

#    do pid control for point based on jacobian
#    def point_pid_jacobian(self,target):
#
#        error_trans = self.get_error(target)
#
#        while(np.linalg.norm(error_trans) > self.error_thresh):
##            delta_trans = self.pid(error_trans)
#            delta_trans = error_trans
#            joint_angle = self.robot.group.get_current_joint_values()
#
#            jacob = self.robot.group.get_jacobian_matrix(joint_angle)[:3]
#            delta_angle = np.linalg.lstsq( jacob, delta_trans, rcond = None )[0]
#            org_joint_angle = joint_angle[:]
#            for i,ang in enumerate(joint_angle):
#                joint_angle[i] = ang + self.jac_step*delta_angle[i]
#            print(org_joint_angle,joint_angle)
#            plan = self.robot.get_plan_move_to_joint(joint_angle)
#
#            self.robot.execute_trajectory(plan,True)
#            error_trans = self.get_error(target)

#    do pid control for point based on cartesian control

    def point_pid_cartesian(self, target):
        self.pid.reset_pid()
        error_trans = self.get_error(target)
        next_pose = ''

        while (np.linalg.norm(error_trans) > self.error_thresh):

            pid_update = self.pid.Update(error_trans)

            delta_trans = pid_update[0]
            #            current_pose, next_pose = Pose(),Pose()
            ##            current_pose = self.robot.group.get_current_pose().pose
            #            current_pose.position = self.robot.get_end_effector_pose().position
            #            current_pose.orientation = self.robot.get_end_effector_pose().orientation
            #            next_pose.orientation = self.robot.get_end_effector_pose().orientation
            #            next_pose.position.x += self.robot.get_end_effector_pose().position.x + delta_trans[0]
            #            next_pose.position.y += self.robot.get_end_effector_pose().position.y + delta_trans[1]
            #            next_pose.position.z += self.robot.get_end_effector_pose().position.z + delta_trans[2]
            #
            current_pose = deepcopy(self.robot.get_end_effector_pose())
            next_pose = deepcopy(current_pose)
            next_pose.position.x += delta_trans[0]
            next_pose.position.y += delta_trans[1]
            next_pose.position.z += delta_trans[2]
            #            print(current_pose)
            #            print(next_pose)
            plan = self.robot.get_plan_move_along_line(
                [current_pose, next_pose])

            #            self.robot.display_trajectory(plan)
            #            time.sleep(0.5)
            self.robot.execute_trajectory(plan, True)
            time.sleep(1)
            #            self.publish_pid_update_msg(pid_update)
            error_trans = self.get_error(target)
            print("doing pid")
#            rospy.loginfo(error_trans)

        pid_update = self.pid.Update(error_trans)
        self.publish_pid_update_msg(pid_update)

        rospy.loginfo("Completed PiD for this point. Final Error ", )
        error_trans = self.get_error(target)
        rospy.loginfo(error_trans)
#                      str(next_pose.position.x)next_pose.position.y,next_pose.position.z))

    # Continuous PID performed while moving along the trajectory

    def trajectory_continuous_pid(self, target_trajectory,
                                  end_effector_orientation,
                                  first_point_rframe):
        self.pid.reset_pid()

        #first move from current pos to the first point along a straight line
        target = target_trajectory[0]
        rospy.loginfo(target)

        # Get the robot somewhere near the first pose, so the camera can see it
        target_pose = list(first_point_rframe) + end_effector_orientation
        plan = self.robot.get_plan_move_to_goal(target_pose)
        self.robot.execute_trajectory(plan)
        time.sleep(1)
        error_trans = self.get_error(target)
        prev_target = target

        #        i = 0
        for target in target_trajectory:

            rospy.loginfo(target)
            pid_update = self.pid.Update(error_trans)
            self.publish_pid_update_msg(pid_update)

            pid_trans = pid_update[0]
            #            if i >0: pid_trans = 0
            #next movement=planned diff + f(error)
            delta_trans = (target - prev_target) + pid_trans
            current_pose = deepcopy(self.robot.get_end_effector_pose())
            target_pose = deepcopy(self.robot.get_end_effector_pose())
            target_pose.position.x += delta_trans[0]
            target_pose.position.y += delta_trans[1]
            target_pose.position.z += delta_trans[2]

            #            current_pose = deepcopy(self.robot.get_end_effector_pose())
            pre_plan = self.robot.get_plan_move_along_line(
                [current_pose, target_pose])
            self.robot.execute_trajectory(pre_plan)
            time.sleep(1)

            prev_target = target
            rospy.loginfo("Completed PiD for this point. Final Error ", )
            error_trans = self.get_error(target)
            rospy.loginfo(error_trans)
#            rospy.loginfo("target_pose_")
#            rospy.loginfo(target_pose)
#            i +=1

        # Publish the error at the last point
        pid_update = self.pid.Update(error_trans)
        self.publish_pid_update_msg(pid_update)

    # Returns a 3D vector giving the error along each axis
    def get_error(self, target, robot=None):
        #get pose as array
        if self.robot.virtual_or_real == "real":
            robot = self.pose_tracker.get_robot_ef_position()
        elif self.robot.virtual_or_real == "virtual":
            robot = self.pose_tracker.get_robot_pose()


        # robot = self.pose_tracker.get_robot_pose()
        error = target - robot
        for index, axis in enumerate(self.axes):
            if axis == 0:
                error[index] = 0
        return error

    # Generates and publishes the PidUpdate message
    def publish_pid_update_msg(self, pid_update_output):
        update_msg = PidUpdate()
        h = std_msgs.msg.Header()
        h.stamp = rospy.Time.now()
        update_msg.header = h
        update_msg.pid_param = self.pid_param
        update_msg.error = pid_update_output[1]
        update_msg.integ_error = pid_update_output[2]
        update_msg.delta_error = pid_update_output[3]
        update_msg.response = pid_update_output[0]
        self.pid_update_pub.publish(update_msg)
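
# A hedged usage sketch for ArmPID (the robot and pose_tracker objects, the
# gains, and the error threshold below are illustrative assumptions, not values
# from the original project):
#   arm_pid = ArmPID(robot, kp=1.0, kd=0.0, ki=0.0, axes=[1, 1, 1],
#                    error_thres=0.005, pose_tracker=pose_tracker)
#   arm_pid.trajectory_point_wise_pid(targets, end_effector_orientation,
#                                     first_point_rframe)
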
class LineFollower(object):
    def __init__(self):
        rospy.logwarn("Init line Follower")
        self.bridge_object = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                          self.camera_callback)
        self.movekobuki_object = MoveKobuki()
        # We set initial values to the ideal case, where the line is detected straight ahead
        setPoint_value = 0.0
        state_value = 0.0
        self.pid_object = PID(init_setPoint_value=setPoint_value,
                              init_state=state_value)

    def camera_callback(self, data):

        try:
            # We select bgr8 because it is the default OpenCV encoding
            cv_image = self.bridge_object.imgmsg_to_cv2(
                data, desired_encoding="bgr8")
        except CvBridgeError as e:
            print(e)
            return

        # Get the image dimensions and crop out the parts of the image we don't need.
        # Bear in mind that, because it is an image matrix, the first value is the start row and the second is the end row.
        # Select the limits so that the line is seen neither too close nor too far, and keep the cropped
        # portion as small as possible to make processing faster.
        height, width, channels = cv_image.shape
        descentre = 160
        rows_to_watch = 20
        crop_img = cv_image[height // 2 + descentre:height // 2 + descentre +
                            rows_to_watch, 1:width]

        # Convert from BGR to HSV
        hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)

        # Define the Yellow Colour in HSV
        #[[[ 30 196 235]]]
        """
        To know which color to track in HSV, start from its BGR value. Use ColorZilla to get the color registered by the camera.
        >>> yellow = np.uint8([[[B,G,R ]]])
        >>> hsv_yellow = cv2.cvtColor(yellow,cv2.COLOR_BGR2HSV)
        >>> print( hsv_yellow )
        [[[ 60 255 255]]]
        """
        lower_yellow = np.array([20, 100, 100])
        upper_yellow = np.array([50, 255, 255])

        # Threshold the HSV image to get only yellow colors
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)

        # Calculate centroid of the blob of binary image using ImageMoments
        m = cv2.moments(mask, False)
        try:
            cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
        except ZeroDivisionError:
            cy, cx = height / 2, width / 2

        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(crop_img, crop_img, mask=mask)

        # Draw the centroid on the resulting image
        cv2.circle(res, (int(cx), int(cy)), 10, (0, 0, 255), -1)

        cv2.imshow("RES", res)

        cv2.waitKey(1)

        # Move the robot: center the line at the middle of the image width (640 => 320)
        setPoint_value = width / 2
        self.pid_object.setpoint_update(value=setPoint_value)

        twist_object = Twist()
        twist_object.linear.x = 0.1

        # Make it start turning

        self.pid_object.state_update(value=cx)
        effort_value = self.pid_object.get_control_effort()
        # We divide the effort to map it to the normal values for angular speed in the turtlebot
        rospy.logwarn("Set Value==" + str(setPoint_value))
        rospy.logwarn("State Value==" + str(cx))
        rospy.logwarn("Effort Value==" + str(effort_value))
        angular_effort_value = effort_value / 1000.0

        twist_object.angular.z = angular_effort_value
        rospy.logwarn("TWist ==" + str(twist_object.angular.z))
        self.movekobuki_object.move_robot(twist_object)

    def clean_up(self):
        self.movekobuki_object.clean_class()
        cv2.destroyAllWindows()
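
# A hedged sketch of how this node is typically launched; the node name and
# the shutdown wiring are assumptions, not taken from the original fragment:
#   def main():
#       rospy.init_node('line_following_node', anonymous=True)
#       line_follower_object = LineFollower()
#       rospy.on_shutdown(line_follower_object.clean_up)
#       rospy.spin()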
Example #9

if __name__ == '__main__':

    #Listener
    rospy.init_node('laserListener', anonymous=True)
    rospy.Subscriber('/move_base_simple/goal', PoseStamped,
                     goalCB)  #in odom frame
    tflistener = tf.TransformListener()

    #Publishers:
    pub = rospy.Publisher('cmd_vel_mux/input/teleop', Twist, queue_size=10)
    rate = rospy.Rate(60)  # 60hz

    #  PID Terms:    P   I   D I_max
    angle_pid = PID(7.0, 0.0, 3.0, 0.2)  #Tuned Parameters
    #angle_pid = PID(7.0,0.0,0.0,0.2) #Undamped Parameters
    #angle_pid = PID(7.0,0.0,1.0,0.2) #Underdamped Parameters
    #angle_pid = PID(1.0,0.0,25.0,0.2) #Overdamped Parameters
    t_last = 0.
    while not rospy.is_shutdown():
        v = 0.
        phi = 0.
        try:
            # Update the goal so it's relative to the current robot position
            t_now = rospy.get_rostime()
            goal.header.stamp = t_now
            # Make sure we have tf data before doing this
            tflistener.waitForTransform('odom', 'base_link', t_now,
                                        rospy.Duration(4.0))
            localGoal = tflistener.transformPose(
def __init__(self):

    self.bridge_object = CvBridge()
    self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                      self.camera_callback)
    self.movekobuki_object = MoveKobuki()
    self.pid_object = PID()
Example #11
def init_pid_controller():
    pid_object = PID()
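
# A minimal sketch of a PID class compatible with the interface used in
# Example #3 above (SetPoint, setSampleTime, update, output). It is an
# illustrative assumption, not the actual PID library used by these snippets.
class SimplePID(object):
    def __init__(self, P=0.2, I=0.0, D=0.0):
        self.Kp, self.Ki, self.Kd = P, I, D
        self.SetPoint = 0.0
        self.sample_time = 0.0
        self.output = 0.0
        self._integral = 0.0
        self._last_error = 0.0

    def setSampleTime(self, sample_time):
        self.sample_time = sample_time

    def update(self, feedback_value):
        # Classic positional PID: u = Kp*e + Ki*integral(e) + Kd*de/dt
        error = self.SetPoint - feedback_value
        dt = self.sample_time if self.sample_time > 0 else 1.0
        self._integral += error * dt
        derivative = (error - self._last_error) / dt
        self._last_error = error
        self.output = (self.Kp * error +
                       self.Ki * self._integral +
                       self.Kd * derivative)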