def __init__(self):
        """Line-follower node setup: CV bridge, motion helper, then camera subscription."""
        # Converts incoming ROS Image messages to OpenCV arrays.
        self.bridge_object = CvBridge()
        # Helper used to send motion commands to the TurtleBot3.
        self.moveTurtlebot3_object = MoveTurtlebot3()
        # Subscribe LAST so camera_callback can never fire before the attributes
        # above exist (rospy invokes callbacks on a separate thread).
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
        # Alternative setup for a real robot with a Raspberry Pi camera:
        #self.bridge_object = CvBridge.compressed_imgmsg_to_cv2()
        #self.image_sub = rospy.Subscriber('/raspicam_node/image',Image,self.camera_callback)
 def __init__(self):
     """Node setup: CV bridge, motion helper, then camera + darknet detection subscriptions."""
     # (removed leftover debug print "jafbahkm")
     # Converts incoming ROS Image messages to OpenCV arrays.
     self.bridge_object = CvBridge()
     # Helper used to send motion commands to the TurtleBot3.
     self.moveTurtlebot3_object = MoveTurtlebot3()
     # Subscribe LAST so no callback fires before the attributes above exist
     # (rospy invokes callbacks on a separate thread).
     self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                       self.camera_callback)
     self.boundingbox_subscriber = rospy.Subscriber(
         '/darknet_ros/bounding_boxes', BoundingBoxes, self.call_back)
    def __init__(self):
        """Node setup: CV bridge, motion helper, then camera + detection subscriptions."""
        # Converts incoming ROS Image messages to OpenCV arrays.
        self.bridge_object = CvBridge()
        # Helper used to send motion commands to the TurtleBot3.
        self.moveTurtlebot3_object = MoveTurtlebot3()
        # Subscribe LAST so no callback fires before the attributes above exist
        # (rospy invokes callbacks on a separate thread).
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                          self.camera_callback)
        self.detection_sub = rospy.Subscriber("/darknet_ros/bounding_boxes",
                                              BoundingBoxes,
                                              self.detection_callback)
 def __init__(self):
     """Node setup: CV bridge and motion helper first, then all topic subscriptions."""
     # Converts incoming ROS Image messages to OpenCV arrays.
     self.bridge_object = CvBridge()
     # Helper used to send motion commands to the TurtleBot3.
     self.moveTurtlebot3_object = MoveTurtlebot3()
     # Subscribe LAST so no callback fires before the attributes above exist
     # (rospy invokes callbacks on separate threads).
     self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
     self.tb_sub = rospy.Subscriber('/odom',Odometry,self.pose_callback)
     self.manforce = rospy.Subscriber('/people_tracker_measurements', PositionMeasurementArray, self.man_callback)
     self.wall_following_sub = rospy.Subscriber('scan', LaserScan, self.wall_following_callback)
     self.stop_sign_sub = rospy.Subscriber("/darknet_ros/bounding_boxes", BoundingBoxes, self.detection_callback)
 def __init__(self):  # Class constructor for line following
     """Node setup: CV bridge, motion helper, then camera + stop-sign subscriptions."""
     global flag
     # Converts incoming ROS Image messages to OpenCV arrays.
     self.bridge_object = CvBridge()
     # Helper used to send motion commands to the TurtleBot3.
     self.moveTurtlebot3_object = MoveTurtlebot3()
     # Subscribe LAST so no callback fires before the attributes above exist
     # (rospy invokes callbacks on separate threads).
     self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                       self.camera_callback)
     self.stop_sign_subscriber = rospy.Subscriber(
         '/darknet_ros/bounding_boxes', BoundingBoxes,
         self.stop_sign_callback)
    def __init__(self):
        """Node setup: bridge, motion helper, publisher and rate first, then subscriptions."""
        # Converts incoming ROS Image messages to OpenCV arrays.
        self.bridge_object = CvBridge()
        # Helper used to send motion commands to the TurtleBot3.
        self.moveTurtlebot3_object = MoveTurtlebot3()
        # Velocity publisher and loop rate, ready before any callback runs.
        self.obstacle_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=10)
        self.rate = rospy.Rate(10)
        # Subscribe LAST so no callback fires before the attributes above exist
        # (rospy invokes callbacks on separate threads).
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                          self.camera_callback)
        self.obstacle_sub = rospy.Subscriber('/scan', LaserScan,
                                             self.call_back)
Example #7
0
    def __init__(self):
        """Node setup: CV bridge and motion helper first, then all topic subscriptions."""
        # Converts incoming ROS Image messages to OpenCV arrays.
        self.bridge_object = CvBridge()
        # Helper used to send motion commands to the TurtleBot3.
        self.moveTurtlebot3_object = MoveTurtlebot3()
        # Subscribe LAST so no callback fires before the attributes above exist
        # (rospy invokes callbacks on separate threads).
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                          self.camera_callback)
        self.tb_sub = rospy.Subscriber('/odom', Odometry, self.pose_callback)
        self.manforce = rospy.Subscriber('/people_tracker_measurements',
                                         PositionMeasurementArray,
                                         self.man_callback)
Example #8
0
 def __init__(self):
     """Node setup: CV bridge, motion helper, then camera subscription."""
     # Converts incoming ROS Image messages to OpenCV arrays.
     self.bridge_object = CvBridge()
     # Helper used to send motion commands to the TurtleBot3.
     self.moveTurtlebot3_object = MoveTurtlebot3()
     # Subscribe LAST so camera_callback can never fire before the attributes
     # above exist (rospy invokes callbacks on a separate thread).
     self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
     # Alternative topic for a real robot with a Raspberry Pi camera:
     # self.image_sub = rospy.Subscriber("/raspicam_node/image",Image,self.camera_callback)
def camera_callback(data):
    """Follow a yellow line: crop the frame, locate the line centroid in HSV,
    and steer the TurtleBot3 proportionally to the horizontal error.

    data: sensor_msgs/Image from the subscribed camera topic.
    """
    print('In callback')
    bridge_object = CvBridge()
    moveTurtlebot3_object = MoveTurtlebot3()
    # We select bgr8 because its the OpenCV encoding by default
    try:
        cv_image = bridge_object.imgmsg_to_cv2(data, desired_encoding="bgr8")
    except CvBridgeError as e:
        print(e)
        return  # cv_image is undefined on failure; skip this frame.

    # Crop a thin horizontal band below the image centre where the line appears.
    # Integer division (//) is required: float slice indices raise TypeError on
    # Python 3. A single [rows, cols] index replaces the original chained
    # [...][1:width], which sliced rows twice instead of selecting columns.
    height, width, channels = cv_image.shape
    crop_img = cv_image[height // 2 + 180:height // 2 + 200, 1:width]

    # Convert from BGR to HSV
    hsv = cv2.cvtColor(crop_img, cv2.COLOR_BGR2HSV)

    """
    To know which color to track in HSV use ColorZilla to get the color registered by the camera in BGR and convert to HSV. 
    """

    # Threshold the HSV image to get only yellow colors
    lower_yellow = np.array([20, 100, 100])
    upper_yellow = np.array([50, 255, 255])
    mask = cv2.inRange(hsv, lower_yellow, upper_yellow)

    # Calculate centroid of the blob of binary image using ImageMoments
    m = cv2.moments(mask, False)

    global lane_confirm

    try:
        cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
        lane_finder = False
        lane_confirm = True
    except ZeroDivisionError:
        # No yellow pixels found. cx is the x (column) coordinate, so it takes
        # width/2 -- the original had width and height swapped here. The
        # lane_finder branch below overrides the twist in this case anyway.
        cx, cy = width / 2, height / 2
        lane_finder = True

    res = cv2.bitwise_and(crop_img, crop_img, mask=mask)

    # Draw the centroid in the result image
    cv2.circle(res, (int(cx), int(cy)), 10, (0, 0, 255), -1)

    cv2.imshow("Original", cv_image)
    cv2.imshow("MASK", mask)
    cv2.imshow("RES", res)
    cv2.waitKey(1)

    # Proportional controller with a +/-5 px deadband on the horizontal error.
    error_x = cx - width / 2
    twist_object = Twist()
    if -5 < error_x < 5:
        # Original assigned an unused `error` variable, leaving the deadband inert.
        error_x = 0
    twist_object.angular.z = np.clip(-float(error_x / 1000), -0.2, 0.2)
    # Slow the robot down as the steering command grows.
    temp = np.clip(-float(error_x / 1000), -0.2, 0.2)
    twist_object.linear.x = np.clip(0.2 * (1 - abs(temp) / 0.2), 0, 0.08)

    if lane_finder:
        # NOTE(review): lane_confirm is a module-level global presumably
        # initialised elsewhere; a NameError occurs if the lane was never seen.
        if lane_confirm == False:
            # Lane never seen yet: creep forward, drifting slightly right.
            twist_object.linear.x = 0.1
            twist_object.angular.z = -0.02

        if lane_confirm:
            # Lane was seen before and lost: turn left to re-acquire it.
            twist_object.linear.x = 0.08
            twist_object.angular.z = 0.21

    rospy.loginfo("ANGULAR VALUE SENT===>"+str(twist_object.angular.z))
    # Make it start turning
    moveTurtlebot3_object.move_robot(twist_object)
Example #10
0
def camera_callback(data):
    """State-machine camera callback: follow the yellow line, or hold for 3 s
    when the global `tag` (set by the detection callback) says 'stop'.

    data: sensor_msgs/Image from the subscribed camera topic.
    """
    global tag, stop_check
    # queue_size expects an int; the original passed True (coerces to 1).
    pub = rospy.Publisher("cmd_vel", Twist, queue_size=1)
    twist_object = Twist()
    if tag == 'line_following':
        rospy.loginfo_throttle(1000, "MODE : LINE FOLLOWING")
        rospy.loginfo_throttle(3, "FOLLOWING THE LINE")
        bridge_object = CvBridge()
        moveTurtlebot3_object = MoveTurtlebot3()
        ### LINE DETECTION BEHAVIOUR
        try:
            cv_image = bridge_object.imgmsg_to_cv2(
                data, desired_encoding="bgr8"
            )  #convert image from ROS topic data to CV image
        except CvBridgeError as e:
            print(e)
            return  # cv_image is undefined on failure; skip this frame.
        height, width, channels = cv_image.shape
        # Crop a thin band below the image centre. Integer division (//) is
        # required: float slice indices raise TypeError on Python 3. A single
        # [rows, cols] index replaces the original chained [...][1:width],
        # which sliced rows twice instead of selecting columns.
        crop_img = cv_image[height // 2 + 450:height // 2 + 470, 1:width]
        hsv = cv2.cvtColor(crop_img,
                           cv2.COLOR_BGR2HSV)  # Convert from RGB to HSV
        lower_yellow = np.array(
            [20, 100,
             100])  # Threshold the HSV image to get only yellow colors
        upper_yellow = np.array(
            [50, 255,
             255])  # Threshold the HSV image to get only yellow colors
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        m = cv2.moments(
            mask, False
        )  # Calculate centroid of the blob of binary image using ImageMoments
        try:
            cx, cy = m['m10'] / m['m00'], m['m01'] / m['m00']
        except ZeroDivisionError:
            # No yellow pixels: fall back to the frame centre. cx is the x
            # (column) coordinate, so it takes width/2 -- the original had
            # width and height swapped here.
            cx, cy = width / 2, height / 2
        bitwise = cv2.bitwise_and(crop_img, crop_img, mask=mask)
        cv2.circle(bitwise, (int(cx), int(cy)), 10, (0, 0, 255),
                   -1)  # Draw the centroid in the resultant image
        cv2.imshow("Centroid Tracker", bitwise)
        cv2.resizeWindow("Centroid Tracker", 300, 700)
        cv2.waitKey(1)
        ### CONTROLLER
        cx = cx + 300  # Camera Offset
        error_x = cx - width / 2
        if -5 < error_x < 5:
            # Original assigned an unused `error` variable, leaving the
            # +/-5 px deadband inert.
            error_x = 0
        twist_object.angular.z = np.clip((-float(error_x / 1000)), -0.2, 0.2)
        # Slow the robot down as the steering command grows.
        temp = np.clip((-float(error_x / 1000)), -0.2, 0.2)
        twist_object.linear.x = np.clip(0.2 * (1 - abs(temp) / 0.2), 0, 0.08)
        pub.publish(twist_object)

        ### STOP SIGN BEHAVIOUR
    if tag == 'stop':
        # Publish a zero twist for 3 seconds, then resume line following.
        twist_object = Twist()
        t0 = float(rospy.Time.now().to_sec())
        t1 = 0
        while (t1 - t0) <= 3:
            rospy.loginfo_throttle(0.5, 'STOP SIGN!!!')
            t1 = float(rospy.Time.now().to_sec())
            pub.publish(twist_object)
        stop_check = False
        # (removed unused local `sign = "Go Ahead"`)
        tag = "line_following"
Example #11
0
            x1, y1, x2, y2 = line.reshape(4)
            x_lower += x1
            x_upper += x2
            cv2.line(line_image, (x1, y1), (x2, y2), (255, 0, 0), 10) # 10 is the line width
        x_upper = x_upper/2
        x_lower = x_lower/2

    return line_image, x_upper, x_lower


if __name__ == '__main__':
    # Lane-follower entry point: subscribe to the ZED camera and publish
    # Ackermann steering commands at 10 Hz.
    rospy.init_node('lane_follower')
    bridge_object = CvBridge()
    image_sub = rospy.Subscriber("/camera/zed/rgb/image_rect_color",Image,camera_callback)
    pub = rospy.Publisher('vesc/ackermann_cmd_mux/input/teleop', AckermannDriveStamped, queue_size = 10)
    moveTurtlebot3_object = MoveTurtlebot3()

    # Constant forward speed; only the steering angle varies.
    vel = AckermannDriveStamped()
    velocity = 0.1
    vel.drive.speed = velocity
    # angle = m

    # print(np.float32(m)*0.01)



    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        # angle = m+
        # NOTE(review): `m` is not defined in this excerpt -- presumably a
        # module-level value set by camera_callback; confirm before running.
        vel.drive.steering_angle = -m
        print(m)