class StreetSignRecognizer(object):
    """This robot should take take in an image of a street sign and match that
       with what street sign it is.
    """

    def __init__(self):
        """Initialize the street sign recognizer """
        rospy.init_node('street_sign_recognizer')
        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV
        rospy.Subscriber("sign_follower/cropped_sign", Image, self.recognize_sign)

        images = {
            "left": '../images/leftturn_box_small.png',
            "right": '../images/rightturn_box_small.png',
            "uturn": '../images/uturn_box_small.png'
            }
        self.tm = TemplateMatcher(images)

    def recognize_sign(self, msg):
        image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        pred = self.tm.predict(image)
        print pred

    def run(self):
        """ The main run loop"""
        r = rospy.Rate(9)
        while not rospy.is_shutdown():
            print "asdf"
            r.sleep()
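The original snippet above does not show how the node is started; a minimal launch block, mirroring the __main__ pattern used elsewhere on this page, might look like this sketch:

if __name__ == '__main__':
    node = StreetSignRecognizer()
    node.run()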
Example #7
                cv2.waitKey(5)

                # cv2.imshow('image_info', self.image_info_window)
                # cv2.waitKey(5)

            r.sleep()


if __name__ == '__main__':
    images = {
        "left": '../images/leftturn_box_small.png',
        "right": '../images/rightturn_box_small.png',
        "uturn": '../images/uturn_box_small.png'
    }

    scenes = [
        "../images/uturn_scene.jpg", "../images/leftturn_scene.jpg",
        "../images/rightturn_scene.jpg"
    ]

    node = StreetSignRecognizer()
    node.run()

    tm = TemplateMatcher(images)

    for filename in scenes:
        scene_img = cv2.imread(filename, 0)
        pred = tm.predict(scene_img)
        print filename.split('/')[-1]
        print pred
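Every example on this page assumes the same TemplateMatcher interface: the constructor takes a dict mapping sign names to grayscale template image paths, and predict() takes a grayscale crop and returns a dict mapping each sign name to a confidence score. The actual sign_follower implementation is not shown here; the stand-in below only sketches that interface, using plain cv2.matchTemplate instead of keypoint matching.

import cv2


class SimpleTemplateMatcher(object):
    """ A minimal stand-in for the TemplateMatcher interface assumed on this page """

    def __init__(self, images):
        # load each template as a grayscale image
        self.templates = {name: cv2.imread(path, 0) for name, path in images.items()}

    def predict(self, gray_img):
        """ Return a dict mapping each sign name to a rough confidence score """
        confidences = {}
        for name, template in self.templates.items():
            # resize the input crop to the template size and compare directly
            resized = cv2.resize(gray_img, (template.shape[1], template.shape[0]))
            score = cv2.matchTemplate(resized, template, cv2.TM_CCOEFF_NORMED)
            confidences[name] = float(score.max())
        return confidences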
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """
    def __init__(self):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')
        self.cv_image = None  # the latest image from the camera
        self.hsv_image = None
        self.res_image = None
        self.mask = None
        self.cropped_sign = None
        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV
        cv2.namedWindow('video_window')
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)
        self.signpub = rospy.Publisher('/predicted_sign',
                                       String,
                                       queue_size=10)
        self.prediction = ""

        #sets up slider bars for hue isolation
        #cv2.namedWindow('threshold_image')
        # current values work well for uturn and right, but poorly for left
        # values for left (20, 166, 139) increase error for uturn and right
        self.hsv_lb = np.array(
            [25, 202, 186]
        )  # hsv lower bound 20, 166, 139 25, 202, 186 26, 214, 167 [15, 225, 139
        # cv2.createTrackbar('H lb', 'threshold_image', 0, 255, self.set_h_lb)
        # cv2.createTrackbar('S lb', 'threshold_image', 0, 255, self.set_s_lb)
        # cv2.createTrackbar('V lb', 'threshold_image', 0, 255, self.set_v_lb)
        self.hsv_ub = np.array(
            [204, 255, 255])  # hsv upper bound 204, 255, 255 204, 255, 230
        # cv2.createTrackbar('H ub', 'threshold_image', 0, 255, self.set_h_ub)
        # cv2.createTrackbar('S ub', 'threshold_image', 0, 255, self.set_s_ub)
        # cv2.createTrackbar('V ub', 'threshold_image', 0, 255, self.set_v_ub)

        # initialize template_matcher
        images = {
            "left": '../images/leftturn_box_small.png',
            "right": '../images/rightturn_box_small.png',
            "uturn": '../images/uturn_box_small.png'
        }

        self.tm = TemplateMatcher(images)

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")

        #convert to img hsv
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)
        #create img mask for only yellow in hsv img
        self.mask = cv2.inRange(self.hsv_image, self.hsv_lb, self.hsv_ub)

        # Bitwise-AND mask and original image
        self.res_image = cv2.bitwise_and(self.cv_image,
                                         self.cv_image,
                                         mask=self.mask)

        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom

        # crop bounding box region of interest
        self.cropped_sign = cv2.cvtColor(self.cv_image[top:bottom, left:right],
                                         cv2.COLOR_BGR2GRAY)

        # draw bounding box rectangle
        cv2.rectangle(self.cv_image,
                      left_top,
                      right_bottom,
                      color=(0, 0, 255),
                      thickness=5)

    def set_h_lb(self, val):
        """ set hue lower bound """
        self.hsv_lb[0] = val

    def set_s_lb(self, val):
        """ set saturation lower bound """
        self.hsv_lb[1] = val

    def set_v_lb(self, val):
        """ set value lower bound """
        self.hsv_lb[2] = val

    def set_h_ub(self, val):
        """ set hue upper bound """
        self.hsv_ub[0] = val

    def set_s_ub(self, val):
        """ set saturation upper bound """
        self.hsv_ub[1] = val

    def set_v_ub(self, val):
        """ set value upper bound """
        self.hsv_ub[2] = val

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """
        #use self.mask and boundingRect() to compute left_top and right_bottom
        bound = cv2.findNonZero(self.mask)
        (bx, by, bw, bh) = cv2.boundingRect(bound)
        left_top = (bx, by)
        right_bottom = (bx + bw, by + bh)
        return left_top, right_bottom

    def run(self):
        """ The main run loop sometimes throws errors, but if you try again it works"""
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if not self.cv_image is None:
                print "here"
                if not self.cropped_sign is None:
                    pred = self.tm.predict(self.cropped_sign)
                    predNum = pred[max(pred, key=pred.get)]
                    #print predNum
                    if (predNum > 0.75):
                        self.prediction = max(pred, key=pred.get)
                        print self.prediction
                        print pred[self.prediction]
                    if not self.prediction == "":
                        self.signpub.publish(String(self.prediction))
                # creates a window and displays the image for X milliseconds
                #cv2.imshow('video_window', self.cv_image)
                #cv2.imshow('masked_window', self.res_image)
                #cv2.imshow('hsv_window', self.hsv_image)
                cv2.waitKey(5)
                #print "there"
            r.sleep()
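The run loop above publishes the winning label on /predicted_sign. A minimal consumer for that topic could look like the sketch below; the node name and callback are assumptions, not part of the original example.

import rospy
from std_msgs.msg import String


def print_prediction(msg):
    print msg.data  # e.g. "uturn", "left", or "right"


if __name__ == '__main__':
    rospy.init_node('predicted_sign_listener')
    rospy.Subscriber('/predicted_sign', String, print_prediction)
    rospy.spin()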
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """


    def __init__(self):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')
        self.cv_image = None                        # the latest image from the camera
        # self.grid_cell = None
        self.binary_image = None
        self.image_info_window = None
        self.bridge = CvBridge()                    # used to convert ROS messages to OpenCV
        # self.hsv_lb = np.array([23, 175, 133])           # hsv lower bound
        # self.hsv_ub = np.array([40, 255, 255])     # hsv upper bound
        self.hsv_lb = np.array([0, 0, 0])           # hsv lower bound
        self.hsv_ub = np.array([255, 255, 255])     # hsv upper bound
        # self.grid_cell_w = 64*3
        # self.grid_cell_h = 48*3
        self.TM = TemplateMatcher() 
        self.grid = GridImage()

        cv2.namedWindow('video_window')
        cv2.setMouseCallback('video_window', self.process_mouse_event)
        cv2.namedWindow('threshold_image')
        
        cv2.createTrackbar('H lb', 'threshold_image', 0, 255, self.set_h_lb)
        cv2.createTrackbar('S lb', 'threshold_image', 0, 255, self.set_s_lb)
        cv2.createTrackbar('V lb', 'threshold_image', 0, 255, self.set_v_lb)
        
        cv2.createTrackbar('H ub', 'threshold_image', 0, 255, self.set_h_ub)
        cv2.createTrackbar('S ub', 'threshold_image', 0, 255, self.set_s_ub)
        cv2.createTrackbar('V ub', 'threshold_image', 0, 255, self.set_v_ub)
        
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)
        self.gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
        self.good_thresh = cv2.threshold(self.cv_image, self.hsv_lb[0], self.hsv_ub[0], cv2.THRESH_BINARY)
        # # print type(self.good_thresh[1])
        # if not self.good_thresh is None:
        #     cv2.imshow('threshold_image',self.good_thresh[1])
        #     cv2.waitKey(5)
        
        # # NumPy array slicing!!
        # self.grid_cell = self.gray_image[self.grid_cell_h:2*self.grid_cell_h,
        #                 self.grid_cell_w:2*self.grid_cell_w]

        self.binary_image = cv2.inRange(self.cv_image, self.hsv_lb, self.hsv_ub)
        if not self.binary_image is None:
            cv2.imshow('threshold_image',self.binary_image)
            cv2.waitKey(5)

        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom

        # crop bounding box region of interest
        cropped_sign = self.gray_image[top:bottom, left:right]
        # cropped_sign = cv2.rectangle(self.gray_image,left_top,right_bottom,(0,255,0),2)
        print self.TM.predict(self.gray_image)

        # draw bounding box rectangle
        cv2.rectangle(self.gray_image, left_top, right_bottom, color=(0, 0, 255), thickness=5)

    def process_mouse_event(self, event, x,y,flags,param):
        """ Process mouse events so that you can see the color values associated
            with a particular pixel in the camera images """
        image_info_window = 255*np.ones((500,500,3))
        # show hsv values
        cv2.putText(image_info_window,
                    'Color (h=%d,s=%d,v=%d)' % (self.hsv_image[y,x,0], self.hsv_image[y,x,1], self.hsv_image[y,x,2]),
                    (5,50), # 5 = x, 50 = y
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (0,0,0))
        # show bgr values
        cv2.putText(image_info_window,
                    'Color (b=%d,g=%d,r=%d)' % (self.cv_image[y,x,0], self.cv_image[y,x,1], self.cv_image[y,x,2]),
                    (5,100),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (0,0,0))
        cv2.imshow('image_info', image_info_window)
        cv2.waitKey(5)

    def set_h_lb(self, val):
        """ set hue lower bound """
        self.hsv_lb[0] = val

    def set_s_lb(self, val):
        """ set saturation lower bound """
        self.hsv_lb[1] = val

    def set_v_lb(self, val):
        """ set value lower bound """
        self.hsv_lb[2] = val

    def set_h_ub(self, val):
        """ set hue upper bound """
        self.hsv_ub[0] = val

    def set_s_ub(self, val):
        """ set saturation upper bound """
        self.hsv_ub[1] = val

    def set_v_ub(self, val):
        """ set value upper bound """
        self.hsv_ub[2] = val

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """
        # TODO: YOUR SOLUTION HERE
        self.binary_image = cv2.inRange(self.cv_image, self.hsv_lb, self.hsv_ub)
        _, self.good_thresh = cv2.threshold(self.cv_image, self.hsv_lb[2], self.hsv_ub[2], cv2.THRESH_BINARY)
        
        contours,_ = cv2.findContours(self.binary_image, 1, 2)
        # print type(contours[0])
        area = 0
        max_cnt = None
        for cnt in contours:
            if area < cv2.contourArea(cnt):
                area = cv2.contourArea(cnt)
                max_cnt = cnt
        # print max_cnt

        x,y,w,h = cv2.boundingRect(max_cnt)
        print w,h
        # x,y,w,h = cv2.boundingRect(np.array(self.binary_image, dtype=int))

        # for i in self.grid.grid_cell:

        left_top = (x,y)
        right_bottom = (x+w,y+h)

        return left_top, right_bottom

    def run(self):
        """ The main run loop"""
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if self.cv_image is not None: # and not self.grid_cell is None:
                print "here"
                # creates a window and displays the image for X milliseconds
                cv2.imshow('video_window', self.gray_image)
                # cv2.imshow('video_window', self.grid_cell)
                cv2.waitKey(5)
            r.sleep()
Example #10
class StreetSignRecognizer(object):
    """
    The main class of this script, which holds the sign detection methods and variables
    It outputs a detected sign which it sends to the Template Matcher module for
    categorization
    """
    def __init__(self):
        #init ROS node
        rospy.init_node('street_sign_recognizer')

        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV

        self.image = None  # the latest image from the camera
        self.cv_image = None  # gaussian blurred image
        self.hsv_image = None  # HSV form of image
        self.binary_image = None  # Binary form of image
        self.grid_cell = None  # portion of an image

        self.density = 0  # amount of white in a grid cell
        self.threshold = .001  # amount of white needed in a grid cell to be part of sign

        #the thresholds to find the yellow color of the sign
        self.hsv_lb = np.array([17, 161, 160])  # hsv lower bound
        self.hsv_ub = np.array([75, 255, 255])  # hsv upper bound

        # the various windows for visualization
        cv2.namedWindow('HSV_window')
        cv2.namedWindow('Binary_window')
        cv2.namedWindow('RGB_window')

        #set of template images for the Template Matcher
        images = {
            "left": '../images/leftturn_box_small.png',
            "right": '../images/rightturn_box_small.png',
            "uturn": '../images/uturn_box_small.png'
        }

        #variables for Template Matcher
        self.matcher = TemplateMatcher(
            images)  # initialize Template Matcher class
        self.matched_threshold = 40  # threshold to determine which sign the input is
        self.total_confidence = {
            'uturn': 0.0,
            'left': 0.0,
            'right': 0.0
        }  # dictionary that holds cumulative confidence of each sign
        self.recognized = False  #boolean to ensure only one run of the recognition

        #init ROS Subscriber to camera image
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)

    def process_image(self, msg):
        """
        Process image messages from ROS by detecting a sign, and sends the sign
        to TemplateMatcher for categorization
        """
        #converts ROS image to OpenCV image
        self.image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        #blurs image to average out some color
        self.cv_image = cv2.medianBlur(self.image, 5)

        #finds the corners of the bounding box on the sign
        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom

        # draw bounding box rectangle
        cv2.rectangle(self.image,
                      left_top,
                      right_bottom,
                      color=(0, 0, 255),
                      thickness=5)

        #crops the image to the detected sign given by the bounding box ranges
        detected_sign = self.image[top:bottom, left:right]
        #as long as detected_sign is non-empty and a sign has not been recognized yet
        if not self.recognized and detected_sign.size > 0:
            #converts to grayscale for processing
            gray_image = cv2.cvtColor(detected_sign, cv2.COLOR_BGR2GRAY)
            #get the dictionary of confidence values from TemplateMatcher for this image
            sign_confidences = self.matcher.predict(gray_image)
            #update the cumulative dictionary of confidence values
            for sign, confidence in sign_confidences.items():
                self.total_confidence[sign] += confidence

            #if a value in the cumulative dictionary passes the threshold
            for sign, tot_confidence in self.total_confidence.items():
                if tot_confidence > self.matched_threshold:
                    #declare that sign, and discontinue recognition
                    print "I know what this is! It's a", sign, "sign!"
                    self.recognized = True
                    break

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """
        #converts RGB image to hue, saturation, value image
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)
        #creates a binary image on the basis of the yellow sign
        self.binary_image = cv2.inRange(
            self.hsv_image, (self.hsv_lb[0], self.hsv_lb[1], self.hsv_lb[2]),
            (self.hsv_ub[0], self.hsv_ub[1], self.hsv_ub[2]))

        left_top = (640, 480)
        right_bottom = (0, 0)
        #splits the image into 100 grid cells
        for r in range(1, 11):
            for c in range(1, 11):
                grid_cell_x = 64 * (c - 1)
                grid_cell_y = 48 * (r - 1)
                grid_cell = self.binary_image[grid_cell_y:grid_cell_y + 48,
                                              grid_cell_x:grid_cell_x + 64]
                self.density = np.sum(grid_cell)
                self.density /= (255.0 * 48 * 64)
                #checks if current cell contains enough white to be considered part of the sign
                if (self.density > self.threshold):
                    #if so, check if current cell increases the width or height of the bounding box
                    if (grid_cell_x < left_top[0]
                            or grid_cell_y < left_top[1]):
                        left_top = (grid_cell_x, grid_cell_y)
                    if ((grid_cell_x + 64) > right_bottom[0]
                            or (grid_cell_y + 48) > right_bottom[1]):
                        right_bottom = (grid_cell_x + 64, grid_cell_y + 48)

        #if nothing was found, then just create a point at the image's center
        if (right_bottom == (0, 0)):
            left_top = (320, 240)
            right_bottom = (320, 240)

        return left_top, right_bottom

    def run(self):
        """
        The main run loop
        """
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if not self.image is None:
                # creates the windows and displays the RGB, HSV, and binary image
                cv2.imshow('HSV_window', self.hsv_image)
                cv2.imshow('Binary_window', self.binary_image)
                cv2.imshow('RGB_window', self.image)

                cv2.waitKey(5)
            r.sleep()
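sign_bounding_box above splits the binary image into a grid and keeps cells whose white-pixel density passes a threshold. The helper below is a standalone sketch of that idea; the default cell size and threshold simply mirror the values used above.

import numpy as np


def dense_cells(binary_image, cell_w=64, cell_h=48, threshold=0.001):
    """ Return the (x, y) top-left corners of grid cells containing enough white pixels """
    dense = []
    height, width = binary_image.shape[:2]
    for y in range(0, height, cell_h):
        for x in range(0, width, cell_w):
            cell = binary_image[y:y + cell_h, x:x + cell_w]
            density = np.sum(cell) / (255.0 * cell.size)
            if density > threshold:
                dense.append((x, y))
    return dense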
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """
    def __init__(self):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')
        self.cv_image = None  # the latest image from the camera
        self.cv_image_res = None
        self.grayscale_sign = None
        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV
        cv2.namedWindow('video_window')
        cv2.namedWindow('grayscale_sign_window')
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)

        cv2.namedWindow('threshold_image')
        self.hsv_lb = np.array([30, 75, 75])  # hsv lower bound
        cv2.createTrackbar('H lb', 'threshold_image', 0, 360, self.set_h_lb)
        cv2.createTrackbar('S lb', 'threshold_image', 0, 100, self.set_s_lb)
        cv2.createTrackbar('V lb', 'threshold_image', 0, 100, self.set_v_lb)
        self.hsv_ub = np.array([90, 100, 100])  # hsv upper bound
        cv2.createTrackbar('H ub', 'threshold_image', 0, 360, self.set_h_ub)
        cv2.createTrackbar('S ub', 'threshold_image', 0, 100, self.set_s_ub)
        cv2.createTrackbar('V ub', 'threshold_image', 0, 100, self.set_v_ub)

        self.tm = TemplateMatcher({
            "left":
            '/home/hdavidzhu/catkin_ws/src/sign_follower/images/leftturn_box_small.png',
            "right":
            '/home/hdavidzhu/catkin_ws/src/sign_follower/images/rightturn_box_small.png',
            "uturn":
            '/home/hdavidzhu/catkin_ws/src/sign_follower/images/uturn_box_small.png'
        })

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")

        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom

        # crop bounding box region of interest
        cropped_sign = self.cv_image_res[top:bottom, left:right]

        # draw bounding box rectangle
        cv2.rectangle(self.cv_image_res,
                      left_top,
                      right_bottom,
                      color=(0, 0, 255),
                      thickness=5)

        # Convert to grayscale
        self.grayscale_sign = cv2.cvtColor(cropped_sign, cv2.COLOR_BGR2GRAY)

        # Predict
        predictions = self.tm.predict(
            cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY))
        prediction = max(predictions.iteritems(), key=operator.itemgetter(1))
        print prediction

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """

        # Convert colorspaces to HSV
        cv_image_hsv = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)

        # Apply filter to select only for objects in the yellow color spectrum
        lb = np.round(
            np.multiply(self.hsv_lb, [255.0 / 360, 255.0 / 100, 255.0 / 100]))
        ub = np.round(
            np.multiply(self.hsv_ub, [255.0 / 360, 255.0 / 100, 255.0 / 100]))
        mask = cv2.inRange(cv_image_hsv, lb, ub)
        self.cv_image_res = cv2.bitwise_and(self.cv_image,
                                            self.cv_image,
                                            mask=mask)

        # Apply bounding box over most dense region
        x, y, w, h = cv2.boundingRect(mask)
        left_top = (x, y)
        right_bottom = (x + w, y + h)
        return left_top, right_bottom

    def run(self):
        """ The main run loop"""
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if not self.cv_image is None:
                # creates a window and displays the image for X milliseconds
                cv2.imshow('video_window', self.cv_image_res)
                cv2.imshow('grayscale_sign_window', self.grayscale_sign)
                cv2.waitKey(5)
            r.sleep()

    def set_h_lb(self, val):
        """ set hue lower bound """
        self.hsv_lb[0] = val

    def set_s_lb(self, val):
        """ set saturation lower bound """
        self.hsv_lb[1] = val

    def set_v_lb(self, val):
        """ set value lower bound """
        self.hsv_lb[2] = val

    def set_h_ub(self, val):
        """ set hue upper bound """
        self.hsv_ub[0] = val

    def set_s_ub(self, val):
        """ set saturation upper bound """
        self.hsv_ub[1] = val

    def set_v_ub(self, val):
        """ set value upper bound """
        self.hsv_ub[2] = val
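sign_bounding_box above rescales trackbar bounds given as H in 0-360 and S, V in 0-100 into OpenCV's 8-bit ranges. Since OpenCV stores 8-bit hue as H/2 (0-179), one way to write that conversion as a standalone helper is sketched below; the tuned values above are left as they are.

import numpy as np


def hsv_bounds_to_opencv(hsv_lb, hsv_ub):
    """ Convert (H in 0-360, S in 0-100, V in 0-100) bounds to OpenCV's 8-bit HSV scale """
    scale = np.array([179.0 / 360, 255.0 / 100, 255.0 / 100])
    lb = np.round(np.asarray(hsv_lb) * scale).astype(np.uint8)
    ub = np.round(np.asarray(hsv_ub) * scale).astype(np.uint8)
    return lb, ub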
Example #13
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """


    curr_dir = os.path.dirname(os.path.realpath(__file__))
    template_images = {
        "lturn":  os.path.join(curr_dir, '../images/leftturn_box_small.png'),
        "rturn": os.path.join(curr_dir, '../images/rightturn_box_small.png'),
        "uturn": os.path.join(curr_dir, '../images/uturn_box_small.png')
    }

    def __init__(self, image_topic, sleep_topic):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')
        self.cv_image = None                        # the latest image from the camera
        self.bridge = CvBridge()                    # used to convert ROS messages to OpenCV
        self.saveCounter = 0                        # how many images we've saved to disk


        print "Loading TemplateMatcher"
        self.template_matcher = TemplateMatcher(self.template_images)

        self.pub = rospy.Publisher('predicted_sign', String, queue_size=1)
        cv2.namedWindow('video_window')

        self.sleeping = False

        self.running_predictions = {"lturn": 0, "rturn": 0, "uturn": 0}

        self.sign_pub_mappings = {
            "uturn": 1,
            "lturn": 2,
            "rturn": 3
        }

        # 'use' parameters for quick changes in node functionality
        self.use_slider = False
        self.use_mouse_hover = False
        self.use_saver = False
        self.use_predict = True

        # threshold the running confidence summation must exceed to publish a predicted_sign
        # hand tuned
        self.decision_threshold = 35

        # # # # # # # # # # # # #
        # color params, in HSV  #
        # # # # # # # # # # # # #

        self.COLOR = "yellow"                       # which default color we binarize based on
        self.color_bounds = {}

        # tuned from the green hand ball
        self.color_bounds["green"] = (
              np.array([60,76,2])                   # min
            , np.array([80,255,255])                # max
        )
        # tuned from yellow lturn sign
        self.color_bounds["yellow"] = (
              np.array([23,175,130])                # min
            , np.array([32,255,255])                # max
        )

        if self.use_mouse_hover:
            # when mouse hovers over video window
            cv2.setMouseCallback('video_window', self.process_mouse_event)

        if self.use_slider:
            cv2.namedWindow('threshold_image')
            self.hsv_lb = np.array([0, 0, 0])
            cv2.createTrackbar('H lb', 'threshold_image', 0, 255, self.set_h_lb)
            cv2.createTrackbar('S lb', 'threshold_image', 0, 255, self.set_s_lb)
            cv2.createTrackbar('V lb', 'threshold_image', 0, 255, self.set_v_lb)
            self.hsv_ub = np.array([255, 255, 255])
            cv2.createTrackbar('H ub', 'threshold_image', 0, 255, self.set_h_ub)
            cv2.createTrackbar('S ub', 'threshold_image', 0, 255, self.set_s_ub)
            cv2.createTrackbar('V ub', 'threshold_image', 0, 255, self.set_v_ub)

        rospy.Subscriber(image_topic, Image, self.process_image)
        rospy.Subscriber(sleep_topic, Bool, self.process_sleep)

    # # # # # # # # # #
    # color callbacks #
    # # # # # # # # # #

    def set_h_lb(self, val):
        """ set hue lower bound """
        self.hsv_lb[0] = val

    def set_s_lb(self, val):
        """ set saturation lower bound """
        self.hsv_lb[1] = val

    def set_v_lb(self, val):
        """ set value lower bound """
        self.hsv_lb[2] = val

    def set_h_ub(self, val):
        """ set hue upper bound """
        self.hsv_ub[0] = val

    def set_s_ub(self, val):
        """ set saturation upper bound """
        self.hsv_ub[1] = val

    def set_v_ub(self, val):
        """ set value upper bound """
        self.hsv_ub[2] = val

    def process_sleep(self, msg):
        """ Process sleep messages from the navigation node and stash them
            in an attribute called sleeping """
        self.sleeping = msg.data

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV)

        if self.use_slider:
            # binarize based on the slider values
            self.binarized_image = cv2.inRange(self.hsv_image, self.hsv_lb, self.hsv_ub)
        else:
            # binarize based on preset values
            self.binarized_image = cv2.inRange(self.hsv_image, self.color_bounds[self.COLOR][0], self.color_bounds[self.COLOR][1])

        # compute bounding box points, bounding the "white" pixel grids in a binarized image
        binaryGrid = thresh2binarygrid(self.binarized_image, gridsize=(20, 20), percentage=0.2)
        pt1, pt2 = get_bbox_from_grid(self.binarized_image, binaryGrid, pad=1)

 
        if self.use_predict and pt1 and pt2:
            # crop and gray scale the bounding box region of interest
            cropped_sign = self.cv_image[pt1[1]:pt2[1], pt1[0]:pt2[0]]
            cropped_sign_gray = cv2.cvtColor(cropped_sign, cv2.COLOR_BGR2GRAY)

            # make predictions with confidence for each sign key
            prediction = self.template_matcher.predict(cropped_sign_gray)

            for sign_key in prediction:
                self.running_predictions[sign_key] += prediction[sign_key]

        # draw bounding box rectangle
        cv2.rectangle(self.cv_image, pt1, pt2, color=(0, 0, 255), thickness=5)
        cv2.imshow('video_window', self.cv_image)

        if self.use_saver:
            cv2.imwrite("/tmp/bin_img_{0:0>4}.jpg".format(self.saveCounter), cropped_sign)
            self.saveCounter += 1

        cv2.waitKey(5)

    def process_mouse_event(self, event, x,y,flags,param):
        """ Process mouse events so that you can see the color values associated
            with a particular pixel in the camera images """
        image_info_window = 255*np.ones((500,500,3))

        # show hsv values
        cv2.putText(image_info_window,
                    'Color (h=%d,s=%d,v=%d)' % (self.hsv_image[y,x,0], self.hsv_image[y,x,1], self.hsv_image[y,x,2]),
                    (5,50), # 5 = x, 50 = y
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (0,0,0))

        # show bgr values
        cv2.putText(image_info_window,
                    'Color (b=%d,g=%d,r=%d)' % (self.cv_image[y,x,0], self.cv_image[y,x,1], self.cv_image[y,x,2]),
                    (5,100),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1,
                    (0,0,0))
        cv2.imshow('image_info', image_info_window)
        cv2.waitKey(5)


    def run(self):
        """ The main run loop, in this node it doesn't do anything """
        r = rospy.Rate(5)
        while not rospy.is_shutdown():
            if not self.sleeping:
                print self.running_predictions
                sorted_preds = sorted(self.running_predictions.iterkeys(), key=(lambda key: self.running_predictions[key]), reverse=True)
                best = sorted_preds[0]
                second = sorted_preds[1]

                # If cumulative running predictions > decision threshold
                # AND the confidence for the best sign is greater than the second best by a certain value
                if (sum(self.running_predictions.values()) > self.decision_threshold
                and self.running_predictions[best] - self.running_predictions[second] > self.decision_threshold/5):
                    # publish the best predicted_sign and reset running predictions
                    self.pub.publish(best)
                    self.running_predictions = {"lturn": 0, "rturn": 0, "uturn": 0}
            else:
                # Don't store running predictions while sleeping
                self.running_predictions = {"lturn": 0, "rturn": 0, "uturn": 0}

            r.sleep()
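A possible entry point for the class above; the sleep topic name here is an assumption, since the original snippet does not show how the node is constructed.

if __name__ == '__main__':
    node = StreetSignRecognizer("/camera/image_raw", "/street_sign_recognizer/sleep")
    node.run()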
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """
    def __init__(self):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')

        # Create TemplateMatcher object with template images
        self.template_matcher = TemplateMatcher({
            "left":
            '../images/leftturn_box_small.png',
            "right":
            '../images/rightturn_box_small.png',
            "uturn":
            '../images/uturn_box_small.png'
        })

        self.bgr_image = None  # the latest image from the camera
        self.hsv_image = None
        self.filt_image = None
        self.cropped_sign_grayscale = None

        self.bridge = CvBridge()  # used to convert ROS messages to OpenCV

        # Create windows to view video streams
        cv2.namedWindow('video_window')
        cv2.namedWindow('filt_window')
        cv2.namedWindow('cropped_grayscale_window')
        cv2.namedWindow('img_T_window')

        # Create sliders to control image color filtering
        cv2.namedWindow('threshold_image')
        self.h_lower_bound = 25
        self.h_upper_bound = 38
        self.s_lower_bound = 199
        self.s_upper_bound = 255
        self.v_lower_bound = 203
        self.v_upper_bound = 237
        self.blur_amount = 3
        cv2.createTrackbar('H lower bound', 'threshold_image', 0, 255,
                           self.set_h_lower_bound)
        cv2.createTrackbar('H upper bound', 'threshold_image', 0, 255,
                           self.set_h_upper_bound)
        cv2.createTrackbar('S lower bound', 'threshold_image', 0, 255,
                           self.set_s_lower_bound)
        cv2.createTrackbar('S upper bound', 'threshold_image', 0, 255,
                           self.set_s_upper_bound)
        cv2.createTrackbar('V lower bound', 'threshold_image', 0, 255,
                           self.set_v_lower_bound)
        cv2.createTrackbar('V upper bound', 'threshold_image', 0, 255,
                           self.set_v_upper_bound)
        cv2.createTrackbar('Blur amount', 'threshold_image', 0, 20,
                           self.set_blur_amount)
        cv2.setTrackbarPos('H lower bound', 'threshold_image',
                           self.h_lower_bound)
        cv2.setTrackbarPos('H upper bound', 'threshold_image',
                           self.h_upper_bound)
        cv2.setTrackbarPos('S lower bound', 'threshold_image',
                           self.s_lower_bound)
        cv2.setTrackbarPos('S upper bound', 'threshold_image',
                           self.s_upper_bound)
        cv2.setTrackbarPos('V lower bound', 'threshold_image',
                           self.v_lower_bound)
        cv2.setTrackbarPos('V upper bound', 'threshold_image',
                           self.v_upper_bound)
        cv2.setTrackbarPos('Blur amount', 'threshold_image', self.blur_amount)

        # Subscribe to ROS image stream
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)

    def set_h_lower_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the hue lower bound """
        self.h_lower_bound = val

    def set_h_upper_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the hue upper bound """
        self.h_upper_bound = val

    def set_s_lower_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the saturation lower bound """
        self.s_lower_bound = val

    def set_s_upper_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the saturation upper bound """
        self.s_upper_bound = val

    def set_v_lower_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the value lower bound """
        self.v_lower_bound = val

    def set_v_upper_bound(self, val):
        """ A callback function to handle the OpenCV slider to select the value upper bound """
        self.v_upper_bound = val

    def set_blur_amount(self, val):
        """ A callback function to handle the OpenCV slider to select the blur amount """
        # The kernel blur size must always be odd
        self.blur_amount = 2 * val + 1

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        # Convert ROS image stream to opencv image stream
        self.bgr_image = self.bridge.imgmsg_to_cv2(msg,
                                                   desired_encoding="bgr8")

        # Gaussian blur the image to low-pass contents
        self.blurred_bgr_image = cv2.GaussianBlur(self.bgr_image, \
            (self.blur_amount, self.blur_amount), 0)

        # Shift to HSV image and filter for color of traffic signs
        self.hsv_image = cv2.cvtColor(self.blurred_bgr_image,
                                      cv2.COLOR_BGR2HSV)
        self.filt_image = cv2.inRange(self.hsv_image, \
            (self.h_lower_bound, self.s_lower_bound, self.v_lower_bound),
            (self.h_upper_bound, self.s_upper_bound, self.v_upper_bound))

        # Define a bounding box around the detected color
        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom
        cropped_sign = self.bgr_image[top:bottom, left:right]

        # Convert bounded image to grayscale and detect type of traffic sign
        self.cropped_sign_grayscale = cv2.cvtColor(cropped_sign,
                                                   cv2.COLOR_BGR2GRAY)
        print self.template_matcher.predict(self.cropped_sign_grayscale)

        # Draw visual bounding box on bgr_image
        cv2.rectangle(self.bgr_image,
                      left_top,
                      right_bottom,
                      color=(0, 0, 255),
                      thickness=5)

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """
        # Find contours based on the binary filtered image
        contours, hierarchy = cv2.findContours(self.filt_image, cv2.RETR_TREE, \
           cv2.CHAIN_APPROX_SIMPLE)

        # Define bounding rectangle around contour
        if (contours):
            x, y, w, h = cv2.boundingRect(contours[0])
            left_top = (x, y)
            right_bottom = (x + w, y + h)
            return left_top, right_bottom
        else:
            # If no contours were found, return a degenerate bounding box
            return (0, 0), (0, 0)

    def run(self):
        """ The main run loop"""
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            # Create windows to show all specified image streams
            if (not self.bgr_image is None):
                cv2.imshow('video_window', self.bgr_image)
            if (not self.filt_image is None):
                cv2.imshow('filt_window', self.filt_image)
            if (not self.cropped_sign_grayscale is None):
                cv2.imshow('cropped_grayscale_window',
                           self.cropped_sign_grayscale)
            if (not self.template_matcher.img_T is None):
                cv2.imshow('img_T_window', self.template_matcher.img_T)
            cv2.waitKey(5)
            r.sleep()
Example #16
class StreetSignRecognizer(object):
    """ This robot should recognize street signs """


    def __init__(self):
        """ Initialize the street sign reocgnizer """
        rospy.init_node('street_sign_recognizer')
        self.cv_image = None                        # the latest image from the camera
        self.hsv_image = None                       # converted hsv image
        self.mask = None
        self.bridge = CvBridge()                    # used to convert ROS messages to OpenCV
        cv2.namedWindow('video_window')
        rospy.Subscriber("/camera/image_raw", Image, self.process_image)
        
        cv2.namedWindow('threshold_image')
        self.hsv_lb = np.array([0, 0, 0]) # hsv lower bound
        cv2.createTrackbar('H lb', 'threshold_image', 0, 255, self.set_h_lb)
        cv2.createTrackbar('S lb', 'threshold_image', 0, 255, self.set_s_lb)
        cv2.createTrackbar('V lb', 'threshold_image', 0, 255, self.set_v_lb)
        self.hsv_ub = np.array([255, 255, 255]) # hsv upper bound
        cv2.createTrackbar('H ub', 'threshold_image', 0, 255, self.set_h_ub)
        cv2.createTrackbar('S ub', 'threshold_image', 0, 255, self.set_s_ub)
        cv2.createTrackbar('V ub', 'threshold_image', 0, 255, self.set_v_ub)

        images = {
        "left": '/home/shruti/catkin_ws/src/sign_follower/images/leftturn_box_small.png',
        "right": '/home/shruti/catkin_ws/src/sign_follower/images/rightturn_box_small.png',
        "uturn": '/home/shruti/catkin_ws/src/sign_follower/images/uturn_box_small.png'
        }

        self.pred_total = {}
        self.pred_sign = None
        self.tm = TemplateMatcher(images)

    def set_h_lb(self, val):
        """ set hue lower bound """
        self.hsv_lb[0] = 16

    def set_s_lb(self, val):
        """ set saturation lower bound """
        self.hsv_lb[1] = 196

    def set_v_lb(self, val):
        """ set value lower bound """
        self.hsv_lb[2] = 210

    def set_h_ub(self, val):
        """ set hue upper bound """
        self.hsv_ub[0] = 38

    def set_s_ub(self, val):
        """ set saturation upper bound """
        self.hsv_ub[1] = 255

    def set_v_ub(self, val):
        """ set value upper bound """
        self.hsv_ub[2] = 227

    def process_image(self, msg):
        """ Process image messages from ROS and stash them in an attribute
            called cv_image for subsequent processing """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        self.hsv_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2HSV) # HSV stands for hue, saturation, value

        self.mask = cv2.inRange(self.hsv_image, self.hsv_lb, self.hsv_ub)

        left_top, right_bottom = self.sign_bounding_box()
        left, top = left_top
        right, bottom = right_bottom

        # crop bounding box region of interest
        cropped_sign = self.cv_image[top:bottom, left:right]

        # Convert to grayscale 
        gray_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)

        # Run the template matcher
        pred = self.tm.predict(gray_image)

        if self.pred_total:
            for key, value in pred.iteritems():
                self.pred_total[key] += value
        else:
            self.pred_total = pred
        # draw bounding box rectangle
        cv2.rectangle(self.cv_image, left_top, right_bottom, color=(0, 0, 255), thickness=5)

    def sign_bounding_box(self):
        """
        Returns
        -------
        (left_top, right_bottom) where left_top and right_bottom are tuples of (x_pixel, y_pixel)
            defining topleft and bottomright corners of the bounding box
        """
        x, y, w, h = cv2.boundingRect(self.mask)
        
        left_top = (x, y)
        right_bottom = (x + w, y + h)
        return left_top, right_bottom

    def run(self):
        """ The main run loop"""
        r = rospy.Rate(10)
        while not rospy.is_shutdown():
            if not self.cv_image is None:
                # creates a window and displays the image for X milliseconds
                cv2.imshow('video_window', self.cv_image)
                if (self.pred_total):
                    if (self.pred_total[max(self.pred_total, key=self.pred_total.get)] > 5):
                        self.pred_sign = max(self.pred_total, key=self.pred_total.get)
                print self.pred_sign
                cv2.waitKey(5)
            r.sleep()
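The run loop above accumulates per-frame confidences in pred_total and declares a sign once the largest total passes 5. The same decision rule, written as a standalone sketch:

def decide(frame_predictions, threshold=5.0):
    """ Accumulate per-frame confidences and return a sign once its total passes the threshold """
    totals = {}
    for pred in frame_predictions:  # each item: dict of sign name -> confidence
        for sign, confidence in pred.items():
            totals[sign] = totals.get(sign, 0.0) + confidence
    best = max(totals, key=totals.get)
    return best if totals[best] > threshold else None


print decide([{'left': 0.9, 'right': 0.1, 'uturn': 0.2}] * 7)  # -> left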