Example #1
    def callback(self, data):
        print("Call back")
        print(data)
        try:
            myimage = rgb_from_ros(data)
            print("RGB Image = ", myimage)
        #cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
        except CvBridgeError as e:
            print("Hark! I am an error", e)
            return  # conversion failed, so there is no image to process below


#    (rows,cols,channels) = cv_image.shape
#    if cols > 60 and rows > 60 :
#      cv2.circle(cv_image, (50,50), 10, 255)
        print("About to write image")
        image_to_write = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)
        cv2.imwrite("./duckies.jpg", image_to_write)
        print("Ctrl C now")
        cv2.imshow("Image window", myimage)
        cv2.waitKey(3)

        try:
            self.image_pub.publish(self.bridge.cv2_to_imgmsg(image_to_write, "bgr8"))  # cv_image was never defined; reuse the BGR image
        except CvBridgeError as e:
            print(e)
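The callback above assumes surrounding node plumbing (a CvBridge instance, an image_pub publisher, and a camera subscriber) that the snippet does not show. A minimal sketch of that wiring follows; the topic names are placeholders, not from the original:

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import CompressedImage, Image

class DuckieViewerNode(object):
    def __init__(self):
        self.bridge = CvBridge()
        # hypothetical topic names; substitute the robot's actual topics
        self.image_pub = rospy.Publisher("~image_out", Image, queue_size=1)
        self.sub = rospy.Subscriber("~camera/image/compressed", CompressedImage,
                                    self.callback, queue_size=1)

    # callback(self, data) as in the example above

if __name__ == "__main__":
    rospy.init_node("duckie_viewer")
    DuckieViewerNode()
    rospy.spin()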
Example #2
    def callback(self, data):
        center_circle = (0, 0)
        myimage = rgb_from_ros(data)
        #Crop image to limit window of duckiebot
        myimage = myimage[100:500, 80:800]
        #Output image for debug purposes
        cv2.imwrite("cropped_ducks.jpg", myimage)
        frame = myimage
        #Convert RGB image to BGR
        frame = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)
        #Convert the image to gray
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)
        rows = gray.shape[0]
        #Implement the HoughCircles algorithm to find the duckie(s)'s heads, which are circles
        #We found better performance when using our object detection algorithms on the transformed image compared to the original
        circles = cv2.HoughCircles(gray,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   rows / 8,
                                   param1=100,
                                   param2=30,
                                   minRadius=1,
                                   maxRadius=60)
        circles_frame = frame.copy()

        #Here, we ensure at least some circles were found, and output an image containing the circles around the original objects
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                center_circle = center
                # circle center
                cv2.circle(circles_frame, center, 1, (0, 100, 100), 3)
                # circle outline
                radius = i[2]
                cv2.circle(circles_frame, center, radius, (255, 0, 255), 3)
                cv2.imwrite("circle.jpg", circles_frame)

        # Here, we ensure at least some circles were found and implement the stop and swerve logic
        if circles is not None:
            print("Found duckies")
            if self.obj_detected == 0:
                #If we detect an object, we stop, move right, move a little forward, move left, and move forward again, to simulate swerving
                #We move forward just a bit, even if we detect a duckie, to simulate real-life conditions, where cars can't stop right away
                #before they try to swerve.
                self.robot.forward(75, 1.0)
                self.robot.stop()
                self.robot.right(90, 0.5)
                self.robot.forward(75, 1.0)
                self.robot.left(100, 0.3)
                self.robot.forward(75, 0.5)
                #Set the flag, so even if we detect the same duckie again, we try not to repeat the same logic
                self.obj_detected = 1
        else:
            #If no duckies are detected, we just move forward
            print("No duckies detected")
            self.robot.forward(75, 5.5)
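The cv2.HoughCircles call is the core of this duckie detector. A standalone sketch of the same call, annotating what each parameter controls ("ducks.jpg" is a placeholder input file):

import cv2
import numpy as np

gray = cv2.cvtColor(cv2.imread("ducks.jpg"), cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)  # suppress speckle that creates false circles

circles = cv2.HoughCircles(
    gray,
    cv2.HOUGH_GRADIENT,
    dp=1,                       # accumulator has the same resolution as the image
    minDist=gray.shape[0] / 8,  # minimum distance between detected centers
    param1=100,                 # upper Canny threshold used internally
    param2=30,                  # accumulator threshold: lower finds more (false) circles
    minRadius=1,
    maxRadius=60)

if circles is not None:
    for x, y, r in np.uint16(np.around(circles))[0, :]:
        print("circle at ({}, {}) with radius {}".format(x, y, r))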
Example #3
    def callback(self, image):
        rectified_img = CompressedImage()
        rectified_img.header.stamp = image.header.stamp
        rectified_img.format = "jpeg"
        rectified_img.data = self.image_rectifier.process_image(
            rgb_from_ros(image))

        # publish new message
        self.publisher.publish(rectified_img)
Example #4
    def go(self):
        vehicle_name = dtu.get_current_robot_name() if dtu.on_duckiebot() else "default"

        output = self.options.output
        if output is None:
            output = 'out-pipeline'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.image is not None:
            image_filename = self.options.image
            if image_filename.startswith('http'):
                image_filename = dtu.get_file_from_url(image_filename)

            bgr = dtu.bgr_from_jpg_fn(image_filename)
        else:
            print("Validating using the ROS image stream...")
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', vehicle_name,
                                      'camera_node/image/compressed')

            print('Let\'s wait for an image. Say cheese!')

            # Dummy to get ROS message
            rospy.init_node('single_image')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\nDidn\'t get any message!: %s\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!\n\n\n'
                    % (e, ))
                return  # without an image there is nothing to validate

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            self.info('Picture taken: %s ' % str(bgr.shape))

        gp = GroundProjection(vehicle_name)

        dtu.DuckietownConstants.show_timeit_benchmarks = True
        res, _stats = run_pipeline(
            bgr,
            gp,
            line_detector_name=self.options.line_detector,
            image_prep_name=self.options.image_prep,
            anti_instagram_name=self.options.anti_instagram,
            lane_filter_name=self.options.lane_filter)

        self.info('Resizing images..')
        res = dtu.resize_small_images(res)
        self.info('Writing images..')
        dtu.write_bgr_images_as_jpgs(res, output)
Example #5
    def processMsg(self, msg_in):
        # Get the image from the message
        im_rgb = du.rgb_from_ros(msg_in)
        im_bgr = cv2.cvtColor(im_rgb, cv2.COLOR_RGB2BGR)

        # Apply filters
        im_mod_bgr = self.applyFilters(im_bgr, self.filters)

        # Convert to message
        msg_out = msg_in
        msg_out.data = du.d8_compressed_image_from_cv_image(im_mod_bgr, msg_in).data

        return msg_out
Example #6
    def cbFilter(self, msg):
        filter = self.setupParameter("~filter")
        image = dt.rgb_from_ros(msg)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if filter == 'flip-vertical':
            image = np.fliplr(image)  # mirror about the vertical axis
        if filter == 'flip-horizontal':
            image = np.flipud(image)  # mirror about the horizontal axis
        if filter == 'greyscale':
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        if filter == 'sepia':
            image_copy = np.empty(image.shape, 'float')
            image = cv2.transform(image, M_Sepia, image_copy)
        new_msg = dt.d8_compressed_image_from_cv_image(image)
        self.pub_topic_a.publish(new_msg)
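M_Sepia is defined elsewhere in the original node; presumably it is the standard 3x3 sepia kernel, which appears verbatim in Example #12 below:

import numpy as np

# presumed definition (matches the sepia kernel used in Example #12)
M_Sepia = np.array([[.272, .534, .131],
                    [.349, .686, .168],
                    [.393, .769, .189]])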
Example #7
  def callback(self,data):
    print("Call back")

    center_circle = (0,0)

    myimage = rgb_from_ros(data)

    # Shantha: Full image size i captured is 1280 x 720.
    # We want a 1200 x 300 cropped view in the bottom center of the full screenshot,
    # but note numpy slicing is [rows, cols]: this takes rows 40:1240 (clipped to
    # the 720 available) and columns 420:720.
    myimage = myimage[40:1240, 420:720]
    cv2.imwrite("cropped_ducks.jpg", myimage)

    frame = myimage
    im = frame
    frame = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)

    # Convert BGR to HSV
    hsv_blue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of yellow color in HSV
    # NOTE: the saturation bounds are inverted (97 > 86), so inRange() below
    # returns an empty mask as written
    lower_blue = np.array([51, 97, 100])
    upper_blue = np.array([56, 86, 100])

    # Threshold the HSV image to get only blue colors
    mask_blue = cv2.inRange(hsv_blue, lower_blue, upper_blue)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask_blue)
    res = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
    cv2.imwrite('frame.jpg',frame)
    cv2.imwrite('mask.jpg',mask_blue)
    hsv_blue[mask_blue > 0] = ([56, 86, 100])
    cv2.imwrite('hsv_yellow.jpg',hsv_blue)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    rows = gray.shape[0]
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8,
                               param1=100, param2=30,
                               minRadius=1, maxRadius=60)
    circles_frame = frame.copy()

    # ensure at least some circles were found
    if circles is not None:
        self.num_obj_detected = 1
        print("Done! Found atleast one ducky")
    if circles is None:
        self.num_obj_detected = 0
        print("Done! No ducky found")
Example #8
    def callback(self, image):
	#print "HELLO CALLLBACK HERE"
        obst_image = CompressedImage()
        obst_image.header.stamp = image.header.stamp
        obst_image.format = "jpeg"

        # you should write the following function in your class
        obst_image.data = self.detector.process_image(rgb_from_ros(image))

        #later: instead of obst_image.data, rather:
        #1. EXTRACT OBSTACLES
        #2. SEND THEM
        #3. VISUALIZE THEM!

        # publish new message (the full message, not just its data field)
        self.publisher.publish(obst_image)
Example #9
    def callback(self, obst_list, image):
        self.publisher_bblinelist.publish(
            self.bbmarker)  # since a single publish is likely to get lost
        # print "CALLBACK HERE"
        if (self.show_marker):
            marker_list = self.visualizer.visualize_marker(obst_list)
            self.publisher_marker.publish(marker_list)

        # 4. EVENTUALLY DISPLAY IMAGE
        if (self.show_image):
            obst_image = CompressedImage()
            obst_image.header.stamp = image.header.stamp
            obst_image.format = "jpeg"
            obst_image.data = self.visualizer.visualize_image(
                rectify(rgb_from_ros(image), self.intrinsics), obst_list)
            self.publisher_img.publish(obst_image)  # publish the full message
Example #10
    def on_image_received(self, compressed_image):
        detections = self.object_detector.detect(
            rgb_from_ros(compressed_image))

        if len(detections) > 0:
            detection_list_msg = ObjectDetectionList()
            for detection in detections:
                detection_list_msg.detections.append(
                    Detection(class_label=detection['class_label'],
                              class_id=detection['class_id'],
                              xmin=detection['xmin'],
                              xmax=detection['xmax'],
                              ymin=detection['ymin'],
                              ymax=detection['ymax'],
                              score=detection['score']))

            self.pub_detection.publish(detection_list_msg)
Example #11
    def ProcessRawImage(self, img_raw):
        # rectify image
        img_undistorted = dt.rectify(dt.rgb_from_ros(img_raw), self.intrinsics)

        # compute grayscale image
        img_gray = cv2.cvtColor(img_undistorted, cv2.COLOR_RGB2GRAY)

        # detect edges
        img_canny = cv2.Canny(img_gray, self.canny_lower_threshold, self.canny_upper_threshold,
                              apertureSize=self.canny_aperture_size)

        # apply mask
        img_mask = cv2.bitwise_and(img_canny, self.mask_visible)

        # pad image to avoid running into borders
        img_processed = cv2.copyMakeBorder(img_mask, self.line_search_length, self.line_search_length,
                                           self.line_search_length, self.line_search_length, cv2.BORDER_CONSTANT,
                                           value=0)

        return img_processed, img_gray
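The border padding at the end guarantees that a fixed-length line search starting from any pixel of the original mask can never index outside the array. A toy sketch of the same padding step:

import cv2
import numpy as np

line_search_length = 10
mask = np.zeros((4, 6), dtype=np.uint8)
# pad by line_search_length on every side with zeros
padded = cv2.copyMakeBorder(mask,
                            line_search_length, line_search_length,
                            line_search_length, line_search_length,
                            cv2.BORDER_CONSTANT, value=0)
print(padded.shape)  # (4 + 2*10, 6 + 2*10) -> (24, 26)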
Example #12
# NOTE: this snippet is truncated above this point; the scaffolding here
# (imports, signature, loop, and the greyscale test) is an assumed
# reconstruction based on the call instagram(img, filters) below.
import sys

import cv2
import numpy as np
import rosbag
from duckietown_utils import rgb_from_ros, d8_compressed_image_from_cv_image


def instagram(gen, filters):
    for f in filters:
        if f == "greyscale":
            gen = cv2.cvtColor(gen, cv2.COLOR_BGR2GRAY).reshape(
                (gen.shape[0], gen.shape[1], 1))
            gen = np.concatenate([gen, gen, gen], axis=2)
        elif f == "sepia":
            sepia_kernel = np.array([[.272, .534, .131], [.349, .686, .168],
                                     [.393, .769, .189]])
            gen = cv2.transform(gen, sepia_kernel)
        else:
            raise Exception('filter not found')

    return gen


bag_file = sys.argv[1]
topic_sel = sys.argv[2]
filters = sys.argv[3].split(":")
bag_out = rosbag.Bag(sys.argv[4], mode="w")

bag = rosbag.Bag(bag_file)

for topic, msg, t in bag.read_messages():

    if topic == topic_sel:
        img = rgb_from_ros(msg)
        img = instagram(img, filters)
        msg = d8_compressed_image_from_cv_image(img)
        bag_out.write(topic=topic, msg=msg, t=t)

bag.close()
bag_out.close()
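A hypothetical invocation of this script (the script, topic, and bag names are placeholders; filters are colon-separated, matching sys.argv[3].split(":")):

    python instagram_bag.py input.bag /camera_node/image/compressed flip-vertical:sepia output.bag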
Example #13
    def callback(self, image):
        if not self.active:
            return

        if not self.thread_lock.acquire(False):
            return

        #start = time.time()
        obst_list = PoseArray()
        marker_list = MarkerArray()

        # pass RECTIFIED IMAGE TO DETECTOR MODULE
        #1. EXTRACT OBSTACLES and return the pose array
        obst_list = self.detector.process_image(
            rectify(rgb_from_ros(image), self.intrinsics))

        obst_list.header.stamp = image.header.stamp  #for synchronization
        #interesting for checking whether there is a traffic jam or not!!!!
        #print image.header.stamp.to_sec()
        self.publisher_arr.publish(obst_list)
        #EXPLANATION: (x,y) is world coordinates of obstacle, z is radius of obstacle
        #QUATERNION HAS NO MEANING!!!!

        #3. VISUALIZE POSE ARRAY IN TF
        if (self.show_marker):
            marker_list = self.visualizer.visualize_marker(obst_list)
            self.publisher_marker.publish(marker_list)

        #4. EVENTUALLY DISPLAY !!!!CROPPED!!!!!! IMAGE
        if (self.show_image):
            obst_image = CompressedImage()
            obst_image.header.stamp = image.header.stamp
            obst_image.format = "jpeg"
            obst_image.data = self.visualizer.visualize_image(
                rectify(rgb_from_ros(image), self.intrinsics), obst_list)
            #here i want to display cropped image
            rgb_image = rgb_from_ros(obst_image.data)
            obst_image.data = d8_compressed_image_from_cv_image(
                rgb_image[self.detector.crop:, :, ::-1]).data
            #THIS part only to visualize the cropped version -> somehow a little inefficient but keeps
            #the visualizer.py modular!!!
            self.publisher_img.publish(obst_image)

        if (self.show_bird_perspective):
            bird_perspective_image = CompressedImage()
            bird_perspective_image.header.stamp = image.header.stamp
            bird_perspective_image.format = "jpeg"
            bird_perspective_image.data = self.visualizer.visualize_bird_perspective(
                rectify(rgb_from_ros(image), self.intrinsics), obst_list)
            #here i want to display the bird's-eye image
            rgb_image = rgb_from_ros(bird_perspective_image.data)
            bird_perspective_image.data = d8_compressed_image_from_cv_image(rgb_image).data
            #THIS part only to re-encode the visualization -> somehow a little inefficient but keeps
            #the visualizer.py modular!!!
            self.publisher_img_bird_perspective.publish(bird_perspective_image)

        #end = time.time()
        #print "OBST DETECTION TOOK: s"
        #print(end - start)
        self.r.sleep()
        self.thread_lock.release()
Example #14
def callback(msg):
    img = rgb_from_ros(msg)
    gen = instagram(img, filter_lst)
    newmsg = d8_compressed_image_from_cv_image(gen)
    publisher.publish(newmsg)
Example #15
while (True):
    filename = im_path + '/' + str(nummer) + '.jpg'
    im1 = cv2.imread(filename)  #reads BGR
    if (im1 is None):
        #stop it!
        break

    else:  #START MODIFYING THE IMAGE!!!
        #-------------HERE GOES THE REAL CODE-----------------------------------------------------------
        #-----------------------------------------------------------------------------------------------

        obst_list = detector.process_image(rectify(im1[:, :, ::-1],
                                                   intrinsics))
        obst_image = CompressedImage()
        obst_image.format = "jpeg"
        obst_image.data = visualizer.visualize_image(
            rectify(im1[:, :, ::-1], intrinsics), obst_list)
        #here i want to display cropped image
        image = rgb_from_ros(obst_image.data)
        image = image[crop:, :, :]
        #THIS part only to visualize the cropped version -> somehow a little inefficient but keeps
        #the visualizer.py modular!!!
        #plt.imshow(image[detector.crop:,:,:]);plt.show() #the cropped image
        #plt.imshow(image);plt.show()                     #normal sized image
        #SAVE THE IMAGE
        conv = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        cv2.imwrite(dir_path + '/' + str(nummer) + '.jpg', conv)
        nummer += 1

print "FERTIG"
os.system("rosnode kill obstacle_detection_node")
Example #16
    def callback(self, data):
        print("Call back")

        center_circle = (0, 0)

        myimage = rgb_from_ros(data)
        frame = myimage
        #myimage = myimage[100:350, 100:1000]
        cv2.imwrite("cropped_ducks.jpg", myimage)
        #im = Image.open("Shantha_duck.jpg")
        im = frame
        frame = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)
        # Convert BGR to HSV
        hsv_blue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # define range of yellow color in HSV
        # NOTE: the saturation bounds are inverted (97 > 86), so inRange() below
        # returns an empty mask as written
        lower_blue = np.array([51, 97, 100])
        upper_blue = np.array([56, 86, 100])
        # Threshold the HSV image to get only blue colors
        mask_blue = cv2.inRange(hsv_blue, lower_blue, upper_blue)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=mask_blue)
        res = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
        cv2.imwrite('frame.jpg', frame)
        cv2.imwrite('mask.jpg', mask_blue)
        hsv_blue[mask_blue > 0] = ([56, 86, 100])
        cv2.imwrite('hsv_yellow.jpg', hsv_blue)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)
        rows = gray.shape[0]
        circles = cv2.HoughCircles(gray,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   rows / 8,
                                   param1=100,
                                   param2=30,
                                   minRadius=1,
                                   maxRadius=60)
        circles_frame = frame.copy()
        # ensure at least some circles were found
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                center_circle = center
                # circle center
                cv2.circle(circles_frame, center, 1, (0, 100, 100), 3)
                # circle outline
                radius = i[2]
                cv2.circle(circles_frame, center, radius, (255, 0, 255), 3)

        def find_marker(image):

            # convert the image to grayscale, blur it, and detect edges
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            edged = cv2.Canny(gray, 35, 125)
            cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            print("I found {} matching shapes".format(len(cnts)))
            c = max(cnts, key=cv2.contourArea)
            return cv2.minAreaRect(c)

        def find_marker_altered(image):
            # blur image, and detect edges
            gray = cv2.GaussianBlur(image, (5, 5), 0)
            edged = cv2.Canny(gray, 35, 125)
            cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            print("I found {} matching shapes".format(len(cnts)))
            return cnts

        def find_distance(c):
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            immat = im.load()
            (X, Y) = im.size
            m = np.zeros((X, Y))  #Is this needed?
            m = np.sum(np.asarray(im), -1) < 255 * 3
            m = m / np.sum(np.sum(m))

            dx = np.sum(m, 0)
            dy = np.sum(m, 1)
            # expected values
            new_frame = frame.copy()
            middle_of_image_x = np.sum(dx * np.arange(X))
            middle_of_image_y = np.sum(dy * np.arange(Y))
            print("x, y", cX, cY)
            print(middle_of_image_x, middle_of_image_y)
            print("X, Y")
            obj_center_dist = dist.euclidean(
                (cX, cY), (middle_of_image_x, middle_of_image_y))
            print(obj_center_dist)
            # draw the contour and center of the shape on the image
            cv2.drawContours(new_frame, [c], -1, (0, 255, 0), 2)
            cv2.circle(new_frame, (cX, cY), 7, (255, 255, 255), -1)

            cv2.putText(
                new_frame,
                str(cX - middle_of_image_x),
                (cX - 20, cY - 20),  #str(obj_center_dist) + 
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (255, 255, 255),
                2)
            cv2.imwrite("new_distance_image" + str(c[1]) + ".jpg", new_frame)

            # show the image

        def distance_to_camera(knownWidth, focalLength, perWidth):
            return (knownWidth * focalLength) / perWidth

        KNOWN_DISTANCE = 12.0
        KNOWN_WIDTH = 1.5
        image = cv2.imread("duckie_calibration.jpg")

        marker = find_marker(image)
        focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
        ###############

        cnts = find_marker_altered(hsv_blue)
        cnts = list(sorted(cnts, key=cv2.contourArea, reverse=True))[:30]  #22
        detected_obj_list = []

        for c in cnts:
            bbox = cv2.boundingRect(c)
            x, y, w, h = bbox
            if w < 20 or h < 45 or w * h < 20:  # h < 50  or w > 1000 do h < 35 to get perfect detection on test_ducks, h < 50 for Shantha_duck
                pass
            else:
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)
                print("Hi", len(approx))
                if len(approx) == 4:
                    pass
                if len(approx) > 4 and len(approx) <= 12:
                    distance_param = cv2.pointPolygonTest(
                        c, center_circle, False)
                    print(distance_param)
                    #if distance_param == 1.0:
                    marker = approx
                    #else: #See if you can refine this further!!
                    detected_obj_list.append(marker)
                    inches = distance_to_camera(KNOWN_WIDTH, focalLength,
                                                marker[1][0])
                    cv2.drawContours(frame, [marker], -1, (0, 255, 0),
                                     2)  #0, 255, 0
                    print([marker])
                    #find_distance(c)
        self.num_obj_detected = len(detected_obj_list)
        #self.num_obj_detected = num_obj_detected
        #robot = Robot.Robot(left_trim=0, right_trim=0)
        #if num_ob_detected >=1:
        #    robot.forward(150, 1.0)
        #rospy.spin()
        #rate = rospy.Rate(10)
        #print("num obj detected", self.num_obj_detected)
        #if self.num_obj_detected == 0:
        #robot.forward(100, 30.5)
        #if self.num_obj_detected >= 1:
        #robot.stop()

        #image_to_write = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #try:
        #    os.remove("hi_newest_edge_detected.jpg")
        #except:
        #    pass
        #cv2.imwrite( "hi_newest_edge_detected.jpg", image_to_write)
        print("Done!")
Example #17
import argparse
import os
import shutil

import cv2
import numpy as np
import rosbag

import duckietown_utils

# Example usage: ./sample_images.py path_to_bag /scbb/camera_rectifier/image/compressed 500

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Sample images from ROS Bags.')
    parser.add_argument('bag_path', help='path to the ROS Bag')
    parser.add_argument('topic', help='the topic for image sampling')
    parser.add_argument('n_imgs',
                        help='the approximate number of images to extract')
    parser.add_argument('--output_dir',
                        default='./sampled_images',
                        help='output directory for the sampled images')
    args = parser.parse_args()
    if os.path.exists(args.output_dir):
        shutil.rmtree(args.output_dir, ignore_errors=True)
    os.makedirs(args.output_dir)
    rb = rosbag.Bag(args.bag_path)
    n_imgs = float(args.n_imgs)
    p = n_imgs / rb.get_message_count(args.topic)
    img_no = 0
    for msg in rb.read_messages(args.topic):
        if np.random.rand() < p:
            cv2.imwrite(
                os.path.join(
                    args.output_dir, 'sample_{{:0{}d}}.png'.format(
                        int(np.floor(np.log10(n_imgs))) + 1).format(img_no)),
                duckietown_utils.rgb_from_ros(msg.message)[..., ::-1])
            img_no += 1
Example #18
    def get_the_image(data):
        myimage = rgb_from_ros(data)
        return myimage
Example #19
        msg_out.data = du.d8_compressed_image_from_cv_image(im_mod_bgr, msg_in).data

        return msg_out


### --- Testing --- #
if __name__ == "__main__":
    # Load
    im = cv2.imread("sunset.jpg", cv2.IMREAD_COLOR)
    msg_in = CompressedImage()

    # Not manipulated
    msg_in.data = du.d8_compressed_image_from_cv_image(im, msg_in).data
    inst = Instagram("")
    msg_out = inst.processMsg(msg_in)
    im_out_rgb = du.rgb_from_ros(msg_out)
    im_out_bgr = cv2.cvtColor(im_out_rgb, cv2.COLOR_RGB2BGR)
    cv2.imwrite("untouched.jpg", im_out_bgr)

    # Flip vertically
    msg_in.data = du.d8_compressed_image_from_cv_image(im, msg_in).data
    inst = Instagram("flip-vertical")
    msg_out = inst.processMsg(msg_in)
    im_out_rgb = du.rgb_from_ros(msg_out)
    im_out_bgr = cv2.cvtColor(im_out_rgb, cv2.COLOR_RGB2BGR)
    cv2.imwrite("vflip.jpg", im_out_bgr)

    # Flip horizontally
    msg_in.data = du.d8_compressed_image_from_cv_image(im, msg_in).data
    inst = Instagram("flip-horizontal")
    msg_out = inst.processMsg(msg_in)
    im_out_rgb = du.rgb_from_ros(msg_out)
    im_out_bgr = cv2.cvtColor(im_out_rgb, cv2.COLOR_RGB2BGR)
    cv2.imwrite("hflip.jpg", im_out_bgr)
Example #20
    def callback(self, data):
        #print("Call back")

        center_circle = (0, 0)
        myimage = rgb_from_ros(data)
        cv2.imwrite("original_ducks.jpg", myimage)
        # Shantha: Full image size i captured is 1280 x 720.
        # Crop to rows 100:500 and columns 80:800 (a 400 x 720 window)
        myimage = myimage[100:500, 80:800]
        cv2.imwrite("cropped_ducks.jpg", myimage)

        frame = myimage
        im = frame
        frame = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)

        # Convert BGR to HSV
        hsv_blue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # define range of yellow color in HSV
        # NOTE: the saturation bounds are inverted (97 > 86), so inRange() below
        # returns an empty mask as written
        lower_blue = np.array([51, 97, 100])
        upper_blue = np.array([56, 86, 100])

        # Threshold the HSV image to get only blue colors
        mask_blue = cv2.inRange(hsv_blue, lower_blue, upper_blue)

        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=mask_blue)
        res = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
        cv2.imwrite('frame.jpg', frame)
        cv2.imwrite('mask.jpg', mask_blue)
        hsv_blue[mask_blue > 0] = ([56, 86, 100])
        cv2.imwrite('hsv_yellow.jpg', hsv_blue)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.medianBlur(gray, 5)
        rows = gray.shape[0]
        circles = cv2.HoughCircles(gray,
                                   cv2.HOUGH_GRADIENT,
                                   1,
                                   rows / 8,
                                   param1=100,
                                   param2=30,
                                   minRadius=1,
                                   maxRadius=60)
        circles_frame = frame.copy()

        # ensure at least some circles were found
        if circles is not None:
            circles = np.uint16(np.around(circles))
            for i in circles[0, :]:
                center = (i[0], i[1])
                center_circle = center
                # circle center
                cv2.circle(circles_frame, center, 1, (0, 100, 100), 3)
                # circle outline
                radius = i[2]
                cv2.circle(circles_frame, center, radius, (255, 0, 255), 3)
                cv2.imwrite("new_circle.jpg", circles_frame)

        def find_marker(image):
            # convert the image to grayscale, blur it, and detect edges
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)
            edged = cv2.Canny(gray, 35, 125)

            # find the contours in the edged image and keep the largest one;
            # we'll assume that this is our piece of paper in the image
            cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)
            c = max(cnts, key=cv2.contourArea)

            # compute the bounding box of the of the paper region and return it
            return cv2.minAreaRect(c)

        def distance_to_camera(knownWidth, focalLength, perWidth):
            # compute and return the distance from the maker to the camera
            return (knownWidth * focalLength) / perWidth

        def duckie_distance_detection():
            # initialize the known distance from the camera to the object, which
            # in this case is 12 inches
            KNOWN_DISTANCE = 12.0

            # initialize the known object width: the duckie is 1.5 inches wide
            KNOWN_WIDTH = 1.5

            # load the first image that contains an object at the known distance
            # from our camera, then find the marker in the image, and initialize
            # the focal length
            image = cv2.imread("duckie_calibration.jpg")
            marker = find_marker(image)
            focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH

            # loop over the images
            # load the image, find the marker in the image, then compute the
            # distance to the marker from the camera
            marker = find_marker(circles_frame)
            inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])

            # draw a bounding box around the image and display it
            box = cv2.cv.BoxPoints(
                marker) if imutils.is_cv2() else cv2.boxPoints(marker)
            box = np.int0(box)
            cv2.drawContours(circles_frame, [box], -1, (0, 255, 0), 2)
            cv2.putText(
                circles_frame, "%.2fft" % (inches / 12),
                (circles_frame.shape[1] - 200, circles_frame.shape[0] - 20),
                cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 255, 0), 3)
            cv2.imwrite("distance_image.jpg", circles_frame)
            return inches / 12

        # ensure at least some circles were found
        distance_from_obj_ft = duckie_distance_detection()
        if circles is not None:
            self.num_obj_detected = 1
            print("Done! Found duckies")
            #sys.exit()
            #self.robot.stop()
            if distance_from_obj_ft <= 5.0:
                self.robot.right(100, 0.5)
            else:
                self.robot.left(100, 3.5)

            #self.robot.forward(100, 5.5)
            #sys.exit()
        else:
            self.num_obj_detected = 0
            self.robot.forward(75, 5.5)
            #self.robot.stop()
            print("Done! no ducky")
Example #21
    def process_log(self, bag_in, prefix_in, bag_out, prefix_out,
                    utils):  #@UnusedVariable
        log_name = utils.get_log().log_name

        vehicle_name = dtu.which_robot(bag_in)

        dtu.logger.info('Vehicle name: %s' % vehicle_name)

        gp = GroundProjection(vehicle_name)

        topic = dtu.get_image_topic(bag_in)

        bgcolor = dtu.ColorConstants.BGR_DUCKIETOWN_YELLOW

        sequence = bag_in.read_messages_plus(topics=[topic])
        for _i, mp in enumerate(sequence):

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(mp.msg))

            res, stats = run_pipeline(bgr,
                                      gp=gp,
                                      line_detector_name=self.line_detector,
                                      image_prep_name=self.image_prep,
                                      lane_filter_name=self.lane_filter,
                                      anti_instagram_name=self.anti_instagram,
                                      all_details=self.all_details)

            rect = (480, 640) if not self.all_details else (240, 320)
            res = dtu.resize_images_to_fit_in_rect(res, rect, bgcolor=bgcolor)

            print('abs: %s  window: %s  from log: %s' %
                  (mp.time_absolute, mp.time_window,
                   mp.time_from_physical_log_start))
            headers = [
                "Robot: %s log: %s time: %.2f s" %
                (vehicle_name, log_name, mp.time_from_physical_log_start),
                "Algorithms | color correction: %s | preparation: %s | detector: %s | filter: %s"
                % (
                    self.anti_instagram,
                    self.image_prep,
                    self.line_detector,
                    self.lane_filter,
                )
            ]

            res = dtu.write_bgr_images_as_jpgs(res,
                                               dirname=None,
                                               bgcolor=bgcolor)

            cv_image = res['all']

            for head in reversed(headers):
                max_height = 35
                cv_image = dtu.add_header_to_bgr(cv_image,
                                                 head,
                                                 max_height=max_height)

            otopic = "all"

            omsg = dtu.d8_compressed_image_from_cv_image(
                cv_image, same_timestamp_as=mp.msg)
            t = rospy.Time.from_sec(mp.time_absolute)  # @UndefinedVariable
            print('written %r at t = %s' % (otopic, t.to_sec()))
            bag_out.write(prefix_out + '/' + otopic, omsg, t=t)

            for name, value in stats.items():
                utils.write_stat(prefix_out + '/' + name, value, t=t)
Example #22
    def go(self):
        robot_name = dtu.get_current_robot_name()

        output = self.options.output
        if output is None:
            output = 'out-calibrate-extrinsics'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.input is None:

            print("{}\nCalibrating using the ROS image stream...\n".format(
                "*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', robot_name,
                                      'camera_node/image/compressed')
            print('Topic to listen to is: %s' % topic_name)

            print('Let\'s wait for an image. Say cheese!')

            # Dummy for getting a ROS message
            rospy.init_node('calibrate_extrinsics')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\nDidn\'t get any message!: %s\n MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!\n\n\n'
                    % (e, ))
                return  # no image, nothing to calibrate

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            self.info('Picture taken: %s ' % str(bgr.shape))

        else:
            self.info('Loading input image %s' % self.options.input)
            bgr = dtu.bgr_from_jpg_fn(self.options.input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            self.info('Resized to: %s ' % str(bgr.shape))

        # Disable the old calibration file
        disable_old_homography(robot_name)

        camera_info = get_camera_info_for_robot(robot_name)
        homography_dummy = get_homography_default()
        gpg = GroundProjectionGeometry(camera_info, homography_dummy)

        res = OrderedDict()
        try:
            bgr_rectified = gpg.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res['bgr'] = bgr
            res['bgr_rectified'] = bgr_rectified

            _new_matrix, res['rectified_full_ratio_auto'] = gpg.rectify_full(
                bgr, ratio=1.65)
            result = estimate_homography(bgr_rectified)
            dtu.check_isinstance(result, HomographyEstimationResult)

            if result.bgr_detected_refined is not None:
                res['bgr_detected_refined'] = result.bgr_detected_refined

            if not result.success:
                raise Exception(result.error)

            save_homography(result.H, robot_name)

            msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
            self.info(msg)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
Example #23
    def go(self):
        robot_name = dtu.get_current_robot_name()

        output = self.options.output
        if output is None:
            output = 'out-calibrate-extrinsics'  #  + dtu.get_md5(self.options.image)[:6]
            self.info('No --output given, using %s' % output)

        if self.options.input is None:

            print("{}\nCalibrating using the ROS image stream...\n".format(
                "*" * 20))
            import rospy
            from sensor_msgs.msg import CompressedImage

            topic_name = os.path.join('/', robot_name,
                                      'camera_node/image/compressed')
            print('Topic to listen to is: %s' % topic_name)

            print('Let\'s wait for an image. Say cheese!')

            # Dummy for getting a ROS message
            rospy.init_node('calibrate_extrinsics')
            img_msg = None
            try:
                img_msg = rospy.wait_for_message(topic_name,
                                                 CompressedImage,
                                                 timeout=10)
                print('Image captured!')
            except rospy.ROSException as e:
                print(
                    '\n\n\n'
                    'Didn\'t get any message!: %s\n '
                    'MAKE SURE YOU USE DT SHELL COMMANDS OF VERSION 4.1.9 OR HIGHER!'
                    '\n\n\n' % (e, ))
                return  # no image, nothing to calibrate

            bgr = dtu.bgr_from_rgb(dtu.rgb_from_ros(img_msg))
            print('Picture taken: %s ' % str(bgr.shape))

        else:
            print('Loading input image %s' % self.options.input)
            bgr = dtu.bgr_from_jpg_fn(self.options.input)

        if bgr.shape[1] != 640:
            interpolation = cv2.INTER_CUBIC
            bgr = dtu.d8_image_resize_fit(bgr, 640, interpolation)
            print('Resized to: %s ' % str(bgr.shape))
        # Disable the old calibration file
        print("Disableing old homography")
        disable_old_homography(robot_name)
        print("Obtaining camera info")
        try:
            camera_info = get_camera_info_for_robot(robot_name)
        except Exception as E:
            print("Error on obtaining camera info!")
            print(E)
        print("Get default homography")
        try:
            homography_dummy = get_homography_default()
        except Exception as E:
            print("Error on getting homography")
            print(E)
        print("Rectify image")
        try:
            rect = Rectify(camera_info)
        except Exception as E:
            print("Error rectifying image!")
            print(E)
        print("Calculate GPG")
        try:
            gpg = GroundProjectionGeometry(camera_info.width,
                                           camera_info.height,
                                           homography_dummy.reshape((3, 3)))
        except Exception as E:
            print("Error calculating GPG!")
            print(E)
        print("Ordered Dict")
        res = OrderedDict()
        try:
            bgr_rectified = rect.rectify(bgr, interpolation=cv2.INTER_CUBIC)

            res['bgr'] = bgr
            res['bgr_rectified'] = bgr_rectified

            _new_matrix, res['rectified_full_ratio_auto'] = rect.rectify_full(
                bgr, ratio=1.65)

            (result_gpg, status) = gpg.estimate_homography(bgr_rectified)

            if status is not None:
                raise Exception(status)

            save_homography(result_gpg.H, robot_name)
            msg = '''

To check that this worked well, place the robot on the road, and run:

    rosrun complete_image_pipeline single_image

Look at the produced jpgs.

'''
            print(msg)
        except Exception as E:
            print(E)
        finally:
            dtu.write_bgr_images_as_jpgs(res, output)
Example #24
  def callback(self,data):
    #print("Call back")

    center_circle = (0,0)
    myimage = rgb_from_ros(data)
    # Crop to rows 100:500 and columns 80:800 (a 400 x 720 window)
    myimage = myimage[100:500, 80:800]
    # Shantha: Full image size i captured is 1280 x 720.
    #myimage = myimage[40:1240, 420:720]
    cv2.imwrite("cropped_ducks.jpg", myimage)

    frame = myimage
    im = frame
    frame = cv2.cvtColor(myimage, cv2.COLOR_RGB2BGR)

    # Convert BGR to HSV
    hsv_blue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

    # define range of yellow color in HSV
    # NOTE: the saturation bounds are inverted (97 > 86), so inRange() below
    # returns an empty mask as written
    lower_blue = np.array([51, 97, 100])
    upper_blue = np.array([56, 86, 100])

    # Threshold the HSV image to get only blue colors
    mask_blue = cv2.inRange(hsv_blue, lower_blue, upper_blue)

    # Bitwise-AND mask and original image
    res = cv2.bitwise_and(frame, frame, mask=mask_blue)
    res = cv2.cvtColor(res, cv2.COLOR_HSV2BGR)
    cv2.imwrite('frame.jpg',frame)
    cv2.imwrite('mask.jpg',mask_blue)
    hsv_blue[mask_blue > 0] = ([56, 86, 100])
    cv2.imwrite('hsv_yellow.jpg',hsv_blue)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.medianBlur(gray, 5)
    rows = gray.shape[0]
    circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, rows / 8,
                               param1=100, param2=30,
                               minRadius=1, maxRadius=60)
    circles_frame = frame.copy()
    # ensure at least some circles were found
    if circles is not None:
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center = (i[0], i[1])
            center_circle = center
            # circle center
            cv2.circle(circles_frame, center, 1, (0, 100, 100), 3)
            # circle outline
            radius = i[2]
            cv2.circle(circles_frame, center, radius, (255, 0, 255), 3)
            cv2.imwrite("circle.jpg", circles_frame)

    # ensure at least some circles were found
    if circles is not None:
        self.num_obj_detected = 1
        print("Done! Found duckies")
        #sys.exit()
        #self.robot.stop()
        self.robot.right(100, 0.5)
        #self.robot.forward(100, 5.5)
        #sys.exit()
    else:
        self.num_obj_detected = 0
        self.robot.forward(75, 5.5)
        #self.robot.stop()
        print("Done! no ducky")