Example #1
    def detect_video(self, yolo, video_path):
        from PIL import Image, ImageFont, ImageDraw
        #Start ROS node
        # pub, pub_flag, pub_track, pub_frame1, pub_frame2 = start_node()
        pub, pub_flag, pub_frame1, pub_frame2 = start_node()
        vid = RealsenseCapture()
        vid.start()
        bridge = CvBridge()

        accum_time = 0
        curr_fps = 0
        fps = "FPS: ??"
        prev_time = timer()
        worldy = 0.0
        flag = 0

        while True:
            # pub_track.publish(0)
            # flag = 0
            if self.garbage_in_can == 1:
                # print("The garbage has been thrown away")
                flag = 0
            pub_flag.publish(flag)
            ret, frames, _ = vid.read()
            frame = frames[0]
            depth_frame = frames[1]
            image = Image.fromarray(frame)
            image, bottle, person, right, left, bottom, top, right2, left2, bottom2, top2 = yolo.detect_image(
                image, pub)
            result = np.asarray(image)
            curr_time = timer()
            exec_time = curr_time - prev_time
            prev_time = curr_time
            accum_time = accum_time + exec_time
            curr_fps = curr_fps + 1
            if accum_time > 1:
                accum_time = accum_time - 1
                fps = "FPS: " + str(curr_fps)
                curr_fps = 0
            cv2.putText(result,
                        text=fps,
                        org=(3, 15),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50,
                        color=(255, 0, 0),
                        thickness=2)
            # cv2.imshow("result", result)
            yolo_frame = bridge.cv2_to_imgmsg(result, "bgr8")
            pub_frame1.publish(yolo_frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

            if (bottle == False) or (person == False):
                continue

            # ------------------------------Tracking-----------------------------------
            # tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
            # tracker_type = tracker_types[7]
            tracker = cv2.TrackerCSRT_create()
            tracker2 = cv2.TrackerCSRT_create()

            # setup initial location of window
            left, right, top, bottom = left, right, top, bottom
            r, h, ci, w = top, bottom - top, left, right - left  # row, height, column, width of the bottle box
            frame_b, frame_g, frame_r = frame[:, :, 0], frame[:, :,
                                                              1], frame[:, :,
                                                                        2]
            hist_b = cv2.calcHist([frame_b[top:bottom, left:right]], [0], None,
                                  [256], [0, 256])
            hist_g = cv2.calcHist([frame_g[top:bottom, left:right]], [0], None,
                                  [256], [0, 256])
            hist_r = cv2.calcHist([frame_r[top:bottom, left:right]], [0], None,
                                  [256], [0, 256])
            cv2.normalize(hist_b, hist_b, 0, 255, cv2.NORM_MINMAX)
            cv2.normalize(hist_g, hist_g, 0, 255, cv2.NORM_MINMAX)
            cv2.normalize(hist_r, hist_r, 0, 255, cv2.NORM_MINMAX)
            track_window = (ci, r, w, h)

            r2, h2, ci2, w2 = top2, bottom2 - top2, left2, right2 - left2  # row, height, column, width of the person box
            hist_bp = cv2.calcHist([frame_b[top2:bottom2, left2:right2]], [0],
                                   None, [256], [0, 256])
            hist_gp = cv2.calcHist([frame_g[top2:bottom2, left2:right2]], [0],
                                   None, [256], [0, 256])
            hist_rp = cv2.calcHist([frame_r[top2:bottom2, left2:right2]], [0],
                                   None, [256], [0, 256])
            cv2.normalize(hist_bp, hist_bp, 0, 255, cv2.NORM_MINMAX)
            cv2.normalize(hist_gp, hist_gp, 0, 255, cv2.NORM_MINMAX)
            cv2.normalize(hist_rp, hist_rp, 0, 255, cv2.NORM_MINMAX)
            track_window2 = (ci2, r2, w2, h2)
            cv2.imwrite('bottledetect.jpg', frame[r:r + h, ci:ci + w])
            cv2.imwrite('persondetect.jpg', frame[r2:r2 + h2, ci2:ci2 + w2])

            # set up the ROI for tracking
            roi = frame[r:r + h, ci:ci + w]
            hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv_roi, np.array((0., 60., 32.)),
                               np.array((180., 255., 255.)))
            roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0, 180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)

            # Setup the termination criteria, either 10 iteration or move by atleast 1 pt
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                         1)

            ok = tracker.init(frame, track_window)
            ok2 = tracker2.init(frame, track_window2)
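            # Both CSRT trackers are initialised with ROIs in (x, y, width, height) order, i.e. (ci, r, w, h).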

            track_thing = 0  #bottle
            pts = Point()
            pts2 = Point()
            untrack = 0
            self.emergency_stop = 0
            # flag = 0
            # pub_flag.publish(flag)

            while (1):
                ret, frames, depth = vid.read()
                frame = frames[0]
                depth_frame = frames[1]

                if ret == True:
                    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
                    dst = cv2.calcBackProject([hsv], [0], roi_hist, [0, 180],
                                              1)

                    # apply meanshift to get the new location
                    ok, track_window = tracker.update(frame)
                    x, y, w, h = track_window

                    ok, track_window2 = tracker2.update(frame)
                    x2, y2, w2, h2 = track_window2

                    # Draw it on image
                    img2 = cv2.rectangle(frame, (x, y), (x + w, y + h), 255, 2)
                    if not track_thing:
                        img2 = cv2.rectangle(img2, (x2, y2),
                                             (x2 + w2, y2 + h2), 255, 2)
                    else:
                        img2 = cv2.rectangle(img2, (x2, y2),
                                             (x2 + w2, y2 + h2), (0, 0, 255),
                                             2)
                    # cv2.imshow('Tracking',img2)
                    tracking_frame = bridge.cv2_to_imgmsg(img2, "bgr8")
                    pub_frame2.publish(tracking_frame)

                    # https://www.intelrealsense.com/wp-content/uploads/2020/06/Intel-RealSense-D400-Series-Datasheet-June-2020.pdf
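                    # Average the depth over a 3x3 pixel window around the bottle box centre,
                    # skipping zero (invalid) readings and clamping the sample coordinates to the 640x480 frame.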
                    total, cnt = 0, 0
                    for i in range(3):
                        for j in range(3):
                            dep = depth.get_distance(
                                np.maximum(0, np.minimum(i + x + w // 2, 639)),
                                np.maximum(0, np.minimum(j + y + h // 2, 479)))
                            if (dep) != 0:
                                total += dep
                                cnt += 1
                    if cnt != 0:
                        worldz = total / cnt
                        # To keep enough distance so the robot does not bump into the person
                        if (worldz < 0.6) or (worldz > 3.0):
                            worldz = 0
                    else:
                        worldz = 0

                    total2, cnt2 = 0, 0
                    for i in range(3):
                        for j in range(3):
                            dep2 = depth.get_distance(
                                np.maximum(0,
                                           np.minimum(i + x2 + w2 // 2, 639)),
                                np.maximum(0,
                                           np.minimum(j + y2 + h2 // 2, 479)))
                            if dep2 != 0:
                                total2 += dep2
                                cnt2 += 1
                    if cnt2 != 0:
                        worldz2 = total2 / cnt2
                        if (worldz2 < 0.6) or (worldz2 > 3.0):
                            worldz2 = 0
                    else:
                        worldz2 = 0

                    # print('worldz', worldz)
                    # print('worldz2', worldz2)
                    # if (worldz == 0) or (worldz2 == 0):
                    if worldz2 == 0:
                        # break
                        worldx, worldy = 0, 0
                        # worldx = 0
                        pts.x, pts.y, pts.z = 0.0, 0.0, 0.0
                        worldx2, worldy2 = 0, 0
                        pts2.x, pts2.y, pts2.z = 0.0, 0.0, 0.0
                    else:
                            # focal length = 1.93 mm, baseline between the depth cameras = about 5 cm, pixel size = 3 um
                        if (track_thing == 0) and (not worldz == 0):
                            #bottle Tracking
                            u_ud = (0.05 * 1.88 * 10**(-3)) / (3 * 10**(-6) *
                                                               worldz)
                            # print('u_ud', u_ud)
                            # print('x, y =', (x+w//2)-(img2.shape[1]//2), (img2.shape[0]//2)-(y+h//2))
                            # The -0.3*u_ud term accounts for the physical offset between the depth camera and the colour camera
                            # These coordinates are referenced to the left depth camera (as seen when facing the object)
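                            # u_ud above is the expected stereo disparity in pixels: baseline (0.05 m) * focal length / (pixel size * depth);
                            # dividing the pixel offset from the image centre by u_ud and multiplying by the baseline converts it to metres.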
                            worldx = 0.05 * (x + w // 2 -
                                             (img2.shape[1] // 2) -
                                             0.3 * u_ud) / u_ud
                            worldy = 0.05 * ((img2.shape[0] // 2) -
                                             (y + h)) / u_ud
                            print('x,y,z = ', worldx, worldy, worldz - 0.6)
                            pts.y, pts.z, pts.x = -1.0 * float(worldx), float(
                                worldy), float(worldz) - 0.6
                            # pts.y, pts.z, pts.x = float(0.0), float(worldy), float(1.0)

                        else:
                            #human Tracking
                            # if worldz==0:
                            #     worldx, worldy = 0, 0
                            #     pts.x, pts.y, pts.z = 0.0, 0.0, 0.0
                            u_ud = (0.05 * 1.88 * 10**(-3)) / (3 * 10**(-6) *
                                                               worldz2)
                            worldx2 = 0.05 * (x2 + w2 // 2 -
                                              (img2.shape[1] // 2) -
                                              0.3 * u_ud) / u_ud
                            worldy2 = 0.05 * ((img2.shape[0] // 2) -
                                              (y2 + h2)) / u_ud
                            print('x2,y2,z2 = ', worldx2, worldy2,
                                  worldz2 - 0.6)
                            pts2.y, pts2.z, pts2.x = -1.0 * float(
                                worldx2), float(worldy2), float(worldz2) - 0.6
                            # pts.y, pts.z, pts.x = float(0.0), float(worldy), float(1.0)
                            if worldz == 0:
                                worldx, worldy = 0, 0
                                pts.x, pts.y, pts.z = 0.0, 0.0, 0.0

                    print("track_thing = ", track_thing)

                    frame_b, frame_g, frame_r = frame[:, :,
                                                      0], frame[:, :,
                                                                1], frame[:, :,
                                                                          2]
                    hist_b2 = cv2.calcHist([frame_b[y:y + h, x:x + w]], [0],
                                           None, [256], [0, 256])
                    hist_g2 = cv2.calcHist([frame_g[y:y + h, x:x + w]], [0],
                                           None, [256], [0, 256])
                    hist_r2 = cv2.calcHist([frame_r[y:y + h, x:x + w]], [0],
                                           None, [256], [0, 256])
                    cv2.normalize(hist_b2, hist_b2, 0, 255, cv2.NORM_MINMAX)
                    cv2.normalize(hist_g2, hist_g2, 0, 255, cv2.NORM_MINMAX)
                    cv2.normalize(hist_r2, hist_r2, 0, 255, cv2.NORM_MINMAX)
                    hist_bp2 = cv2.calcHist([frame_b[y2:y2 + h2, x2:x2 + w2]],
                                            [0], None, [256], [0, 256])
                    hist_gp2 = cv2.calcHist([frame_g[y2:y2 + h2, x2:x2 + w2]],
                                            [0], None, [256], [0, 256])
                    hist_rp2 = cv2.calcHist([frame_r[y2:y2 + h2, x2:x2 + w2]],
                                            [0], None, [256], [0, 256])
                    cv2.normalize(hist_bp2, hist_bp2, 0, 255, cv2.NORM_MINMAX)
                    cv2.normalize(hist_gp2, hist_gp2, 0, 255, cv2.NORM_MINMAX)
                    cv2.normalize(hist_rp2, hist_rp2, 0, 255, cv2.NORM_MINMAX)
                    comp_b = cv2.compareHist(hist_b, hist_b2,
                                             cv2.HISTCMP_CORREL)
                    comp_g = cv2.compareHist(hist_g, hist_g2,
                                             cv2.HISTCMP_CORREL)
                    comp_r = cv2.compareHist(hist_r, hist_r2,
                                             cv2.HISTCMP_CORREL)
                    comp_bp = cv2.compareHist(hist_bp, hist_bp2,
                                              cv2.HISTCMP_CORREL)
                    comp_gp = cv2.compareHist(hist_gp, hist_gp2,
                                              cv2.HISTCMP_CORREL)
                    comp_rp = cv2.compareHist(hist_rp, hist_rp2,
                                              cv2.HISTCMP_CORREL)
                    # print('compareHist(b)', comp_b)
                    # print('compareHist(g)', comp_g)
                    # print('compareHist(r)', comp_r)
                    # print('compareHist(bp)', comp_bp)
                    # print('compareHist(gp)', comp_gp)
                    # print('compareHist(rp)', comp_rp)
                    # print("garbage_in_can", garbage_in_can)
                    # Stop tracking if: while tracking the bottle, its histogram changes sharply, its box disappears, or the person is lost;
                    # or, while tracking the person, the histogram changes sharply, the box disappears, or the garbage has been put in the can.
                    if ((track_thing == 0 and
                         ((comp_b <= 0.1) or (comp_g <= 0.1) or
                          (comp_r <= 0.1) or track_window == (0, 0, 0, 0)))
                            or (track_window2 == (0, 0, 0, 0))
                            or (track_thing == 1 and
                                ((comp_bp <= 0.) or (comp_gp <= 0.) or
                                 (comp_rp <= 0.)))):
                        untrack += 1
                        print("untrack = ", untrack)
                        if untrack >= 30:
                            print("Lost the track!\n")
                            break
                    elif (track_thing == 0 and (x + w > 640 or x < 0) and
                          (y + h > 480
                           or y < 0)) or (track_thing == 1 and
                                          (x2 + w2 > 640 or x2 < 0) and
                                          (y2 + h2 > 480 or y2 < 0)):
                        untrack += 1
                        print("untrack = ", untrack)
                        if untrack >= 50:
                            print("The box froze off-screen")
                            break
                    # elif (track_thing==1 and self.garbage_in_can==1):
                    elif self.garbage_in_can == 1:
                        print("Ending tracking because the garbage has been thrown away")
                        flag = 0
                        break
                    else:
                        untrack = 0
                    # print('garbage_in_can', self.garbage_in_can)

                    # Littering criterion: while tracking the bottle, it either dropped to about 10 cm above the ground,
                    # or the bottle was lost and its height in the frame before losing it was more than 10 cm below the camera.
                    # print('track_window = ', track_window)
                    if (((worldy <= -0.01) or
                         ((track_window == (0, 0, 0, 0) or untrack >= 1) and
                          (worldy < 0.5))) and (not track_thing)):
                        print("Littering detected!\n")
                        track_thing = 1  #human

                    # if track_thing==0:
                    #     tracking_point = pts
                    #     if not (pts.x==0.0 and pts.y==0.0 and pts.z==0.0):
                    #         pub.publish(tracking_point)
                    #     flag = 0 #bottle
                    # else:
                    #     tracking_point = pts2
                    #     if not (pts2.x==0.0 and pts2.y==0.0 and pts2.z==0.0):
                    #         pub.publish(tracking_point)
                    #     flag = 1 #person
                    if track_thing == 1:
                        tracking_point = pts2
                        if not (pts2.x == 0.0 and pts2.y == 0.0
                                and pts2.z == 0.0):
                            pub.publish(tracking_point)
                        flag = 1
                    # else:
                    #     flag = 0

                    pub_flag.publish(flag)

                    k = cv2.waitKey(1) & 0xff
                    # print("emergency_stop", self.emergency_stop)
                    # if (k == 27) or self.emergency_stop==1: # dev
                    if self.emergency_stop == 1:  # ops
                        print('emergency_stop == 1')
                        break
                else:
                    break
                # pub_track.publish(1)
            # pub_flag.publish(flag)

        yolo.close_session()
Example #2
"""
Classes to handle Carla sensors
"""
import math
import numpy as np
import tf

from cv_bridge import CvBridge
from geometry_msgs.msg import TransformStamped
from sensor_msgs.msg import CameraInfo
from sensor_msgs.point_cloud2 import create_cloud_xyz32
from std_msgs.msg import Header

from carla.sensor import Camera, Lidar, Sensor
from carla_ros_bridge.transforms import carla_transform_to_ros_transform

cv_bridge = CvBridge()  # global cv bridge to convert images between OpenCV and ROS


class SensorHandler(object):
    """
    Generic Sensor Handler

    A sensor handler computes the ROS messages associated with Carla sensor data, along with the transform associated
    with the sensor.
    These messages are passed to a *process_msg_fun* which will take care of publishing them.
    """
    def __init__(self,
                 name,
                 params=None,
                 carla_sensor_class=Sensor,
                 carla_settings=None,
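The constructor above is truncated in this extract, but the docstring already states the contract: a handler builds the ROS message (and the associated transform) for a piece of Carla sensor data and hands it to a *process_msg_fun*, which takes care of publishing. A minimal sketch of such a callback follows, assuming a (topic, msg) calling convention and lazy per-topic publisher creation; this is an illustration only, not the actual carla_ros_bridge implementation.

import rospy

_publishers = {}  # hypothetical cache of publishers, keyed by topic name


def process_msg_fun(topic, msg):
    # Sketch only: create a publisher for this topic on first use, then publish
    # the message that the sensor handler computed.
    if topic not in _publishers:
        _publishers[topic] = rospy.Publisher(topic, type(msg), queue_size=10)
    _publishers[topic].publish(msg)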
Example #3
netInitFinished = False
# top camera's information.
net_init_top = True
net_top = None
meta_top = None
cam_param_top = None
det_flag_top = False
has_rev_top = False
np_arr_top = None
netInitFinished_top = False

issave = True
isshow = False

empty_msg = Empty()
bridge = CvBridge()

import datetime
start_t = datetime.datetime.now()
output_dir_base = os.path.join("./run_pic", "{:%Y%m%dT%H%M}".format(start_t))
if not os.path.exists(output_dir_base):
    os.makedirs(output_dir_base)
output_dir_ball = os.path.join(output_dir_base, 'ball')
if not os.path.exists(output_dir_ball):
    os.makedirs(output_dir_ball)
output_dir_door = os.path.join(output_dir_base, 'door')
if not os.path.exists(output_dir_door):
    os.makedirs(output_dir_door)


# get bottom camera's picture
Example #4
    def processSegments(self, segment_list_msg):
        if not self.active:
            return
        t_start = rospy.get_time()

        if self.use_propagation:
            self.propagateBelief()
            self.t_last_update = rospy.get_time()

        # initialize measurement likelihood
        measurement_likelihood = np.zeros(self.d.shape)

        for segment in segment_list_msg.segments:
            if segment.color != segment.WHITE and segment.color != segment.YELLOW:
                continue
            if segment.points[0].x < 0 or segment.points[1].x < 0:
                continue

            d_i, phi_i, l_i = self.generateVote(segment)
            if d_i > self.d_max or d_i < self.d_min or phi_i < self.phi_min or phi_i > self.phi_max:
                continue
            if self.use_max_segment_dist and (l_i > self.max_segment_dist):
                continue

            i = floor((d_i - self.d_min) / self.delta_d)
            j = floor((phi_i - self.phi_min) / self.delta_phi)
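            # (i, j) selects the cell of the discretized (d, phi) belief grid that receives this segment's vote.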

            if self.use_distance_weighting:
                dist_weight = self.dwa * l_i**3 + self.dwb * l_i**2 + self.dwc * l_i + self.zero_val
                if dist_weight < 0:
                    continue
                measurement_likelihood[
                    i, j] = measurement_likelihood[i, j] + dist_weight
            else:
                measurement_likelihood[
                    i, j] = measurement_likelihood[i, j] + 1 / (l_i)

        if np.linalg.norm(measurement_likelihood) == 0:
            return
        measurement_likelihood = measurement_likelihood / np.sum(
            measurement_likelihood)

        if self.use_propagation:
            self.updateBelief(measurement_likelihood)
        else:
            self.beliefRV = measurement_likelihood

        # TODO entropy test:
        #print self.beliefRV.argmax()

        maxids = np.unravel_index(self.beliefRV.argmax(), self.beliefRV.shape)
        # rospy.loginfo('maxids: %s' % maxids)
        self.lanePose.header.stamp = segment_list_msg.header.stamp
        self.lanePose.d = self.d_min + maxids[0] * self.delta_d
        self.lanePose.phi = self.phi_min + maxids[1] * self.delta_phi
        self.lanePose.status = self.lanePose.NORMAL

        # publish the belief image
        bridge = CvBridge()
        belief_img = bridge.cv2_to_imgmsg(
            (255 * self.beliefRV).astype('uint8'), "mono8")
        belief_img.header.stamp = segment_list_msg.header.stamp

        max_val = self.beliefRV.max()
        self.lanePose.in_lane = max_val > self.min_max and len(
            segment_list_msg.segments) > self.min_segs and np.linalg.norm(
                measurement_likelihood) != 0
        self.pub_lane_pose.publish(self.lanePose)
        self.pub_belief_img.publish(belief_img)

        # print "time to process segments:"
        # print rospy.get_time() - t_start

        # Publish in_lane according to the entropy
        in_lane_msg = BoolStamped()
        in_lane_msg.header.stamp = segment_list_msg.header.stamp
        in_lane_msg.data = self.lanePose.in_lane
        # ent = entropy(self.beliefRV)
        # if (ent < self.max_entropy):
        #     in_lane_msg.data = True
        # else:
        #     in_lane_msg.data = False
        self.pub_in_lane.publish(in_lane_msg)
Example #5
    def __init__(self):
        self.image_pub = rospy.Publisher("/eyes/right", Image)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera2/usb_cam2/image_raw", Image,
                                          self.callback)
Example #6
 def __init__(self):
 
     self.bridge_object = CvBridge()
     # self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",Image,self.camera_callback)
     self.image_sub = rospy.Subscriber("/raspicam_node/image",Image,self.camera_callback)
     self.moveTurtlebot3_object = MoveTurtlebot3()
Example #7
def callback(data):
    br = CvBridge()
    rospy.loginfo('receiving image')
    dx = br.imgmsg_to_cv2(data)
    cv2.imshow("camera", dx)
    cv2.waitKey(1)
Example #8
def callback(data):
    global imagePub
    global markerPub

    #Convert image to CV image
    bridge = CvBridge()
    cv_image = bridge.imgmsg_to_cv(data, "mono8")

    # Convert the image to a Numpy array since most cv2 functions require Numpy arrays.
    searched = np.array(cv_image, dtype=np.uint8)

    #Create a copy for sobel comparison
    searchedCopy = copy.copy(searched)
    searchedCopy[searchedCopy == 255] = 0
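    # Zeroing the 255-valued cells means the copy differs from the original only at those cells,
    # so subtracting the two Sobel edge maps below leaves only the frontier edges bordering them.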

    #Take Sobel Derivatives
    sobel_x = np.uint8(
        np.absolute(cv2.Sobel(searched, cv2.CV_16S, 1, 0, ksize=1)))
    sobel_y = np.uint8(
        np.absolute(cv2.Sobel(searched, cv2.CV_16S, 0, 1, ksize=1)))
    sobel_xy = cv2.addWeighted(sobel_x, 0.5, sobel_y, 0.5, 0)
    sobel_x_base = np.uint8(
        np.absolute(cv2.Sobel(searchedCopy, cv2.CV_16S, 1, 0, ksize=1)))
    sobel_y_base = np.uint8(
        np.absolute(cv2.Sobel(searchedCopy, cv2.CV_16S, 0, 1, ksize=1)))
    sobel_xy_base = cv2.addWeighted(sobel_x_base, 0.5, sobel_y_base, 0.5, 0)
    ret, sobel_xy_thres = cv2.threshold(sobel_xy, 0, 255, cv2.THRESH_BINARY)
    ret, sobel_xy_base_thres = cv2.threshold(sobel_xy_base, 0, 255,
                                             cv2.THRESH_BINARY)

    #Subtract Comparisons for frontiers only
    sobelCombined = sobel_xy_base_thres - sobel_xy_thres
    ret, sobelCombined_thresh = cv2.threshold(sobelCombined, 0, 255,
                                              cv2.THRESH_BINARY)

    #Dilate Combined
    dialate = cv2.dilate(sobelCombined_thresh, np.ones((3, 3), 'uint8'))

    #Find Contour Centroids
    dialateCopy = copy.copy(dialate)
    contours, hier = cv2.findContours(dialateCopy, cv2.RETR_LIST,
                                      cv2.CHAIN_APPROX_SIMPLE)
    centroids = []
    colors = []
    for i in contours:
        if len(i) > 10:
            moments = cv2.moments(i)
            cx = int(moments['m10'] / moments['m00'])
            cy = int(moments['m01'] / moments['m00'])
            centroidPoint = Point()
            centroidColor = ColorRGBA()
            centroidPoint.x = cx / 20.0 - 20
            centroidPoint.y = cy / 20.0 - 32.8
            centroidPoint.z = 0
            #centroidColor = (0,0,1,1)
            centroidColor.r = 0
            centroidColor.g = 0
            centroidColor.b = 1
            centroidColor.a = 1
            centroids.append(centroidPoint)
            colors.append(centroidColor)

    #code.interact(local=locals())
    #Display Image in PLT Window
    '''plt.subplot(1,5,1),plt.imshow(searched,cmap='gray'),plt.title('Searched Space'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,2),plt.imshow(sobel_xy_thres,cmap='gray'),plt.title('Sobel All'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,3),plt.imshow(sobel_xy_base_thres,cmap='gray'),plt.title('Sobel All'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,4),plt.imshow(sobelCombined,cmap='gray'),plt.title('Frontiers Map'),plt.xticks([]), plt.yticks([])
	plt.subplot(1,5,5),plt.imshow(dialate,cmap='gray'),plt.title('Dialate'),plt.xticks([]), plt.yticks([])
	plt.draw()
	plt.pause(0.001)'''

    #Convert the image back to mat format and publish as ROS image
    frontiers = cv.fromarray(dialate)
    imagePub.publish(bridge.cv_to_imgmsg(frontiers, "mono8"))

    #Publish centroids of frontiers as marker
    markerMsg = Marker()
    markerMsg.header.frame_id = "/map"
    markerMsg.header.stamp = rospy.Time.now()
    markerMsg.ns = ""
    markerMsg.id = 0
    markerMsg.type = 7  #Sphere List
    markerMsg.action = 0  #Add
    markerMsg.scale.x = 0.5
    markerMsg.scale.y = 0.5
    markerMsg.scale.z = 0.5
    markerMsg.points = centroids
    markerMsg.colors = colors
    markerPub.publish(markerMsg)
    print 'yes'
Example #9
 def __init__(self):
     self.bridge = CvBridge()
     self.sub = rospy.Subscriber("/bot/camera/image_raw", Image,
                                 self.callback)
     self.cnt = 0
Example #10
 def callback(self, ros_data):
     bridge = CvBridge()
     self.image = bridge.compressed_imgmsg_to_cv2(
         ros_data, desired_encoding='passthrough')
Example #11
def run_kitti2bag():
    parser = argparse.ArgumentParser(description = "Convert KITTI dataset to ROS bag file the easy way!")
    # Accepted argument values
    kitti_types = ["raw_synced", "odom_color", "odom_gray"]
    odometry_sequences = []
    for s in range(22):
        odometry_sequences.append(str(s).zfill(2))
    
    parser.add_argument("kitti_type", choices = kitti_types, help = "KITTI dataset type")
    parser.add_argument("dir", nargs = "?", default = os.getcwd(), help = "base directory of the dataset; if no directory is passed, the default is the current working directory")
    parser.add_argument("-t", "--date", help = "date of the raw dataset (i.e. 2011_09_26), option is only for RAW datasets.")
    parser.add_argument("-r", "--drive", help = "drive number of the raw dataset (i.e. 0001), option is only for RAW datasets.")
    parser.add_argument("-s", "--sequence", choices = odometry_sequences,help = "sequence of the odometry dataset (between 00 - 21), option is only for ODOMETRY datasets.")
    args = parser.parse_args()

    bridge = CvBridge()
    compression = rosbag.Compression.NONE
    # compression = rosbag.Compression.BZ2
    # compression = rosbag.Compression.LZ4
    
    # CAMERAS
    cameras = [
        (0, 'camera_gray_left', '/kitti/camera_gray_left'),
        (1, 'camera_gray_right', '/kitti/camera_gray_right'),
        (2, 'camera_color_left', '/kitti/camera_color_left'),
        (3, 'camera_color_right', '/kitti/camera_color_right')
    ]

    if args.kitti_type.find("raw") != -1:
    
        if args.date == None:
            print("Date option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)
        elif args.drive == None:
            print("Drive option is not given. It is mandatory for raw dataset.")
            print("Usage for raw dataset: kitti2bag raw_synced [dir] -t <date> -r <drive>")
            sys.exit(1)
        
        bag = rosbag.Bag("kitti_{}_drive_{}_{}.bag".format(args.date, args.drive, args.kitti_type[4:]), 'w', compression=compression)
        kitti = pykitti.raw(args.dir, args.date, args.drive)
        print("kitti loaded")
        if not os.path.exists(kitti.data_path):
            print('Path {} does not exists. Exiting.'.format(kitti.data_path))
            sys.exit(1)

        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)

        try:
            # IMU
            imu_frame_id = 'imu_link'
            imu_topic = '/kitti/oxts/imu'
            gps_fix_topic = '/kitti/oxts/gps/fix'
            gps_vel_topic = '/kitti/oxts/gps/vel'
            velo_frame_id = 'velo_link'
            velo_topic = '/kitti/velo'

            T_base_link_to_imu = np.eye(4, 4)
            T_base_link_to_imu[0:3, 3] = [-2.71/2.0-0.05, 0.32, 0.93]

            # tf_static
            transforms = [
                ('base_link', imu_frame_id, T_base_link_to_imu),
                (imu_frame_id, velo_frame_id, inv(kitti.calib.T_velo_imu)),
                (imu_frame_id, cameras[0][1], inv(kitti.calib.T_cam0_imu)),
                (imu_frame_id, cameras[1][1], inv(kitti.calib.T_cam1_imu)),
                (imu_frame_id, cameras[2][1], inv(kitti.calib.T_cam2_imu)),
                (imu_frame_id, cameras[3][1], inv(kitti.calib.T_cam3_imu))
            ]

            util = pykitti.utils.read_calib_file(os.path.join(kitti.calib_path, 'calib_cam_to_cam.txt'))

            # Export
            save_static_transforms(bag, transforms, kitti.timestamps)
            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=None)
            save_imu_data(bag, kitti, imu_frame_id, imu_topic)
            save_gps_fix_data(bag, kitti, imu_frame_id, gps_fix_topic)
            save_gps_vel_data(bag, kitti, imu_frame_id, gps_vel_topic)
            #for camera in cameras:
                #save_camera_data(bag, args.kitti_type, kitti, util, bridge, camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=None)
            save_velo_data(bag, kitti, velo_frame_id, velo_topic, bridge)

        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
            
    elif args.kitti_type.find("odom") != -1:
        
        if args.sequence == None:
            print("Sequence option is not given. It is mandatory for odometry dataset.")
            print("Usage for odometry dataset: kitti2bag {odom_color, odom_gray} [dir] -s <sequence>")
            sys.exit(1)
            
        bag = rosbag.Bag("kitti_data_odometry_{}_sequence_{}.bag".format(args.kitti_type[5:],args.sequence), 'w', compression=compression)
        
        kitti = pykitti.odometry(args.dir, args.sequence)
        if not os.path.exists(kitti.sequence_path):
            print('Path {} does not exists. Exiting.'.format(kitti.sequence_path))
            sys.exit(1)

        kitti.load_calib()         
        kitti.load_timestamps() 
             
        if len(kitti.timestamps) == 0:
            print('Dataset is empty? Exiting.')
            sys.exit(1)
            
        if args.sequence in odometry_sequences[:11]:
            print("Odometry dataset sequence {} has ground truth information (poses).".format(args.sequence))
            kitti.load_poses()

        try:
            util = pykitti.utils.read_calib_file(os.path.join(args.dir,'sequences',args.sequence, 'calib.txt'))
            current_epoch = (datetime.utcnow() - datetime(1970, 1, 1)).total_seconds()
            # Export
            if args.kitti_type.find("gray") != -1:
                used_cameras = cameras[:2]
            elif args.kitti_type.find("color") != -1:
                used_cameras = cameras[-2:]

            save_dynamic_tf(bag, kitti, args.kitti_type, initial_time=current_epoch)
            for camera in used_cameras:
                save_camera_data(bag, args.kitti_type, kitti, util, bridge, camera=camera[0], camera_frame_id=camera[1], topic=camera[2], initial_time=current_epoch)

        finally:
            print("## OVERVIEW ##")
            print(bag)
            bag.close()
Example #12
    def run(self):
        """Run the Distance Tracker with the input from the camera"""
        bridge = CvBridge()
        rate = rospy.Rate(24)

        #for heading averaging
        current_slope = 0.0
        last_point = 0.0
        current_point = 0.0
        average = 0.0
        max_ = 0.0
        min_ = 0.0

        while not rospy.is_shutdown():
            target_found, target_centroid, dist, offset = self.find_target()
            #print("EST DISTANCE: " + str(dist) + ' cm')
            #print("EST OFFSET: " + str(offset) + ' cm')
            #print("--------------")

            #Calculate delta_T
            if (self.prev_T != 0):
                self.delta_T = rospy.get_time() - self.prev_T
            self.prev_T = rospy.get_time()

            if target_found:
                #(Old averaging method)
                #                ###heading averaging###
                #                last_point = current_point
                #                current_point = offset[0]
                #                new_slope = (current_point - last_point)
                #
                #                if new_slope != 0:
                #                    new_slope = new_slope / abs(new_slope)
                #
                #                #if current slope is positive
                #                if current_slope == 1:
                #                    if new_slope <= 0:
                #                        average = (max_ + min_) / 2.0
                #                        min_ = max_
                #                    elif new_slope > 0:
                #                        max_ = current_point
                #                #if current slope is negative
                #                elif current_slope == -1:
                #                    if new_slope >= 0:
                #                        average = (max_ + min_) / 2.0
                #                        max_ = min_
                #                    elif new_slope < 0:
                #                        min_ = current_point
                #                #if current slope is zero
                #                else:
                #                    if new_slope == 0:
                #                        average = max_
                #                    elif new_slope > 0:
                #                        max_ = current_point
                #                    elif new_slope < 0:
                #                        min_ = current_point
                #
                #                current_slope = new_slope

                average = self.filter_heading(offset[0], 0.25)
                #TODO pitch averaging
                #TODO dist averaging

                #Publish everything
                self.pose.header.seq = 1
                self.pose.header.stamp = rospy.Time.now()
                self.pose.header.frame_id = "sofi_cam"
                self.pose.pose.position.x = dist
                self.pose.pose.position.y = offset[0]
                self.pose.pose.position.z = offset[1]
                self.pose.pose.orientation.x = 0
                self.pose.pose.orientation.y = 0
                self.pose.pose.orientation.z = 0
                self.pose.pose.orientation.w = 1

                self.pose_pub.publish(self.pose)
                self.found_pub.publish(True)
                self.average_heading_pub.publish(average)
                #self.average_heading_pub.publish(offset[0])     #hack to filter oscillations in PID node
                self.average_pitch_pub.publish(offset[1])
                self.average_dist_pub.publish(dist)

            else:
                self.found_pub.publish(False)
                self.average_heading_pub.publish(average)
                self.average_pitch_pub.publish(offset[1])
                self.average_dist_pub.publish(dist)

            rate.sleep()
Example #13
    def process_image(self, image):
        """
        Processes a single image, looks for a circle, and returns the target centroid in the camera frame
        Reference: https://www.pyimagesearch.com/2015/09/14/ball-tracking-with-opencv/
        """
        target_found = False
        target_centroid = None

        #Blur and resize the image, put into HSV color scale, and create an image mask
        #img_small = cv2.resize(image, None, fx=self.subsample_ratio, fy=self.subsample_ratio, interpolation=cv2.INTER_LINEAR)
        img_blur = cv2.GaussianBlur(image, (5, 5), 0)
        hsv_blur = cv2.cvtColor(img_blur, cv2.COLOR_BGR2HSV)
        mask_l = cv2.inRange(hsv_blur, self.hsv_lower_lower,
                             self.hsv_lower_upper)
        mask_u = cv2.inRange(hsv_blur, self.hsv_upper_lower,
                             self.hsv_upper_upper)
        mask = cv2.bitwise_or(mask_l, mask_u)
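        # The two hue ranges OR'ed together typically handle a colour (such as red) whose hue wraps around OpenCV's 0-180 boundary.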

        # Get rid of noise
        kernel = np.ones((5, 5), np.uint8)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

        # Get rid of small holes
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)

        #Publish the mask
        mask_bgr8 = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        bridge = CvBridge()
        img_masked = image
        img_masked[mask == 0, :] = [0, 0, 0]
        grey = cv2.cvtColor(img_masked, cv2.COLOR_BGR2GRAY)
        grey_msg = bridge.cv2_to_imgmsg(grey, encoding='mono8')
        #self.img_pub.publish(cv_grey)

        keypoints = self.detector.detect(mask)
        orig_keypoints = list(keypoints)
        keypoints.sort(reverse=True, key=lambda k: k.size)

        # filter on size
        if len(keypoints) > 1:
            keypoints = list(
                filter(lambda k: k.size / keypoints[0].size > 0.4, keypoints))

            keypoints.sort(reverse=True, key=lambda k: k.pt[1])
            keypoints = [keypoints[0]]

        grey_with_keypoints = cv2.drawKeypoints(
            grey, orig_keypoints, np.array([]), (0, 0, 255),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        grey_with_keypoints = cv2.drawKeypoints(
            grey_with_keypoints, keypoints, np.array([]), (0, 255, 0),
            cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        greyk_msg = bridge.cv2_to_imgmsg(grey_with_keypoints, encoding='bgr8')
        self.img_pub.publish(greyk_msg)

        cv_mask = bridge.cv2_to_imgmsg(mask_bgr8, encoding='bgr8')
        self.mask_pub.publish(cv_mask)

        #find the largest contour of the mask or return False,None if target is not there
        #cnts, cnt_hier = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:]
        #if len(cnts) == 0:
        #  return (False, None, None, None)
        #cnt = max(cnts, key=cv2.contourArea)
        #((x,y),radius) = cv2.minEnclosingCircle(cnt)

        if len(keypoints) >= 1:
            keypoint = keypoints[0]
            ((x, y), radius) = (keypoint.pt, keypoint.size / 2.0)

            if radius < 5:
                return (False, None, None, None)

            target_centroid = ((int(x), int(y)), int(radius))
            target_found = True
        else:
            return (target_found, target_centroid, 0.0, (0.0, 0.0))

        #Calculate the object's distance and offset from center
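        # Pinhole-camera relation used below: distance = focal_length * real_height / height_px,
        # and a pixel offset converts back to metres as offset_px * distance / focal_length.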
        height_px = 2.0 * target_centroid[1]
        offset_px = -1.0 * (target_centroid[0][0] -
                            self.image_center[0]), -1.0 * (
                                target_centroid[0][1] - self.image_center[1])
        distance = (self.focal_length * self.real_height) / height_px
        y_offset = (offset_px[0] * distance) / self.focal_length
        z_offset = (offset_px[1] * distance) / self.focal_length
        heading_rad = atan2(y_offset, distance)  #heading in radians
        #print("HEADING", heading_rad)
        pitch_rad = atan2(z_offset, distance)  #pitch in radians

        # Publish distance and offset information to message file instead
        #self.position_msg.distance = str(distance)
        #self.position_msg.x_offset = str(z_offset)
        #self.position_msg.y_offset = str(y_offset)
        #self.position_pub.publish(self.position_msg)

        #return (target_found, target_centroid, distance, (y_offset, z_offset))
        return (target_found, target_centroid, distance, (heading_rad,
                                                          pitch_rad))
Example #14
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument("bag_file", type=str, help="path to rosbag file")
        parser.add_argument("export_dir",
                            type=str,
                            help="path to export folder")
        parser.add_argument("--topic_rgb",
                            type=str,
                            default="/camera/rgb/image_rect_color/compressed",
                            help="colour topic (CompressedImage)")
        parser.add_argument(
            "--topic_depth",
            type=str,
            default="/camera/depth/image_rect_raw/compressedDepth",
            help="depth topic (CompressedImage)")
        parser.add_argument("--topic_camera_info",
                            type=str,
                            default="/camera/rgb/camera_info",
                            help="camera info topic (CameraInfo)")
        parser.add_argument("--topic_joints",
                            type=str,
                            default="/joint_states",
                            help="joint state topic (JointState)")
        parser.add_argument("-f",
                            "--force",
                            action="store_true",
                            help="overwrite old data")
        parser.add_argument("-b",
                            "--reference_frame",
                            type=str,
                            default="base",
                            help="parent frame of camera pose")

        args = parser.parse_args()

        # input/output paths
        bag_file_path = args.bag_file
        self.export_path = args.export_dir

        if os.path.exists(self.export_path) and not args.force:
            raise UserWarning("path " + self.export_path + " already exists!")

        # image topics with CompressedImage
        self.topic_rgb = args.topic_rgb
        self.topic_depth = args.topic_depth
        # camera intrinsics with CameraInfo
        self.topic_ci = args.topic_camera_info
        # topic with JointState
        self.topic_joints = args.topic_joints

        self.topics_tf = ["/tf", "/tf_static"]

        self.topics = [
            self.topic_rgb, self.topic_depth, self.topic_ci, self.topic_joints
        ]

        self.ref_frame = args.reference_frame

        bag_file_path = os.path.expanduser(bag_file_path)
        print("reading:", bag_file_path)
        self.bag = rosbag.Bag(bag_file_path, mode='r')
        print("duration:",
              self.bag.get_end_time() - self.bag.get_start_time(), "s")

        self.export_path = os.path.expanduser(self.export_path)
        self.path_colour = os.path.join(self.export_path, "colour")
        self.path_depth = os.path.join(self.export_path, "depth")

        self.cvbridge = CvBridge()
Example #15
	def __init__(self):
		self.imgPublisher = rospy.Publisher( "camera/image_raw", Image )
		self.imgSubscriber = rospy.Subscriber( "ardrone/front/image_raw", Image, self.echoImageCallback )
		cv.NamedWindow("Image window", 1)
		self.bridge = CvBridge()
Example #16
def arduino_cam():
    rospy.init_node('arduino_cam', anonymous=True)
    subBotoes = message_filters.Subscriber('botoes', BotoesMsg)
    subRefle = message_filters.Subscriber('refletancia', RefletanciaMsg)
    subDistancia = message_filters.Subscriber('distancia', SensoresDistanciaMsg)
    subCam = message_filters.Subscriber('tem_circulos', BoolStamped)
    subCoordenadas = message_filters.Subscriber('/coordenadas_circulos', Vector3Stamped)

    ts = message_filters.TimeSynchronizer([subRefle, subDistancia, subCam, subBotoes, subCoordenadas], 20)
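    # TimeSynchronizer (exact-time policy) only fires the callback once messages with identical timestamps
    # have arrived on every subscribed topic; 20 is the queue size.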

    ts.registerCallback(arduinoCamCb)

    rospy.spin()
    #while
    #rospy.spinOnce()
    ##copy the sensor values
    ##strategy
    ##copy the values to the actuators

if __name__ == "__main__":
    try:
        ponte = CvBridge()
        dataGarra = Int32MultiArray()
        dataGarra.data = [0, 0]
        dataMotores = Int32MultiArray()
        dataMotores.data = [0, 0]
        arduino_cam()
    except rospy.ROSInterruptException:
        pass
Example #17
 def __init__(self):
     rospy.init_node("CamRcv", anonymous=True)
     self.sub = rospy.Subscriber("/camera/image_raw", Image, self.callback)
     self.rate = rospy.Rate(10)
     self.bridge = CvBridge()
Example #18
def run():

    # declare global variables that can be updated globally
    global counter 
    global _current_image
    global _bridge
    global _detected_bbox
    global coordinates
    global object_class
    global dets
    global marked_image
    global _frame_seq
    global _current_image_header_seq
    global _detected_image_header_seq
    global _current_detected_image
    global tracker
    global trackers  # rows of [x1, y1, x2, y2, track_id] returned by the SORT tracker
    global _old_detection
    global _current_detected_image_seq
    trackers = [] # this is going to hold the state estimates of all of the trackers.
    tracker_ids = []

    _old_detection = None
    _current_detected_image_seq = -1
    _detected_image_header_seq = 0 
    _current_image_header_seq = -1
    temp_detected_image = -2
    counter = 0

    # initialize current image and detected boxes
    _current_image_buffer = []
    _detected_image_buffer = []
    _detected_image_header_seq_buffer = [] 
    _current_image_header_seq_buffer = []

    marked_image = None

    _current_image = None
    _current_detected_image = None
    # _detected_bbox_buffer = []
    _detected_bbox = None

    # setup cv2 bridge 
    _bridge = CvBridge()

    # image and point cloud subscribers
    image_topic, detection_topic, display = getParam()


    # subscribe to raw image feed
    # rospy.Subscriber(image_topic, Image, image_callback, queue_size=100)

    # subscribe to detections
    rospy.Subscriber(detection_topic, BoundingBoxes, detector_callback, queue_size=1)

    rospy.Subscriber("/darknet_ros/detection_image", Image, detected_image_callback, queue_size=1)

    

    # publish image topic with bounding boxes 
    _imagepub = rospy.Publisher('~labeled_image', Image, queue_size=1)

    # TODO publish original unmarked frame
    # _imagepub = rospy.Publisher('~original_frames', Image, queue_size=10)

    # TODO publish marked image with sort detections

    rospy.loginfo("ready to detect")

    # adjust frame rate 

    

    r = rospy.Rate(6)
    # max_age is the maximum number of frames a tracker may persist without a detection;
    # setting max_age = n therefore tolerates up to n skipped frames between detections.
    # min_hits is the minimum number of detections a tracker needs before it is reported.
    
    tracker = sort.Sort(max_age = 8, min_hits=1) #create instance of the SORT tracker
    # counter = 0 
    frames = 1 

    _old_detection = rospy.wait_for_message(detection_topic, BoundingBoxes).image_header.seq
    # rospy.wait_for_message("/darknet_ros/detection_image", Image)
    while not rospy.is_shutdown():
        # _old_detection = _detected_bbox.image_header.seq
        # rospy.wait_for_message("/darknet_ros/detection_image", Image)
        
        boxes = []
        class_ids = []
        detections = 0
        dets = []
        # if frames == 1: 
        #     _old_detection = _detected_bbox.seq

        # if frames == 1: 
        #     _old_detection = _detected_bbox

        # in the form: dets.append([x_min, y_min, x_max, y_max, probability])
        # dets = np.empty((0,5))
            # only run if there is an image
        if  new_detection(_detected_bbox) and _current_detected_image is not None:

            # if frames == 1: 
            #     _old_detection = _detected_bbox.image_header.seq
            
            print("current image seq: %s" % _current_detected_image_seq)
            # print("frames processed %s: " %frames)
            print("detected frame number: %s" % _detected_image_header_seq)
            # if frame < 3 : 
            #     rospy.spin()
            
            # check to see if curr detection is on the current frame
            # if _current_image_header_seq == _detected_image_header_seq:
                # print("image seq are the same")
            
            try:
                # image received
                # convert image from the subscriber into an OpenCV image
                # initialize the detected image as the current frame
                # initialize our current frame 
                # marked_image = _bridge.imgmsg_to_cv2(_current_image, 
                # 'rgb8')

                marked_image = _bridge.imgmsg_to_cv2(_current_detected_image, 
                'rgb8')
                # _imagepub.publish(_bridge.cv2_to_imgmsg(marked_image, 'rgb8'))  # publish detection results
                
                # marked_image, objects = self._detector.from_image(scene)  # detect objects
                
                # _imagepub.publish(_bridge.cv2_to_imgmsg(scene, 'rgb8'))  # publish detection results

                # what if there are no bounding boxes?
                # go through all the detections in each frame
                if _detected_bbox is not None:
                    frames +=1
                    for box in _detected_bbox.bounding_boxes:
                        # xmin, ymin, xmax, ymax = box.xmin, box.ymin, box.xmax, box.ymax
                        obj_class = box.Class
                        # update the trackers one at a time based on detection boxes ? 
                        # collect all boxes from the relevant class
                        if box.Class == "plant": 
                            # rospy.loginfo("cycle_time %s", cycle_time)
                            # [x1,y1,x2,y2] - for each object
                            # store the class ids 
                            dets.append([box.xmin, box.ymin, box.xmax, box.ymax, box.probability])
                            # class_ids.append(obj_class)
                            # detections +=
                        else: pass  # no plants in the image

                    
                    # even if there are no detections, the trackers still need to be updated
                dets = np.array(dets)  # convert the detection list to an N x 5 ndarray
                # whether or not the detections are none, the trackers still need to be updated
                # if len(dets) == 0:
                #     rospy.loginfo("no targets detected!")
                #     # trackers = tracker.update(dets)
                # else: 
                #     rospy.loginfo("targets detected")
                    # trackers = tracker.update(dets)
                # rospy.loginfo("trackers updated based on current detections")
                trackers = tracker.update(dets)
                # print(trackers)
            
                # iterate through all the trackers
                for t in trackers:
                    # rospy.loginfo("tracker ID: %s", d[4])
                    # rospy.loginfo("tracker id: " + str(t[4]) + ": " + "info: " + str(t))
                    # _tracker_ids.append(str(t[4]))
                    str_n = str(int(t[4]))
                    if (str_n in tracker_ids) == False:
                        # print("unique id found")
                        # unique id 
                        tracker_ids.append(str_n)
                        counter +=1
                    else: pass

                    box_t = [t[0], t[1], t[2], t[3]]
                    # draw every tracker 
                    # draw_detections(box, obj_class, tracker_id, im):
                    # draw_detections(box_t, obj_class, t[4], marked_image)
                    # draw_detections(marked_image, t[4], box_t)
                    draw_detections(box_t, "plant", t[4], marked_image)
                
                print("plant count:%s"%str(counter))
                # the default case is the current frame with nothing drawn on it
                _imagepub.publish(_bridge.cv2_to_imgmsg(marked_image, 'rgb8'))  # publish detection results                    
                _old_detection = _detected_bbox.image_header.seq

            except CvBridgeError as e:
                print(e)

            r.sleep() # best effort to maintain loop rate r for each frame
Example #19
def demo():
    global click
    Timer.enable(True)
    setup_windows()
    rospy.init_node('object_tracker')

    pcl = rospy.get_param('pcl', default=True)
    print 'pcl', pcl

    # initialize rectifier
    rospack = rospkg.RosPack()
    pkg_root = rospack.get_path('edwin')
    bridge = CvBridge()

    rectifier = Rectifier(
            param_l = os.path.join(pkg_root, 'Stereo/camera_info/left_camera.yaml'),
            param_r = os.path.join(pkg_root, 'Stereo/camera_info/right_camera.yaml')
            )

    obj_pub = rospy.Publisher('obj_point', PointStamped, queue_size=1)
    obj_msg = PointStamped()
    obj_msg.header.frame_id = 'camera_link'

    if pcl:
        pcl_pub = rospy.Publisher('point_cloud', PointCloud2, queue_size=1)
        pcl_msg = PointCloud2()

    ### SETUP IMAGES
    dims = (480,640)
    dims3 = (480,640,3)

    left = np.empty(dims3, dtype=np.uint8)
    right = np.empty(dims3, dtype=np.uint8)
    im_l = np.empty(dims3,dtype=np.uint8)
    im_r = np.empty(dims3,dtype=np.uint8)
    sp_l = np.empty(dims3,dtype=np.uint8)

    superpixel = SuperPixel(800)

    nodes = [SuperPixelNode1(400), SuperPixelNode2(), SuperPixelNode3(), DisparityNode()]

    pipeline = Pipeline(nodes)

    with CameraReader(1) as cam_l, CameraReader(2) as cam_r:
        def input_fn():
            cam_l.read(left)
            cam_r.read(right)
            rectifier.apply(left, right, dst_l=im_l, dst_r=im_r)
            return left, right

        def output_fn(x):
            sp_l, disp = x

            cv2.imshow('sp_l', sp_l)
            cv2.imshow('disp', disp)

            raw_dist = cv2.reprojectImageTo3D((disp/16.).astype(np.float32), rectifier.Q, handleMissingValues=True)
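            # The /16 above is presumably because OpenCV's StereoBM/StereoSGBM matchers return fixed-point disparity maps scaled by 16.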
            dist = raw_dist[:,:,2]
            if pcl:
                pcl_msg = toPointCloud(raw_dist, im_l)
                pcl_pub.publish(pcl_msg)
            cv2.imshow('dist', dist)

            params = {
                    'color' : 'yellow',
                    'min_dist' : 0.3,
                    'max_dist' : 1.5,
                    'min_area' : 0.003,
                    'max_area' : 1.0
                    }
            filtered, ctrs = apply_criteria(dist, sp_l, params)
            if len(ctrs) > 0:
                m = cv2.moments(ctrs[0]) # pull out biggest one--ish
                cX = int(m["m10"] / m["m00"])
                cY = int(m["m01"] / m["m00"])
                x,y,z = raw_dist[cY,cX]
                x,y,z = z,-x, -y
                obj_msg.header.stamp = rospy.Time.now()
                obj_msg.point.x = x
                obj_msg.point.y = y
                obj_msg.point.z = z
                obj_pub.publish(obj_msg)
            cv2.imshow('filtered', filtered)

        pipeline.run(input_fn, output_fn)

    #with CameraReader(1) as cam_l, CameraReader(2) as cam_r:
    ##with ROSCameraReader('/edwin_camera/left/image_raw') as cam_l, \
    ##        ROSCameraReader('/edwin_camera/right/image_raw') as cam_r:
    #    #matcher = Matcher()
    #    #tracker = ObjectTracker()
    #    #manager = ObjectManager()
    #    #tracker.set_target('medium','blue')

    #    while not rospy.is_shutdown():
    #        cam_l.read(left)
    #        cam_r.read(right)
    #        rectifier.apply(left, right, dst_l=im_l, dst_r=im_r)

    #        with Timer('SuperPixel'):
    #            superpixel.apply(im_l, dst=sp_l)

    #        cv2.imshow('sp_l', sp_l)

    #        with Timer('Disparity'):
    #            disp = handle_disp(im_l, im_r, rectifier.Q)
    #            #im_disp = cv2.normalize(disp,None,0.0,255.0,cv2.NORM_MINMAX).astype(np.uint8)
    #            #cv2.imshow('im_disp', im_disp)

    #        disp = apply_superpixel(disp, superpixel)

    #        raw_dist = cv2.reprojectImageTo3D((disp/16.).astype(np.float32), rectifier.Q, handleMissingValues=True)

    #        if pcl:
    #            with Timer('PCL'):
    #                pcl_msg = toPointCloud(raw_dist, im_l)
    #                pcl_pub.publish(pcl_msg)
    #        dist = raw_dist[:,:,2]

    #        cv2.imshow('dist', dist)

    #        #blur = cv2.GaussianBlur(im_l,(3,3),0) 
    #        #im_opt = handle_opt(blur)
    #        #im_bksub = handle_bksub(blur)

    #        identified = im_l.copy()
    #        params = {
    #                'color' : 'yellow',
    #                'min_dist' : 0.3,
    #                'max_dist' : 1.5,
    #                'min_area' : 0.003,
    #                'max_area' : 1.0
    #                }
    #        filtered, ctrs = apply_criteria(dist, sp_l, params)
    #        if len(ctrs) > 0:
    #            m = cv2.moments(ctrs[0]) # pull out biggest one--ish
    #            cX = int(m["m10"] / m["m00"])
    #            cY = int(m["m01"] / m["m00"])
    #            x,y,z = raw_dist[cY,cX]
    #            x,y,z = z,-x, -y
    #            obj_msg.header.stamp = rospy.Time.now()
    #            obj_msg.point.x = x
    #            obj_msg.point.y = y
    #            obj_msg.point.z = z
    #            obj_pub.publish(obj_msg)
    #        cv2.imshow('filtered', filtered)

    #        if cv2.waitKey(10) == 27:
    #            break

    cv2.destroyAllWindows()
Example #20
0
from tx2_whole_image_desc_server.srv import WholeImageDescriptorCompute

import rospy
import numpy as np
import cv2

from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image

import rospkg
THIS_PKG_BASE_PATH = rospkg.RosPack().get_path('tx2_whole_image_desc_server')

print('Attempt connecting to server')
rospy.wait_for_service('whole_image_descriptor_compute')
print('Connection successful!!')
try:
    res = rospy.ServiceProxy('whole_image_descriptor_compute',
                             WholeImageDescriptorCompute)

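    # Build a test image: load the Lena image bundled with the package and resize it to 640x480.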
    # X = np.zeros( (100, 100), dtype=np.uint8 )
    X = cv2.resize(
        cv2.imread(THIS_PKG_BASE_PATH + '/resources/lena_color.jpg'),
        (640, 480))
    # X = cv2.resize( cv2.imread( THIS_PKG_BASE_PATH+'/resources/lena_color.jpg' ), (7,5) )
    print('X.shape=', X.shape)
    i = CvBridge().cv2_to_imgmsg(X)
    u = res(i, 23)
    print('received: ', u)
except rospy.ServiceException as e:
    print('failed', e)
Example #21
0
haar_flags = 0
display = True

if __name__ == '__main__':
    opencv_dir = '/usr/share/opencv/haarcascades/'

    face_cascade = cv2.CascadeClassifier(opencv_dir +
                                         'haarcascade_frontalface_default.xml')
    if face_cascade.empty():
        print "Could not find face cascade"
        sys.exit(-1)
    eye_cascade = cv2.CascadeClassifier(opencv_dir + 'haarcascade_eye.xml')
    if eye_cascade.empty():
        print "Could not find eye cascade"
        sys.exit(-1)
    br = CvBridge()
    rospy.init_node('facedetect')
    display = rospy.get_param("~display", True)

    def detect_and_draw(imgmsg):
        img = br.imgmsg_to_cv2(imgmsg, "bgr8")
        # allocate temporary images
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
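        # detectMultiScale(gray, scaleFactor=1.3, minNeighbors=3) returns face
        # bounding boxes as (x, y, w, h); eyes are then searched inside each face ROI.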
        faces = face_cascade.detectMultiScale(gray, 1.3, 3)
        for (x, y, w, h) in faces:
            cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            roi_gray = gray[y:y + h, x:x + w]
            roi_color = img[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(roi_gray)
            for (ex, ey, ew, eh) in eyes:
                cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                              (0, 255, 0), 2)  # assumed color/thickness
Example #22
0
def imSub(imgmsg):
    global img, flag_img
    bridge = CvBridge()
    img = bridge.imgmsg_to_cv2(imgmsg, 'bgr8')
Example #23
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--in_bag_file", help="input ROS bag")
    parser.add_argument("--out_bag_file", help="output ROS bag")
    parser.add_argument("--image_topic", help="image topic")
    parser.add_argument("--cam_info_topic", help="camera info topic")
    parser.add_argument("--desired_width", type=int)
    parser.add_argument("--desired_height", type=int)

    args = parser.parse_args()
    in_bag = rosbag.Bag(args.in_bag_file, "r")
    bridge = CvBridge()
    count = 0

    orig_width = None
    orig_height = None
    desired_width = args.desired_width
    desired_height = args.desired_height

    # figure out orig_width and orig_height first
    for topic, msg, t in in_bag.read_messages(args.cam_info_topic):
        orig_width = msg.width
        orig_height = msg.height
        break

    # scaling factors for camera_info in x and y directions
    s_x = desired_width / float(orig_width)
    s_y = desired_height / float(orig_height)

    with rosbag.Bag(args.out_bag_file, 'w') as outbag:
        for topic, msg, t in in_bag.read_messages():
            # resize camera_info msgs' K and P accordingly
            if (topic == args.cam_info_topic):
                orig_K = msg.K
                orig_P = msg.P
                new_msg = deepcopy(msg)
                new_msg.width = desired_width
                new_msg.height = desired_height
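                # K is the row-major 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1] and
                # P the 3x4 projection matrix, so fx/cx scale by s_x and fy/cy by s_y.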
                new_msg.K = [s_x*orig_K[0], orig_K[1], s_x*orig_K[2],\
                         orig_K[3], s_y*orig_K[4], s_y*orig_K[5],\
                         orig_K[6], orig_K[7], orig_K[8]]
                new_msg.P = [s_x*orig_P[0], orig_P[1], s_x*orig_P[2], orig_P[3],\
                        orig_P[4], s_y*orig_P[5], s_y*orig_P[6], orig_P[7],\
                        orig_P[8], orig_P[9], orig_P[10], orig_P[11]]
                # print new_msg
                outbag.write(topic, new_msg, t)

            # resize images accordingly
            elif (topic == args.image_topic):
                # read bayered image as a long vector
                bayer_img = np.frombuffer(msg.data, dtype='uint8')
                # resize bayered image to 2D
                bayer_img = np.reshape(bayer_img, (orig_height, orig_width, 1))
                # debayer the raw image
                color_img = cv2.cvtColor(bayer_img, cv2.COLOR_BAYER_BG2BGR)
                # resize the debayered image
                color_img = cv2.resize(color_img,
                                       (desired_width, desired_height))
                # cv2.imshow("color_img", color_img)
                # cv2.waitKey(1)
                # convert back to a sensor_msgs/Image
                new_msg = bridge.cv2_to_imgmsg(color_img, encoding='bgr8')
                outbag.write(topic, new_msg, t)

            else:
                outbag.write(topic, msg, t)
        in_bag.close()
Example #24
0
    def __init__(self):
        self.bridge = CvBridge()
Example #25
0
    def __init__(self):
        # type: () -> None
        """
        Initiate VisualCompassHandler

        return: None
        """
        # Init ROS package
        rospack = rospkg.RosPack()
        self.package_path = rospack.get_path('bitbots_visual_compass')

        rospy.init_node('bitbots_visual_compass_setup')
        rospy.loginfo('Initializing visual compass setup')

        self.bridge = CvBridge()

        self.config = {}
        self.image_msg = None
        self.compass = None
        self.hostname = socket.gethostname()

        # TODO: docs
        self.base_frame = 'base_footprint'
        self.camera_frame = 'camera_optical_frame'
        self.tf_buffer = tf2.Buffer(cache_time=rospy.Duration(50))
        self.listener = tf2.TransformListener(self.tf_buffer)

        self.pub_head_mode = rospy.Publisher('head_mode',
                                             HeadMode,
                                             queue_size=1)

        # Register VisualCompassConfig server for dynamic reconfigure and set callback
        Server(VisualCompassConfig, self.dynamic_reconfigure_callback)

        rospy.logwarn("------------------------------------------------")
        rospy.logwarn("||                 WARNING                    ||")
        rospy.logwarn("||Please remove the LAN cable from the Robot, ||")
        rospy.logwarn("||after pressing 'YES' you have 10 Seconds    ||")
        rospy.logwarn("||until the head moves OVER the LAN port!!!   ||")
        rospy.logwarn("------------------------------------------------\n\n")

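        # Python 2/3 compatibility: use raw_input on Python 2, the builtin input on Python 3.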
        try:
            input = raw_input
        except NameError:
            pass

        accept = input("Do you REALLY want to start? (YES/n)")

        if accept == "YES":
            rospy.logwarn("REMOVE THE LAN CABLE NOW!!!!!")

            rospy.sleep(10)

            head_mode = HeadMode()
            head_mode.headMode = 10
            self.pub_head_mode.publish(head_mode)
            rospy.loginfo("Head mode has been set!")

            rospy.spin()
        else:
            rospy.signal_shutdown(
                "You aborted the process! Shuting down correctly.")
Example #26
0
    def __init__(self):
        self.bridge = CvBridge()
        self.image_pub = rospy.Publisher('/image', Image, latch=True)
Example #27
0
    def __init__(self):
        super().__init__('DebugCamera')
        self.subscription = self.create_subscription(
            Image, 'camera', self.image_callback, 100)
        self.bridge = CvBridge()
Example #28
0
    def __init__(self):
        self.image_pub_rgb = rospy.Publisher("test_image_topic_rgb", Image)

        self.bridge = CvBridge()
        self.image_sub_rgb = rospy.Subscriber("/camera/rgb/image_raw", Image,
                                              self.callback_rgb)
Example #29
0
    def __init__(self):
        ##USER PARAMETERS
        self.angle_setpoint = 90
        self.angle = 0
        self.fin_time = 0
        self.integral = 0
        self.prev_error = 0
        self.logs = [0, 0]
        ##Initiate variables
        self.leftLine = 0
        self.midLine = 0
        self.rightLine = 0
        self.distance = 0
        self.leftSpeed = 0
        self.rightSpeed = 0
        self.pan = 0
        self.tilt = 0
        self.bridge = CvBridge()

        #Setup Publishers
        self.motorPub = rospy.Publisher('motors', motors, queue_size=10)
        self.servoPub = rospy.Publisher('servos', servos, queue_size=10)
        self.blobpub = rospy.Publisher('imageProc', Image, queue_size=10)

        #Create Subscriber callbacks
        def lineCallback(data):
            self.leftLine = data.leftLine
            self.midLine = data.midLine
            self.rightLine = data.rightLine

        def distanceCallback(data):
            self.distance = data.distance

        def imageProcessing(data):
            # st_time = time.time()
            try:
                frame = self.bridge.imgmsg_to_cv2(
                    data, desired_encoding="passthrough")
            except CvBridgeError as e:
                print(e)
                return

            ##Place image processing code here!
            frame, self.logs = imageProcessQueue(frame)
            print(self.logs)
            self.angle = self.logs[1]

            self.blobpub.publish(self.bridge.cv2_to_imgmsg(frame, "bgr8"))
            # print("Time diff: %f\n Steering angle: %f\n" % (((st_time - self.fin_time)*1000), self.angle))
            # print(logs)
            # self.fin_time = st_time

        def speed_callback(data):
            ## timing code
            st_time = time.time()
            # print("Time diff: %f\n" % ((st_time - self.fin_time)*1000))
            turn_speed_here = 0.3
            turn_speed = self.pid()
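            # Differential drive mixing: add the PID correction to the left wheel and
            # subtract it from the right, on top of a 0.2 base speed (scaled by 0.9).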
            self.leftSpeed = (0.2 + turn_speed) * 0.9
            self.rightSpeed = (0.2 - turn_speed) * 0.9
            self.fin_time = st_time
            if self.logs[0] == 0 or self.distance <= 0.4:
                self.leftSpeed = 0
                self.rightSpeed = 0
                self.integral = 0
                self.prev_error = 0

            # if only one lane is detected
            # if self.logs[0] == 1:
            # 	if self.logs[2][0] == 1:
            # 		self.leftSpeed = turn_speed_here
            # 		self.rightSpeed = -turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0
            # 	if self.logs[2][1] == 1:
            # 		self.leftSpeed = -turn_speed_here
            # 		self.rightSpeed = turn_speed_here
            # 		self.integral = 0
            # 		self.prev_error = 0

            ##Leave these lines at the end

            self.publishMotors()

        #Subscribe to topics
        rospy.Subscriber('raspicam_node/image', Image, imageProcessing)
        rospy.Subscriber('imageProc', Image, speed_callback)
        rospy.Subscriber('distance', distance, distanceCallback)

        # initialize
        rospy.init_node('core', anonymous=True)
        self.rate = rospy.Rate(10)
Example #30
0
def centinel_cb(req):
    global lista

    print('~~~~~~~~~~~~~~~ Call to Jackson classification ~~~~~~~~~~~~~~~~~~~')
    '''
    os.system('> /home/steven/CarpetaJSONcaffe/response.json')              

    #del lista[:]
    lista = []

    imagen_from_request = CvBridge().imgmsg_to_cv2(req.image, "bgr8")
    path_to_save_image = '/home/steven/CarpetaJSONcaffe/'
    cv2.imwrite(os.path.join(path_to_save_image, 'image_for_jackson.png'), imagen_from_request)

    print("HELLO JACKSON")
    os.system('curl --form "imagefile=@/home/steven/CarpetaJSONcaffe/image_for_jackson.png" http://172.18.33.89:5000/index > /home/steven/CarpetaJSONcaffe/response.json')
    print("BYE BYE JACKSON")
    

    with open('/home/steven/CarpetaJSONcaffe/response.json') as data_file:    
               generatedMap=  json.load(data_file)
    '''
    lista = []

    imagen_from_request = CvBridge().imgmsg_to_cv2(req.image, "bgr8")

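    # Encode the frame as PNG in memory and POST it to the remote detector;
    # the JSON response is expected to contain 'classes', 'bboxes' and 'scores'.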
    _, img_encoded = cv2.imencode('.png', imagen_from_request)

    url = 'http://172.18.33.118:5000/index'
    files = {'imagefile': img_encoded.tobytes()}
    r = requests.post(url, files=files)

    generatedMap = json.loads(r.text)

    generatedMapClasses = generatedMap["classes"]
    generatedMapBBoxes = generatedMap["bboxes"]
    generatedMapScores = generatedMap["scores"]

    try:
        if len(generatedMapClasses) > 0:
            for x in range(0, len(generatedMapClasses)):
                print(x)
                print(generatedMapClasses[x])
                print(generatedMapBBoxes[x])
                print((generatedMapScores[x]))
                msg = Prediction()
                msg.label = generatedMapClasses[x]
                msg.bbox = generatedMapBBoxes[x]
                msg.score = float((generatedMapScores[x]))
                lista.append(msg)
    except Exception:
        # fall back to a single dummy prediction if the response cannot be parsed
        print("No object detected, score=0")
        msg = Prediction()
        msg.label = "NotObjectDetected"
        msg.bbox = [10, 10, 40, 40]
        msg.score = 0.5
        lista.append(msg)

    msgList = PredictionsList()
    msgList.n = len(lista)
    msgList.predictions = lista

    #return the predictions list to the service

    return imageTaggerResponse(msgList)