Example #1
def detectCargos(data, side):
    frame = CvBridge().imgmsg_to_cv2(data, 'bgr8') #[90:150,120:200]
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    out = frame.copy()
    auxFrame = frame.copy()
    out_mask = cv2.cvtColor(np.zeros(out.shape, np.uint8), cv2.COLOR_BGR2GRAY)


    linePoints = getDevideLine()

    kernel = np.ones((5,5),np.uint8)

    for key in ranges:  # for each color mask
        if len(ranges[key]) == 4:  # two HSV bands (e.g. red wraps around hue 0)
            mask = cv2.morphologyEx(cv2.inRange(hsv, ranges[key][0], ranges[key][1]) + cv2.inRange(hsv, ranges[key][2], ranges[key][3]), cv2.MORPH_OPEN, kernel)
        else:
            mask = cv2.morphologyEx(cv2.inRange(hsv, ranges[key][0], ranges[key][1]), cv2.MORPH_OPEN, kernel)
        contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
        
        contours = [i for i in contours if cv2.contourArea(i) > OBJ_AREA]
        
        #print(str(len(contours)) + key)
        curContours = []
        for cnt in contours:
            [x, y, w, h] = cv2.boundingRect(cnt)
            if side == 'left':
                if x + w//2 < linePoints[0][0]:
                    curContours.append(cnt)
            else:
                if x + w//2 > linePoints[0][0]:
                    curContours.append(cnt)
        contours = curContours
        #print(str(len(contours)) + key)
        cargos_colors_quantity[key] += len(contours)


        for cnt in contours: #draw contours
            cv2.drawContours(out, [cnt], -1, (255, 255, 255), 2)
            [x, y, w, h] = cv2.boundingRect(cnt)
            cv2.putText(out, key, (x - 15, y + h + 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.7, (255,255,255), 1)
            cv2.putText(out_mask, key, (x - 15, y + h + 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.7, (255,255,255), 1)
        
        out_mask = cv2.bitwise_or(out_mask, mask)

    auxFrame = cv2.line(auxFrame, linePoints[0],linePoints[1], (0,0,0), 2)
    if side == 'left': dx = -20
    else: dx = 20
    for k in range(1, 3):
        auxFrame = cv2.arrowedLine(auxFrame , (linePoints[0][0], 80*k), (linePoints[0][0] + dx, 80*k), (0,0,0), 2)
    
    out = cv2.bitwise_and(out, out, mask=out_mask)
    vis = np.concatenate((out, auxFrame), axis=1)
    debug.publish(CvBridge().cv2_to_imgmsg(vis, 'bgr8'))
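The example above relies on several module-level names that are not shown (ranges, OBJ_AREA, cargos_colors_quantity, getDevideLine, debug). A minimal sketch of plausible definitions, assuming red needs two HSV bands and the dividing line is vertical:

import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge

# Hypothetical HSV ranges; a four-entry list means two bands (hue wraps at 0 for red).
ranges = {
    'red':   [np.array([0, 100, 100]), np.array([10, 255, 255]),
              np.array([170, 100, 100]), np.array([180, 255, 255])],
    'green': [np.array([40, 100, 100]), np.array([80, 255, 255])],
}
OBJ_AREA = 300  # assumed minimum contour area in pixels
cargos_colors_quantity = {key: 0 for key in ranges}
debug = rospy.Publisher('debug_image', Image, queue_size=1)

def getDevideLine():
    # Assumed: returns the two endpoints of the vertical dividing line.
    return [(160, 0), (160, 240)]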
Example #2
    def CameraDepth_callback(self, data):
        try:
            cv_image = CvBridge().imgmsg_to_cv2(data, "16UC1")
            self.depth_image = cv_image.copy()
        except CvBridgeError as e:
            print(e)
        print('Received depth!')
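A sketch of how this callback might be wired into a node; the topic name and the DepthNode class are assumptions:

import rospy
from sensor_msgs.msg import Image

rospy.init_node('depth_listener')
node = DepthNode()  # hypothetical class containing CameraDepth_callback
rospy.Subscriber('/camera/depth/image_raw', Image, node.CameraDepth_callback)
rospy.spin()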
Example #3
def callback(image_msg):
    frame = CvBridge().imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
    frame = cv2.cvtColor(frame, cv2.COLOR_YCrCb2RGB)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    print(frame.shape)
    l1 = cv2.getTrackbarPos ("l1", "Colorbars")
    h1 = cv2.getTrackbarPos ("h1", "Colorbars")
    l2 = cv2.getTrackbarPos ("l2", "Colorbars")
    h2 = cv2.getTrackbarPos ("h2", "Colorbars")
    l3 = cv2.getTrackbarPos ("l3", "Colorbars")
    h3 = cv2.getTrackbarPos ("h3", "Colorbars")

    low_th  = (l1, l2, l3)
    high_th = (h1, h2, h3)
    print("@")
    inrange_filter.set_ths (low_th, high_th)

    mask = inrange_filter.apply (frame)
    print("&")
    enlighted = frame.copy ()
    enlighted [:, :, 0] = np.array (np.add (enlighted [:, :, 0], np.multiply (mask, 0.5)), dtype = np.uint8)

    #cv2.imshow ("enlighted", enlighted)
    #cv2.imshow ("mask", mask)cv2.waitKey(2)
    result = np.concatenate ((enlighted, to_three (mask)), axis = 1)
    cv2.waitKey(2)
    #cv2.imshow ("result", result)
    #os.system ('clear')    
    print (low_th, high_th)
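The callback reads six trackbars from a "Colorbars" window and uses two helpers (inrange_filter, to_three) that are not shown. A minimal setup sketch under those assumptions:

import cv2
import numpy as np

def nothing(_):
    pass  # no-op trackbar callback

cv2.namedWindow("Colorbars")
for name, init in [("l1", 0), ("h1", 255), ("l2", 0), ("h2", 255), ("l3", 0), ("h3", 255)]:
    cv2.createTrackbar(name, "Colorbars", init, 255, nothing)

class InRangeFilter:
    # Hypothetical wrapper around cv2.inRange matching the calls above.
    def set_ths(self, low, high):
        self.low, self.high = np.array(low), np.array(high)
    def apply(self, frame):
        return cv2.inRange(frame, self.low, self.high)

inrange_filter = InRangeFilter()

def to_three(mask):
    # Stack a single-channel mask into three channels for concatenation.
    return cv2.merge((mask, mask, mask))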
Example #4
class ImageOverlay:
    def __init__(self):
        self.velocity = TextOverlay("Velocity", (50, 50))
        self.steering = TextOverlay("Steering", (50, 100))
        self.img = np.ones(
            (700, 1000, 3), dtype=np.uint8) * 128  # default gray background
        print("hello world")

    # update the image to the new image from camera feed
    def image_callback(self, data):
        self.img = CvBridge().compressed_imgmsg_to_cv2(data)
        print("hi\n\n\n\n\n\n\n\n\n\n\n\n")

    # configure and overlay stats on the image
    def stats_callback(self, data):
        new_vel = data.drive.speed
        new_steer = data.drive.steering_angle
        self.velocity.update_text(new_vel)
        self.steering.update_text(new_steer)

    def overlay(self):
        img_out = self.img.copy()
        self.velocity.text_overlay(img_out)
        self.steering.text_overlay(img_out)
        #cv2.imshow('image', self.img)
        #cv2.waitKey(0)
        #cv2.destroyAllWindows()
        return img_out

    def run(self):
        rospy.init_node("overlay_image")

        self.publisher = rospy.Publisher('overlay/compressed',
                                         CompressedImage,
                                         queue_size=1)

        # Read in camera feed, change first arg to read a different camera
        self.camera_subscriber = rospy.Subscriber(
            '/zed/left/image_raw/compressed',
            CompressedImage,
            self.image_callback,
            queue_size=1)
        # Read in stats, change this command to read in a different source of stats to overlay
        self.stats_subscriber = rospy.Subscriber(
            '/aamfsd_robot_control/command',
            AckermannDriveStamped,
            self.stats_callback,
            queue_size=1)

        rate = rospy.Rate(3)  # default 3 Hz

        print("Running camera overlay in overlay/compressed")

        while not rospy.is_shutdown():
            img = self.overlay()
            compressed_img = CvBridge().cv2_to_compressed_imgmsg(img)
            self.publisher.publish(compressed_img)
            rate.sleep()
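TextOverlay is not shown; a plausible minimal stand-in that matches how it is used here (label and position fixed at construction, numeric value updated per message):

import cv2

class TextOverlay:
    # Hypothetical helper: draws "<label>: <value>" at a fixed position.
    def __init__(self, label, pos):
        self.label = label
        self.pos = pos
        self.text = ""

    def update_text(self, value):
        self.text = "%s: %.2f" % (self.label, value)

    def text_overlay(self, img):
        cv2.putText(img, self.text, self.pos, cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (255, 255, 255), 2)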
Example #5
 def callback(self, ros_msg):
     start_time = time.time()
     # Convert the incoming camera image
     ori_image = CvBridge().imgmsg_to_cv2(ros_msg)
     use_image = ori_image.copy()
     # Center-crop a 300x300 window (// keeps the slice indices integral)
     use_image = use_image[(use_image.shape[0] - 300) //
                           2:(use_image.shape[0] + 300) // 2,
                           (use_image.shape[1] - 300) //
                           2:(use_image.shape[1] + 300) // 2]
     # Remove black points
     ave_inten = use_image.mean().copy()
     for i in range(use_image.shape[0]):
         for j in range(use_image.shape[1]):
             if use_image[i, j] < 10:
                 use_image[i, j] = ave_inten
     cv_image = use_image / 1000.0
     cv_image = np.clip(cv_image, 0.2, 1)
     cv_image = np.uint8(cv_image * 255)
     crop_h = int((ori_image.shape[0] - 300) / 2)
     crop_w = int((ori_image.shape[1] - 300) / 2)
     offsets = np.array([crop_w, crop_h])
     # Process the image
     depth_img = processing_depth(cv_image)
     #depth_img = processing_depth(depth_read)
     # Predict the grasp
     q_img, cos_img, sin_img, width_img = predict(depth_img)
     cvt_img = np.uint8(q_img * 255)
     cvt_img = cv2.applyColorMap(cvt_img, cv2.COLORMAP_JET)
     # Post process the output
     dis_img, grasp_pts, center = process_output(cvt_img,
                                                 q_img,
                                                 cos_img,
                                                 sin_img,
                                                 width_img,
                                                 grip_length=30)
     # Publish the grasp points
     center = center + offsets
     self.msg.depth = ori_image[290, 300]
     self.msg.center = list(center)
     self.msg.pt1 = list(grasp_pts[0] + offsets)
     self.msg.pt2 = list(grasp_pts[1] + offsets)
     self.msg.pt3 = list(grasp_pts[2] + offsets)
     self.msg.pt4 = list(grasp_pts[3] + offsets)
     try:
         self.grasp_pub.publish(self.msg)
     except KeyboardInterrupt:
         print("Publish error!")
     end_time = time.time()
     execute_time = end_time - start_time
     print("Excute time: ", int(execute_time * 1000), "ms")
     # Display the output
     cv2.imshow("Image depth", cv_image)
     #cv2.imshow("Image depth", depth_img)
     cv2.waitKey(3)
     cv2.imshow("Image quality", dis_img)
     cv2.waitKey(3)
Example #6
    def ImgCcallback(self, ros_img):
        global imgi
        imgi = mx.img.imdecode(ros_img.data)
        global img_rect
        global img_raw
        global Frame
        img_rect = CvBridge().compressed_imgmsg_to_cv2(ros_img)
        # img_raw = img_rect.copy()

        Frame = pv.Image(img_rect.copy())
Example #7
    def on_image_received(self, image):
        rgb_image = CvBridge().imgmsg_to_cv2(image.rgb, "bgr8")
        scharred_image = CvBridge().imgmsg_to_cv2(image.scharred, "mono8")

        bboxes = self.find_location_of_plate(rgb_image)
        paint_image = rgb_image.copy()
        if (len(bboxes) == 0):
            cycle_completed_msg = Bool()
            cycle_completed_msg.data = True
            self.cycle_complete_publisher.publish(cycle_completed_msg)
        else:
            plates_msg = Plates()
            for bbox in bboxes:
                plate_msg = Plate()
                plate_msg.top_left.x = bbox[0]
                plate_msg.top_left.y = bbox[1]
                plate_msg.down_right.x = bbox[2]
                plate_msg.down_right.y = bbox[3]
                plates_msg.plates.append(plate_msg)
                cv2.rectangle(paint_image, (bbox[0], bbox[1]),
                              (bbox[2], bbox[3]), (0, 255, 0), 2)
            self.plates_publisher.publish(plates_msg)
        self.show_image_publisher.publish(
            self.bridge.cv2_to_imgmsg(paint_image, "bgr8"))
Example #8
class view(object):
    def __init__(self):
        self.image_sub   = rospy.Subscriber("/image",  Image, self.imageCallback, queue_size=10)
        self.cluster_sub = rospy.Subscriber("/object_info", ObjectInfoArray, self.clusterCallback, queue_size=10)  
        self.image_pub = rospy.Publisher("/object_image", Image, queue_size=10)
        self.flag = False

        self.list = ['person', 'car']

    def imageCallback(self, image_msg):
        try:
            self.cv_image = CvBridge().imgmsg_to_cv2(image_msg, "bgr8")
            self.frame_id = image_msg.header.frame_id
            self.flag = True
        except CvBridgeError as e:
            print(e)

    def clusterCallback(self, cluster_msg):
        if self.flag:
            print("ALL GREEN")
            image = self.cv_image.copy()
            for i in range(len(cluster_msg.object_array)):
                bbox = cluster_msg.object_array[i]
                if bbox.Class in self.list:
                    print("class:%s score:%5.2f xmin:%d ymin:%d xmax:%d ymax:%d" % (bbox.Class, bbox.probability, bbox.xmin, bbox.ymin, bbox.xmax, bbox.ymax))
                    if bbox.Class == self.list[0]:
                        cv2.rectangle(image, (bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax), (0, 200, 0), 5)
                    elif bbox.Class == self.list[1]:
                        cv2.rectangle(image, (bbox.xmin, bbox.ymin), (bbox.xmax, bbox.ymax), (0, 0, 200), 5)
           
            pub_image = CvBridge().cv2_to_imgmsg(image, "bgr8")
            self.image_pub.publish(pub_image)

    def main(self):
        rospy.init_node("image_view")
        rospy.spin()
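rospy requires init_node to run before the subscribers created in __init__, so a safe entry point initializes the node first (the duplicate init inside main could then be dropped):

if __name__ == '__main__':
    rospy.init_node("image_view")
    viewer = view()
    rospy.spin()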
Example #9
class image_converter:
    def __init__(self,name):
        self._camcon = baxter_interface.CameraController(name+'_hand_camera')
        self._resolution = (480,300) # available resolutions: 1280x800, 960x600, 640x400, 480x300, 384x240
        self._image_sub = rospy.Subscriber('/cameras/'+name+'_hand_camera/image', Image, self._image_callback)
        self._original_image = None # latest raw image from the camera (OpenCV format)
        self._MAX_SCALE = 100       # returned when no chess piece is found
        self._pick_bias = (0.025,0.023) # the gripper is not at the center of the camera
        # lower/upper HSV thresholds per color; blue: [90,130,30],[130,200,150]
        self._color_dict = {'blue':[[70,80,30],[180,255,255]],'purple':[[130,50,0],[255,200,255]]}
        self.image_updated = False
    def open_cam(self, on=True):
        # open (or close) the camera
        if on:
            #TODO consider closing the head camera; Baxter only allows two cameras open at once
            self._camcon.resolution = self._resolution
            self._camcon.open()
        else:
            self._camcon.close()

    # convert to an OpenCV image and store it
    def _image_callback(self, img_data):
        try:
            self._original_image = CvBridge().imgmsg_to_cv2(img_data, "bgr8") # convert from ROS Image to OpenCV format
            self.image_updated = True
        except CvBridgeError as e:
            print(e)

    # Image processing
    # The preprocessing pipeline is:
    # convert to HSV color space (colors are easier to separate in HSV) -->
    # extract the blue parts of the image -->
    # erode and dilate to remove noise --> convert to grayscale --> binarize
    def _image_process(self, color='blue', best_is_nearest=True): # or biggest
        self.image_updated = False
        # busy-wait until a new image arrives
        while not self.image_updated:
            pass
        original = self._original_image.copy()

        # image process to get the position of chess
        hsv = cv2.cvtColor(original, cv2.COLOR_BGR2HSV)    # convert to HSV color space
        
        lower_color = np.array(self._color_dict[color][0])
        upper_color = np.array(self._color_dict[color][1])
        mask = cv2.inRange(hsv, lower_color, upper_color)  # get color roi
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10)) 
        open_img = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
        close_img = cv2.morphologyEx(open_img, cv2.MORPH_CLOSE, kernel)
        res = cv2.bitwise_and(original, original, mask= close_img) #open_img
        gray_img = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        ret, bin_img = cv2.threshold(gray_img, 10, 255, cv2.THRESH_BINARY)        

        contours, hierarchy = cv2.findContours(bin_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)        
        if len(contours)<1: # no object found
            return [self._MAX_SCALE,self._MAX_SCALE,200]
        [bx,by,bw,bh,ba]=[0]*5
        brect = None
        best = 1000
        scale = 0.00064 # meters per pixel #TODO derive from resolution and height
        # cv2.imshow("Img",bin_img)
        # cv2.waitKey(0)
        biasx = self._resolution[0]/2+self._pick_bias[1]/scale
        biasy = self._resolution[1]/2-self._pick_bias[0]/scale
        for c in contours:
            # find bounding box coordinates
            # x, y, w, h = cv2.boundingRect(c)            
            rect = cv2.minAreaRect(c)            
            x, y = rect[0]   # center coordinates
            w, h = rect[1]   # width and height; always width >= height
            angle = rect[2]  # angle in [-90, 0)
            if best_is_nearest:
                now = abs(x-biasx)+abs(y-biasy)
            else:
                now = 1000-(w+h)
            if w*h <500:
                now = best
            box = cv2.boxPoints(rect)
            box = np.int0(box) # int32 / int64
            cv2.drawContours(original, [box], 0, (0, 255, 0), 1)  # draw this rectangle
            #print (now)
            if now<best:
                [bx,by,bw,bh,ba,best]= [x,y,w,h,angle,now]
                brect = rect
                best = now
        if brect is None: # every candidate was too small
            return [self._MAX_SCALE,self._MAX_SCALE,200]
        box = cv2.boxPoints(brect)
        box = np.int0(box) # int32 / int64
        cv2.drawContours(original, [box], 0, (0, 0, 255), 2)  # draw the best rectangle
        # cv2.rectangle(original, (bx, by), (bx + bw, by + bh), (0, 255, 0), 2)
        cv2.rectangle(original, (np.int0(biasx), np.int0(biasy)), (np.int0(biasx) + 1, np.int0(biasy) + 1), (255, 0, 0), 2)
        print(bx, by, bw, bh, ba)
        cv2.imshow("Searched", original)
        cv2.waitKey(3) # a short wait reduces display time and speeds things up
        return [(bx-biasx)*scale,(by-biasy)*scale,abs(ba)]
Example #10
    def detect_char(self,req):

        charObjList = []
        charName    = []
        charNum     = []
        angle       = []
        res =  detectobjectionSrvResponse.NOT_DETECTED


        self.charname.clear()
        self.charnum.clear()
        self.poseList.clear()
        self.angleList.clear()

        imageData = rospy.wait_for_message('/image_converter/output_video', Image, timeout=5)
        try:
            cv_image = CvBridge().imgmsg_to_cv2(imageData,"bgr8")

        except CvBridgeError as e:
            print(e)

        global args
        cv2.imwrite(args.output_path+"cv_image.jpg",cv_image)
        if req.objectType == detectobjectionSrvRequest.ALL_OBJECT:
            global img, area, point1, point2
            img = cv_image.copy()
            img2 = cv_image.copy()
            im = cv_image[:, :, ::-1]

            if area == 0:
                cv2.namedWindow('image')
                cv2.setMouseCallback('image', self.on_mouse)
            while True:
                if(area == 1):
                    break
                cv2.imshow('image', im)
                cv2.waitKey(30)
            
            cv2.destroyAllWindows()
            cv2.waitKey(15)
            area = 1
            h, w, _ = im.shape
            start_time = time.time()
            im_resized, (ratio_h, ratio_w) = self.resize_image(im)
            h, w, _ = im.shape

            timer = {'net': 0, 'restore': 0, 'nms': 0}
            start = time.time()



            score, geometry = self.sess.run([self.f_score, self.f_geometry], feed_dict={self.input_images: [im_resized]})
            timer['net'] = time.time() - start

            boxes, timer = self.detect(score_map=score, geo_map=geometry, timer=timer)

            if boxes is not None:
                boxes = boxes[:, :8].reshape((-1, 4, 2))
                boxes[:, :, 0] /= ratio_w
                boxes[:, :, 1] /= ratio_h

            duration = time.time() - start_time
            print('[timing] {}'.format(duration))

            cv2.rectangle(img2, point1, point2, (255,0,0), 5)

            cv2.imwrite(args.output_path+"img2.jpg",img2)

            boxnum = 0
            boxdic = {}
            if boxes is not None:
                i = 0
                for box in boxes:
                    if i > 9:
                        break
                    # to avoid submitting errors
                    box = self.sort_poly(box.astype(np.int32))
                    if np.linalg.norm(box[0] - box[1]) < 5 or np.linalg.norm(box[3]-box[0]) < 5:
                        continue
                    # check that the box lies inside the user-selected region
                    x0 = box[0,0] >= point1[0] and box[0,0] <= point2[0]
                    y0 = box[0,1] >= point1[1] and box[0,1] <= point2[1]
                    x1 = box[1,0] >= point1[0] and box[1,0] <= point2[0]
                    y1 = box[1,1] >= point1[1] and box[1,1] <= point2[1]
                    x2 = box[2,0] >= point1[0] and box[2,0] <= point2[0]
                    y2 = box[2,1] >= point1[1] and box[2,1] <= point2[1]
                    x3 = box[3,0] >= point1[0] and box[3,0] <= point2[0]
                    y3 = box[3,1] >= point1[1] and box[3,1] <= point2[1]
                    if x0 and y0 and x1 and y1 and x2 and y2 and x3 and y3:
                        print("box", box[0, 0], box[0, 1], box[1, 0], box[1, 1], box[2, 0], box[2, 1], box[3, 0], box[3, 1])
                        img2 = cv_image.copy()

                        charPose = Pose()
                        charPose.position.x = int(box[3,0]+(box[1,0]-box[3,0])/2)
                        charPose.position.y = int(box[0,1]+(box[2,1]-box[0,1])/2)

                        # Solve for the chessboard-plane coordinates (XB, YB) from the
                        # pixel position, then transform them into base-frame (XW, YW, ZW).
                        a1 = Pixel2ChessMatrix[0, 0] - Pixel2ChessMatrix[2, 0] * charPose.position.x
                        a2 = Pixel2ChessMatrix[0, 1] - Pixel2ChessMatrix[2, 1] * charPose.position.x
                        a3 = Pixel2ChessMatrix[0, 2] - Pixel2ChessMatrix[2, 2] * charPose.position.x - Pixel2ChessMatrix[2, 3] * charPose.position.x + Pixel2ChessMatrix[0, 3]
                        a4 = Pixel2ChessMatrix[1, 0] - Pixel2ChessMatrix[2, 0] * charPose.position.y
                        a5 = Pixel2ChessMatrix[1, 1] - Pixel2ChessMatrix[2, 1] * charPose.position.y
                        a6 = Pixel2ChessMatrix[1, 2] - Pixel2ChessMatrix[2, 2] * charPose.position.y - Pixel2ChessMatrix[2, 3] * charPose.position.y + Pixel2ChessMatrix[1, 3]

                        YB = (a3 * a4 - a1 * a6) / (a1 * a5 - a2 * a4)
                        XB = -1 * (a2 * YB + a3) / a1

                        XW = Chess2BaseMatrix[0, 0] * XB + Chess2BaseMatrix[0, 1] * YB + Chess2BaseMatrix[0, 3]
                        YW = Chess2BaseMatrix[1, 0] * XB + Chess2BaseMatrix[1, 1] * YB + Chess2BaseMatrix[1, 3]
                        ZW = Chess2BaseMatrix[2, 0] * XB + Chess2BaseMatrix[2, 1] * YB + Chess2BaseMatrix[2, 3]

                        charPose.position.x = XW + 29
                        charPose.position.y = YW + 16

                        self.poseList.append(charPose)

                        cv2.circle(img2, tuple(box[0]), 3, (0,255,0), 2)
                        cv2.circle(img2, tuple(box[1]), 3, (255,255,0), 2)
                        cv2.circle(img2, tuple(box[2]), 3, (255,255,255), 2)
                        cv2.circle(img2, (int(charPose.position.x), int(charPose.position.y)), 3, (0,255,0), 2)

                        cv2.imwrite(args.output_path+"img3.jpg",img2)

                        ###################
                        boxdic['box%d' % boxnum] = [[box[0, 0], box[0, 1]], [box[1, 0], box[1, 1]], [box[2, 0], box[2, 1]], [box[3, 0], box[3, 1]]]
                        ####################
                        image_happy = cv_image.copy()
                        self.rotate(image_happy,box[1],box[2],box[3],box[0])




                        cv2.polylines(im[:, :, ::-1].copy(), [box.astype(np.int32).reshape((-1, 1, 2))], True, color=(255, 255, 0), thickness=1)
                    i = i + 1
                cv2.imwrite(args.output_path+"detection.jpg",im[:, :, ::-1])

        charObjList = self.poseList
        charName    = self.charname
        charNum     = self.charnum
        angle       = self.angleList
        print(charName)
        print('Is it ok? Y/N')
        w = input()
        return detectobjectionSrvResponse(res,charObjList,charName,charNum,angle)
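The geometry block above indexes Pixel2ChessMatrix and Chess2BaseMatrix as NumPy arrays; they would be 3x4 calibration matrices computed offline. A hypothetical setup (file names are placeholders):

import numpy as np

# Pixel-to-chessboard projection and chessboard-to-base transform, both 3x4.
Pixel2ChessMatrix = np.loadtxt('pixel2chess_calibration.txt').reshape(3, 4)
Chess2BaseMatrix = np.loadtxt('chess2base_calibration.txt').reshape(3, 4)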
Example #11
    def callback(self, data):
        retstat = 0
        pos = 99

        frame = CvBridge().imgmsg_to_cv2(data, 'bgr8')

        # Our operations on the frame come here
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        pose = self.tracker.locate_marker(gray)

        #print(pose.quality)

        markerpose_msg = markerpose()
        markerpose_msg.header = data.header
        markerpose_msg.order = pose.order
        markerpose_msg.x = pose.x
        markerpose_msg.y = pose.y
        markerpose_msg.theta = pose.theta
        markerpose_msg.quality = pose.quality

        pos = self.determinePos(pose.x, pose.y)
        markerpose_msg.x = pos[0]
        markerpose_msg.y = pos[1]
        if markerpose_msg.quality > 0.5:
            cv2.circle(frame, (pose.x, pose.y), 20, (0, 0, 255), 2)
            orientation = min(
                abs(
                    math.atan2(-(pose.x - self.image_width / 2),
                               pose.y - self.image_height / 2)),
                abs((math.pi - math.atan2(-(pose.x - self.image_width / 2),
                                          pose.y - self.image_height / 2))))
            #print math.degrees(orientation)
            #print orientation
            self.markerpose_pub.publish(markerpose_msg)
            #self.markerstatus_pub.publish(1)
            if (orientation > 0.1):
                retstat = 1
            else:
                retstat = 2

        #elif markerpose_msg.quality > 0.2:
        # self.markerstatus_pub.publish()
        else:
            retstat = 0
            #self.markerstatus_pub.publish(0)

            #print "pose x and y", pose.x, pose.y+

        #self.markerpose_pub.publish(markerpose_msg)

        #self.image_pub.publish(CvBridge().cv2_to_imgmsg(frame,'bgr8'))

        ##custom_dictionary = cv2.aruco.custom_dictionary(6, 4)
        #cv2.aruco.detectMarkers(frame)
        #markers = cv2.aruco.detectMarkers(gray, cv2.getPrede,parameters=parameters)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(
            gray, self.aruco_dict, parameters=self.parameters)
        #print corners, ids
        if ids is not None:  # detectMarkers returns ids=None when nothing is detected
            aruco = markerpose()
            aruco.header = data.header
            aruco.order = 0
            aruco.theta = 0
            aruco.quality = 1
            #print(corners[0][0][0][0])
            X = (corners[0][0][0][0] + corners[0][0][1][0] +
                 corners[0][0][2][0] + corners[0][0][3][0]) / 4
            Y = (corners[0][0][0][1] + corners[0][0][1][1] +
                 corners[0][0][2][1] + corners[0][0][3][1]) / 4
            arucopos = self.determinePos(X, Y)
            aruco.x = arucopos[0]
            aruco.y = arucopos[1]
            self.arucopose_pub.publish(aruco)
            cv2.circle(frame, (int(X), int(Y)), 20, (0, 0, 255), 2)

        #self.determineHeight(corners[0]);
        frame_markers = cv2.aruco.drawDetectedMarkers(frame.copy(), corners,
                                                      ids)

        self.image_pub.publish(CvBridge().cv2_to_imgmsg(frame_markers, 'bgr8'))
        self.markerstatus_pub.publish(retstat)
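The callback relies on self.aruco_dict and self.parameters; a typical initialization with the classic (pre-4.7) cv2.aruco API, where the dictionary choice is an assumption:

import cv2

class MarkerNode:
    def __init__(self):
        # DICT_6X6_250 is assumed; pick the dictionary the markers were printed from.
        self.aruco_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_6X6_250)
        self.parameters = cv2.aruco.DetectorParameters_create()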
Example #12
class ImageOverlay():
  def __init__(self):
    self.scale_text = 0
    self.heading = 0
    self.img = np.ones((700,1000,3),dtype=np.uint8)*128 # default gray background

    current_dir = os.path.dirname(os.path.abspath(__file__))
    compass_filename = '%s/compass.png' % current_dir
    scale_filename = '%s/scale.png' % current_dir

    # Load scale image
    self.scale_img = load_image(scale_filename)

    # Load compass image
    self.compass_img = load_image(compass_filename)

  def image_callback(self, data):
    self.img = CvBridge().compressed_imgmsg_to_cv2(data)

  def heading_callback(self, data):
    self.heading = data.data

  def scale_callback(self, data):
    self.scale_text = data.data

  def overlay_telemetry(self):
    im_out = np.uint8(self.img.copy())
    min_image_dimension = np.min([self.img.shape[1],self.img.shape[0]])

    compass_img = self.compass_img
    compass_img = rotate_image(compass_img, self.heading)
    compass_img = warp_image(compass_img)

    # Resize compass image
    width_percentage = 0.6
    width_px = width_percentage*min_image_dimension
    compass_img = resize_image(compass_img, width_px)

    # Resize scale image
    scale_img = resize_image(self.scale_img, width_px)

    # Overlay compass image
    x = im_out.shape[1] // 2 - compass_img.shape[1] // 2 # center width position
    y = im_out.shape[0] - int(compass_img.shape[0]) - 1 # near bottom
    im_out = overlay_image(compass_img, im_out, x, y)

    # Overlay scale image
    x = im_out.shape[1] // 2 - scale_img.shape[1] // 2 # center width position
    y = im_out.shape[0] // 2 # center height position
    im_out = overlay_image(scale_img, im_out, x, y)
    
    # Set text overlay near scale_img indicating the scale value
    scale_factor = (min_image_dimension / 350.0) # scale font relative to img
    font = 2
    scale = 0.5 * scale_factor
    thick = 1 * scale_factor
    line_type = 4

    color = tuple(np.ones(im_out.shape[2])*255) # white text: one 255 per image channel
    # Set text at the small end of the scale bar
    loc = (int(x-13*scale_factor),int(y+7*scale_factor))
    cv2.putText(im_out, "0", loc, font, scale, color, int(thick), line_type)
    # Set text at the large end of the scale bar
    scale_text_pretty = "%.1f cm" % self.scale_text
    loc = (x+scale_img.shape[1]+5,int(y+7*scale_factor))
    cv2.putText(im_out, scale_text_pretty, loc, font, scale, color, int(thick), line_type)

    ## Preview result
    # cv2.imshow("Image", im_out)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()

    return im_out

  def ros_loop(self):
    publisher = rospy.Publisher('overlay/compressed', CompressedImage, queue_size=1)
    rospy.Subscriber('camera/compressed', CompressedImage, self.image_callback, queue_size=1)

    if not self.heading:
      rospy.Subscriber('heading', Float32, self.heading_callback, queue_size=1)
    if not self.scale_text:
      rospy.Subscriber('scale', Float32, self.scale_callback, queue_size=1)

    rospy.loginfo('Setup complete. Image overlay process has started.')

    framerate = rospy.Rate(rospy.get_param('~framerate',3)) # default 3 Hz
    while not rospy.is_shutdown():
      img = self.overlay_telemetry()
      compressed_image = CvBridge().cv2_to_compressed_imgmsg(img)
      publisher.publish(compressed_image)
      framerate.sleep()
Example #13
def image_callback(data):
	global xc, yc
	global move_position
	global collecting
	global c_cnt
	global HSV_value
	global lower_HSV, upper_HSV
	# change to opencv
	try:
		cv_image1 = CvBridge().imgmsg_to_cv2(data, "bgr8")
	except CvBridgeError as e:
		print(e)
	cv_image_draw = cv_image1.copy()
	if(move_position):
		cv2.putText(cv_image_draw, 'please move the camera so that', (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
				(0, 255, 0), 2, cv2.LINE_AA)
		cv2.putText(cv_image_draw, 'the red color fills the green', (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
				(0, 255, 0), 2, cv2.LINE_AA)
		cv2.putText(cv_image_draw, 'rectangle, then press ENTER', (30, 130), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
				(0, 255, 0), 2, cv2.LINE_AA)
		cv2.putText(cv_image_draw, 'in another terminal!', (30, 180), cv2.FONT_HERSHEY_SIMPLEX, 0.8,
				(0, 255, 0), 2, cv2.LINE_AA)
		cv2.rectangle(cv_image_draw, (355, 310), (355 + cali_w, 310 + cali_h), (0, 255, 0), 3)
		c_cnt = 0
		cv2.imshow("win_draw", cv_image_draw)
		cv2.waitKey(1)
		return
	elif(collecting):
		c_cnt = c_cnt+1
		cv2.putText(cv_image_draw, 'collecting the hsv!', (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
			(0, 255, 0), 2, cv2.LINE_AA)
		cv2.putText(cv_image_draw, ' please wait for 5s.', (30, 80), cv2.FONT_HERSHEY_SIMPLEX, 1.2,
				(0, 255, 0), 2, cv2.LINE_AA)
		cv2.rectangle(cv_image_draw, (355, 310), (355 + cali_w, 310 + cali_h), (0, 255, 0), 3)
		frame = cv_image1[315:310 + cali_h-5, 360:355 + cali_w-5]
		HSV_value = mean_hsv(frame, HSV_value)	
		cv2.imshow("win_draw", cv_image_draw)
		cv2.waitKey(1)
		
		if(c_cnt >= collect_times):
			for i in range(len(HSV_value)):
				HSV_value[i] = HSV_value[i] / collect_times
				print(i,len(HSV_value), HSV_value[i])
			lower_HSV, upper_HSV = hsv_range(HSV_value)
			#save_hsv(name, lower_HSV, upper_HSV)
			print(lower_HSV , upper_HSV)
			collecting = 0
			cv2.destroyWindow("win_draw")
		return
	#test(lower_HSV, upper_HSV, cv_image1)

	cv_image_cp = cv_image1.copy()
	cv_image_hsv = cv2.cvtColor(cv_image_cp, cv2.COLOR_BGR2HSV)
	cv_image_gray = cv2.inRange(cv_image_hsv, lower_HSV, upper_HSV)
	# smooth and clean noise
	cv_image_gray = cv2.erode(cv_image_gray, None, iterations=2)
	cv_image_gray = cv2.dilate(cv_image_gray, None, iterations=2)
	cv_image_gray = cv2.GaussianBlur(cv_image_gray, (5,5), 0)
	# detect contour
	cv2.imshow("win1", cv_image1)
	cv2.imshow("win2", cv_image_gray)
	cv2.waitKey(1)
	contours, hier = cv2.findContours(cv_image_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
	size = []
	size_max = 0
	for i, c in enumerate(contours):
		rect = cv2.minAreaRect(c)
		box = cv2.boxPoints(rect)
		box = np.int0(box)
		x_mid = (box[0][0] + box[2][0] + box[1][0] + box[3][0]) / 4
		y_mid = (box[0][1] + box[2][1] + box[1][1] + box[3][1]) / 4
		w = math.sqrt((box[0][0] - box[1][0])**2 + (box[0][1] - box[1][1])**2)
		h = math.sqrt((box[0][0] - box[3][0])**2 + (box[0][1] - box[3][1])**2)
		size.append(w * h)
		if size[i] > size_max:
			size_max = size[i]
			index = i
			xc = x_mid
			yc = y_mid
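mean_hsv and hsv_range are not shown; plausible sketches consistent with how they are called here (accumulate the mean HSV of the calibration patch, then widen the average into inRange bounds). The tolerances are assumptions:

import cv2
import numpy as np

def mean_hsv(frame_bgr, hsv_acc):
    # Add this frame's mean H, S, V over the patch to the running accumulator.
    hsv = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV)
    means = cv2.mean(hsv)[:3]
    return [acc + m for acc, m in zip(hsv_acc, means)]

def hsv_range(hsv_value, h_tol=10, s_tol=60, v_tol=60):
    # Widen the averaged HSV into lower/upper bounds for cv2.inRange.
    h, s, v = hsv_value
    lower = np.array([max(h - h_tol, 0), max(s - s_tol, 0), max(v - v_tol, 0)], dtype=np.uint8)
    upper = np.array([min(h + h_tol, 180), min(s + s_tol, 255), min(v + v_tol, 255)], dtype=np.uint8)
    return lower, upper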
Example #14
class OffbPosCtl:
    curr_pose = PoseStamped()
    waypointIndex = 0
    detections = []
    tag_pt_x = 0
    tag_pt_y = 0
    distThreshold= 2
    detection_count=0
    depth = Image()
    depth_matrix = []
    dimg = Image()
    KP=0.005
    KD=0.0004
    KI=0.00005
    prev_tag_pt_x=0
    prev_tag_pt_y=0
    upd_tag_pt_x=0
    upd_tag_pt_y=0
    left_image = Image()
    left_height = 0
    left_width = 0
    right_image = Image()
    right_height = 0
    right_width = 0
    camera=PinholeCameraModel()
    des_pose = PoseStamped()
    saved_location = None
    isReadyToFly = False
    left_matrix = []
    right_matrix = []
    cv_image_left = []
    cv_image_right = []
    hover_loc = [-1,.5,10,0,0,0,0]
    mode="HOVER"
    rgb_target = []
    target = []
    flag_x = "allow_x"
    flag_y = "allow_y"
    target = []
    count = 1
    
    vel_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size=10)
    pub = rospy.Publisher('/data', String, queue_size=10)

    def __init__(self):
        rospy.init_node('offboard_test', anonymous=True)
        self.loc = []
        rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback= self.mocap_cb)
        rospy.Subscriber('/mavros/state',State, callback= self.state_cb)
        rospy.Subscriber('/dreams/state',String,callback=self.update_state_cb)
        rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, callback=self.yolo)
        rospy.Subscriber('/darknet_ros/detection_image', Image, callback=self.detected_image)
        rospy.Subscriber('/camera/depth/image_raw', Image, callback=self.depth_image)
        self.camera.fromCameraInfo(self.rgb_info())
        #self.lawnmover(10,5,5,0,2.5)
        #self.depth_estm(self.left_matrix,self.right_matrix)
        self.hover()

    def rgb_info(self):
        msg_header = Header()
        msg_header.frame_id = "camera_link"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0
        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg


    def depth_image(self, Img):
        self.depth = Img

        #print(self.depth.encoding)
        #print(self.depth_matrix)

    def depth_eval(self):
        cx = self.rgb_target[0]
        cy = self.rgb_target[1]

        x_left_lim = int(cx) - 3
        x_right_lim = int(cx) + 3
        y_down_lim = int(cy) - 3
        y_up_lim = int(cy) + 3
        sum = 0

        self.depth_matrix = CvBridge().imgmsg_to_cv2(self.depth, desired_encoding='passthrough')
        #self.depth_matrix = np.array(self.depth.data, dtype=np.uint8)
        cpimg = self.depth_matrix.copy().astype(float)
        mat = cv2.convertScaleAbs(cpimg)
        depth = mat[x_left_lim:x_right_lim, y_down_lim:y_up_lim]  # truncate to the patch
        #print depth
        for i in range(6):
            for j in range(6):
                sum = sum + depth[i][j]
        avg_depth = sum / 36.0  # mean over the 6x6 patch
        K = np.array([[1.0, 0.0, 320.5], [0.0, 1.0, 240.5], [0.0, 0.0, 1.0]])
        K_inv = np.array([[1.0, 0, -320.5], [0, 1.0, -240.5], [0, 0, 1.0]])
        self.target = avg_depth * np.matmul(K_inv, self.rgb_target)
        print(self.target)

	
    def yolo(self, data):
        for a in data.bounding_boxes:
            if a.Class == "truck" or a.Class == "bus":
                self.detection_count = self.detection_count + 1
                self.rgb_target = np.array([[a.xmin + (a.xmax - a.xmin)/2], [a.ymin + (a.ymax - a.ymin)/2], [1]])
                #print(self.rgb_target)
                self.depth_eval()

    def detected_image(self, detection):
        try:
            self.dimg = CvBridge().imgmsg_to_cv2(detection, "bgr8")
        except CvBridgeError as e:
            print(e)
         
    def copy_pose(self , pose):
        pt = pose.pose.position
        quat = pose.pose.orientation
        copied_pose = PoseStamped()
        copied_pose.header.frame_id = pose.header.frame_id
        copied_pose.pose.position = Point(pt.x, pt.y, pt.z)
        copied_pose.pose.orientation = Quaternion(quat.x , quat.y , quat.z , quat.w)
        return copied_pose

    def mocap_cb(self,msg1):
        self.curr_pose = msg1
        #print msg1
    
    def state_cb(self,msg):
        if msg.mode == 'OFFBOARD':
            #print msg
            #print("The mode is OFFBOARD")
            self.isReadyToFly = True
        else:
            #print("I am in state_cb")
            #print msg
            print(msg.mode)

    def update_state_cb(self,data):
        self.mode= data.data
        #print self.mode


    def hover(self):
        location = self.hover_loc
        loc = [location,
               location,
               location,
               location,
               location,
               location,
               location,
               location,
               location]
        #print loc       
        rate = rospy.Rate(10)
        shape = len(loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size= 10)
        des_pose = self.copy_pose(self.curr_pose)
        waypoint_index = 0
        sim_ctr = 1
        #print("I am here")
	
        while self.mode=="HOVER" and self.detection_count < 5 and not rospy.is_shutdown():
	    print(len(self.detections))
            if waypoint_index==shape:
                waypoint_index = 0            # changing the way point index to 0
                sim_ctr = sim_ctr + 1
                print "HOVER STOP COUNTER:" + str(sim_ctr)
            if self.isReadyToFly:
                des_x = loc[waypoint_index][0]
                des_y = loc[waypoint_index][1]
                des_z = loc[waypoint_index][2]
                des_pose.pose.position.x = des_x
                des_pose.pose.position.y = des_y
                des_pose.pose.position.z = des_z
                des_pose.pose.orientation.x = loc[waypoint_index][3]
                des_pose.pose.orientation.y = loc[waypoint_index][4]
                des_pose.pose.orientation.z = loc[waypoint_index][5]
                des_pose.pose.orientation.w = loc[waypoint_index][6]
                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z
                #print('I am here')
                dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
                if dist<self.distThreshold :
                    waypoint_index += 1
                  
            if sim_ctr == 50:
                pass
            pose_pub.publish(des_pose)
            #print(des_pose)
            rate.sleep()

    def get_descent(self, x, y, z):
        des_vel = PositionTarget()
        des_vel.header.frame_id = "world"
        des_vel.header.stamp = rospy.Time.from_sec(time.time())
        des_vel.coordinate_frame = 8  # FRAME_BODY_NED
        des_vel.type_mask = 3527      # velocity-only control (see the note below)
        des_vel.velocity.x = x
        des_vel.velocity.y = y
        des_vel.velocity.z = z
        return des_vel
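The magic numbers in get_descent decode as follows: coordinate_frame 8 is FRAME_BODY_NED, and type_mask 3527 sets every PositionTarget IGNORE flag except the velocity ones, i.e. velocity-only control. A quick check against the message constants:

from mavros_msgs.msg import PositionTarget as PT

mask = (PT.IGNORE_PX | PT.IGNORE_PY | PT.IGNORE_PZ |
        PT.IGNORE_AFX | PT.IGNORE_AFY | PT.IGNORE_AFZ |
        PT.IGNORE_YAW | PT.IGNORE_YAW_RATE)
assert mask == 3527
assert PT.FRAME_BODY_NED == 8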
Example #15
    def color_callback(self, color_msg):
        start_time = time.time()
        # Invoke the image from camera
        ori_image = CvBridge().imgmsg_to_cv2(color_msg)
        use_image = ori_image.copy()
        # Process the image
        rgb_img = processing_depth(use_image)
        # Predict the grasp
        q_img, cos_img, sin_img, width_img = prediction(rgb_img)
        # Filter the q_img
        ret = filter_thresh(q_img[0], 0.5)
        q_img_process = local_max(q_img[0])
        # Post process the output
        dis_img, grasp_pts, center = process_output(rgb_img[0], q_img[0], cos_img[0], sin_img[0], width_img[0])

        offsets = np.array([170, 90])
        dis_img2, grasp_pts_list, centers, angles = process_output2(rgb_img[0], q_img[0], cos_img[0], sin_img[0], width_img[0], offsets)
        # Publish the grasp points
        depth_point = self.depth_img[center[0]+90, center[1]+170]
        px, py, pz = self.coordinate.get_coord(depth_point, center[0]+90, center[1]+170)
        pxs, pys, pzs = self.coordinate.get_coords(self.depth_img, centers[:,0]+90, centers[:,1]+170)
        # print(pxs, pys, pzs)
        #print("Depth point: ", depth_point)
        grasp_pts = grasp_pts + offsets

        # Find the coordinate for grasping
        grasp_pcd, pcl2_pcd = self.coordinate.get_pcd(self.depth_img, grasp_pts, grasp_pts_list)

        self.msg.px = px/1000.0
        self.msg.py = py/1000.0
        self.msg.pz = pz/1000.0

        pxs = list(np.array(pxs)/1000.0)
        pys = list(np.array(pys)/1000.0)
        pzs = list(np.array(pzs)/1000.0)
        costhetas = list(np.array(angles)[:, 0])
        sinthetas = list(np.array(angles)[:, 1])
        self.msg.pxs = pxs
        self.msg.pys = pys
        self.msg.pzs = pzs
        self.msg.costhetas = costhetas
        self.msg.sinthetas = sinthetas

        # Convert from depth_img to JET
        cv2.polylines(self.depth_img, [grasp_pts], True, (0, 255, 0))

        # Publish the grasp points and pointcloud
        try:
            self.pcd_pub.publish(pcl2_pcd)
            self.grasp_pub.publish(self.msg)
        except KeyboardInterrupt:
            print("Publish error!")
        end_time = time.time()
        execute_time = end_time - start_time
        print("Excute time: ", int(execute_time*1000), "ms")
        # Display the output
        #heatmap = cv2.applyColorMap(np.uint8(q_img[0]*255), cv2.COLORMAP_JET)
        heatmap = cv2.applyColorMap(np.uint8(ret*255), cv2.COLORMAP_JET)
        q_img_process = cv2.applyColorMap(np.uint8(q_img_process*255), cv2.COLORMAP_JET)
        depth_dis = np.uint8(np.clip(self.depth_img, 0, 1000)/1000.0*255)
        cv2.imshow("Depth", depth_dis)
        cv2.waitKey(3)
        cv2.imshow("Multi-grasp", dis_img2)
        cv2.waitKey(3)
        cv2.imshow("Grasp Quality", heatmap)
        cv2.waitKey(3)
        cv2.imshow("Best Grasp", dis_img)
        cv2.waitKey(3)
Example #16
class ImageOverlay:
    def __init__(self):
        self.i=""
	self.temp=""
	self.number=0
        self.asteering_raw=0
        self.max_steering = 1
        self.min_steering = -1
        self.epsilon_steering = math.radians(0.001)
        self.img = np.ones((700,1000,3),dtype=np.uint8)*128 # default black background
    # update the image to the new image from camera feed
    def image_callback(self, data):
        self.img = CvBridge().compressed_imgmsg_to_cv2(data)




    def overlay(self):
        img_out = self.img.copy()
        return img_out


    def call_back2(self,data):
                
        ack_cmd = AckermannDriveStamped()
        ack_cmd.header.stamp = rospy.Time.now()

        drive = AckermannDrive()
        drive.speed = data.linear.x
        drive.steering_angle = data.angular.z
        # impose limits on commanded angle
        # datetime object containing current date and time
        now = datetime.now()
        self.i = now.strftime("%H:%M:%S")

        my_name = "youssf"
        # dd/mm/YY H:M:S

        image = self.overlay()[210:, :]

        if self.temp != str(self.i):
            with open("data_index.txt", "a") as myfile:
                cv2.imwrite("data/" + my_name + self.i + '.jpg', image)
                myfile.write("data/" + my_name + self.i + '.jpg,' + str(drive.steering_angle) + "\n")
                print("now =", str(self.number) + "         with id =  " + str(self.i) + "    shape" + str(image.shape))
            self.temp = str(self.i)
            self.number += 1
        ack_cmd.drive = drive
        self.publisher.publish(ack_cmd)
    def run(self):
        rospy.init_node("get_img")

        #self.publisher = rospy.Publisher('overlay/compressed', CompressedImage, queue_size=1)
        
        # Read in camera feed, change first arg to read a different camera
        self.camera_subscriber = rospy.Subscriber('/camera/left/image_raw/compressed', CompressedImage, self.image_callback, queue_size=1)
        self.publisher = rospy.Publisher('/robot_control/command', AckermannDriveStamped, queue_size=10)
        self.s=rospy.Subscriber("/cmd_vel", Twist, self.call_back2)

        rate = rospy.Rate(3)  # default 3 Hz

        #print("Running camera overlay in overlay/compressed")
        while(1):

                # init
                # choose ROI
                roi_x = 66
                roi_y = 200
                while True:
                    startTime = time.time()
                    image =self.overlay()
                    # from 1 dim to 3
                    cv2.imshow('inference (q to exit)', self.overlay())
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                    # send steering to serial port


                    endTime = time.time()
                    # calculate fps
                    fps_q.append(endTime - startTime)
Example #17
class OffbPosCtl:
    curr_pose = PoseStamped()
    des_pose = PoseStamped()
    cam_pose = PoseStamped()
    cam_img = PoseStamped()
    x_prev_error = 0
    y_prev_error = 0
    destination_x_previous = 0
    destination_y_previous = 0
    destination_z_previous = 0
    destination_pose = PoseStamped()
    waypointIndex = 0
    detections = []
    distThreshold = 2
    detection_count = 0
    depth = Image()
    depth_matrix = []
    dimg = Image()
    KP = .005
    KD = 0.0004
    des_x = 0
    des_y = 0
    des_z = 0
    camera = PinholeCameraModel()
    saved_location = None
    isReadyToFly = False
    hover_loc = [-28, 16.3, 10, 0, 0, 0, 0]
    mode = "HOVER"
    rgb_target = []
    ray_target = []
    target = []  # a column vector that stores the x,y,z target
    #dronename = rospy.get_param('iris_cam')
    vel_pub = rospy.Publisher('/mavros/setpoint_raw/local',
                              PositionTarget,
                              queue_size=10)
    pub = rospy.Publisher('/data', String, queue_size=10)

    def __init__(self):
        rospy.init_node('offboard_test', anonymous=True)
        self.loc = []
        rospy.Subscriber('/mavros/local_position/pose',
                         PoseStamped,
                         callback=self.mocap_cb)
        rospy.Subscriber('/mavros/state', State, callback=self.state_cb)
        rospy.Subscriber('/dreams/state',
                         String,
                         callback=self.update_state_cb)
        rospy.Subscriber('/darknet_ros/bounding_boxes',
                         BoundingBoxes,
                         callback=self.yolo)
        rospy.Subscriber('/darknet_ros/detection_image',
                         Image,
                         callback=self.detected_image)
        rospy.Subscriber('/camera/depth/image_raw',
                         Image,
                         callback=self.depth_image)
        #self.camera.fromCameraInfo(self.rgb_info())
        # Zhiang: use this instead
        camera_info = rospy.wait_for_message(
            "/camera/depth/camera_info",
            CameraInfo)  # Zhiang: make sure this is the correct camera
        #camera_info = rospy.wait_for_message("/camera/rgb/camera_info", CameraInfo)
        self.pinhole_camera_model = PinholeCameraModel()
        self.pinhole_camera_model.fromCameraInfo(camera_info)

        #self.listener =  tf.TransformListener()  # linked to tf_static
        self.planner()

    def mocap_cb(self, msg1):
        self.curr_pose = msg1
        #br = tf.TransformBroadcaster()
        #br.sendTransform((msg1.pose.position.x + 0, msg1.pose.position.y + 0, msg1.pose.position.z - 0.2, msg1.pose.orientation.x + 0.3825 , msg1.pose.orientation.y + 0.0003 , msg1.pose.orientation.z + 0.924 , msg1.pose.orientation.w + 0.001),rospy.Time.now(),"world", self.dronename)

    def rgb_info(self):
        msg_header = Header()
        msg_header.frame_id = "camera_link"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0
        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg

    def depth_image(self, Img):
        self.depth = Img

        #print(self.depth.encoding)
        #print(self.depth_matrix)

    def depth_eval(self):
        cx = self.rgb_target[0]
        alpha1 = 0.7
        cy = self.rgb_target[1]

        x_left_lim = int(cx) - 3
        x_right_lim = int(cx) + 3
        y_down_lim = int(cy) - 3
        y_up_lim = int(cy) + 3
        sum = 0
        #print(x_left_lim,x_right_lim,y_down_lim,y_up_lim)

        self.depth_matrix = CvBridge().imgmsg_to_cv2(
            self.depth, desired_encoding='passthrough')
        #self.depth_matrix = np.array(self.depth.data, dtype=np.uint8)
        cpimg = self.depth_matrix.copy().astype(float)
        #print(cpimg.shape)
        #mat = cv2.convertScaleAbs(cpimg, alpha = 255/cpimg.max())
        depth = cpimg[x_left_lim:x_right_lim,
                      y_down_lim:y_up_lim]  # Truncating mat
        #print depth
        if depth.size != 0:  # depth.shape is a tuple and never equals 0; check the element count
            for i in range(6):
                for j in range(6):
                    sum = sum + depth[i][j]
        avg_depth = sum / 36.0  # mean over the 6x6 patch
        # Evaluating x value from Z :

        #print(avg_depth)
        #K = np.array([[1.0, 0.0, 510],[0.0, 1.0, 384],[0.0, 0.0, 1.0]])
        #K_inv = np.array([[1.0, 0, -510],[0, 1.0, -384],[0, 0, 1.0]])
        #self.target = avg_depth *np.matmul(K_inv,self.rgb_target)
        #print(self.target)
        #x = ((self.rgb_target[0][0] - 510) * avg_depth)/#focal length from the pinhole camera model?     # fx is the focal length in the pixel coordinate
        #y = ((self.rgb_target[1][0] - 384) * avg_depth)/# focal length from the pinhole camera model?     # fy is the focal length in the pixel coordinate
        #print(self.ray_target[0])
        #print(self.ray_target[1])
        #print(avg_depth)
        #print(self.ray_target)
        x = self.ray_target[0] * avg_depth
        y = self.ray_target[1] * avg_depth
        z = avg_depth
        #print("_______________________________________________")
        #print x
        #print y
        #print z
        self.target = np.array([[x], [y], [z]])
        #hom_transformation = np.array([[0, -1, 0, 0],[-0.7071, 0, 0.7071, 0],[-.7071, 0, -.7071, 0],[0, 0, 0.2, 1]])
        hom_transformation = np.array([[0, -0.707, -0.707, 0], [-1, 0, 0, 0],
                                       [0, 0.707, -.707, 0], [0, 0, 0, 1]])

        homogeneous_coordinates = np.array([[self.target[0][0]],
                                            [self.target[1][0]],
                                            [self.target[2][0]], [1]])
        #print(homogeneous_coordinates)
        product = np.matmul(hom_transformation, homogeneous_coordinates)
        self.cam_img.pose.position.x = product[0][0]
        self.cam_img.pose.position.y = product[1][0]
        self.cam_img.pose.position.z = product[2][0]
        #print(product[0][0])
        #print(product[1][0])
        #print(product[2][0])
        #self.cam_img.pose.position.z = self.target[2]
        self.cam_img.pose.orientation.x = 0
        self.cam_img.pose.orientation.y = 0
        self.cam_img.pose.orientation.z = 0
        self.cam_img.pose.orientation.w = 1
        self.destination_pose.pose.position.x = self.cam_img.pose.position.x + self.curr_pose.pose.position.x
        self.destination_pose.pose.position.y = self.cam_img.pose.position.y + self.curr_pose.pose.position.y
        self.destination_pose.pose.position.z = self.cam_img.pose.position.z + self.curr_pose.pose.position.z
        self.destination_pose.pose.orientation.x = self.curr_pose.pose.orientation.x
        self.destination_pose.pose.orientation.y = self.curr_pose.pose.orientation.y
        self.destination_pose.pose.orientation.z = self.curr_pose.pose.orientation.z
        self.destination_pose.pose.orientation.w = self.curr_pose.pose.orientation.w
        self.destination_x = (
            (1 - alpha1) * self.destination_pose.pose.position.x) + (
                alpha1 * self.destination_x_previous)
        self.destination_y = (
            (1 - alpha1) * self.destination_pose.pose.position.y) + (
                alpha1 * self.destination_y_previous)
        self.destination_z = (
            (1 - alpha1) * self.destination_pose.pose.position.z) + (
                alpha1 * self.destination_z_previous)
        self.destination_x_previous = self.destination_x
        self.destination_y_previous = self.destination_y
        self.destination_z_previous = self.destination_z
        #print(self.destination_x,self.destination_y,self.destination_z)

    def yolo(self, data):
        while self.curr_pose.pose.position.z > 4 and self.curr_pose.pose.position.z < 14:
            for a in data.bounding_boxes:
                if (a.Class == "stop sign" or a.Class == "kite"
                        or a.Class == "cell phone" or a.Class == "car"
                        or a.Class == "truck" or a.Class == "bus"):
                    self.detection_count = self.detection_count + 1
                    #print(self.detection_count)
                    self.rgb_target = np.array(
                        [[a.xmin + (a.xmax - a.xmin) / 2],
                         [a.ymin + (a.ymax - a.ymin) / 2], [1]])
                    self.ray_target = self.pinhole_camera_model.projectPixelTo3dRay(
                        (a.xmin + (a.xmax - a.xmin) / 2,
                         a.ymin + (a.ymax - a.ymin) / 2))
                    #print(self.rgb_target)
                    #print(self.rgb_target[1])
                    self.depth_eval()

    def detected_image(self, detection):
        try:
            self.dimg = CvBridge().imgmsg_to_cv2(detection, "bgr8")
        except CvBridgeError as e:
            print(e)

    def copy_pose(self, pose):
        pt = pose.pose.position
        quat = pose.pose.orientation
        copied_pose = PoseStamped()
        copied_pose.header.frame_id = pose.header.frame_id
        copied_pose.pose.position = Point(pt.x, pt.y, pt.z)
        copied_pose.pose.orientation = Quaternion(quat.x, quat.y, quat.z,
                                                  quat.w)
        return copied_pose

    def state_cb(self, msg):
        if msg.mode == 'OFFBOARD':
            self.isReadyToFly = True
        else:
            print(msg.mode)

    def update_state_cb(self, data):
        self.mode = data.data

    def attach(self):
        attach_srv = rospy.ServiceProxy('/link_attacher_node/attach', Attach)
        attach_srv.wait_for_service()
        print('Trying to attach')

        req = AttachRequest()
        req.model_name_1 = "iris"
        req.link_name_1 = "base_link"
        req.model_name_2 = "suv"  # Insert the model name you want to attach to
        req.link_name_2 = "link"  # Insert link name you want to attach to

        attach_srv.call(req)
        print('Attached')

    def detach(self):
        attach_srv = rospy.ServiceProxy('/link_attacher_node/detach', Attach)
        attach_srv.wait_for_service()
        print('Trying to detach')

        req = AttachRequest()
        req.model_name_1 = "iris"
        req.link_name_1 = "base_link"
        req.model_name_2 = "suv"  # Insert the model name you want to attach to
        req.link_name_2 = "link"  # Insert link name you want to attach to

        attach_srv.call(req)
        print('Detached')

    def hover(self):
        location = self.hover_loc
        loc = [
            location, location, location, location, location, location,
            location, location, location
        ]

        rate = rospy.Rate(10)
        shape = len(loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local',
                                   PoseStamped,
                                   queue_size=10)
        des_pose = self.copy_pose(self.curr_pose)
        waypoint_index = 0
        sim_ctr = 1

        while self.mode == "HOVER" and self.detection_count < 100 and not rospy.is_shutdown(
        ):
            #print(self.detection_count)
            if waypoint_index == shape:
                waypoint_index = 0  # changing the way point index to 0
                sim_ctr = sim_ctr + 1
                #print "HOVER STOP COUNTER:" + str(sim_ctr)
            if self.isReadyToFly:
                des_x = loc[waypoint_index][0]
                des_y = loc[waypoint_index][1]
                des_z = loc[waypoint_index][2]
                des_pose.pose.position.x = des_x
                des_pose.pose.position.y = des_y
                des_pose.pose.position.z = des_z
                des_pose.pose.orientation.x = loc[waypoint_index][3]
                des_pose.pose.orientation.y = loc[waypoint_index][4]
                des_pose.pose.orientation.z = loc[waypoint_index][5]
                des_pose.pose.orientation.w = loc[waypoint_index][6]
                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z
                dist = math.sqrt((curr_x - des_x) * (curr_x - des_x) +
                                 (curr_y - des_y) * (curr_y - des_y) +
                                 (curr_z - des_z) * (curr_z - des_z))
                if dist < self.distThreshold:
                    waypoint_index += 1

            if sim_ctr == 50:
                pass
            pose_pub.publish(des_pose)
            rate.sleep()
        if self.detection_count >= 100:
            self.mode = "SWOOP"
            self.descent()
            #print(self.mode)

    def get_descent(self, x, y, z):
        des_vel = PositionTarget()
        des_vel.header.frame_id = "world"
        des_vel.header.stamp = rospy.Time.from_sec(time.time())
        des_vel.coordinate_frame = 8
        des_vel.type_mask = 3527
        des_vel.velocity.x = y
        des_vel.velocity.y = x
        des_vel.velocity.z = z

        return des_vel

    def descent(self):
        rate = rospy.Rate(10)  # 10 Hz
        # find tf static section under depth_eval
        #print self.mode
        while self.mode == "SWOOP" and self.curr_pose.pose.position.z > 1 and not rospy.is_shutdown(
        ):
            #print(" In while loop")
            err_x = self.des_x - self.curr_pose.pose.position.x
            err_y = self.des_y - self.curr_pose.pose.position.y
            err_z = self.curr_pose.pose.position.z - self.des_z  # self.des_z is small - close to ground.
            #print(err_y)
            #print(err_z)
            # print(err_y)

            x_change = -(err_x * self.KP * 1
                         )  #-(((err_x - self.x_prev_error) * 70) * self.KD)
            y_change = -(err_y * self.KP * 0
                         )  #-(((err_y - self.y_prev_error) * 70) * self.KD)
            # guard the divisor: it tends to zero when the drone is directly
            # above the target
            xy_err = max(math.sqrt(err_x**2 + err_y**2), 1e-6)
            z_change = -(err_z / xy_err) * self.KP * 200
            des = self.get_descent(x_change, y_change, z_change)
            self.vel_pub.publish(des)
            self.x_prev_error = err_x
            self.y_prev_error = err_y
            self.pub.publish("PICKUP COMPLETE")
            rate.sleep()

        self.attach()  # The drone gets attached to the target
        self.mode = "HOVER"  # assignment, not comparison
        self.hover()

    def planner(self):

        if self.mode == "HOVER":
            self.hover()
        print('transitioning')
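
For reference, the magic numbers passed to PositionTarget in get_descent can be reconstructed from the mavros_msgs constants; a minimal sketch (not part of the original node):

# Decode get_descent's hard-coded coordinate_frame = 8 and type_mask = 3527
# using the named mavros_msgs constants.
from mavros_msgs.msg import PositionTarget

assert PositionTarget.FRAME_BODY_NED == 8  # frame 8 is body NED

# 3527 ignores position, acceleration/force, yaw and yaw rate, so only the
# velocity fields of the setpoint are honored:
mask = (PositionTarget.IGNORE_PX | PositionTarget.IGNORE_PY
        | PositionTarget.IGNORE_PZ
        | PositionTarget.IGNORE_AFX | PositionTarget.IGNORE_AFY
        | PositionTarget.IGNORE_AFZ
        | PositionTarget.IGNORE_YAW | PositionTarget.IGNORE_YAW_RATE)
assert mask == 3527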
Ejemplo n.º 18
0
class OffbPosCtl:
    curr_pose = PoseStamped()
    des_pose = PoseStamped()
    cam_pose = PoseStamped()
    waypointIndex = 0
    detections = []
    distThreshold = 2
    detection_count = 0
    depth = Image()
    depth_matrix = []
    dimg = Image()
    KP = 0.005
    des_x = 0
    des_y = 0
    des_z = 0
    camera = PinholeCameraModel()
    saved_location = None
    isReadyToFly = False
    hover_loc = [1, -9, 10, 0, 0, 0, 0]
    mode = "HOVER"
    rgb_target = []
    target = []       # a column vector that stores the x, y, z target
    #dronename = rospy.get_param('iris_cam')
    vel_pub = rospy.Publisher('/mavros/setpoint_raw/local', PositionTarget, queue_size=10)
    pub = rospy.Publisher('/data', String, queue_size=10)


    def __init__(self):
        rospy.init_node('offboard_test', anonymous=True)
        self.loc = []
        rospy.Subscriber('/mavros/local_position/pose', PoseStamped, callback=self.mocap_cb)
        rospy.Subscriber('/mavros/state', State, callback=self.state_cb)
        rospy.Subscriber('/dreams/state', String, callback=self.update_state_cb)
        rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, callback=self.yolo)
        rospy.Subscriber('/darknet_ros/detection_image', Image, callback=self.detected_image)
        rospy.Subscriber('/camera/depth/image_raw', Image, callback=self.depth_image)
        self.camera.fromCameraInfo(self.rgb_info())
        #self.listener = tf.TransformListener()  # linked to tf_static
        self.planner()

    def mocap_cb(self, msg1):
        self.curr_pose = msg1
        #br = tf.TransformBroadcaster()
        #br.sendTransform((msg1.pose.position.x + 0, msg1.pose.position.y + 0, msg1.pose.position.z - 0.2, msg1.pose.orientation.x + 0.3825 , msg1.pose.orientation.y + 0.0003 , msg1.pose.orientation.z + 0.924 , msg1.pose.orientation.w + 0.001),rospy.Time.now(),"world", self.dronename)



    def rgb_info(self):
        msg_header = Header()
        msg_header.frame_id = "camera_link"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = 0
        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg



    def depth_image(self, Img):
        self.depth = Img
        #print(self.depth.encoding)
        #print(self.depth_matrix)



    def depth_eval(self):
        cx = self.rgb_target[0]
        cy = self.rgb_target[1]

        # 6x6 pixel window centred on the detection
        x_left_lim = int(cx) - 3
        x_right_lim = int(cx) + 3
        y_down_lim = int(cy) - 3
        y_up_lim = int(cy) + 3
        total = 0

        self.depth_matrix = CvBridge().imgmsg_to_cv2(self.depth, desired_encoding='passthrough')
        #self.depth_matrix = np.array(self.depth.data, dtype=np.uint8)
        cpimg = self.depth_matrix.copy().astype(float)
        mat = cv2.convertScaleAbs(cpimg, alpha=255 / cpimg.max())
        depth = mat[y_down_lim:y_up_lim, x_left_lim:x_right_lim]  # rows are y, columns are x
        #print depth
        for i in range(6):
            for j in range(6):
                total = total + depth[i][j]
        avg_depth = total / 36.0  # 36 samples in the 6x6 window
        K = np.array([[1.0, 0.0, 320.5], [0.0, 1.0, 240.5], [0.0, 0.0, 1.0]])
        K_inv = np.array([[1.0, 0.0, -320.5], [0.0, 1.0, -240.5], [0.0, 0.0, 1.0]])  # inverse of K, since fx = fy = 1
        self.target = .001 * avg_depth * np.matmul(K_inv, self.rgb_target)
        hom_transformation = np.array([[-.707, 0, -.707, 0], [0, -1, 0, 0], [-.707, 0, .707, -0.2], [0, 0, 0, 1]])
        homogeneous_coordinates = [[self.target[0]], [self.target[1]], [self.target[2]], [1]]
        product = np.matmul(hom_transformation, homogeneous_coordinates)
        # cam_pose is the class attribute declared above (the original wrote
        # to an undefined self.cam_img)
        self.cam_pose.pose.position.x = product[0]
        self.cam_pose.pose.position.y = product[1]
        self.cam_pose.pose.position.z = self.target[2]  # camera-frame depth used directly for z
        self.cam_pose.pose.orientation.x = 0
        self.cam_pose.pose.orientation.y = 0
        self.cam_pose.pose.orientation.z = 0
        self.cam_pose.pose.orientation.w = 1
        self.des_pose.pose.position.x = self.cam_pose.pose.position.x + self.curr_pose.pose.position.x
        self.des_pose.pose.position.y = self.cam_pose.pose.position.y + self.curr_pose.pose.position.y
        self.des_pose.pose.position.z = self.cam_pose.pose.position.z + self.curr_pose.pose.position.z
        self.des_pose.pose.orientation.x = self.curr_pose.pose.orientation.x
        self.des_pose.pose.orientation.y = self.curr_pose.pose.orientation.y
        self.des_pose.pose.orientation.z = self.curr_pose.pose.orientation.z
        self.des_pose.pose.orientation.w = self.curr_pose.pose.orientation.w
        self.des_x = self.des_pose.pose.position.x
        self.des_y = self.des_pose.pose.position.y
        self.des_z = self.des_pose.pose.position.z
        # Tf static  # Message@Abhijith: This section has been appended under depth_eval
        #self.cam_pose.pose.position.x = self.target[0]
        #self.cam_pose.pose.position.y = self.target[1]
        #self.cam_pose.pose.position.z = self.target[2]
        #self.cam_pose.pose.orientation.x = 0
        #self.cam_pose.pose.orientation.y = 0
        #self.cam_pose.pose.orientation.z = 0
        #self.cam_pose.pose.orientation.w = 1
        #self.cam_pose.header.stamp = rospy.Time.now()
        #self.cam_pose.header.frame_id = self.camera.tfFrame()
        #self.listener.waitForTransform(self.cam_pose.header.frame_id, "world", rospy.Time.now(), rospy.Duration(1.0))   # listener is missing
        #tf_point = self.listener.transformPose("world", self.cam_pose)
        #self.des_pose.pose.position.x = tf_point.pose.position.x
        #self.des_pose.pose.position.y = tf_point.pose.position.y
        #self.des_pose.pose.position.z = tf_point.pose.position.z
        #self.des_x = self.des_pose.pose.position.x
        #self.des_y = self.des_pose.pose.position.y
        #self.des_z = self.des_pose.pose.position.z
        print(self.des_x)
        print(self.des_y)
        print(self.des_z)
        print("________________________________________________________________________________________________________________________")

	
    def yolo(self, data):
        for a in data.bounding_boxes:
            if a.Class == "truck" or a.Class == "bus":
                self.detection_count = self.detection_count + 1
                self.rgb_target = np.array([[a.xmin + (a.xmax - a.xmin)/2], [a.ymin + (a.ymax - a.ymin)/2], [1]])
                self.depth_eval()


       
    def detected_image(self, detection):
        try:
            self.dimg = CvBridge().imgmsg_to_cv2(detection, "bgr8")
        except CvBridgeError as e:
            print(e)


         
    def copy_pose(self, pose):
        pt = pose.pose.position
        quat = pose.pose.orientation
        copied_pose = PoseStamped()
        copied_pose.header.frame_id = pose.header.frame_id
        copied_pose.pose.position = Point(pt.x, pt.y, pt.z)
        copied_pose.pose.orientation = Quaternion(quat.x, quat.y, quat.z, quat.w)
        return copied_pose
  

    
    def state_cb(self, msg):
        if msg.mode == 'OFFBOARD':
            self.isReadyToFly = True
        else:
            print(msg.mode)



    def update_state_cb(self, data):
        self.mode = data.data



    def attach(self):
        attach_srv = rospy.ServiceProxy('/link_attacher_node/attach', Attach)
        attach_srv.wait_for_service()
        print('Trying to attach')

        req = AttachRequest()
        req.model_name_1 = "iris"
        req.link_name_1 = "base_link"
        req.model_name_2 = "apriltag"  # Insert the model name you want to attach to
        req.link_name_2 = "link"  # Insert link name you want to attach to

        attach_srv.call(req)
        print('Attached')



    def detach(self):
        attach_srv = rospy.ServiceProxy('/link_attacher_node/detach', Attach)
        attach_srv.wait_for_service()
        print('Trying to detach')

        req = AttachRequest()
        req.model_name_1 = "iris"
        req.link_name_1 = "base_link"
        req.model_name_2 = "apriltag"  # Insert the model name you want to attach to
        req.link_name_2 = "link"  # Insert link name you want to attach to

        attach_srv.call(req)
        print('Detached')



    def hover(self):
        location = self.hover_loc
        loc = [location,
               location,
               location,
               location,
               location,
               location,
               location,
               location,
               location]
      
        rate = rospy.Rate(10)
        shape = len(loc)
        pose_pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size= 10)
        des_pose = self.copy_pose(self.curr_pose)
        waypoint_index = 0
        sim_ctr = 1
       	
        while self.mode=="HOVER" and self.detection_count < 5 and not rospy.is_shutdown():
	    print(len(self.detections))
            if waypoint_index==shape:
                waypoint_index = 0            # changing the way point index to 0
                sim_ctr = sim_ctr + 1
                print "HOVER STOP COUNTER:" + str(sim_ctr)
            if self.isReadyToFly:
                des_x = loc[waypoint_index][0]
                des_y = loc[waypoint_index][1]
                des_z = loc[waypoint_index][2]
                des_pose.pose.position.x = des_x
                des_pose.pose.position.y = des_y
                des_pose.pose.position.z = des_z
                des_pose.pose.orientation.x = loc[waypoint_index][3]
                des_pose.pose.orientation.y = loc[waypoint_index][4]
                des_pose.pose.orientation.z = loc[waypoint_index][5]
                des_pose.pose.orientation.w = loc[waypoint_index][6]
                curr_x = self.curr_pose.pose.position.x
                curr_y = self.curr_pose.pose.position.y
                curr_z = self.curr_pose.pose.position.z        
                dist = math.sqrt((curr_x - des_x)*(curr_x - des_x) + (curr_y - des_y)*(curr_y - des_y) + (curr_z - des_z)*(curr_z - des_z))
                if dist<self.distThreshold :
                    waypoint_index += 1
                  
            if sim_ctr == 50:
                pass

            pose_pub.publish(des_pose)
	rate.sleep()

        if self.detection_count >= 5 :
	    self.mode = "SWOOP"   



    def get_descent(self, x, y, z):
        des_vel = PositionTarget()
        des_vel.header.frame_id = "world"
        des_vel.header.stamp = rospy.Time.from_sec(time.time())
        des_vel.coordinate_frame = 8
        des_vel.type_mask = 3527
        des_vel.velocity.x = x
        des_vel.velocity.y = y
        des_vel.velocity.z = z
        return des_vel



    def descent(self):
        rate = rospy.Rate(10)  # 10 Hz
        # find tf static section under depth_eval
        print(self.mode)
        while self.mode == "SWOOP" and self.curr_pose.pose.position.z > 0.02 and not rospy.is_shutdown():
            print(" In while loop")
            err_x = self.des_x - self.curr_pose.pose.position.x
            err_y = self.des_y - self.curr_pose.pose.position.y
            err_z = self.des_z - self.curr_pose.pose.position.z
            #print(err_x)
            # print(err_y)
            x_change = -(err_x * self.KP * 2)
            y_change = -(err_y * self.KP * 2)
            # note: ** is exponentiation; the original '^' was bitwise XOR,
            # which raises TypeError on floats
            z_change = -(math.sqrt(err_x**2 + err_y**2) / err_z) * self.KP
            des = self.get_descent(x_change, y_change, z_change)
            self.vel_pub.publish(des)
            self.pub.publish("PICKUP COMPLETE")
            rate.sleep()

        self.attach()  # The drone gets attached to the target



    def planner(self):
        if self.mode == "HOVER":
            self.hover()
        if self.mode == "SWOOP":
            self.descent()
        if self.mode == "PICKED":
            return self.flight()
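
A quick worked check of the back-projection inside depth_eval: with the synthetic intrinsics from rgb_info() (fx = fy = 1, cx = 320.5, cy = 240.5), K_inv merely subtracts the principal point, so the recovered camera-frame point is the pixel offset scaled by depth. A minimal sketch, with an arbitrary example pixel and depth:

import numpy as np

K = np.array([[1.0, 0.0, 320.5],
              [0.0, 1.0, 240.5],
              [0.0, 0.0, 1.0]])
pixel = np.array([[400.0], [300.0], [1.0]])  # homogeneous pixel (u, v, 1)
depth = 5.0                                  # example depth along the optical axis

point_cam = depth * np.linalg.inv(K).dot(pixel)
# -> [[397.5], [297.5], [5.0]]: (u - cx, v - cy, 1) scaled by depth,
# matching the hand-written K_inv in depth_eval.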
Ejemplo n.º 19
0
def img_cb(msg):
    global sav_cnt, block_array
    try:
        cv_image = CvBridge().imgmsg_to_cv2(msg, "bgr8")
    except CvBridgeError as e:
        print("[INFO]: Error in obtaining image from CvBridge! Skipping frame!")
    else:
        # converting to grayscale
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        # threshold image
        ret, thresh1 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
        ret, thresh2 = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)

        # apply erosion to thresholded image
        kernel = np.ones((3, 3), np.uint8)
        eroded = cv2.erode(thresh2, kernel, iterations=1)

        # find edges
        edges = cv2.Canny(gray, 80, 120)

        """
        Lane line detection
        """
        contoured_img = find_contours(cv_image.copy(), edges.copy())

        # use contoured img to get the lines in the bottom third using HoughLines and not HoughLinesP
        contoured_img_gray = cv2.cvtColor(contoured_img, cv2.COLOR_BGR2GRAY)
        mask = np.zeros_like(contoured_img_gray)
        mask[(2 * mask.shape[0]) // 3:, :] = 255  # rows are shape[0]; select the bottom third
        masked_img = cv2.bitwise_and(edges, edges, mask=mask)
        lines_angled = cv2.HoughLines(masked_img, 1, m.pi/180, 75)

        line_factor = 500
        left_theta = []
        right_theta = []
        left_rho = []
        right_rho = []
        left_theta_avg = None
        right_theta_avg = None
        left_rho_avg = None
        right_rho_avg = None

        if lines_angled is not None and lines_angled.shape[0] != 0:
            for i in range(lines_angled.shape[0]):
                for rho, theta in lines_angled[i]:
                    print(rho, theta)
                    if (theta) < m.radians(5):
                        continue
                    if theta < m.pi/2:
                        left_theta.append(theta)
                        left_rho.append(rho)
                    else:
                        right_theta.append(theta)
                        right_rho.append(rho)

            if len(left_theta) != 0:
                left_theta_avg = sum(left_theta) / len(left_theta)
                left_rho_avg = sum(left_rho) / len(left_rho)
                a = np.cos(left_theta_avg)
                b = np.sin(left_theta_avg)
                x0 = a*left_rho_avg
                y0 = b*left_rho_avg
                x1 = int(x0 + line_factor*(-b))
                y1 = int(y0 + line_factor*(a))
                x2 = int(x0 - line_factor*(-b))
                y2 = int(y0 - line_factor*(a))

                cv2.line(cv_image, (x1, y1), (x2, y2), (0, 0, 255), 2)

            if len(right_theta) != 0:
                right_theta_avg = sum(right_theta) / len(right_theta)
                right_rho_avg = sum(right_rho) / len(right_rho)
                a = np.cos(right_theta_avg)
                b = np.sin(right_theta_avg)
                x0 = a*right_rho_avg
                y0 = b*right_rho_avg
                x1 = int(x0 + line_factor*(-b))
                y1 = int(y0 + line_factor*(a))
                x2 = int(x0 - line_factor*(-b))
                y2 = int(y0 - line_factor*(a))

                cv2.line(cv_image, (x1, y1), (x2, y2), (0, 125, 125), 2)

        """
        vertical lines detection
        """
        # extract HoughP lines
        lines = cv2.HoughLinesP(thresh2, 1, m.pi/2, 2, None, 70, 1)
        if lines is None:
            lines = np.empty((0, 1, 4), dtype=int)  # HoughLinesP returns None when no lines are found

        lines_drawn = []
        for i in range(lines.shape[0]):
            for line in lines[i]:
                pt1 = (line[0], line[1])
                pt2 = (line[2], line[3])
                # to just detect vertical lines
                if (line[0] != line[2]):
                    continue
                # to remove the ceiling lines
                max_ordinate = max(line[1], line[3])
                if max_ordinate < 200:
                    continue
                """ to plot blue lines """
                cv2.line(cv_image, pt1, pt2, (255, 0, 0), 3)
                lines_drawn.append(pt1)
                lines_drawn.append(pt2)

        # sort the lines_drawn pts
        lines_drawn.sort(key=lambda x: x[0])
        # print("{} vertical lines were detected: and they are \n {}".format(len(lines_drawn), lines_drawn))
        lines_drawn_new = [n for n in lines_drawn if n[1] > 200]
        # print("new points are : \n {}".format(lines_drawn_new))

        # lets use bunching up techniques
        if len(lines_drawn_new) == 0:
            return  # no vertical lines below the ceiling cutoff; skip this frame
        run_x_avg = lines_drawn_new[0][0]
        run_y = lines_drawn_new[0][1]
        pts_list = []
        count = 1
        x_thresh = 10
        consider = False
        consider_thresh = 7

        if left_theta_avg is not None:
            slop_left = m.tan(m.pi/2 + left_theta_avg)
            intercept_left = -(slop_left * left_rho_avg) / \
                m.cos(left_theta_avg)
        if right_theta_avg is not None:
            slop_right = m.tan(m.pi/2 + right_theta_avg)
            intercept_right = -(slop_right * right_rho_avg) / \
                m.cos(right_theta_avg)

        for i in range(1, len(lines_drawn_new)):
            if abs(run_x_avg - lines_drawn_new[i][0]) <= x_thresh:
                count += 1
                # calculate the new x avg
                run_x_avg = run_x_avg + \
                    ((lines_drawn_new[i][0] - run_x_avg)*1.0) / count
                run_y = max(run_y, lines_drawn_new[i][1])
            else:
                # we need to check if this point is close enough to the intersection point with either of the lane lines
                consider = False
                if left_theta_avg is not None:
                    y_intersect = slop_left * run_x_avg + intercept_left
                    if abs(y_intersect - run_y) <= consider_thresh:
                        consider = True
                        run_y = y_intersect
                        pts_list.append([run_x_avg, run_y, "l"])
                        """ To plot the magenta lines """
                        cv2.line(cv_image, (int(run_x_avg), 0),
                                 (int(run_x_avg), int(run_y)), (255, 0, 255), 2)
                        run_x_avg = lines_drawn_new[i][0]
                        run_y = lines_drawn_new[i][1]
                        count = 1
                if not consider and right_theta_avg is not None:
                    y_intersect = slop_right * run_x_avg + intercept_right
                    if abs(y_intersect - run_y) <= consider_thresh:
                        consider = True
                        run_y = y_intersect
                        pts_list.append([run_x_avg, run_y, "r"])
                        """ To plot the magenta lines """
                        cv2.line(cv_image, (int(run_x_avg), 0),
                                 (int(run_x_avg), int(run_y)), (255, 0, 255), 2)
                        run_x_avg = lines_drawn_new[i][0]
                        run_y = lines_drawn_new[i][1]
                        count = 1

        consider = False
        if left_theta_avg is not None:
            y_intersect = slop_left * run_x_avg + intercept_left
            if abs(y_intersect - run_y) <= consider_thresh:
                consider = True
                run_y = y_intersect
                pts_list.append([run_x_avg, run_y, "l"])
                """ To plot the magenta lines """
                cv2.line(cv_image, (int(run_x_avg), 0),
                         (int(run_x_avg), int(run_y)), (255, 0, 255), 2)

        if not consider and right_theta_avg is not None:
            y_intersect = slop_right * run_x_avg + intercept_right
            if abs(y_intersect - run_y) <= consider_thresh:
                consider = True
                run_y = y_intersect
                pts_list.append([run_x_avg, run_y, "r"])
                """ To plot the magenta lines """
                cv2.line(cv_image, (int(run_x_avg), 0),
                         (int(run_x_avg), int(run_y)), (255, 0, 255), 2)

        if len(pts_list) != 0:
            # sort this pts_list based on second index
            pts_list.sort(key=lambda x: x[1], reverse=True)

            prev = pts_list[0]
            # for i in range(1, len(pts_list)):
            #     cur = pts_list[i]
            #     if abs(cur[1] - prev[1]) <= 2:
            #         # draw a line
            #         cv2.line(cv_image, (prev[0], prev[1]), (cur[0], cur[1]), (0, 100, 100), 2)
            #     prev = cur

            """ This is for plotting the yellow line connecting ends of  the vertical pink lines"""
            for i in range(0, 1):
                cur = pts_list[i]
                prev = (0, int(cur[1]))
                next = (400, int(cur[1]))
                # for plotting line with recently acquired information
                cv2.line(cv_image, prev, next, (255, 0, 0), 2)
                cv_image = update_blocks(cv_image, cur[0], cur[1], cur[2])

        # lets also put text about current block length info to debug
        cv_image = puttext(cv_image, str(len(block_array)))

        cv2.namedWindow("HoughLines", cv2.WINDOW_NORMAL)
        cv2.resizeWindow("HoughLines", 800, 800)
        cv2.imshow("HoughLines", cv_image)
        cv2.imshow("masked", masked_img)
        cv2.waitKey(1)
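
The "bunching up" pass above is a one-dimensional clustering of sorted x-coordinates: a point stays in the current cluster while it lies within x_thresh of the running mean. A standalone sketch of just that step (hypothetical helper, not in the original):

def bunch(xs, x_thresh=10):
    # xs must be sorted; returns one running-average representative per cluster
    clusters, mean, count = [], xs[0], 1
    for x in xs[1:]:
        if abs(mean - x) <= x_thresh:
            count += 1
            mean += (x - mean) / float(count)  # incremental mean update
        else:
            clusters.append(mean)
            mean, count = x, 1
    clusters.append(mean)
    return clusters

# bunch([98, 100, 103, 250, 255]) -> [100.33..., 252.5]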
Ejemplo n.º 20
0
class LidarImage:
    def __init__(self):
        self._imageInput = message_filters.Subscriber(image_topic_name, Image)
        self._velodyne = message_filters.Subscriber(lidar_topic_name,
                                                    PointCloud2)
        self._pub_img_depth = rospy.Publisher("/image_fusion",
                                              Image,
                                              queue_size=1)

        self._ts = message_filters.ApproximateTimeSynchronizer(
            [self._velodyne, self._imageInput], 1, 0.05)
        self._ts.registerCallback(self.lidar_camera_fusion)

    def lidar_camera_fusion(self, point_cloud_data, image_data):
        time_start_all = time.time()
        try:
            self.cvImage = CvBridge().imgmsg_to_cv2(image_data, 'bgr8')
        except CvBridgeError as e:
            print(e)

        image_depth = self.cvImage.copy()
        height, width = image_depth.shape[:2]

        time_start_transform = time.time()
        pointXYZ_raw = pointcloud2_to_xyz_array(point_cloud_data,
                                                remove_nans=True)
        pointXYZ = pointXYZ_raw.copy()
        alpha = 90 - 0.5 * the_field_of_view
        k = math.tan(alpha * math.pi / 180.0)
        if the_view_number == 1:
            pointXYZ = pointXYZ[np.logical_and(
                (pointXYZ[:, 0] > k * pointXYZ[:, 1]),
                (pointXYZ[:, 0] > -k * pointXYZ[:, 1]))]
        elif the_view_number == 2:
            pointXYZ = pointXYZ[np.logical_and(
                (-pointXYZ[:, 1] > k * pointXYZ[:, 0]),
                (-pointXYZ[:, 1] > -k * pointXYZ[:, 0]))]
        elif the_view_number == 3:
            pointXYZ = pointXYZ[np.logical_and(
                (-pointXYZ[:, 0] > k * pointXYZ[:, 1]),
                (-pointXYZ[:, 0] > -k * pointXYZ[:, 1]))]
        elif the_view_number == 4:
            pointXYZ = pointXYZ[np.logical_and(
                (pointXYZ[:, 1] > k * pointXYZ[:, 0]),
                (pointXYZ[:, 1] > -k * pointXYZ[:, 0]))]

        pointXYZ = pointXYZ[np.logical_and(
            (pointXYZ[:, 0]**2 + pointXYZ[:, 1]**2 > the_min_distance**2),
            (pointXYZ[:, 0]**2 + pointXYZ[:, 1]**2 < the_max_distance**2))]
        pointXYZ = pointXYZ[np.logical_and(
            (pointXYZ[:, 2] > the_view_lower_limit - the_sensor_height),
            (pointXYZ[:, 2] < the_view_higher_limit - the_sensor_height))]

        cloud_xyz = calib.lidar_to_cam.dot(pointXYZ.T).T
        cloud_uv = calib.lidar_to_img.dot(pointXYZ.T).T
        cloud_uv = np.true_divide(cloud_uv[:, :2], cloud_uv[:, [-1]])
        camera_xyz = cloud_xyz[(cloud_uv[:, 0] >= 0) & (cloud_uv[:, 0] < width)
                               & (cloud_uv[:, 1] >= 0) &
                               (cloud_uv[:, 1] < height)]
        camera_uv = cloud_uv[(cloud_uv[:, 0] >= 0) & (cloud_uv[:, 0] < width) &
                             (cloud_uv[:, 1] >= 0) & (cloud_uv[:, 1] < height)]
        transform_time_cost = time.time() - time_start_transform

        time_start_display = time.time()
        jc = Jet_Color()
        depth = np.sqrt(
            np.square(camera_xyz[:, 0]) + np.square(camera_xyz[:, 1]) +
            np.square(camera_xyz[:, 2]))
        for pt in range(0, camera_uv.shape[0]):
            cv_color = jc.get_jet_color(depth[pt] * jet_color)
            cv2.circle(image_depth,
                       (int(camera_uv[pt][0]), int(camera_uv[pt][1])),
                       1,
                       cv_color,
                       thickness=-1)
        display_time_cost = time.time() - time_start_display

        try:
            self._pub_img_depth.publish(CvBridge().cv2_to_imgmsg(
                image_depth, 'bgr8'))
        except CvBridgeError as e:
            print(e)

        total_time_cost = time.time() - time_start_all
        print(pointXYZ_raw.shape, camera_xyz.shape, camera_uv.shape)
        print("transform time cost:", transform_time_cost)
        print("display time cost:", display_time_cost)
        print("total time cost: ", total_time_cost)
        print()
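
The external `calib` object is assumed to hold the lidar-to-camera extrinsics and their composition with the camera intrinsics. Since the points are passed in as plain 3xN arrays, both matrices are presumably 3x3 (rotation-only, with translation folded out or negligible); a hedged sketch with placeholder values, not the real calibration:

import numpy as np

R = np.array([[0.0, -1.0, 0.0],
              [0.0, 0.0, -1.0],
              [1.0, 0.0, 0.0]])     # hypothetical axis swap: lidar x-forward -> camera z-forward
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])     # hypothetical camera intrinsics

lidar_to_cam = R            # 3x3, applied to build cloud_xyz above
lidar_to_img = K.dot(R)     # 3x3, applied before the perspective divide

X = np.array([[10.0], [1.0], [0.5]])  # one lidar point, column vector
uv_h = lidar_to_img.dot(X)            # homogeneous image coordinates
uv = uv_h[:2] / uv_h[2]               # perspective divide, as in cloud_uv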
Ejemplo n.º 21
0
def img_callback(msg):

    start = time.time()

    img = CvBridge().imgmsg_to_cv2(msg, "bgr8").copy()

    seq = msg.header.seq

    try:
        #cv2.imwrite("inputs/input_{}.png".format(str(msg.header.seq)), img) 
        
        detector = LaneDetection([img.copy(), seq])
        
        pre_process = PreProcessImg([img.copy(), seq])
        
        
        # Pre Process
        pre_success, processedImg = pre_process.process_image()

        if not pre_success:
            print("\n|| COULD NOT FIND RED LANE FROM PREPROCESSING IMAGE ||\n")
            return False

            
        # Lane Detection 
        blended_img, out_img, lane_found, path = detector.process_image(processedImg)
        
        ###########################################     output concatenation
        out_img = cv2.copyMakeBorder(
                    out_img,
                    (img.shape[0] - out_img.shape[0]) // 2,
                    (img.shape[0] - out_img.shape[0]) // 2,
                    (img.shape[1] - out_img.shape[1]) // 2,
                    (img.shape[1] - out_img.shape[1]) // 2,
                    cv2.BORDER_CONSTANT,
                    value=(128, 128, 128)
                )
        comb = np.concatenate((blended_img, out_img), axis=0)
        ###########################################     output concatenation


        
        if lane_found:
            pose_list = []

            global past_coords
            past_coords.append([path[1][0], path[1][1], path[0][2]])

            for i in range(len(path)):

                pose = PoseStamped()
                pose.pose.position.x = path[i][0]
                pose.pose.position.y = path[i][1]
                pose.pose.position.z = path[i][2]

                quat = tf.transformations.quaternion_from_euler(0,0, np.deg2rad(path[i][2]))

                pose.pose.orientation.x = quat[0]
                pose.pose.orientation.y = quat[1]
                pose.pose.orientation.z = quat[2]
                pose.pose.orientation.w = quat[3]

                pose_list.append(pose)

                
            path_msg = Path()
            path_msg.poses = pose_list

            path_pub.publish(path_msg)
            
            org1 = (10, comb.shape[1]+35)
            org2 = (10, comb.shape[1]+55)
            org3 = (10, comb.shape[1]+75)
            org4 = (10, comb.shape[1]+15)

            # Using cv2.putText() method 
            cv2.putText(comb, "x:{0}, y:{1}, yaw:{2}".format(path[0][0], path[0][1], round(path[0][2], 1)), org1, cv2.FONT_HERSHEY_SIMPLEX,  
                    0.5, (0, 0, 255), 1, cv2.LINE_AA) 
            cv2.putText(comb, "x:{0}, y:{1}, yaw:{2}".format(path[1][0], path[1][1], round(path[1][2], 1)), org2, cv2.FONT_HERSHEY_SIMPLEX,  
                    0.5, (0, 255, 0), 1, cv2.LINE_AA) 
            cv2.putText(comb, "x:{0}, y:{1}, yaw:{2}".format(path[2][0], path[2][1], round(path[2][2], 1)), org3, cv2.FONT_HERSHEY_SIMPLEX,  
                    0.5, (255, 0, 0), 1, cv2.LINE_AA) 

            
            cv2.putText(comb, "Lane Found: {}".format(lane_found), org4, cv2.FONT_HERSHEY_SIMPLEX,  
                    0.5, (120, 180, 75), 1, cv2.LINE_AA) 
            

        else:
            past_coords = []
            cv2.putText(comb, "LANE IS NOT FOUND !!!", (10, comb.shape[1]+15), cv2.FONT_HERSHEY_SIMPLEX,
                    0.5, (120, 180, 75), 1, cv2.LINE_AA)
                    
        cv2.imshow("Lane Detection", comb)
        cv2.waitKey(1)

        img_pub.publish(CvBridge().cv2_to_imgmsg(comb, "bgr8"))
        pr_img_pub.publish(CvBridge().cv2_to_imgmsg(processedImg))
        blended_img_pub.publish(CvBridge().cv2_to_imgmsg(blended_img, "bgr8"))
        tf_img_pub.publish(CvBridge().cv2_to_imgmsg(out_img, "bgr8"))
        
        
        #cv2.imwrite("outputs/output_{}.png".format(str(msg.header.seq)), comb) 
        
        

        
    except Exception as e:

        print("ERROR!")
        print(traceback.format_exc())

        img_pub.publish(CvBridge().cv2_to_imgmsg(img.copy(), "bgr8"))

        #quit()
        
        return False


    t = time.time() - start
    f = 1/t
    print("time : {0}, frequency: {1}".format(t, f))
Ejemplo n.º 22
0
    def detect2(self, req):
        rospy.loginfo("Try to detect objects...")

        HM = rospy.get_param("~hmatrix")
        XM = rospy.get_param("~xmatrix")
        CM = rospy.get_param("cameraMatrix")
        EM = rospy.get_param("eyebase")
        image_params = rospy.get_param("~image")

        H = HM['data']
        X = XM['data']
        C = CM['data']
        E = EM['data']

        #C = [592.988765, 0.0, 316.144026, 0.0, 589.679756, 244.158662,0.0, 0.0, 1.0]
        #E = [0.9999262927350819, 0.012140295651447686, 0.00014939744324133248,0.0362733301805, 0.012138358976642286, -0.9998822784140743, 0.009385603649183276, 0.289040732468, 0.0002633238591056371,-0.009383098422207622, -0.9999559430922794,0.83550686443 , 0.0, 0.0, 0.0, 1.0]
        giraffeObjList = []
        duckObjList = []
        barrotObjList = []
        res = DetectObjectSrvResponse.NOT_DETECTED
        imageData = rospy.wait_for_message('/camera/color/image_raw', Image)
        try:
            cv_image = CvBridge().imgmsg_to_cv2(imageData, "bgr8")

        except CvBridgeError as e:
            print(e)

        image_np_expanded = np.expand_dims(cv_image, axis=0)
        image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
        boxes = self.graph.get_tensor_by_name('detection_boxes:0')
        scores = self.graph.get_tensor_by_name('detection_scores:0')
        classes = self.graph.get_tensor_by_name('detection_classes:0')
        num_detections = self.graph.get_tensor_by_name('num_detections:0')
        (boxes, scores, classes, num_detections) = self.sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})

        imagedeal = cv_image.copy()
        imagecopy = cv_image.copy()
        imagecopyp = cv_image.copy()
        vis_util.visualize_boxes_and_labels_on_image_array(
            imagecopy,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            self.category_index,
            use_normalized_coordinates=True,
            line_thickness=3)
        box_to_color_map, str_to_display = vis_util.need_location(
            imagecopy,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            self.category_index,
            use_normalized_coordinates=True,
            line_thickness=3)
        cv2.imwrite("jiance.jpg", imagecopy)

        gray_image = cv2.cvtColor(imagedeal, cv2.COLOR_BGR2GRAY)
        gray_image = cv2.GaussianBlur(gray_image, (5, 5), 0)
        ret3, th3 = cv2.threshold(gray_image, 0, 255,
                                  cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        cv2.imwrite("beijing.jpg", th3)

        sp = imagecopy.shape
        for box, color in box_to_color_map.items():
            ymin, xmin, ymax, xmax = box
            x = int((xmax + xmin) * sp[1] / 2.0)
            y = int((ymax + ymin) * sp[0] / 2.0)

            cv2.circle(imagecopy, (x, y), 2, (0, 255, 0), 3)
            label = str_to_display[box]

            caijian = th3[int(ymin * sp[0]):int(ymax * sp[0]),
                          int(xmin * sp[1]):int(xmax * sp[1])]
            if (label[0][0] == 'p'):
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (26, 26))
                kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (10, 10))
            else:
                kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))
                kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                    (20, 20))
            caijian = cv2.dilate(caijian, kernel)
            caijian = cv2.erode(caijian, kernel2)
            contours, hier = cv2.findContours(caijian, cv2.RETR_EXTERNAL,
                                              cv2.CHAIN_APPROX_SIMPLE)

            # pick the contour with the largest zeroth moment (area)
            M = cv2.moments(contours[0])
            for cnt in contours:
                N = cv2.moments(cnt)
                if M['m00'] < N['m00']:
                    M = N
                    #print(M['m00'])

            centroid_xD = M['m10'] / M['m00'] + xmin * sp[1]
            centroid_yD = M['m01'] / M['m00'] + ymin * sp[0]
            centroid_x = int(M['m10'] / M['m00'] + xmin * sp[1])
            centroid_y = int(M['m01'] / M['m00'] + ymin * sp[0])

            xc = (C[4] * centroid_xD - centroid_yD * C[1] - C[2] * C[4] +
                  C[1] * C[5]) / (C[4] * C[0])
            yc = (centroid_yD - C[5]) / C[4]
            zc = 1

            objPose = Pose()
            objPose.position.x = E[0] * xc + E[1] * yc + E[2] * zc + E[3]
            objPose.position.y = E[4] * xc + E[5] * yc + E[6] * zc + E[7]
            print("objPose.position.x,", objPose.position.x)
            print("objPose.position.y", objPose.position.y)

            if label[0][0] == 'p':
                barrotObjList.append(objPose)
            elif label[0][0] == 'd':
                duckObjList.append(objPose)
            else:
                giraffeObjList.append(objPose)
            res = DetectObjectSrvResponse.SUCCESS
            cv2.circle(imagecopy, (centroid_x, centroid_y), 2, (0, 255, 255),
                       3)
            cv2.imwrite("i.jpg", imagecopy)

        return DetectObjectSrvResponse(res, giraffeObjList, duckObjList,
                                       barrotObjList)
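
The closed-form xc/yc expressions in detect2 are the inverse of the intrinsic matrix applied to the centroid pixel (with zc = 1). A quick numpy check, using an arbitrary row-major camera matrix rather than the calibrated one:

import numpy as np

C = [600.0, 0.5, 320.0, 0.0, 590.0, 240.0, 0.0, 0.0, 1.0]  # arbitrary K, row-major
u, v = 400.0, 300.0

# closed form from detect2:
xc = (C[4]*u - v*C[1] - C[2]*C[4] + C[1]*C[5]) / (C[4]*C[0])
yc = (v - C[5]) / C[4]

# the same via the matrix inverse:
K = np.array(C).reshape(3, 3)
ray = np.linalg.inv(K).dot([u, v, 1.0])
assert np.allclose([xc, yc, 1.0], ray)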
Ejemplo n.º 23
0
class ImageOverlay:
    def __init__(self):
        self.i = 0.0
        self.temp = ""
        self.number = 0
        self.asteering_raw = 0
        self.max_steering = 1
        self.min_steering = -1
        self.epsilon_steering = math.radians(0.001)
        self.img = np.ones(
            (700, 1000, 3), dtype=np.uint8) * 128  # default gray background

    # update the image to the new image from camera feed
    def image_callback(self, data):
        self.img = CvBridge().compressed_imgmsg_to_cv2(data)

    def overlay(self):
        img_out = self.img.copy()
        return img_out

    def call_back2(self, data):

        ack_cmd = AckermannDriveStamped()
        ack_cmd.header.stamp = rospy.Time.now()

        drive = AckermannDrive()
        drive.speed = data.linear.x
        drive.steering_angle = data.angular.z
        # impose limits on the commanded angle: clamp the model prediction
        # to [min_steering, max_steering] before publishing
        drive.steering_angle = max(self.min_steering,
                                   min(self.max_steering, self.i))
        print(self.i)
        ack_cmd.drive = drive
        self.publisher.publish(ack_cmd)

    def run(self):
        rospy.init_node("get_img")

        #self.publisher = rospy.Publisher('overlay/compressed', CompressedImage, queue_size=1)

        # Read in camera feed, change first arg to read a different camera
        self.camera_subscriber = rospy.Subscriber(
            '/camera/left/image_raw/compressed',
            CompressedImage,
            self.image_callback,
            queue_size=1)
        self.publisher = rospy.Publisher('/robot_control/command',
                                         AckermannDriveStamped,
                                         queue_size=10)
        self.s = rospy.Subscriber("/cmd_vel", Twist, self.call_back2)

        rate = rospy.Rate(3)  # default 3 Hz

        #print("Running camera overlay in overlay/compressed")
        graph = tf.Graph()
        with graph.as_default():
            with tf.Session(graph=graph) as sess:
                with tf.gfile.FastGFile('./model_tf.pb', 'rb') as model_file:
                    graph_def = tf.GraphDef()
                    graph_def.ParseFromString(model_file.read())
                # init
                # choose ROI
                roi_x = 66
                roi_y = 200
                input_var = tf.placeholder("float32", [1, roi_x, roi_y, 3])
                [output_image
                 ] = tf.import_graph_def(graph_def,
                                         input_map={'input_1:0': input_var},
                                         return_elements=['output/Sigmoid:0'],
                                         name='')
                while True:

                    startTime = time.time()
                    image = self.overlay()
                    # from 1 dim to 3
                    image = (np.reshape(image,
                                        (height, width, 3))).astype(float)
                    # resize - cv2. we don't need crop (did it before)
                    image_BGR = cv2.resize(image, (roi_y, roi_x))
                    # convert to RGB
                    image = image_BGR[..., ::-1]
                    # normalization
                    image /= 255.0
                    img = np.expand_dims(image, axis=0)
                    # inference
                    pred = sess.run(output_image, feed_dict={input_var: img})

                    steering_pred = 1 * (
                        (max(0, int(
                            ((((pred[0][0]) - 0.5) * 2) + 0.5) * 255)) - 0) *
                        (3 - -3) / (255 - 0) + -3)
                    self.i = steering_pred
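
The nested steering expression above reduces, up to the int() truncation, to a linear rescale of the sigmoid output p onto [-3, 3] over p in [0.25, 0.75], clamped at -3 from below (there is no upper clamp). A minimal equivalent sketch:

def steering_from_sigmoid(p):
    x = max(0, int((2.0 * p - 0.5) * 255))  # 8-bit intermediate, floored at 0
    return x * 6.0 / 255.0 - 3.0            # rescale [0, 255] -> [-3, 3]
                                            # (the original does this step in
                                            # integer arithmetic under Python 2)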