Example #1
0
File: gui.py  Project: SepehrV/vs_tld
def click_and_crop(event, x, y, flags, param):
    """Mouse callback: drag a rectangle on the "crop1" window and publish
    the selected region as a tld_msgs/Target message.

    Reads/writes module-level state:
      refPt     -- [(x0, y0), (x1, y1)] corners of the dragged rectangle
      cropping  -- True while the left button is held down
      cropImage -- BGR image (numpy array) currently shown in the window
    """
    # grab references to the global variables
    global refPt, cropping, cropImage

    if event == cv2.EVENT_LBUTTONDOWN:
        # Button pressed: remember the anchor corner and start cropping.
        refPt = [(x, y)]
        cropping = True
    elif event == cv2.EVENT_LBUTTONUP:
        # Button released: record the opposite corner and finish cropping.
        refPt.append((x, y))
        cropping = False
        # draw a rectangle around the region of interest
        cv2.rectangle(cropImage, refPt[0], refPt[1], (0, 255, 0), 2)
        cv2.imshow("crop1", cropImage)
        msg = Target()
        # Normalise the corners so the bounding box origin is the top-left
        # and width/height are non-negative regardless of drag direction
        # (the original published negative sizes when the user dragged up
        # or to the left).
        msg.bb.x = min(refPt[0][0], x)
        msg.bb.y = min(refPt[0][1], y)
        msg.bb.width = abs(x - refPt[0][0])
        msg.bb.height = abs(y - refPt[0][1])
        msg.bb.confidence = 1
        msg.img = bridge.cv2_to_imgmsg(cropImage, 'bgr8')
        # NOTE(review): creating the Publisher inside the callback is fragile
        # in rospy -- subscribers may not have connected by publish() time.
        # The waitKey(1000) below happens to leave time for registration,
        # but a module-level publisher would be more robust.
        pub1 = rospy.Publisher("tld_gui_bb", Target, queue_size = 10)
        cv2.waitKey(1000)
        cv2.destroyWindow('crop1')
        pub1.publish(msg)
Example #2
0
def get_faces(boxes):
    """Face-detection callback: store the selected face on the shared board.

    Side effects on ``board``:
      num_faces   -- count of faces in this message (always updated)
      Target      -- Target message for the chosen face (only when faces exist)
      face_memory -- 50 ticks (~1/2 sec) during which the face is remembered
    """
    board["num_faces"] = len(boxes.faces)
    # Guard: the original indexed boxes.faces[idist] unconditionally and
    # raised IndexError whenever the detector reported zero faces.
    if not boxes.faces:
        return
    # Index of the face to track.  A disabled variant of this function
    # selected the face closest to the image centre; for now take the first.
    idist = 0
    face = boxes.faces[idist]
    tgt = Target()
    # NOTE(review): stores a shared reference to the current frame --
    # consider a copy if the frame is mutated elsewhere.
    tgt.img = board["img"]
    bb = BoundingBox()
    # Detector coordinates look normalised to [0, 1]; scale to pixels.
    # Assumes a 640x480 camera image -- TODO confirm against the image source.
    bb.x = face.top_left.x * 640
    bb.y = face.top_left.y * 480
    bb.width = face.width_height.x * 640
    bb.height = face.width_height.y * 480
    bb.confidence = 1.0
    bb.tracker_id = "face"
    tgt.bb = bb
    board["Target"] = tgt
    board["face_memory"] = 50  # = 1/2 sec
Example #3
0
def get_faces(boxes):
    """Face-detection callback: store the selected face on the shared board.

    Side effects on ``board``:
      num_faces   -- count of faces in this message (always updated)
      Target      -- Target message for the chosen face (only when faces exist)
      face_memory -- 50 ticks (~1/2 sec) during which the face is remembered
    """
    board["num_faces"] = len(boxes.faces)
    # Guard added: the original indexed boxes.faces[idist] unconditionally,
    # raising IndexError whenever the detector reported zero faces.
    if not boxes.faces:
        return
    # Index of the face to track; a disabled closest-to-centre selection
    # once lived here, so for now the first detection wins.
    idist = 0
    chosen = boxes.faces[idist]
    tgt = Target()
    # NOTE(review): shared reference to the current frame -- consider a copy
    # if the frame is mutated elsewhere.
    tgt.img = board["img"]
    bb = BoundingBox()
    # Detector coordinates look normalised to [0, 1]; scale to pixels.
    # Assumes a 640x480 camera image -- TODO confirm against the image source.
    bb.x = chosen.top_left.x * 640
    bb.y = chosen.top_left.y * 480
    bb.width = chosen.width_height.x * 640
    bb.height = chosen.width_height.y * 480
    bb.confidence = 1.0
    bb.tracker_id = "face"
    tgt.bb = bb
    board["Target"] = tgt
    board["face_memory"] = 50  # = 1/2 sec
    def __sendTrackingBox(self):
        """Publish the user-selected tracking box (and the frame it was drawn
        on) as a Target message, then clear the selection state.

        Reads:  self.tracking_box, self.select_image,
                self.publisher_tracking_box, self.clock
        Writes: self.tracking_box (set to None), self.tracking (set to False)
        """
        target = Target()
        target.bb.x          = self.tracking_box.x
        target.bb.y          = self.tracking_box.y
        target.bb.width      = self.tracking_box.width
        target.bb.height     = self.tracking_box.height
        target.bb.confidence = 1.0
        target.img           = self.select_image
        self.publisher_tracking_box.publish( target )
        # The original mixed tab- and space-indented lines here (a hard
        # SyntaxError under Python 3); indentation is normalised to spaces
        # and the status message is parenthesised (valid in both Py2/Py3)
        # with its grammar fixed ("send" -> "sent").
        print("Bounding box sent")
        self.clock.tick(100)
        self.tracking_box = None
        self.tracking = False
Example #5
0
    def click_and_crop(self, event, x, y, flags, param):
        """Mouse callback: drag a rectangle on the UI window, publish the
        selection as a Target message, then re-open the window and switch
        into tracking mode.

        Reads/writes: self.cropping, self.refPt, self.currentImage,
        self.bridge, self.pub_bb, self.ui_name, self.tracking
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            # Button pressed: remember the anchor corner and start cropping.
            self.cropping = True
            self.refPt = [(x,y)]
        elif event == cv2.EVENT_LBUTTONUP:
            # Button released: record the opposite corner and publish.
            self.refPt.append((x,y))
            cv2.rectangle(self.currentImage, self.refPt[0], self.refPt[1], (0,255,0), 2)
            msg = Target()
            # Normalise so the box origin is top-left and sizes are positive
            # even when the user drags up or to the left (the original could
            # publish negative width/height).
            msg.bb.x = min(self.refPt[0][0], x)
            msg.bb.y = min(self.refPt[0][1], y)
            msg.bb.width = abs(x - self.refPt[0][0])
            msg.bb.height = abs(y - self.refPt[0][1])
            msg.bb.confidence = 1
            msg.img = self.bridge.cv2_to_imgmsg(self.currentImage, 'bgr8')
            self.pub_bb.publish(msg)
            cv2.imshow(self.ui_name, self.currentImage)
            cv2.waitKey(1000)
            # Recreate the window at its configured position for the next crop.
            cv2.destroyWindow(self.ui_name)
            cv2.namedWindow(self.ui_name, 1)
            cv2.moveWindow(self.ui_name, int(rospy.get_param('~window_pos')), 0)
            self.cropping = False
            self.tracking = True
Example #6
0
 # --- Shared-blackboard initialisation (interior of a setup function whose
 # definition lies above this excerpt; the 1-space indent is preserved). ---
 board["wake_up_time"] = 6000  # 60 seconds
 board["stt"] = ""  # latest speech-to-text result (set by set_speech below)
 board["stt_read"] = True
 board["stop_counter"] = 0
 board["sonar_cm"] = 0  # updated by set_sonar subscriber callback
 board["compass_deg"] = 0  # updated by set_compass subscriber callback
 board["num_faces"] = 0
 board["trackers"] = trackers
 board["tracker_watch"] = 0
 board["to_track"] = "null"
 board[
     "track_delay"] = 0  #delay motor movement commands about 1/2 sec or 1/4 sec
 board["face_memory"] = 0  # countdown ticks for remembering the last face
 #board["face_seen"]=face_box()
 board["img"] = Image()  # most recent camera frame (sensor_msgs/Image)
 board["Target"] = Target()  # current tracking target message
 # Default gesture: 'o' -- presumably "none/neutral"; confirm against gesture_r.
 ges = gesture_r()
 ges.direction = 'o'
 board["gesture"] = ges
 board["num_objects"] = 0
 board["object_id_list"] = []
 board["objects_memory"] = 0
 # Sensor subscriptions; queue_size=1 keeps only the freshest reading.
 rospy.Subscriber("/sense/robot/get_sonar_cm",
                  sonar,
                  set_sonar,
                  queue_size=1)
 rospy.Subscriber("/sense/robot/get_compass_deg",
                  compass,
                  set_compass,
                  queue_size=1)
 rospy.Subscriber("/sense/stt/get_text", String, set_speech, queue_size=1)