Example #1
    def localize_detection(self, detection):
        """
            2d image detection -> 3d point in [map frame]
        """
        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            return False, None

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)

        if not localization:
            return False, None

        point_trans = None

        print "Face localized ", localization.pose.position.x, localization.pose.position.y
        if localization.pose.position.x == 0.0:
            print "Ignoring this one!"
            return False, None
        try:
            #(pos_trans, rot) = self.tf_listener.lookupTransform('/map', detection.header.frame_id, rospy.Time(0))
            point_trans = self.tf_listener.transformPoint(
                '/map',
                PointStamped(detection.header, localization.pose.position))
        except (tf.LookupException, tf.ConnectivityException,
                tf.ExtrapolationException):
            pass

        if point_trans is None:  # transformation failed
            return False, None

        print "Face detected at %d,%d" % (point_trans.point.x,
                                          point_trans.point.y)
        return True, point_trans.point
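
For a monocular camera the Tx/Ty terms above are zero, and the Point is simply the viewing ray through the detection's centre pixel rescaled to unit depth. image_geometry can produce the same ray directly; a minimal sketch, assuming camera_info holds a previously received sensor_msgs/CameraInfo:

from image_geometry import PinholeCameraModel

model = PinholeCameraModel()
model.fromCameraInfo(camera_info)  # camera_info: assumed available

u, v = 320, 240  # example pixel
ray = model.projectPixelTo3dRay((u, v))  # unit-norm ray through the pixel
# rescale so z == 1, matching the Point built in localize_detection above
ray_at_unit_depth = (ray[0] / ray[2], ray[1] / ray[2], 1.0)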
class coord_converter:

  def __init__(self):
    self.image_sub = rospy.Subscriber("hough_circles/circles", CircleArrayStamped, self.callback)
    self.camera_info_msg = None
    self.camera_model = PinholeCameraModel() 
    self.image_info_sub = rospy.Subscriber("camera1/camera_info", CameraInfo, self.image_info_cb, queue_size=1) 

#    if self.camera_info_msg is not None:
#        print('PinholeCameraModel parameters:\n')
#        print('cx: ' + str(self.camera_model.cx()))
#        print('cy: ' + str(self.camera_model.cy()))
#        print('fx: ' + str(self.camera_model.fx()))
#        print('fy: ' + str(self.camera_model.fy()))

  def callback(self,data):
    print('\nIMAGE COORDINATES (center):')
    for i in range(len(data.circles)):
      print(' circle' + str(i+1) + ' detected:')
      print('  x: ' + str(data.circles[i].center.x))
      print('  y: ' + str(data.circles[i].center.y))
      
    print('\nWORLD COORDINATES (center):')
    print(' camera_robot coordinates:\n  x: 1.4\n  y: 2.2')
    for i in range(len(data.circles)):
      print(' circle' + str(i+1) + ' detected:')
      # using the equations shown at the beginning of this script:
      x_world = ((data.circles[i].center.x) - self.camera_model.cx()) * 1.25 / self.camera_model.fx() #z_world = camera rod height = 1.25
      y_world = ((data.circles[i].center.y) - self.camera_model.cy()) * 1.25 / self.camera_model.fy()
      print('  x: ' + str(1.4 + x_world)) #x=0 from image = 1.4 from world
      print('  y: ' + str(2.6 - y_world)) #y=0 from image = 2.6 from world / direction of y axis from world = -y from image

  def image_info_cb(self, msg):
    if self.camera_info_msg is None:
      self.camera_model.fromCameraInfo(msg)
      self.camera_info_msg = msg
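
The callback above inverts the pinhole projection at a known height: with the camera looking straight down from Z = 1.25 m, a pixel (u, v) maps to X = (u - cx) * Z / fx and Y = (v - cy) * Z / fy in the camera frame. A self-contained sketch of that math (the intrinsics below are placeholders, not the values from camera1/camera_info):

def pixel_to_floor(u, v, cx, cy, fx, fy, z):
    # back-project pixel (u, v) onto a plane z metres below the camera
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return x, y

# placeholder intrinsics, for illustration only
x, y = pixel_to_floor(400, 300, cx=320.0, cy=240.0, fx=525.0, fy=525.0, z=1.25)
print('camera-frame offset: x=%.3f m, y=%.3f m' % (x, y))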
Example #3
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        #global tf
        global numFaces

        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            print("Best time is higher than 1")
            return

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)
        transformedPoint = point
        if not localization:
            return

        now = detection.header.stamp
        self.tf.waitForTransform("camera_rgb_optical_frame", "map", now,
                                 rospy.Duration(5.0))

        m = geometry_msgs.msg.PoseStamped()
        m.header.frame_id = "camera_rgb_optical_frame"
        m.pose = localization.pose
        m.header.stamp = detection.header.stamp
        transformedPoint = self.tf.transformPose("map", m)

        if (localization.pose.position.x != 0.0):

            #print("Localisation: ", localization)
            print("Transformation: ", transformedPoint)
            #print("Point: ", point)

            if counter == 0:
                #print("Counter is zero.")
                counter += 1
                lastAdded = transformedPoint
                #print("Adding the point to the array.")
                #alreadyDetected.append(transformedPoint)
                #print(len(alreadyDetected))
            else:
                #print("Counter: ", counter)
                dist = self.distance(transformedPoint, lastAdded)

                #print("Number of detected faces so far: ", len(detected))
                print("Distance is ", dist)

                if dist <= maxDistance:
                    #print("-----------Less then max----------------------------------")
                    #alreadyDetected.append(transformedPoint)
                    #print(len(alreadyDetected))
                    lastAdded = transformedPoint
                    counter += 1
                    beenDetected = False
                    for p in detected:
                        #print("Checking already detected")
                        #print("Distance: ", self.distance(p, transformedPoint))

                        if self.distance(p, transformedPoint) <= maxDistance:
                            #print("This face has already been detected")
                            #print("Distance: ", self.distance(p, transformedPoint))
                            beenDetected = True
                            break

                    if not beenDetected:
                        print("-----------------Good to go------------------------")
                        detected.append(lastAdded)

                        marker = Marker()
                        #print("Localisation: ", localization)
                        marker.header.stamp = detection.header.stamp
                        marker.header.frame_id = detection.header.frame_id
                        marker.pose = localization.pose
                        marker.type = Marker.CUBE
                        marker.action = Marker.ADD
                        marker.frame_locked = False
                        marker.lifetime = rospy.Duration.from_sec(1)
                        marker.id = self.marker_id_counter
                        marker.scale = Vector3(0.1, 0.1, 0.1)
                        marker.color = ColorRGBA(1, 0, 0, 1)
                        self.markers.markers.append(marker)
                        self.marker_id_counter += 1
                        self.soundhandle.say("I detected a face.",
                                             blocking=False)
                        print("Number of detected faces: ", len(detected))
                        if len(detected) == numFaces:
                            #publish stop

                            #rospy.init_node('mapper', anonymous=True)
                            #msg = String()
                            #msg.data = "Found all faces."
                            print("Sending shutdown")
                            self.pub.publish("Found all faces.")

                else:
                    #print("-----------------------------------------More then max----")
                    lastAdded = transformedPoint

        else:
            print("Localisation: ", localization)
Example #4
class PixelConversion():
    def __init__(self):
        self.image_topic = "wall_camera/image_raw"
        self.image_info_topic = "wall_camera/camera_info"
        self.point_topic = "/clicked_point"
        self.frame_id = "/map"

        self.pixel_x = 350
        self.pixel_y = 215

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.image_topic
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.image_cb, queue_size = 1)
        self.image_info_sub = rospy.Subscriber(self.image_info_topic, CameraInfo, self.image_info_cb, queue_size = 1)
        self.point_sub = rospy.Subscriber(self.point_topic, PointStamped, self.point_cb, queue_size = 4)

        self.line_pub = rospy.Publisher("line", Marker, queue_size = 10)
        self.inter_pub = rospy.Publisher("inter", Marker, queue_size = 10)
        self.plane_pub = rospy.Publisher("plane", Marker, queue_size = 10)

        self.points = []
        self.ready_for_image = False
        self.has_pic = False
        self.camera_info_msg = None

        self.tf_listener = tf.TransformListener()

        # self.display_picture()
        self.camera_model = PinholeCameraModel()
        

    def point_cb(self, point_msg):
        if len(self.points) < 4:
            self.points.append(point_msg.point)

        if len(self.points) == 1:
            self.ready_for_image = True 
            print "Point stored from click: ", point_msg.point


    def image_cb(self, img_msg):
        if self.ready_for_image:
            # Use cv_bridge() to convert the ROS image to OpenCV format
            try:
                frame = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
            except CvBridgeError, e:
                print e
                return
            
            # Convert the image to a Numpy array since most cv2 functions
            # require Numpy arrays.
            frame = np.array(frame, dtype=np.uint8)
            print "Got array"
            
            # Process the frame using the process_image() function
            #display_image = self.process_image(frame)
                           
            # Display the image.
            self.img = frame 
            # self.has_pic = True
            

            if self.camera_info_msg is not None:
                print "cx ", self.camera_model.cx()
                print "cy ", self.camera_model.cy()
                print "fx ", self.camera_model.fx()
                print "fy ", self.camera_model.fy()

                # Get the point from the clicked message and transform it into the camera frame                
                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                point_in_camera_frame = tuple(np.dot(tf_mat, np.array([self.points[0].x, self.points[0].y, self.points[0].z, 1.0])))[:3]
                # Get the pixels related to the clicked point (the point must be in the camera frame)
                pixel_coords = self.camera_model.project3dToPixel(point_in_camera_frame)
                print "Pixel coords ", pixel_coords

                # Get the unit vector related to the pixel coords 
                unit_vector = self.camera_model.projectPixelTo3dRay((int(pixel_coords[0]), int(pixel_coords[1])))
                print "Unit vector ", unit_vector

                # TF the unit vector of the pixel to the frame of the clicked point from the map frame
                # self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                # (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                # self.tfros = tf.TransformerROS()
                # tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                # xyz = tuple(np.dot(tf_mat, np.array([unit_vector[0], unit_vector[1], unit_vector[2], 1.0])))[:3]

                # print "World xyz ", xyz 

                # intersect the unit vector with the ground plane 

                red = (0,125,255)
                cv2.circle(self.img, (int(pixel_coords[0]), int(pixel_coords[1])), 30, red)
                cv2.imshow(self.image_topic, self.img)


                ''' trying better calculations for going from pixel to world '''

                # need the plane of the map expressed in camera-frame points;
                # start from three points on the map plane (z = 0) in map frame coordinates
                p1 = [-94.0, -98.0, 0.0]
                p2 = [-92.0, -88.0, 0.0]
                p3 = [-84.0, -88.0, 0.0]
                # p2 = [1.0, -1.0, 0.0]
                # p3 = [-1.0, -1.0, 0.0]
                # point_marker = self.create_points_marker([p1, p2, p3], "/map")
                # self.plane_pub.publish(point_marker)

                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id,  rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)

                # pts in camera frame coords
                p1 = list(np.dot(tf_mat, np.array([p1[0], p1[1], p1[2], 1.0])))[:3]
                p2 = list(np.dot(tf_mat, np.array([p2[0], p2[1], p2[2], 1.0])))[:3]
                p3 = list(np.dot(tf_mat, np.array([p3[0], p3[1], p3[2], 1.0])))[:3]
                plane_norm = self.get_plane_normal(p1, p2, p3) ## maybe viz
                print "P1 ", p1
                print "P2 ", p2
                print "P3 ", p3
                print "Plane norm: ", plane_norm
                point_marker = self.create_points_marker([p1, p2, p3], img_msg.header.frame_id)
                self.plane_pub.publish(point_marker)

                
                # need unit vector to define ray to cast onto plane
                line_pt = list(unit_vector)
                line_norm = self.get_line_normal([0.0, 0.0, 0.0], np.asarray(line_pt) * 10)  # scale the unit vector to get a second point on the ray (the camera origin is the first)

                inter = self.line_plane_intersection(line_pt, line_norm, p1, plane_norm)

                ## intersection is in the camera frame...
                ### tf this into map frame
                self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                xyz = tuple(np.dot(tf_mat,np.array([inter[0], inter[1], inter[2], 1.0])))[:3]
                print "intersection pt: ", xyz
                point_marker = self.create_point_marker(xyz, "/map")
                self.inter_pub.publish(point_marker)

                line_marker = self.create_line_marker([0.0,0.0,0.0], np.asarray(unit_vector) * 30, img_msg.header.frame_id) 
                self.line_pub.publish(line_marker)



            self.ready_for_image = False
            self.points = []
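
get_plane_normal, get_line_normal and line_plane_intersection are called above but not listed; the standard construction is a cross product for the plane normal and a parametric ray-plane intersection. A hypothetical sketch consistent with how they are invoked, written here as free functions:

import numpy as np

def get_plane_normal(p1, p2, p3):
    # normal of the plane through three points
    return np.cross(np.subtract(p2, p1), np.subtract(p3, p1))

def get_line_normal(a, b):
    # direction vector of the line through points a and b
    return np.subtract(b, a)

def line_plane_intersection(line_pt, line_dir, plane_pt, plane_norm):
    # point where the ray line_pt + t * line_dir meets the plane
    denom = np.dot(plane_norm, line_dir)
    if abs(denom) < 1e-9:
        return None  # ray parallel to the plane
    t = np.dot(plane_norm, np.subtract(plane_pt, line_pt)) / denom
    return np.add(line_pt, t * np.asarray(line_dir))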
Example #5
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces
        global brojach
        global avgx
        global avgy
        global avgr
        global flag
        global maxHeight
        global minHeight
        global maxDistRing
        if flag == 0:
            if brojach < 10:
                avgx = avgx + detection.pose.position.x
                avgy = avgy + detection.pose.position.y
                avgr = avgr + detection.pose.position.z
                brojach = brojach + 1
                return
            if brojach == 10:
                tp = detection
                avgx = avgx / 10
                avgy = avgy / 10
                avgr = avgr / 10
                camera_info = None
                best_time = 100
                for ci in self.camera_infos:
                    time = abs(ci.header.stamp.to_sec() -
                               detection.header.stamp.to_sec())
                    if time < best_time:
                        camera_info = ci
                        best_time = time

                if not camera_info or best_time > 1:
                    print("Best time is higher than 1")
                    tp.pose.position.z = -1
                    self.pub_avg_ring.publish(tp)
                    flag = 1
                    return

                camera_model = PinholeCameraModel()
                camera_model.fromCameraInfo(camera_info)

                p = Point(
                    ((avgx - camera_model.cx()) - camera_model.Tx()) /
                    camera_model.fx(),
                    ((avgy + avgr - camera_model.cy()) - camera_model.Ty()) /
                    camera_model.fy(), 0)
                #p2 = Point(((avgx - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),((avgy -avgr - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(), 0)
                avgx = avgy = avgr = 0
                lp = self.localize(detection.header, p, self.region_scope)
                #lp2 = self.localize(detection.header, p2, self.region_scope)
                self.tf.waitForTransform(detection.header.frame_id, "map",
                                         rospy.Time.now(), rospy.Duration(5.0))
                mp = geometry_msgs.msg.PoseStamped()
                mp.header.frame_id = detection.header.frame_id
                mp.pose = lp.pose
                mp.header.stamp = detection.header.stamp
                tp = self.tf.transformPose("map", mp1)

                #mp2 = geometry_msgs.msg.PoseStamped()
                #mp2.header.frame_id = detection.header.frame_id
                #mp2.pose = lp2.pose
                #mp2.header.stamp = detection.header.stamp
                #tp2 = self.tf.transformPose("map", mp2)

                now2 = rospy.Time.now()
                self.tf2.waitForTransform("base_link", "map", now2,
                                          rospy.Duration(5.0))

                robot = geometry_msgs.msg.PoseStamped()
                robot.header.frame_id = "base_link"
                robot.pose.position.x = 0
                robot.pose.position.y = 0
                robot.header.stamp = now2
                robotmap = self.tf2.transformPose("map", robot)

                if lp.pose.position.z != 0:
                    dp = self.distance(robotmap, tp)
                    if dp > maxDistRing:
                        print("Distance too big: ", dp)
                        tp.pose.position.z = -1
                    if tp.pose.position.z > maxHeight + 0.2 or tp.pose.position.z < minHeight:
                        print("Height is wrong: ", tp.pose.position.z)
                        tp.pose.position.z = -1
                    #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                    #	tp.pose.position.z = -1
                    #if (point1.z > 0.5 or point1.z < 0.3):
                    #	tp.pose.position.z = -1
                    #if (point2.z > 0.5 or point2.z < 0.3): # visina
                    #	tp.pose.position.z = -1
                else:
                    print("Localizer failed")
                    tp.pose.position.z = -1
                self.pub_avg_ring.publish(tp)
                flag = 1
                return

        if flag == 1:
            u = detection.pose.position.x
            #u = avgx+avgr
            v = detection.pose.position.y + detection.pose.position.z
            #v = avgy

            w = detection.pose.position.x
            #w = avgx-avgr
            q = detection.pose.position.y - detection.pose.position.z

            g = detection.pose.position.x
            #w = avgx-avgr
            h = detection.pose.position.y + detection.pose.position.z + 3

            r = detection.pose.position.x
            #w = avgx-avgr
            t = detection.pose.position.y + detection.pose.position.z - 3
            #q = avgy

            camera_info = None
            best_time = 100
            for ci in self.camera_infos:
                time = abs(ci.header.stamp.to_sec() -
                           detection.header.stamp.to_sec())
                if time < best_time:
                    camera_info = ci
                    best_time = time

            if not camera_info or best_time > 1:
                print("Best time is higher than 1")
                return

            camera_model = PinholeCameraModel()
            camera_model.fromCameraInfo(camera_info)

            point1 = Point(((u - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((v - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point2 = Point(((w - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((q - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point3 = Point(((g - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((h - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point4 = Point(((r - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((t - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            localization1 = self.localize(detection.header, point1,
                                          self.region_scope)
            localization2 = self.localize(detection.header, point2,
                                          self.region_scope)
            localization3 = self.localize(detection.header, point3,
                                          self.region_scope)
            localization4 = self.localize(detection.header, point4,
                                          self.region_scope)
            if not (localization1 and localization2 and localization3
                    and localization4):
                print("Localization failed")
                return
            now = detection.header.stamp
            self.tf.waitForTransform(detection.header.frame_id, "map", now,
                                     rospy.Duration(5.0))

            m = geometry_msgs.msg.PoseStamped()
            m.header.frame_id = detection.header.frame_id
            m.pose = localization1.pose
            m.header.stamp = detection.header.stamp
            transformedPoint1 = self.tf.transformPose("map", m)

            m2 = geometry_msgs.msg.PoseStamped()
            m2.header.frame_id = detection.header.frame_id
            m2.pose = localization2.pose
            m2.header.stamp = detection.header.stamp
            transformedPoint2 = self.tf.transformPose("map", m2)

            m3 = geometry_msgs.msg.PoseStamped()
            m3.header.frame_id = detection.header.frame_id
            m3.pose = localization3.pose
            m3.header.stamp = detection.header.stamp
            transformedPoint3 = self.tf.transformPose("map", m3)

            m4 = geometry_msgs.msg.PoseStamped()
            m4.header.frame_id = detection.header.frame_id
            m4.pose = localization4.pose
            m4.header.stamp = detection.header.stamp
            transformedPoint4 = self.tf.transformPose("map", m4)

            now2 = rospy.Time.now()
            self.tf2.waitForTransform("base_link", "map", now2,
                                      rospy.Duration(5.0))

            robot = geometry_msgs.msg.PoseStamped()
            robot.header.frame_id = "base_link"
            robot.pose.position.x = 0
            robot.pose.position.y = 0
            robot.header.stamp = now2
            robotmap = self.tf2.transformPose("map", robot)

            if localization1.pose.position.z != 0:
                dist1 = self.distance(robotmap, transformedPoint1)
            else:
                dist1 = 100000
            if localization2.pose.position.z != 0:
                dist2 = self.distance(robotmap, transformedPoint2)
            else:
                dist2 = 100000
            if localization3.pose.position.z != 0:
                dist3 = self.distance(robotmap, transformedPoint3)
            else:
                dist3 = 100000
            if localization4.pose.position.z != 0:
                dist4 = self.distance(robotmap, transformedPoint4)
            else:
                dist4 = 100000
            # find smallest distance to a point.
            dist = dist1
            transformedPoint = transformedPoint1

            if dist2 < dist:
                dist = dist2
                transformedPoint = transformedPoint2
            if dist3 < dist:
                dist = dist3
                transformedPoint = transformedPoint3
            if dist4 < dist:
                dist = dist4
                transformedPoint = transformedPoint4

            print("distance: ", dist)
            if (dist < maxDistRing):
                radius = abs((transformedPoint1.pose.position.y -
                              transformedPoint2.pose.position.y) / 2)
                print("radius: ", radius)
                print("height: ", transformedPoint.pose.position.z)
                if (dist1 < maxDistRing
                        and dist2 < maxDistRing) or (dist1 > maxDistRing
                                                     and dist2 > maxDistRing):
                    print("Checking radius")
                    if (radius > 0.7 or radius < 0.3):
                        print("Wrong radius")
                        return
                else:
                    print("Cant use radius")
                if (transformedPoint.pose.position.z > maxHeight
                        or transformedPoint.pose.position.z < minHeight):
                    print("Wrong height")
                    return
                print("start checking")
                print("Detected rings: ", len(detected))
                beenDetected = False
                for p in detected:
                    if self.distance(p, transformedPoint) <= maxDistance:
                        print("Already detected ring!")
                        beenDetected = True
                        break

                if not beenDetected:
                    detected.append(transformedPoint)
                    print("Publishing new ring")
                    self.pub_ring.publish(transformedPoint)
                    marker = Marker()
                    marker.header.stamp = detection.header.stamp
                    marker.header.frame_id = transformedPoint.header.frame_id
                    marker.pose.position.x = transformedPoint.pose.position.x
                    marker.pose.position.y = transformedPoint.pose.position.y
                    marker.pose.position.z = transformedPoint.pose.position.z
                    marker.type = Marker.CUBE
                    marker.action = Marker.ADD
                    marker.frame_locked = False
                    marker.lifetime = rospy.Duration.from_sec(1)
                    marker.id = self.marker_id_counter
                    marker.scale = Vector3(0.1, 0.1, 0.1)
                    marker.color = ColorRGBA(1, 0, 0, 1)
                    self.markers.markers.append(marker)
                    self.marker_id_counter += 1
                    print("Number of detected rings: ", len(detected))
                    #if len(detected) == numFaces:
                    #	print("Sending shutdown")
            else:
                print("Too far away")
class GridToImageConverter():
    def __init__(self):
        self.image_topic = "wall_camera/image_raw"
        self.image_info_topic = "wall_camera/camera_info"
        self.point_topic = "/clicked_point"
        self.frame_id = "/map"
        self.occupancy = None  # placeholder for an occupancy grid

        self.pixel_x = 350
        self.pixel_y = 215

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.image_topic
        cv.NamedWindow(self.cv_window_name, cv.CV_WINDOW_NORMAL)
        cv.MoveWindow(self.cv_window_name, 25, 75)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        self.image_sub = rospy.Subscriber(self.image_topic, Image, self.image_cb, queue_size = 1)
        self.image_info_sub = rospy.Subscriber(self.image_info_topic, CameraInfo, self.image_info_cb, queue_size = 1)
        self.point_sub = rospy.Subscriber(self.point_topic, PointStamped, self.point_cb, queue_size = 4)

        self.points = []
        self.ready_for_image = False
        self.has_pic = False
        self.camera_info_msg = None

        self.tf_listener = tf.TransformListener()

        # self.display_picture()
        self.camera_model = PinholeCameraModel()
        

    def point_cb(self, point_msg):
        if len(self.points) < 4:
            self.points.append(point_msg.point)

        if len(self.points) == 1:
            self.ready_for_image = True 
            print "Point stored from click: ", point_msg.point


    def image_cb(self, img_msg):
        if self.ready_for_image:
            # Use cv_bridge() to convert the ROS image to OpenCV format
            try:
                frame = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
            except CvBridgeError, e:
                print e
                return
            
            # Convert the image to a Numpy array since most cv2 functions
            # require Numpy arrays.
            frame = np.array(frame, dtype=np.uint8)
            print "Got array"
            
            # Process the frame using the process_image() function
            #display_image = self.process_image(frame)
                           
            # Display the image.
            self.img = frame 
            # self.has_pic = True
            

            if self.camera_info_msg is not None:
                print "cx ", self.camera_model.cx()
                print "cy ", self.camera_model.cy()
                print "fx ", self.camera_model.fx()
                print "fy ", self.camera_model.fy()

                # Get the point from the clicked message and transform it into the camera frame                
                self.tf_listener.waitForTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(img_msg.header.frame_id, self.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                point_in_camera_frame = tuple(np.dot(tf_mat, np.array([self.points[0].x, self.points[0].y, self.points[0].z, 1.0])))[:3]
                # Get the pixels related to the clicked point (the point must be in the camera frame)
                pixel_coords = self.camera_model.project3dToPixel(point_in_camera_frame)
                print "Pixel coords ", pixel_coords

                # Get the unit vector related to the pixel coords 
                unit_vector = self.camera_model.projectPixelTo3dRay((int(pixel_coords[0]), int(pixel_coords[1])))
                print "Unit vector ", unit_vector

                # TF the unit vector of the pixel to the frame of the clicked point from the map frame
                self.tf_listener.waitForTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0), rospy.Duration(0.5))
                (trans,rot) = self.tf_listener.lookupTransform(self.frame_id, img_msg.header.frame_id, rospy.Time(0))
                self.tfros = tf.TransformerROS()
                tf_mat = self.tfros.fromTranslationRotation(trans, rot)
                xyz = tuple(np.dot(tf_mat, np.array([unit_vector[0], unit_vector[1], unit_vector[2], 1.0])))[:3]

                print "World xyz ", xyz 

                red = (0,125,255)
                cv2.circle(self.img, (int(pixel_coords[0]), int(pixel_coords[1])), 30, red)
                cv2.imshow(self.image_topic, self.img)


            self.ready_for_image = False
            self.points = []
            
            # Process any keyboard commands
            self.keystroke = cv.WaitKey(5)
            if 32 <= self.keystroke and self.keystroke < 128:
                cc = chr(self.keystroke).lower()
                if cc == 'q':
                    # The user has pressed the q key, so exit
                    rospy.signal_shutdown("User hit q key to quit.")
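
The pattern in image_cb — project3dToPixel to find the pixel for a camera-frame point, then projectPixelTo3dRay to recover the viewing ray — is a round trip through the pinhole model. A minimal usage sketch, assuming model is a PinholeCameraModel already initialized with fromCameraInfo:

point_cam = (0.2, -0.1, 2.0)  # a 3D point in the camera's optical frame

u, v = model.project3dToPixel(point_cam)   # 3D -> pixel
ray = model.projectPixelTo3dRay((u, v))    # pixel -> unit viewing ray

# scaling the ray by the original depth recovers the point (up to rounding)
depth = point_cam[2] / ray[2]
recovered = tuple(c * depth for c in ray)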
Example #7
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces
        global brojach
        global avgx
        global avgy
        global avgr
        global flag
        if flag == 0:
            if brojach < 10:
                avgx = avgx + detection.pose.position.x
                avgy = avgy + detection.pose.position.y
                avgr = avgr + detection.pose.position.z
                brojach = brojach + 1
                return
            if brojach == 10:
                avgx = avgx / 10
                avgy = avgy / 10
                avgr = avgr / 10
                # build the camera model from the buffered CameraInfo closest
                # in time to this detection
                camera_info = min(self.camera_infos,
                                  key=lambda ci: abs(ci.header.stamp.to_sec() -
                                                     detection.header.stamp.to_sec()))
                camera_model = PinholeCameraModel()
                camera_model.fromCameraInfo(camera_info)
                p = Point(((avgx - camera_model.cx()) - camera_model.Tx()) /
                          camera_model.fx(),
                          ((avgy - camera_model.cy()) - camera_model.Ty()) /
                          camera_model.fy(), avgr)
                lp = self.localize(detection.header, p, self.region_scope)
                self.tf.waitForTransform(detection.header.frame_id, "map",
                                         rospy.Time.now(), rospy.Duration(5.0))
                mp = geometry_msgs.msg.PoseStamped()
                mp.header.frame_id = detection.header.frame_id
                mp.pose = lp.pose
                mp.header.stamp = detection.header.stamp
                tp = self.tf.transformPose("map", mp)
                if lp.pose.position.x != 0:
                    # robot position (base_link) expressed in the map frame
                    now2 = rospy.Time.now()
                    self.tf2.waitForTransform("base_link", "map", now2,
                                              rospy.Duration(5.0))
                    robot = geometry_msgs.msg.PoseStamped()
                    robot.header.frame_id = "base_link"
                    robot.pose.position.x = 0
                    robot.pose.position.y = 0
                    robot.header.stamp = now2
                    robotmap = self.tf2.transformPose("map", robot)
                    dp = self.distance(robotmap, tp)
                    if dp > 1.5:
                        tp.pose.position.z = -1
                    #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                    #	tp.pose.position.z = -1
                    #if (point1.z > 0.5 or point1.z < 0.3):
                    #	tp.pose.position.z = -1
                    #if (point2.z > 0.5 or point2.z < 0.3): # visina
                    #	tp.pose.position.z = -1
                else:
                    tp.pose.position.z = -1
                self.pub_avg_ring.publish(tp)
                flag = 1
                return

        if flag == 1:
            u = detection.pose.position.x
            #u = avgx+avgr
            v = detection.pose.position.y + detection.pose.position.z
            #v = avgy

            w = detection.pose.position.x
            #w = avgx-avgr
            q = detection.pose.position.y - detection.pose.position.z

            g = detection.pose.position.x
            #w = avgx-avgr
            h = detection.pose.position.y + detection.pose.position.z + 4

            r = detection.pose.position.x
            #w = avgx-avgr
            t = detection.pose.position.y + detection.pose.position.z - 4
            #q = avgy

            camera_info = None
            best_time = 100
            for ci in self.camera_infos:
                time = abs(ci.header.stamp.to_sec() -
                           detection.header.stamp.to_sec())
                if time < best_time:
                    camera_info = ci
                    best_time = time

            if not camera_info or best_time > 1:
                print("Best time is higher than 1")
                return

            camera_model = PinholeCameraModel()
            camera_model.fromCameraInfo(camera_info)

            point1 = Point(((u - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((v - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point2 = Point(((w - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((q - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point3 = Point(((g - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((h - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            point4 = Point(((r - camera_model.cx()) - camera_model.Tx()) /
                           camera_model.fx(),
                           ((t - camera_model.cy()) - camera_model.Ty()) /
                           camera_model.fy(), 1)

            localization1 = self.localize(detection.header, point1,
                                          self.region_scope)
            localization2 = self.localize(detection.header, point2,
                                          self.region_scope)
            localization3 = self.localize(detection.header, point3,
                                          self.region_scope)
            localization4 = self.localize(detection.header, point4,
                                          self.region_scope)
            if not (localization1 and localization2 and localization3
                    and localization4):
                return

            # calculate center
            center = Point(localization1.pose.position.x,
                           localization1.pose.position.y,
                           localization1.pose.position.z)
            now = detection.header.stamp
            self.tf.waitForTransform(detection.header.frame_id, "map", now,
                                     rospy.Duration(5.0))

            m = geometry_msgs.msg.PoseStamped()
            m.header.frame_id = detection.header.frame_id
            #m.pose.position.x = center.x
            #m.pose.position.y = center.y
            m.pose = localization1.pose
            m.header.stamp = detection.header.stamp
            transformedPoint1 = self.tf.transformPose("map", m)

            m2 = geometry_msgs.msg.PoseStamped()
            m2.header.frame_id = detection.header.frame_id
            m2.pose = localization2.pose
            m2.header.stamp = detection.header.stamp
            transformedPoint2 = self.tf.transformPose("map", m2)

            m3 = geometry_msgs.msg.PoseStamped()
            m3.header.frame_id = detection.header.frame_id
            m3.pose = localization3.pose
            m3.header.stamp = detection.header.stamp
            transformedPoint3 = self.tf.transformPose("map", m3)

            m4 = geometry_msgs.msg.PoseStamped()
            m4.header.frame_id = detection.header.frame_id
            m4.pose = localization4.pose
            m4.header.stamp = detection.header.stamp
            transformedPoint4 = self.tf.transformPose("map", m4)

            now2 = rospy.Time.now()
            self.tf2.waitForTransform("base_link", "map", now2,
                                      rospy.Duration(5.0))

            robot = geometry_msgs.msg.PoseStamped()
            robot.header.frame_id = "base_link"
            robot.pose.position.x = 0
            robot.pose.position.y = 0
            #robot.pose = localization1.pose
            robot.header.stamp = now2
            robotmap = self.tf2.transformPose("map", robot)

            #dist = self.distance(robotmap,transformedPoint)
            if localization1.pose.position.x != 0:
                dist1 = self.distance(robotmap, transformedPoint1)
            else:
                dist1 = 100000
            if localization2.pose.position.x != 0:
                dist2 = self.distance(robotmap, transformedPoint2)
            else:
                dist2 = 100000
            if localization3.pose.position.x != 0:
                dist3 = self.distance(robotmap, transformedPoint3)
            else:
                dist3 = 100000
            if localization4.pose.position.x != 0:
                dist4 = self.distance(robotmap, transformedPoint4)
            else:
                dist4 = 100000
            # keep the transformed point with the smallest distance to the robot
            dist = dist1
            transformedPoint = transformedPoint1
            if dist2 < dist:
                dist = dist2
                transformedPoint = transformedPoint2
            if dist3 < dist:
                dist = dist3
                transformedPoint = transformedPoint3
            if dist4 < dist:
                dist = dist4
                transformedPoint = transformedPoint4

            print("distance: ", dist)
            if (dist < 1.5):
                rad = abs((point1.y - point2.y) / 2)
                print("radius: ", rad)
                print("height p1: ", point1.z)
                print("height p2: ", point2.z)
                #if ((point1.y - point2.y)/2 > 0.7 or (point1.y - point2.y)/2 < 0.5):
                #	return
                #if (point1.z > 0.5 or point1.z < 0.3):
                #	return
                #if (point2.z > 0.5 or point2.z < 0.3): # visina
                #	return
                self.pub.publish(transformedPoint)
                print("start checking")
                print(len(detected))
                if len(detected) == 0:
                    detected.append(transformedPoint)
                    beenDetected = False
                else:
                    beenDetected = False
                    for p in detected:
                        if self.distance(p, transformedPoint) <= maxDistance:
                            print("Already detected ring!")
                            beenDetected = True
                            break

                if not beenDetected:
                    print("Publishing new ring")
                    self.pub_ring.publish(transformedPoint)
                    marker = Marker()
                    marker.header.stamp = detection.header.stamp
                    marker.header.frame_id = transformedPoint.header.frame_id
                    marker.pose.position.x = transformedPoint.pose.position.x
                    marker.pose.position.y = transformedPoint.pose.position.y
                    marker.pose.position.z = transformedPoint.pose.position.z
                    marker.type = Marker.CUBE
                    marker.action = Marker.ADD
                    marker.frame_locked = False
                    marker.lifetime = rospy.Duration.from_sec(1)
                    marker.id = self.marker_id_counter
                    marker.scale = Vector3(0.1, 0.1, 0.1)
                    marker.color = ColorRGBA(1, 0, 0, 1)
                    self.markers.markers.append(marker)
                    self.marker_id_counter += 1
                    print("Number of detected rings: ", len(detected))
                    if len(detected) == numFaces:
                        print("Sending shutdown")
            else:
                print("Just zeros")
class Realsense:
    """
    Class that uses a Realsense camera for hand detection.
    """
    def __init__(self):
        """
        Constructor
        """
        rospy.init_node("ros_yolo", anonymous=True, disable_signals=False)

        self.bridge = CvBridge()
        self.rgb_lock = threading.Lock()
        self.depth_lock = threading.Lock()

        self.cameraModel = PinholeCameraModel()
        self.cameraInfo = CameraInfo()

        self.sub_color = rospy.Subscriber("/camera/color/image_raw",
                                          Image,
                                          self.color_callback,
                                          queue_size=1)

        self.sub_depth = rospy.Subscriber(
            "/camera/aligned_depth_to_color/image_raw",
            Image,
            self.depth_callback,
            queue_size=1)
        self.sub_camera_info = rospy.Subscriber("/camera/depth/camera_info",
                                                CameraInfo,
                                                self.camera_info_callback,
                                                queue_size=1)

        self.target_pub = rospy.Publisher("/hand_detection/target_camera",
                                          PoseStamped,
                                          queue_size=1)

        self.color_img = np.zeros((480, 640, 3), np.uint8)
        self.depth_img = np.zeros((480, 640), np.uint8)
        self.img_width = 640
        self.img_height = 480
        self.hand_counter = 0
        self.not_hand_counter = 0
        self.hand_3D_pos = np.empty((0, 3), int)

    def _random_centered_pixels(self, box_x, box_y, box_w, box_h, radius,
                                samples):
        """
        Returns an array of pixel positions in 2D
        """
        pixels = []
        center_x = int(box_w / 2)
        center_y = int(box_h / 2)
        x = box_x + center_x - radius
        y = box_y + center_y - radius
        samples_limit = radius * radius

        if samples < samples_limit:
            random_positions = random.sample(range(0, samples_limit), samples)
            for sample in random_positions:
                pixel_x = x + (sample % radius)
                if pixel_x > (self.img_width / 2 - 1):
                    pixel_x = self.img_width / 2 - 1
                pixel_y = y + int(sample / radius)
                if pixel_y > (self.img_height / 2 - 1):
                    pixel_y = self.img_height / 2 - 1
                pixels.append([pixel_x, pixel_y])
        else:
            print("The samples number should be minor" + " than radius*radius")
        return pixels

    def _load_model(self, custom=True):
        if custom:
            model_def_path = "config/yolov3-custom-hand-arm.cfg"
            weights_path = "checkpoints-arm-hand/yolov3_ckpt_54.pth"
            class_path = "data/custom/classes.names"
        else:
            model_def_path = "config/yolov3.cfg"
            weights_path = "weights/yolov3.weights"
            class_path = "data/coco.names"

        self.img_size = 416

        # Object confidence threshold
        self.conf_thres = 0.95
        # IoU threshold for non-maximum suppression
        self.nms_thres = 0.05

        if torch.cuda.is_available():
            print("---------GPU AVAILABLE---------")
            self.Tensor = torch.cuda.FloatTensor
        else:
            print("---------CPU AVAILABLE---------")
            self.Tensor = torch.FloatTensor

        # Create the model
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = Darknet(model_def_path, img_size=416).to(device)

        # Load the custom weights
        if custom:
            self.model.load_state_dict(torch.load(weights_path))
        else:
            self.model.load_darknet_weights(weights_path)

        self.model.eval()

        self.classes = load_classes(class_path)

        cmap = plt.get_cmap("tab20b")
        colors = [cmap(i) for i in np.linspace(0, 1, 10000)]
        self.bbox_colors = random.sample(colors, len(self.classes))

    def _infer(self):
        with self.rgb_lock:
            frame = self.color_img.copy()
            # frame = self.color_img
        # convert BGR -> RGB for PIL
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = ImagePIL.fromarray(img)
        ratio = min(self.img_size / img.size[0], self.img_size / img.size[1])
        imw = round(img.size[0] * ratio)
        imh = round(img.size[1] * ratio)
        img_transforms = transforms.Compose([
            transforms.Resize((imh, imw)),
            transforms.Pad((max(int(
                (imh - imw) / 2), 0), max(int(
                    (imw - imh) / 2), 0), max(int(
                        (imh - imw) / 2), 0), max(int((imw - imh) / 2), 0)),
                           (128, 128, 128)),
            transforms.ToTensor(),
        ])

        image_tensor = img_transforms(img).float()
        image_tensor = image_tensor.unsqueeze_(0)
        input_img = Variable(image_tensor.type(self.Tensor))

        with torch.no_grad():
            detections = self.model(input_img)
            detections = non_max_suppression(detections, self.conf_thres,
                                             self.nms_thres)

        if detections[0] is not None:

            unit_scaling = 0.001
            center_x = self.cameraModel.cx()
            center_y = self.cameraModel.cy()
            constant_x = unit_scaling / self.cameraModel.fx()
            constant_y = unit_scaling / self.cameraModel.fy()

            # Rescale boxes to original image
            detections = rescale_boxes(detections[0], self.img_size,
                                       frame.shape[:2])

            unique_labels = detections[:, -1].cpu().unique()

            for x1, y1, x2, y2, conf, cls_conf, cls_pred in detections:
                box_w = x2 - x1
                box_h = y2 - y1
                w_proportion = box_w / img.size[0]
                h_proportion = box_h / img.size[1]
                if ((self.classes[int(cls_pred)] == 'Human hand'
                     or self.classes[int(cls_pred)] == 'Human arm')
                        and w_proportion >= 0.1 and w_proportion <= 0.90
                        and h_proportion >= 0.1 and h_proportion <= 1.0
                        and box_w > 10 and box_h > 10):

                    color = self.bbox_colors[int(
                        np.where(unique_labels == int(cls_pred))[0])]

                    frame = cv2.rectangle(
                        frame, (int(x1), int(y1)), (int(x2), int(y2)),
                        (color[0] * 255, color[1] * 255, color[2] * 255), 2)

                    cv2.putText(frame, self.classes[int(cls_pred)],
                                (int(x1), int(y1)), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 255, 255), 2, cv2.LINE_AA)

                    radius = 5
                    samples = 10
                    pixels = self._random_centered_pixels(
                        float(x1), float(y1), box_w, box_h, radius, samples)
                    depthPoint = []
                    arrayPoints = np.empty((0, 3), int)

                    for i in range(0, samples - 1):
                        x = int(pixels[i][0])
                        y = int(pixels[i][1])
                        depthPoint = np.array([
                            (x - center_x / 2) *
                            self.depth_img[y, x] * constant_x,
                            (y - center_y / 2) * self.depth_img[y, x] *
                            constant_y, self.depth_img[y, x] * unit_scaling
                        ])
                        if depthPoint.sum() > 0:
                            arrayPoints = np.append(arrayPoints, [depthPoint],
                                                    axis=0)

                    if len(arrayPoints) > 0:
                        mean_x = np.mean(arrayPoints[:, 0], axis=0)
                        mean_y = np.mean(arrayPoints[:, 1], axis=0)
                        mean_z = np.mean(arrayPoints[:, 2], axis=0)

                        pos_3D = np.array([mean_x, mean_y, mean_z])
                        if self.hand_counter == 0:
                            self.hand_3D_pos = pos_3D
                            self.hand_counter = 1
                        elif mean_z > 0.25 and mean_z < 1.20:
                            dist = np.linalg.norm(self.hand_3D_pos - pos_3D)
                            if ((dist < 0.10)
                                    or (pos_3D[2] < self.hand_3D_pos[2])):
                                self.hand_3D_pos = pos_3D
                                self.hand_counter = self.hand_counter + 1

                                if self.hand_counter > 2:
                                    # if self.hand_counter == 3:
                                    #    cv2.imwrite('hand.jpg', frame)

                                    header = std_msgs.msg.Header()
                                    header.stamp = rospy.Time.now()
                                    header.frame_id = 'RealSense_optical_frame'
                                    # publish
                                    p = PoseStamped()
                                    p.header.frame_id = 'target_camera'
                                    p.header.stamp = rospy.Time.now()

                                    p.pose.position.x = mean_x
                                    p.pose.position.y = mean_y
                                    p.pose.position.z = mean_z
                                    self.target_pub.publish(p)
                                    break
                            else:
                                self.not_hand_counter = self.not_hand_counter + 1

            if self.not_hand_counter > 20:
                self.hand_counter = 0
                self.not_hand_counter = 0

        cv2.imshow('frame', frame)
        cv2.waitKey(1)

    def color_callback(self, data):
        """
        Callback function for color image subscriber
        """
        try:
            with self.rgb_lock:
                cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
                self.color_img = cv_image
        except CvBridgeError as e:
            print(e)

    def depth_callback(self, data):
        """
        Callback function for depth image subscriber
        """
        try:
            with self.depth_lock:
                cv_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
                self.depth_img = np.array(cv_image, dtype=np.float32)
        except CvBridgeError as e:
            print(e)

    def camera_info_callback(self, data):
        """
        Callback function for camera info subscriber
        """
        try:
            self.cameraModel.fromCameraInfo(data)
            self.cameraInfo = data
        except Exception as e:
            print(e)
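
The per-pixel depth conversion in _infer is the pinhole back-projection X = (u - cx) * z / fx, Y = (v - cy) * z / fy, with the 16-bit depth scaled from millimetres to metres. A standalone sketch of that conversion, following the usual convention of subtracting the full principal point (the intrinsics below are placeholders):

import numpy as np

def depth_pixel_to_point(u, v, depth_mm, cx, cy, fx, fy):
    # back-project one depth-image pixel to a metric 3D point (optical frame)
    z = depth_mm * 0.001  # millimetres -> metres
    x = (u - cx) * z / fx
    y = (v - cy) * z / fy
    return np.array([x, y, z])

# placeholder values, for illustration only
print(depth_pixel_to_point(400, 260, depth_mm=750,
                           cx=320.0, cy=240.0, fx=615.0, fy=615.0))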
Example #9
    def detections_callback(self, detection):
        global counter
        global maxDistance  #the maximal distance between two points required for them to be considered the same detection
        global alreadyDetected
        global n
        global lastAdded
        global detected
        global numFaces

        u = detection.x + detection.width / 2
        v = detection.y + detection.height / 2

        camera_info = None
        best_time = 100
        for ci in self.camera_infos:
            time = abs(ci.header.stamp.to_sec() -
                       detection.header.stamp.to_sec())
            if time < best_time:
                camera_info = ci
                best_time = time

        if not camera_info or best_time > 1:
            print("Best time is higher than 1")
            return

        camera_model = PinholeCameraModel()
        camera_model.fromCameraInfo(camera_info)

        point = Point(
            ((u - camera_model.cx()) - camera_model.Tx()) / camera_model.fx(),
            ((v - camera_model.cy()) - camera_model.Ty()) / camera_model.fy(),
            1)

        localization = self.localize(detection.header, point,
                                     self.region_scope)
        transformedPoint = point
        if not localization:
            return

        now = detection.header.stamp
        self.tf.waitForTransform("camera_rgb_optical_frame", "map", now,
                                 rospy.Duration(5.0))

        m = geometry_msgs.msg.PoseStamped()
        m.header.frame_id = "camera_rgb_optical_frame"
        m.pose = localization.pose
        m.header.stamp = detection.header.stamp
        transformedPoint = self.tf.transformPose("map", m)

        if (localization.pose.position.x != 0.0):

            #print("Transformation: ", transformedPoint)
            beenDetected = False
            lastAdded = transformedPoint
            for p in detected:
                if self.distance(p, transformedPoint) <= maxDistance:
                    beenDetected = True
                    break

            if not beenDetected:
                counter += 1
                print("-----------------Good to go------------------------")
                detected.append(lastAdded)
                self.pub_face.publish(transformedPoint)
                marker = Marker()
                marker.header.stamp = detection.header.stamp
                marker.header.frame_id = detection.header.frame_id
                marker.pose = localization.pose
                marker.type = Marker.CUBE
                marker.action = Marker.ADD
                marker.frame_locked = False
                marker.lifetime = rospy.Duration.from_sec(1)
                marker.id = self.marker_id_counter
                marker.scale = Vector3(0.1, 0.1, 0.1)
                marker.color = ColorRGBA(1, 0, 0, 1)
                self.markers.markers.append(marker)
                self.marker_id_counter += 1
                #self.soundhandle.say("I detected a face.", blocking = False)
                print("Number of detected faces: ", len(detected))
                lastAdded = transformedPoint
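
Several of the callbacks above assemble the same red 10 cm cube Marker inline; a hypothetical helper capturing that pattern:

import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Vector3
from std_msgs.msg import ColorRGBA

def make_cube_marker(header, pose, marker_id):
    # red 10 cm cube, matching the markers built in the callbacks above
    marker = Marker()
    marker.header = header
    marker.pose = pose
    marker.type = Marker.CUBE
    marker.action = Marker.ADD
    marker.frame_locked = False
    marker.lifetime = rospy.Duration.from_sec(1)
    marker.id = marker_id
    marker.scale = Vector3(0.1, 0.1, 0.1)
    marker.color = ColorRGBA(1, 0, 0, 1)
    return marker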