Example #1
 def publish(self, boxes, scores, classes, num, category_index, masks=None, fps=0):
     # init detection message
     msg = Detection()
     for i in range(boxes.shape[0]):
         if scores[i] > 0.5:
             if classes[i] in category_index.keys():
                 class_name = category_index[classes[i]]['name']
             else:
                 class_name = 'N/A'
             ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
             box = RegionOfInterest()
             # ROI fields are uint32, so cast the box geometry to int
             box.x_offset = int(xmin + (xmax - xmin) / 2.0)
             box.y_offset = int(ymin + (ymax - ymin) / 2.0)
             box.height = int(ymax - ymin)
             box.width = int(xmax - xmin)
             # fill detection message with objects
             obj = Object()
             obj.box = box
             obj.class_name = class_name
             obj.score = int(100*scores[i])
             if masks is not None:
                 obj.mask = self._bridge.cv2_to_imgmsg(masks[i], encoding="passthrough")
             msg.objects.append(obj)
     msg.fps = fps
     # publish detection message
     self.DetPub.publish(msg)
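A note on the message semantics: sensor_msgs/RegionOfInterest declares x_offset/y_offset as the left-most and top-most pixel of the region (all four geometry fields are uint32), while the example above stores the box center; whether that is intended depends on the consumer of the custom Detection message. A minimal corner-anchored sketch, assuming pixel-space (ymin, xmin, ymax, xmax) input:

from sensor_msgs.msg import RegionOfInterest

def box_to_roi(ymin, xmin, ymax, xmax):
    # Offsets are the top-left pixel of the region, per the msg definition
    roi = RegionOfInterest()
    roi.x_offset = int(xmin)
    roi.y_offset = int(ymin)
    roi.width = int(xmax - xmin)
    roi.height = int(ymax - ymin)
    return roi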
Example #2
 def publish_roi(self):
     if not self.drag_start:
         if self.track_box is not None:
             roi_box = self.track_box
         elif self.detect_box is not None:
             roi_box = self.detect_box
         else:
             return
         # Only runs when not mid-drag; roi_box would be undefined otherwise
         try:
             roi_box = self.cvBox2D_to_cvRect(roi_box)
         except:
             return

         # Watch out for negative offsets
         roi_box[0] = max(0, roi_box[0])
         roi_box[1] = max(0, roi_box[1])

         try:
             ROI = RegionOfInterest()
             ROI.x_offset = int(roi_box[0])
             ROI.y_offset = int(roi_box[1])
             ROI.width = int(roi_box[2])
             ROI.height = int(roi_box[3])
             self.roi_pub.publish(ROI)
         except:
             rospy.loginfo("Publishing ROI failed")
Example #3
	def prediction_generate(self, match, last_match):
		#print "prediction_generate"
		prediction_roi = RegionOfInterest()
		prediction_roi.x_offset = 2 * match.x_offset - last_match.x_offset
		prediction_roi.y_offset = 2 * match.y_offset - last_match.y_offset
		print(match.x_offset, last_match.x_offset, prediction_roi.x_offset)
		print(match.y_offset, last_match.y_offset, prediction_roi.y_offset)
		return prediction_roi
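The prediction is a constant-velocity extrapolation: the next offset equals the current one plus the last displacement, i.e. 2 * current - previous. The same idea with plain integers (names are illustrative):

def extrapolate(curr, prev):
    # constant-velocity prediction: next = curr + (curr - prev)
    return 2 * curr - prev

assert extrapolate(120, 100) == 140  # moved +20, so predict +20 more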
Example #4
    def do_camshift(self, cv_image):
        """ Get the image size """
        image_size = cv.GetSize(cv_image)
        image_width = image_size[0]
        image_height = image_size[1]
        
        """ Convert to HSV and keep the hue """
        hsv = cv.CreateImage(image_size, 8, 3)
        cv.CvtColor(cv_image, hsv, cv.CV_BGR2HSV)
        self.hue = cv.CreateImage(image_size, 8, 1)
        cv.Split(hsv, self.hue, None, None, None)

        """ Compute back projection """
        backproject = cv.CreateImage(image_size, 8, 1)

        """ Run the cam-shift algorithm """
        cv.CalcArrBackProject( [self.hue], backproject, self.hist )
        cv.ShowImage("Backproject", backproject)
        if self.track_window and is_rect_nonzero(self.track_window):
            crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
            (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
            self.track_window = rect

        """ If mouse is pressed, highlight the current selected rectangle
            and recompute the histogram """

        if self.drag_start and is_rect_nonzero(self.selection):
            sub = cv.GetSubRect(cv_image, self.selection)
            save = cv.CloneMat(sub)
            cv.ConvertScale(cv_image, cv_image, 0.5)
            cv.Copy(save, sub)
            x,y,w,h = self.selection
            cv.Rectangle(cv_image, (x,y), (x+w,y+h), (255,255,255))

            sel = cv.GetSubRect(self.hue, self.selection )
            cv.CalcArrHist( [sel], self.hist, 0)
            (_, max_val, _, _) = cv.GetMinMaxHistValue(self.hist)
            if max_val != 0:
                cv.ConvertScale(self.hist.bins, self.hist.bins, 255. / max_val)
        elif self.track_window and is_rect_nonzero(self.track_window):
            """ The width and height on the returned track_box seem to be exchanged--a bug in CamShift? """
            ((x, y), (w, h), angle) = track_box
            track_box = ((x, y), (h, w), angle)
            cv.EllipseBox(cv_image, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0)
            
            roi = RegionOfInterest()
            roi.x_offset = int(min(image_width, max(0, track_box[0][0] - track_box[1][0] / 2)))
            roi.y_offset = int(min(image_height, max(0, track_box[0][1] - track_box[1][1] / 2)))
            roi.width = int(track_box[1][0])
            roi.height = int(track_box[1][1])
            self.ROI.publish(roi)

        cv.ShowImage("Histogram", self.hue_histogram_as_image(self.hist))
        
        if not self.backproject_mode:
            return cv_image
        else:
            return backproject
Example #5
 def extended_face_roi(self, face_roi, img_shape):
     extroi = RegionOfInterest()
     wext = int(face_roi.width * FACE_ROI_WIDTH_EXTENTION_K)
     hext = int(face_roi.height * FACE_ROI_HEIGHT_EXTENTION_K)
     extroi.x_offset = max(face_roi.x_offset - wext, 0)
     extroi.y_offset = max(face_roi.y_offset - hext, 0)
     extroi.width = min(face_roi.width + 2*wext, img_shape[1] - extroi.x_offset - 1)
     extroi.height = min(face_roi.height + 2*hext, img_shape[0] - extroi.y_offset - 1)
     return extroi
Example #8
def click(s, x, y, click, u):
    if click:
        roi = RegionOfInterest()
        #r = 10
        roi.x_offset = x  #max(x-r,0)
        roi.y_offset = y  #max(y-r,0)
        #roi.width = 2*r
        #roi.height= 2*r
        roi_pub.publish(roi)
        print(roi.x_offset, roi.y_offset)
Example #9
    def image_cb(self, img):

        body_list = []
        cv_image = self.bridge.imgmsg_to_cv2(img, "bgr8")
        gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
        bodies = self.body_cascade.detectMultiScale(gray, 1.3, 5)
        for body in bodies:
            # detectMultiScale returns (x, y, w, h); build a fresh ROI per
            # detection so the list entries don't all alias one message
            roi = RegionOfInterest()
            roi.x_offset, roi.y_offset, roi.width, roi.height = [int(v) for v in body]
            cv2.rectangle(cv_image, (body[0], body[1]),
                          (body[0] + body[2], body[1] + body[3]), (0, 0, 255),
                          15)
            body_list.append(roi)
        self.ub_image_pub.publish(self.bridge.cv2_to_imgmsg(cv_image, "bgr8"))
        self.ub_pub.publish(body_list)
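Note that rospy publishers only accept message instances, so publish(body_list) as written requires ub_pub to be typed with an array message; sensor_msgs has no ROI-array type, so projects usually define their own. A sketch with a hypothetical message (illustrative names throughout):

# RegionOfInterestArray.msg (hypothetical):
#   std_msgs/Header header
#   sensor_msgs/RegionOfInterest[] rois
from my_vision_msgs.msg import RegionOfInterestArray  # assumed package

def publish_rois(pub, rois):
    pub.publish(RegionOfInterestArray(rois=rois))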
Example #10
    def __init__(self):

        self.bridge = CvBridge()
        self.rospack = rospkg.RosPack()
        self.roi = RegionOfInterest()

        rospy.on_shutdown(self.shutdown)

        # initialize dlib's face detector (HOG-based) and then create
        # the facial landmark predictor
        self.p = os.path.sep.join(
            [self.rospack.get_path('common_face_application')])
        self.libraryDir = os.path.join(self.p, "library")

        self.dlib_filename = self.libraryDir + "/shape_predictor_68_face_landmarks.dat"

        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.dlib_filename)

        # Subscribe to Image msg
        image_topic = "/cv_camera/image_raw"
        self.image_sub = rospy.Subscriber(image_topic, Image, self.cbImage)

        # Subscribe to CameraInfo msg
        cameraInfo_topic = "/cv_camera/camera_info"
        self.cameraInfo_sub = rospy.Subscriber(cameraInfo_topic, CameraInfo,
                                               self.cbCameraInfo)

        # Publish to RegionOfInterest msgs
        roi_topic = "/faceROI"
        self.roi_pub = rospy.Publisher(roi_topic,
                                       RegionOfInterest,
                                       queue_size=10)
Example #11
    def _get_face_rois_ids_openpose(recognitions):
        """
        Get ROIs of faces from openpose recognitions using the nose, left ear
        and right ear
        :param: recognitions from openpose
        """
        nose_recognitions = PeopleRecognizer2D._get_recognitions_with_label(
            "Nose", recognitions)
        left_ear_recognitions = PeopleRecognizer2D._get_recognitions_with_label(
            "LEar", recognitions)
        right_ear_recognitions = PeopleRecognizer2D._get_recognitions_with_label(
            "REar", recognitions)

        rois = []
        group_ids = []
        for nose_recognition in nose_recognitions:
            # We assume a vertical head here
            left_size = 50
            right_size = 50
            try:
                left_ear_recognition = next(
                    r for r in left_ear_recognitions
                    if r.group_id == nose_recognition.group_id)

                left_size = math.hypot(
                    left_ear_recognition.roi.x_offset -
                    nose_recognition.roi.x_offset,
                    left_ear_recognition.roi.y_offset -
                    nose_recognition.roi.y_offset)
            except StopIteration:
                pass
            try:
                right_ear_recognition = next(
                    r for r in right_ear_recognitions
                    if r.group_id == nose_recognition.group_id)

                right_size = math.hypot(
                    right_ear_recognition.roi.x_offset -
                    nose_recognition.roi.x_offset,
                    right_ear_recognition.roi.y_offset -
                    nose_recognition.roi.y_offset)
            except StopIteration:
                pass

            size = left_size + right_size
            width = int(size)
            height = int(math.sqrt(2) * size)

            rois.append(
                RegionOfInterest(x_offset=max(
                    0, int(nose_recognition.roi.x_offset - .5 * width)),
                                 y_offset=max(
                                     0,
                                     int(nose_recognition.roi.y_offset -
                                         .5 * height)),
                                 width=width,
                                 height=height))
            group_ids.append(nose_recognition.group_id)

        return rois, group_ids
Example #12
    def _generate_dummy_person3d(self, rgb, depth, cam_info, name=None):
        person = Person3D()
        person.header = rgb.header
        person.name = name if name else ""  # Empty name makes this person unknown
        person.age = 20 + self._counter
        person.gender = random.choice([0, 1])
        person.gender_confidence = random.normalvariate(mu=0.75, sigma=0.2)
        person.posture = random.choice(['standing', 'sitting'])
        person.emotion = 'happy'
        colors = ['black', 'orange', 'yellow']
        random.shuffle(colors)
        person.shirt_colors = colors
        person.tags = ['LWave', 'RWave'] + random.choice([[], ["is_pointing"]])
        # person.body_parts_pose
        person.position = Point()
        person.face.roi = RegionOfInterest(width=200, height=200)

        xs = [-2, -1, -0.5, 0.5, 1, 2]
        random.shuffle(xs)
        person.position.x = xs.pop()

        zs = [0.5, 1, 2, 3]
        random.shuffle(zs)
        person.position.z = zs.pop()

        person.pointing_pose = Pose(
            position=person.position,
            orientation=Quaternion(*quaternion_from_euler(0, 0, math.pi / 2)))

        return person
Example #13
 def __init__(self, **kwargs):
     assert all(['_' + key in self.__slots__ for key in kwargs.keys()]), \
         "Invalid arguments passed to constructor: %r" % kwargs.keys()
     from std_msgs.msg import Header
     self.header = kwargs.get('header', Header())
     self.height = kwargs.get('height', int())
     self.width = kwargs.get('width', int())
     self.distortion_model = kwargs.get('distortion_model', str())
     self.d = kwargs.get('d', list())
     self.k = kwargs.get(
         'k',
         list([float() for x in range(9)])
     )
     self.r = kwargs.get(
         'r',
         list([float() for x in range(9)])
     )
     self.p = kwargs.get(
         'p',
         list([float() for x in range(12)])
     )
     self.binning_x = kwargs.get('binning_x', int())
     self.binning_y = kwargs.get('binning_y', int())
     from sensor_msgs.msg import RegionOfInterest
     self.roi = kwargs.get('roi', RegionOfInterest())
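This is the generated ROS 2 constructor, so CameraInfo and RegionOfInterest can be built directly from keyword arguments. A short usage sketch:

from sensor_msgs.msg import CameraInfo, RegionOfInterest

info = CameraInfo(
    height=480,
    width=640,
    roi=RegionOfInterest(x_offset=0, y_offset=0, width=640, height=480),
)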
Example #14
    def __init__(self):
        APP_ID = '18721308'
        API_KEY = 'lNQGdBNazTPv8LpSP4x0GQlI'
        SECRET_KEY = 'nW8grONY777n4I2KvpOVuKGDNiY03omI'
        self.client_face = AipFace(APP_ID, API_KEY, SECRET_KEY)
        self.client_body = AipBodyAnalysis(APP_ID, API_KEY, SECRET_KEY)
        self.image_type = "BASE64"
        filepath = "/Pictures/image.jpg"

        msg = reception_image()
        self.guest = Guest()
        # self.name = None
        # self.age = None
        # self.image = None
        # self.gender = None
        # self.drink = None

        self.options_body = {}
        self.options_body["type"] = "gender,upper_color,upper_wear_fg"
        self.options_face = {}
        self.options_face["face_field"] = "age,gender"
        self.options_face["max_face_num"] = 3
        self.options_face["face_type"] = "LIVE"
      

        # Publishers
        self.roi_pub = rospy.Publisher('/image/roi', RegionOfInterest, queue_size=1)
        self.objpos_pub = rospy.Publisher('/image/object_position2d', String, queue_size=1)
        self.control_pub = rospy.Publisher("/control", reception, queue_size=1)
        # Subscribers (rospy has no Pubscriber; this one takes a callback,
        # so it must be a Subscriber despite the _pub name)
        self.speech_pub = rospy.Subscriber('/speech/check_door', reception_image, self.find_people)
        self.control_sub = rospy.Subscriber("/control", reception, self.controlCallback)
       
        self.ROI = RegionOfInterest()
Example #15
    def __init__(self):
        rospy.init_node('marker_detection', anonymous=True)

        self.image_pub = rospy.Publisher("/detection/image_raw",
                                         Image,
                                         queue_size=1)

        # Initialize the Region of Interest and its publisher
        self.ROI = RegionOfInterest()
        self.ROI_pub = rospy.Publisher("/ch1/marker_bb",
                                       RegionOfInterest,
                                       queue_size=1)

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/downward_cam/camera/image",
                                          Image,
                                          self.callback,
                                          queue_size=1,
                                          buff_size=2**24,
                                          tcp_nodelay=True)

        self.image_width = 1
        self.image_height = 1
        self.cameraInfo_sub = rospy.Subscriber(
            "/downward_cam/camera/camera_info",
            CameraInfo,
            self.get_camera_info,
            queue_size=1)

        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            rate.sleep()
Example #16
    def __init__(self):
        self.node_name = "crop_row_publisher"

        rospy.init_node(self.node_name)

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.node_name

        # Initialize the Region of Interest and its publisher
        self.ROI = RegionOfInterest()
        self.roi_pub = rospy.Publisher("/roi", RegionOfInterest, queue_size=1)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        # Subscribe to the camera image and depth topics and set
        # the appropriate callbacks
        #self.image_sub = rospy.Subscriber("input_rgb_image", Image, self.image_callback, queue_size=1)
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw",
                                          Image,
                                          self.image_callback,
                                          queue_size=1)

        rospy.loginfo("Waiting for image topics...")
        # Wait on the topic we actually subscribe to above
        rospy.wait_for_message("/camera/rgb/image_raw", Image)
        rospy.loginfo("Ready.")
Example #17
    def detect_poses(self, image):
        # `op` added to globals in the constructor
        datum = op.Datum()  # pylint: disable=undefined-variable # noqa: F821
        datum.cvInputData = image
        self._openpose_wrapper.emplaceAndPop(op.VectorDatum([datum]))

        keypoints = datum.poseKeypoints
        overlayed_image = datum.cvOutputData

        recognitions = []

        if keypoints is not None and len(
                keypoints.shape
        ) == 3:  # If no detections, keypoints will be None
            num_persons, num_bodyparts, _ = keypoints.shape
            for person_id in range(0, num_persons):
                for body_part_id in range(0, num_bodyparts):
                    body_part = self._model["body_parts"][body_part_id]
                    x, y, probability = keypoints[person_id][body_part_id]
                    if probability > 0:
                        recognitions.append(
                            Recognition(group_id=person_id,
                                        roi=RegionOfInterest(width=1,
                                                             height=1,
                                                             x_offset=int(x),
                                                             y_offset=int(y)),
                                        categorical_distribution=
                                        CategoricalDistribution(probabilities=[
                                            CategoryProbability(
                                                label=body_part,
                                                probability=float(probability))
                                        ])))

        return recognitions, overlayed_image
Example #18
 def __init__(self, **kwargs):
     assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
         'Invalid arguments passed to constructor: %s' % \
         ', '.join(sorted(k for k in kwargs.keys() if '_' + k not in self.__slots__))
     from std_msgs.msg import Header
     self.header = kwargs.get('header', Header())
     self.height = kwargs.get('height', int())
     self.width = kwargs.get('width', int())
     self.distortion_model = kwargs.get('distortion_model', str())
     self.d = array.array('d', kwargs.get('d', []))
     if 'k' not in kwargs:
         self.k = numpy.zeros(9, dtype=numpy.float64)
     else:
         self.k = numpy.array(kwargs.get('k'), dtype=numpy.float64)
         assert self.k.shape == (9, )
     if 'r' not in kwargs:
         self.r = numpy.zeros(9, dtype=numpy.float64)
     else:
         self.r = numpy.array(kwargs.get('r'), dtype=numpy.float64)
         assert self.r.shape == (9, )
     if 'p' not in kwargs:
         self.p = numpy.zeros(12, dtype=numpy.float64)
     else:
         self.p = numpy.array(kwargs.get('p'), dtype=numpy.float64)
         assert self.p.shape == (12, )
     self.binning_x = kwargs.get('binning_x', int())
     self.binning_y = kwargs.get('binning_y', int())
     from sensor_msgs.msg import RegionOfInterest
     self.roi = kwargs.get('roi', RegionOfInterest())
Example #19
    def detect_poses(self, image):
        keypoints, overlayed_image = self._openpose.forward(image, True)

        recognitions = []

        if len(keypoints.shape) == 3:
            num_persons, num_bodyparts, _ = keypoints.shape

            for person_id in range(0, num_persons):
                for body_part_id in range(0, num_bodyparts):
                    body_part = self._model["body_parts"][body_part_id]
                    x, y, probability = keypoints[person_id][body_part_id]
                    if probability > 0:
                        recognitions.append(
                            Recognition(group_id=person_id,
                                        roi=RegionOfInterest(width=1,
                                                             height=1,
                                                             x_offset=int(x),
                                                             y_offset=int(y)),
                                        categorical_distribution=
                                        CategoricalDistribution(probabilities=[
                                            CategoryProbability(
                                                label=body_part,
                                                probability=float(probability))
                                        ])))

        return recognitions, overlayed_image
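Both OpenPose wrappers above encode each keypoint as a degenerate 1x1 ROI, so the pixel coordinate can be recovered directly from the offsets. A small sketch:

def roi_to_point(roi):
    # Keypoints are stored as 1x1 ROIs: the offsets are the pixel itself
    assert roi.width == 1 and roi.height == 1
    return roi.x_offset, roi.y_offset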
Example #20
def format_msg(src, Obj, s=2):
    assert type(Obj)==type and 'msg' in str(Obj)
    from sensor_msgs.msg import RegionOfInterest
    from geometry_msgs.msg import Point32, Polygon

    hd = hasattr(src,'header')
    if type(src)==str: im = cv2.imread(src)
    elif type(src)==np.ndarray: im = src.copy()
    elif hd: im = CVB.compressed_imgmsg_to_cv2(src)
    obj = Obj(header=src.header) if hd else Obj()

    im, res = get_instances(im, HSV_Set, {}, s)
    if not res: return obj, im, res

    if hasattr(Obj,'class_ids'): obj.class_ids = res['cid']
    if hasattr(Obj,'class_names'): obj.class_names = res['cls']
    if hasattr(Obj,'scores'): obj.scores = [1.0]*len(res['cls'])
    if hasattr(Obj,'boxes'): obj.boxes = [RegionOfInterest(x_offset=x,
        y_offset=y, width=w, height=h) for (x,y,w,h) in res['box']]
    if hasattr(Obj,'contours'): obj.contours = [Polygon([Point32(
        x=x, y=y) for (x,y) in c.transpose()]) for c in res['cnt']]
    elif hasattr(Obj,'masks'): # for masks
        for mk in res['mask']: # get masks
            m = CVB.cv2_to_imgmsg(mk.astype('uint8')*255, 'mono8')
            m.header = obj.header; obj.masks.append(m)
    return obj, im, res
Example #21
class cvBridgeDemo():
    def __init__(self):
        self.node_name = "crop_row_publisher"

        rospy.init_node(self.node_name)

        # What we do during shutdown
        rospy.on_shutdown(self.cleanup)

        # Create the OpenCV display window for the RGB image
        self.cv_window_name = self.node_name

        # Initialize the Region of Interest and its publisher
        self.ROI = RegionOfInterest()
        self.roi_pub = rospy.Publisher("/roi", RegionOfInterest, queue_size=1)

        # Create the cv_bridge object
        self.bridge = CvBridge()

        # Subscribe to the camera image and depth topics and set
        # the appropriate callbacks
        self.image_sub = rospy.Subscriber("input_rgb_image",
                                          Image,
                                          self.image_callback,
                                          queue_size=1)

        rospy.loginfo("Waiting for image topics...")
        rospy.wait_for_message("input_rgb_image", Image)
        rospy.loginfo("Ready.")

    def image_callback(self, ros_image):
        # Use cv_bridge() to convert the ROS image to OpenCV format
        try:
            frame = self.bridge.imgmsg_to_cv2(ros_image, "bgr8")
        except CvBridgeError as e:
            print(e)
            return

        # Convert the image to a numpy array since most cv2 functions
        # require numpy arrays.
        frame = np.array(frame, dtype=np.uint8)

        # Process the frame using the process_image() function
        (crop_lines, x_offset, width) = self.process_image(frame)

        ROI = RegionOfInterest()
        ROI.x_offset = x_offset
        ROI.width = width
        self.roi_pub.publish(ROI)

        # Display the image and the detected lines.

        cv2.imshow(self.node_name, cv2.addWeighted(frame, 1, crop_lines, 1,
                                                   0.0))
        # Process any keyboard commands
        self.keystroke = cv2.waitKey(5)
        if self.keystroke != -1:
            cc = chr(self.keystroke & 255).lower()
            if cc == 'q':
                # The user has pressed the q key, so exit
                rospy.signal_shutdown("User hit q key to quit.")
Example #22
    def test_WithBoundingBox(self):
        bbox = RegionOfInterest(30, 40, 210, 302)
        species_id = SingleSpeciesId(
            bbox,
            [SpeciesScore(3, 0.4), SpeciesScore(2, 0.3)])

        SpeciesSelectionModule.speciesIdCallback(
            SpeciesIDResponse(3, [species_id]), self.module)

        self.checkSignalUpdate(3)

        d = self.module.GetOutputDict()
        self.assertEquals(d['state'], State.CANDIDATES_ARRIVED)
        self.assertEquals(d['name'], 'SpeciesSelection')
        foundBbox = d['bbox']
        self.assertEquals(foundBbox['x_offset'], 30)
        self.assertEquals(foundBbox['y_offset'], 40)
        self.assertEquals(foundBbox['height'], 210)
        self.assertEquals(foundBbox['width'], 302)
        self.assertEquals(len(d['species_hits']), 3)
        self.checkSpeciesInfo(self.speciesInfo[3], d['species_hits'][0])
        self.checkSpeciesInfo(self.speciesInfo[2], d['species_hits'][1])
        self.checkSpeciesInfo(SpeciesSelectionModule.UNKNOWN_SPECIES,
                              d['species_hits'][2])
        self.assertEquals(d['user_chose'], False)
        self.assertEquals(d['idx'], 0)
Example #23
    def detect_persons_3d(self):
        """
        :return: [Skeleton]
        """
        width = 10  # px
        height = 10  # px

        persons = self.detect_persons()

        person_slot_rois = []
        for i, person in enumerate(persons):
            for slot in person.__slots__:
                detection = getattr(person, slot)
                if detection.x == 0 or detection.y == 0:
                    continue
                x_offset = max(0, detection.x - width // 2)
                y_offset = max(0, detection.y - height // 2)

                roi = RegionOfInterest(x_offset=x_offset,
                                       y_offset=y_offset,
                                       width=width,
                                       height=height)
                print((i, slot, roi))
                person_slot_rois.append((i, slot, roi))

        try:
            rois = [r for _, _, r in person_slot_rois]  # avoid shadowing input()
            rospy.loginfo('project input: %s', str(rois))
            ps = self.project_rois(rois).points
        except ValueError as e:
            rospy.loginfo('project_rois failed: %s', e)
            return []

        skeletons = [dict() for _ in range(len(persons))]
        for j, (person, slot, _) in enumerate(person_slot_rois):
            p = ps[j]
            if math.isnan(p.point.x) or math.isnan(p.point.y) or math.isnan(
                    p.point.z):
                rospy.loginfo(
                    'skipping %s because of invalid projection to 3d', slot)
                continue
            rospy.loginfo('adding point to person {}: {} at slot {}'.format(
                person, p.point, slot).replace('\n', ' '))
            skeletons[person][slot] = p

        for skeleton in skeletons:
            zmin = float('inf')
            for slot, point in skeleton.items():
                if point.point.z < zmin:
                    zmin = point.point.z

            for slot, point in skeleton.items():
                point.point.z = zmin

        skeletons = [Skeleton(bodyparts) for bodyparts in skeletons]
        self.visualize_skeletons(skeletons)
        return skeletons
Example #24
def callback(msg, publisher):
    roi = RegionOfInterest(0, 0, msg.image.height, msg.image.width)
    region = ImageRegion(bounding_box=roi)
    request = SpeciesIDRequest(image_id=msg.image_id,
                               image=msg.image,
                               regions=[region])
    request.header.stamp = rospy.Time.now()
    publisher.publish(request)
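The positional constructor works here because the field order of sensor_msgs/RegionOfInterest is (x_offset, y_offset, height, width, do_rectify) -- note height before width. Keyword arguments make the full-image ROI harder to get wrong:

roi = RegionOfInterest(x_offset=0, y_offset=0,
                       height=msg.image.height,
                       width=msg.image.width,
                       do_rectify=False)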
Example #25
    def camera_info(self):
        msg_header = Header()
        msg_header.frame_id = "f450/robot_camera_down"
        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = 480
        msg.width = 640
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [1.0, 0.0, 320.5, 0.0, 1.0, 240.5, 0.0, 0.0, 1.0]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            1.0, 0.0, 320.5, -0.0, 0.0, 1.0, 240.5, 0.0, 0.0, 0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi
        return msg
Example #26
    def rosmsg(self):
        """:obj:`sensor_msgs.CameraInfo` : Returns a ROS CameraInfo msg
        """
        msg_header = Header()
        msg_header.frame_id = self._frame

        msg_roi = RegionOfInterest()
        msg_roi.x_offset = 0
        msg_roi.y_offset = 0
        msg_roi.height = 0
        msg_roi.width = 0
        msg_roi.do_rectify = False

        msg = CameraInfo()
        msg.header = msg_header
        msg.height = self._height
        msg.width = self._width
        msg.distortion_model = 'plumb_bob'
        msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
        msg.K = [
            self._fx, 0.0, self._cx, 0.0, self._fy, self._cy, 0.0, 0.0, 1.0
        ]
        msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
        msg.P = [
            self._fx, 0.0, self._cx, 0.0, 0.0, self._fx, self._cy, 0.0, 0.0,
            0.0, 1.0, 0.0
        ]
        msg.binning_x = 0
        msg.binning_y = 0
        msg.roi = msg_roi

        return msg
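For reference, K is the row-major 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1] and P the 3x4 projection matrix [fx' 0 cx' Tx; 0 fy' cy' Ty; 0 0 1 0]. The example reuses self._fx in the fy' slot of P, which is only correct under a square-pixel assumption; a sketch that keeps the focal lengths distinct (fx, fy, cx, cy standing in for the calibrated intrinsics):

msg.K = [fx, 0.0, cx,
         0.0, fy, cy,
         0.0, 0.0, 1.0]
msg.P = [fx, 0.0, cx, 0.0,   # Tx = 0 for a monocular camera
         0.0, fy, cy, 0.0,
         0.0, 0.0, 1.0, 0.0]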
Example #27
def callback(image_msg):
    global pub_Image
    global pub_BoxesImage
    global pub_Boxes
    global face_cascade
    global bridge
    global boxes_msg
    global faces
    global local_position_pub
    global goal_pose
    # Convert ros image into a cv2 image
    cv_img = bridge.imgmsg_to_cv2(image_msg, desired_encoding="passthrough")
    cv_gray = cv2.cvtColor(cv_img, cv2.COLOR_BGR2GRAY)
    # Find the faces and draw a rectangle around them
    faces = face_cascade.detectMultiScale(cv_gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv_img = cv2.rectangle(cv_img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # For each image, we also want to publish the cropped image
        crop_img = cv_img[y:y + h, x:x + w]
        crop_msg = bridge.cv2_to_imgmsg(crop_img, encoding="bgr8")
        # And the region of interest information
        pub_BoxesImage.publish(crop_msg)
        boxes_msg = RegionOfInterest()
        boxes_msg.x_offset = x
        boxes_msg.y_offset = y
        boxes_msg.height = h
        boxes_msg.width = w
        boxes_msg.do_rectify = False
        pub_Boxes.publish(boxes_msg)
    # Publish the original image with the rectangles
    mod_img = bridge.cv2_to_imgmsg(cv_img, encoding="bgr8")
    pub_Image.publish(mod_img)
Example #28
    def detect_face(self, input_image):
        # Equalize the histogram to reduce lighting effects
        search_image = cv2.equalizeHist(input_image)
        
        # Begin the search using three different XML templates.
        # First check one of the frontal templates
        faces = ()  # guard against NameError if no cascade is loaded
        if self.cascade_2:
            faces = self.cascade_2.detectMultiScale(search_image, **self.haar_params)
                                         
        # If that fails, check the profile template
        if not len(faces):
            if self.cascade_3:
                faces = self.cascade_3.detectMultiScale(search_image,**self.haar_params)

        # If that fails, check a different frontal profile
        if not len(faces):
            if self.cascade_1:
                faces = self.cascade_1.detectMultiScale(search_image, **self.haar_params)
        
        # If we did not detect any faces in this frame, display the message
        # "LOST FACE" on the marker image (defined in ros2opencv2.py)
        if not len(faces):
            self.last_face_box = None
            if self.show_text:
                font_face = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                cv2.putText(self.marker_image, "LOST FACE!", 
                            (20, int(self.frame_size[1] * 0.9)), 
                            font_face, font_scale, cv.RGB(255, 50, 50))
            return None
        
        # If the show_text is set, display the hit rate so far
        if self.show_text:
            font_face = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 0.5
            cv2.putText(self.marker_image, "Hit Rate: " + 
                        str(trunc(self.hit_rate, 2)), 
                        (20, int(self.frame_size[1] * 0.9)), 
                        font_face, font_scale, cv.RGB(255, 255, 0))
        
        # If we do have a face, rescale it and publish
        for (x, y, w, h) in faces:
            # Set the face box to be cvRect which is just a tuple in Python
            face_box = (x, y, w, h)
            
            # If we have a face, publish the bounding box as the ROI
            if face_box is not None:
                self.ROI = RegionOfInterest()
                self.ROI.x_offset = min(self.frame_size[0], max(0, x))
                self.ROI.y_offset = min(self.frame_size[1], max(0, y))
                self.ROI.width = min(self.frame_size[0], w)
                self.ROI.height = min(self.frame_size[1], h)
                
            self.pubROI.publish(self.ROI)

            # Break out of the loop after the first face 
            return face_box
Example #29
 def __init__(self, node_name):	      
     self.node_name = node_name
     rospy.init_node(node_name)
     rospy.loginfo("Starting node " + str(node_name))
     rospy.on_shutdown(self.cleanup)
     self.show_text = rospy.get_param("~show_text", True)
     self.ROI = RegionOfInterest()
     self.roi_pub = rospy.Publisher("/roi", RegionOfInterest, queue_size=1)
     self.frame_size = None
     self.frame_width = None
     self.frame_height = None
     self.cps = 0 
     self.cps_values = list()
     self.cps_n_values = 20      
     self.bridge = CvBridge()
     self.track_box = None
     self.has_tracked = False
     self.redLower = None
     self.redUpper = None
     self.blueLower = None
     self.blueUpper = None
     self.track_color = None
     self.track_count = 0
     self.track_count_thread = 90  # threshold on track count before publishing the ROI
     self.sd = ShapeDetector()
     self.red_h_min = rospy.get_param("~red_h_min", 0)
     self.red_h_max = rospy.get_param("~red_h_max", 10)	
     self.blue_h_min = rospy.get_param("~blue_h_min", 100)
     self.blue_h_max = rospy.get_param("~blue_h_max", 124)
     self.s_min = rospy.get_param("~s_min", 43)	
     self.s_max = rospy.get_param("~s_max", 255)	
     self.v_min = rospy.get_param("~v_min", 46)	
     self.v_max = rospy.get_param("~v_max", 255)
     self.red_area_percent = rospy.get_param("~red_area_percent", 75)
     self.blue_area_percent = rospy.get_param("~blue_area_percent", 85)	
     self.image_pub = rospy.Publisher("camera/image",Image,queue_size=10)
     self.image_sub = rospy.Subscriber("camera/bgr/image_raw", Image, self.image_callback, queue_size=1)
     if int(minor_ver) < 3:
         self.tracker = cv2.Tracker_create(tracker_type)
     else:
         if tracker_type == 'BOOSTING':
             self.tracker = cv2.TrackerBoosting_create()
         elif tracker_type == 'MIL':
             self.tracker = cv2.TrackerMIL_create()
         elif tracker_type == 'KCF':
             self.tracker = cv2.TrackerKCF_create()
         elif tracker_type == 'TLD':
             self.tracker = cv2.TrackerTLD_create()
         elif tracker_type == 'MEDIANFLOW':
             self.tracker = cv2.TrackerMedianFlow_create()
         elif tracker_type == 'GOTURN':
             self.tracker = cv2.TrackerGOTURN_create()
         elif tracker_type == 'MOSSE':
             self.tracker = cv2.TrackerMOSSE_create()
         elif tracker_type == "CSRT":
             self.tracker = cv2.TrackerCSRT_create()
Example #30
    def detect_and_draw(self, imgmsg):
        if self.pause:
            return
        frame = self.br.imgmsg_to_cv2(imgmsg, "bgr8")
        self.frame = frame.copy()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, np.array((0., 60., 32.)),
                           np.array((180., 255., 255.)))

        # rospy.loginfo("Selection %s / TS : %d" % (str(self.selection),self.tracking_state))

        if self.selection:
            x0, y0, x1, y1 = self.selection
            #print(self.selection)
            self.track_window = (x0, y0, x1 - x0, y1 - y0)
            hsv_roi = hsv[y0:y1, x0:x1]
            mask_roi = mask[y0:y1, x0:x1]
            hist = cv2.calcHist([hsv_roi], [0], mask_roi, [16], [0, 180])
            cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.hist = hist.reshape(-1)

            vis_roi = self.frame[y0:y1, x0:x1]
            cv2.bitwise_not(vis_roi, vis_roi)
            self.frame[mask == 0] = 0

        if self.tracking_state == 1 and is_rect_nonzero(self.track_window):
            self.selection = None
            prob = cv2.calcBackProject([hsv], [0], self.hist, [0, 180], 1)
            prob &= mask
            term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10,
                         1)
            track_box, self.track_window = cv2.CamShift(
                prob, self.track_window, term_crit)
            print(prob.shape)
            self.backproject = prob[..., np.newaxis].copy()

            if self.show_backproj:
                self.frame = self.backproject.copy()
            try:
                cv2.ellipse(self.frame, track_box, (0, 0, 255), 2)
            except:
                print(track_box)
            x, y, w, h = self.track_window
            # Positional order is (x_offset, y_offset, height, width,
            # do_rectify); use keywords so width/height don't get swapped
            self.bbpub.publish(RegionOfInterest(x_offset=x, y_offset=y,
                                                width=w, height=h,
                                                do_rectify=False))
            proba_msg = self.br.cv2_to_imgmsg(self.backproject, "mono8")
            proba_msg.header = imgmsg.header
            self.bppub.publish(proba_msg)

        if not self.pause:
            if not self.backproject_mode:
                cv2.imshow("camshift", self.frame)
            else:
                cv2.imshow("camshift", self.backproject)
            if self.disp_hist:
                cv2.imshow("Histogram", self.hue_histogram_as_image(self.hist))
Example #31
 def process_image(self, image):
     '''
     Convert inbound image to OpenCV.
     Apply LED detector
     Publish location is LED is detected
     '''
     cv2_img_bgr = self.bridge.imgmsg_to_cv2(image, desired_encoding="bgr8")
     image_hsv = cv2.cvtColor(cv2_img_bgr, cv2.COLOR_BGR2HSV)
     blue_mask = cv2.inRange(image_hsv, (119, 127, 127), (121, 255, 255))
     green_mask = cv2.inRange(image_hsv, (59, 127, 127), (61, 255, 255))
     red_mask = cv2.inRange(image_hsv, (0, 127, 127), (1, 255, 255))
     acquired = False
     masks = (red_mask, green_mask, blue_mask)
     color = [0, 0, 0]
     for i in range(len(masks)):
         contours, hierarchy = cv2.findContours(masks[i], cv2.RETR_TREE,
                                                cv2.CHAIN_APPROX_SIMPLE)
         # if we find a contour, use it...
         if len(contours) > 0:
             rospy.loginfo('Led detected')
             acquired = True
             color[i] = 255
             rospy.loginfo("Color %s", color)
             # publish annotated image
             cv2.drawContours(cv2_img_bgr, contours, 0, (20, 255, 57), 3)
             # publish center of contour
             M = cv2.moments(contours[0])
             if M['m00'] == 0:
                 continue  # degenerate contour; avoid division by zero
             cx = int(M['m10'] / M['m00'])
             cy = int(M['m01'] / M['m00'])
             # OpenCV switches x and y for points.  Be careful here.
             # We want:  origin top left, x right, y down
             rospy.loginfo("Centroid (%d, %d)", cx, cy)
             # publish bounding rectable
             rect = cv2.boundingRect(contours[0])
             rospy.loginfo('Bounding Rect %s', rect)
             image_blob = ImageBlob(header=image.header,
                                    color=ColorRGBA(r=color[0],
                                                    g=color[1],
                                                    b=color[2],
                                                    a=1.0),
                                    centroid=Point(x=cx, y=cy),
                                    bounding_rectangle=RegionOfInterest(
                                        x_offset=rect[0],
                                        y_offset=rect[1],
                                        width=rect[2],
                                        height=rect[3]))
             self.blob_publisher.publish(image_blob)
             break
     if acquired:
         self.acquired_publisher.publish(Empty())
     else:
         self.lost_publisher.publish(Empty())
     ros_img_contours = self.bridge.cv2_to_imgmsg(cv2_img_bgr,
                                                  encoding="bgr8")
     self.image_publisher.publish(ros_img_contours)
Example #32
def mask_to_rle_msg_and_roi_msg(mask):
    mask_rle = mask_utils.encode(np.asfortranarray(mask))
    rle_msg = RLE(height=mask.shape[0],
                  width=mask.shape[1],
                  data=mask_rle['counts'])
    bbox = mask_utils.toBbox([mask_rle])[0]
    # toBbox returns float [x, y, w, h]; ROI fields are uint32
    roi_msg = RegionOfInterest(x_offset=int(bbox[0]),
                               y_offset=int(bbox[1]),
                               width=int(bbox[2]),
                               height=int(bbox[3]))
    return rle_msg, roi_msg
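mask_utils here is presumably pycocotools.mask: encode expects a Fortran-ordered uint8 array, and toBbox returns [x, y, w, h] floats, hence the int casts above. A round-trip check under that assumption:

import numpy as np
from pycocotools import mask as mask_utils

mask = np.zeros((4, 6), dtype=np.uint8)
mask[1:3, 2:5] = 1
rle = mask_utils.encode(np.asfortranarray(mask))
assert mask_utils.toBbox([rle])[0].tolist() == [2.0, 1.0, 3.0, 2.0]  # x, y, w, h
assert (mask_utils.decode(rle) == mask).all()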
Example #33
    def publish(self, boxes, classes, labels, seg_map, fps=0):
        # init detection message
        msg = Segmentation()
        # Don't shadow the boxes argument; iterate over the detections directly
        for i in range(boxes.shape[0]):
            class_name = labels[i]
            ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
            box = RegionOfInterest()
            box.x_offset = int(xmin + (xmax - xmin) / 2.0)
            box.y_offset = int(ymin + (ymax - ymin) / 2.0)
            box.height = int(ymax - ymin)
            box.width = int(xmax - xmin)
            # fill segmentation message
            msg.boxes.append(box)
            msg.class_names.append(class_name)

        msg.seg_map = self._bridge.cv2_to_imgmsg(seg_map, encoding="passthrough")
        msg.fps = fps
        # publish detection message
        self.SegPub.publish(msg)
Example #34
def converts_to_ObjectInBoxe(image_size,
                             ymin,
                             xmin,
                             ymax,
                             xmax,
                             probability=(),
                             object_name_list=(),
                             use_normalized_coordinates=True,
                             do_rectify=False):
    """Adds a bounding box to an image.

  Bounding box coordinates can be specified in either absolute (pixel) or
  normalized coordinates by setting the use_normalized_coordinates argument.

  Each string in display_str_list is displayed on a separate line above the
  bounding box in black text on a rectangle filled with the input 'color'.
  If the top of the bounding box extends to the edge of the image, the strings
  are displayed below the bounding box.

  Args:
    image_size: image_size.
    ymin: ymin of bounding box.
    xmin: xmin of bounding box.
    ymax: ymax of bounding box.
    xmax: xmax of bounding box.
    object_name_list: list of strings to display in box
                      (each to be shown on its own line).
    use_normalized_coordinates: If True (default), treat coordinates
      ymin, xmin, ymax, xmax as relative to the image.  Otherwise treat
      coordinates as absolute.
  """
    im_height, im_width = image_size

    if use_normalized_coordinates:
        (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                      ymin * im_height, ymax * im_height)

    else:
        (left, right, top, bottom) = (xmin, xmax, ymin, ymax)

    obj = Object()  # renamed from `object` to avoid shadowing the builtin
    obj.object_name = object_name_list[0]
    obj.probability = probability[0]

    roi = RegionOfInterest()
    roi.x_offset = int(left)
    roi.y_offset = int(top)
    roi.height = int(bottom - top)
    roi.width = int(right - left)
    roi.do_rectify = do_rectify

    object_in_box = ObjectInBox()
    object_in_box.object = obj
    object_in_box.roi = roi

    return object_in_box
Example #35
def compute_roi_from_indices(indices, width, height, padding = 30):
        
    xvals = [ind - width*(math.floor(ind / width)) for ind in indices]
    yvals = [math.floor(ind / width) for ind in indices]
    roi = RegionOfInterest()
    roi.x_offset = int(max(0, min(xvals) - padding))
    roi.y_offset = int(max(0, min(yvals) - padding))

    roi.height = int(max(yvals) - roi.y_offset + padding)
    if roi.height + roi.y_offset > height:
        roi.height = height - roi.y_offset
    roi.width = int(max(xvals) - roi.x_offset + padding)
    if roi.width + roi.x_offset > width:
        roi.width = width - roi.x_offset
    roi.do_rectify = False

    return roi
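The list comprehensions recover 2-D coordinates from row-major flat indices by hand; modulo and integer division (or numpy's vectorized operators) express the same thing more directly:

import numpy as np

def indices_to_xy(indices, width):
    # Row-major flat index -> (x, y): column = index % width,
    # row = index // width
    ind = np.asarray(indices)
    return ind % width, ind // width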
Example #36
    def _stage_recognition(self, roi, label):
        """
        Stage a manual recognition
        :param roi: ROI
        :param label: The label
        """
        x, y, width, height = roi
        r = Recognition(roi=RegionOfInterest(x_offset=x, y_offset=y, width=width, height=height))
        r.categorical_distribution.probabilities = [CategoryProbability(label=label, probability=1.0)]
        r.categorical_distribution.unknown_probability = 0.0

        self._response.recognitions.append(r)
Example #37
    def publish_roi(self):
        roi_box = self.track_window
        # roi_box = self.track_box
        try:
            roi_box = self.cvBox2D_to_cvRect(roi_box)
        except:
            return

        # Watch out for negative offsets
        roi_box[0] = max(0, roi_box[0])
        roi_box[1] = max(0, roi_box[1])

        try:
            roi = RegionOfInterest()
            roi.x_offset = int(roi_box[0])
            roi.y_offset = int(roi_box[1])
            roi.width = int(roi_box[2])
            roi.height = int(roi_box[3])
            self.roi_pub.publish(roi)
        except:
            rospy.loginfo("Publishing ROI failed")
Example #38
    def img_callback(self, data):
        self.parse_label_map()
        # if self.check_timestamp():
        # return None

        cv_image = data
        cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
        self.im_height, self.im_width, channels = cv_image.shape
        print("Height ", self.im_height, " Width: ", self.im_width)
        #  cv_image = cv2.resize(cv_image, (256, 144))
        # Run image through tensorflow graph
        boxes, scores, classes = detector_utils.detect_objects(
            cv_image, self.inference_graph, self.sess)

        # Draw Bounding box
        labelled_image, bbox = detector_utils.draw_box_on_image(
            self.num_objects_detect, self.score_thresh, scores, boxes, classes,
            self.im_width, self.im_height, cv_image, self.target)

        # Calculate FPS
        self.num_frames += 1
        elapsed_time = (datetime.datetime.now() -
                        self.start_time).total_seconds()
        fps = self.num_frames / elapsed_time

        # Display FPS on frame
        detector_utils.draw_text_on_image(
            "FPS : " + str("{0:.2f}".format(fps)), cv_image)
        print("bbox:", bbox)
        if len(bbox) > 0:
            pointx = (bbox[0][0] + bbox[1][0]) / 2
            pointy = (bbox[0][1] + bbox[1][1]) / 2
            pointxdist = abs(bbox[0][0] - bbox[1][0])
            pointydist = abs(bbox[0][1] - bbox[1][1])
            print(pointxdist)
            msg = Point(x=pointx,
                        y=pointy,
                        z=self.see_sub.last_image_time.to_sec())
            print("X: ", pointx, "Y: ", pointy, "TIMESTAMP: ", msg.z)
            self.bbox_pub.publish(msg)
            roi = RegionOfInterest(x_offset=int(bbox[0][0]),
                                   y_offset=int(bbox[0][1]),
                                   height=int(pointydist),
                                   width=int(pointxdist))
            self.roi_pub.publish(roi)

        # Publish image
        try:
            cv_image = cv2.cvtColor(cv_image, cv2.COLOR_RGB2BGR)
            self.debug_image_pub.publish(
                self.bridge.cv2_to_imgmsg(cv_image, 'bgr8'))
        except CvBridgeError as e:
            print(e)
Example #39
    def match_template(self, cv_image):
        frame = np.array(cv_image, dtype=np.uint8)
        
        W,H = frame.shape[1], frame.shape[0]
        w,h = self.template.shape[1], self.template.shape[0]
        width = W - w + 1
        height = H - h + 1

        # Make sure that the template image is smaller than the source
        if W < w or H < h:
            rospy.loginfo( "Template image must be smaller than video frame." )
            return False
        
        if frame.dtype != self.template.dtype: 
            rospy.loginfo("Template and video frame must have same depth and number of channels.")
            return False
    
        # Create copies of the images to modify
        frame_copy = frame.copy()
        template_copy = self.template.copy()
        
        # Down pyramid the images
        for k in range(self.numDownPyrs):
            # Start with the source image (integer halving at each pyramid level)
            W = (W + 1) // 2
            H = (H + 1) // 2
                
            frame_small = np.array([H, W], dtype=frame.dtype)
            frame_small = cv2.pyrDown(frame_copy)
            
#            frame_window = "PyrDown " + str(k)
#            cv.NamedWindow(frame_window, cv.CV_NORMAL)
#            cv.ShowImage(frame_window, cv.fromarray(frame_small))
#            cv.ResizeWindow(frame_window, 640, 480)
    
            #  Prepare for next loop, if any
            frame_copy = frame_small.copy()
    
            #Next, do the target
            w = (w + 1) // 2
            h = (h + 1) // 2
    
            template_small = np.array([h, w], dtype=self.template.dtype)
            template_small = cv2.pyrDown(template_copy)
            
#            template_window = "Template PyrDown " + str(k)
#            cv.NamedWindow(template_window, cv.CV_NORMAL)
#            cv.ShowImage(template_window, cv.fromarray(template_small))
#            cv.ResizeWindow(template_window, 640, 480)
    
            # Prepare for next loop, if any
            template_copy = template_small.copy()
            
        # Perform the match on the shrunken images
        small_frame_width = frame_copy.shape[1]
        small_frame_height = frame_copy.shape[0]
        
        small_template_width = template_copy.shape[1]
        small_template_height = template_copy.shape[0]
    
        result_width = small_frame_width - small_template_width + 1
        result_height = small_frame_height - small_template_height + 1
    
        result_mat = cv.CreateMat(result_height, result_width, cv.CV_32FC1)
        result = np.array(result_mat, dtype = np.float32)

        cv2.matchTemplate(frame_copy, template_copy, cv.CV_TM_CCOEFF_NORMED, result)
        
        cv2.imshow("Result", result)
        
        # NOTE: this early return short-circuits the full multi-scale search
        # below, which is currently unreachable
        return (0, 0, 100, 100)
        
#        # Find the best match location
#        (minValue, maxValue, minLoc, maxLoc) = cv2.minMaxLoc(result)
#        
#       # Transform point back to original image
#        target_location = Point()
#        target_location.x, target_location.y = maxLoc
#        
#        return (target_location.x, target_location.y, w, h)
                
        # Find the top match locations
        locations = self.MultipleMaxLoc(result, self.numMaxima)

        foundPointsList = list()
        confidencesList = list()
        
        W,H = frame.shape[1], frame.shape[0]
        w,h = self.template.shape[1], self.template.shape[0]
        
        # Search the large images at the returned locations       
        for currMax in range(self.numMaxima):
            # Transform the point to its corresponding point in the larger image
            #locations[currMax].x *= int(pow(2.0, self.numDownPyrs))
            #locations[currMax].y *= int(pow(2.0, self.numDownPyrs))
            locations[currMax].x += w // 2
            locations[currMax].y += h // 2
    
            searchPoint = locations[currMax]
            
            print "Search Point", searchPoint
    
            # If we are searching for multiple targets and we have found a target or
            # multiple targets, we don't want to search in the same location(s) again
#            if self.findMultipleTargets and len(foundPointsList) != 0:
#                thisTargetFound = False
#                numPoints = len(foundPointsList)
#                
#                for currPoint in range(numPoints):
#                    foundPoint = foundPointsList[currPoint]
#                    if (abs(searchPoint.x - foundPoint.x) <= self.searchExpansion * 2) and (abs(searchPoint.y - foundPoint.y) <= self.searchExpansion * 2):
#                        thisTargetFound = True
#                        break
#    
#                # If the current target has been found, continue onto the next point
#                if thisTargetFound:
#                    continue
    
            # Set the source image's ROI to slightly larger than the target image,
            # centred at the current point
            searchRoi = RegionOfInterest()
            searchRoi.x_offset = searchPoint.x - w // 2 - self.searchExpansion
            searchRoi.y_offset = searchPoint.y - h // 2 - self.searchExpansion
            searchRoi.width = w + self.searchExpansion * 2
            searchRoi.height = h + self.searchExpansion * 2
            
            #print (searchRoi.x_offset, searchRoi.y_offset, searchRoi.width, searchRoi.height)
                
            # Make sure ROI doesn't extend outside of image
            if searchRoi.x_offset < 0: 
                searchRoi.x_offset = 0

            if searchRoi.y_offset < 0:
                searchRoi.y_offset = 0

            if (searchRoi.x_offset + searchRoi.width) > (W - 1):
                numPixelsOver = (searchRoi.x_offset + searchRoi.width) - (W - 1)
                print "NUM PIXELS OVER", numPixelsOver
                searchRoi.width -= numPixelsOver
    
            if (searchRoi.y_offset + searchRoi.height) > (H - 1):
                numPixelsOver = (searchRoi.y_offset + searchRoi.height) - (H - 1)
                searchRoi.height -= numPixelsOver
                
            mask = (searchRoi.x_offset, searchRoi.y_offset, searchRoi.width, searchRoi.height)
    
            frame_mat = cv.fromarray(frame)
        
            searchImage = cv.CreateMat(searchRoi.height, searchRoi.width, cv.CV_8UC3)
            searchImage = cv.GetSubRect(frame_mat, mask)
            searchArray = np.array(searchImage, dtype=np.uint8)
                       
            # Perform the search on the large images
            result_width = searchRoi.width - w + 1
            result_height = searchRoi.height - h + 1
    
            result_mat = cv.CreateMat(result_height, result_width, cv.CV_32FC1)
            result = np.array(result_mat, dtype = np.float32)
            
            cv2.matchTemplate(searchArray, self.template, cv.CV_TM_CCOEFF_NORMED, result)
    
            # Find the best match location
            (minValue, maxValue, minLoc, maxLoc) = cv2.minMaxLoc(result)

            maxValue *= 100
    
            # Transform point back to original image
            target_location = Point()
            target_location.x, target_location.y = maxLoc
            target_location.x += searchRoi.x_offset - w // 2 + self.searchExpansion
            target_location.y += searchRoi.y_offset - h // 2 + self.searchExpansion
    
            if maxValue >= self.matchPercentage:
                # Add the point to the list
                foundPointsList.append(maxLoc)
                confidencesList.append(maxValue)
    
                # If we are only looking for a single target, we have found it, so we
                # can return
                if not self.findMultipleTargets:
                    break
    
        if len(foundPointsList) == 0:
            rospy.loginfo("Target was not found to required confidence")
    
        return (target_location.x, target_location.y, w, h)