def main(args):
    global image
    rospy.init_node('skeleton_viewer', anonymous=True)

    image_info_sub = rospy.Subscriber("/camera/rgb/camera_info", CameraInfo,
                                      image_info_cb)

    if test_mode:
        cap = cv2.VideoCapture(0)

        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    else:
        ic = image_converter()

        cam_model = PinholeCameraModel()
        while (not camera_info) and (not rospy.is_shutdown()):
            time.sleep(1)
            print("waiting for camera info")

        cam_model.fromCameraInfo(camera_info)
        size = cam_model.fullResolution()
        print(cam_model.cx(), cam_model.cy(), cam_model.fullResolution())

    fps = 30
    rate = rospy.Rate(fps)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    writer = cv2.VideoWriter()
    success = writer.open(file, fourcc, fps, size, True)
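    # Note: VideoWriter.write() silently drops frames whose dimensions don't
    # match `size` (or that aren't 3-channel when isColor=True), so a True
    # `success` here doesn't guarantee frames actually get written.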

    while not rospy.is_shutdown():
        if test_mode:
            ret, image = cap.read()

        writer.write(image)

        cv2.imshow("Image window", image)

        if cv2.waitKey(1) & 0xFF == 27:
            break

        rate.sleep()

    if test_mode:
        cap.release()

    writer.release()
Example #2
class Colorama(object):
    def __init__(self):
        info_topic = camera_root + "/camera_info"
        image_topic = camera_root + "/image_rect_color"

        self.tf_listener = tf.TransformListener()
        self.status_pub = rospy.Publisher("/database_color_status", ColoramaDebug, queue_size=1)

        self.odom = None 
        set_odom = lambda msg: setattr(self, "odom", navigator_tools.pose_to_numpy(msg.pose.pose))
        rospy.Subscriber("/odom", Odometry, set_odom)
        fprint("Waiting for odom...")
        while self.odom is None and not rospy.is_shutdown():
            rospy.sleep(1)
        fprint("Odom found!", msg_color='green')

        db_request = rospy.ServiceProxy("/database/requests", ObjectDBQuery)
        self.make_request = lambda **kwargs: db_request(ObjectDBQueryRequest(**kwargs))

        self.image_history = ImageHistory(image_topic)

        # Wait for camera info, and exit if not found
        fprint("Waiting for camera info on: '{}'".format(info_topic))
        while not rospy.is_shutdown():
            try:
                camera_info_msg = rospy.wait_for_message(info_topic, CameraInfo, timeout=3)
            except rospy.exceptions.ROSException:
                rospy.sleep(1)
                continue
            break

        fprint("Camera info found!", msg_color="green")
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(camera_info_msg)

        self.debug = DebugImage("/colorama/debug", prd=.5)

        # These are color mappings from Hue values [0, 360]
        self.color_map = {'red': np.radians(0), 'yellow': np.radians(60),
                          'green': np.radians(120), 'blue': np.radians(200)}

        # RGB map for database setting
        self.database_color_map = {'red': (255, 0, 0), 'yellow': (255, 255, 0), 'green': (0, 255, 0), 'blue': (0, 0, 255)}

        # ========= Some tunable parameters ===================================
        self.update_time = 1  # s

        # Observation parameters
        self.saturation_reject = 20 # Reject color observations with saturation below this
        self.value_reject = 50      # Reject color observations with value below this
        self.height_remove = 0.4    # Remove points that are this fraction down on the object
                                    # 0 keeps all, .4 removes the bottom 40%
        # Update parameters
        self.history_length = 100   # How many observations of each color to keep
        self.min_obs = 5            # Need at least this many observations before making a determination
        self.conf_reject = .5       # Reject an observation whose confidence falls below this

        # Confidence weights
        self.v_factor = 0.6         # Favor HSV values close to the nominal value
        self.v_u = 200              # Mean of the normal distribution for value error
        self.v_sig = 60             # Sigma of the normal distribution for value error
        self.dist_factor = 0.3      # Favor being closer to the totem
        self.dist_sig = 30          # Sigma of distance (m)
        self.q_factor = 0           # Favor not looking into the sun
        self.q_sig = 1.2            # Sigma of norm for quaternion error (rads)
        
        # Maps id => observations
        self.colored = {}

        rospy.Timer(ros_t(self.update_time), self.do_observe)

    def _compute_average_angle(self, angles, weights):
        """
        Returns weighted average of angles.
        Tends to break down with too many angles 180 degrees apart - try not to do that.
        """
        angles = np.array(angles)
        if np.linalg.norm(weights) == 0:
            return None

        w = weights / np.linalg.norm(weights)
        s = np.sum(w * np.sin(angles))
        c = np.sum(w * np.cos(angles))
        avg_angle = np.arctan2(s, c)
        return avg_angle
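
    # Worked check (hypothetical values, not from the original source): a naive
    # linear average of np.radians(350) and np.radians(10) with equal weights
    # gives pi (180 deg), while this circular mean gives
    # arctan2(0, ~1.39) = 0.0 rad, as desired for nearly-identical hues.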
   
    def _get_quaternion_error(self, q, target_q):
        """
        Returns the angular difference between q and target_q in radians
        """
        dq = trns.quaternion_multiply(np.array(target_q), trns.quaternion_inverse(np.array(q)))
        return 2 * np.arccos(dq[3])
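
    # Quick check (hypothetical, quaternions in tf's [x, y, z, w] order): for
    # q = [0, 0, 0, 1] (identity) and target_q = [0, 0, 0.7071, 0.7071]
    # (90 deg about z), dq equals target_q and 2 * arccos(dq[3]) = pi / 2.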

    def _get_closest_color(self, hue_angle):
        """
        Returns a pair of the most likely color and the radian error associated with that prediction
            `hue_angle` := The radian value associated with hue [0, 2*pi] 
        Colors are found from `self.color_map`
        """
        c = np.cos(hue_angle)
        s = np.sin(hue_angle)
        error = np.inf
        likely_color = 'undef'
        for color, h_val in self.color_map.iteritems():
            cg = np.cos(h_val)
            sg = np.sin(h_val)
            # We need a signed error for some math later on so no absolute value
            this_error = np.arctan2(sg*c - cg*s, cg*c + sg*s)
            if np.abs(this_error) < np.abs(error):
                error = this_error
                likely_color = color

        fprint("Likely color: {} with an hue error of {} rads.".format(likely_color, np.round(error, 3)))
        return [likely_color, error] 
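
    # Example (hypothetical input): with the color_map above, hue_angle =
    # np.radians(10) is closest to 'red' (0 rad) and returns ['red', ~-0.175];
    # the signed error is (nominal angle - observed angle), wrapped to [-pi, pi].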

    def do_estimate(self, totem_id):
        """Calculates best color estimate for a given totem id"""
        fprint("DOING ESTIMATE", msg_color='blue') 
        if totem_id not in self.colored:
            fprint("Totem ID {} not found!".format(totem_id), msg_color='red')
            return None
       
        t_color = self.colored[totem_id]
        
        if len(t_color) < self.min_obs:
            fprint("Only {} observations. {} required.".format(len(t_color), self.min_obs), msg_color='red')
            return None

        kwargs = {'v_u': self.v_u, 'v_sig': self.v_sig, 'dist_sig': self.dist_sig, 
                  'q_factor': self.q_factor, 'q_sig': self.q_sig}

        w, weights = t_color.compute_confidence([self.v_factor, self.dist_factor, self.q_factor], True, **kwargs)
        fprint("CONF: {}".format(w))
        if np.mean(w) < self.conf_reject:
            return None
        
        hue_angles = np.radians(np.array(t_color.hues) * 2) 
        angle = self._compute_average_angle(hue_angles, w)
        color = self._get_closest_color(angle)
        
        msg = t_color.as_message 
        msg.id = totem_id
        msg.confidence = w
        msg.labels = ["value_errs", "dists", "q_diffs"]
        msg.weights = weights
        msg.color = color[0]
        msg.est_hues = angle * 2
        msg.hues = np.array(t_color.hues) * 2
        self.status_pub.publish(msg)

        fprint("Color: {}".format(color[0]))
        return color
    
    def got_request(self, req):
        # Note: not thread-safe; self.colored may be mutated by the observe timer while we read it
        colored_ids = [_id for _id, color_err in self.colored.iteritems() if self.valid_color(_id) == req.color]
        
        fprint("Colored IDs: {}".format(colored_ids), msg_color='blue')
        print '\n' * 50
        if len(colored_ids) == 0:
            return ColorRequestResponse(found=False)
        
        return ColorRequestResponse(found=True, ids=colored_ids)

    def do_observe(self, *args):
        resp = self.make_request(name='totem')
        
        # If at least one totem was found, start observing
        if resp.found:
            # Time of the database request
            time_of_marker = resp.objects[0].header.stamp  # - ros_t(1)
            fprint("Looking for image at {}".format(time_of_marker.to_sec()), msg_color='yellow')
            image_holder = self.image_history.get_around_time(time_of_marker)
            if not image_holder.contains_valid_image:
                t = self.image_history.newest_image.time
                if t is None:
                    fprint("No images found.")
                    return
                
                fprint("No valid image found for t={} ({}) dt: {}".format(time_of_marker.to_sec(), t.to_sec(), (rospy.Time.now() - t).to_sec()), msg_color='red')
                return
            header = navigator_tools.make_header(frame='/enu', stamp=image_holder.time)
            image = image_holder.image
            self.debug.image = np.copy(image)

            cam_tf = self.camera_model.tfFrame()
            try:
                fprint("Getting transform between /enu and {}...".format(cam_tf))
                self.tf_listener.waitForTransform("/enu", cam_tf, time_of_marker, ros_t(1))
                t_mat44 = self.tf_listener.asMatrix(cam_tf, header)
            except tf.ExtrapolationException as e:
                fprint("TF error found and excepted: {}".format(e))
                return

            for obj in resp.objects:
                if len(obj.points) <= 1:
                    fprint("No points in object")
                    continue

                fprint("{} {}".format(obj.id, "=" * 50))
                
                # Get object position in px coordinates to determine if it's in frame
                object_cam = t_mat44.dot(np.append(p2np(obj.position), 1))
                object_px = map(int, self.camera_model.project3dToPixel(object_cam[:3]))
                if not self._object_in_frame(object_cam):
                    fprint("Object not in frame")
                    continue
                
                # Get enu points associated with this totem and remove ones that are too low
                points_np = np.array(map(p2np, obj.points))
                height = np.max(points_np[:, 2]) - np.min(points_np[:, 2])
                if height < .1:
                    # If the height of the object is too small, skip (units are meters)
                    fprint("Object too small")
                    continue

                threshold = np.min(points_np[:, 2]) + self.height_remove * height  
                points_np = points_np[points_np[:, 2] > threshold]
                
                # Append ones to make homogeneous points, then transform them into the image frame
                points_np_homo = np.hstack((points_np, np.ones((points_np.shape[0], 1)))).T
                points_cam = t_mat44.dot(points_np_homo).T
                points_px = map(self.camera_model.project3dToPixel, points_cam[:, :3])
                
                [cv2.circle(self.debug.image, tuple(map(int, p)), 2, (255, 255, 255), -1) for p in points_px]
                
                # Get color information from the points
                roi = self._get_ROI_from_points(points_px)
                h, s, v = self._get_hsv_from_ROI(roi, image)

                # Compute parameters that go into the confidence
                boat_q = self.odom[1]
                target_q = self._get_solar_angle()
                q_err = self._get_quaternion_error(boat_q, target_q)
                
                dist = np.linalg.norm(self.odom[0] - p2np(obj.position))

                fprint("H: {}, S: {}, V: {}".format(h, s, v))
                fprint("q_err: {}, dist: {}".format(q_err, dist))

                # Add to database and setup debug image
                if s < self.saturation_reject or v < self.value_reject:
                    err_msg = "The colors aren't expressive enough s: {} ({}) v: {} ({}). Rejecting."
                    fprint(err_msg.format(s, self.saturation_reject, v, self.value_reject), msg_color='red')

                else:
                    if obj.id not in self.colored:
                    self.colored[obj.id] = Observation()
                    
                    # Add this observation in
                    self.colored[obj.id] += h, v, dist, q_err
                    print self.colored[obj.id]

                rgb = (0, 0, 0)
                color = self.do_estimate(obj.id)
                # Do we have a valid color estimate
                if color:
                    fprint("COLOR IS VALID", msg_color='green')
                    rgb = self.database_color_map[color[0]]
                    
                    cmd = '{name}={rgb[0]},{rgb[1]},{rgb[2]},{_id}'
                    self.make_request(cmd=cmd.format(name=obj.name, _id=obj.id, rgb=rgb))

                bgr = rgb[::-1]
                cv2.circle(self.debug.image, tuple(object_px), 10, bgr, -1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(self.debug.image, str(obj.id), tuple(object_px), font, 1, bgr, 2)

    def _get_solar_angle(self):
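        # Placeholder: a real implementation would estimate the sun's
        # direction; for now this is the identity quaternion ([x, y, z, w]).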
        return [0, 0, 0, 1]

    def _get_ROI_from_points(self, image_points):
        cnt = np.array([image_points]).astype(np.float32)
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        fprint("Drawing rectangle")
        cv2.drawContours(self.debug.image, [box], 0, (0, 0, 255), 2)
        return box

    def _get_hsv_from_ROI(self, roi, img):
        mask = np.zeros(img.shape[:2], np.uint8)
        cv2.drawContours(mask, [roi], 0, 255, -1)
        bgr = cv2.mean(img, mask)
        # We have to treat that bgr value as a 3d array to get cvtColor to work
        bgr = np.array([[bgr]])[:, :, :3].astype(np.uint8)  # drop the 4th (alpha) channel cv2.mean returns
        h, s, v = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[0, 0]

        return h, s, v
        

    def _object_in_frame(self, object_point):
        """
        Returns whether the object is in frame, given the object's centroid.
        `object_point` should be in the camera_model's frame.
        """
        if object_point[2] < 0:
            return False

        px = np.array(self.camera_model.project3dToPixel(object_point))
        resolution = self.camera_model.fullResolution()
        return not (np.any(px < [0, 0]) or np.any(px > resolution))
Example #3
parser = argparse.ArgumentParser(description='''Generate yaml file''')
parser.add_argument('--bag', help='Root of folders.', default='')
parser.add_argument('--out', default="")
parser.add_argument('--cam_info_topic', default="")
parser.add_argument('--image_topic', default="")

args = parser.parse_args()

bag_path = os.path.join(os.getcwd(), args.bag)

bag = Bag2CameraInfo(bag_path, args.cam_info_topic)
bag_im = Bag2Images(bag_path, args.image_topic)

c = PinholeCameraModel()
c.fromCameraInfo(bag.msg)
size = c.fullResolution()
K_new, _ = cv2.getOptimalNewCameraMatrix(c.fullIntrinsicMatrix(),
                                         c.distortionCoeffs(), size, 0.0)

pts = []
for x in range(size[0]):
    for y in range(size[1]):
        pts.append([[x, y]])

new_points = cv2.undistortPoints(np.array(pts, dtype=np.float32),
                                 bag.K,
                                 bag.D,
                                 P=K_new)
map_x = np.reshape(new_points[:, 0, 0],
                   newshape=(size[0], size[1])).transpose()
map_y = np.reshape(new_points[:, 0, 1],
                   newshape=(size[0], size[1])).transpose()
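
# Note on direction: cv2.undistortPoints maps each *distorted* pixel (x, y) to
# its undistorted location under K_new, so map_x[y, x] / map_y[y, x] form a
# forward (distorted to undistorted) lookup over the original pixel grid; that
# is the opposite direction from the maps cv2.initUndistortRectifyMap builds
# for use with cv2.remap.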
Example #4
class Colorama(object):
    def __init__(self):
        info_topic = camera_root + "/camera_info"
        image_topic = camera_root + "/image_raw"  # Change this to rect on boat

        self.tf_listener = tf.TransformListener()

        # Map id => [{color, error}, ...] for determining when a color is good
        self.colored = {}

        db_request = rospy.ServiceProxy("/database/requests", ObjectDBQuery)
        self.make_request = lambda **kwargs: db_request(
            ObjectDBQueryRequest(**kwargs))

        rospy.Service("/vision/buoy_colorizer", VisionRequest,
                      self.got_request)

        self.image_history = ImageHistory(image_topic)

        # Wait for camera info, and exit if not found
        try:
            fprint("Waiting for camera info on: '{}'".format(info_topic))
            camera_info_msg = rospy.wait_for_message(info_topic,
                                                     CameraInfo,
                                                     timeout=3)
        except rospy.exceptions.ROSException:
            fprint("Camera info not found! Terminating.", msg_color="red")
            rospy.signal_shutdown("Camera not found!")
            return

        fprint("Camera info found!", msg_color="green")
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(camera_info_msg)

        self.debug = DebugImage("/colorama/debug", prd=.5)

        # These are color mappings from Hue values [0, 360]
        self.color_map = {
            'red': np.radians(0),
            'yellow': np.radians(60),
            'green': np.radians(120),
            'blue': np.radians(240)
        }

        # Some tunable parameters
        self.update_time = .5  # s
        self.saturation_reject = 50
        self.value_reject = 50
        self.hue_error_reject = .4  # rads

        # To decide when to accept color observations
        self.error_tolerance = .4  # rads
        self.spottings_req = 4
        self.similarity_reject = .1  # If two colors are within this amount of error, reject

        rospy.Timer(ros_t(self.update_time), self.do_update)

    def valid_color(self, _id):
        """
        Returns the valid color for `_id`, or None if no valid color has been determined yet
        """
        if _id not in self.colored:
            return None

        object_data = self.colored[_id]
        print "object_data", object_data

        # Maps color => [err1, err2, ..]
        potential_colors = {}
        for data in object_data:
            color, err = data['color'], data['err']
            if color not in potential_colors:
                potential_colors[color] = []

            potential_colors[color].append(err)

        potential_colors_avg = {}
        for color, errs in potential_colors.iteritems():
            if len(errs) > self.spottings_req:
                potential_colors_avg[color] = np.average(errs)

        print "potential_colors_avg", potential_colors_avg
        if len(potential_colors_avg) == 1:
            # No debate about the color
            color = potential_colors_avg.keys()[0]
            err = potential_colors_avg[color]
            fprint("Only one color found, error: {} (/ {})".format(
                err, self.error_tolerance))
            if len(potential_colors[color]
                   ) > self.spottings_req and err <= self.error_tolerance:
                return color

        elif len(potential_colors_avg) > 1:
            # More than one color, see if one of them is still valid
            fprint("More than one color detected. Removing all.")
            self.colored[_id] = []
            return None
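
    # Behavior sketch (hypothetical numbers): with spottings_req = 4 and
    # error_tolerance = .4, five sightings of 'red' averaging err 0.2 make
    # valid_color return 'red'; if two colors both pass the spotting count,
    # the id's history is cleared and None is returned.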

    def got_request(self, req):
        # Note: not thread-safe; self.colored may be mutated by the update timer while we read it

        color = self.valid_color(req.target_id)
        if color is None:
            fprint("ID {} is NOT a valid color".format(req.target_id),
                   msg_color='red')
            return VisionRequestResponse(found=False)

        return VisionRequestResponse(found=True, color=color)

    def do_update(self, *args):
        resp = self.make_request(name='all')

        if resp.found:
            # temp
            time_of_marker = rospy.Time.now() - ros_t(.2)  # header.stamp not filled out
            image_holder = self.image_history.get_around_time(time_of_marker)
            if not image_holder.contains_valid_image:
                return

            header = navigator_tools.make_header(
                frame="/enu", stamp=image_holder.time)  #resp.objects[0].header
            image = image_holder.image
            self.debug.image = np.copy(image)

            cam_tf = self.camera_model.tfFrame()
            fprint("Getting transform between /enu and {}...".format(cam_tf))
            self.tf_listener.waitForTransform("/enu", cam_tf, time_of_marker,
                                              ros_t(1))
            t_mat44 = self.tf_listener.asMatrix(
                cam_tf, header)  # homogenous 4x4 transformation matrix

            for obj in resp.objects:
                if len(obj.points) > 0 and obj.name == "totem":
                    fprint("{} {}".format(obj.id, "=" * 50))

                    print obj.position
                    # Get objects position in camera frame and make sure it's in view
                    object_cam = t_mat44.dot(
                        np.append(navigator_tools.point_to_numpy(obj.position),
                                  1))
                    object_px = map(
                        int,
                        self.camera_model.project3dToPixel(object_cam[:3]))
                    if not self._object_in_frame(object_cam):
                        continue

                    #print object_px

                    points_np = np.array(
                        map(navigator_tools.point_to_numpy, obj.points))
                    # We don't want points below a certain level
                    points_np = points_np[points_np[:, 2] > -2.5]
                    # Append ones to make the points homogeneous
                    points_np_homo = np.hstack(
                        (points_np, np.ones((points_np.shape[0], 1)))).T
                    points_cam = t_mat44.dot(points_np_homo).T
                    points_px = map(self.camera_model.project3dToPixel,
                                    points_cam[:, :3])
                    #print points_px

                    roi = self._get_ROI_from_points(points_px)
                    color_info = self._get_color_from_ROI(
                        roi, image)  # hue, string_color, error
                    bgr = (0, 0, 0)

                    if color_info is not None:
                        hue, color, error = color_info
                        c = (int(hue), 255, 255)
                        hsv = np.array([[c]])[:, :3].astype(np.uint8)
                        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0]
                        bgr = tuple(bgr.astype(np.uint8).tolist())

                        if hue is not None:
                            if obj.id not in self.colored:
                                self.colored[obj.id] = []

                            self.colored[obj.id].append({
                                'color': color,
                                'err': error
                            })

                            if self.valid_color(obj.id):
                                fprint("COLOR IS VALID", msg_color='green')
                                self.make_request(
                                    cmd=
                                    '{name}={bgr[2]},{bgr[1]},{bgr[0]},{_id}'.
                                    format(name=obj.name, _id=obj.id, bgr=bgr))

                    [
                        cv2.circle(self.debug.image, tuple(map(int, p)), 2,
                                   bgr, -1) for p in points_px
                    ]
                    cv2.circle(self.debug.image, tuple(object_px), 10, bgr, -1)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(self.debug.image, str(obj.id),
                                tuple(object_px), font, 1, bgr, 2)

                    print '\n' * 2

    def _get_ROI_from_points(self, image_points):
        # Probably will return a set of contours
        cnt = np.array(image_points).astype(np.float32)

        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        fprint("Drawing rectangle")
        cv2.drawContours(self.debug.image, [box], 0, (0, 0, 255), 2)
        return box

    def _get_color_from_ROI(self, roi, img):
        mask = np.zeros(img.shape[:2], np.uint8)
        cv2.drawContours(mask, [roi], 0, 255, -1)
        bgr = cv2.mean(img, mask)
        # We have to treat that bgr value as a 3d array to get cvtColor to work
        bgr = np.array([[bgr]])[:, :, :3].astype(np.uint8)  # drop the 4th (alpha) channel cv2.mean returns
        h, s, v = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[0, 0]
        # Now check that s and v are in a good range
        if s < self.saturation_reject or v < self.value_reject:
            fprint("The colors aren't expressive enough. Rejecting.",
                   msg_color='red')
            return None

        # Compute hue error in SO2
        hue_angle = np.radians(h * 2)
        c = np.cos(hue_angle)
        s = np.sin(hue_angle)
        error = np.inf
        likely_color = 'bazinga'
        for color, h_val in self.color_map.iteritems():
            cg = np.cos(h_val)
            sg = np.sin(h_val)
            this_error = np.abs(np.arctan2(sg * c - cg * s, cg * c + sg * s))
            if this_error < error:
                error = this_error
                likely_color = color

        if error > self.hue_error_reject:
            fprint(
                "Closest color was {} with an error of {} rads (> {}) Rejecting."
                .format(likely_color, np.round(error, 3),
                        self.hue_error_reject),
                msg_color='red')
            return None

        fprint("Likely color: {} with an hue error of {} rads.".format(
            likely_color, np.round(error, 3)))
        return [
            np.degrees(self.color_map[likely_color]) / 2.0, likely_color, error
        ]
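
    # The first returned element converts the nominal map angle back into
    # OpenCV hue units (degrees / 2, range [0, 180)), e.g. 'yellow' at 60 deg
    # comes back as 30.0.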

    def _object_in_frame(self, object_point):
        """
        Returns whether the object is in frame, given the object's centroid.
        `object_point` should be in the camera_model's frame.
        """
        print object_point
        if object_point[2] < 0:
            return False

        px = np.array(self.camera_model.project3dToPixel(object_point))
        resolution = self.camera_model.fullResolution()
        if np.any(px < [0, 0]) or np.any(px > resolution):
            return False

        return True
Example #5
class Colorama(object):
    def __init__(self):
        info_topic = camera_root + "/camera_info"
        image_topic = camera_root + "/image_raw"  # Change this to rect on boat

        self.tf_listener = tf.TransformListener()

        # Map id => [{color, error}, ...] for determining when a color is good
        self.colored = {}

        db_request = rospy.ServiceProxy("/database/requests", ObjectDBQuery)
        self.make_request = lambda **kwargs: db_request(ObjectDBQueryRequest(**kwargs))

        rospy.Service("/vision/buoy_colorizer", VisionRequest, self.got_request)

        self.image_history = ImageHistory(image_topic)

        # Wait for camera info, and exit if not found
        try:
            fprint("Waiting for camera info on: '{}'".format(info_topic))
            camera_info_msg = rospy.wait_for_message(info_topic, CameraInfo, timeout=3)
        except rospy.exceptions.ROSException:
            fprint("Camera info not found! Terminating.", msg_color="red")
            rospy.signal_shutdown("Camera not found!")
            return

        fprint("Camera info found!", msg_color="green")
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(camera_info_msg)

        self.debug = DebugImage("/colorama/debug", prd=.5)

        # These are color mappings from Hue values [0, 360]
        self.color_map = {'red': np.radians(0), 'yellow': np.radians(60),
                          'green': np.radians(120), 'blue': np.radians(240)}

        # Some tunable parameters
        self.update_time = .5  # s
        self.saturation_reject = 50
        self.value_reject = 50
        self.hue_error_reject = .4  # rads

        # To decide when to accept color observations
        self.error_tolerance = .4 # rads
        self.spottings_req = 4
        self.similarity_reject = .1 # If two colors are within this amount of error, reject

        rospy.Timer(ros_t(self.update_time), self.do_update)

    def valid_color(self, _id):
        """
        Returns the valid color for `_id`, or None if no valid color has been determined yet
        """
        if _id not in self.colored:
            return None

        object_data = self.colored[_id]
        print "object_data", object_data

        # Maps color => [err1, err2, ..]
        potential_colors = {}
        for data in object_data:
            color, err = data['color'], data['err']
            if color not in potential_colors:
                potential_colors[color] = []

            potential_colors[color].append(err)

        potential_colors_avg = {}
        for color, errs in potential_colors.iteritems():
            if len(errs) > self.spottings_req:
                potential_colors_avg[color] = np.average(errs)

        print "potential_colors_avg", potential_colors_avg
        if len(potential_colors_avg) == 1:
            # No debate about the color
            color = potential_colors_avg.keys()[0]
            err = potential_colors_avg[color]
            fprint("Only one color found, error: {} (/ {})".format(err, self.error_tolerance))
            if len(potential_colors[color]) > self.spottings_req and err <= self.error_tolerance:
                return color

        elif len(potential_colors_avg) > 1:        
            # More than one color, see if one of them is still valid
            fprint("More than one color detected. Removing all.")
            self.colored[_id] = []
            return None


    def got_request(self, req):
        # Note: not thread-safe; self.colored may be mutated by the update timer while we read it

        color = self.valid_color(req.target_id)
        if color is None:
            fprint("ID {} is NOT a valid color".format(req.target_id), msg_color='red')
            return VisionRequestResponse(found=False)

        return VisionRequestResponse(found=True, color=color)

    def do_update(self, *args):
        resp = self.make_request(name='all')

        if resp.found:
            # temp
            time_of_marker = rospy.Time.now() - ros_t(.2)  # header.stamp not filled out
            image_holder = self.image_history.get_around_time(time_of_marker)
            if not image_holder.contains_valid_image:
                return

            header = navigator_tools.make_header(frame="/enu", stamp=image_holder.time) #resp.objects[0].header
            image = image_holder.image
            self.debug.image = np.copy(image)

            cam_tf = self.camera_model.tfFrame()
            fprint("Getting transform between /enu and {}...".format(cam_tf))
            self.tf_listener.waitForTransform("/enu", cam_tf, time_of_marker, ros_t(1))
            t_mat44 = self.tf_listener.asMatrix(cam_tf, header)  # homogenous 4x4 transformation matrix

            for obj in resp.objects:
                if len(obj.points) > 0 and obj.name == "totem":
                    fprint("{} {}".format(obj.id, "=" * 50))

                    print obj.position
                    # Get objects position in camera frame and make sure it's in view
                    object_cam = t_mat44.dot(np.append(navigator_tools.point_to_numpy(obj.position), 1))
                    object_px = map(int, self.camera_model.project3dToPixel(object_cam[:3]))
                    if not self._object_in_frame(object_cam):
                        continue

                    #print object_px

                    points_np = np.array(map(navigator_tools.point_to_numpy, obj.points))
                    # We don't want points below a certain level
                    points_np = points_np[points_np[:, 2] > -2.5]
                    # Append ones to make the points homogeneous
                    points_np_homo = np.hstack((points_np, np.ones((points_np.shape[0], 1)))).T
                    points_cam = t_mat44.dot(points_np_homo).T
                    points_px = map(self.camera_model.project3dToPixel, points_cam[:, :3])
                    #print points_px

                    roi = self._get_ROI_from_points(points_px)
                    color_info = self._get_color_from_ROI(roi, image)  # hue, string_color, error
                    bgr = (0, 0, 0)
                                        
                    if color_info is not None:
                        hue, color, error = color_info
                        c = (int(hue), 255, 255)
                        hsv = np.array([[c]])[:, :3].astype(np.uint8)
                        bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)[0, 0] 
                        bgr = tuple(bgr.astype(np.uint8).tolist())

                        if hue is not None:
                            if obj.id not in self.colored:
                                self.colored[obj.id] = []

                            self.colored[obj.id].append({'color':color, 'err':error})

                            if self.valid_color(obj.id):
                                fprint("COLOR IS VALID", msg_color='green')
                                self.make_request(cmd='{name}={bgr[2]},{bgr[1]},{bgr[0]},{_id}'.format(name=obj.name, _id=obj.id, bgr=bgr))

                    [cv2.circle(self.debug.image, tuple(map(int, p)), 2, bgr, -1) for p in points_px]
                    cv2.circle(self.debug.image, tuple(object_px), 10, bgr, -1)
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    cv2.putText(self.debug.image, str(obj.id), tuple(object_px), font, 1, bgr, 2)

                    print '\n' * 2

    def _get_ROI_from_points(self, image_points):
        # Probably will return a set of contours
        cnt = np.array(image_points).astype(np.float32)

        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        fprint("Drawing rectangle")
        cv2.drawContours(self.debug.image, [box], 0, (0, 0, 255), 2)
        return box

    def _get_color_from_ROI(self, roi, img):
        mask = np.zeros(img.shape[:2], np.uint8)
        cv2.drawContours(mask, [roi], 0, 255, -1)
        bgr = cv2.mean(img, mask)
        # We have to treat that bgr value as a 3d array to get cvtColor to work
        bgr = np.array([[bgr]])[:, :, :3].astype(np.uint8)  # drop the 4th (alpha) channel cv2.mean returns
        h, s, v = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[0, 0]
        # Now check that s and v are in a good range
        if s < self.saturation_reject or v < self.value_reject:
            fprint("The colors aren't expressive enough. Rejecting.", msg_color='red')
            return None

        # Compute hue error in SO2
        hue_angle = np.radians(h * 2) 
        c = np.cos(hue_angle)
        s = np.sin(hue_angle)
        error = np.inf
        likely_color = 'bazinga'
        for color, h_val in self.color_map.iteritems():
            cg = np.cos(h_val)
            sg = np.sin(h_val)
            this_error = np.abs(np.arctan2(sg*c - cg*s, cg*c + sg*s))
            if this_error < error:
                error = this_error
                likely_color = color

        if error > self.hue_error_reject:
            fprint("Closest color was {} with an error of {} rads (> {}) Rejecting.".format(likely_color, np.round(error, 3), 
                                                                                            self.hue_error_reject), msg_color='red')
            return None

        fprint("Likely color: {} with an hue error of {} rads.".format(likely_color, np.round(error, 3)))
        return [np.degrees(self.color_map[likely_color]) / 2.0, likely_color, error]

    def _object_in_frame(self, object_point):
        """
        Returns whether the object is in frame, given the object's centroid.
        `object_point` should be in the camera_model's frame.
        """
        print object_point
        if object_point[2] < 0:
            return False

        px = np.array(self.camera_model.project3dToPixel(object_point))
        resolution = self.camera_model.fullResolution()
        if np.any(px < [0, 0]) or np.any(px > resolution):
            return False

        return True
Example #6
class TargetTracker(object):
    def __init__(self):
        self.targets = []
        self.markers = []
        self.pose = Pose()
        self.processing_target_id = 0
        self.localizing_target_id = 0
        self.uav_status = uav_status_dict['searching']
        self.seq = 0
        self.process_status = {
            target_status_dict['mapped']:
            self.process_mapped,
            target_status_dict['converged']:
            self.process_converged,
            target_status_dict['pre_tracking']:
            self.process_pre_tracking,
            target_status_dict['false_tracking']:
            self.process_false_tracking,
            target_status_dict['b_cylinder_estimating']:
            self.process_tracking,
            target_status_dict['false_tracking_examining']:
            self.process_false_tracking_examining
        }
        self.trigger_converging_DE = params['trigger_converging_DE']
        self.trigger_resampling_KLD = 0.1
        self.trigger_mapping_KLD = 0.01
        self.trigger_mapping_N = 3  # the minimum number of keyframes satisfying the KLD threshold required to trigger mapping
        self.seq_examine_mapping = 0
        self.check_edge_box_border = 5
        self.target_spacing_threshold = 1.  # mapping will be skipped if the distance between target centers is smaller than this value

        self.pub_markers = rospy.Publisher("/target_tracker/ellipsoids",
                                           MarkerArray,
                                           queue_size=1)
        self.client = actionlib.SimpleActionClient(
            "/path_planner/target_plan", target_mapping.msg.TargetPlanAction)
        #self.client.wait_for_server()

        # changed
        self.publish_image = True
        if self.publish_image:
            self.update_img = False
            self.bridge = CvBridge()
            self.pub = rospy.Publisher("/target_tracker/detection_image",
                                       Image,
                                       queue_size=1)
            #raw_image_sub = rospy.Subscriber('/bbox_tracker/detection_image', Image, self.image_callback, queue_size=1)
            raw_image_sub = rospy.Subscriber('/darknet_ros/detection_image',
                                             Image,
                                             self.image_callback,
                                             queue_size=2)
            self.img = np.zeros(1)

        if not ROS_BAG:
            rospy.loginfo("checking tf from camera to base_link ...")
            listener = tf.TransformListener()
            while not rospy.is_shutdown():
                try:
                    now = rospy.Time.now()
                    listener.waitForTransform("base_link", "r200", now,
                                              rospy.Duration(5.0))
                    (trans,
                     rot) = listener.lookupTransform("base_link", "r200", now)
                except (tf.LookupException, tf.ConnectivityException,
                        tf.ExtrapolationException):
                    continue
        else:
            trans_vec = (0.1, 0, -0.01)
            trans = tf.transformations.translation_matrix(trans_vec)
            #quaternion = (-0.6743797, 0.6743797, - 0.2126311, 0.2126311)
            quaternion = (-0.6830127, 0.6830127, -0.1830127, 0.1830127)
            rot = tf.transformations.quaternion_matrix(quaternion)

        self.T_camera2base = np.matmul(trans, rot)

        self.pcl_pub = rospy.Publisher("/target_tracker/points",
                                       PointCloud2,
                                       queue_size=10)

        camera_info = rospy.wait_for_message("/r200/rgb/camera_info",
                                             CameraInfo)
        self.pinhole_camera_model = PinholeCameraModel()
        self.pinhole_camera_model.fromCameraInfo(camera_info)
        self.image_W, self.image_H = self.pinhole_camera_model.fullResolution()

        self.sub_bbox = message_filters.Subscriber(
            '/bbox_tracker/bounding_boxes_drop',
            BoundingBoxes,
            queue_size=10,
            buff_size=2**24)
        #self.sub_bbox = message_filters.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, queue_size=10,buff_size=2**24)
        self.sub_pose = message_filters.Subscriber(
            '/mavros/local_position/pose',
            PoseStamped,
            queue_size=10,
            buff_size=2**24)
        # self.sub_vel = message_filters.Subscriber('/mavros/local_position/velocity_local', TwistStamped)
        # self.timer = rospy.Timer(rospy.Duration(.5), self.timerCallback)

        self.ts = message_filters.ApproximateTimeSynchronizer(
            [self.sub_bbox, self.sub_pose], queue_size=10, slop=0.05)
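        # slop=0.05 lets the synchronizer pair a bbox and a pose whose stamps
        # differ by up to 50 ms.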
        #self.ts = message_filters.TimeSynchronizer([self.sub_bbox, self.sub_pose], 10)
        self.ts.registerCallback(self.callback)

        # self.sub1 = message_filters.Subscriber('/mavros/odometry/in', Odometry, queue_size=10, buff_size=2 ** 24)
        # self.sub_bbox = message_filters.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes)
        # self.sub2 = message_filters.Subscriber('/mavros/local_position/pose', PoseStamped, queue_size=10, buff_size=2 ** 24)
        # self.sub_vel = message_filters.Subscriber('/mavros/local_position/velocity_local', TwistStamped)
        self.timer = rospy.Timer(rospy.Duration(0.5), self.timerCallback)
        # self.ts1 = message_filters.ApproximateTimeSynchronizer([self.sub1, self.sub2], queue_size=10, slop=0.1)
        # self.ts = message_filters.TimeSynchronizer([self.sub_bbox, self.sub_pose], 10)
        # self.ts1.registerCallback(self.callback1)

        print("target_tracker initialized!")

    def timerCallback(self, timer):
        self.publishTargetPoints()
        self.publishTargetMarkers()  # needed because markers will be used for localization and mapping
        N = len(self.targets)
        for id in reversed(range(N)):
            target = self.targets[id]
            target_status = target.check_status()
            if target_status == target_status_dict['false_tracking']:
                self.processing_target_id = id
                target = self.targets[self.processing_target_id]
                print("target id: " + str(self.processing_target_id))
                self.process_status[target_status](target)

    def callback(self, bbox_msg, pose_msg):
        self.timer.shutdown()
        # Shut the timer down while this observation is processed so the timer
        # callback cannot run concurrently; it is restarted at the end below.
        self.seq += 1
        self.pose = pose_msg.pose
        bboxes = bbox_msg.bounding_boxes
        enable_localization = False
        if len(self.targets) > 0:
            for bbox in bboxes:
                if self.checkBBoxOnEdge(bbox):
                    continue
                bbox_in_target_bools = []
                for id, target in enumerate(self.targets):
                    if target.check_points_in_bbox(bbox, self.pose):
                        bbox_in_target_bools.append(True)
                        update_status = target.update_points(bbox, self.pose)
                        if update_status == update_status_dict['converged']:
                            enable_localization = True  # enable localizeTarget
                        if update_status == update_status_dict[
                                'true_keyframe']:
                            enable_localization = True
                        if update_status == update_status_dict[
                                'false_keyframe']:
                            continue
                if len(bbox_in_target_bools) == 0:
                    if self.uav_status == uav_status_dict['searching']:
                        self.generateTarget(bbox)
        else:
            map(self.generateTarget, bboxes)
            enable_localization = True

        self.publishTargetPoints()
        self.publishTargetMarkers()  # needed because markers will be used for localization and mapping

        if enable_localization:
            self.localizeTarget()
        self.timer = rospy.Timer(rospy.Duration(0.5), self.timerCallback)

    def generateTarget(self, bbox):
        if self.checkBBoxOnEdge(bbox):
            return None
        target = TargetPoints(self.pinhole_camera_model, self.T_camera2base)
        target.register_points(bbox, self.pose)
        self.targets.append(target)

    def checkBBoxOnEdge(self, bbox):
        x1 = bbox.xmin
        y1 = bbox.ymin
        x2 = bbox.xmax
        y2 = bbox.ymax
        if x1 < self.check_edge_box_border:
            return True  # it is on the edge
        if y1 < self.check_edge_box_border:
            return True
        if (self.image_W - x2) < self.check_edge_box_border:
            return True
        if (self.image_H - y2) < self.check_edge_box_border:
            return True
        return False

    def localizeTarget(self):
        if self.processing_target_id - len(self.targets) == 0:
            return
        elif self.processing_target_id - len(self.targets) > 0:
            self.processing_target_id -= 1
            return

        N = len(self.targets)
        for id in reversed(range(N)):
            target = self.targets[id]
            target_status = target.check_status()
            if target_status == target_status_dict['false_tracking']:
                self.processing_target_id = id
                target = self.targets[self.processing_target_id]
                print("target id: " + str(self.processing_target_id))
                self.process_status[target_status](target)

        target_statuses = [
            target.check_status(update_seq=False) for target in self.targets
        ]
        for id, target_status in enumerate(target_statuses):
            if target_status == target_status_dict['converged']:
                self.processing_target_id = id
                target = self.targets[self.processing_target_id]
                # check if the target has already been mapped
                if self.assert_mapped_by_others(id, target_statuses):
                    self.process_status[target_status_dict['false_tracking']](
                        target)
                else:
                    # else, then map
                    print("target id: " + str(self.processing_target_id))
                    self.process_status[target_status](target)

        for id, target_status in enumerate(target_statuses):
            if target_status == target_status_dict['b_cylinder_estimating']:
                self.processing_target_id = id
                target = self.targets[self.processing_target_id]
                print("target id: " + str(self.processing_target_id))
                self.process_status[target_status](target)
                self.update_img = True

    def process_pre_tracking(self, target):
        pass

    def process_tracking(self, target):
        KLD, DE = target.get_statistic()
        print('KLD: ' + str(KLD))
        print('DE: ' + str(DE))

        if DE < self.trigger_converging_DE:
            if self.uav_status == uav_status_dict['searching']:
                #target.resampling_uniform_dist()
                #target.expanded_resampling_uniform_dist()
                self.requestLocalizing()
                self.localizing_target_id = self.processing_target_id
                rospy.sleep(2.)
            if (self.uav_status == uav_status_dict['b_cylinder_estimating_motion']) & \
                    (KLD <= self.trigger_mapping_KLD):
                self.seq_examine_mapping += 1
                if self.seq_examine_mapping >= self.trigger_mapping_N:
                    target.set_localized()
                    self.seq_examine_mapping = 0
            else:
                self.seq_examine_mapping = 0

    def process_false_tracking_examining(self, target):
        return

    def process_false_tracking(self, target):
        rospy.loginfo('false tracking')
        del self.targets[self.processing_target_id]
        if self.uav_status != uav_status_dict['searching']:
            print("processing target id: " + str(self.processing_target_id))
            print("tracking target id: " + str(self.localizing_target_id))
            if self.processing_target_id == self.localizing_target_id:
                self.continueSearch()
            if self.processing_target_id < self.localizing_target_id:
                self.localizing_target_id -= 1

    def process_converged(self, target):
        result = self.requestMapping()
        if result.success:
            rospy.loginfo('mapped')
            target.set_mapped()
            ros_pts = result.pointcloud_map
            o3d_pts = convertCloudFromRosToOpen3d(ros_pts)
            pts = np.asarray(o3d_pts.points)
            target.update_points_after_mapping(pts)
            self.continueSearch()
        else:
            rospy.loginfo('false mapping')
            del self.targets[self.processing_target_id]
            self.continueSearch()

    def process_mapped(self, target):
        print('mapped')

    def requestLocalizing(self):
        rospy.loginfo('requesting b-cylinder estimating')
        goal = target_mapping.msg.TargetPlanGoal()
        goal.header.stamp = rospy.Time.now()
        goal.id.data = self.processing_target_id
        goal.mode.data = uav_status_dict['b_cylinder_estimating_motion']
        goal.markers = self.markers
        self.client.send_goal(goal)
        self.uav_status = uav_status_dict['b_cylinder_estimating_motion']

    def requestMapping(self):
        rospy.loginfo('requesting mapping')
        goal = target_mapping.msg.TargetPlanGoal()
        goal.header.stamp = rospy.Time.now()
        goal.id.data = self.processing_target_id
        goal.mode.data = uav_status_dict['mapping']
        goal.markers = self.markers
        self.client.send_goal(goal)
        self.uav_status = uav_status_dict['mapping']
        rospy.sleep(5.)
        self.client.wait_for_result()
        result = self.client.get_result()
        return result

    def continueSearch(self):
        rospy.loginfo('resuming searching')
        goal = target_mapping.msg.TargetPlanGoal()
        goal.header.stamp = rospy.Time.now()
        goal.id.data = self.processing_target_id
        goal.mode.data = uav_status_dict['searching']
        goal.markers = self.markers
        self.client.send_goal(goal)
        self.client.wait_for_result()
        result = self.client.get_result()
        self.uav_status = uav_status_dict['searching']

    def image_callback(self, data):
        self.image_header = data.header
        raw_image = self.bridge.imgmsg_to_cv2(data).astype(np.uint8)
        if len(raw_image.shape) > 2:
            self.img = raw_image
        if self.update_img:
            if len(self.targets) <= self.processing_target_id:
                return
            target = self.targets[self.processing_target_id]
            points_2d = target.points_2d
            img = self.draw_points(self.img, points_2d)
            image_msg = self.bridge.cv2_to_imgmsg(img, 'bgr8')
            self.pub.publish(image_msg)
            self.update_img = False

    def draw_points(self, image, pts):
        for pt in pts:
            image = cv2.circle(image, (int(pt[0]), int(pt[1])),
                               radius=2,
                               color=(255, 0, 0),
                               thickness=2)
        return image

    def assert_mapped_by_others(self, id, statuses):
        mapped_ids = [
            i for i, status in enumerate(statuses)
            if status == target_status_dict['mapped']
        ]
        target_marker = self.markers.markers[id]
        target_x = target_marker.pose.position.x
        target_y = target_marker.pose.position.y
        target_z = target_marker.pose.position.z
        target_pos = np.array((target_x, target_y, target_z))
        for mapped_id in mapped_ids:
            other_marker = self.markers.markers[mapped_id]
            other_x = other_marker.pose.position.x
            other_y = other_marker.pose.position.y
            other_z = other_marker.pose.position.z
            other_pos = np.array((other_x, other_y, other_z))
            dis = np.linalg.norm(target_pos - other_pos)
            if dis < self.target_spacing_threshold:
                return True

        return False

    def publishTargetMarkers(self):
        markerArray = MarkerArray()
        for id, target in enumerate(self.targets):
            pts = target.get_points_3d()
            center = np.mean(pts, axis=0)
            w, v, quat = target.get_eigens()
            marker = self.markerVector(id * 3, w * 10, v, center, quat, pts)
            markerArray.markers.append(marker)

        if len(self.targets) > 0:
            self.pub_markers.publish(markerArray)
            self.markers = markerArray

    def markerVector(self, id, scale, rotation, position, quaternion, pts):
        marker = Marker()
        marker.header.frame_id = "map"
        marker.header.stamp = rospy.Time.now()
        marker.ns = "target_mapping"
        marker.id = id
        marker.type = visualization_msgs.msg.Marker.SPHERE
        marker.action = visualization_msgs.msg.Marker.ADD
        marker.scale.x = scale[0]
        marker.scale.y = scale[1]
        marker.scale.z = scale[2]
        marker.color.a = 1.0
        marker.color.r = 1
        marker.color.g = 0
        marker.color.b = 0

        marker.pose.position.x = position[0]
        marker.pose.position.y = position[1]
        marker.pose.position.z = position[2]
        R = np.eye(4)
        R[:3, :3] = rotation
        q = tf.transformations.quaternion_from_matrix(R)
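        # NOTE: `q` computed from `rotation` is unused below; the marker's
        # orientation comes from the `quaternion` argument instead.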

        marker.pose.orientation.x = quaternion[0]
        marker.pose.orientation.y = quaternion[1]
        marker.pose.orientation.z = quaternion[2]
        marker.pose.orientation.w = quaternion[3]
        points = [Point(p[0], p[1], p[2]) for p in pts]
        marker.points = points
        return marker

    def publishTargetPoints(self):
        # convert target_points to pointcloud messages
        # if len(self.targets) > 0:
        all_points = []
        for target in self.targets:
            all_points = all_points + target.get_points_3d().tolist()
        if len(all_points) == 0:
            all_points = [[0, 0, 0]]

        header = std_msgs.msg.Header()
        header.stamp = rospy.Time.now()
        header.frame_id = 'map'
        # create pcl from points
        pc_msg = pcl2.create_cloud_xyz32(header, all_points)
        self.pcl_pub.publish(pc_msg)
Example #7
class Colorama(object):
    def __init__(self):
        info_topic = camera_root + "/camera_info"
        image_topic = camera_root + "/image_rect_color"

        self.tf_listener = tf.TransformListener()
        self.status_pub = rospy.Publisher("/database_color_status",
                                          ColoramaDebug,
                                          queue_size=1)

        self.odom = None
        set_odom = lambda msg: setattr(
            self, "odom", navigator_tools.pose_to_numpy(msg.pose.pose))
        rospy.Subscriber("/odom", Odometry, set_odom)
        fprint("Waiting for odom...")
        while self.odom is None and not rospy.is_shutdown():
            rospy.sleep(1)
        fprint("Odom found!", msg_color='green')

        db_request = rospy.ServiceProxy("/database/requests", ObjectDBQuery)
        self.make_request = lambda **kwargs: db_request(
            ObjectDBQueryRequest(**kwargs))

        self.image_history = ImageHistory(image_topic)

        # Wait for camera info, and exit if not found
        fprint("Waiting for camera info on: '{}'".format(info_topic))
        while not rospy.is_shutdown():
            try:
                camera_info_msg = rospy.wait_for_message(info_topic,
                                                         CameraInfo,
                                                         timeout=3)
            except rospy.exceptions.ROSException:
                rospy.sleep(1)
                continue
            break

        fprint("Camera info found!", msg_color="green")
        self.camera_model = PinholeCameraModel()
        self.camera_model.fromCameraInfo(camera_info_msg)

        self.debug = DebugImage("/colorama/debug", prd=.5)

        # These are color mappings from Hue values [0, 360]
        self.color_map = {
            'red': np.radians(0),
            'yellow': np.radians(60),
            'green': np.radians(120),
            'blue': np.radians(200)
        }

        # RGB map for database setting
        self.database_color_map = {
            'red': (255, 0, 0),
            'yellow': (255, 255, 0),
            'green': (0, 255, 0),
            'blue': (0, 0, 255)
        }

        # ========= Some tunable parameters ===================================
        self.update_time = 1  # s

        # Observation parameters
        self.saturation_reject = 20  # Reject color obs with below this saturation
        self.value_reject = 50  # Reject color obs with below this value
        self.height_remove = 0.4  # Remove points that are this percent down on the object (%)
        # 1 keeps all, .4 removes the bottom 40%

        # Update parameters
        self.history_length = 100  # How many of each color to keep
        self.min_obs = 5  # Need at least this many observations before making a determination
        self.conf_reject = .5  # When to reject an observation based on its confidence

        # Confidence weights
        self.v_factor = 0.6  # Favor value values close to the nominal value
        self.v_u = 200  # Mean of norm for variance error
        self.v_sig = 60  # Sigma of norm for variance error
        self.dist_factor = 0.3  # Favor being closer to the totem
        self.dist_sig = 30  # Sigma of distance (m)
        self.q_factor = 0  # Favor not looking into the sun
        self.q_sig = 1.2  # Sigma of norm for quaternion error (rads)

        # Maps id => observations
        self.colored = {}

        rospy.Timer(ros_t(self.update_time), self.do_observe)

    def _compute_average_angle(self, angles, weights):
        """
        Returns the weighted average of `angles`.
        Tends to break down when the angles are spread ~180 degrees apart, so avoid that case.
        """
        angles = np.array(angles)
        if np.linalg.norm(weights) == 0:
            return None

        w = weights / np.linalg.norm(weights)
        s = np.sum(w * np.sin(angles))
        c = np.sum(w * np.cos(angles))
        avg_angle = np.arctan2(s, c)
        return avg_angle

    def _get_quaternion_error(self, q, target_q):
        """
        Returns the angular difference between q and target_q in radians
        """
        dq = trns.quaternion_multiply(np.array(target_q),
                                      trns.quaternion_inverse(np.array(q)))
        return 2 * np.arccos(dq[3])

    def _get_closest_color(self, hue_angle):
        """
        Returns a pair of the most likely color and the radian error associated with that prediction
            `hue_angle` := The radian value associated with hue [0, 2*pi] 
        Colors are found from `self.color_map`
        """
        c = np.cos(hue_angle)
        s = np.sin(hue_angle)
        error = np.inf
        likely_color = 'undef'
        for color, h_val in self.color_map.iteritems():
            cg = np.cos(h_val)
            sg = np.sin(h_val)
            # We need a signed error for some math later on so no absolute value
            this_error = np.arctan2(sg * c - cg * s, cg * c + sg * s)
            if np.abs(this_error) < np.abs(error):
                error = this_error
                likely_color = color

        fprint("Likely color: {} with an hue error of {} rads.".format(
            likely_color, np.round(error, 3)))
        return [likely_color, error]

    def do_estimate(self, totem_id):
        """Calculates best color estimate for a given totem id"""
        fprint("DOING ESTIMATE", msg_color='blue')
        if totem_id not in self.colored:
            fprint("Totem ID {} not found!".format(totem_id), msg_color='red')
            return None

        t_color = self.colored[totem_id]

        if len(t_color) < self.min_obs:
            fprint("Only {} observations. {} required.".format(
                len(t_color), self.min_obs),
                   msg_color='red')
            return None

        kwargs = {
            'v_u': self.v_u,
            'v_sig': self.v_sig,
            'dist_sig': self.dist_sig,
            'q_factor': self.q_factor,
            'q_sig': self.q_sig
        }

        w, weights = t_color.compute_confidence(
            [self.v_factor, self.dist_factor, self.q_factor], True, **kwargs)
        fprint("CONF: {}".format(w))
        if np.mean(w) < self.conf_reject:
            return None

        hue_angles = np.radians(np.array(t_color.hues) * 2)
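        # (OpenCV stores hue halved in [0, 180]; the * 2 above restores degrees)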
        angle = self._compute_average_angle(hue_angles, w)
        color = self._get_closest_color(angle)

        msg = t_color.as_message
        msg.id = totem_id
        msg.confidence = w
        msg.labels = ["value_errs", "dists", "q_diffs"]
        msg.weights = weights
        msg.color = color[0]
        msg.est_hues = angle * 2
        msg.hues = np.array(t_color.hues) * 2
        self.status_pub.publish(msg)

        fprint("Color: {}".format(color[0]))
        return color

    def got_request(self, req):
        # NOTE: not thread-safe; self.colored can be mutated by do_observe while we read it
        colored_ids = [
            _id for _id, color_err in self.colored.iteritems()
            if self.valid_color(_id) == req.color
        ]

        fprint("Colored IDs: {}".format(colored_ids), msg_color='blue')
        print '\n' * 50
        if len(colored_ids) == 0:
            return ColorRequestResponse(found=False)

        return ColorRequestResponse(found=True, ids=colored_ids)

    def do_observe(self, *args):
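        # Timer callback: query the database for totems, project each into the
        # camera frame, and record a color observation for any totem in view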
        resp = self.make_request(name='totem')

        # If at least one totem was found, start observing
        if resp.found:
            # Time of the database request
            time_of_marker = resp.objects[0].header.stamp  # - ros_t(1)
            fprint("Looking for image at {}".format(time_of_marker.to_sec()),
                   msg_color='yellow')
            image_holder = self.image_history.get_around_time(time_of_marker)
            if not image_holder.contains_valid_image:
                t = self.image_history.newest_image.time
                if t is None:
                    fprint("No images found.")
                    return

                fprint("No valid image found for t={} ({}) dt: {}".format(
                    time_of_marker.to_sec(), t.to_sec(),
                    (rospy.Time.now() - t).to_sec()),
                       msg_color='red')
                return
            header = navigator_tools.make_header(frame='/enu',
                                                 stamp=image_holder.time)
            image = image_holder.image
            self.debug.image = np.copy(image)

            cam_tf = self.camera_model.tfFrame()
            try:
                fprint(
                    "Getting transform between /enu and {}...".format(cam_tf))
                self.tf_listener.waitForTransform("/enu", cam_tf,
                                                  time_of_marker, ros_t(1))
                t_mat44 = self.tf_listener.asMatrix(cam_tf, header)
            except tf.ExtrapolationException as e:
                fprint("TF error found and excepted: {}".format(e))
                return

            for obj in resp.objects:
                if len(obj.points) <= 1:
                    fprint("No points in object")
                    continue

                fprint("{} {}".format(obj.id, "=" * 50))

                # Get object position in px coordinates to determine if it's in frame
                object_cam = t_mat44.dot(np.append(p2np(obj.position), 1))
                object_px = map(
                    int, self.camera_model.project3dToPixel(object_cam[:3]))
                if not self._object_in_frame(object_cam):
                    fprint("Object not in frame")
                    continue

                # Get enu points associated with this totem and remove ones that are too low
                points_np = np.array(map(p2np, obj.points))
                height = np.max(points_np[:, 2]) - np.min(points_np[:, 2])
                if height < .1:
                    # If the height of the object is too small, skip (units are meters)
                    fprint("Object too small")
                    continue

                threshold = np.min(points_np[:, 2]) + self.height_remove * height
                points_np = points_np[points_np[:, 2] > threshold]

                # Append a ones column to make homogeneous points for the camera-frame transform
                points_np_homo = np.hstack(
                    (points_np, np.ones((points_np.shape[0], 1)))).T
                points_cam = t_mat44.dot(points_np_homo).T
                points_px = map(self.camera_model.project3dToPixel,
                                points_cam[:, :3])

                # Draw each projected point onto the debug image
                for p in points_px:
                    cv2.circle(self.debug.image, tuple(map(int, p)), 2,
                               (255, 255, 255), -1)

                # Get color information from the points
                roi = self._get_ROI_from_points(points_px)
                h, s, v = self._get_hsv_from_ROI(roi, image)

                # Compute parameters that go into the confidence
                boat_q = self.odom[1]
                target_q = self._get_solar_angle()
                q_err = self._get_quaternion_error(boat_q, target_q)

                dist = np.linalg.norm(self.odom[0] - p2np(obj.position))

                fprint("H: {}, S: {}, V: {}".format(h, s, v))
                fprint("q_err: {}, dist: {}".format(q_err, dist))

                # Add to database and setup debug image
                if s < self.saturation_reject or v < self.value_reject:
                    err_msg = "The colors aren't expressive enough s: {} ({}) v: {} ({}). Rejecting."
                    fprint(err_msg.format(s, self.saturation_reject, v,
                                          self.value_reject),
                           msg_color='red')

                else:
                    if obj.id not in self.colored:
                        self.colored[obj.id] = Observation()

                    # Add this observation in
                    self.colored[obj.id] += h, v, dist, q_err
                    print self.colored[obj.id]

                rgb = (0, 0, 0)
                color = self.do_estimate(obj.id)
                # Do we have a valid color estimate
                if color:
                    fprint("COLOR IS VALID", msg_color='green')
                    rgb = self.database_color_map[color[0]]

                    cmd = '{name}={rgb[0]},{rgb[1]},{rgb[2]},{_id}'
                    self.make_request(
                        cmd=cmd.format(name=obj.name, _id=obj.id, rgb=rgb))

                bgr = rgb[::-1]
                cv2.circle(self.debug.image, tuple(object_px), 10, bgr, -1)
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(self.debug.image, str(obj.id), tuple(object_px),
                            font, 1, bgr, 2)

    def _get_solar_angle(self):
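        # Placeholder: the identity quaternion stands in for a real solar-angle
        # estimate (q_factor is 0 above, so this currently has no effect)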
        return [0, 0, 0, 1]

    def _get_ROI_from_points(self, image_points):
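        # Fit a minimum-area rotated rectangle around the projected pixels and
        # use its four corners as the region of interest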
        cnt = np.array([image_points]).astype(np.float32)
        rect = cv2.minAreaRect(cnt)
        box = cv2.cv.BoxPoints(rect)
        box = np.int0(box)
        fprint("Drawing rectangle")
        cv2.drawContours(self.debug.image, [box], 0, (0, 0, 255), 2)
        return box

    def _get_hsv_from_ROI(self, roi, img):
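        # Average the BGR pixels inside the ROI polygon, then convert that
        # single mean color to HSV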
        mask = np.zeros(img.shape[:2], np.uint8)
        cv2.drawContours(mask, [roi], 0, 255, -1)
        bgr = cv2.mean(img, mask)
        # cv2.mean returns a 4-element scalar (B, G, R, alpha); keep the first
        # three channels and shape them as a 1x1 image so cvtColor accepts it
        bgr = np.array([[bgr[:3]]]).astype(np.uint8)
        h, s, v = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)[0, 0]

        return h, s, v

        # NOTE: everything below the early return above is unreachable; it
        # appears to be older validation logic left in place.
        # Now check that s and v are in a good range
        if s < self.saturation_reject or v < self.value_reject:
            err_msg = "The colors aren't expressive enough s: {} ({}) v: {} ({}). Rejecting."
            fprint(err_msg.format(s, self.saturation_reject, v,
                                  self.value_reject),
                   msg_color='red')
            return None

        # Compute hue error in SO2
        hue_angle = np.radians(h * 2)

        # Display the current values
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(self.debug.image,
                    "h_val: {}".format(np.degrees(hue_angle)), tuple(roi[1]),
                    font, 1, (255, 255, 255), 2)

        likely_color, error = self._get_closest_color(hue_angle)

        if error > self.hue_error_reject:
            fprint(
                "Closest color was {} with an error of {} rads (> {}). Rejecting."
                .format(likely_color, np.round(error, 3),
                        self.hue_error_reject),
                msg_color='red')
            return None

        fprint("Likely color: {} with an hue error of {} rads.".format(
            likely_color, np.round(error, 3)))
        return [likely_color, error]

    def _object_in_frame(self, object_point):
        """
        Returns whether the object is in frame, given the centroid of the object.
        `object_point` should be in the camera_model's frame.
        """
        if object_point[2] < 0:
            return False

        px = np.array(self.camera_model.project3dToPixel(object_point))
        resolution = self.camera_model.fullResolution()
        return not (np.any(px < 0) or np.any(px > resolution))