class ForceFromGravity(object):
    def __init__(self):
        self.tf_l = TransformListener()
        self.last_wrench = None
        self.wrench_sub = rospy.Subscriber('/left_wrist_ft',
                                           WrenchStamped,
                                           self.wrench_cb,
                                           queue_size=1)

    def wrench_cb(self, data):
        self.last_wrench = data

    def compute_forces(self):
        qs = self.get_orientation()
        o = qs.quaternion
        r, p, y = euler_from_quaternion([o.x, o.y, o.z, o.w])
        rospy.loginfo("wrist_left_ft_link rpy vs world: " + str( (round(r, 3), round(p, 3), round(y, 3)) ))
        rospy.loginfo("in degrees: " + str( (round(degrees(r), 3), round(degrees(p), 3), round(degrees(y), 3)) ))
        hand_total_force = 10  # ~10 N, roughly the weight of a 1 kg load
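        # Split that load across the sensor axes, using the cosines of the
        # world-frame roll/pitch/yaw as rough direction cosines (an
        # approximation, hence the sign hack below).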
        fx = hand_total_force * cos(r) * -1.0 # hack
        fy = hand_total_force * cos(p)
        fz = hand_total_force * cos(y)
        #total = abs(fx) + abs(fy) + abs(fz)
        #rospy.loginfo("fx, fy, fz, total:")
        #rospy.loginfo( str( (round(fx, 3), round(fy, 3), round(fz, 3), round(total, 3)) ) )
        return fx, fy, fz

    def get_last_forces(self):
        f = self.last_wrench.wrench.force
        return f.x, f.y, f.z

    def get_orientation(self, from_frame="wrist_left_ft_link"):
        qs = QuaternionStamped()
        qs.quaternion.w = 1.0
        qs.header.stamp = self.tf_l.getLatestCommonTime("world", from_frame)
        qs.header.frame_id = "/" + from_frame
        transform_ok = False
        while not transform_ok and not rospy.is_shutdown():
            try:
                world_q = self.tf_l.transformQuaternion("/world", qs)
                transform_ok = True
                return world_q
            except ExtrapolationException as e:
                rospy.logwarn(
                    "Exception on transforming pose... trying again \n(" + str(e) + ")")
                rospy.sleep(0.05)
                qs.header.stamp = self.tf_l.getLatestCommonTime(
                    "world", from_frame)

    def run(self):
        r = rospy.Rate(1)
        while not rospy.is_shutdown():
            cx, cy, cz = self.compute_forces()
            c_total = abs(cx) + abs(cy) + abs(cz)
            fx, fy, fz = self.get_last_forces()
            f_total = abs(fx) + abs(fy) + abs(fz)
            rospy.loginfo("Computed Forces:" + str(round(c_total, 3)) + "\n" + str( (round(cx, 3), round(cy, 3), round(cz, 3) )))
            rospy.loginfo("Real Forces    :" + str(round(f_total, 3)) + "\n" + str( (round(fx, 3), round(fy, 3), round(fz, 3) )))
            r.sleep()
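

# Minimal usage sketch (not part of the original example; the node name is an
# assumption): create the node, wait until a first wrench has arrived so
# get_last_forces() has data, then compare computed vs. measured forces at 1 Hz.
if __name__ == '__main__':
    rospy.init_node('force_from_gravity')
    ffg = ForceFromGravity()
    while not rospy.is_shutdown() and ffg.last_wrench is None:
        rospy.sleep(0.1)
    ffg.run()

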
class Predictor(object):
    def __init__(self):
        rospy.loginfo("Initializing biternion predictor")
        self.counter = 0

        modelname = rospy.get_param("~model", "head_50_50")
        weightsname = abspath(expanduser(rospy.get_param("~weights", ".")))
        rospy.loginfo("Predicting using {} & {}".format(modelname, weightsname))

        topic = rospy.get_param("~topic", "/biternion")
        self.pub = rospy.Publisher(topic, HeadOrientations, queue_size=3)
        self.pub_vis = rospy.Publisher(topic + '/image', ROSImage, queue_size=3)
        self.pub_pa = rospy.Publisher(topic + "/pose", PoseArray, queue_size=3)
        self.pub_tracks = rospy.Publisher(topic + "/tracks", TrackedPersons, queue_size=3)

        # Ugly workaround for "jumps back in time" that the synchronizer sometimes does.
        self.last_stamp = rospy.Time()

        # Create and load the network.
        netlib = import_module(modelname)
        self.net = netlib.mknet()
        self.net.__setstate__(np.load(weightsname))
        self.net.evaluate()

        self.aug = netlib.mkaug(None, None)
        self.preproc = netlib.preproc
        self.getrect = netlib.getrect

        # Do a fake forward-pass for precompilation.
        im = cutout(np.zeros((480,640,3), np.uint8), 0, 0, 150, 450)
        im = next(self.aug.augimg_pred(self.preproc(im), fast=True))
        self.net.forward(np.array([im]))
        rospy.loginfo("BiternionNet initialized")

        src = rospy.get_param("~src", "tra")
        subs = []
        if src == "tra":
            subs.append(message_filters.Subscriber(rospy.get_param("~tra", "/TODO"), TrackedPersons2d))
        elif src == "ubd":
            subs.append(message_filters.Subscriber(rospy.get_param("~ubd", "/upper_body_detector/detections"), UpperBodyDetector))
        else:
            raise ValueError("Unknown source type: " + src)

        rgb = rospy.get_param("~rgb", "/head_xtion/rgb/image_rect_color")
        subs.append(message_filters.Subscriber(rgb, ROSImage))
        subs.append(message_filters.Subscriber(rospy.get_param("~d", "/head_xtion/depth/image_rect_meters"), ROSImage))
        subs.append(message_filters.Subscriber('/'.join(rgb.split('/')[:-1] + ['camera_info']), CameraInfo))

        tra3d = rospy.get_param("~tra3d", "")
        if src == "tra" and tra3d:
            subs.append(message_filters.Subscriber(tra3d, TrackedPersons))
            self.listener = TransformListener()

        ts = message_filters.ApproximateTimeSynchronizer(subs, queue_size=5, slop=0.5)
        ts.registerCallback(self.cb)

    def cb(self, src, rgb, d, caminfo, *more):
        # Ugly workaround because approximate sync sometimes jumps back in time.
        if rgb.header.stamp <= self.last_stamp:
            rospy.logwarn("Jump back in time detected and dropped like it's hot")
            return

        self.last_stamp = rgb.header.stamp

        detrects = get_rects(src)

        # Early-exit to minimize CPU usage if possible.
        #if len(detrects) == 0:
        #    return

        # If nobody's listening, why should we be computing?
        if 0 == sum(p.get_num_connections() for p in (self.pub, self.pub_vis, self.pub_pa, self.pub_tracks)):
            return

        header = rgb.header
        bridge = CvBridge()
        rgb = bridge.imgmsg_to_cv2(rgb)[:,:,::-1]  # Need to do BGR-RGB conversion manually.
        d = bridge.imgmsg_to_cv2(d)
        imgs = []
        for detrect in detrects:
            detrect = self.getrect(*detrect)
            det_rgb = cutout(rgb, *detrect)
            det_d = cutout(d, *detrect)

            # Preprocess and stick into the minibatch.
            im = subtractbg(det_rgb, det_d, 1.0, 0.5)
            im = self.preproc(im)
            imgs.append(im)
            sys.stderr.write("\r{}".format(self.counter))
            sys.stderr.flush()
            self.counter += 1

        # TODO: We could further optimize by putting all augmentations in a
        #       single batch and doing only one forward pass. Should be easy.
        if len(detrects):
            bits = [self.net.forward(batch) for batch in self.aug.augbatch_pred(np.array(imgs), fast=True)]
            preds = bit2deg(ensemble_biternions(bits)) - 90  # Subtract 90 to correct for "my weird" origin.
            # print(preds)
        else:
            preds = []

        if 0 < self.pub.get_num_connections():
            self.pub.publish(HeadOrientations(
                header=header,
                angles=list(preds),
                confidences=[0.83] * len(imgs)
            ))

        # Visualization
        if 0 < self.pub_vis.get_num_connections():
            rgb_vis = rgb[:,:,::-1].copy()
            for detrect, alpha in zip(detrects, preds):
                l, t, w, h = self.getrect(*detrect)
                px =  int(round(np.cos(np.deg2rad(alpha))*w/2))
                py = -int(round(np.sin(np.deg2rad(alpha))*h/2))
                cv2.rectangle(rgb_vis, (detrect[0], detrect[1]), (detrect[0]+detrect[2],detrect[1]+detrect[3]), (0,255,255), 1)
                cv2.rectangle(rgb_vis, (l,t), (l+w,t+h), (0,255,0), 2)
                cv2.line(rgb_vis, (l+w//2, t+h//2), (l+w//2+px,t+h//2+py), (0,255,0), 2)
                # cv2.putText(rgb_vis, "{:.1f}".format(alpha), (l, t+25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 2)
            vismsg = bridge.cv2_to_imgmsg(rgb_vis, encoding='rgb8')
            vismsg.header = header  # TODO: Seems not to work!
            self.pub_vis.publish(vismsg)

        if 0 < self.pub_pa.get_num_connections():
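            # caminfo.K is the row-major 3x3 intrinsic matrix
            # [fx 0 cx; 0 fy cy; 0 0 1]; the box centre below is back-projected
            # with the pinhole model: X = Z*(u - cx)/fx, Y = Z*(v - cy)/fy, Z = depth.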
            fx, cx = caminfo.K[0], caminfo.K[2]
            fy, cy = caminfo.K[4], caminfo.K[5]

            poseArray = PoseArray(header=header)

            for (dx, dy, dw, dh, dd), alpha in zip(get_rects(src, with_depth=True), preds):
                dx, dy, dw, dh = self.getrect(dx, dy, dw, dh)

                # PoseArray message for boundingbox centres
                poseArray.poses.append(Pose(
                    position=Point(
                        x=dd*((dx+dw/2.0-cx)/fx),
                        y=dd*((dy+dh/2.0-cy)/fy),
                        z=dd
                    ),
                    # TODO: Use global UP vector (0,0,1) and transform into frame used by this message.
                    orientation=Quaternion(*quaternion_about_axis(np.deg2rad(alpha), [0, -1, 0]))
                ))

            self.pub_pa.publish(poseArray)

        if len(more) == 1 and 0 < self.pub_tracks.get_num_connections():
            t3d = more[0]
            try:
                self.listener.waitForTransform(header.frame_id, t3d.header.frame_id, rospy.Time(), rospy.Duration(1))
                for track, alpha in zip(t3d.tracks, preds):
                    track.pose.pose.orientation = self.listener.transformQuaternion(t3d.header.frame_id, QuaternionStamped(
                        header=header,
                        # TODO: Same as above!
                        quaternion=Quaternion(*quaternion_about_axis(np.deg2rad(alpha), [0, -1, 0]))
                    )).quaternion
                self.pub_tracks.publish(t3d)
            except TFException:
                pass
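

# Minimal bootstrap sketch (not part of the original example; the node name is an
# assumption): Predictor wires up its subscribers and synchronizer in __init__ and
# does all its work in the callback, so the entry point only creates it and spins.
if __name__ == '__main__':
    rospy.init_node('biternion_predictor')
    Predictor()
    rospy.spin()

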
Example #3

    # Override of tf.TransformListener.transformQuaternion (presumably defined on
    # a TransformListener subclass): wait up to 5 s for the transform, then
    # delegate to the base implementation with a matching timestamp.
    def transformQuaternion(self, target_frame, quat):
        now = rospy.Time.now()
        self.waitForTransform(target_frame, quat.header.frame_id, now,
                              rospy.Duration(5.0))
        quat.header.stamp = now
        return TransformListener.transformQuaternion(self, target_frame, quat)
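
# Hedged usage sketch (not in the original excerpt; the subclass and frame names
# are assumptions): with the override defined on a TransformListener subclass,
# callers can transform a stamped quaternion without their own waitForTransform:
#
#     tf_l = WaitingTransformListener()   # hypothetical subclass carrying the override
#     qs = QuaternionStamped()
#     qs.header.frame_id = 'wrist_left_ft_link'
#     qs.quaternion.w = 1.0
#     world_q = tf_l.transformQuaternion('world', qs)

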
class Predictor(object):
    def __init__(self):
        rospy.loginfo("Initializing biternion predictor")
        self.counter = 0

        modelname = rospy.get_param("~model", "head_50_50")
        weightsname = abspath(expanduser(rospy.get_param("~weights", ".")))
        rospy.loginfo("Predicting using {} & {}".format(
            modelname, weightsname))

        topic = rospy.get_param("~topic", "/biternion")
        self.pub = rospy.Publisher(topic, HeadOrientations, queue_size=3)
        self.pub_vis = rospy.Publisher(topic + '/image',
                                       ROSImage,
                                       queue_size=3)
        self.pub_pa = rospy.Publisher(topic + "/pose", PoseArray, queue_size=3)

        # Ugly workaround for "jumps back in time" that the synchronizer sometimes does.
        self.last_stamp = rospy.Time()

        # Create and load the network.
        netlib = import_module(modelname)
        self.net = netlib.mknet()
        self.net.__setstate__(np.load(weightsname))
        self.net.evaluate()

        self.aug = netlib.mkaug(None, None)
        self.preproc = netlib.preproc
        self.getrect = netlib.getrect

        # Do a fake forward-pass for precompilation.
        im = cutout(np.zeros((480, 640, 3), np.uint8), 0, 0, 150, 450)
        im = next(self.aug.augimg_pred(self.preproc(im), fast=True))
        self.net.forward(np.array([im]))
        rospy.loginfo("BiternionNet initialized")

        src = rospy.get_param("~src", "tra")
        subs = []
        if src == "tra":
            subs.append(
                message_filters.Subscriber(rospy.get_param("~tra", "/TODO"),
                                           TrackedPersons2d))
        elif src == "ubd":
            subs.append(
                message_filters.Subscriber(
                    rospy.get_param("~ubd", "/upper_body_detector/detections"),
                    UpperBodyDetector))
        else:
            raise ValueError("Unknown source type: " + src)

        rgb = rospy.get_param("~rgb", "/head_xtion/rgb/image_rect_color")
        subs.append(message_filters.Subscriber(rgb, ROSImage))
        subs.append(
            message_filters.Subscriber(
                rospy.get_param("~d", "/head_xtion/depth/image_rect_meters"),
                ROSImage))
        subs.append(
            message_filters.Subscriber(
                '/'.join(rgb.split('/')[:-1] + ['camera_info']), CameraInfo))

        tra3d = rospy.get_param("~tra3d", "")
        if src == "tra" and tra3d and HAS_TRACKED_PERSONS:
            self.pub_tracks = rospy.Publisher(topic + "/tracks",
                                              TrackedPersons,
                                              queue_size=3)
            subs.append(message_filters.Subscriber(tra3d, TrackedPersons))
            self.listener = TransformListener()
        else:
            self.pub_tracks = None

        ts = message_filters.ApproximateTimeSynchronizer(subs,
                                                         queue_size=5,
                                                         slop=0.5)
        ts.registerCallback(self.cb)

    def cb(self, src, rgb, d, caminfo, *more):
        # Ugly workaround because approximate sync sometimes jumps back in time.
        if rgb.header.stamp <= self.last_stamp:
            rospy.logwarn(
                "Jump back in time detected and dropped like it's hot")
            return

        self.last_stamp = rgb.header.stamp

        detrects = get_rects(src)

        # Early-exit to minimize CPU usage if possible.
        #if len(detrects) == 0:
        #    return

        # If nobody's listening, why should we be computing?
        listeners = sum(p.get_num_connections()
                        for p in (self.pub, self.pub_vis, self.pub_pa))
        if self.pub_tracks is not None:
            listeners += self.pub_tracks.get_num_connections()
        if listeners == 0:
            return

        header = rgb.header
        bridge = CvBridge()
        # Need to do BGR-RGB conversion manually.
        rgb = bridge.imgmsg_to_cv2(rgb)[:, :, ::-1]
        d = bridge.imgmsg_to_cv2(d)
        imgs = []
        for detrect in detrects:
            detrect = self.getrect(*detrect)
            det_rgb = cutout(rgb, *detrect)
            det_d = cutout(d, *detrect)

            # Preprocess and stick into the minibatch.
            im = subtractbg(det_rgb, det_d, 1.0, 0.5)
            im = self.preproc(im)
            imgs.append(im)
            sys.stderr.write("\r{}".format(self.counter))
            sys.stderr.flush()
            self.counter += 1

        # TODO: We could further optimize by putting all augmentations in a
        #       single batch and doing only one forward pass. Should be easy.
        if len(detrects):
            bits = [
                self.net.forward(batch)
                for batch in self.aug.augbatch_pred(np.array(imgs), fast=True)
            ]
            # Subtract 90 to correct for "my weird" origin.
            preds = bit2deg(ensemble_biternions(bits)) - 90
            # print(preds)
        else:
            preds = []

        if 0 < self.pub.get_num_connections():
            self.pub.publish(
                HeadOrientations(header=header,
                                 angles=list(preds),
                                 confidences=[0.83] * len(imgs)))

        # Visualization
        if 0 < self.pub_vis.get_num_connections():
            rgb_vis = rgb[:, :, ::-1].copy()
            for detrect, alpha in zip(detrects, preds):
                l, t, w, h = self.getrect(*detrect)
                px = int(round(np.cos(np.deg2rad(alpha)) * w / 2))
                py = -int(round(np.sin(np.deg2rad(alpha)) * h / 2))
                cv2.rectangle(
                    rgb_vis, (detrect[0], detrect[1]),
                    (detrect[0] + detrect[2], detrect[1] + detrect[3]),
                    (0, 255, 255), 1)
                cv2.rectangle(rgb_vis, (l, t), (l + w, t + h), (0, 255, 0), 2)
                cv2.line(rgb_vis, (l + w // 2, t + h // 2),
                         (l + w // 2 + px, t + h // 2 + py), (0, 255, 0), 2)
                # cv2.putText(rgb_vis, "{:.1f}".format(alpha), (l, t+25), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,255), 2)
            vismsg = bridge.cv2_to_imgmsg(rgb_vis, encoding='rgb8')
            vismsg.header = header  # TODO: Seems not to work!
            self.pub_vis.publish(vismsg)

        if 0 < self.pub_pa.get_num_connections():
            fx, cx = caminfo.K[0], caminfo.K[2]
            fy, cy = caminfo.K[4], caminfo.K[5]

            poseArray = PoseArray(header=header)

            for (dx, dy, dw, dh, dd), alpha in zip(
                    get_rects(src, with_depth=True), preds):
                dx, dy, dw, dh = self.getrect(dx, dy, dw, dh)

                # PoseArray message for boundingbox centres
                poseArray.poses.append(
                    Pose(
                        position=Point(x=dd * ((dx + dw / 2.0 - cx) / fx),
                                       y=dd * ((dy + dh / 2.0 - cy) / fy),
                                       z=dd),
                        # TODO: Use global UP vector (0,0,1) and transform into frame used by this message.
                        orientation=Quaternion(*quaternion_about_axis(
                            np.deg2rad(alpha), [0, -1, 0]))))

            self.pub_pa.publish(poseArray)

        if (len(more) == 1 and self.pub_tracks is not None
                and 0 < self.pub_tracks.get_num_connections()):
            t3d = more[0]
            try:
                self.listener.waitForTransform(header.frame_id,
                                               t3d.header.frame_id,
                                               rospy.Time(), rospy.Duration(1))
                for track, alpha in zip(t3d.tracks, preds):
                    track.pose.pose.orientation = self.listener.transformQuaternion(
                        t3d.header.frame_id,
                        QuaternionStamped(
                            header=header,
                            # TODO: Same as above!
                            quaternion=Quaternion(*quaternion_about_axis(
                                np.deg2rad(alpha), [0, -1, 0])))).quaternion
                self.pub_tracks.publish(t3d)
            except TFException:
                pass