Example 1
    def updateTransforms(self, bodyAngles, inclinationAngles, supportLegChain=LLEG_CHAIN, debug=False):
        """
        TODO: get supportFoot from ALMotion (if it is our own walk engine this
        should be easier).

        Calculate all forward transformation matrices from the center of mass to each
        end effector. For the head this is the focal point of the camera. We also
        calculate the transformation from the camera frame to the world frame.
        Then we calculate the horizon and the camera height, which are needed for
        the pixel estimates (see pixEstimate).

        Input:
            bodyAngles, inclinationAngles - angles of the body joints and the
                accelerometer-calculated inclination.
            supportLegChain - which leg is on the ground.
        """

        self._bodyAngles = bodyAngles
        self._inclination = inclinationAngles

        if debug:
            import pdb

            pdb.set_trace()

        # Joint chain start offsets within bodyAngles (hard-coded here)
        HEAD_START, LLEG_START, RLEG_START = 0, 8, 14
        headAngles = bodyAngles[HEAD_START : HEAD_START + 2]
        lLegAngles = bodyAngles[LLEG_START : LLEG_START + 6]
        rLegAngles = bodyAngles[RLEG_START : RLEG_START + 6]

        origin = vector4D(0.0, 0.0, 0.0)

        self.cameraToBodyTransform = cameraToBodyTransform = calculateForwardTransform(HEAD_CHAIN, headAngles)

        leg_angles = lLegAngles if supportLegChain == LLEG_CHAIN else rLegAngles
        supportLegToBodyTransform = calculateForwardTransform(supportLegChain, leg_angles)

        supportLegLocation = dot(supportLegToBodyTransform, origin)

        # At this time we trust inertial
        bodyInclinationX, bodyInclinationY = inclinationAngles

        bodyToWorldTransform = dot(rotation4D(X_AXIS, bodyInclinationX), rotation4D(Y_AXIS, bodyInclinationY))

        torsoLocationInLegFrame = dot(bodyToWorldTransform, supportLegLocation)
        # get the Z component of the location
        self.comHeight = -torsoLocationInLegFrame[Z]

        self.cameraToWorldFrame = cameraToWorldFrame = dot(bodyToWorldTransform, cameraToBodyTransform)

        self.calcImageHorizonLine()
        self.focalPointInWorldFrame = [cameraToWorldFrame[X, 3], cameraToWorldFrame[Y, 3], cameraToWorldFrame[Z, 3]]
        return self.focalPointInWorldFrame
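
The snippets rely on homogeneous-coordinate helpers such as vector4D and rotation4D (plus numpy's dot and the X_AXIS/Y_AXIS constants) that are not shown here. The following is a minimal sketch of what such helpers might look like, assuming numpy and axis indices 0/1/2; it is an assumption about the original module, not its actual code.

import numpy

X_AXIS, Y_AXIS, Z_AXIS = 0, 1, 2  # assumed axis indices


def vector4D(x, y, z, w=1.0):
    # Homogeneous 3D point/vector: [x, y, z, w]  (assumed layout)
    return numpy.array([x, y, z, w])


def rotation4D(axis, angle):
    # 4x4 homogeneous rotation about one of the principal axes
    rot = numpy.eye(4)
    c, s = numpy.cos(angle), numpy.sin(angle)
    if axis == X_AXIS:
        rot[1, 1], rot[1, 2], rot[2, 1], rot[2, 2] = c, -s, s, c
    elif axis == Y_AXIS:
        rot[0, 0], rot[0, 2], rot[2, 0], rot[2, 2] = c, s, -s, c
    else:  # Z_AXIS
        rot[0, 0], rot[0, 1], rot[1, 0], rot[1, 1] = c, -s, s, c
    return rot
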
Example 2
    def bodyEstimate(self, x, y, dist, USE_WEBOTS_ESTIMATE=True):
        """
        Body estimate takes a pixel on the screen and a vision-calculated
        distance to that pixel, and calculates where that pixel is relative
        to the world frame. It then returns an estimate of that position,
        with units in cm. See also pixEstimate.
        """
        if dist <= 0.0:
            return NULL_ESTIMATE

        # all angle signs are according to right hand rule for the major axis
        # get the bearing angle in the image plane: left positive, right negative
        object_bearing = (IMAGE_CENTER_X - float(x)) * PIX_TO_RAD_X
        # get the elevation angle in the image plane: up negative, down positive
        object_elevation = (float(y) - IMAGE_CENTER_Y) * PIX_TO_RAD_Y
        # convert dist estimate to mm
        object_dist = dist * 10

        # object in the camera frame
        objectInCameraFrame = vector4D(
            object_dist * cos(object_bearing) * cos(-object_elevation),
            object_dist * sin(object_bearing),
            object_dist * cos(object_bearing) * sin(-object_elevation),
        )

        # object in world frame
        objectInWorldFrame = dot(self.cameraToWorldFrame, objectInCameraFrame)

        objectInBodyFrame = dot(self.cameraToBodyTransform, objectInCameraFrame)

        if USE_WEBOTS_ESTIMATE:
            badBearing = self.getEstimate(objectInWorldFrame)
            goodBearing = self.getEstimate(objectInBodyFrame)
            # debug: print(goodBearing[EST_BEARING], badBearing[EST_BEARING])
            goodEst = Estimate(
                [
                    badBearing[EST_DIST],
                    goodBearing[EST_ELEVATION],
                    goodBearing[EST_BEARING],
                    badBearing[EST_X],
                    badBearing[EST_Y],
                ]
            )

            return goodEst
        else:
            return self.getEstimate(objectInWorldFrame)
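
getEstimate is not shown in these snippets. Below is a hedged sketch of what such a helper might compute from a homogeneous point (in mm) in a given frame, returning distance, elevation, bearing, x and y in the EST_* order used above, with distances in cm; the name, sign conventions and unit handling are assumptions, not the original implementation.

from math import atan2, sqrt

MM_TO_CM = 0.1  # assumed conversion factor


def estimateFromPoint(point):
    # point is a homogeneous [x, y, z, w] vector in mm
    x, y, z = point[0], point[1], point[2]
    dist = sqrt(x * x + y * y + z * z)
    bearing = atan2(y, x)                      # left positive, right negative
    elevation = atan2(z, sqrt(x * x + y * y))  # up positive (assumed convention)
    return [dist * MM_TO_CM, elevation, bearing, x * MM_TO_CM, y * MM_TO_CM]
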
Example 3
    def pixEstimate(self, pixelX, pixelY, objectHeight_cm, debug=False):
        """
        Returns an estimate to a given (x, y) pixel, representing an
        object at a certain height from the ground. Takes units of
        cm, and returns in cm. See also bodyEstimate.
        """

        if debug:
            import pdb

            pdb.set_trace()

        if pixelX >= IMAGE_WIDTH or pixelX < 0 or pixelY >= IMAGE_HEIGHT or pixelY < 0:
            return NULL_ESTIMATE

        objectHeight = objectHeight_cm * CM_TO_MM

        # declare x,y,z coordinate of pixel in relation to focal point
        pixelInCameraFrame = vector4D(
            FOCAL_LENGTH_MM,
            (IMAGE_CENTER_X - float(pixelX)) * PIX_X_TO_MM,
            (IMAGE_CENTER_Y - float(pixelY)) * PIX_Y_TO_MM,
        )

        # Declare x,y,z coordinate of pixel in relation to body center
        # transform camera coordinates to body frame coordinates for a test pixel
        pixelInWorldFrame = dot(self.cameraToWorldFrame, pixelInCameraFrame)

        # Draw the line between the focal point and the pixel while in the world
        # frame. Our goal is to find the point of intersection of that line and
        # the plane, parallel to the ground, passing through the object height.
        # In most cases, this plane is the ground plane, which is comHeight below the
        # origin of the world frame. If we call this method with objectHeight != 0,
        # then the plane is at a different height.
        object_z_in_world_frame = -self.comHeight + objectHeight

        # We are going to parameterize the line with one variable t. We find the t
        # for which the line goes through the plane, then evaluate the line at t for
        # the x,y,z coordinate
        t = 0

        # calculate t knowing object_z_in_world_frame (don't calculate if looking up)
        if (self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]) > 0:
            t = (object_z_in_world_frame - pixelInWorldFrame[Z]) / (
                self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]
            )

        x = pixelInWorldFrame[X] + (self.focalPointInWorldFrame[X] - pixelInWorldFrame[X]) * t
        y = pixelInWorldFrame[Y] + (self.focalPointInWorldFrame[Y] - pixelInWorldFrame[Y]) * t
        z = pixelInWorldFrame[Z] + (self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]) * t
        objectInWorldFrame = vector4D(x, y, z)

        # SANITY CHECKS
        # If the plane where the target object lies is below the camera height,
        # then we need to make sure that the pixel in the world frame is lower than
        # the focal point; otherwise we will get odd results, since the point
        # of intersection with that plane would be behind us.
        if (
            objectHeight < self.comHeight + self.focalPointInWorldFrame[Z]
            and pixelInWorldFrame[Z] > self.focalPointInWorldFrame[Z]
        ):
            return NULL_ESTIMATE

        est = self.getEstimate(objectInWorldFrame)
        # TODO: why that function? it seems to be perfectly fine without
        # that correction. It is basically a parabola. Why those parameters?
        # did they do any sort of measurement?
        # est[EST_DIST] = correctDistance(est[EST_DIST])

        return est
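
The geometric core of pixEstimate is a line/plane intersection: parameterize the line through the pixel P and the focal point F as P + (F - P) * t and pick the t at which the z coordinate equals the target plane height. A minimal standalone sketch of that step is shown below; the function name and the None fallback are illustrative, not part of the original module.

def intersectLineWithHeight(pixelWorld, focalWorld, planeZ):
    # Returns the (x, y, z) point where the line through the pixel and the focal
    # point crosses z == planeZ, or None when the camera looks up or parallel to it.
    dz = focalWorld[2] - pixelWorld[2]
    if dz <= 0:
        return None
    t = (planeZ - pixelWorld[2]) / dz
    return tuple(pixelWorld[i] + (focalWorld[i] - pixelWorld[i]) * t for i in range(3))
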
Example 4
    def intersectLineWithXYPlane(aLine):
        """
         Method to take a vector of two points describing a line and intersect it with
         the XY plane of the relevant coordinate frame. Could probably be made faster
         if the dependency on the linear solve were removed.
        """
        from scipy.linalg import (
            lu_factor,
            lu_solve,
        )  # only place these are needed, hence the last-minute local import

        l1, l2 = aLine[0], aLine[1]

        # points on the plane level with the ground in the horizon coord frame
        # normally need 3 points, but since one is the origin, it can get ignored
        unitX = vector4D(1, 0, 0)
        unitY = vector4D(0, 1, 0)

        # we now solve for the point of intersection using linear algebra:
        # Ax = b, where b is the target and x is the vector of weights (t, u, v)
        # solving l1 + (l2 - l1)*t = unitX*u + unitY*v
        # Note: usually a plane is defined by three points; here the target plane
        # goes through the origin of the target frame, so that point is the zero
        # vector and can be ignored.
        # See http://en.wikipedia.org/wiki/Line-plane_intersection for details
        eqSystem = zeros((3, 3))
        eqSystem[0, 0] = l1[0] - l2[0]
        eqSystem[0, 1] = unitX[0]
        eqSystem[0, 2] = unitY[0]

        eqSystem[1, 0] = l1[1] - l2[1]
        eqSystem[1, 1] = unitX[1]
        eqSystem[1, 2] = unitY[1]

        eqSystem[2, 0] = l1[2] - l2[2]
        eqSystem[2, 1] = unitX[2]
        eqSystem[2, 2] = unitY[2]

        # Solve for the weights.
        # Normally we would solve eqSystem * (t, u, v) = l1 directly, but l1 is
        # defined in homogeneous coordinates; the system is 3x3, so we drop the
        # homogeneous component to get a 3-by-1 right-hand side.
        target = array(l1[:3])
        lu, piv = lu_factor(eqSystem)
        # TODO: check the lu and piv like singularRow check in original code

        # If the matrix is near singular, this value will be != 0. The original
        # C++ code checked it roughly like this:
        #   singularRow = lu_factorize(eqSystem, P)
        #   if singularRow != 0:
        #       # The camera is parallel to the ground.
        #       # Since l1 is the top (left/right) of the image, the horizon
        #       # will be at the top of the screen in this case, which works for us.
        #       return l1

        result = lu_solve((lu, piv), target)
        t = result[0]

        # the first variable in the linear equation was t, so it appears at the top of
        # the vector 'result'. The 't' is such that the point l1 + (l2 -l1)t is on
        # the horizon plane
        # NOTE: this intersection is still in the horizon frame though
        intersection = l2 - l1
        intersection *= t
        intersection += l1

        # The intersection seems to currently have the wrong y coordinate. It's the
        # negative of what it should be.
        return intersection
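
As the docstring above notes, the linear solve could be avoided: in this frame the target plane is simply z = 0, so t follows directly from the z components of the two line points. A hedged closed-form sketch under that assumption (illustrative, not the original implementation):

def intersectLineWithXYPlaneClosedForm(aLine):
    # l1 and l2 are homogeneous numpy vectors (e.g. from vector4D)
    l1, l2 = aLine[0], aLine[1]
    dz = l2[2] - l1[2]
    if dz == 0:
        # Line parallel to the XY plane; fall back to l1, mirroring the original
        # singularity handling.
        return l1
    t = -l1[2] / dz
    return l1 + (l2 - l1) * t
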
Example 5
    def pixWidthToDistance(self, pixWidth, cmWidth):
        """
        Return the distance to the object based on the image magnification of its
        width.
        """
        if not pixWidth:
            return INFTY
        return (FOCAL_LENGTH_MM / (pixWidth * PIX_X_TO_MM)) * cmWidth
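
pixWidthToDistance is the pinhole similar-triangles relation: an object of real width W that spans w millimetres on the image plane lies at distance f * W / w. A quick numeric check with made-up constants (the values below are illustrative assumptions, not the robot's real camera parameters):

FOCAL_LENGTH_MM_ASSUMED = 3.0     # assumed focal length in mm
PIX_X_TO_MM_ASSUMED = 0.01        # assumed sensor mm per pixel
pixWidth, cmWidth = 29, 8.7       # assumed measured pixel width and real width in cm
dist_cm = (FOCAL_LENGTH_MM_ASSUMED / (pixWidth * PIX_X_TO_MM_ASSUMED)) * cmWidth  # ~90 cm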


def correctDistance(uncorrectedDist):
    return -0.000591972 * uncorrectedDist * uncorrectedDist + 0.858283 * uncorrectedDist + 2.18768


# Screen edge coordinates in the camera coordinate frame
topLeft = vector4D(FOCAL_LENGTH_MM, IMAGE_WIDTH_MM / 2, IMAGE_HEIGHT_MM / 2)
bottomLeft = vector4D(FOCAL_LENGTH_MM, IMAGE_WIDTH_MM / 2, -IMAGE_HEIGHT_MM / 2)
topRight = vector4D(FOCAL_LENGTH_MM, -IMAGE_WIDTH_MM / 2, IMAGE_HEIGHT_MM / 2)
bottomRight = vector4D(FOCAL_LENGTH_MM, -IMAGE_WIDTH_MM / 2, -IMAGE_HEIGHT_MM / 2)

# Kinematics

SHOULDER_OFFSET_Y = 98.0
UPPER_ARM_LENGTH = 90.0
LOWER_ARM_LENGTH = 145.0
SHOULDER_OFFSET_Z = 100.0
THIGH_LENGTH = 100.0
TIBIA_LENGTH = 100.0
NECK_OFFSET_Z = 126.5
HIP_OFFSET_Y = 50.0
HIP_OFFSET_Z = 85.0