def updateTransforms(self, bodyAngles, inclinationAngles, supportLegChain=LLEG_CHAIN, debug=False):
    """Calculate all forward transformation matrices from the center of mass
    to each end effector (for the head, the focal point of the camera), plus
    the camera-to-world transform.  Also derives the image horizon line and
    the camera height above the support foot, which pixEstimate needs.

    bodyAngles        -- flat list of body joint angles (radians).
    inclinationAngles -- (x, y) body inclination calculated from the accelerometer.
    supportLegChain   -- chain id of the leg currently on the ground.
    debug             -- drop into pdb before computing (development aid).

    Returns self.focalPointInWorldFrame as [x, y, z].

    TODO: get supportFoot from ALMotion (if it is our own walk engine it
    should be easier).
    """
    self._bodyAngles = bodyAngles
    self._inclination = inclinationAngles
    if debug:
        import pdb
        pdb.set_trace()

    # Offsets of each chain inside the flat bodyAngles list.
    HEAD_START, LLEG_START, RLEG_START = 0, 8, 14
    # BUGFIX: the old slice was bodyAngles[HEAD_START:2], which only worked
    # because HEAD_START happens to be 0; make it offset-relative like the
    # leg slices below.
    headAngles = bodyAngles[HEAD_START:HEAD_START + 2]
    lLegAngles = bodyAngles[LLEG_START:LLEG_START + 6]
    rLegAngles = bodyAngles[RLEG_START:RLEG_START + 6]

    origin = vector4D(0.0, 0.0, 0.0)
    cameraToBodyTransform = calculateForwardTransform(HEAD_CHAIN, headAngles)
    # Keep the camera-to-body transform around for bodyEstimate.
    self.cameraToBodyTransform = cameraToBodyTransform

    # Idiom fix: conditional expression instead of the fragile
    # `(cond and a) or b` trick (which silently misbehaves if `a` is falsy).
    leg_angles = lLegAngles if supportLegChain == LLEG_CHAIN else rLegAngles
    supportLegToBodyTransform = calculateForwardTransform(supportLegChain, leg_angles)
    supportLegLocation = dot(supportLegToBodyTransform, origin)

    # At this time we trust the inertial sensors for body inclination.
    bodyInclinationX, bodyInclinationY = inclinationAngles
    bodyToWorldTransform = dot(rotation4D(X_AXIS, bodyInclinationX),
                               rotation4D(Y_AXIS, bodyInclinationY))

    torsoLocationInLegFrame = dot(bodyToWorldTransform, supportLegLocation)
    # Camera/COM height above ground is minus the Z component of the torso
    # location expressed in the support-leg frame.
    self.comHeight = -torsoLocationInLegFrame[Z]

    self.cameraToWorldFrame = cameraToWorldFrame = dot(bodyToWorldTransform,
                                                       cameraToBodyTransform)
    self.calcImageHorizonLine()
    # The translation column of the camera-to-world transform is the focal
    # point's position in the world frame.
    self.focalPointInWorldFrame = [cameraToWorldFrame[X, 3],
                                   cameraToWorldFrame[Y, 3],
                                   cameraToWorldFrame[Z, 3]]
    return self.focalPointInWorldFrame
def bodyEstimate(self, x, y, dist, USE_WEBOTS_ESTIMATE=True):
    """Take a pixel on the screen and a vision-calculated distance to that
    pixel, and calculate where that pixel is relative to the world frame.
    Returns an Estimate to that position, with units in cm.  Returns
    NULL_ESTIMATE when dist is not positive.  See also pixEstimate.

    x, y -- pixel coordinates of the object in the image.
    dist -- vision distance estimate to the pixel, in cm.
    USE_WEBOTS_ESTIMATE -- mix the body-frame bearing/elevation with the
        world-frame distance/position (see below).
    """
    if dist <= 0.0:
        return NULL_ESTIMATE

    # All angle signs follow the right-hand rule for the major axis.
    # Bearing angle in the image plane: left positive, right negative.
    object_bearing = (IMAGE_CENTER_X - float(x)) * PIX_TO_RAD_X
    # Elevation angle in the image plane: up negative, down positive.
    object_elevation = (float(y) - IMAGE_CENTER_Y) * PIX_TO_RAD_Y
    # Convert the vision distance estimate from cm to mm.
    object_dist = dist * 10

    # Object position expressed in the camera frame.
    objectInCameraFrame = vector4D(
        object_dist * cos(object_bearing) * cos(-object_elevation),
        object_dist * sin(object_bearing),
        object_dist * cos(object_bearing) * sin(-object_elevation),
    )

    # BUGFIX: these transforms were referenced as bare names
    # (cameraToWorldFrame / cameraToBodyTransform), which are not defined in
    # this scope and would raise NameError -- use the attributes produced by
    # updateTransforms instead.
    # NOTE(review): assumes updateTransforms stores cameraToBodyTransform on
    # self; if it only keeps it as a local, store it there as well.
    objectInWorldFrame = dot(self.cameraToWorldFrame, objectInCameraFrame)
    objectInBodyFrame = dot(self.cameraToBodyTransform, objectInCameraFrame)

    if USE_WEBOTS_ESTIMATE:
        # The world-frame estimate has the better distance/position, the
        # body-frame estimate has the better bearing/elevation; combine them.
        badBearing = self.getEstimate(objectInWorldFrame)
        goodBearing = self.getEstimate(objectInBodyFrame)
        goodEst = Estimate(
            [
                badBearing[EST_DIST],
                goodBearing[EST_ELEVATION],
                goodBearing[EST_BEARING],
                badBearing[EST_X],
                badBearing[EST_Y],
            ]
        )
        return goodEst
    else:
        return self.getEstimate(objectInWorldFrame)
def calculateForwardTransform(id, angles):
    """Compose the forward-kinematics transform for one chain.

    id     -- chain id; indexes BASE_TRANSFORMS, MDH_PARAMS and
              END_TRANSFORMS.  (NOTE: shadows the builtin `id`; the name is
              kept so existing callers are unaffected.)
    angles -- list of joint angles (radians), one per joint in the chain.

    Returns the 4x4 homogeneous transform from the chain base to its end
    effector, built from the fixed base transforms, the modified
    Denavit-Hartenberg (mDH) parameters of each joint combined with the
    commanded angle, and the fixed end transforms.
    """
    fullTransform = identity()

    # Base transforms: fixed offsets from the body origin to the chain root.
    for baseTransform in BASE_TRANSFORMS[id]:
        fullTransform = dot(fullTransform, baseTransform)

    # mDH transforms: one (alpha, l, theta, d) tuple per joint.  Zero-valued
    # parameters are skipped so we don't multiply by identity matrices.
    # (Removed the unused local `numTransforms = NUM_JOINTS_CHAIN[id]`;
    # zip already stops at the shorter of angles/MDH_PARAMS[id].)
    for angle, (alpha, l, theta, d) in zip(angles, MDH_PARAMS[id]):
        # length l - movement along the X(i-1) axis
        if l != 0:
            fullTransform = dot(fullTransform, translation4D(l, 0.0, 0.0))
        # twist alpha - rotation about the X(i-1) axis
        if alpha != 0:
            fullTransform = dot(fullTransform, rotation4D(X_AXIS, alpha))
        # theta + joint angle - rotation about the Z(i) axis
        if theta + angle != 0:
            fullTransform = dot(fullTransform, rotation4D(Z_AXIS, theta + angle))
        # offset d - movement along the Z(i) axis
        if d != 0:
            fullTransform = dot(fullTransform, translation4D(0.0, 0.0, d))

    # End transforms: fixed offsets from the last joint to the end effector.
    for endTransform in END_TRANSFORMS[id]:
        fullTransform = dot(fullTransform, endTransform)

    return fullTransform
def pixEstimate(self, pixelX, pixelY, objectHeight_cm, debug=False):
    """Return an estimate to a given (x, y) pixel, representing an object at
    a certain height from the ground.  Takes units of cm, and returns cm.
    Returns NULL_ESTIMATE for out-of-image pixels, or when the target plane
    is below the camera but the pixel ray points upward (the intersection
    would be behind us).  See also bodyEstimate.
    """
    if debug:
        import pdb
        pdb.set_trace()
    # Reject pixels outside the image.
    if pixelX >= IMAGE_WIDTH or pixelX < 0 or pixelY >= IMAGE_HEIGHT or pixelY < 0:
        return NULL_ESTIMATE

    objectHeight = objectHeight_cm * CM_TO_MM

    # Declare the x,y,z coordinate of the pixel on the CCD, in relation to
    # the focal point (camera frame; X is the optical axis).
    pixelInCameraFrame = vector4D(
        FOCAL_LENGTH_MM,
        (IMAGE_CENTER_X - float(pixelX)) * PIX_X_TO_MM,
        (IMAGE_CENTER_Y - float(pixelY)) * PIX_Y_TO_MM,
    )

    # Transform the camera-frame coordinates of the pixel to the world frame.
    pixelInWorldFrame = dot(self.cameraToWorldFrame, pixelInCameraFrame)

    # Draw the line between the focal point and the pixel while in the world
    # frame.  Our goal is to find the point of intersection of that line and
    # the plane, parallel to the ground, passing through the object height.
    # In most cases this plane is the ground plane, which is comHeight below
    # the origin of the world frame.  If we call this method with
    # objectHeight != 0, then the plane is at a different height.
    object_z_in_world_frame = -self.comHeight + objectHeight

    # We parameterize the line with one variable t: find the t for which the
    # line goes through the plane, then evaluate the line at t for the
    # x,y,z coordinate.
    t = 0

    # Calculate t knowing object_z_in_world_frame (don't calculate if
    # looking up -- avoids a bad/zero denominator).
    if (self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]) > 0:
        t = (object_z_in_world_frame - pixelInWorldFrame[Z]) / (
            self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]
        )

    x = pixelInWorldFrame[X] + (self.focalPointInWorldFrame[X] - pixelInWorldFrame[X]) * t
    y = pixelInWorldFrame[Y] + (self.focalPointInWorldFrame[Y] - pixelInWorldFrame[Y]) * t
    z = pixelInWorldFrame[Z] + (self.focalPointInWorldFrame[Z] - pixelInWorldFrame[Z]) * t
    objectInWorldFrame = vector4D(x, y, z)

    # SANITY CHECK: if the plane where the target object is, is below the
    # camera height, then the pixel in world frame must be lower than the
    # focal point, or else we get odd results, since the point of
    # intersection with that plane would be behind us.
    if (
        objectHeight < self.comHeight + self.focalPointInWorldFrame[Z]
        and pixelInWorldFrame[Z] > self.focalPointInWorldFrame[Z]
    ):
        return NULL_ESTIMATE

    est = self.getEstimate(objectInWorldFrame)
    # TODO: why that function? it seems to be perfectly fine without that
    # correction.  It is basically a parabola.  Why those parameters?  Did
    # they do any sort of measurement?
    # est[EST_DIST] = correctDistance(est[EST_DIST])
    return est
def calcImageHorizonLine(self):
    """Calculate a horizon line for the real image via the camera matrix.

    The line is stored as two endpoints on the left and right of the screen
    in self.horizonLeft and self.horizonRight; the slope goes to
    self.horizonSlope and its perpendicular to self.perpenHorizonSlope.
    """
    # Moving the camera frame to the center of the body (zeroing the
    # translation column) lets us compare only the *rotation* of the camera
    # frame relative to the world frame.  Note the stored
    # self.cameraToHorizonFrame aliases the array mutated below.
    cameraToHorizonFrame = array(self.cameraToWorldFrame)
    self.cameraToHorizonFrame = cameraToHorizonFrame
    cameraToHorizonFrame[X_AXIS, W_AXIS] = 0.0
    cameraToHorizonFrame[Y_AXIS, W_AXIS] = 0.0
    cameraToHorizonFrame[Z_AXIS, W_AXIS] = 0.0

    # We need the inverse but calculate the transpose, because they are
    # equivalent for orthogonal matrices and transpose is faster.
    # (Currently only needed by the disabled intersection code below.)
    horizonToCameraFrame = cameraToHorizonFrame.T

    # Each vertical edge of the CCD is a line; we want where those lines
    # intersect the horizon plane (the xy plane level with the ground, at
    # the height of the focal point).
    leftEdge = [dot(cameraToHorizonFrame, topLeft), dot(cameraToHorizonFrame, bottomLeft)]
    rightEdge = [dot(cameraToHorizonFrame, topRight), dot(cameraToHorizonFrame, bottomRight)]

    # TODO: port intersectLineWithXYPlane from the C++ code (including its
    # exception handling), roughly:
    #   intersectionLeft = intersectLineWithXYPlane(leftEdge)
    #   intersectionRight = intersectLineWithXYPlane(rightEdge)
    #   intersectionLeft = dot(horizonToCameraFrame, intersectionLeft)
    #   intersectionRight = dot(horizonToCameraFrame, intersectionRight)
    #   height_mm_left = intersectionLeft[Z]
    #   height_mm_right = intersectionRight[Z]
    # Temporary fix for BURST: assume the horizon crosses the image center.
    height_mm_left = IMAGE_HEIGHT_MM / 2
    height_mm_right = IMAGE_HEIGHT_MM / 2

    # Convert from mm on the CCD to pixel rows (pixel y grows downward).
    height_pix_left = -height_mm_left * MM_TO_PIX_Y + IMAGE_HEIGHT / 2
    height_pix_right = -height_mm_right * MM_TO_PIX_Y + IMAGE_HEIGHT / 2

    self.horizonLeft[:] = 0.0, round(height_pix_left)
    self.horizonRight[:] = IMAGE_WIDTH - 1, round(height_pix_right)

    self.horizonSlope = float((height_pix_right - height_pix_left) / (IMAGE_WIDTH - 1.0))

    # BUGFIX: perpenHorizonSlope was computed into a local and then
    # discarded; store it on self like horizonSlope so callers can use it.
    if self.horizonSlope != 0:
        self.perpenHorizonSlope = -1 / self.horizonSlope
    else:
        self.perpenHorizonSlope = INFTY