def testPixEstimateMany(self, camera_pitch=0.0, acceptable_error=1):
    """ Return results that are incorrect - those that have an error in
    position larger than acceptable_error and should be visible. """
    from pylab import linalg, linspace, array
    # Estimate the position of every point on a grid of ground positions.
    results = [(self.testPixEstimateStraightForward(px, py, camera_pitch), px, py)
               for px, py in grid_points(linspace(200, 300, 20),
                                         linspace(-100, 100, 20))]
    # Attach the distance between the estimated position and the true one.
    results = [(linalg.norm(array((px, py)) - (res[0].x, res[0].y)), res, px, py)
               for res, px, py in results]
    # Keep only the estimates that should be visible (pixel inside the image)
    # yet are further than acceptable_error from the true position.
    return [(err, est, pixel_x, pixel_y, px, py)
            for err, (est, pixel_x, pixel_y), px, py in results
            if err > acceptable_error
            and 0 <= pixel_x <= IMAGE_WIDTH
            and 0 <= pixel_y <= IMAGE_HEIGHT]
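# grid_points is relied on above but not defined in this section. The sketch
# below is only an illustration of the behaviour testPixEstimateMany assumes
# from it (the Cartesian product of the two coordinate ranges); if the module
# already provides a grid_points helper, that one is the authoritative version.
def grid_points(xs, ys):
    """ Yield (x, y) for every combination of the two 1-D sequences. """
    for x in xs:
        for y in ys:
            yield x, y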
def intersectLineWithXYPlane(aLine):
    """
    Take a vector of two points describing a line and intersect it with the
    XY plane of the relevant coordinate frame. Could probably be made faster
    if the dependency on matrix multiplication was removed.
    """
    # The only place these are needed, hence the local import.
    from scipy.linalg import lu_factor, lu_solve

    l1, l2 = aLine[0], aLine[1]
    # Points on the plane level with the ground in the horizon coord frame.
    # Normally we would need 3 points, but since one is the origin it can be
    # ignored.
    unitX = vector4D(1, 0, 0)
    unitY = vector4D(0, 1, 0)
    # We now solve for the point of intersection using linear algebra:
    # Ax = b, where b is the target and x is the vector of weights (t, u, v)
    # that solve l1 + (l2 - l1)*t = unitX*u + unitY*v.
    # Note: usually a plane is defined by three vectors, but in this case the
    # target plane goes through the origin of the target frame, so one of the
    # vectors is the zero vector and we ignore it.
    # See http://en.wikipedia.org/wiki/Line-plane_intersection for details.
    eqSystem = zeros((3, 3))
    eqSystem[0, 0] = l1[0] - l2[0]
    eqSystem[0, 1] = unitX[0]
    eqSystem[0, 2] = unitY[0]
    eqSystem[1, 0] = l1[1] - l2[1]
    eqSystem[1, 1] = unitX[1]
    eqSystem[1, 2] = unitY[1]
    eqSystem[2, 0] = l1[2] - l2[2]
    eqSystem[2, 1] = unitX[2]
    eqSystem[2, 2] = unitY[2]
    # Solve for the weights. Usually we would solve eqSystem * x = l1 for x,
    # but l1 is defined in homogeneous coordinates - the right-hand side must
    # be a 3 by 1 vector to solve the system of equations.
    target = array(l1[:3])
    lu, piv = lu_factor(eqSystem)
    # TODO: check lu and piv for near-singularity, like the singularRow check
    # in the original C++ code:
    #   singularRow = lu_factorize(eqSystem, P)  # != 0 if near singular
    #   if singularRow != 0:
    #       # The camera is parallel to the ground. Since l1 is the top
    #       # (left/right) of the image, the horizon will be at the top of
    #       # the screen in this case, which works for us.
    #       return l1
    result = lu_solve((lu, piv), target)
    # The first variable in the linear equation was t, so it appears at the
    # top of the vector 'result'. The 't' is such that the point
    # l1 + (l2 - l1)*t is on the horizon plane.
    # NOTE: this intersection is still in the horizon frame though.
    t = result[0]
    intersection = l2 - l1
    intersection *= t
    intersection += l1
    # The intersection currently seems to have the wrong y coordinate: it is
    # the negative of what it should be.
    return intersection
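# A small numeric check of the line/xy-plane intersection scheme above, solved
# directly with scipy. The function name and the example points are
# illustrative assumptions only; they are not part of the original module.
def _checkIntersectLineWithXYPlane():
    """ Sketch: a line from one unit above the plane to one unit below it
    should cross the xy plane at (0.5, 0, 0). """
    import numpy as np
    from scipy.linalg import lu_factor, lu_solve
    l1 = np.array([0.0, 0.0, 1.0])   # a point one unit above the plane
    l2 = np.array([1.0, 0.0, -1.0])  # a point one unit below the plane
    # Columns: (l1 - l2), unitX, unitY; unknowns are (t, u, v) in
    # l1 + (l2 - l1)*t = unitX*u + unitY*v.
    eqSystem = np.column_stack((l1 - l2, [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))
    t = lu_solve(lu_factor(eqSystem), l1)[0]
    intersection = l1 + (l2 - l1) * t
    return intersection  # -> array([0.5, 0., 0.]), i.e. z == 0, on the xy plane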
def calcImageHorizonLine(self):
    """
    Calculates a horizon line for the real image via the camera matrix, which
    is a global member of Pose. The line is stored as two endpoints on the
    left and right of the screen in horizonLeft and horizonRight.
    """
    # Moving the camera frame to the center of the body lets us compare the
    # rotation of the camera frame relative to the world frame.
    cameraToHorizonFrame = array(self.cameraToWorldFrame)
    self.cameraToHorizonFrame = cameraToHorizonFrame
    cameraToHorizonFrame[X_AXIS, W_AXIS] = 0.0
    cameraToHorizonFrame[Y_AXIS, W_AXIS] = 0.0
    cameraToHorizonFrame[Z_AXIS, W_AXIS] = 0.0
    # We need the inverse, but we calculate the transpose because they are
    # equivalent for orthogonal matrices and the transpose is faster.
    horizonToCameraFrame = cameraToHorizonFrame.T
    # We define each edge of the CCD as a line, and solve for where that line
    # intersects the horizon plane (the xy plane level with the ground, at
    # the height of the focal point).
    leftEdge = [dot(cameraToHorizonFrame, topLeft),
                dot(cameraToHorizonFrame, bottomLeft)]
    rightEdge = [dot(cameraToHorizonFrame, topRight),
                 dot(cameraToHorizonFrame, bottomRight)]
    # Intersection points in the horizon frame. Disabled for now (see the
    # temporary BURST fix below); the original C++ also caught exceptions
    # thrown by intersectLineWithXYPlane here:
    #   intersectionLeft = intersectLineWithXYPlane(leftEdge)
    #   intersectionRight = intersectLineWithXYPlane(rightEdge)
    #   # Move them back into the camera frame.
    #   intersectionLeft = dot(horizonToCameraFrame, intersectionLeft)
    #   intersectionRight = dot(horizonToCameraFrame, intersectionRight)
    #   # We are only interested in the height (z axis), not the width.
    #   height_mm_left = intersectionLeft[Z]
    #   height_mm_right = intersectionRight[Z]
    # TODO: temporary fix for BURST:
    height_mm_left = IMAGE_HEIGHT_MM / 2
    height_mm_right = IMAGE_HEIGHT_MM / 2
    height_pix_left = -height_mm_left * MM_TO_PIX_Y + IMAGE_HEIGHT / 2
    height_pix_right = -height_mm_right * MM_TO_PIX_Y + IMAGE_HEIGHT / 2
    self.horizonLeft[:] = 0.0, round(height_pix_left)
    self.horizonRight[:] = IMAGE_WIDTH - 1, round(height_pix_right)
    self.horizonSlope = float(
        (height_pix_right - height_pix_left) / (IMAGE_WIDTH - 1.0))
    # Slope of a line perpendicular to the horizon (computed but not yet
    # stored or used here).
    if self.horizonSlope != 0:
        perpenHorizonSlope = -1 / self.horizonSlope
    else:
        perpenHorizonSlope = INFTY
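# A minimal numeric sketch of the mm -> pixel horizon conversion used above.
# The constants and function name below are assumed example values for
# illustration only; the module's real IMAGE_* and MM_TO_PIX_Y constants are
# the authoritative ones.
def _sketchHorizonHeights():
    IMAGE_WIDTH, IMAGE_HEIGHT = 640, 480
    IMAGE_HEIGHT_MM = 2.88                        # assumed CCD height in mm
    MM_TO_PIX_Y = IMAGE_HEIGHT / IMAGE_HEIGHT_MM
    # The temporary BURST fix puts both edges at half the CCD height...
    height_mm_left = height_mm_right = IMAGE_HEIGHT_MM / 2
    height_pix_left = -height_mm_left * MM_TO_PIX_Y + IMAGE_HEIGHT / 2
    height_pix_right = -height_mm_right * MM_TO_PIX_Y + IMAGE_HEIGHT / 2
    # ...so the horizon lands on pixel row 0 on both sides and the slope is 0.
    slope = (height_pix_right - height_pix_left) / (IMAGE_WIDTH - 1.0)
    return height_pix_left, height_pix_right, slope  # -> (0.0, 0.0, 0.0)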