Example #1
 def negation3(self):
     """
     error in current params
     error: (-215) type == CV_32F || type == CV_64F in function cv::invert
     :return:
     """
     cv2.invert(self.cvImage.image, self.cvImage.image)
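The assertion quoted in the docstring fires because cv2.invert only accepts CV_32F or CV_64F matrices, while image buffers are usually 8-bit. A minimal sketch of the usual workaround, on a toy matrix rather than the original cvImage, that also checks the status value cv2.invert returns:

import cv2
import numpy as np

src = np.array([[2, 1], [7, 4]], dtype=np.uint8)   # same dtype as a typical image buffer
ok, inv = cv2.invert(src.astype(np.float64))       # cast first; uint8 input raises the (-215) assertion
if ok != 0:                                        # with the default DECOMP_LU, 0 means the input was singular
    print(inv)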
Example #2
def interp_step(dog, octv, intvl, r, c):
	dD = deriv_3D(dog, octv, intvl, r, c)
	H = hessian_3D(dog, octv, intvl, r, c)
	H_inv = np.zeros(H.T.shape)
	cv2.invert(H, H_inv, cv2.DECOMP_SVD)
	gm = cv2.gemm(H_inv, dD, -1, None, 0)
	return gm
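For reference, the gemm call above evaluates to -H_inv · dD (alpha = -1, beta = 0, no third operand). An equivalent NumPy sketch with stand-in values for the inverted Hessian and the derivative vector:

import numpy as np

H_inv = np.linalg.pinv(np.random.rand(3, 3))   # stand-in for the SVD-inverted 3x3 Hessian
dD = np.random.rand(3, 1)                      # stand-in for the 3x1 derivative from deriv_3D
offset = -H_inv.dot(dD)                        # same result as cv2.gemm(H_inv, dD, -1, None, 0)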
Example #3
def refine_sky(bopt, image):
    sky_mask = make_mask(bopt, image)
    ground = np.ma.array(image,mask=cv2.cvtColor(cv2.bitwise_not(sky_mask), cv2.COLOR_GRAY2BGR)).compressed()
    sky = np.ma.array(image,mask=cv2.cvtColor(sky_mask, cv2.COLOR_GRAY2BGR)).compressed()
    ground.shape = (ground.size//3, 3)
    sky.shape = (sky.size//3, 3)
    ret, label, center = cv2.kmeans(np.float32(sky),2,None,(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0),10,cv2.KMEANS_RANDOM_CENTERS)
    sigma_s1, mu_s1 = cv2.calcCovarMatrix(sky[label.ravel() == 0],None,cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    ic_s1 = cv2.invert(sigma_s1, flags=cv2.DECOMP_SVD)[1]  # pass via the flags keyword; the second positional slot is dst
    sigma_s2, mu_s2 = cv2.calcCovarMatrix(sky[label.ravel() == 1],None,cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    ic_s2 = cv2.invert(sigma_s2, flags=cv2.DECOMP_SVD)[1]
    sigma_g, mu_g = cv2.calcCovarMatrix(ground,None,cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
    icg = cv2.invert(sigma_g, flags=cv2.DECOMP_SVD)[1]
    if cv2.Mahalanobis(mu_s1, mu_g, ic_s1) > cv2.Mahalanobis(mu_s2, mu_g, ic_s2):
        mu_s = mu_s1
        sigma_s = sigma_s1
        ics = ic_s1
    else:
        mu_s = mu_s2
        sigma_s = sigma_s2
        ics = ic_s2
    for x in range(image.shape[1]):
        cnt = np.sum(np.less(spatial.distance.cdist(image[0:bopt[x], x],mu_s,'mahalanobis',VI=ics),spatial.distance.cdist(image[0:bopt[x], x],mu_g,'mahalanobis',VI=icg)))
        if cnt < (bopt[x] / 2):
            bopt[x] = 0
    return bopt
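The covariance / inverse covariance / Mahalanobis pattern used here reappears in Examples #17, #20 and #33. A self-contained sketch of that pattern on synthetic colour samples (all values are illustrative):

import cv2
import numpy as np

samples = np.random.rand(50, 3).astype(np.float32)   # hypothetical n x 3 pixel samples
covar, mean = cv2.calcCovarMatrix(
    samples, None, cv2.COVAR_NORMAL | cv2.COVAR_ROWS | cv2.COVAR_SCALE)
ic = cv2.invert(covar, flags=cv2.DECOMP_SVD)[1]      # SVD tolerates near-singular covariances
d = cv2.Mahalanobis(samples[0].reshape(1, -1).astype(np.float64), mean, ic)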
Example #4
def getRSByNewtonsMethod(leftUpperPoint, leftDownPoint, rightUpperPoint,
                         rightDownPoint, x, y, noIteration):
    baseRS = np.zeros((1, 2), dtype="float64")
    baseRS[0, 0] = 0.5
    baseRS[0, 1] = 0.5

    basePoint = np.zeros((1, 2), np.float64)
    basePoint[0, 0] = x
    basePoint[0, 1] = y

    for _ in range(noIteration):
        titeratedP = np.zeros((1, 2), np.float64)
        titeratedP[0, 0] = (1 - baseRS[0, 1]) * (
            (1 - baseRS[0, 0]) * leftDownPoint[0] +
            baseRS[0, 0] * rightDownPoint[0]) + baseRS[0, 1] * (
                baseRS[0, 0] * rightUpperPoint[0] +
                (1 - baseRS[0, 0]) * leftUpperPoint[0])
        titeratedP[0, 1] = (1 - baseRS[0, 1]) * (
            (1 - baseRS[0, 0]) * leftDownPoint[1] +
            baseRS[0, 0] * rightDownPoint[1]) + baseRS[0, 1] * (
                baseRS[0, 0] * rightUpperPoint[1] +
                (1 - baseRS[0, 0]) * leftUpperPoint[1])
        jacobian = createJacobian(baseRS[0, 0], baseRS[0, 1], leftDownPoint,
                                  rightDownPoint, rightUpperPoint,
                                  leftUpperPoint)
        invertedJacobian = np.zeros((2, 2), dtype="float64")
        cv2.invert(jacobian, invertedJacobian, cv2.DECOMP_LU)

        baseRS = baseRS - invertedJacobian * (titeratedP - basePoint)

    return baseRS
Example #5
def __original_points_coords(point_list):
    """
    Detects the coordinates of the board on the original image.

    :param point_list: List of the relative points in the sequence of
        image transformations done in each layer.
    :return: The coordinates in the original image of the chessboard
        corners and the coordinates of each of the corners of the
        chessboard squares as a pair of board_corners and
        square_corners.
    """
    ptslims = np.float32([[0, 0], [1200, 0], [1200, 1200], [0, 1200]])
    last_index = len(point_list) - 1

    # Compute all of the transformation matrixes
    transf_mats = []
    for i in range(last_index):
        transf_mat = cv2.getPerspectiveTransform(np.float32(point_list[i]),
                                                 ptslims)
        cv2.invert(transf_mat, transf_mat)
        transf_mats.append(transf_mat)

    # Multiply into an equivalent single transformation matrix
    transf_mat = transf_mats[0]
    for i in range(1, last_index):
        transf_mat = transf_mat.dot(transf_mats[i])

    # Transform the actual corner points
    transf_points = cv2.perspectiveTransform(
        np.float32(point_list[last_index]).reshape(-1, 1, 2), transf_mat)

    board_corners = np.int32(
        [transf_points[i][0] for i in range(len(transf_points))])

    # Now obtain the corners of each square in the chessboard
    # To do so we need also the last transformation matrix
    last_transf_mat = cv2.getPerspectiveTransform(
        np.float32(point_list[last_index]), ptslims)
    cv2.invert(last_transf_mat, last_transf_mat)
    transf_mat = transf_mat.dot(last_transf_mat)

    # Generate the corners of the squares as if the board were of size
    # 1200x1200
    corners = []
    for row_corner in range(0, 1200 + 150, 150):
        for col_corner in range(0, 1200 + 150, 150):
            corners.append([row_corner, col_corner])

    # Transform the corners of the squares
    square_corners = cv2.perspectiveTransform(
        np.float32(corners).reshape(-1, 1, 2), transf_mat)

    square_corners = np.int32(
        [square_corners[i][0] for i in range(len(square_corners))])

    return board_corners, square_corners
Example #6
def TSAI(Hmaker2world, Hgrid2cam):
    A = []
    n = len(Hgrid2cam)
    Hgij = np.zeros(Hmaker2world.shape)
    Hcij = np.zeros(Hgrid2cam.shape)
    for i in range(n - 1):
        Hgij[:, :, i] = cv2.invert(Hmaker2world[:, :, i + 1])[1].dot(
            Hmaker2world[:, :, i])
        Hcij[:, :, i] = Hgrid2cam[:, :, i + 1].dot(
            cv2.invert(Hgrid2cam[:, :, i])[1])
        rgij = cv2.Rodrigues()
Example #7
def hartleyRectify(points1, points2, imgSize, M1, M2, D1, D2, F = None):
    F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 3, 0.99)
    #print 'mask\n', mask
    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        points1, points2, F, imgSize)
    retval, M1i = cv2.invert(M1)
    retval, M2i = cv2.invert(M2)
    R1, R2 = np.dot(np.dot(M1i, H1), M1), np.dot(np.dot(M2i, H2), M2)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, M1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, M2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), F
Example #8
def hartleyRectify(points1, points2, imgSize, M1, M2, D1, D2, F):
    # F, mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC, 3, 0.99)
    # print 'mask\n', mask
    retval, H1, H2 = cv2.stereoRectifyUncalibrated(
        points1, points2, F, imgSize)
    retval, M1i = cv2.invert(M1)
    retval, M2i = cv2.invert(M2)
    R1, R2 = np.dot(np.dot(M1i, H1), M1), np.dot(np.dot(M2i, H2), M2)
    map1x, map1y = cv2.initUndistortRectifyMap(M1, D1, R1, M1, imgSize, cv2.CV_32FC1)
    map2x, map2y = cv2.initUndistortRectifyMap(M2, D2, R2, M2, imgSize, cv2.CV_32FC1)
    return (map1x, map1y, map2x, map2y), F
Example #9
def inv(a):
    if not isinstance(a, cp.Mat):
        raise TypeError("Must use Mat objects for inv()")
    elif not cp.CV_[a.dtype.name] in [cv.CV_32F, cv.CV_64F]:
        raise TypeError("Must use float32 or float64 dtype for inv()")
    shape = (a.shape[1], a.shape[0])
    out = cp.Mat(shape, dtype=a.dtype, UMat=a.UMat)
    if shape[0] == shape[1]:
        decompType = cv.DECOMP_LU
    else:
        decompType = cv.DECOMP_SVD
    cv.invert(a._, dst=out._, flags=decompType)
    return out
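The same decomposition choice can be exercised without the cp.Mat wrapper; a sketch with a synthetic non-square matrix, where DECOMP_SVD produces the pseudo-inverse:

import cv2
import numpy as np

a = np.random.rand(4, 3).astype(np.float32)         # non-square, so only a pseudo-inverse exists
ret, a_pinv = cv2.invert(a, flags=cv2.DECOMP_SVD)   # DECOMP_LU would reject a non-square matrix
print(a_pinv.shape)                                 # (3, 4), matching the transposed shape allocated above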
Example #10
    def desenhalinha(self, frame, H):
        start_line = (self.point_in_model[0][0][0].astype(int),
                      self._model.params['upperLineY'])
        end_line = (self.point_in_model[0][0][0].astype(int),
                    self._model.params['lowerLineY'])

        md = self.model_image
        print("startl: %s" % (start_line, ))
        print("endll: %s" % (end_line, ))
        cv2.line(md, start_line, end_line, (255, 0, 0), 2)
        cv2.imshow('linha_modelo', md)
        ldst = self._modelTr.dst
        cv2.line(ldst, start_line, end_line, (255, 0, 0), 2)
        cv2.imshow('modelo tudo', ldst)

        res, inv_H = cv2.invert(H)
        model_line_points = np.array([start_line, end_line], dtype=np.float32)
        field_line_points = cv2.perspectiveTransform(
            np.array([model_line_points]), inv_H)
        start_line_field = (field_line_points[0][0][0],
                            field_line_points[0][0][1])
        end_line_field = (field_line_points[0][1][0],
                          field_line_points[0][1][1])

        print("startl_field: %s" % (start_line_field, ))
        print("endll_field: %s" % (end_line_field, ))
        #frm=frame
        #cv2.line(frm, start_line_field, end_line_field, (0, 0, 255), 2)
        #cv2.imshow('linha_field', frm)
        #cv2.waitKey()

        height, width, depth = frame.shape
        line_mask = np.zeros((height, width), np.uint8)
        line_image = np.zeros((height, width, 3), np.uint8)
        cv2.line(line_image, start_line_field, end_line_field, (0, 0, 255), 4)
        cv2.line(line_mask, start_line_field, end_line_field, (255, 255, 255),
                 3)

        # field_mask = self.mask_builder(image, 38, 88, 34, 101, 0, 174)
        hsv_image = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        field_mask = cv2.inRange(hsv_image, self._hsv_low, self._hsv_high)
        field_mask_inv = cv2.bitwise_not(field_mask)

        cv2.imshow('field_mask', field_mask)
        cv2.imshow('field_mask_inv', field_mask_inv)
        #cv2.waitKey()

        lineToDraw = cv2.addWeighted(line_mask, 1, field_mask_inv, -1, 0)
        lineToDraw_inv = cv2.bitwise_not(lineToDraw)
        line = cv2.bitwise_and(line_image, line_image, mask=lineToDraw)

        field = cv2.bitwise_and(frame, frame, mask=lineToDraw_inv)
        output = cv2.addWeighted(line, 1, field, 1, 0)

        #cv2.imshow('line', line)
        #cv2.imshow('field', field)
        #cv2.imshow('output', output)
        #cv2.waitKey()

        return output
Example #11
def calculateMatchingPrecisionAndRecallAirsim(**kwargs):
    width, height = kwargs.get('width'), kwargs.get('height')
    H1to2 = kwargs.get('H1to2')
    depth1 = kwargs.get('depth1')
    depth2 = kwargs.get('depth2')
    keypoints1 = kwargs.get('points1')
    keypoints2 = kwargs.get('points2')
    matches1 = kwargs.get('matches1')
    matches2 = kwargs.get('matches2')
    nTruePositiveCount: int = 0
    err_thresh: float = kwargs.get('err_thresh')

    fPrecision:float = 0.0
    fRecall:float = 0.0

    assert (len(matches1) == len(matches2))
    nMatches:int = len(matches1)

    K = numpy.eye(3).astype(numpy.float32)
    pK = K.reshape(9)
    pK[4] = pK[0] = width/2
    pK[2] = width/2
    pK[5] = height/2

    _, Kinv = cv2.invert(K)

    matches1t = calcProjectionsAirsim(keypoints1, H1to2, depth1, K, Kinv);
    calcProjectionsAirsim(matches1, H1to2, depth1, K, Kinv, matches1t);
Example #12
def warp_back(warped, m, Lane):

    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    # Recast the x and y points into usable format for cv2.fillPoly()
    pts_left = np.array(
        [np.transpose(np.vstack([Lane.left_line.allx, Lane.left_line.ally]))])
    pts_right = np.array([
        np.flipud(
            np.transpose(
                np.vstack([Lane.right_line.allx, Lane.right_line.ally])))
    ])
    pts = np.hstack((pts_left, pts_right))

    Lane.left_line.pointx = pts_left[0, :, 0]

    Lane.right_line.pointx = pts_right[0, :, 0]

    Lane.left_line.pointy = pts_left[0, :, 1]

    Lane.right_line.pointy = pts_right[0, :, 1]

    Lane.center = (pts_right[0][-1][0] +
                   pts_left[0][-1][0]) // 2 - warped.shape[1] // 2

    # Draw the lane onto the warped blank image
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    invertible, m_inv = cv2.invert(m)
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    newwarp = cv2.warpPerspective(color_warp, m_inv,
                                  (color_warp.shape[1], color_warp.shape[0]))

    return newwarp, Lane
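Inverting m explicitly is one option; cv2.warpPerspective can also be told to treat the given matrix as the inverse map, which is what Example #40 further below does. A minimal sketch with a hypothetical overlay and matrix:

import cv2
import numpy as np

color_warp = np.zeros((720, 1280, 3), np.uint8)                     # hypothetical warped overlay
m = np.array([[1.0, 0.2, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])   # hypothetical perspective matrix
newwarp = cv2.warpPerspective(color_warp, m, (1280, 720),
                              flags=cv2.INTER_LINEAR | cv2.WARP_INVERSE_MAP)  # same result as warping with cv2.invert(m)[1]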
Example #13
def drawCueBall(img, target, matrix):
    inv = cv2.invert(matrix)
    matrixInverted = np.asarray(inv[1][:, :])

    # get White
    lower = np.array([220, 220, 220])
    upper = np.array([255, 255, 255])
    shapeMask = cv2.inRange(img, lower, upper)

    # find the contours in the mask
    (cnts, _) = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,
                                 cv2.CHAIN_APPROX_SIMPLE)

    # loop over the contours
    for c in cnts:
        # draw the contour and show only when larger than 15
        # cv2.drawContours(img, [c], -1, (0, 255, 0), 2)
        (x, y), radius = cv2.minEnclosingCircle(c)
        center = np.array([int(x), int(y), 1])
        radius = int(radius)
        if 22 < radius < 25:
            translatedCenter = matrixInverted.dot(center)
            cv2.circle(target,
                       (int(translatedCenter[0]), int(translatedCenter[1])),
                       16, (0, 0, 255), 2)
Example #14
    def readCalibrationParams(self, path):
        parser = JSONParser()
        jsonObj = parser.parse(path)

        self.calibrationMatrix = np.array(jsonObj['cameraMatrix'],np.float32)
        ret, self.inverseCalibrationMatrix = cv2.invert(self.calibrationMatrix)
        self.distortCoefs = np.array(jsonObj['distortedCoefficients'], np.float32)
Example #15
def transformPoints(x, y, reverse=False, integer=True):
    if not reverse:
        H = transform_matrix
    else:
        val, H = cv2.invert(transform_matrix)

    # get the elements in the transform matrix
    h0 = H[0, 0]
    h1 = H[0, 1]
    h2 = H[0, 2]
    h3 = H[1, 0]
    h4 = H[1, 1]
    h5 = H[1, 2]
    h6 = H[2, 0]
    h7 = H[2, 1]
    h8 = H[2, 2]

    tx = (h0 * x + h1 * y + h2)
    ty = (h3 * x + h4 * y + h5)
    tz = (h6 * x + h7 * y + h8)

    if integer:
        px = int(tx / tz)
        py = int(ty / tz)
        Z = int(1 / tz)
    else:
        px = tx / tz
        py = ty / tz
        Z = 1 / tz

    return px, py
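The hand-rolled arithmetic above is the same projective mapping that cv2.perspectiveTransform applies; a short sketch of the equivalent calls with a hypothetical matrix and point:

import cv2
import numpy as np

transform_matrix = np.array([[1.0, 0.0, 10.0],
                             [0.0, 1.0, 20.0],
                             [0.0, 0.0, 1.0]])                              # hypothetical homography
pts = np.float32([[5, 7]]).reshape(-1, 1, 2)
forward = cv2.perspectiveTransform(pts, transform_matrix)                   # reverse=False path
backward = cv2.perspectiveTransform(pts, cv2.invert(transform_matrix)[1])   # reverse=True path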
Example #16
def virtual_point(homographyMatrix):
    pts = np.float32([[round(CAM_WIDTH / 2),
                       round(CAM_HEIGHT / 2)]]).reshape(-1, 1, 2)
    m = cv2.invert(homographyMatrix)
    # find the location of this same point in the projector image
    dst = cv2.perspectiveTransform(pts, m[1])
    return dst
Example #17
def matchProfiles(model, profiles):
    tProfiles = np.zeros(profiles.shape[0])
    for l in range(profiles.shape[0]):
        covar = model[0][l]
        mean = model[1][l]
        icovar = cv2.invert(covar, flags=cv2.DECOMP_SVD)[1]

        m = (profiles.shape[1] - 1) // 2
        n = (model[1].shape[1] - 1) // 2

        dist = np.zeros(2 * (m - n) + 1)
        for t in range(2 * (m - n) + 1):
            tMean = translateMean(mean, t, m)
            tIcovar = translateCovar(icovar, t, m)
            dist[t] = cv2.Mahalanobis(profiles[l], tMean, tIcovar)
        tProfiles[l] = m - n - np.argmin(dist)
    ''' PROFILE VISUALISATION
    ppl.vlines(np.arange(mean.shape[0]), np.zeros_like(mean), mean)
    ppl.show()
    ppl.vlines(np.arange(profiles[20].shape[0]), np.zeros_like(profiles[20]), profiles[20])
    ppl.show()
    print tProfiles
    '''

    return tProfiles
Example #18
def backproject_sample_rect(warp_matrix, sample_dims):
    warp_matrix_inv = cv2.invert(warp_matrix)[1]
    #warp_matrix_inv = np.array(warp_matrix_inv)/cv.Get2D(warp_matrix_inv, 2, 2)[0]
    #print warp_matrix_inv
    warp_matrix_inv = cv.fromarray(warp_matrix_inv)
    rectified_shape = np.array((1., 1.))
    width_center = rectified_shape[1] / 2
    sample_dims = [x / 2 for x in sample_dims]

    #roi_bottom = (rectified_shape[0] - neighborhood_length) * np.random.random() + \
    #    neighborhood_length
    roi_bottom = 0.5

    rectified_sample_roi = [
        width_center - sample_dims[0], roi_bottom + sample_dims[1],
        width_center - sample_dims[0], roi_bottom - sample_dims[1],
        width_center + sample_dims[0], roi_bottom - sample_dims[1],
        width_center + sample_dims[0], roi_bottom + sample_dims[1]
    ]
    rectified_sample_roi = np.array(rectified_sample_roi, dtype=np.float32)
    rectified_sample_roi = rectified_sample_roi.reshape((1, 4, 2))
    backprojected_sample_boundary = cv.CreateMat(1, 4, cv.CV_32FC2)
    rectified_sample_roi = cv.fromarray(rectified_sample_roi)
    cv.PerspectiveTransform(rectified_sample_roi,
                            backprojected_sample_boundary, warp_matrix_inv)

    return cvutils.array2point_list(np.array(backprojected_sample_boundary))
Example #19
    def applyHomographyToPoint(self, frame, H):
        start_line = (self.point_in_model[0][0][0].astype(int),
                      self._model.params['upperLineY'])
        end_line = (self.point_in_model[0][0][0].astype(int),
                    self._model.params['lowerLineY'])

        start_line_scrimmage = (
            self.scrimmage_point_in_model[0][0][0].astype(int),
            self._model.params['upperLineY'])
        end_line_scrimmage = (
            self.scrimmage_point_in_model[0][0][0].astype(int),
            self._model.params['lowerLineY'])

        res, inv_H = cv2.invert(H)
        model_line_points = np.array([start_line, end_line], dtype=np.float32)
        model_line_scrimmage_points = np.array(
            [start_line_scrimmage, end_line_scrimmage], dtype=np.float32)

        field_line_points = cv2.perspectiveTransform(
            np.array([model_line_points]), inv_H)
        field_scrimmage_line_points = cv2.perspectiveTransform(
            np.array([model_line_scrimmage_points]), inv_H)

        start_line_field = (field_line_points[0][0][0],
                            field_line_points[0][0][1])
        end_line_field = (field_line_points[0][1][0],
                          field_line_points[0][1][1])

        start_scrimmage_line_field = (field_scrimmage_line_points[0][0][0],
                                      field_scrimmage_line_points[0][0][1])
        end_scrimmage_line_field = (field_scrimmage_line_points[0][1][0],
                                    field_scrimmage_line_points[0][1][1])
        return self.draw_line(frame, start_line_field, end_line_field,
                              start_scrimmage_line_field,
                              end_scrimmage_line_field)
Example #20
def true_sky(border, src_image):

    # Build the sky-image mask and the ground-image mask
    sky_mask = make_sky_mask(src_image, border, 1)
    ground_mask = make_sky_mask(src_image, border, 0)

    # Extract the sky image and the ground image
    sky_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(sky_mask, cv2.COLOR_GRAY2BGR))
    ground_image_ma = np.ma.array(src_image, mask = cv2.cvtColor(ground_mask, cv2.COLOR_GRAY2BGR))

    # Reshape the sky and ground regions to n*3
    sky_image = sky_image_ma.compressed()
    ground_image = ground_image_ma.compressed()

    sky_image.shape = (sky_image.size//3, 3)
    ground_image.shape = (ground_image.size//3, 3)

    # k-means clustering to refine the sky region boundary -- 2 clusters
    sky_image_float = np.float32(sky_image)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
    flags = cv2.KMEANS_RANDOM_CENTERS
    compactness, labels, centers = cv2.kmeans(sky_image_float, 2, None, criteria, 10, flags)

    sky_label_0 = sky_image[labels.ravel() == 0]
    sky_label_1 = sky_image[labels.ravel() == 1]

    sky_covar_0, sky_mean_0 = cv2.calcCovarMatrix(sky_label_0, mean= None, flags= cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)
    sky_covar_1, sky_mean_1 = cv2.calcCovarMatrix(sky_label_1, mean= None, flags= cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)
    ground_covar, ground_mean = cv2.calcCovarMatrix(ground_image, mean= None, flags= cv2.COVAR_ROWS + cv2.COVAR_NORMAL + cv2.COVAR_SCALE)

    ic_s0 = cv2.invert(sky_covar_0, flags=cv2.DECOMP_SVD)[1]  # pass via the flags keyword; the second positional slot is dst
    ic_s1 = cv2.invert(sky_covar_1, flags=cv2.DECOMP_SVD)[1]
    ic_g = cv2.invert(ground_covar, flags=cv2.DECOMP_SVD)[1]

    # Infer the true sky region
    if cv2.Mahalanobis(sky_mean_0, ground_mean, ic_s0) > cv2.Mahalanobis(sky_mean_1, ground_mean, ic_s1):
        sky_mean = sky_mean_0
        sky_covar = sky_covar_0
        ic_s = ic_s0
    else:
        sky_mean = sky_mean_1
        sky_covar = sky_covar_1
        ic_s = ic_s1


    return sky_covar,sky_mean,ic_s,ground_covar, ground_mean,ic_g
Example #21
    def __init__(self, homography_matrix):
        Transformation.__init__(self)

        self._homography = homography_matrix
        self._homography_inverse = None

        _, inv = cv2.invert(self._homography)
        self._homography_inverse = inv
Example #22
def multi_thread_load_dataset(files):
    sizes = []
    labels = []
    Ks = []
    for f in files:
        img_path = os.path.join(root, 'training', 'image_2',
                                '{}.png'.format(f))
        label_path = os.path.join(root, 'training', 'label_2',
                                  '{}.txt'.format(f))
        calib_path = os.path.join(root, 'training', 'calib',
                                  '{}.txt'.format(f))

        # process shape
        img = cv2.imread(img_path)
        sizes.append(np.array([img.shape[1], img.shape[0]]))

        # process calib
        P2 = calib_utils.get_calib_P2(calib_path).reshape((3, -1))
        K = P2[:3, :3]
        T = P2[:3, 3]
        invK = cv2.invert(K, flags=cv2.DECOMP_LU)[1]
        TT = np.matmul(invK, T.reshape(-1, 1)).reshape((-1))
        with open(label_path) as f:
            objs = []
            for line in f.read().splitlines():
                splits = line.split()
                cls = utils.name_2_label(splits[0])
                if cls == -1:
                    continue
                cls = np.array([float(cls)])
                bbox = np.array([
                    float(splits[4]),
                    float(splits[5]),
                    float(splits[6]),
                    float(splits[7])
                ])
                dim = np.array(
                    [float(splits[8]),
                     float(splits[9]),
                     float(splits[10])])  # H, W, L
                loc = np.array([
                    float(splits[11]),
                    float(splits[12]) - float(splits[8]) / 2,
                    float(splits[13])
                ]) + TT  # x, y, z
                alpha = np.array([float(splits[3])])
                ry = np.array([float(splits[-1])])
                objs.append(
                    np.concatenate([cls, bbox, dim, alpha, ry, loc],
                                   axis=0).reshape((1, -1)))
            labels.append(np.concatenate(objs, axis=0))
        Ks.append(K.reshape((-1)))

    return sizes, labels, Ks
Example #23
def remove_noise_and_smooth(img):
    # img = cv2.imread(file_name, 0)
    # img = cv2.adaptiveThreshold(img.astype(np.uint8), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 41)
    kernel = np.ones((1, 1), np.uint8)
    opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
    closing = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
    or_image = cv2.bitwise_or(img, closing)

    or_image = cv2.invert(or_image)
    kernel = np.ones((1, 1), np.uint8)
    or_image = cv2.erode(or_image, kernel, iterations=1)
    return or_image
Example #24
    def get2Dmapping(self, srcPts, dstPts):
        """
        Use findHomography to get a transform from the 2D source points to the corresponding 2D dest points.
        Will return a src2dst transformation matrix, as well as the inverted dst2src matrix
        """
        src2dst_transform, mask = cv2.findHomography(srcPts.reshape(-1, 1, 2),
                                                     dstPts.reshape(-1, 1, 2),
                                                     cv2.RANSAC, 5.0)
        dst2src_transform = cv2.invert(
            src2dst_transform
        )  # note: the transformation matrix is in index=1 of the returned var
        return src2dst_transform, dst2src_transform[1]
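A round-trip sketch of the same pattern with four hypothetical point correspondences (findHomography needs at least four):

import cv2
import numpy as np

srcPts = np.float32([[0, 0], [100, 0], [100, 100], [0, 100]])
dstPts = np.float32([[10, 5], [110, 8], [108, 112], [6, 104]])
src2dst, mask = cv2.findHomography(srcPts.reshape(-1, 1, 2),
                                   dstPts.reshape(-1, 1, 2), cv2.RANSAC, 5.0)
dst2src = cv2.invert(src2dst)[1]
roundtrip = cv2.perspectiveTransform(
    cv2.perspectiveTransform(srcPts.reshape(-1, 1, 2), src2dst), dst2src)   # approximately srcPts again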
Example #25
def overlaySolutionSkewed(originalCorners, originalImage, newImage, M):
    rows, cols, __ = np.shape(originalImage)
    _, M = cv2.invert(M)

    unwarped = cv2.warpPerspective(
        newImage,
        M,
        (cols, rows),
    )
    cv2.fillPoly(originalImage, pts=[originalCorners], color=(0, 0, 0))
    final2 = cv2.add(unwarped, originalImage)
    return final2
Example #26
 def warp_blob(self, blob, transform_matrix):
     if transform_matrix.dtype != numpy.float32:
         new_transform = numpy.ndarray(transform_matrix.shape,
                                       numpy.float32)
         new_transform[:] = transform_matrix
         transform_matrix = new_transform
     _, invert_transform = cv2.invert(transform_matrix)
     shape = (blob.shape[0], 3)
     extended_blob = numpy.ndarray(shape, dtype=blob.dtype)
     extended_blob[...,:2] = blob
     extended_blob[...,2] = 1.
     warped_blob = invert_transform.dot(extended_blob.transpose())
     return warped_blob.transpose()[...,:2]
Example #27
    def fixedCvInvert(self, H):
        """[If the determinate is zero, OpenCV returns a wrong inverted matrix. ]

        Args:
            H ([type]): [3x3 Matrix]

        Returns:
            [type]: [Inverted matrix]
        """
        if (cv2.determinant(H) != 0.0):
            return cv2.invert(H)[1]
        else:
            return np.array([[1., 0., -H[0, -1]], [0., 1., -H[1, -1]],
                             [0., 0., 0.]])
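A sketch of the behaviour the docstring above guards against: with the default DECOMP_LU, cv2.invert reports a singular input through its first return value instead of raising, and the returned matrix should not be trusted in that case (rank-deficient toy matrix):

import cv2
import numpy as np

H = np.array([[1.0, 2.0, 3.0],
              [2.0, 4.0, 6.0],          # second row is twice the first, so det(H) == 0
              [0.0, 0.0, 1.0]])
ret, H_inv = cv2.invert(H)              # DECOMP_LU: ret == 0.0 signals a singular matrix
if ret == 0.0:
    print('singular input; H_inv is not a valid inverse')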
Example #28
    def _correct_colors(self, parents, next_H, order):
        log.debug('Recoloring in order %s',
                  ','.join(self._images[i].name for i in order))
        labs = [
            cv2.cvtColor(i.image, cv2.COLOR_RGB2LAB).astype(np.float32)
            for i in self._images
        ]
        for dst_idx in order:
            assert dst_idx != self.center
            src_idx = parents[dst_idx]
            src = self._images[src_idx]
            dst = self._images[dst_idx]
            log.debug('Color Correcting %s => %s', src.name, dst.name)
            src, dst = src.image, dst.image
            src_mask = np.zeros(src.shape[:2], dtype=np.uint8)
            dst_mask = np.zeros(dst.shape[:2], dtype=np.uint8)

            H = next_H[dst_idx]
            src_corners = cv2.perspectiveTransform(
                image_corners(dst).reshape(1, 4, 2), H)
            dst_corners = cv2.perspectiveTransform(
                image_corners(src).reshape(1, 4, 2),
                cv2.invert(H)[1])
            log.debug('Using relative H:\n%s', H)
            log.debug('Src mask corners:\n%s', src_corners)
            log.debug('Dst mask corners:\n%s', dst_corners)

            cv2.fillPoly(src_mask, np.rint(src_corners).astype(int), 255)
            cv2.fillPoly(dst_mask, np.rint(dst_corners).astype(int), 255)

            if self.debug:
                imshow(src_mask)
                imshow(cv2.bitwise_and(src, src, mask=src_mask))
                imshow(dst_mask)
                imshow(cv2.bitwise_and(dst, dst, mask=dst_mask))
            src_lab = labs[src_idx]
            dst_lab = labs[dst_idx]
            # Probably a better way to do this
            # Adapted from http://www.pyimagesearch.com/2014/06/30/super-fast-color-transfer-images/
            src_mean, src_std = color_stats(src_lab, mask=src_mask)
            dst_mean, dst_std = color_stats(dst_lab, mask=dst_mask)
            dst_lab[:] = ((dst_lab - dst_mean) *
                          (dst_std / src_std)) + src_mean

        for image, lab in zip(self._images, labs):
            lab = np.clip(lab, 0, 255).astype(np.uint8)
            image.image[:] = cv2.merge(
                cv2.split(cv2.cvtColor(lab, cv2.COLOR_LAB2RGB)) +
                [image.image[..., 3]])
Example #29
def dst_xy_to_src_xy(xy, normH, src_size=(1, 1), dsize=(1, 1)):
    '''
    xy: x, y coords or point in src image
    normH: normalized homography matrix
    src_size: size of src image (width, height)
    dsize: size of dest space (width, height)
    '''
    normH = cv2.invert(normH)[1]
    x = float(xy[0]) / src_size[0]
    y = float(xy[1]) / src_size[1]
    dst_x = (normH[0, 0] * x + normH[0, 1] * y +
             normH[0, 2]) / (normH[2, 0] * x + normH[2, 1] * y + normH[2, 2])
    dst_y = (normH[1, 0] * x + normH[1, 1] * y +
             normH[1, 2]) / (normH[2, 0] * x + normH[2, 1] * y + normH[2, 2])
    return (dst_x * dsize[0], dst_y * dsize[1])
Example #30
def warp_perspective(img_list, ymax, ymin, xmax, xmin, ht, homography):
    stitch_r = np.zeros((ymax - ymin, xmax - xmin, 3)).astype(np.uint8)
    trans_mat = cv2.invert(ht.dot(homography))[1]
    for i in range(ymax - ymin):
        for j in range(xmax - xmin):
            img_i = int(
                (trans_mat[1][0] * j + trans_mat[1][1] * i + trans_mat[1][2]) /
                (trans_mat[2][0] * j + trans_mat[2][1] * i + trans_mat[2][2]))
            img_j = int(
                (trans_mat[0][0] * j + trans_mat[0][1] * i + trans_mat[0][2]) /
                (trans_mat[2][0] * j + trans_mat[2][1] * i + trans_mat[2][2]))
            if img_i >= 0 and img_j >= 0 and img_i < img_list[1].shape[
                    0] and img_j < img_list[1].shape[1]:
                stitch_r[i][j] = img_list[1][img_i][img_j]

    return stitch_r
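The per-pixel loop above reimplements what cv2.warpPerspective does internally (it inverts the matrix itself and samples the source), so it can usually be replaced by a single call, up to rounding and interpolation differences. A sketch with stand-in sizes and matrices:

import cv2
import numpy as np

img = np.zeros((480, 640, 3), np.uint8)                    # stand-in for img_list[1]
forward = np.eye(3)                                        # stand-in for ht.dot(homography)
stitched = cv2.warpPerspective(img, forward, (640, 480))   # equivalent of the manual loop over trans_mat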
Example #31
    def detect(self, imageGray, M):
        _, M_inv = cv2.invert(M)
        im_dst = self.correctPerspective(imageGray, M_inv)
        im_Color = cv2.cvtColor(im_dst, cv2.COLOR_GRAY2BGR)

        #self.inProcess = True

        #im_O = lineDetect(im_dst)
        x1 = 20
        y1 = int(im_dst.shape[0] * 0.15)
        x2 = im_dst.shape[1] - 20
        y2 = y1 + 55
        cv2.rectangle(im_Color, (x1, y1), (x2, y2), (0, 0, 255), 1)

        L, im_Color = self.detectLetters(im_dst, (x1, y1), (x2, y2))

        return L, im_Color
Example #32
			elif opt in ("-v", "--verbose"):
				_debug = 1                  
			elif opt in ("-b", "--database"):
				database = arg
		if not os.path.isfile(database):
			print("Database %s does not exist." % database, file=sys.stderr)
			sys.exit(1)
		conn = sqlite3.connect(database)
		setUpDatabase(database)
		c = conn.cursor()
		if _debug == 1:
			print "reading out data..."
		namelist = getNames(c)
		channellist = getChannels(c)
		(eigval, eigbase, avg) = getEigenBase(c)
		(_, inveigbase) = cv2.invert(eigbase)
		inveigbase = numpy.matrix(inveigbase)
		processed = 0
		matrix = []
		LIMIT = len(namelist)
		length = min(LIMIT,len(namelist))
		start = time.time()
		last = start
		#TODO: optimisation: use arrays instead of dict()s
		representants = dict()
		for name in namelist[:LIMIT]:
			representants[name[0]] = dict()
			for channel in channellist:
				representants[name[0]][channel[0]] = getRepresentants(name[0],channel[0],inveigbase,c)
		for name in range(0,len(namelist[:LIMIT])-1):
			processed += 1
Example #33
#

flags = cv2.COVAR_NORMAL | cv2.COVAR_ROWS

grouped_data = []
icovariance = []
for c in range(0, 10):
    grouped_data.append([])
    for i in range(0, len(digits)):
        if labels[i] == c:
            grouped_data[c].append(digits[i])
    print('group:', c, len(grouped_data[c]))
    x = np.array(grouped_data[c])
    covar, mean = cv2.calcCovarMatrix(x, None, flags)
    icovar = np.empty(covar.shape, covar.dtype)
    cv2.invert(covar, icovar, cv2.DECOMP_SVD)
    icovariance.append(icovar)


#
# Q5: For each group, calculate the Mahalanobis distance of every element
#     to the centroid, then draw the centroid followed by the three farthest
#     elements (showing their Mahalanobis distance).
#

for index, centroid in enumerate(centroids):
    points = np.float32(grouped_data[index])
    icovar = np.float32(icovariance[index])

    mdistance = []
    for point in points:
def distance_finder(frame, lines = None, fallas = None):

    if lines != None:
        # Important variables
        distaciaFocal = 1286    #1286.07
        largoEntreLineas = 25   #cm
        altura_camara = 11.2 #cm

        # Less important variables
        height = frame.shape[0]
        width = frame.shape[1]

        # Unpack
        [lineaIzq,lineaDer] = lines

        ### Now comes the parallel-line computation ###

        # First compute the four limit points of the power lines.
        rectOriginal = np.float32(perspective_bound(frame, [lineaIzq,lineaDer]))
        # Now define the bounds of the new image
        rectPerspective = np.float32([[0,0], [height,0], [0,height], [height,height]])
        # Compute the perspective matrix
        if lineaIzq == None:
            frame_perspective = np.zeros((height,height,3))
        else:
            M = cv2.getPerspectiveTransform(rectOriginal,rectPerspective)
            # Warp the image to see how it turns out
            frame_perspective = cv2.warpPerspective(frame,M, (height,height))

            # Iterate over every point we found to locate its parallel
            for color in fallas:
                for centro in fallas[color]:
                    # Transform the centroid
                    centro_perspectiva = cv2.perspectiveTransform(np.array([[centro[0]]],dtype = 'float32'),M)    # Points fed to perspectiveTransform must have the form [[[x1,y1],[x2,y2], ...]], with EXACTLY that many brackets.
                    # Compute the same point on the other line
                    centro_perspectiva = centro_perspectiva[0][0].copy()
                    if centro_perspectiva[0] < height/2:
                        punto_contrario_perspectiva = np.array([height, centro_perspectiva[1]])
                    else:
                        punto_contrario_perspectiva = np.array([0, centro_perspectiva[1]], dtype = 'float')
                    # Transform the point back
                    # print "M = {}".format(M)
                    # print "invM = {}".format(cv2.invert(M))
                    punto_contrario = cv2.perspectiveTransform(np.array([[punto_contrario_perspectiva]],dtype = 'float32'),cv2.invert(M)[1])
                    # Draw the lines we found
                    punto_contrario = punto_contrario[0][0].copy()
                    cv2.line(frame,tuple(centro[0]),tuple(punto_contrario),coloresDibujo['azul'],2)
                    # Compute the distance in pixels
                    distPixeles = np.sqrt(np.square(centro[0][0] - punto_contrario[0]) + np.square(centro[0][1] - punto_contrario[1]))
                    # Use triangle similarity to obtain the distance
                    distCamara = largoEntreLineas*distaciaFocal/distPixeles
                    # Now use the Pythagorean relation to compute the distance from the fault to the chassis
                    distChasis = np.sqrt(np.square(distCamara) - np.square(altura_camara))
                    # Store the distance in centimeters in the faults dictionary

                    centro[1] = distChasis

            return fallas
        # Iterate over every point we found to locate its parallel
        for color in fallas:
            for centro in fallas[color]:
                # Transform the centroid
                centro_perspectiva = cv2.perspectiveTransform(np.array([[centro[0]]],dtype = 'float32'),M)    # Points fed to perspectiveTransform must have the form [[[x1,y1],[x2,y2], ...]], with EXACTLY that many brackets.
                # Compute the same point on the other line
                centro_perspectiva = centro_perspectiva[0][0].copy()
                if centro_perspectiva[0] < height/2:
                    punto_contrario_perspectiva = np.array([height, centro_perspectiva[1]])
                else:
                    punto_contrario_perspectiva = np.array([0, centro_perspectiva[1]], dtype = 'float')
                # Transform the point back
                # print "M = {}".format(M)
                # print "invM = {}".format(cv2.invert(M))
                punto_contrario = cv2.perspectiveTransform(np.array([[punto_contrario_perspectiva]],dtype = 'float32'),cv2.invert(M)[1])
                # Draw the lines we found
                punto_contrario = punto_contrario[0][0].copy()
                cv2.line(frame_copy,tuple(centro[0]),tuple(punto_contrario),coloresDibujo['azul'],2)
                # Compute the distance in pixels
                distPixeles = np.sqrt(np.square(centro[0][0] - punto_contrario[0]) + np.square(centro[0][1] - punto_contrario[1]))
                # Use triangle similarity to obtain the distance
                distCamara = largoEntreLineas*distaciaFocal/distPixeles
                # Now use the Pythagorean relation to compute the distance from the fault to the chassis
                distChasis = np.sqrt(np.square(distCamara) - np.square(altura_camara))
                # Store the distance in centimeters in the faults dictionary

                centro[1] = distChasis

        # Now print and display everything.
        # Small loop to plot the faults and their distances.
Example #36
def inverted(img, s):
    img = img.astype(float)
    im = cv2.invert(img)
    write(im, 'inverted/inv_'+s+'.jpg' )
Example #37
    def distanceFinder(self, frame, lines = None, fallas = None):

        if lines != None:

            # Unpack
            [lineaIzq,lineaDer] = lines

            ### Now comes the parallel-line computation ###

            # First compute the four limit points of the power lines.
            rectOriginal = np.float32(self.perspective_bound(frame, [lineaIzq,lineaDer]))
            # Now define the bounds of the new image
            rectPerspective = np.float32([[0,0], [self.height,0], [0,self.height], [self.height,self.height]])
            # Compute the perspective matrix
            if lineaIzq == None:
                frame_perspective = np.zeros((self.height,self.height,3))
            else:
                M = cv2.getPerspectiveTransform(rectOriginal,rectPerspective)

                if 'distance' in self.debug_mode:
                    # Warp the image to see how it turns out
                    frame_perspective = cv2.warpPerspective(frame,M, (self.height,self.height))

                print("    [-] Calcular matrix de perspectiva: {}".format(time.time() - self.last_time))
                self.last_time = time.time()

                # Iterate over every point we found to locate its parallel
                for color in fallas:
                    for centro in fallas[color]:
                        # Transform the centroid
                        centro_perspectiva = cv2.perspectiveTransform(np.array([[centro[0]]],dtype = 'float32'),M)    # Points fed to perspectiveTransform must have the form [[[x1,y1],[x2,y2], ...]], with EXACTLY that many brackets.
                        # Compute the same point on the other line
                        centro_perspectiva = centro_perspectiva[0][0].copy()
                        if centro_perspectiva[0] < self.height/2:
                            punto_contrario_perspectiva = np.array([self.height, centro_perspectiva[1]])
                        else:
                            punto_contrario_perspectiva = np.array([0, centro_perspectiva[1]], dtype = 'float')
                        # Transform the point back
                        # print "M = {}".format(M)
                        # print "invM = {}".format(cv2.invert(M))
                        punto_contrario = cv2.perspectiveTransform(np.array([[punto_contrario_perspectiva]],dtype = 'float32'),cv2.invert(M)[1])
                        # Draw the lines we found
                        punto_contrario = punto_contrario[0][0].copy()
                        cv2.line(frame,tuple(centro[0]),tuple(punto_contrario),self.coloresDibujo['azul'],2)
                        # Compute the distance in pixels
                        distPixeles = np.sqrt(np.square(centro[0][0] - punto_contrario[0]) + np.square(centro[0][1] - punto_contrario[1]))
                        # Use triangle similarity to obtain the distance
                        distCamara = self.largoEntreLineas*self.distaciaFocal/distPixeles
                        # Now use the Pythagorean relation to compute the distance from the fault to the chassis
                        distChasis = np.sqrt(np.square(distCamara) - np.square(self.altura_camara))
                        # Store the distance in centimeters in the faults dictionary

                        centro[1] = distChasis

                print("    [-] Trans y destransformaciones de perspectiva {}".format(time.time() - self.last_time))
                self.last_time = time.time()

                #Debugging output
                if 'distance' in self.debug_mode:

                    # Take a copy of the image
                    frame_copy = self.frame.copy()

                    # Draw some useful information
                    for color in fallas:
                        i = 1
                        for objeto in fallas[color]:
                            cv2.circle(frame_copy,tuple(objeto[0]),40,self.coloresDibujo[color],3)
                            cv2.putText(frame_copy, "{} #{}".format(color,i), (objeto[0][0] + 45, objeto[0][1]), cv2.FONT_HERSHEY_SIMPLEX,1.0, self.coloresDibujo[color], 2)
                            cv2.putText(frame_copy, "{:0.2f}cm".format(objeto[1]), (objeto[0][0] + 45, objeto[0][1] + 20), cv2.FONT_HERSHEY_SIMPLEX,0.7, self.coloresDibujo[color], 2)
                            i+=1

                    resultado = np.hstack((frame_copy,frame_perspective))
                    resultado = cv2.resize(resultado,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_AREA)

                    self.frame_debug_distance = resultado.copy()

                    print("    [-] Distance debug: {}".format(time.time() - self.last_time))
                    self.last_time = time.time()


                return fallas
Example #38
    ## Get Matches
    im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    
    keypoints,descriptors = detector.detectAndCompute(im,None)
    captured = CapturedImage(im, descriptors,keypoints)
    
    matches = getMatches(captured, target)
    
    ## Only update Homography and board if we can see it
    if len(matches) > 4:
        ## Get points, find Homography
        src_pts = np.float32([ target.getKeypoints()[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
        dst_pts = np.float32([ captured.getKeypoints()[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)
        H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,6.0)

        successInv, invH = cv2.invert(H)
        imRect = cv2.warpPerspective(im, invH, (w,h))

        matchesMask = mask.ravel().tolist()
        
        xPieces = [None]*9
        if None in pieces:
            ## Check for X's
            for quadrant in range(0, 9):

                quadrantImage, upperLeft, lowerRight = extractQuadrant(imRect, quadrant, boardX, boardY, w, h)
                # cv2.rectangle(frame,(top_left[0], top_left[1]), (bottom_right[0], bottom_right[1]), 100, 2)
                res = cv2.matchTemplate(quadrantImage,X_template,cv2.TM_CCOEFF_NORMED)
                min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res) # this currently searches whole image.  can use mask for each box

                # print "maxval = ", max_locval
Example #39
def invert(matrix):
    return cv2.invert(matrix)[1]    # only output the result, not the status; use with care
Example #40
        
T = np.array([[1, 0, 0, 0],[0, 1, 0, 0],[0,0, 1, dist],[0, 0, 0, 1]])
A2 = np.array([[f, 0, w/2,0],[0, f, h/2, 0], [0,0,1,0]])

A2[0:3,0:3]=camera_matrix
      
M2 = np.dot(A2,np.dot(T, np.dot(R,A1)))
print(M2)
warped = cv2.warpPerspective(gray2, M2, (sz[1],sz[0]),flags=cv2.WARP_INVERSE_MAP)


cv2.imwrite(str(mnum)+'POST.jpg',warped)
cap.release()

_,M3 = cv2.invert(M2)


ymin =  ceil(-M3[2,2]/ M3[2,1])

yyy = np.arange(ymin,1000)

out  = (M3[2,1]*yyy + M3[2,2])
#plt.plot(yyy,out)

# this is how to convert x-y positions in the image to real 2-d coordinates
original = np.array([((0,800), (1920,800),(0,1000),(1920, 1000))], dtype=np.float32)
original = np.array([((0,800),(0,0))], dtype=np.float32)
converted = cv2.perspectiveTransform(original, M3)
print(converted)
plt.plot(converted[0,:,0],converted[0,:,1],'.')
Example #41
    def calibrate(self, confFile, output):
        parser = JSONParser()
        jsonObj = parser.parseCameraConf(confFile, self.id)

        print('Calibration status: parsing configuration file')

        if jsonObj is None:
            print('Wrong conf file')
            return None

        print('Calibration status: parsing finished')

        patternSize = (jsonObj['patternWidth']-1,jsonObj['patternHeight']-1)
        patternPoints = np.zeros((np.prod(patternSize), 3), np.float32)
        patternPoints[:, :2] = np.indices(patternSize).T.reshape(-1, 2)
        patternPoints *= jsonObj['squareSize']

        w, h = self.resolution[0], self.resolution[1]
        objPoints = []
        imgPoints = []
        frameCnt = 0
        frameDelay = int((jsonObj['inputDelay']/1000.0)*60)

        print('Calibration status: selecting frames for calibration')

        while(1):
            ret, frame = self.capture.read()
            if (ret is True) and ((frameCnt % frameDelay) == 0):

                if(jsonObj['flipAroundHorizontalAxis'] is True):
                    cv2.flip(frame, 0, frame)

                found, corners = cv2.findChessboardCorners(frame,patternSize)

                if found:
                    term = (cv2.TERM_CRITERIA_EPS*cv2.TERM_CRITERIA_COUNT, 30, 0.1)
                    cvtFrame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
                    cv2.cornerSubPix(cvtFrame,corners,(5,5),(-1,-1), term)
                else:
                    continue

                imgPoints.append(corners.reshape(-1,2))
                objPoints.append(patternPoints)

                if(len(imgPoints) == jsonObj['numOfFrames']):
                    break

            frameCnt += 1

        print('Calibration status: frames selected')
        print('Calibration status: started calibration')

        rms, cameraMatrix, distCoefs, rvecs, tvecs = cv2.calibrateCamera(objPoints, imgPoints, (int(w), int(h)), self.getFlags(jsonObj), None)

        print('Calibration status: calibration finished')

        self.calibrationMatrix = cameraMatrix
        ret, self.inverseCalibrationMatrix = cv2.invert(cameraMatrix)
        self.distortCoefs = distCoefs

        print "Calibration status: writing results to file"
        object = {'id':self.id, 'cameraMatrix': cameraMatrix.tolist(), 'distortedCoefficients': distCoefs.tolist()}
        jsonObj = json.dumps(object)
        parser.writeJSON(jsonObj,output)

        print("RMS:", rms)
Example #42
def polynomial_triangulation(u1, P1, u2, P2):
    """
    Polynomial (Optimal) triangulation.
    Uses Linear-Eigen for final triangulation.
    Relative speed: 0.1
    
    (u1, P1) is the reference pair containing normalized image coordinates (x, y) and the corresponding camera matrix.
    (u2, P2) is the second pair.
    
    u1 and u2 are matrices: the number of points equals the number of rows, and must be the same for u1 and u2.
    
    The status-vector is based on the assumption that all 3D points have finite coordinates.
    """
    P1_full = np.eye(4); P1_full[0:3, :] = P1[0:3, :]    # convert to 4x4
    P2_full = np.eye(4); P2_full[0:3, :] = P2[0:3, :]    # convert to 4x4
    P_canon = P2_full.dot(cv2.invert(P1_full)[1])    # find canonical P which satisfies P2 = P_canon * P1
    
    # "F = [t]_cross * R" [HZ 9.2.4]; transpose is needed for numpy
    F = np.cross(P_canon[0:3, 3], P_canon[0:3, 0:3], axisb=0).T
    
    # Other way of calculating "F" [HZ (9.2)]
    #op1 = (P2[0:3, 3:4] - P2[0:3, 0:3] .dot (cv2.invert(P1[0:3, 0:3])[1]) .dot (P1[0:3, 3:4]))
    #op2 = P2[0:3, 0:4] .dot (cv2.invert(P1_full)[1][0:4, 0:3])
    #F = np.cross(op1.reshape(-1), op2, axisb=0).T
    
    # Project 2D matches to closest pair of epipolar lines
    u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # For a purely sideways trajectory of 2nd cam, correctMatches() returns NaN for all possible points!
    if np.isnan(u1_new).all() or np.isnan(u2_new).all():
        F = cv2.findFundamentalMat(u1, u2, cv2.FM_8POINT)[0]    # so use a noisy version of the fund mat
        u1_new, u2_new = cv2.correctMatches(F, u1.reshape(1, len(u1), 2), u2.reshape(1, len(u1), 2))
    
    # Triangulate using the refined image points
    return linear_eigen_triangulation(u1_new[0], P1, u2_new[0], P2)    # TODO: replace with linear_LS: better results for points not at Inf
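The final call delegates to a linear_eigen_triangulation helper that is not shown here; a self-contained alternative for that last step is cv2.triangulatePoints, sketched below with hypothetical cameras and points:

import cv2
import numpy as np

P1 = np.hstack([np.eye(3), np.zeros((3, 1))])                 # hypothetical reference camera, 3x4
P2 = np.hstack([np.eye(3), np.array([[0.1], [0.0], [0.0]])])  # hypothetical second camera, 3x4
u1 = np.random.rand(5, 2)                                     # stand-ins for the refined image points
u2 = u1 + 0.01
X = cv2.triangulatePoints(P1, P2, u1.T, u2.T)                 # 4xN homogeneous coordinates
X = (X[:3] / X[3]).T                                          # Euclidean 3D points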