Example #1
    def RotationMatrix_Image2(self):
        """
        return the rotation matrix of the second image

        :return: rotation matrix

        :rtype: np.array 3x3
        """
        return Compute3DRotationMatrix(self.__relativeOrientationImage2[3], self.__relativeOrientationImage2[4],
                                       self.__relativeOrientationImage2[5])
Example #2
    def RotationMatrix_Image1(self):
        """
        return the rotation matrix of the first image

        :return: rotation matrix

        :rtype: np.array 3x3
        """
        return Compute3DRotationMatrix(self.__relativeOrientationImage1[3], self.__relativeOrientationImage1[4],
                                       self.__relativeOrientationImage1[5])
Example #3
    def rotationMatrix(self):
        """
        The rotation matrix of the image

        Relates to the exterior orientation
        :return: rotation matrix

        :rtype: np.ndarray (3x3)
        """

        R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],
                                    self.exteriorOrientationParameters[5])

        return R
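All three properties above delegate to Compute3DRotationMatrix with the three rotation angles stored at indices 3-5 of the orientation vector. A minimal sketch of such a helper, assuming the common omega-phi-kappa convention R = Rx(omega) · Ry(phi) · Rz(kappa); the library's actual axis order may differ:

import numpy as np

def compute_3d_rotation_matrix(omega, phi, kappa):
    # Hypothetical stand-in for Compute3DRotationMatrix; assumes
    # R = Rx(omega) @ Ry(phi) @ Rz(kappa), angles in radians.
    rx = np.array([[1, 0, 0],
                   [0, np.cos(omega), -np.sin(omega)],
                   [0, np.sin(omega), np.cos(omega)]])
    ry = np.array([[np.cos(phi), 0, np.sin(phi)],
                   [0, 1, 0],
                   [-np.sin(phi), 0, np.cos(phi)]])
    rz = np.array([[np.cos(kappa), -np.sin(kappa), 0],
                   [np.sin(kappa), np.cos(kappa), 0],
                   [0, 0, 1]])
    return rx.dot(ry).dot(rz)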
Example #4
    def ImageToGround_GivenZ(self, imagePoints, Z_values):
        """
        Compute corresponding ground point given the height in world system

        :param imagePoints: points in image space
        :param Z_values: height of the ground points


        :type Z_values: np.array nx1
        :type imagePoints: np.array nx2

        :return: corresponding ground points

        :rtype: np.ndarray

        .. warning::

             This function is empty, need implementation

        .. note::

            - The exterior orientation parameters needed here are called by ``self.exteriorOrientationParameters``
            - The focal length can be called by ``self.camera.focalLength``

        **Usage Example**

        .. code-block:: py


            imgPnt = np.array([-50., -33.])
            img.ImageToGround_GivenZ(imgPnt, 115.)

        """
        # print('ImagePoints:',imagePoints,'\nZ_Values:',Z_values)
        imgpoint = np.array([[imagePoints[0],\
                              imagePoints[1],\
                              -self.camera.focalLength]]).reshape(3,1)
        # print('\nImgpoints:',imgpoint)
        r = Compute3DRotationMatrix(self.exteriorOrientationParameters[3],
                                    self.exteriorOrientationParameters[4],
                                    self.exteriorOrientationParameters[5])
        lam = (Z_values - self.exteriorOrientationParameters[2]) / np.dot(r, imgpoint)[2]
        # print('\nLam:',lam)
        return (self.exteriorOrientationParameters[0:3]).reshape(
            3, 1) + lam * np.dot(r, imgpoint)
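The same scale-and-project step can be written standalone. A minimal numeric sketch, assuming one image point, a 152 mm focal length, and purely illustrative exterior orientation values (none of these numbers come from the original code):

import numpy as np

X0, Y0, Z0 = 100.0, 200.0, 500.0        # hypothetical perspective centre [m]
omega, phi, kappa = 0.0, 0.0, 0.1       # hypothetical rotation angles [rad]
f = 0.152                               # focal length [m]

R = compute_3d_rotation_matrix(omega, phi, kappa)   # sketch shown earlier
x_cam = np.array([-0.050, -0.033, -f])              # ray in the camera frame

Z_ground = 115.0
lam = (Z_ground - Z0) / np.dot(R, x_cam)[2]         # scale factor along the ray
ground = np.array([X0, Y0, Z0]) + lam * np.dot(R, x_cam)
# ground[2] equals Z_ground by construction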
Example #5
    def Build_A_B_W(self, cameraPoints1, cameraPoints2, x):
        """
        Function for computing the A and B matrices and vector w.
        :param cameraPoints1: points in the first camera system
        :param cameraPoints2: corresponding homologous points in the second camera system
        :param x: initial values vector by, bz, omega, phi, kappa (bx = 1)

        :type cameraPoints1: np.array nx3
        :type cameraPoints2: np.array nx3
        :type x: np.array (5,1)

        :return: A ,B matrices, w vector

        :rtype: tuple
        """
        numPnts = cameraPoints1.shape[0]  # Number of points

        dbdy = np.array([[0, 0, 1], [0, 0, 0], [-1, 0, 0]])
        dbdz = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 0]])

        dXdx = np.array([1, 0, 0])
        dXdy = np.array([0, 1, 0])

        # Compute the rotation matrix and its derivatives
        rotationMatrix2 = Compute3DRotationMatrix(x[2, 0], x[3, 0], x[4, 0])
        dRdOmega = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0],
                                                     'omega')
        dRdPhi = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0],
                                                   'phi')
        dRdKappa = Compute3DRotationDerivativeMatrix(x[2, 0], x[3, 0], x[4, 0],
                                                     'kappa')

        # Create the skew matrix from the vector [bx, by, bz]
        bMatrix = ComputeSkewMatrixFromVector(np.array([1, x[0, 0], x[1, 0]]))

        # Compute A matrix; the coplanar derivatives with respect to the unknowns by, bz, omega, phi, kappa
        A = np.zeros((numPnts, 5))
        A[:, 0] = np.diag(
            np.dot(cameraPoints1,
                   np.dot(dbdy, np.dot(rotationMatrix2, cameraPoints2.T))))  # derivative with respect to by
        A[:, 1] = np.diag(
            np.dot(cameraPoints1,
                   np.dot(dbdz, np.dot(rotationMatrix2, cameraPoints2.T))))  # derivative with respect to bz
        A[:, 2] = np.diag(
            np.dot(cameraPoints1,
                   np.dot(bMatrix, np.dot(dRdOmega, cameraPoints2.T))))  # derivative with respect to omega
        A[:, 3] = np.diag(
            np.dot(cameraPoints1,
                   np.dot(bMatrix, np.dot(dRdPhi, cameraPoints2.T))))  # derivative with respect to phi
        A[:, 4] = np.diag(
            np.dot(cameraPoints1,
                   np.dot(bMatrix, np.dot(dRdKappa, cameraPoints2.T))))  # derivative with respect to kappa

        # Compute B matrix; the coplanarity derivatives with respect to the observations x', y', x'', y''.
        B = np.zeros((numPnts, 4 * numPnts))
        k = 0
        for i in range(numPnts):
            p1vec = cameraPoints1[i, :]
            p2vec = cameraPoints2[i, :]
            B[i, k] = np.dot(dXdx, np.dot(bMatrix, np.dot(rotationMatrix2, p2vec)))
            B[i, k + 1] = np.dot(dXdy, np.dot(bMatrix, np.dot(rotationMatrix2, p2vec)))
            B[i, k + 2] = np.dot(np.dot(p1vec, np.dot(bMatrix, rotationMatrix2)), dXdx)
            B[i, k + 3] = np.dot(np.dot(p1vec, np.dot(bMatrix, rotationMatrix2)), dXdy)
            k += 4

        # w vector
        w = np.diag(
            np.dot(cameraPoints1,
                   np.dot(bMatrix, np.dot(rotationMatrix2, cameraPoints2.T))))

        return A, B, w
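The returned A, B and w are typically fed into one iteration of a Gauss-Helmert (condition equation) adjustment. A minimal sketch of that update, assuming unit weights for the observations; the function name is hypothetical:

import numpy as np

def coplanarity_update(A, B, w):
    # One Gauss-Helmert step: M = B B^T with unit observation weights,
    # dx are the corrections to by, bz, omega, phi, kappa.
    M = np.dot(B, B.T)
    N = np.dot(A.T, np.linalg.solve(M, A))
    u = np.dot(A.T, np.linalg.solve(M, w))
    dx = -np.linalg.solve(N, u)
    return dx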
Example #6
    # plot y axis - green
    xs, ys, zs = [x0[0, 0], yAxis[0, 0]], [x0[1, 0], yAxis[1, 0]], [x0[2, 0], yAxis[2, 0]]
    ax.plot(xs, ys, zs, c='g')
    # plot z axis - blue
    xs, ys, zs = [x0[0, 0], zAxis[0, 0]], [x0[1, 0], zAxis[1, 0]], [x0[2, 0], zAxis[2, 0]]
    ax.plot(xs, ys, zs, c='b')


if __name__ == '__main__':
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    # check if the drawRays function works
    grdPnts = np.array([[201.062, 741.351, 241.987]])
    drawRays(grdPnts, np.array([[50], [50], [50]]))

    # check if the drawImageFrame function works
    f = 0.153
    R = Compute3DRotationMatrix(np.pi / 3, 0, 0)
    scale = 50
    drawImageFrame(0.5, 0.5, R, np.array([[50], [50], [50]]), f, scale)

    # check if drawOrientation function works
    R = Compute3DRotationMatrix(np.pi / 3, 0, 0)
    x0 = np.array([[50], [50], [50]])
    drawOrientation(R, x0, scale)

    plt.show()
Example #7
    def ImagesToGround(self, imagePoints1, imagePoints2, Method=None):
        """
        Computes ground coordinates of homologous points

        :param imagePoints1: points in image 1
        :param imagePoints2: corresponding points in image 2
        :param Method: method to use for the ray intersection, three options exist: geometric, vector, Collinearity

        :type imagePoints1: np.array nx2
        :type imagePoints2: np.array nx2
        :type Method: string

        :return: ground points, their accuracies.

        :rtype: dict

        .. warning::

            This function is empty, need implementation


        **Usage example**

        .. code-block:: py

            camera = Camera(152, None, None, None, None)
            image1 = SingleImage(camera)
            image2 = SingleImage(camera)

            imagePoints1 = np.array([[-4.83,7.80],
                                [-4.64, 134.86],
                                [5.39,-100.80],
                                [4.58,55.13],
                                [98.73,9.59],
                                [62.39,128.00],
                                [67.90,143.92],
                                    [56.54,-85.76]])
            imagePoints2 = np.array([[-83.17,6.53],
                                 [-102.32,146.36],
                                 [-62.84,-102.87],
                                 [-97.33,56.40],
                                 [-3.51,14.86],
                                 [-27.44,136.08],
                                 [-23.70,152.90],
                                 [-8.08,-78.07]])

            new = ImagePair(image1, image2)

            new.ImagesToGround(imagePoints1, imagePoints2, 'geometric')

        """
        picpoints_1_mm = self.__image1.ImageToCamera(imagePoints1)
        picpoints_2_mm = self.__image2.ImageToCamera(imagePoints2)
        exori_XYZ_1 = self.__image1.exteriorOrientationParameters[0:3]
        exori_XYZ_2 = self.__image2.exteriorOrientationParameters[0:3]

        result_Gpoints = []
        dist_e = []

        for i in range(picpoints_1_mm.shape[0]): #calculating per point set
            # following the geometric method for forward intersection:
            x_img1 = np.hstack((picpoints_1_mm[i, :], -self.__image1.camera.focalLength)) / 1000  # to meter
            x_img2 = np.hstack((picpoints_2_mm[i, :], -self.__image2.camera.focalLength)) / 1000
            v_img1 = (np.dot(Compute3DRotationMatrix(self.__image1.exteriorOrientationParameters[3],
                                                     self.__image1.exteriorOrientationParameters[4],
                                                     self.__image1.exteriorOrientationParameters[5]), x_img1)).reshape(3, 1)  # rotate image-1 ray into the world frame
            v_img2 = (np.dot(Compute3DRotationMatrix(self.__image2.exteriorOrientationParameters[3],
                                                     self.__image2.exteriorOrientationParameters[4],
                                                     self.__image2.exteriorOrientationParameters[5]), x_img2)).reshape(3, 1)  # rotate image-2 ray into the world frame
            v_img1 /= la.norm(v_img1)  # normalization
            v_img2 /= la.norm(v_img2)

            # Creating proper vectors
            vvt_img1 = np.dot(v_img1, v_img1.T)
            vvt_img2 = np.dot(v_img2, v_img2.T)
            I = np.eye(v_img1.shape[0])

            # projectors orthogonal to each ray
            A_img1 = I - vvt_img1
            A_img2 = I - vvt_img2

            # L vector
            l1 = np.dot(A_img1, self.PerspectiveCenter_Image1)
            l2 = np.dot(A_img2, self.PerspectiveCenter_Image2)

            # Stack
            A = np.vstack((A_img1, A_img2))
            l = np.vstack((l1.reshape(3,1), l2.reshape(3,1)))

            # Direct solution (no iterations needed)
            X = np.dot(la.inv(np.dot(A.T, A)), np.dot(A.T, l))
            # dist_e1 = np.dot((I - vvt_img1), X - exori_XYZ_1)
            dist_e1 = np.dot(A_img1, X) - l1.reshape(3, 1)
            # dist_e2 = np.dot((I - vvt_img2), X - exori_XYZ_2)
            dist_e2 = np.dot(A_img2, X) - l2.reshape(3, 1)

            dist_e.append((np.abs(dist_e1) + np.abs(dist_e2)) / 2) #Average
            result_Gpoints.append([X[0,0],X[1,0],X[2,0]])
        return np.array(result_Gpoints), np.array(dist_e)
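The per-point solve in the loop above is the standard two-ray geometric intersection. A self-contained sketch of that step, with hypothetical names (o1, o2 are the perspective centres, v1, v2 the unit ray directions in the world frame):

import numpy as np

def intersect_two_rays(o1, v1, o2, v2):
    # Least-squares point closest to both rays; all inputs are 3x1 arrays
    # and v1, v2 are assumed to be unit vectors.
    I = np.eye(3)
    A1 = I - np.dot(v1, v1.T)           # projector orthogonal to ray 1
    A2 = I - np.dot(v2, v2.T)           # projector orthogonal to ray 2
    A = np.vstack((A1, A2))
    l = np.vstack((np.dot(A1, o1), np.dot(A2, o2)))
    return np.linalg.solve(np.dot(A.T, A), np.dot(A.T, l))   # 3x1 ground point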
Example #8
def ComputeDesignMatrix(groundPoints, Xo, Yo, Zo, omega, phi, kappa, foc):
    """
        Compute the derivatives of the collinear law (design matrix)

        :param groundPoints: Ground coordinates of the control points
        :param Xo, Yo, Zo: coordinates of the perspective center
        :param omega, phi, kappa: rotation angles of the exterior orientation
        :param foc: focal length

        :type groundPoints: np.array nx3

        :return: The design matrix

        :rtype: np.array nx6

    """

    # Coordinates subtraction
    dX = groundPoints[:, 0] - Xo
    dY = groundPoints[:, 1] - Yo
    dZ = groundPoints[:, 2] - Zo
    dXYZ = np.vstack([dX, dY, dZ])

    rotationMatrixT = (Compute3DRotationMatrix(omega, phi, kappa)).T
    rotatedG = rotationMatrixT.dot(dXYZ)
    rT1g = rotatedG[0, :]
    rT2g = rotatedG[1, :]
    rT3g = rotatedG[2, :]

    focalBySqauredRT3g = foc / rT3g**2

    dxdg = rotationMatrixT[0, :][None, :] * rT3g[:, None] - rT1g[:, None] * rotationMatrixT[2, :][None, :]
    dydg = rotationMatrixT[1, :][None, :] * rT3g[:, None] - rT2g[:, None] * rotationMatrixT[2, :][None, :]

    dgdX0 = np.array([-1, 0, 0], 'f')
    dgdY0 = np.array([0, -1, 0], 'f')
    dgdZ0 = np.array([0, 0, -1], 'f')

    # Derivatives with respect to X0
    dxdX0 = -focalBySqauredRT3g * np.dot(dxdg, dgdX0)
    dydX0 = -focalBySqauredRT3g * np.dot(dydg, dgdX0)

    # Derivatives with respect to Y0
    dxdY0 = -focalBySqauredRT3g * np.dot(dxdg, dgdY0)
    dydY0 = -focalBySqauredRT3g * np.dot(dydg, dgdY0)

    # Derivatives with respect to Z0
    dxdZ0 = -focalBySqauredRT3g * np.dot(dxdg, dgdZ0)
    dydZ0 = -focalBySqauredRT3g * np.dot(dydg, dgdZ0)

    dRTdOmega = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'omega').T
    dRTdPhi = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'phi').T
    dRTdKappa = Compute3DRotationDerivativeMatrix(omega, phi, kappa, 'kappa').T

    gRT3g = dXYZ * rT3g

    # Derivatives with respect to Omega
    dxdOmega = -focalBySqauredRT3g * (dRTdOmega[0, :][None, :].dot(gRT3g) -
                                      rT1g *
                                      (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]

    dydOmega = -focalBySqauredRT3g * (dRTdOmega[1, :][None, :].dot(gRT3g) -
                                      rT2g *
                                      (dRTdOmega[2, :][None, :].dot(dXYZ)))[0]

    # Derivatives with respect to Phi
    dxdPhi = -focalBySqauredRT3g * (dRTdPhi[0, :][None, :].dot(gRT3g) - rT1g *
                                    (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]

    dydPhi = -focalBySqauredRT3g * (dRTdPhi[1, :][None, :].dot(gRT3g) - rT2g *
                                    (dRTdPhi[2, :][None, :].dot(dXYZ)))[0]

    # Derivatives with respect to Kappa
    dxdKappa = -focalBySqauredRT3g * (dRTdKappa[0, :][None, :].dot(gRT3g) -
                                      rT1g *
                                      (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]

    dydKappa = -focalBySqauredRT3g * (dRTdKappa[1, :][None, :].dot(gRT3g) -
                                      rT2g *
                                      (dRTdKappa[2, :][None, :].dot(dXYZ)))[0]

    # all derivatives of x and y
    dd = np.array([
        np.vstack([dxdX0, dxdY0, dxdZ0, dxdOmega, dxdPhi, dxdKappa]).T,
        np.vstack([dydX0, dydY0, dydZ0, dydOmega, dydPhi, dydKappa]).T
    ])

    a = np.zeros((2 * dd[0].shape[0], 6))
    a[0::2] = dd[0]
    a[1::2] = dd[1]

    return a
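The design matrix produced here is normally used inside an iterative resection or bundle adjustment. A minimal sketch of one Gauss-Newton step, assuming hypothetical vectors `observed` and `computed` that hold the measured and back-projected image coordinates in the same interleaved x, y order as the matrix rows:

import numpy as np

def adjustment_step(A, observed, computed):
    # One Gauss-Newton step of a least-squares adjustment:
    # solve the normal equations for the parameter corrections.
    l = observed - computed                       # misclosure vector (2n,)
    dx = np.linalg.solve(np.dot(A.T, A), np.dot(A.T, l))
    return dx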
Example #9
    def ImageToGround_GivenZ(self, imagePoints, Z_values):
        """
        Compute corresponding ground point given the height in world system

        :param imagePoints: points in image space
        :param Z_values: height of the ground points


        :type Z_values: np.array nx1
        :type imagePoints: np.array nx2

        :return: corresponding ground points

        :rtype: np.ndarray

        .. warning::

             This function is empty, need implementation

        .. note::

            - The exterior orientation parameters needed here are called by ``self.exteriorOrientationParameters``
            - The focal length can be called by ``self.camera.focalLength``

        **Usage Example**

        .. code-block:: py


            imgPnt = np.array([-50., -33.])
            img.ImageToGround_GivenZ(imgPnt, 115.)

        """
        cameraPoints = self.ImageToCamera(imagePoints)
        cameraPoints = cameraPoints.T
        pars = self.exteriorOrientationParameters
        X0 = pars[0]
        Y0 = pars[1]
        Z0 = pars[2]

        T = np.array([[X0], [Y0], [Z0]])

        omega = pars[3]
        phi = pars[4]
        kappa = pars[5]
        R = Compute3DRotationMatrix(omega, phi, kappa)

        f = self.camera.focalLength

        # allocating memory for return array
        groundPoints = []

        for i in range(len(cameraPoints[1])):
            camVec = np.append(cameraPoints[:, i], -f)  # append -f as the third (z) component
            lam = (Z_values - Z0) / (np.dot(R[2, :], camVec))

            X = X0 + lam * np.dot(R[0, :], camVec)
            Y = Y0 + lam * np.dot(R[1, :], camVec)

            xy = [X, Y, Z_values]
            groundPoints.append(xy)

        groundPoints = np.array(groundPoints)

        return groundPoints
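A hedged usage sketch for this variant, assuming `img` is a SingleImage whose camera and exteriorOrientationParameters are already set; the point coordinates below are purely illustrative:

import numpy as np

imgPnts = np.array([[-50., -33.],
                    [ 10.,  42.]])                    # illustrative image points
grdPnts = img.ImageToGround_GivenZ(imgPnts, 115.)     # all rays cut the plane Z = 115
# grdPnts is an nx3 array of [X, Y, Z] per point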
Example #10
    def RayIntersection(self, cameraPoints1, cameraPoints2, cameraPoints3):
        """
        Compute coordinates of the corresponding model point

        :param cameraPoints1: points in camera1 coordinate system
        :param cameraPoints2: points in camera2 coordinate system
        :param cameraPoints3: points in camera3 coordinate system

        :type cameraPoints1: np.array nx3
        :type cameraPoints2: np.array nx3
        :type cameraPoints3: np.array nx3

        :return: point in model coordinate system
        :rtype: np.array nx3
        """
        result_Gpoints = []
        dist_e = []

        Ra = Compute3DRotationMatrix(self.__imagePair1.relativeOrientationImage2[3], \
                                     self.__imagePair1.relativeOrientationImage2[4], \
                                     self.__imagePair1.relativeOrientationImage2[5])

        Rb = Compute3DRotationMatrix(self.__imagePair2.relativeOrientationImage2[3], \
                                     self.__imagePair2.relativeOrientationImage2[4], \
                                     self.__imagePair2.relativeOrientationImage2[5])

        R3 = np.dot(Ra, Rb)
        o1 = np.array([[0],[0],[0]])
        o2 = np.array([[self.__imagePair1.relativeOrientationImage2[0]], \
                       [self.__imagePair1.relativeOrientationImage2[1]], \
                       [self.__imagePair1.relativeOrientationImage2[2]]])
        b23 = np.array([[self.__imagePair2.relativeOrientationImage2[0]], \
                        [self.__imagePair2.relativeOrientationImage2[1]], \
                        [self.__imagePair2.relativeOrientationImage2[2]]])
        o3 = o2 + self.__scale[0] * np.dot(R3, b23)


        for i in range(cameraPoints1.shape[0]):  # calculating per point set
            # following the geometric method for forward intersection:
            x_img1 = cameraPoints1[i, :] / 1000  # to meter
            x_img2 = cameraPoints2[i, :] / 1000
            x_img3 = cameraPoints3[i, :] / 1000


            v_img1 = (x_img1).reshape(3, 1)
            v_img2 = (np.dot(Ra, x_img2)).reshape(3, 1)
            v_img3 = (np.dot(R3, x_img3)).reshape(3, 1)
            v_img1 /= np.linalg.norm(v_img1)  # normalization
            v_img2 /= np.linalg.norm(v_img2)
            v_img3 /= np.linalg.norm(v_img3)

            # outer products v v^T for each ray
            vvt_img1 = np.dot(v_img1, v_img1.T)
            vvt_img2 = np.dot(v_img2, v_img2.T)
            vvt_img3 = np.dot(v_img3, v_img3.T)
            I = np.eye(v_img1.shape[0])

            # projectors orthogonal to each ray
            A_img1 = I - vvt_img1
            A_img2 = I - vvt_img2
            A_img3 = I - vvt_img3


            # L vector
            l1 = np.dot(A_img1, o1)
            l2 = np.dot(A_img2, o2)
            l3 = np.dot(A_img3, o3)

            # Stack
            A = np.vstack((A_img1, A_img2, A_img3))
            l = np.vstack((l1, l2, l3))

            # Direct solution (no iterations needed)
            X = np.dot(np.linalg.inv(np.dot(A.T, A)), np.dot(A.T, l))

            result_Gpoints.append([X[0,0],X[1,0],X[2,0]])

        return np.array(result_Gpoints)
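The same least-squares intersection generalises from two to any number of rays. A minimal sketch, assuming each direction vector has already been rotated into the model frame and normalised (the names are hypothetical):

import numpy as np

def intersect_rays(origins, directions):
    # origins: list of 3x1 ray origins, directions: list of 3x1 unit vectors.
    I = np.eye(3)
    blocks = [I - np.dot(v, v.T) for v in directions]     # per-ray projectors
    A = np.vstack(blocks)
    l = np.vstack([np.dot(Ai, oi) for Ai, oi in zip(blocks, origins)])
    return np.linalg.solve(np.dot(A.T, A), np.dot(A.T, l))   # 3x1 model point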