Example #1
    def __init__(self, imageList_, dataMatrix_):
        '''
        :param imageList_: List of all images in dataset.
        :param dataMatrix_: Matrix with all pose data in dataset.
        :return:
        '''
        self.imageList = []  # stores the corrected images after the projection transform
        self.dataMatrix = np.asarray(dataMatrix_).astype(np.float64)  # np.float is removed in NumPy >= 1.20
        self.masks_backward = []
        self.masks_forward = []

        for i in range(0, len(imageList_)):
            # downsample the image to speed things up. 4000x3000 is huge!
            image = imageList_[i][::6, ::6, :]
            M = gm.computeUnRotMatrix(self.dataMatrix[i, :])

            # TODO create list of masks for feature detection for each image.
            mask_backward, mask_forward = create_mask(self.dataMatrix,
                                                      i,
                                                      image.shape,
                                                      full_mask=True)

            # Perform a perspective transformation based on pose information.
            # Ideally, this will make each image look as if it's viewed from the top.
            # We assume the ground plane is perfectly flat.
            correctedImage = gm.warpPerspectiveWithPadding(image, M)
            mask_backward = gm.warpPerspectiveWithPadding(mask_backward, M)
            mask_forward = gm.warpPerspectiveWithPadding(mask_forward, M)
            self.masks_backward.append(mask_backward)
            self.masks_forward.append(mask_forward)
            # store only corrected images to use in combination
            self.imageList.append(correctedImage)
        self.resultImage = self.imageList[0]
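
The helper gm.warpPerspectiveWithPadding is defined outside these snippets. As a rough mental model only (the function name, signature, and details below are assumptions rather than the library's actual code), a padded warp typically projects the image corners through the homography, shifts everything into the positive quadrant, and allocates a canvas large enough to hold the whole result:

import cv2
import numpy as np

def warp_perspective_with_padding_sketch(image, M):
    # Hypothetical stand-in for gm.warpPerspectiveWithPadding (sketch only).
    h, w = image.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, M).reshape(-1, 2)
    x_min, y_min = np.floor(warped.min(axis=0)).astype(int)
    x_max, y_max = np.ceil(warped.max(axis=0)).astype(int)
    # Shift the warp so every output pixel has non-negative coordinates.
    T = np.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]], dtype=np.float64)
    size = (int(x_max - x_min), int(y_max - y_min))
    return cv2.warpPerspective(image, T @ M, size)

Note that the variant called in Example #5 also returns the warped corner coordinates, so its real signature differs from this sketch.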
Example #2
 def __init__(self, imageList_, dataMatrix_):
     '''
     :param imageList_: List of all images in dataset.
     :param dataMatrix_: Matrix with all pose data in dataset.
     :return:
     '''
     self.imageList = []
     self.dataMatrix = dataMatrix_
     detector = None
     if imutils.is_cv2():
         detector = cv2.ORB()
     elif imutils.is_cv3():
         detector = cv2.ORB_create()
     for i in range(0, len(imageList_)):
         image = imageList_[i][::2, ::2, :]  # downsample the image to speed things up. 4000x3000 is huge!
         M = gm.computeUnRotMatrix(self.dataMatrix[i, :])
         # Perform a perspective transformation based on pose information.
         # Ideally, this will make each image look as if it's viewed from the top.
         # We assume the ground plane is perfectly flat.
         correctedImage = gm.warpPerspectiveWithPadding(image, M)
         self.imageList.append(correctedImage)  # store only corrected images to use in combination
     self.resultImage = self.imageList[0]
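
The cv2.ORB() / cv2.ORB_create() branch exists because the factory API changed between OpenCV 2.x and 3.x. A compact, version-agnostic helper (make_orb_detector and its n_features parameter are illustrative names, not part of the original code):

import cv2
import imutils

def make_orb_detector(n_features=500):
    # OpenCV 2.4.x exposes a constructor; OpenCV 3.x and later use a *_create factory.
    if imutils.is_cv2():
        return cv2.ORB(n_features)
    return cv2.ORB_create(nfeatures=n_features)

Also note that detector is created but never used inside this snippet; feature detection presumably happens elsewhere in the class.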
Example #3
def changePerspective(imageList, dataMatrix):
    # Note: the imageList argument is unused here; frames are re-read from temp/*.png.
    images = sorted(glob.glob("temp/*.png"))
    print("Warping Perspective of Images Now")

    for i in range(0, len(images)):
        image = cv2.imread(images[i])
        image = image[::2, ::2, :]

        M = gm.computeUnRotMatrix(dataMatrix[i, :])
        correctedImage = gm.warpPerspectiveWithPadding(image, M)

        cv2.imwrite("temp/" + str(i).zfill(4) + ".png", correctedImage)

    print("Done Warping Perspective")
Example #4
 def __init__(self, imageList_, dataMatrix_):
     '''
     :param imageList_: List of all images in dataset.
     :param dataMatrix_: Matrix with all pose data in dataset.
     :return:
     '''
     self.imageList = []
     self.dataMatrix = dataMatrix_
     detector = cv2.ORB()  # OpenCV 2.x API; on OpenCV 3+ use cv2.ORB_create()
     for i in range(0, len(imageList_)):
         image = imageList_[i][::2, ::2, :]  # downsample the image to speed things up. 4000x3000 is huge!
         M = gm.computeUnRotMatrix(self.dataMatrix[i, :])
         # Perform a perspective transformation based on pose information.
         # Ideally, this will make each image look as if it's viewed from the top.
         # We assume the ground plane is perfectly flat.
         correctedImage = gm.warpPerspectiveWithPadding(image, M)
         self.imageList.append(correctedImage)  # store only corrected images to use in combination
     self.resultImage = self.imageList[0]
Example #5
    def combine(self, data, pose):
        im = Image()

        if not self.init:
            self.init = True
            self.result = en.compress(data)
            im._id = self.count
            im._is_seed = True
            im._is_attached = True
            im._transformation = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
            im._pose = pose[:3]
            M = gm.computeUnRotMatrix(pose)
            self.result, corners = gm.warpPerspectiveWithPadding(
                self.result, M)
            im._corners = corners
            self.position_data.append(pose[:3])
            self.imageDataList.append(im)
            self.count += 1
            return

        image = en.compress(data)
        M = gm.computeUnRotMatrix(pose)
        image, corners = gm.warpPerspectiveWithPadding(image, M)
        im._id = self.count
        im._corners = corners
        im._pose = pose
        # self.position_data.append (im._pose[:3])
        self.imageDataList.append(im)
        total = self.get_neighbours(im._id)
        print(self.count, "neighbors:", total)
        mask_corners = self.get_mask_corners(total, im._id)
        mask_re = self.get_mask(mask_corners, self.result.shape[:2])

        gray_im = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, mask_im = cv2.threshold(gray_im, 1, 255, cv2.THRESH_BINARY)
        kp_im, desc_im = self.detector.detectAndCompute(gray_im, mask_im)

        print("No. of keypoints in the image:", len(kp_im))

        gray_re = cv2.cvtColor(self.result, cv2.COLOR_BGR2GRAY)
        kp_re, desc_re = self.detector.detectAndCompute(gray_re, mask_re)

        print("No. of keypoints in the result:", len(kp_re))

        matches = self.matcher.knnMatch(desc_im, desc_re, k=2)

        good = []
        for m, n in matches:
            if m.distance < 0.55 * n.distance:
                good.append(m)
        print(str(len(good)) + " Matches were Found")

        if len(good) <= 100:
            self.low_matches_handler(image, pose)

        matches = copy.copy(good)

        src_match = np.float32([kp_im[m.queryIdx].pt for m in matches])
        dst_match = np.float32([kp_re[m.trainIdx].pt for m in matches])

        src_pts = src_match.reshape(-1, 1, 2)
        dst_pts = dst_match.reshape(-1, 1, 2)

        ransac = RANSAC()
        final_src, final_dst = ransac.thread(src_match, dst_match, 50)
        h_agent = Homography()
        gh = h_agent.global_homography(final_src, final_dst)

        HomogResult = cv2.findHomography(src_pts, dst_pts, method=cv2.RANSAC)
        H = HomogResult[0]
        # H = gh

        final_w, final_h, offset_x, offset_y = final_size(
            image, self.result, H)
        mesh = get_mesh((final_w, final_h), self.mesh_size + 1)
        vertices = get_vertice((final_w, final_h), self.mesh_size,
                               (offset_x, offset_y))

        stitcher = Apap(0, [final_w, final_h], [offset_x, offset_y], 1)
        local_homography_im = np.zeros([final_h, final_w, 3, 3], dtype=np.float64)  # np.float is removed in NumPy >= 1.20
        local_homography_im[:, :] = H
        stitcher.local_homography2(final_src, final_dst, vertices,
                                   local_homography_im)

        translation = np.float32([[1, 0, offset_x], [0, 1, offset_y], [0, 0, 1]])
        fullTransformation = np.dot(translation, H)

        warpedImage = np.zeros([final_h, final_w, 3], dtype=np.uint8)
        stitcher.local_warp2(image, local_homography_im, warpedImage)

        warpedResImg = cv2.warpPerspective(self.result, translation,
                                           (final_w, final_h))
        self.result = np.where(warpedImage != 0, warpedImage, warpedResImg)

        # im._transformation = fullTransformation
        # im._is_attached = True
        self.imageDataList[im._id]._transformation = fullTransformation
        self.imageDataList[im._id]._is_attached = True

        self.transformation_series.append(translation)
        # self.imageDataList.append (im)
        self.position_data.append(im._pose[:3])
        cv2.imwrite('temp/finalImage' + str(self.count) + '.jpg', self.result)
        self.count += 1

        return
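
final_size, get_mesh, get_vertice, RANSAC, Homography, and Apap are project-specific helpers that are not shown here. One plausible reading of final_size (an assumption, not the actual implementation) is that it returns a canvas large enough for both the warped image and the current mosaic, plus the x/y offsets that feed the translation matrix:

import cv2
import numpy as np

def final_size_sketch(image, result, H):
    # Hypothetical stand-in for final_size (sketch only).
    h, w = image.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, H).reshape(-1, 2)
    rh, rw = result.shape[:2]
    xs = np.concatenate([warped[:, 0], [0.0, float(rw)]])
    ys = np.concatenate([warped[:, 1], [0.0, float(rh)]])
    offset_x = int(np.ceil(max(0.0, -xs.min())))
    offset_y = int(np.ceil(max(0.0, -ys.min())))
    final_w = int(np.ceil(xs.max())) + offset_x
    final_h = int(np.ceil(ys.max())) + offset_y
    return final_w, final_h, offset_x, offset_y

Under that reading, the mosaic is shifted by (offset_x, offset_y) via cv2.warpPerspective while the APAP stitcher warps the new image into the same canvas, and the final np.where keeps warped-image pixels wherever they are non-zero.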