def sanitize_prob(Gcomplete):
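    #Assumed attribute layout: Gcomplete.node[u]['Pview'][v] and
    #Gcomplete.node[u]['Pshare'][v] hold per-neighbor probabilities; any value
    #outside [0.0, 1.0] is clamped back into range below.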

    for edge in Gcomplete.edges():

        for a, b in ((edge[0], edge[1]), (edge[1], edge[0])):

            for key in ('Pview', 'Pshare'):

                prob = Gcomplete.node[a][key][b]
                Gcomplete.node[a][key][b] = min(max(prob, 0.0), 1.0)

    display("sanitize_prob", "Sanitized Gcomplete.")

    return
def compare_hog_descriptors(image_file):

	#NOTE: the dimension of the HOG descriptor equals the product of all array dimensions of the modified HOG

	display("Hog is: ")

	image_hog = get_hog_descriptor(image_file)

	describe_array(image_hog)

	display("Modified hog is: ")

	image_modified_hog = get_modified_hog_descriptor(image_file)

	for descriptor in image_modified_hog:

		describe_array(descriptor)
def class_from_target(classification_data, classification_dict):
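	#Maps each numeric target back to its class name via classification_dict.
	#Example with hypothetical values: {0: 'cat', 1: 'dog'} and targets [1, 0, 1]
	#would return ['dog', 'cat', 'dog'].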

	result_list = []

	for classification in np.nditer(classification_data):

		#display(type(classification))
		#display(type(classification_data))
		#display(type(classification_dict))

		classification_key = int(classification)

		class_prediction = classification_dict[classification_key]

		display("Class prediction is: ")
		display(class_prediction)

		result_list.append(class_prediction)

	return result_list
Example #4
def main():

    #calib = calibration.calibration(visualtion=True)
    base_dataset_path = os.path.join(os.getcwd(), "datasets", "test_lab6")
    file_name = os.path.join(base_dataset_path, "imageData.txt")
    image_directory = base_dataset_path
    drone_location = os.path.join(base_dataset_path, "drone_postion.txt")
    write_img_dir_path = os.path.join(base_dataset_path, "results")

    all_images, data_matrix = util.importData(file_name, drone_location,
                                              image_directory)
    all_images = all_images[:12]
    data_matrix = data_matrix[:12]
    # for i in range(0,3):
    #     all_images[i] = all_images[i][::10, ::10, :]
    #all_imgs_undistorted = calib.calibrate(all_images)
    # stitcher = cv2.createStitcher() if imutils.is_cv3() else cv2.Stitcher_create()
    # (status, stitched) = stitcher.stitch(all_images)
    my_combiner = Combiner.Combiner(all_images, data_matrix)
    result = my_combiner.createMosaic()
    util.display("RESULT", result)
    if not os.path.exists(write_img_dir_path):
        os.makedirs(write_img_dir_path)
    cv2.imwrite(os.path.join(write_img_dir_path, "finalResult3.png"), result)
Example #5
if os.path.isdir('results'):
    os.rename('results', 'results - ' + str(now))

os.mkdir('results')

fileName = "datasets/imageData.txt"
imageDirectory = "datasets/images/"

print("Creating Temp Directory")

if os.path.isdir('temp'):
    shutil.rmtree('temp', ignore_errors=False, onerror=None)

os.mkdir('temp')

print("Copying Images to Temp Directory")

allImages, dataMatrix = util.importData(fileName, imageDirectory)
# Perspective.changePerspective(allImages, dataMatrix)

print("Stitching Images")
start = time.time()
result = Combiner.combine()
end = time.time()

util.display("RESULT", result, 4000000)
cv2.imwrite("results/final_result.jpg", result)
print("Time --->>>>>", end - start)
print("Done. Find your final image in results folder as final_result.jpg")
Example #6
'''
Driver script. Execute this to perform the mosaic procedure.
'''

import utilities as util
import Combiner
import cv2

fileName = "datasets/imageData.txt"
imageDirectory = "datasets/images/"
allImages, dataMatrix = util.importData(fileName, imageDirectory)
myCombiner = Combiner.Combiner(allImages, dataMatrix)
result = myCombiner.createMosaic()
util.display("RESULT", result)
cv2.imwrite("results/finalResult.png", result)
Example #7
    def combine(self, index2):
        '''
        :param index2: index of self.imageList and self.kpList to combine with self.referenceImage and self.referenceKeypoints
        :return: combination of reference image and image at index 2
        '''

        #Attempt to combine one pair of images at each step. Assume the order in which the images are given is the best order.
        #This introduces drift!
        image1 = copy.copy(self.imageList[index2 - 1])
        image2 = copy.copy(self.imageList[index2])
        '''
        Descriptor computation and matching.
        Idea: Align the images by aligning features.
        '''
        # Keypoint extraction from .npz
        path = os.getcwd()
        print(path)
        image_path = os.path.join(path, "d2_net/datasets/resized_images")

        # pair_path = "/Users/arun/Downloads/AERO2ASTRO/Project GIS/ortho_d2net/ortho/d2_net/qualitative/images/pair_4"

        feat1 = np.load(os.path.join(image_path, '1.jpg.d2-net'))
        feat2 = np.load(os.path.join(image_path, '2.jpg.d2-net'))

        gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
        kp1_npz = feat1['keypoints']
        kp1_del = np.delete(kp1_npz, 2, 1)
        kp1 = [cv2.KeyPoint(point[0], point[1], 1) for point in kp1_del]

        gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        kp2_npz = feat2['keypoints']
        kp2_del = np.delete(kp2_npz, 2, 1)
        kp2 = [cv2.KeyPoint(point[0], point[1], 1) for point in kp2_del]

        descriptors1 = feat1['descriptors']
        descriptors2 = feat2['descriptors']

        print("descriptors1", descriptors1.shape)
        print("descriptors2", descriptors2.shape)

        #Visualize matching procedure.
        keypoints1Im = cv2.drawKeypoints(image1,
                                         kp1,
                                         outImage=None,
                                         color=(0, 0, 255))
        util.display("KEYPOINTS", keypoints1Im)
        keypoints2Im = cv2.drawKeypoints(image2,
                                         kp2,
                                         outImage=None,
                                         color=(0, 0, 255))
        util.display("KEYPOINTS", keypoints2Im)

        matcher = cv2.BFMatcher()  #use brute force matching
        matches = matcher.knnMatch(descriptors2, descriptors1,
                                   k=2)  #find pairs of nearest matches
        #prune bad matches
        # https://stackoverflow.com/questions/50945385/python-opencv-findhomography-inputs

        #define constants
        MIN_MATCH_COUNT = 20
        MIN_DIST_THRESHOLD = 0.7
        RANSAC_REPROJ_THRESHOLD = 4.0

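        #Lowe's ratio test: keep a match only when its nearest neighbour is
        #clearly closer than the second nearest (ratio below MIN_DIST_THRESHOLD).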
        good = []
        for m, n in matches:
            if m.distance < MIN_DIST_THRESHOLD * n.distance:
                good.append(m)
        matches = copy.copy(good)
        print("Number of Good Matches: ", len(good))

        #Visualize matches
        matchDrawing = util.drawMatches(gray2, kp2, gray1, kp1, matches)
        util.display("matches", matchDrawing)

        if len(good) > MIN_MATCH_COUNT:

            #NumPy syntax for extracting location data from match data structure in matrix form
            src_pts = np.float32([kp2[m.queryIdx].pt
                                  for m in matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([kp1[m.trainIdx].pt
                                  for m in matches]).reshape(-1, 1, 2)

            A = cv2.estimateAffinePartial2D(src_pts, dst_pts)
            if A[0] is None:
                H, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,
                                          RANSAC_REPROJ_THRESHOLD)

        else:
            raise Exception("Not enough matches are found - {}/{}".format(
                len(good), MIN_MATCH_COUNT))
        '''
        Compute Affine Transform
        Idea: Because we corrected for camera orientation, an affine transformation *should* be enough to align the images
        '''
        '''
        Compute 4 Image Corners Locations
        Idea: Same process as warpPerspectiveWithPadding() except we have to consider the sizes of two images. Might be cleaner as a function.
        '''
        height1, width1 = image1.shape[:2]
        height2, width2 = image2.shape[:2]
        corners1 = np.float32(([0, 0], [0, height1], [width1,
                                                      height1], [width1, 0]))
        corners2 = np.float32(([0, 0], [0, height2], [width2,
                                                      height2], [width2, 0]))
        warpedCorners2 = np.zeros((4, 2))
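        #Map each corner of image2 into the reference frame: an affine A applies
        #x' = a00*x + a01*y + a02 directly, while a homography H also needs the
        #perspective divide by (H[2,0]*x + H[2,1]*y + H[2,2]).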
        for i in range(0, 4):
            cornerX = corners2[i, 0]
            cornerY = corners2[i, 1]
            if A[0] is not None:  #check if we're working with affine transform or perspective transform
                warpedCorners2[
                    i,
                    0] = A[0][0, 0] * cornerX + A[0][0, 1] * cornerY + A[0][0,
                                                                            2]
                warpedCorners2[
                    i,
                    1] = A[0][1, 0] * cornerX + A[0][1, 1] * cornerY + A[0][1,
                                                                            2]
            else:
                warpedCorners2[i,
                               0] = (H[0, 0] * cornerX + H[0, 1] * cornerY +
                                     H[0, 2]) / (H[2, 0] * cornerX +
                                                 H[2, 1] * cornerY + H[2, 2])
                warpedCorners2[i,
                               1] = (H[1, 0] * cornerX + H[1, 1] * cornerY +
                                     H[1, 2]) / (H[2, 0] * cornerX +
                                                 H[2, 1] * cornerY + H[2, 2])
        allCorners = np.concatenate((corners1, warpedCorners2), axis=0)
        [xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
        [xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)
        '''Compute Image Alignment and Keypoint Alignment'''
        translation = np.float32(([1, 0, -1 * xMin], [0, 1,
                                                      -1 * yMin], [0, 0, 1]))
        warpedResImg = cv2.warpPerspective(self.resultImage, translation,
                                           (xMax - xMin, yMax - yMin))
        if A[0] is None:
            fullTransformation = np.dot(
                translation, H
            )  #again, images must be translated to be 100% visible in new canvas
            warpedImage2 = cv2.warpPerspective(image2, fullTransformation,
                                               (xMax - xMin, yMax - yMin))
        else:
            warpedImageTemp = cv2.warpPerspective(image2, translation,
                                                  (xMax - xMin, yMax - yMin))
            warpedImage2 = cv2.warpAffine(warpedImageTemp, np.float32(A[0]),
                                          (xMax - xMin, yMax - yMin))
        self.imageList[index2] = copy.copy(
            warpedImage2
        )  #crucial: update old images for future feature extractions

        resGray = cv2.cvtColor(self.resultImage, cv2.COLOR_BGR2GRAY)
        warpedResGray = cv2.warpPerspective(resGray, translation,
                                            (xMax - xMin, yMax - yMin))
        '''Compute Mask for Image Combination'''
        ret, mask1 = cv2.threshold(warpedResGray, 1, 255,
                                   cv2.THRESH_BINARY_INV)
        mask3 = np.float32(mask1) / 255

        #apply mask
        warpedImage2[:, :, 0] = warpedImage2[:, :, 0] * mask3
        warpedImage2[:, :, 1] = warpedImage2[:, :, 1] * mask3
        warpedImage2[:, :, 2] = warpedImage2[:, :, 2] * mask3

        result = warpedResImg + warpedImage2
        #visualize and save result
        self.resultImage = result
        util.display("result", result)
        cv2.imwrite("results/intermediateResult" + str(index2) + ".png",
                    result)
        return result
Example #8
    def combine(self, index2):
        '''
        :param index2: index of self.imageList and self.kpList to combine with self.referenceImage and self.referenceKeypoints
        :return: combination of reference image and image at index 2
        '''

        #Attempt to combine one pair of images at each step. Assume the order in which the images are given is the best order.
        #This introduces drift!
        image1 = copy.copy(self.imageList[index2 - 1])
        image2 = copy.copy(self.imageList[index2])
        '''
        Descriptor computation and matching.
        Idea: Align the images by aligning features.
        '''
        detector = None
        if imutils.is_cv2():
            detector = cv2.SURF(500)  #SURF showed best results
            detector.extended = True
        elif imutils.is_cv3():
            detector = cv2.xfeatures2d.SIFT_create()
        gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
        ret1, mask1 = cv2.threshold(gray1, 1, 255, cv2.THRESH_BINARY)
        kp1, descriptors1 = detector.detectAndCompute(gray1,
                                                      mask1)  #kp = keypoints

        gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
        ret2, mask2 = cv2.threshold(gray2, 1, 255, cv2.THRESH_BINARY)
        kp2, descriptors2 = detector.detectAndCompute(gray2, mask2)

        #Visualize matching procedure.
        keypoints1Im = None
        keypoints2Im = None
        if imutils.is_cv2():
            keypoints1Im = cv2.drawKeypoints(image1, kp1, color=(0, 0, 255))
            keypoints2Im = cv2.drawKeypoints(image2, kp2, color=(0, 0, 255))
        elif imutils.is_cv3():
            keypoints1Im = cv2.drawKeypoints(image1,
                                             kp1,
                                             None,
                                             color=(0, 0, 255))
            keypoints2Im = cv2.drawKeypoints(image2,
                                             kp2,
                                             None,
                                             color=(0, 0, 255))
        util.display("KEYPOINTS", keypoints1Im)
        util.display("KEYPOINTS", keypoints2Im)

        matcher = cv2.BFMatcher()  #use brute force matching
        matches = matcher.knnMatch(descriptors2, descriptors1,
                                   k=2)  #find pairs of nearest matches
        #prune bad matches
        good = []
        for m, n in matches:
            if m.distance < 0.55 * n.distance:
                good.append(m)
        matches = copy.copy(good)

        #Visualize matches
        matchDrawing = util.drawMatches(gray2, kp2, gray1, kp1, matches)
        util.display("matches", matchDrawing)

        #NumPy syntax for extracting location data from match data structure in matrix form
        src_pts = np.float32([kp2[m.queryIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp1[m.trainIdx].pt
                              for m in matches]).reshape(-1, 1, 2)
        '''
        Compute Affine Transform
        Idea: Because we corrected for camera orientation, an affine transformation *should* be enough to align the images
        '''
        A = None
        A = cv2.estimateRigidTransform(
            src_pts, dst_pts, fullAffine=False
        )  #false because we only want 5 DOF. we removed 3 DOF when we unrotated
        print('Transformation...')
        print(A)
        if imutils.is_cv2():
            if A is None:  #RANSAC sometimes fails in estimateRigidTransform(). If so, try full homography. OpenCV RANSAC implementation for homography is more robust.
                HomogResult = cv2.findHomography(src_pts,
                                                 dst_pts,
                                                 method=cv2.RANSAC)
                H = HomogResult[0]
        elif imutils.is_cv3():
            if A is None:  #RANSAC sometimes fails in estimateRigidTransform(). If so, try full homography. OpenCV RANSAC implementation for homography is more robust.
                HomogResult = cv2.findHomography(src_pts,
                                                 dst_pts,
                                                 method=cv2.RANSAC)
                H = HomogResult[0]
        '''
        Compute 4 Image Corners Locations
        Idea: Same process as warpPerspectiveWithPadding() except we have to consider the sizes of two images. Might be cleaner as a function.
        '''
        height1, width1 = image1.shape[:2]
        height2, width2 = image2.shape[:2]
        corners1 = np.float32(([0, 0], [0, height1], [width1,
                                                      height1], [width1, 0]))
        corners2 = np.float32(([0, 0], [0, height2], [width2,
                                                      height2], [width2, 0]))
        warpedCorners2 = np.zeros((4, 2))
        for i in range(0, 4):
            cornerX = corners2[i, 0]
            cornerY = corners2[i, 1]
            if imutils.is_cv2():
                if A is not None:  #check if we're working with affine transform or perspective transform
                    warpedCorners2[
                        i, 0] = A[0, 0] * cornerX + A[0, 1] * cornerY + A[0, 2]
                    warpedCorners2[
                        i, 1] = A[1, 0] * cornerX + A[1, 1] * cornerY + A[1, 2]
                else:
                    warpedCorners2[i, 0] = (
                        H[0, 0] * cornerX + H[0, 1] * cornerY + H[0, 2]) / (
                            H[2, 0] * cornerX + H[2, 1] * cornerY + H[2, 2])
                    warpedCorners2[i, 1] = (
                        H[1, 0] * cornerX + H[1, 1] * cornerY + H[1, 2]) / (
                            H[2, 0] * cornerX + H[2, 1] * cornerY + H[2, 2])
            elif imutils.is_cv3():
                if A is not None:  #check if we're working with affine transform or perspective transform
                    warpedCorners2[
                        i, 0] = A[0, 0] * cornerX + A[0, 1] * cornerY + A[0, 2]
                    warpedCorners2[
                        i, 1] = A[1, 0] * cornerX + A[1, 1] * cornerY + A[1, 2]
                else:
                    warpedCorners2[i, 0] = (
                        H[0, 0] * cornerX + H[0, 1] * cornerY + H[0, 2]) / (
                            H[2, 0] * cornerX + H[2, 1] * cornerY + H[2, 2])
                    warpedCorners2[i, 1] = (
                        H[1, 0] * cornerX + H[1, 1] * cornerY + H[1, 2]) / (
                            H[2, 0] * cornerX + H[2, 1] * cornerY + H[2, 2])
        allCorners = np.concatenate((corners1, warpedCorners2), axis=0)
        [xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
        [xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)
        '''Compute Image Alignment and Keypoint Alignment'''
        translation = np.float32(([1, 0, -1 * xMin], [0, 1,
                                                      -1 * yMin], [0, 0, 1]))
        warpedResImg = cv2.warpPerspective(self.resultImage, translation,
                                           (xMax - xMin, yMax - yMin))
        if imutils.is_cv2():
            if A is None:
                fullTransformation = np.dot(
                    translation, H
                )  #again, images must be translated to be 100% visible in new canvas
                warpedImage2 = cv2.warpPerspective(image2, fullTransformation,
                                                   (xMax - xMin, yMax - yMin))
            else:
                warpedImageTemp = cv2.warpPerspective(
                    image2, translation, (xMax - xMin, yMax - yMin))
                warpedImage2 = cv2.warpAffine(warpedImageTemp, A,
                                              (xMax - xMin, yMax - yMin))
        elif imutils.is_cv3():
            if A is None:
                fullTransformation = np.dot(
                    translation, H
                )  # again, images must be translated to be 100% visible in new canvas
                warpedImage2 = cv2.warpPerspective(image2, fullTransformation,
                                                   (xMax - xMin, yMax - yMin))
                util.display("warped0", warpedImage2)
            else:
                warpedImageTemp = cv2.warpPerspective(
                    image2, translation, (xMax - xMin, yMax - yMin))
                warpedImage2 = cv2.warpAffine(warpedImageTemp, A,
                                              (xMax - xMin, yMax - yMin))
                util.display("warped1", warpedImage2)

        self.imageList[index2] = copy.copy(
            warpedImage2
        )  #crucial: update old images for future feature extractions

        resGray = cv2.cvtColor(self.resultImage, cv2.COLOR_BGR2GRAY)
        warpedResGray = cv2.warpPerspective(resGray, translation,
                                            (xMax - xMin, yMax - yMin))
        '''Compute Mask for Image Combination'''
        ret, mask1 = cv2.threshold(warpedResGray, 1, 255,
                                   cv2.THRESH_BINARY_INV)
        mask3 = np.float32(mask1) / 255
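        #mask1 is 255 only where the running mosaic is still black (empty), so
        #mask3 keeps the new image in uncovered regions and zeroes it where the
        #mosaic already has pixels before the two canvases are added together.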

        #apply mask
        warpedImage2[:, :, 0] = warpedImage2[:, :, 0] * mask3
        warpedImage2[:, :, 1] = warpedImage2[:, :, 1] * mask3
        warpedImage2[:, :, 2] = warpedImage2[:, :, 2] * mask3

        result = warpedResImg + warpedImage2
        #visualize and save result
        self.resultImage = result
        util.display("result", result)
        cv2.imwrite("results/intermediateResult" + str(index2) + ".png",
                    result)
        return result
Example #9
 def display(self, data):  # TODO DISPLAY EYE POSITION
     print "client.py/Client.display"
     # Display the eye positions
     utilities.display(data)
Example #10
def combine(image1, image2, detector):

    #detector = cv2.ORB_create(nfeatures=10000, score = cv2.ORB_FAST_SCORE) #SURF showed best results
    #detector = cv2.xfeatures2d.SIFT_create(nfeatures=10000)
    #detector = cv2.xfeatures2d.SURF_create(10)

    #detector.setExtended (True)
    #detector.setUpright (True)

    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    ret1, mask1 = cv2.threshold(gray1, 1, 255, cv2.THRESH_BINARY)
    kp1, descriptors1 = detector.detectAndCompute(gray1, mask1)

    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    ret2, mask2 = cv2.threshold(gray2, 1, 255, cv2.THRESH_BINARY)
    kp2, descriptors2 = detector.detectAndCompute(gray2, mask2)

    keypoints1Im = cv2.drawKeypoints(image1,
                                     kp1,
                                     outImage=None,
                                     color=(0, 0, 255))
    util.display("KEYPOINTS", keypoints1Im)
    keypoints2Im = cv2.drawKeypoints(image2,
                                     kp2,
                                     outImage=None,
                                     color=(0, 0, 255))
    util.display("KEYPOINTS", keypoints2Im)

    matcher = cv2.BFMatcher()
    matches = matcher.knnMatch(descriptors2, descriptors1, k=2)

    good = []
    for m, n in matches:
        if m.distance < 0.55 * n.distance:
            good.append(m)

    print(str(len(good)) + " Matches were Found")

    if len(good) <= 10:
        return image1

    matches = copy.copy(good)

    matchDrawing = util.drawMatches(gray2, kp2, gray1, kp1, matches)
    util.display("matches", matchDrawing)

    src_pts = np.float32([kp2[m.queryIdx].pt
                          for m in matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp1[m.trainIdx].pt
                          for m in matches]).reshape(-1, 1, 2)

    A = cv2.estimateRigidTransform(src_pts, dst_pts, fullAffine=False)

    if A is None:
        HomogResult = cv2.findHomography(src_pts, dst_pts, method=cv2.RANSAC)
        H = HomogResult[0]

    height1, width1 = image1.shape[:2]
    height2, width2 = image2.shape[:2]

    corners1 = np.float32(([0, 0], [0, height1], [width1,
                                                  height1], [width1, 0]))
    corners2 = np.float32(([0, 0], [0, height2], [width2,
                                                  height2], [width2, 0]))

    warpedCorners2 = np.zeros((4, 2))

    for i in range(0, 4):
        cornerX = corners2[i, 0]
        cornerY = corners2[i, 1]
        if A is not None:  #check if we're working with affine transform or perspective transform
            warpedCorners2[i,
                           0] = A[0, 0] * cornerX + A[0, 1] * cornerY + A[0, 2]
            warpedCorners2[i,
                           1] = A[1, 0] * cornerX + A[1, 1] * cornerY + A[1, 2]
        else:
            warpedCorners2[i, 0] = (H[0, 0] * cornerX + H[0, 1] * cornerY +
                                    H[0, 2]) / (H[2, 0] * cornerX +
                                                H[2, 1] * cornerY + H[2, 2])
            warpedCorners2[i, 1] = (H[1, 0] * cornerX + H[1, 1] * cornerY +
                                    H[1, 2]) / (H[2, 0] * cornerX +
                                                H[2, 1] * cornerY + H[2, 2])

    allCorners = np.concatenate((corners1, warpedCorners2), axis=0)

    [xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
    [xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)

    translation = np.float32(([1, 0, -1 * xMin], [0, 1, -1 * yMin], [0, 0, 1]))
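    #Translate by (-xMin, -yMin) so no warped pixel falls at a negative
    #coordinate and both images fit inside the (xMax - xMin, yMax - yMin) canvas.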
    warpedResImg = cv2.warpPerspective(image1, translation,
                                       (xMax - xMin, yMax - yMin))

    if A is None:
        fullTransformation = np.dot(
            translation, H
        )  #again, images must be translated to be 100% visible in new canvas
        warpedImage2 = cv2.warpPerspective(image2, fullTransformation,
                                           (xMax - xMin, yMax - yMin))

    else:
        warpedImageTemp = cv2.warpPerspective(image2, translation,
                                              (xMax - xMin, yMax - yMin))
        warpedImage2 = cv2.warpAffine(warpedImageTemp, A,
                                      (xMax - xMin, yMax - yMin))

    #util.display("r", warpedResImg, 400000)

    #util.display("r", warpedResImg, 400000)

    #warpedImage2 = warpedImage2[::2, ::2, :]
    #warpedResImg = warpedResImg[::2, ::2, :]
    #warpedImage2 = 0.8*warpedImage2

    #warpedImage2[::2, ::2,:] = 0
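    #Overlay blend: keep warpedImage2 wherever it has non-zero pixels, otherwise
    #fall back to the already-placed warpedResImg.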
    result = np.where(warpedImage2 != 0, warpedImage2, warpedResImg)
    #    cv2.imwrite("back.JPG", warpedResImg)
    #    cv2.imwrite("fore.JPG", warpedImage2)
    #result = Image.blend(warpedResImg, warpedImage2, alpha=0.5)
    #print (warpedImage2.shape[:2])
    #print (warpedResImg.shape[:2])

    #result = image1 + image2
    #result = Image.blend(warpedResImg, warpedImage2, alpha=0.5)

    util.display("result", result)

    return result
Example #11
 def display(self, data):
     print "client.py/Client.display"
     # Display the eye positions
     utilities.display(data)
Example #12
    def combine(self, index2):
        '''
        :param index2: index of self.imageList and self.kpList to combine with self.referenceImage and self.referenceKeypoints
        :return: combination of reference image and image at index 2
        '''

        #Attempt to combine one pair of images at each step. Assume the order in which the images are given is the best order.
        #This introduces drift!
        image1 = copy.copy(self.imageList[index2 - 1])
        image2 = copy.copy(self.imageList[index2])

        '''
        Descriptor computation and matching.
        Idea: Align the images by aligning features.
        '''
        detector = cv2.SURF(500) #SURF showed best results
        detector.extended = True
        gray1 = cv2.cvtColor(image1,cv2.COLOR_BGR2GRAY)
        ret1, mask1 = cv2.threshold(gray1,1,255,cv2.THRESH_BINARY)
        kp1, descriptors1 = detector.detectAndCompute(gray1,mask1) #kp = keypoints

        gray2 = cv2.cvtColor(image2,cv2.COLOR_BGR2GRAY)
        ret2, mask2 = cv2.threshold(gray2,1,255,cv2.THRESH_BINARY)
        kp2, descriptors2 = detector.detectAndCompute(gray2,mask2)

        #Visualize matching procedure.
        keypoints1Im = cv2.drawKeypoints(image1,kp1,color=(0,0,255))
        util.display("KEYPOINTS",keypoints1Im)
        keypoints2Im = cv2.drawKeypoints(image2,kp2,color=(0,0,255))
        util.display("KEYPOINTS",keypoints2Im)

        matcher = cv2.BFMatcher() #use brute force matching
        matches = matcher.knnMatch(descriptors2,descriptors1, k=2) #find pairs of nearest matches
        #prune bad matches
        good = []
        for m,n in matches:
            if m.distance < 0.55*n.distance:
                good.append(m)
        matches = copy.copy(good)

        #Visualize matches
        matchDrawing = util.drawMatches(gray2,kp2,gray1,kp1,matches)
        util.display("matches",matchDrawing)

        #NumPy syntax for extracting location data from match data structure in matrix form
        src_pts = np.float32([ kp2[m.queryIdx].pt for m in matches ]).reshape(-1,1,2)
        dst_pts = np.float32([ kp1[m.trainIdx].pt for m in matches ]).reshape(-1,1,2)

        '''
        Compute Affine Transform
        Idea: Because we corrected for camera orientation, an affine transformation *should* be enough to align the images
        '''
        A = cv2.estimateRigidTransform(src_pts,dst_pts,fullAffine=False) #false because we only want 5 DOF. we removed 3 DOF when we unrotated
        if A is None: #RANSAC sometimes fails in estimateRigidTransform(). If so, try full homography. OpenCV RANSAC implementation for homography is more robust.
            HomogResult = cv2.findHomography(src_pts,dst_pts,method=cv2.RANSAC)
            H = HomogResult[0]

        '''
        Compute 4 Image Corners Locations
        Idea: Same process as warpPerspectiveWithPadding() except we have to consider the sizes of two images. Might be cleaner as a function.
        '''
        height1,width1 = image1.shape[:2]
        height2,width2 = image2.shape[:2]
        corners1 = np.float32(([0,0],[0,height1],[width1,height1],[width1,0]))
        corners2 = np.float32(([0,0],[0,height2],[width2,height2],[width2,0]))
        warpedCorners2 = np.zeros((4,2))
        for i in range(0,4):
            cornerX = corners2[i,0]
            cornerY = corners2[i,1]
            if A is not None: #check if we're working with affine transform or perspective transform
                warpedCorners2[i,0] = A[0,0]*cornerX + A[0,1]*cornerY + A[0,2]
                warpedCorners2[i,1] = A[1,0]*cornerX + A[1,1]*cornerY + A[1,2]
            else:
                warpedCorners2[i,0] = (H[0,0]*cornerX + H[0,1]*cornerY + H[0,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
                warpedCorners2[i,1] = (H[1,0]*cornerX + H[1,1]*cornerY + H[1,2])/(H[2,0]*cornerX + H[2,1]*cornerY + H[2,2])
        allCorners = np.concatenate((corners1, warpedCorners2), axis=0)
        [xMin, yMin] = np.int32(allCorners.min(axis=0).ravel() - 0.5)
        [xMax, yMax] = np.int32(allCorners.max(axis=0).ravel() + 0.5)

        '''Compute Image Alignment and Keypoint Alignment'''
        translation = np.float32(([1,0,-1*xMin],[0,1,-1*yMin],[0,0,1]))
        warpedResImg = cv2.warpPerspective(self.resultImage, translation, (xMax-xMin, yMax-yMin))
        if A is None:
            fullTransformation = np.dot(translation,H) #again, images must be translated to be 100% visible in new canvas
            warpedImage2 = cv2.warpPerspective(image2, fullTransformation, (xMax-xMin, yMax-yMin))
        else:
            warpedImageTemp = cv2.warpPerspective(image2, translation, (xMax-xMin, yMax-yMin))
            warpedImage2 = cv2.warpAffine(warpedImageTemp, A, (xMax-xMin, yMax-yMin))
        self.imageList[index2] = copy.copy(warpedImage2) #crucial: update old images for future feature extractions

        resGray = cv2.cvtColor(self.resultImage,cv2.COLOR_BGR2GRAY)
        warpedResGray = cv2.warpPerspective(resGray, translation, (xMax-xMin, yMax-yMin))

        '''Compute Mask for Image Combination'''
        ret, mask1 = cv2.threshold(warpedResGray,1,255,cv2.THRESH_BINARY_INV)
        mask3 = np.float32(mask1)/255

        #apply mask
        warpedImage2[:,:,0] = warpedImage2[:,:,0]*mask3
        warpedImage2[:,:,1] = warpedImage2[:,:,1]*mask3
        warpedImage2[:,:,2] = warpedImage2[:,:,2]*mask3

        result = warpedResImg + warpedImage2
        #visualize and save result
        self.resultImage = result
        util.display("result",result)
        cv2.imwrite("results/intermediateResult"+str(index2)+".png",result)
        return result
def draw_graph(graph, title):
    graph_gvz = nx.to_agraph(graph)
    graph_gvz.layout(prog="neato")
    graph_gvz.draw(os.path.join(path_prefix, title + ".ps"))
    display("draw_graph", "Drawn " + title + " and saved to " + title + ".ps")
def run_on_all_images(process_image, image_directory, images_to_sample_per_class, number_to_stop_at, calc_descriptors, testing=False, descriptor_only=False):

	cheap_display("Running on all images...")

	#TODO: Make sure these are being run on images in the correct order

	#specify generic form of return data list
	return_data_list = []
	subregions_list = ()

	classification_list = []
	classification_dict = {}
	class_count = 0

	#number of images processed
	images_processed = 0

	#number of images to sample per class
	images_sampled_in_class = 0

	display(image_directory)


	# For every class
	class_files = os.listdir(image_directory)

	for filename in class_files:

		display(filename)

		current_class = filename

		#classification_dict[current_class] = class_count
		classification_dict[class_count] = current_class
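		#classification_dict maps the numeric class index back to the class
		#(directory) name, e.g. {0: 'class_a', 1: 'class_b'} (hypothetical names).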

		filename = image_directory +"/" + filename

		images_sampled_in_class = 0


		# For every instance of that class
		class_instance_files = os.listdir(filename)

		# If testing, reverse the list so we start from the end; that way training and testing use the beginning and end of the list, respectively
		if(testing):
			class_instance_files.reverse()

		for image_file in class_instance_files:

			if(images_sampled_in_class < images_to_sample_per_class):

				#display(image_file)

				image_file = filename +"/" + image_file

				if(calc_descriptors):

					descriptors = process_image(image_file)

					subregions, descriptor = spm_subregions(descriptors)

					subregions_list = subregions_list + (subregions,)

					# if we eventually want encodings
					if not descriptor_only:

						for description in descriptor:
							return_data_list.append(description)

						#log classification of this image
						#	Appending outside of the descriptor iterator loop is fine, as descriptors are eventually pooled
						classification_list.append(class_count)

					else:

						for description in descriptor:
							return_data_list.append(description)
							classification_list.append(class_count)

					#return if reached max images total to process
					images_processed = images_processed + 1
					cheap_display("Processed ",str(images_processed)," images so far")
					if(images_processed >= number_to_stop_at):

						raise UserError("Return case not implemented")

						return return_data_list

				line_break()
				line_break()

			images_sampled_in_class = images_sampled_in_class+1

		#done with this class
		class_count = class_count + 1


	display(classification_list)
	display(classification_dict)

	# convert return data list to vstacked array all at once, to use less copies
	if(calc_descriptors):
		cheap_display("Stacking descriptors...")

		return_array = np.vstack(return_data_list)
		cheap_display(return_array.shape)

		cheap_display("Making subregions list an array...")
		subregions_array = np.asarray(subregions_list)

		# convert classification list to target array
		target_array = np.asarray(classification_list)

	else:
		return_array = np.empty([1,1])
		subregions_array = np.empty([1,1])
		target_array = np.empty([1,1])

	return subregions_array, return_array, target_array, classification_dict