def setReference(self, ref):
        '''
        ref  ... either quad, grid, homography or reference image

        quad --> list of four image points(x,y) marking the edges of the quad
               to correct
        homography --> h. matrix to correct perspective distortion
        referenceImage --> image of same object without perspective distortion
        '''
#         self.maps = {}
        self.quad = None
#         self.refQuad = None
        self._camera_position = None
        self._homography = None
        self._homography_is_fixed = True
#         self.tvec, self.rvec = None, None
        self._pose = None

        # evaluate input:
        if isinstance(ref, np.ndarray) and ref.shape == (3, 3):
            # REF IS HOMOGRAPHY
            self._homography = ref
            # REF IS QUAD
        elif len(ref) == 4:
            self.quad = sortCorners(ref)

            # TODO: cleanup # only need to call once - here
            o = self.obj_points  # no property any more

            # REF IS IMAGE
        else:
            self.ref = imread(ref)
#             self._refshape = ref.shape[:2]
            self.pattern = PatternRecognition(self.ref)
            self._homography_is_fixed = False
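
# A minimal usage sketch for setReference() (hypothetical image size, corner
# coordinates and file name; PerspectiveCorrection, the class this method
# belongs to, appears further down this page):
import numpy as np

pc = PerspectiveCorrection(img_shape=(480, 640))
pc.setReference(np.eye(3))                                    # 3x3 homography
pc.setReference(np.array([(10, 12), (620, 8),
                          (630, 470), (5, 465)]))             # quad corners
pc.setReference('reference.png')                              # reference image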
Example #2
    def addImg(self, img, overlap=None, direction='bottom'):
        '''
        '''
        assert direction == 'bottom', \
            'only direction="bottom" is implemented so far'

        # CUT IMAGE TO ONLY COMPARE POINTS AT OVERLAP:
        if overlap is not None:
            # only direction bottom for now...
            s = self.img_orig.shape
            oimgcut = self.img_orig[s[0] - overlap:, :]
            imgcut = img[:overlap, :]
        else:
            oimgcut = self.img_orig
            imgcut = img

        # PATTERN COMPARISON:
        if not self._firstTime or overlap is not None:
            self.pattern = PatternRecognition(oimgcut)
        (H, inlierRatio) = self.pattern.findHomography(imgcut)[0:2]
        H_inv = self.pattern.invertHomography(H)

        # STITCH:
        self.img_orig = self._stitchImg(H_inv, inlierRatio, img, overlap)
        self._firstTime = False
        return self.img_orig
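
# Sketch: stitch two vertically overlapping frames with addImg() above
# (hypothetical file names; the PerspectiveImageStitching class defined later
# on this page provides this addImg() together with _stitchImg()). The second
# frame is registered against the last `overlap` rows of the first and
# appended at the bottom:
import cv2

stitcher = PerspectiveImageStitching(cv2.imread('row0.jpg'))
pano = stitcher.addImg(cv2.imread('row1.jpg'), overlap=200)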
Example #3
    def __init__(self, img):
        '''
        @param img -> reference image
        '''
        self.img_orig = img
        self._firstTime = True
        self.pattern = PatternRecognition(img)
    def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10, remove_border_size=0,
                 # feature_size=5,
                 cameraMatrix=None, distortionCoeffs=None):  # 20
        """
        Args:
            img (path or array): Reference image
        Kwargs:
            bg (path or array): background image - same for all given images
            maxDev (float): Relative deviation between the last two iteration
                            steps; stop iterative refinement if the deviation
                            is smaller
            maxIter (int): Stop iterative refinement after maxIter steps
        """
        self.lens = None
        if cameraMatrix is not None:
            self.lens = LensDistortion()
            self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
            self.lens._coeffs['cameraMatrix'] = cameraMatrix

        self.maxDev = maxDev
        self.maxIter = maxIter
        self.remove_border_size = remove_border_size
        #self.feature_size = feature_size
        img = imread(img, 'gray')

        self.bg = bg
        if bg is not None:
            self.bg = getBackground(bg)
            if not isinstance(self.bg, np.ndarray):
                self.bg = np.full_like(img, self.bg, dtype=img.dtype)
            else:
                self.bg = self.bg.astype(img.dtype)
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)
        # CREATE TEMPLATE FOR PATTERN COMPARISON:
        pos = self._findObject(img)
        self.obj_shape = img[pos].shape

        PatternRecognition.__init__(self, img[pos])

        self._ff_mma = MaskedMovingAverage(shape=img.shape,
                                           dtype=np.float64)

        self.object = None

        self.Hs = []    # Homography matrices of all fitted images
        self.Hinvs = []  # same, but inverse
        self.fits = []  # all images, fitted to reference
        self._fit_masks = []

        self._refined = False
    def addImg(self, img, overlap=None, direction='bottom'):
        '''
        '''
        assert direction == 'bottom', \
            'only direction="bottom" is implemented so far'

        # CUT IMAGE TO ONLY COMPARE POINTS AT OVERLAP:
        if overlap is not None:
            # only direction bottom for now...
            s = self.img_orig.shape
            oimgcut = self.img_orig[s[0] - overlap:, :]
            imgcut = img[:overlap, :]
        else:
            oimgcut = self.img_orig
            imgcut = img

        # PATTERN COMPARISON:
        if not self._firstTime or overlap is not None:
            self.pattern = PatternRecognition(oimgcut)
        (H, inlierRatio) = self.pattern.findHomography(imgcut)[0:2]
        H_inv = self.pattern.invertHomography(H)

        # STITCH:
        self.img_orig = self._stitchImg(H_inv, inlierRatio, img, overlap)
        self._firstTime = False
        return self.img_orig
class PerspectiveImageStitching(object):
    '''
    fit or add an image to the first image of this display
    using perspective transformations
    '''

    def __init__(self, img):
        '''
        @param img -> reference image
        '''
        self.img_orig = img
        self._firstTime = True
        self.pattern = PatternRecognition(img)

    def fitImg(self, img_rgb):
        '''
        fit perspective and size of the input image to the base image
        '''
        H = self.pattern.findHomography(img_rgb)[0]
        H_inv = self.pattern.invertHomography(H)
        s = self.img_orig.shape
        warped = cv2.warpPerspective(img_rgb, H_inv, (s[1], s[0]))
        return warped

    def addImg(self, img, overlap=None, direction='bottom'):
        '''
        '''
        assert direction == 'bottom', \
            'only direction="bottom" is implemented so far'

        # CUT IMAGE TO ONLY COMPARE POINTS AT OVERLAP:
        if overlap is not None:
            # only direction bottom for now...
            s = self.img_orig.shape
            oimgcut = self.img_orig[s[0] - overlap:, :]
            imgcut = img[:overlap, :]
        else:
            oimgcut = self.img_orig
            imgcut = img

        # PATTERN COMPARISON:
        if not self._firstTime or overlap is not None:
            self.pattern = PatternRecognition(oimgcut)
        (H, inlierRatio) = self.pattern.findHomography(imgcut)[0:2]
        H_inv = self.pattern.invertHomography(H)

        # STITCH:
        self.img_orig = self._stitchImg(H_inv, inlierRatio, img, overlap)
        self._firstTime = False
        return self.img_orig

    def addDir(self, image_dir, img_filter=None):
        '''
        @param image_dir -> 'directory' containing all images
        @param img_filter -> 'JPG'; None->Take all images
        '''
        dir_list = []
        try:
            dir_list = os.listdir(image_dir)
            if img_filter:
                # keep only files whose name contains img_filter
                dir_list = [x for x in dir_list if x.find(img_filter) > -1]
            try:  # remove Thumbs.db if present (Windows only)
                dir_list.remove('Thumbs.db')
            except ValueError:
                pass
        except OSError:
            raise IOError("Unable to open directory: %s" % image_dir)
        dir_list = [os.path.join(image_dir, x) for x in dir_list]
        dir_list = [x for x in dir_list if x != image_dir]
        return self._stitchDirRecursive(dir_list, 0)

    def filterMatches(self, matches, ratio=0.75):
        filtered_matches = []
        for m in matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                filtered_matches.append(m[0])
        return filtered_matches
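
    # filterMatches() is Lowe's ratio test: a k-nearest-neighbour match (k=2)
    # is kept only if its best distance is clearly smaller than the
    # second-best one. A sketch of how such match pairs are typically
    # produced (an assumption - the matcher setup is not shown in this class):
    #
    #   orb = cv2.ORB_create()
    #   _, des1 = orb.detectAndCompute(img1, None)
    #   _, des2 = orb.detectAndCompute(img2, None)
    #   matches = cv2.BFMatcher(cv2.NORM_HAMMING).knnMatch(des1, des2, k=2)
    #   good = self.filterMatches(matches)  # ratio=0.75 by default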

    def imageDistance(self, matches):
        sumDistance = 0.0
        for match in matches:
            sumDistance += match.distance
        return sumDistance

    def findDimensions(self, image, homography):
        base_p1 = np.ones(3, np.float32)
        base_p2 = np.ones(3, np.float32)
        base_p3 = np.ones(3, np.float32)
        base_p4 = np.ones(3, np.float32)

        (y, x) = image.shape[:2]

        base_p1[:2] = [0, 0]
        base_p2[:2] = [x, 0]
        base_p3[:2] = [0, y]
        base_p4[:2] = [x, y]

        max_x = None
        max_y = None
        min_x = None
        min_y = None

        for pt in [base_p1, base_p2, base_p3, base_p4]:

            hp = np.matrix(homography, np.float32) * \
                np.matrix(pt, np.float32).T
            hp_arr = np.array(hp, np.float32)
            normal_pt = np.array([hp_arr[0] / hp_arr[2],
                                  hp_arr[1] / hp_arr[2]], np.float32)

            if (max_x is None or normal_pt[0, 0] > max_x):
                max_x = normal_pt[0, 0]
            if (max_y is None or normal_pt[1, 0] > max_y):
                max_y = normal_pt[1, 0]
            if (min_x is None or normal_pt[0, 0] < min_x):
                min_x = normal_pt[0, 0]
            if (min_y is None or normal_pt[1, 0] < min_y):
                min_y = normal_pt[1, 0]

        min_x = min(0, min_x)
        min_y = min(0, min_y)

        return (min_x, min_y, max_x, max_y)
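
    # findDimensions() maps the four image corners through the homography and
    # returns the bounding box (min_x, min_y, max_x, max_y) of the result;
    # min_x/min_y are clamped to <= 0 so the base image, anchored at the
    # origin, always stays inside the enlarged canvas.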

    def _stitchDirRecursive(self, dir_list, recursion_round=0):
        if (len(dir_list) < 1):
            return self.base_img_rgb
        # Find key points in base image for motion estimation
        self.detector.detectAndCompute(self.base_img, None)

        print("Iterating through next images...")

        closestImage = None

        # TODO: Thread this loop since each iteration is independent

        # Find the best next image from the remaining images
        for next_img_path in dir_list:

            print("Reading %s..." % next_img_path)

            next_img_rgb = cv2.imread(next_img_path)

            (H, inlierRatio,
             averagePointDistance,
             next_img, next_features,
             next_descs,
             matches_subset) = self.pattern.findHomography(next_img_rgb)

            # if ( closestImage == None or averagePointDistance <
            # closestImage['dist'] ):
            if (closestImage is None or inlierRatio > closestImage['inliers']):
                closestImage = {'h':       H,
                                'inliers': inlierRatio,
                                'dist':    averagePointDistance,
                                'path':    next_img_path,
                                'rgb':     next_img_rgb,
                                'img':     next_img,
                                'feat':    next_features,
                                'desc':    next_descs,
                                'match':   matches_subset}

        print("Closest Image: ", closestImage['path'])
        print("Closest Image Ratio: ", closestImage['inliers'])

        dir_list = [x for x in dir_list if x != closestImage['path']]

        self.base_img_rgb = self._stitchImg(closestImage)
        self.base_img = cv2.GaussianBlur(cv2.cvtColor(
            self.base_img_rgb, cv2.COLOR_BGR2GRAY), (5, 5), 0)

        return self._stitchDirRecursive(dir_list, recursion_round + 1)

    def _stitchImg(self, H_inv, inliers, img, overlap=0):
        # TODO: use img_orig which can be a float array
        # to return stitched results as a float array of the same kind

        isColor = img.ndim == 3

        if (inliers > 0.1):  # and

            # add translation to homography to consider overlap:
            if overlap:
                H_inv[1, 2] += self.img_orig.shape[0] - overlap

            (min_x, min_y, max_x, max_y) = self.findDimensions(img, H_inv)

            # Adjust max_x and max_y by base img size
            max_x = max(max_x, self.img_orig.shape[1])
            max_y = max(max_y, self.img_orig.shape[0])

            move_h = np.matrix(np.identity(3), np.float32)

            if (min_x < 0):
                move_h[0, 2] += -min_x
                max_x += -min_x

            if (min_y < 0):
                move_h[1, 2] += -min_y
                max_y += -min_y

#             print "Homography: \n", H
            print("Inverse Homography: \n", H_inv)
            print("Min Points: ", (min_x, min_y))

            mod_inv_h = move_h * H_inv

            img_w = int(math.ceil(max_x))
            img_h = int(math.ceil(max_y))

            print("New Dimensions: ", (img_w, img_h))

            # Warp the new image given the homography from the old image
            base_img_warp = cv2.warpPerspective(
                self.img_orig, move_h, (img_w, img_h))
            print("Warped base image")

            next_img_warp = cv2.warpPerspective(img, mod_inv_h, (img_w, img_h))
            print("Warped next image")

            # Put the base image on an enlarged palette
            if isColor:
                enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)
            else:
                enlarged_base_img = np.zeros((img_h, img_w), np.uint8)

            print("Enlarged Image Shape: ", enlarged_base_img.shape)
            print("Base Image Shape: ", self.img_orig.shape)
            print("Base Image Warp Shape: ", base_img_warp.shape)

            # Create a mask from the warped image for constructing masked
            # composite
            if isColor:
                d = np.sum(next_img_warp, axis=-1)
            else:
                d = next_img_warp

            # Now add the warped image
            data_map = d == 0
            enlarged_base_img[data_map] = base_img_warp[data_map]
            final_img = enlarged_base_img + next_img_warp

            # average overlap:
            if isColor:
                dd = np.sum(base_img_warp, axis=-1)
            else:
                dd = base_img_warp
            mask = np.logical_and(d != 0, dd != 0)
            av = (
                0.5 * (
                    cv2.subtract(
                        base_img_warp[mask],
                        next_img_warp[mask]))).astype(
                final_img.dtype)
            if not isColor:
                av = av[:, 0]
            final_img[mask] += av

#             final_img = cv2.add(enlarged_base_img, next_img_warp,
#                 dtype=cv2.CV_8U)

            # Crop off the black edges
#             final_gray = self._rgb2Gray(final_img)
#             _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
#
            thresh = final_img > 0
            if isColor:
                thresh = np.sum(thresh, axis=-1)  # collapse the color axis to a 2D mask

            # findContours returns (contours, hierarchy) in OpenCV 2/4 and
            # (image, contours, hierarchy) in OpenCV 3; [-2] covers both:
            contours = cv2.findContours(thresh.astype(np.uint8),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)[-2]
            print("Found %d contours..." % (len(contours)))

            max_area = 0
            best_rect = (0, 0, 0, 0)

            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                # print "Bounding Rectangle: ", (x,y,w,h)

                # cv2.boundingRect already returns (x, y, width, height),
                # so the area is simply w * h:
                area = w * h

                if area > max_area and h > 0 and w > 0:
                    max_area = area
                    best_rect = (x, y, w, h)

            if (max_area > 0):
                print("Maximum Contour: ", max_area)
                print("Best Rectangle: ", best_rect)

                final_img_crop = final_img[best_rect[1]:best_rect[1] +
                                           best_rect[3],
                                           best_rect[0]:best_rect[0] +
                                           best_rect[2]]

                final_img = final_img_crop

            return final_img

        else:
            return self.base_img_rgb
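
# Sketch: align a second photograph to the base image with fitImg() above
# (hypothetical file names); the homography found by pattern recognition is
# inverted and the new image is warped onto the base image's pixel grid:
import cv2

stitch = PerspectiveImageStitching(cv2.imread('base.jpg'))
aligned = stitch.fitImg(cv2.imread('offset_view.jpg'))
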
class PerspectiveCorrection(object):

    def __init__(self,
                 img_shape,
                 # obj_height_mm=None,
                 # obj_width_mm=None,
                 cameraMatrix=None,
                 distCoeffs=np.zeros((5, 1)),
                 do_correctIntensity=False,
                 px_per_phys_unit=None,
                 new_size=(None, None),
                 in_plane=False,
                 border=0,
                 maxShear=0.05,
                 material='EL_Si_module',
                 cv2_opts={}):
        '''
        correction(warp+intensity factor) + uncertainty due to perspective distortion

        new_size = (sizey, sizex)
            if either sizey or sizex is None, the resulting size will be set
            using an (assumed) aspect ratio

        in_plane=True --> object has no tilt; only in-plane rotation and
            translation are assumed


        !!!
        given images need to be already free from lens distortion
        and distCoeffs should be 0
        !!!
        '''
        # TODO: remove camera matrix and dist coeffs completely - not needed
        # since img has to be lens-corrected anyway

        # TODO: insert aspect ratio and remove obj_width, height
        self.opts = {  # 'obj_height_mm': obj_height_mm,
            #'obj_width_mm': obj_width_mm,
            'distCoeffs': distCoeffs.astype(np.float32),
            'do_correctIntensity': do_correctIntensity,
            'new_size': new_size,
            'in_plane': in_plane,
            'cv2_opts': cv2_opts,
            'border': border,
            'material': material,
            'maxShear': maxShear,
            'shape': img_shape[:2]}
        if cameraMatrix is None:
            cameraMatrix = genericCameraMatrix(img_shape)

        self.opts['cameraMatrix'] = cameraMatrix.astype(np.float32)
        self.refQuad = None
        self._obj_points = None
        self.px_per_phys_unit = px_per_phys_unit

        self._newBorders = self.opts['new_size']
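
    # A typical call sequence (a sketch with hypothetical values; per the
    # docstring above, images must already be free of lens distortion):
    #
    #   pc = PerspectiveCorrection(img.shape, new_size=(600, 800),
    #                              do_correctIntensity=True)
    #   pc.setReference(quad)       # four (x, y) corner points of the object
    #   corrected = pc.correct(img)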

    def setReferenceQuad(self, refQuad):
        '''TODO'''
        self.refQuad = sortCorners(refQuad)

    def setReference(self, ref):
        '''
        ref  ... either quad, grid, homography or reference image

        quad --> list of four image points(x,y) marking the edges of the quad
               to correct
        homography --> h. matrix to correct perspective distortion
        referenceImage --> image of same object without perspective distortion
        '''
#         self.maps = {}
        self.quad = None
#         self.refQuad = None
        self._camera_position = None
        self._homography = None
        self._homography_is_fixed = True
#         self.tvec, self.rvec = None, None
        self._pose = None

        # evaluate input:
        if isinstance(ref, np.ndarray) and ref.shape == (3, 3):
            # REF IS HOMOGRAPHY
            self._homography = ref
            # REF IS QUAD
        elif len(ref) == 4:
            self.quad = sortCorners(ref)

            # TODO: cleanup # only need to call once - here
            o = self.obj_points  # no property any more

            # REF IS IMAGE
        else:
            self.ref = imread(ref)
#             self._refshape = ref.shape[:2]
            self.pattern = PatternRecognition(self.ref)
            self._homography_is_fixed = False

    @property
    def homography(self):
        if self._homography is None:
            b = self.opts['border']
            if self.quad is not None:

                if self.refQuad is not None:
                    dst = self.refQuad.astype(np.float32)
                else:
                    sy, sx = self._newBorders
                    dst = np.float32([
                        [b,  b],
                        [sx - b, b],
                        [sx - b, sy - b],
                        [b,  sy - b]])

                self._homography = cv2.getPerspectiveTransform(
                    self.quad.astype(np.float32), dst)
            else:
                try:
                    # GET HOMOGRAPHY FROM REFERENCE IMAGE USING PATTERN
                    # RECOGNITION
                    self._Hinv = h = self.pattern.findHomography(self.img)[0]
                    H = self.pattern.invertHomography(h)
                except Exception as e:
                    print(e)
                    if perspCorrectionViaQuad:
                        # PROPRIETARY FALLBACK METHOD
                        quad = perspCorrectionViaQuad(
                            self.img, self.ref, border=b)
                        sy, sx = self.ref.shape
                        dst = np.float32([
                            [b,  b],
                            [sx - b, b],
                            [sx - b, sy - b],
                            [b,  sy - b]])

                        H = cv2.getPerspectiveTransform(
                            quad.astype(np.float32), dst)

                    else:
                        raise e

# #                 #test fit quality:
#                 if abs(decompHomography(H)[-1]) > self.opts['maxShear']:
#                     #shear too big
#

                self._homography = H

                sy, sx = self.opts['new_size']
                ssy, ssx = self.ref.shape[:2]
                if sx is None:
                    sx = ssx
                if sy is None:
                    sy = ssy
                self._newBorders = (sy, sx)

        return self._homography

    def distort(self, img, rotX=0, rotY=0, quad=None):
        '''
        Apply perspective distortion to self.img
        angles are in DEG and need to be positive to fit into the image
        '''
        self.img = imread(img)
        # fit old image to self.quad:
        corr = self.correct(self.img)

        s = self.img.shape
        if quad is None:
            wquad = (self.quad - self.quad.mean(axis=0)).astype(float)

            win_width = s[1]
            win_height = s[0]
            # project quad:
            for n, q in enumerate(wquad):
                p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY)
                p = p.project(win_width, win_height, s[1], s[1])
                wquad[n] = (p.x, p.y)
            wquad = sortCorners(wquad)
            # scale result so that longest side of quad and wquad are equal
            w = wquad[:, 0].max() - wquad[:, 0].min()
            h = wquad[:, 1].max() - wquad[:, 1].min()
            scale = min(s[1] / w, s[0] / h)
            # scale:
            wquad = (wquad * scale).astype(int)
        else:
            wquad = sortCorners(quad)
        wquad -= wquad.min(axis=0)

        lx = corr.shape[1]
        ly = corr.shape[0]

        objP = np.array([
            [0, 0],
            [lx, 0],
            [lx, ly],
            [0, ly],
        ], dtype=np.float32)

        homography = cv2.getPerspectiveTransform(
            wquad.astype(np.float32), objP)
        # distort corr:
        w = wquad[:, 0].max() - wquad[:, 0].min()
        h = wquad[:, 1].max() - wquad[:, 1].min()
        #(int(w),int(h))
        dist = cv2.warpPerspective(corr, homography, (int(w), int(h)),
                                   flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)

        # move middle of dist to middle of the old quad:
        bg = np.zeros(shape=s)
        rmn = (bg.shape[0] / 2, bg.shape[1] / 2)

        ss = dist.shape
        mn = (ss[0] / 2, ss[1] / 2)  # wquad.mean(axis=0)
        ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1]))

        bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist

        # finally move quad into right position:
        self.quad = wquad
        self.quad += (ref[1], ref[0])
        self.img = bg
        self._homography = None
        self._poseFromQuad()

        if self.opts['do_correctIntensity']:
            tf = self.tiltFactor()
            if self.img.ndim == 3:
                for col in range(self.img.shape[2]):
                    self.img[..., col] *= tf
            else:
                #                 tf = np.tile(tf, (1,1,self.img.shape[2]))
                self.img = self.img * tf

        return self.img

    def objectOrientation(self):
        tvec, r = self.pose()
        eulerAngles = mat2euler(cv2.Rodrigues(r)[0], axes='rzxy')

        tilt = eulerAngles[1]
        rot = eulerAngles[0]
        dist = tvec[2, 0]  # only take depth component np.linalg.norm(tvec)
        return dist, tilt, rot
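
    # objectOrientation() decomposes the pose: with the 'rzxy' Euler order,
    # eulerAngles[0] is the in-plane rotation about the optical axis and
    # eulerAngles[1] the tilt of the object plane; dist is only the z
    # component of the translation vector, not its full norm.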

    def correctGrid(self, img, grid):
        '''
        grid -> array of polylines = ((p0x, p0y), (p1x, p1y), ...)
        '''

        self.img = imread(img)
        h = self.homography  # TODO: cleanup only needed to get newBorder attr.

        if self.opts['do_correctIntensity']:
            self.img = self.img / self._getTiltFactor(self.img.shape)

        s0, s1 = grid.shape[:2]
        n0, n1 = s0 - 1, s1 - 1

        snew = self._newBorders
        b = self.opts['border']

        sx, sy = (snew[0] - 2 * b) // n0, (snew[1] - 2 * b) // n1

        out = np.empty(snew[::-1], dtype=self.img.dtype)

        def warp(ix, iy, objP, outcut):
            shape = outcut.shape[::-1]
            quad = grid[ix:ix + 2,
                        iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
            hcell = cv2.getPerspectiveTransform(
                quad.astype(np.float32), objP)
            cv2.warpPerspective(self.img, hcell, shape, outcut,
                                flags=cv2.INTER_LANCZOS4,
                                **self.opts['cv2_opts'])
            return quad

        objP = np.array([[0, 0],
                         [sx, 0],
                         [sx, sy],
                         [0, sy]], dtype=np.float32)
        # INNER CELLS
        for ix in range(1, n0 - 1):
            for iy in range(1, n1 - 1):
                sub = out[iy * sy + b: (iy + 1) * sy + b,
                          ix * sx + b: (ix + 1) * sx + b]
#                 warp(ix, iy, objP, sub)

                shape = sub.shape[::-1]
                quad = grid[ix:ix + 2,
                            iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
#                 print(quad, objP)

                hcell = cv2.getPerspectiveTransform(
                    quad.astype(np.float32), objP)
                cv2.warpPerspective(self.img, hcell, shape, sub,
                                    flags=cv2.INTER_LANCZOS4,
                                    **self.opts['cv2_opts'])

#         return out
        # TOP CELLS
        objP[:, 1] += b
        for ix in range(1, n0 - 1):
            warp(ix, 0, objP, out[: sy + b,
                                  ix * sx + b: (ix + 1) * sx + b])
        # BOTTOM CELLS
        objP[:, 1] -= b
        for ix in range(1, n0 - 1):
            iy = (n1 - 1)
            y = iy * sy + b
            x = ix * sx + b
            warp(ix, iy, objP, out[y: y + sy + b, x: x + sx])
        # LEFT CELLS
        objP[:, 0] += b
        for iy in range(1, n1 - 1):
            y = iy * sy + b
            warp(0, iy, objP, out[y: y + sy, : sx + b])
        # RIGHT CELLS
        objP[:, 0] -= b
        ix = (n0 - 1)
        x = ix * sx + b
        for iy in range(1, n1 - 1):
            y = iy * sy + b
            warp(ix, iy, objP, out[y: y + sy, x: x + sx + b])
        # BOTTOM RIGHT CORNER
        warp(n0 - 1, n1 - 1, objP, out[-sy - b - 1:, x: x + sx + b])
#         #TOP LEFT CORNER
        objP += (b, b)
        warp(0, 0, objP, out[0: sy + b, 0: sx + b])
        # TOP RIGHT CORNER
        objP[:, 0] -= b
#         x = (n0-1)*sx+b
        warp(n0 - 1, 0, objP, out[: sy + b, x: x + sx + b])
#         #BOTTOM LEFT CORNER
        objP += (b, -b)
        warp(0, n1 - 1, objP, out[-sy - b - 1:, : sx + b])
        return out
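
    # correctGrid() warps the image cell by cell: every grid cell gets its own
    # homography (cv2.getPerspectiveTransform on its four corners), so local
    # deviations from a single global perspective are corrected as well; the
    # border cells additionally cover the `border` margin on their outer side.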

    def uncorrect(self, img):
        img = imread(img)
        s = img.shape[:2]
        return cv2.warpPerspective(img, self.homography, s[::-1],
                                   flags=cv2.INTER_CUBIC | cv2.WARP_INVERSE_MAP)

    def correct(self, img):
        '''
        correct the given image for perspective distortion:
         --> perspective transformation
         --> apply tilt factor (view factor) correction
        '''
        print("CORRECT PERSPECTIVE ...")
        self.img = imread(img)

        if not self._homography_is_fixed:
            self._homography = None
        h = self.homography

        if self.opts['do_correctIntensity']:
            tf = self.tiltFactor()
            self.img = np.asfarray(self.img)
            if self.img.ndim == 3:
                for col in range(self.img.shape[2]):
                    self.img[..., col] /= tf
            else:
                self.img = self.img / tf
        warped = cv2.warpPerspective(self.img,
                                     h,
                                     self._newBorders[::-1],
                                     flags=cv2.INTER_LANCZOS4,
                                     **self.opts['cv2_opts'])
        return warped

    def correctPoints(self, pts):
        if not self._homography_is_fixed:
            self._homography = None
        h = self.homography
        if pts.ndim == 2:
            pts = pts.reshape(1, *pts.shape)
        return cv2.perspectiveTransform(pts.astype(np.float32), h)

    @property
    def camera_position(self, pose=None):
        '''
        returns the camera position in world coordinates, computed from the
        current pose (tvec, rvec); see
        http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
        '''
        if pose is None:
            pose = self.pose()
        t, r = pose
        return -np.matrix(cv2.Rodrigues(r)[0]).T * np.matrix(t)

    def planeSfN(self, rvec):
        # surface normal of the (undistorted) object plane,
        # from the rotation part of the pose:
        rot = cv2.Rodrigues(rvec)[0]
        aa = np.array([0., 0., 1.])
        return aa.dot(rot)

    def depthMap(self, midpointdepth=None, pose=None):
        shape = self.opts['shape']
        if pose is None:
            pose = self.pose()
        t, r = pose

        n = self.planeSfN(r)
        # z component from plane-equation solved for z:
        zpart = np.fromfunction(
            lambda y, x: (-n[0] * x - n[1] * y) / -n[2], shape)

        ox, oy = self.objCenter()
        v = zpart[int(oy), int(ox)]

        if midpointdepth is None:
            # TODO: review
            midpointdepth = t[2, 0]

        zpart += midpointdepth - v
        return zpart

    def cam2PlaneVectorField(self, midpointdepth=None, **kwargs):
        t, r = self.pose()
        shape = self.opts['shape']

        cam = self.opts['cameraMatrix']
        # move reference point from top left quad corner to
        # optical center:
#         q0 = self.quad[0]
        q0 = self.objCenter()
#         dx,dy = cam[0,2]-q0[0], cam[1,2]-q0[1]
        dx, dy = shape[1] // 2 - q0[0], shape[0] // 2 - q0[1]

        # x,y component of undist plane:
        rot0 = np.array([0, 0, 0], dtype=float)
        worldCoord = np.fromfunction(
            lambda x, y: imgPointToWorldCoord((y - dy, x - dx), rot0, t, cam),
            shape).reshape(3, *shape)
        # z component from plane-equation solved for z:
        n = self.planeSfN(r)
        x, y = worldCoord[:2]
        zpart = (-n[0] * x - n[1] * y) / (-n[2])
        ox, oy = self.objCenter()
        v = zpart[int(oy), int(ox)]

        if midpointdepth is None:
            # TODO: review
            midpointdepth = t[2, 0]
        zpart += midpointdepth - v
        worldCoord[2] = zpart
        return worldCoord

    # BEFORE REMOVING THINGS: MAKE EXTRA FN
    def viewAngle(self, **kwargs):
        '''
        calculate the view factor between one small and one finite surface
        vf = 1/pi * integral(cos(beta1)*cos(beta2)/s**2) * dA
        according to the VDI Heat Atlas 2010, p. 961
        '''
        v0 = self.cam2PlaneVectorField(**kwargs)
        # obj cannot be behind camera
        v0[2][v0[2] < 0] = np.nan

        _t, r = self.pose()
        n = self.planeSfN(r)
        # because of different x,y orientation:
        n[2] *= -1
#         beta2 = vectorAngle(v0, vectorToField(n) )
        beta2 = vectorAngle(v0, n)
        return beta2
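
    # In the view-factor formula above, beta2 is realised as the angle between
    # each camera-to-surface vector (cam2PlaneVectorField) and the plane
    # normal (planeSfN); points behind the camera are masked with NaN first.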

    def foreground(self, quad=None):
        '''return foreground (quad) mask'''
        fg = np.zeros(shape=self._newBorders[::-1], dtype=np.uint8)
        if quad is None:
            quad = self.quad
        else:
            quad = quad.astype(np.int32)
        cv2.fillConvexPoly(fg, quad, 1)
        return fg.astype(bool)

    def tiltFactor(self, midpointdepth=None,
                   printAvAngle=False):
        '''
        get tilt factor from inverse distance law
        https://en.wikipedia.org/wiki/Inverse-square_law
        '''
        # TODO: can also be only def. with FOV, rot, tilt
        beta2 = self.viewAngle(midpointdepth=midpointdepth)
        try:
            angles, vals = getattr(
                emissivity_vs_angle, self.opts['material'])()
        except AttributeError:
            raise AttributeError("material[%s] is not in list of known materials: %s" % (
                self.opts['material'], [o[0] for o in getmembers(emissivity_vs_angle)
                                        if isfunction(o[1])]))
        if printAvAngle:
            avg_angle = beta2[self.foreground()].mean()
            print('angle: %s DEG' % np.degrees(avg_angle))

        # use averaged angle instead of beta2 to not overemphasize correction
        normEmissivity = np.clip(
            InterpolatedUnivariateSpline(
                np.radians(angles), vals)(beta2), 0, 1)
        return normEmissivity

    @property
    def areaRatio(self):
        # AREA RATIO AFTER/BEFORE THE WARP;
        # quad area via the cross-diagonal (shoelace) formula:
        if self.quad is None:
            q = self.quadFromH()[0]
        else:
            q = self.quad
        quad_size = 0.5 * abs((q[2, 0] - q[0, 0]) * (q[3, 1] - q[1, 1]) +
                              (q[3, 0] - q[1, 0]) * (q[0, 1] - q[2, 1]))
        sx, sy = self._newBorders

        return (sx * sy) / quad_size

    def standardUncertainties(self, focal_Length_mm, f_number, midpointdepth=1000,
                              focusAtYX=None,
                              # sigma_best_focus=0,
                              # quad_pos_err=0,
                              shape=None,
                              uncertainties=(0, 0)):
        '''
        focusAtYX - image position (y, x) which is in focus
            if not set, the image middle is assumed to be in focus
        sigma_best_focus - standard deviation of the PSF
                             within the best focus (default blur)
        uncertainties - contributors to the standard uncertainty;
                        these need to be perspective transformed to fit the
                        new image shape
        '''
        # TODO: consider quad_pos_error
        # (also influences intensity corr map)

        if shape is None:
            s = self.img.shape
        else:
            s = shape

        # 1. DEFOCUS DUE TO DEPTH OF FIELD
        ##################################
        depthMap = self.depthMap(midpointdepth)
        if focusAtYX is None:
            # assume image middle is in-focus:
            focusAtYX = s[0] // 2, s[1] // 2
        infocusDepth = depthMap[focusAtYX]
        depthOfField_blur = defocusThroughDepth(
            depthMap, infocusDepth, focal_Length_mm, f_number, k=2.335)

        # 2. INCREASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN
        #    PIXELS MOVED APART
        ######################################################
        # index maps:
        py, px = np.mgrid[0:s[0], 0:s[1]]
        # warped index maps:
        wx = cv2.warpPerspective(np.asfarray(px), self.homography,
                                 self._newBorders,
                                 borderValue=np.nan,
                                 flags=cv2.INTER_LANCZOS4)
        wy = cv2.warpPerspective(np.asfarray(py), self.homography,
                                 self._newBorders,
                                 borderValue=np.nan,
                                 flags=cv2.INTER_LANCZOS4)

        pxSizeFactorX = 1 / np.abs(np.gradient(wx)[1])
        pxSizeFactorY = 1 / np.abs(np.gradient(wy)[0])

        # WARP ALL FIELD TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR:
        depthOfField_blur = cv2.warpPerspective(
            depthOfField_blur, self.homography, self._newBorders,
            borderValue=np.nan,
        )

        # perspective transform given uncertainties:
        warpedU = []
        for u in uncertainties:
            #             warpedU.append([])
            #             for i in u:
            # print i, type(i), isinstance(i, np.ndarray)
            if isinstance(u, np.ndarray) and u.size > 1:
                u = cv2.warpPerspective(u, self.homography,
                                        self._newBorders,
                                        borderValue=np.nan,
                                        flags=cv2.INTER_LANCZOS4)  # *f

            else:
                # multiply with area ratio: after/before perspective warp
                u *= self.areaRatio

            warpedU.append(u)

        # given uncertainties after warp:
        ux, uy = warpedU

        ux = pxSizeFactorX * (ux**2 + depthOfField_blur**2)**0.5
        uy = pxSizeFactorY * (uy**2 + depthOfField_blur**2)**0.5

        # TODO: remove depthOfField_blur,fx,fy from return
        return ux, uy, depthOfField_blur, pxSizeFactorX, pxSizeFactorY
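
    # Combined per-pixel uncertainty model used above (a summary of the code,
    # not new behaviour): blur contributions add in quadrature and are scaled
    # by the local pixel-size change introduced by the warp:
    #
    #   u_x = pxSizeFactorX * sqrt(ux**2 + depthOfField_blur**2)
    #   u_y = pxSizeFactorY * sqrt(uy**2 + depthOfField_blur**2)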

    def pose(self):
        #         if self.tvec is None:
        #         if self._pose is None:
        if self.quad is not None:
            self._pose = self._poseFromQuad()
        else:
            self._pose = self._poseFromHomography()
        return self._pose

    def setPose(self, obj_center=None, distance=None,
                rotation=None, tilt=None, pitch=None):
        tvec, rvec = self.pose()

        if distance is not None:
            tvec[2, 0] = distance
        if obj_center is not None:
            tvec[0, 0] = obj_center[0]
            tvec[1, 0] = obj_center[1]

        if rotation is None and tilt is None:
            return rvec
        r, t, p = rvec2euler(rvec)
        if rotation is not None:
            r = np.radians(rotation)
        if tilt is not None:
            t = np.radians(tilt)
        if pitch is not None:
            p = np.radians(pitch)
        rvec = euler2rvec(r, t, p)

        self._pose = tvec, rvec

    def _poseFromHomography(self):
        quad = self.quadFromH()
        return self._poseFromQuad(quad)

    def quadFromH(self):
        sy, sx = self.img.shape[:2]
        # image edges:
        objP = np.array([[
            [0, 0],
            [sx, 0],
            [sx, sy],
            [0, sy],
        ]], dtype=np.float32)
        return cv2.perspectiveTransform(objP, self._Hinv)

    def objCenter(self):
        if self.quad is None:
            sy, sx = self.img.shape[:2]
            return sx // 2, sy // 2
        return self.quad[:, 0].mean(), self.quad[:, 1].mean()

    def _poseFromQuad(self, quad=None):
        '''
        estimate the pose of the object plane using quad
            setting:
        self.rvec -> rotation vector
        self.tvec -> translation vector
        '''
        if quad is None:
            quad = self.quad
        if quad.ndim == 3:
            quad = quad[0]
        # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
        # Find the rotation and translation vectors.
        img_pn = np.ascontiguousarray(quad[:, :2],
                                      dtype=np.float32).reshape((4, 1, 2))

        obj_pn = self.obj_points - self.obj_points.mean(axis=0)
        retval, rvec, tvec = cv2.solvePnP(
            obj_pn,
            img_pn,
            self.opts['cameraMatrix'],
            self.opts['distCoeffs'],
            flags=cv2.SOLVEPNP_P3P  # because exactly four points are given
        )
        if not retval:
            print("Couln't estimate pose")
        return tvec, rvec
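
    # Note on the solver flag above: cv2.SOLVEPNP_P3P requires exactly four
    # point correspondences, which is why it fits the quad case here.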

    @property
    def obj_points(self):
        if self._obj_points is None:

            if self.refQuad is not None:
                quad = self.refQuad
            else:
                quad = self.quad

            try:
                # estimate size
                sy, sx = self.opts['new_size']
#                 sy = self.opts['obj_height_mm']
                aspectRatio = sx / sy
            except TypeError:
                aspectRatio = calcAspectRatioFromCorners(quad,
                                                         self.opts['in_plane'])
                print('aspect ratio assumed to be %s' % aspectRatio)
            # output size:
            if None in self._newBorders:
                b = self.opts['border']
                ssx, ssy = self._calcQuadSize(quad, aspectRatio)
                bx, by = self._newBorders
                if bx is None:
                    bx = int(round(ssx + 2 * b))
                if by is None:
                    by = int(round(ssy + 2 * b))
                self._newBorders = (bx, by)

            if None in (sx, sy):
                sx, sy = self._newBorders

            # image edges:
            self._obj_points = np.float32([
                [0,  0, 0],
                [sx, 0, 0],
                [sx, sy, 0],
                [0,  sy, 0]])

        return self._obj_points

    def drawQuad(self, img=None, quad=None, thickness=30):
        '''
        Draw the quad into given img 
        '''
        if img is None:
            img = self.img
        if quad is None:
            quad = self.quad
        q = np.int32(quad)
        c = int(img.max())
        cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness)
        cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness)
        cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness)
        cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness)
        return img

    def draw3dCoordAxis(self, img=None, thickness=8):
        '''
        draw the 3d coordinate axes into the given image
        if img is False:
            create an empty image
        '''
        if img is None:
            img = self.img
        elif img is False:
            img = np.zeros(shape=self.img.shape, dtype=self.img.dtype)
        else:
            img = imread(img)
        # project 3D points to image plane:
        # self.opts['obj_width_mm'], self.opts['obj_height_mm']
        w, h = self.opts['new_size']
        axis = np.float32([[0.5 * w, 0.5 * h, 0],
                           [w, 0.5 * h, 0],
                           [0.5 * w, h, 0],
                           [0.5 * w, 0.5 * h, -0.5 * w]])
        t, r = self.pose()
        imgpts = cv2.projectPoints(axis, r, t,
                                   self.opts['cameraMatrix'],
                                   self.opts['distCoeffs'])[0]

        mx = int(img.max())
        origin = tuple(imgpts[0].ravel())
        cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness)
        cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness)
        cv2.line(
            img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0), thickness * 2)
        return img

    @staticmethod
    def _calcQuadSize(corners, aspectRatio):
        '''
        return the size of a rectangle in perspective distortion in [px]
        DEBUG: PUT THAT BACK IN??::
            if aspectRatio is not given it will be determined
        '''
        if aspectRatio > 1:  # x is bigger -> reduce y
            x_length = PerspectiveCorrection._quadXLength(corners)
            y = x_length / aspectRatio
            return x_length, y
        else:  # y is bigger -> reduce x
            y_length = PerspectiveCorrection._quadYLength(corners)
            x = y_length * aspectRatio
            return x, y_length

    @staticmethod
    def _quadYLength(corners):
        ll = PerspectiveCorrection._linelength
        l0 = (corners[1], corners[2])
        l1 = (corners[0], corners[3])
        return max(ll(l0), ll(l1))

    @staticmethod
    def _quadXLength(corners):
        ll = PerspectiveCorrection._linelength
        l0 = (corners[0], corners[1])
        l1 = (corners[2], corners[3])
        return max(ll(l0), ll(l1))

    @staticmethod
    def _linelength(line):
        p0, p1 = line
        x0, y0 = p0
        x1, y1 = p1
        dx = x1 - x0
        dy = y1 - y0
        return (dx**2 + dy**2)**0.5
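
# Round-trip sketch for PerspectiveCorrection (hypothetical file name, corner
# coordinates and angles): distort the reference view by known angles, then
# recover it. distort() re-estimates the pose, so correct() can undo the warp:
import cv2
import numpy as np

img = cv2.imread('module.png', cv2.IMREAD_GRAYSCALE)
pc = PerspectiveCorrection(img.shape)
pc.setReference(np.array([(10, 12), (620, 8), (630, 470), (5, 465)]))
warped = pc.distort(img, rotX=20, rotY=10)
restored = pc.correct(warped)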
Example #9
class PerspectiveTransformation(object):
    '''
    fit or add an image to the first image of this display 
    using perspective transformations
    '''
    def __init__(self, img):
        '''
        @param img -> reference image
        '''
        self.img_orig = img
        self._firstTime = True
        self.pattern = PatternRecognition(img)
  

    def fitImg(self, img_rgb):
        '''
        fit perspective and size of the input image to the base image
        '''
        H = self.pattern.findHomography(img_rgb)[0]
        H_inv = self.pattern.invertHomography(H)
        s = self.img_orig.shape
        warped = cv2.warpPerspective(img_rgb, H_inv, (s[1],s[0]) )
        return warped


    def addImg(self, img, overlap=None, direction='bottom'):
        '''
        '''
        assert direction == 'bottom', \
            'only direction="bottom" is implemented so far'

        # CUT IMAGE TO ONLY COMPARE POINTS AT OVERLAP:
        if overlap is not None:
            # only direction bottom for now...
            s = self.img_orig.shape
            oimgcut = self.img_orig[s[0] - overlap:, :]
            imgcut = img[:overlap, :]
        else:
            oimgcut = self.img_orig
            imgcut = img

        # PATTERN COMPARISON:
        if not self._firstTime or overlap is not None:
            self.pattern = PatternRecognition(oimgcut)
        (H, inlierRatio) = self.pattern.findHomography(imgcut)[0:2]
        H_inv = self.pattern.invertHomography(H)

        # STITCH:
        self.img_orig = self._stitchImg(H_inv, inlierRatio, img, overlap)
        self._firstTime = False
        return self.img_orig


    def addDir(self, image_dir, img_filter=None):  
        '''
        @param image_dir -> 'directory' containing all images
        @param img_filter -> 'JPG'; None->Take all images
        '''
        dir_list = []
        try:
            dir_list = os.listdir(image_dir)
            if img_filter:
                # keep only files whose name contains img_filter
                dir_list = [x for x in dir_list if x.find(img_filter) > -1]
            try:  # remove Thumbs.db if present (Windows only)
                dir_list.remove('Thumbs.db')
            except ValueError:
                pass
        except OSError:
            raise IOError("Unable to open directory: %s" % image_dir)
        dir_list = [os.path.join(image_dir, x) for x in dir_list]
        dir_list = [x for x in dir_list if x != image_dir]
        return self._stitchDirRecursive(dir_list, 0)
        

    def filterMatches(self, matches, ratio=0.75):
        filtered_matches = []
        for m in matches:
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                filtered_matches.append(m[0])
        return filtered_matches
   
    
    def imageDistance(self, matches):
        sumDistance = 0.0
        for match in matches:
            sumDistance += match.distance
        return sumDistance
   
    
    def findDimensions(self, image, homography):
        base_p1 = np.ones(3, np.float32)
        base_p2 = np.ones(3, np.float32)
        base_p3 = np.ones(3, np.float32)
        base_p4 = np.ones(3, np.float32)
    
        (y, x) = image.shape[:2]
    
        base_p1[:2] = [0,0]
        base_p2[:2] = [x,0]
        base_p3[:2] = [0,y]
        base_p4[:2] = [x,y]
    
        max_x = None
        max_y = None
        min_x = None
        min_y = None
    
        for pt in [base_p1, base_p2, base_p3, base_p4]:
    
            hp = np.matrix(homography, np.float32) * np.matrix(pt, np.float32).T
            hp_arr = np.array(hp, np.float32)
            normal_pt = np.array([hp_arr[0]/hp_arr[2], hp_arr[1]/hp_arr[2]], np.float32)

            if max_x is None or normal_pt[0, 0] > max_x:
                max_x = normal_pt[0, 0]
            if max_y is None or normal_pt[1, 0] > max_y:
                max_y = normal_pt[1, 0]
            if min_x is None or normal_pt[0, 0] < min_x:
                min_x = normal_pt[0, 0]
            if min_y is None or normal_pt[1, 0] < min_y:
                min_y = normal_pt[1, 0]
    
        min_x = min(0, min_x)
        min_y = min(0, min_y)
    
        return (min_x, min_y, max_x, max_y)

    
    def _stitchDirRecursive(self, dir_list, recursion_round=0):
        if len(dir_list) < 1:
            return self.base_img_rgb
        # Find key points in base image for motion estimation
        self.detector.detectAndCompute(self.base_img, None)

        print("Iterating through next images...")
    
        closestImage = None
    
        # TODO: Thread this loop since each iteration is independent
    
        # Find the best next image from the remaining images
        for next_img_path in dir_list:
    
            print "Reading %s..." % next_img_path
    
            next_img_rgb = cv2.imread(next_img_path)

            (H, inlierRatio, averagePointDistance,
             next_img, next_features,
             next_descs, matches_subset) = self.pattern.findHomography(next_img_rgb)

            # if closestImage is None or averagePointDistance < closestImage['dist']:
            if closestImage is None or inlierRatio > closestImage['inliers']:
                closestImage = {'h': H,
                                'inliers': inlierRatio,
                                'dist': averagePointDistance,
                                'path': next_img_path,
                                'rgb': next_img_rgb,
                                'img': next_img,
                                'feat': next_features,
                                'desc': next_descs,
                                'match': matches_subset}

        print("Closest Image: ", closestImage['path'])
        print("Closest Image Ratio: ", closestImage['inliers'])

        dir_list = [x for x in dir_list if x != closestImage['path']]

        self.base_img_rgb = self._stitchImg(closestImage)
        self.base_img = cv2.GaussianBlur(
            cv2.cvtColor(self.base_img_rgb, cv2.COLOR_BGR2GRAY), (5, 5), 0)

        return self._stitchDirRecursive(dir_list, recursion_round + 1)


    def _stitchImg(self, H_inv, inliers, img, overlap=0):
        # TODO: use img_orig which can be a float array
        # to return stitched results as a float array of the same kind

        isColor = img.ndim == 3

        if inliers > 0.1:  # and

            # add translation to homography to consider overlap:
            if overlap:
                H_inv[1, 2] += self.img_orig.shape[0] - overlap

            (min_x, min_y, max_x, max_y) = self.findDimensions(img, H_inv)
    
            # Adjust max_x and max_y by base img size
            max_x = max(max_x, self.img_orig.shape[1])
            max_y = max(max_y, self.img_orig.shape[0])
    
            move_h = np.matrix(np.identity(3), np.float32)
    
            if min_x < 0:
                move_h[0, 2] += -min_x
                max_x += -min_x

            if min_y < 0:
                move_h[1, 2] += -min_y
                max_y += -min_y

#             print("Homography: \n", H)
            print("Inverse Homography: \n", H_inv)
            print("Min Points: ", (min_x, min_y))

            mod_inv_h = move_h * H_inv

            img_w = int(math.ceil(max_x))
            img_h = int(math.ceil(max_y))

            print("New Dimensions: ", (img_w, img_h))

            # Warp the new image given the homography from the old image
            base_img_warp = cv2.warpPerspective(
                self.img_orig, move_h, (img_w, img_h))
            print("Warped base image")

            next_img_warp = cv2.warpPerspective(img, mod_inv_h, (img_w, img_h))
            print("Warped next image")

            # Put the base image on an enlarged palette
            if isColor:
                enlarged_base_img = np.zeros((img_h, img_w, 3), np.uint8)
            else:
                enlarged_base_img = np.zeros((img_h, img_w), np.uint8)

            print("Enlarged Image Shape: ", enlarged_base_img.shape)
            print("Base Image Shape: ", self.img_orig.shape)
            print("Base Image Warp Shape: ", base_img_warp.shape)
    

            # Create a mask from the warped image for constructing masked composite
            if isColor:
                d = np.sum(next_img_warp, axis=-1)
            else:
                d = next_img_warp

            # Now add the warped image
            data_map = d == 0
            enlarged_base_img[data_map] = base_img_warp[data_map]
            final_img = enlarged_base_img + next_img_warp

            # average overlap:
            if isColor:
                dd = np.sum(base_img_warp, axis=-1)
            else:
                dd = base_img_warp
            mask = np.logical_and(d != 0, dd != 0)
            av = (0.5 * cv2.subtract(base_img_warp[mask],
                                     next_img_warp[mask])
                  ).astype(final_img.dtype)
            if not isColor:
                av = av[:, 0]
            final_img[mask] += av

#             final_img = cv2.add(enlarged_base_img, next_img_warp, 
#                 dtype=cv2.CV_8U)

            # Crop off the black edges
#             final_gray = self._rgb2Gray(final_img)
#             _, thresh = cv2.threshold(final_gray, 1, 255, cv2.THRESH_BINARY)
#             
            thresh = final_img > 0
            if isColor:
                thresh = np.sum(thresh, axis=-1)  # collapse the color axis to a 2D mask

            # findContours returns (contours, hierarchy) in OpenCV 2/4 and
            # (image, contours, hierarchy) in OpenCV 3; [-2] covers both:
            contours = cv2.findContours(thresh.astype(np.uint8),
                                        cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_NONE)[-2]
            print("Found %d contours..." % len(contours))
    
            max_area = 0
            best_rect = (0, 0, 0, 0)

            for cnt in contours:
                x, y, w, h = cv2.boundingRect(cnt)
                # print "Bounding Rectangle: ", (x,y,w,h)

                # cv2.boundingRect already returns (x, y, width, height),
                # so the area is simply w * h:
                area = w * h

                if area > max_area and h > 0 and w > 0:
                    max_area = area
                    best_rect = (x, y, w, h)
    
            if max_area > 0:
                print("Maximum Contour: ", max_area)
                print("Best Rectangle: ", best_rect)

                final_img_crop = final_img[
                    best_rect[1]:best_rect[1] + best_rect[3],
                    best_rect[0]:best_rect[0] + best_rect[2]]

                final_img = final_img_crop
    
            return final_img
    
        else:
            return self.base_img_rgb
Example #10
class PerspectiveCorrection(object):
    def __init__(self,
                 img_shape,
                 obj_height_mm=None,
                 obj_width_mm=None,
                 cameraMatrix=None,
                 distCoeffs=0,
                 do_correctIntensity=False,
                 new_size=None,
                 in_plane=False,
                 cv2_opts={}):
        '''
        correction(warp+intensity factor) + uncertainty due to perspective distortion
        
        new_size = (sizey, sizex)
            if either sizey or sizex is None, the resulting size will be set
            using an (assumed) aspect ratio

        in_plane=True --> object has no tilt; only in-plane rotation and
            translation are assumed
        
        this class saves all parameter maps in self.maps
        !!!
        given images need to be already free from lens distortion
        and distCoeffs should be 0
        !!!
        '''
        self.opts = {
            'obj_height_mm': obj_height_mm,
            'obj_width_mm': obj_width_mm,
            'distCoeffs': distCoeffs,
            'do_correctIntensity': do_correctIntensity,
            'new_size': new_size,
            'in_plane': in_plane,
            'cv2_opts': cv2_opts
        }
        if cameraMatrix is None:
            cameraMatrix = genericCameraMatrix(img_shape)
        self.opts['cameraMatrix'] = cameraMatrix

    def setReference(self, ref):
        '''
        ref  ... either quad, grid, homography or reference image
        
        quad --> list of four image points(x,y) marking the edges of the quad
               to correct
        homography --> h. matrix to correct perspective distortion
        referenceImage --> image of same object without perspective distortion
        '''
        self.maps = {}
        self.quad = None
        self._obj_points = None
        self._camera_position = None
        self._homography = None
        self._homography_is_fixed = True
        self.tvec, self.rvec = None, None

        #evaluate input:
        if isinstance(ref, np.ndarray) and ref.shape == (3, 3):
            #REF IS HOMOGRAPHY
            self._homography = ref
            #REF IS QUAD
        elif len(ref) == 4:
            self.quad = sortCorners(ref)
            #REF IS IMAGE
        else:
            self.pattern = PatternRecognition(imread(ref))
            self._homography_is_fixed = False

    @property
    def homography(self):
        if self._homography is None:
            if self.quad is not None:
                #GET HOMOGRAPHY FROM QUAD
                fixedX, fixedY = None, None
                try:
                    #if a new size is given
                    sx, sy = self.opts['new_size']
                    if sx is None and sy is not None:
                        fixedY = sy
                        raise TypeError()
                    elif sx is not None and sy is None:
                        fixedX = sx
                        raise TypeError()
                except TypeError:
                    try:
                        #estimate size
                        w = self.opts['obj_width_mm']
                        h = self.opts['obj_height_mm']
                        aspectRatio = float(w) / h
                    except TypeError:
                        aspectRatio = calcAspectRatioFromCorners(
                            self.quad, self.opts['in_plane'])
                        print('aspect ratio assumed to be %s' % aspectRatio)
                    #new image border keeping aspect ratio
                    if fixedX or fixedY:
                        if fixedX:
                            sx = fixedX
                            sy = sx / aspectRatio
                        else:
                            sy = fixedY
                            sx = sy * aspectRatio
                    else:
                        sx, sy = self._calcQuadSize(self.quad, aspectRatio)

                self._newBorders = (int(round(sx)), int(round(sy)))
                #image edges:
                objP = np.array([
                    [0, 0],
                    [sx, 0],
                    [sx, sy],
                    [0, sy],
                ],
                                dtype=np.float32)
                self._homography = cv2.getPerspectiveTransform(
                    self.quad.astype(np.float32), objP)
            else:
                #GET HOMOGRAPHY USING PATTERN RECOGNITION
                self._Hinv = h = self.pattern.findHomography(self.img)[0]
                self._homography = self.pattern.invertHomography(h)
                s = self.img.shape
                self._newBorders = (s[1], s[0])

        return self._homography
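
    # The homography above maps the distorted quad onto an axis-aligned
    # rectangle of size self._newBorders: cv2.getPerspectiveTransform solves
    # the exact 4-point correspondence, while the pattern-recognition branch
    # estimates the same mapping from feature matches instead.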

    def distort(self, img, rotX=0, rotY=0, quad=None):
        '''
        Apply perspective distortion to self.img
        angles are in DEG and need to be positive to fit into the image
        '''
        self.img = imread(img)
        #fit old image to self.quad:
        corr = self.correct(self.img)
        s = self.img.shape
        if quad is None:
            wquad = (self.quad - self.quad.mean(axis=0)).astype(float)

            win_width = s[1]
            win_height = s[0]
            #project quad:
            for n, q in enumerate(wquad):
                p = Point3D(q[0], q[1], 0).rotateX(-rotX).rotateY(-rotY)
                p = p.project(win_width, win_height, s[1], s[1])
                wquad[n] = (p.x, p.y)
            wquad = sortCorners(wquad)
            #scale result so that longest side of quad and wquad are equal
            w = wquad[:, 0].max() - wquad[:, 0].min()
            h = wquad[:, 1].max() - wquad[:, 1].min()
            scale = min(s[1] / w, s[0] / h)
            #scale:
            wquad = (wquad * scale).astype(int)
        else:
            wquad = sortCorners(quad)
        wquad -= wquad.min(axis=0)

        lx = corr.shape[1]
        ly = corr.shape[0]

        objP = np.array([
            [0, 0],
            [lx, 0],
            [lx, ly],
            [0, ly],
        ],
                        dtype=np.float32)

        homography = cv2.getPerspectiveTransform(wquad.astype(np.float32),
                                                 objP)
        #distort corr:
        w = wquad[:, 0].max() - wquad[:, 0].min()
        h = wquad[:, 1].max() - wquad[:, 1].min()
        #(int(w),int(h))
        dist = cv2.warpPerspective(corr,
                                   homography, (int(w), int(h)),
                                   flags=cv2.INTER_CUBIC
                                   | cv2.WARP_INVERSE_MAP)
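
        # With WARP_INVERSE_MAP, warpPerspective applies `homography`
        # (wquad -> rectangle) directly as the dst->src lookup: each pixel
        # of the distorted output samples the corrected image, which
        # re-projects the rectangle back into the tilted quad.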

        #move middle of dist to middle of the old quad:
        bg = np.zeros(shape=s)
        rmn = (bg.shape[0] / 2, bg.shape[1] / 2)

        ss = dist.shape
        mn = (ss[0] / 2, ss[1] / 2)  #wquad.mean(axis=0)
        ref = (int(rmn[0] - mn[0]), int(rmn[1] - mn[1]))

        bg[ref[0]:ss[0] + ref[0], ref[1]:ss[1] + ref[1]] = dist

        #finally move quad into right position:
        self.quad = wquad
        self.quad += (ref[1], ref[0])
        self.img = bg
        self._homography = None
        self._poseFromQuad()

        if self.opts['do_correctIntensity']:
            self.img *= self._getTiltFactor(self.img)

        return self.img

    def _getTiltFactor(self, img):
        #CALCULATE VIGNETTING OF WARPED OBJECT:
        _, r = self.pose()
        eulerAngles = euler.mat2euler(cv2.Rodrigues(r)[0], axes='rzxy')

        tilt = eulerAngles[1]
        rot = eulerAngles[0]
        f = self.opts['cameraMatrix'][0, 0]
        s = img.shape
        self.maps['tilt_factor'] = tf = np.fromfunction(
            lambda x, y: tiltFactor((x, y), f=f, tilt=tilt, rot=rot), s[:2])
        #if img is color:
        if tf.shape != s:
            tf = np.repeat(tf, s[-1]).reshape(s)
        return tf
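
    # tiltFactor presumably (assumption, not shown here) models the relative
    # intensity falloff across a plane tilted by `tilt` and rotated by `rot`
    # as seen through a lens of focal length f, so dividing an image by this
    # map flattens the perspective-induced brightness gradient.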

    def correctGrid(self, img, grid):
        '''
        grid -> array of polylines=((p0x,p0y),(p1x,p1y),,,)
        '''

        self.img = imread(img)
        h = self.homography  # TODO: cleanup; only needed to set _newBorders

        if self.opts['do_correctIntensity']:
            self.img = self.img / self._getTiltFactor(self.img)

        snew = self._newBorders
        warped = np.empty(snew[::-1], dtype=self.img.dtype)
        s0, s1 = grid.shape[:2]
        nx, ny = s0 - 1, s1 - 1
        # integer cell size; snew is (width, height)
        sx, sy = snew[0] // nx, snew[1] // ny

        objP = np.array([[0, 0], [sx, 0], [sx, sy], [0, sy]], dtype=np.float32)

        for ix in range(nx):
            for iy in range(ny):
                quad = grid[ix:ix + 2,
                            iy:iy + 2].reshape(4, 2)[np.array([0, 2, 3, 1])]
                hcell = cv2.getPerspectiveTransform(quad.astype(np.float32),
                                                    objP)
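
                # each grid cell gets its own exact 4-point homography and
                # is warped into the matching (sy x sx) tile of `warped`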

                cv2.warpPerspective(self.img,
                                    hcell, (sx, sy),
                                    warped[iy * sy:(iy + 1) * sy,
                                           ix * sx:(ix + 1) * sx],
                                    flags=cv2.INTER_LANCZOS4,
                                    **self.opts['cv2_opts'])
        return warped

    def correct(self, img):
        '''
        correct the given image for perspective distortion:
         --> perspective transformation
         --> apply tilt factor (view factor) correction
        '''
        self.img = imread(img)

        if not self._homography_is_fixed:
            self._homography = None
        h = self.homography
        if self.opts['do_correctIntensity']:
            self.img = self.img / self._getTiltFactor(self.img)
        warped = cv2.warpPerspective(self.img,
                                     h,
                                     self._newBorders,
                                     flags=cv2.INTER_LANCZOS4,
                                     **self.opts['cv2_opts'])
        return warped
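
    # Usage sketch (hypothetical names): besides a quad, setReference also
    # accepts a 3x3 homography or an undistorted reference image:
    #     pc = PerspectiveCorrection(img.shape)
    #     pc.setReference(imread('reference_undistorted.png'))
    #     corrected = pc.correct(img)  # homography via PatternRecognition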

    def correctPoints(self, pts):
        if not self._homography_is_fixed:
            self._homography = None

        h = self.homography
        #         #normalize
        #         h /= h[2,2]
        #         #invert homography
        #         h = np.linalg.inv(h)

        if pts.ndim == 2:
            pts = pts.reshape(1, *pts.shape)
        return cv2.perspectiveTransform(pts.astype(np.float32), h)
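
    # cv2.perspectiveTransform expects points shaped (1, N, 2) in float32,
    # hence the reshape above for a plain (N, 2) input.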

    @property
    def camera_position(self):
        '''
        returns camera position in world coordinates using self.rvec and self.tvec
        from http://stackoverflow.com/questions/14515200/python-opencv-solvepnp-yields-wrong-translation-vector
        '''
        t, r = self.pose()
        if self._camera_position is None:
            self._camera_position = -np.matrix(
                cv2.Rodrigues(r)[0]).T * np.matrix(t)
        return self._camera_position
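
    # Derivation: a world point X maps to camera coordinates as
    # X_cam = R*X + t. The camera centre C satisfies R*C + t = 0,
    # hence C = -R^T * t (R is orthogonal, so R^-1 == R^T).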

    def standardUncertainties(self,
                              focal_Length_mm,
                              f_number,
                              focusAtYX=None,
                              sigma_best_focus=0,
                              quad_pos_err=0,
                              shape=None,
                              uncertainties=((), (), ())):
        '''
        focusAtYX - image position (y, x) which is in focus
            if not set, it is assumed that the image middle is in focus
        sigma_best_focus - standard deviation of the PSF
                             within the best focus (default blur)
        uncertainties - contributors for standard uncertainty
                        these need to be perspective transformed to fit the new
                        image shape
        '''
        #TODO: consider quad_pos_error
        ############################## (also influences intensity corr map)

        cam = self.opts['cameraMatrix']
        if shape is None:
            s = self.img.shape
        else:
            s = shape

        # 1. DEFOCUS DUE TO DEPTH OF FIELD
        ##################################
        t, r = self.pose()
        worldCoord = np.fromfunction(
            lambda x, y: imgPointToWorldCoord((y, x), r, t, cam), s)
        depthMap = np.linalg.norm(worldCoord - self.camera_position,
                                  axis=0).reshape(s)
        del worldCoord

        if focusAtYX is None:
            # assume image middle is in focus:
            focusAtYX = (s[0] // 2, s[1] // 2)
        infocusDepth = depthMap[focusAtYX]
        depthOfField_blur = defocusThroughDepth(depthMap,
                                                infocusDepth,
                                                focal_Length_mm,
                                                f_number,
                                                k=2.335)
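
        # defocusThroughDepth presumably (assumption, not shown here)
        # evaluates the thin-lens circle of confusion per pixel: blur grows
        # with the distance between each point's depth and the in-focus
        # depth, scaled by focal length, f-number and the constant k.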

        #2. INCREASED PIXEL SIZE DUE TO INTERPOLATION BETWEEN
        #   PIXELS MOVED APART
        ######################################################
        #index maps:
        py, px = np.mgrid[0:s[0], 0:s[1]]
        #warped index maps:
        wx = cv2.warpPerspective(np.asfarray(px),
                                 self.homography,
                                 self._newBorders,
                                 borderValue=np.nan,
                                 flags=cv2.INTER_LANCZOS4)
        wy = cv2.warpPerspective(np.asfarray(py),
                                 self.homography,
                                 self._newBorders,
                                 borderValue=np.nan,
                                 flags=cv2.INTER_LANCZOS4)

        pxSizeFactorX = (1 / np.abs(np.gradient(wx)[1]))
        pxSizeFactorY = (1 / np.abs(np.gradient(wy)[0]))
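
        # the x/y gradients of the warped index maps give, per output pixel,
        # how many source pixels it spans; the reciprocal is therefore the
        # local stretch factor (output px per source px) in x and y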

        self.maps['depthMap'] = depthMap

        #AREA RATIO AFTER/BEFORE:
        #AREA OF QUADRILATERAL:
        q = self.quad
        quad_size = 0.5 * abs((q[2, 0] - q[0, 0]) * (q[3, 1] - q[1, 1]) +
                              (q[3, 0] - q[1, 0]) * (q[0, 1] - q[2, 1]))
        sx, sy = self._newBorders
        self.areaRatio = (sx * sy) / quad_size
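
        # quad_size is the shoelace formula for a quadrilateral written in
        # terms of its diagonals:
        #   A = 0.5 * |(x2-x0)(y3-y1) - (x3-x1)(y2-y0)|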

        #WARP ALL FIELD TO NEW PERSPECTIVE AND MULTIPLY WITH PXSIZE FACTOR:
        f = (pxSizeFactorX**2 + pxSizeFactorY**2)**0.5

        self.maps[
            'depthOfField_blur'] = depthOfField_blur = cv2.warpPerspective(
                depthOfField_blur,
                self.homography,
                self._newBorders,
                borderValue=np.nan,
            ) * f

        #perspective transform given uncertainties:
        warpedU = []
        for u in uncertainties:
            warpedU.append([])
            for i in u:
                # print(i, type(i), isinstance(i, np.ndarray))
                if isinstance(i, np.ndarray) and i.size > 1:
                    i = cv2.warpPerspective(i,
                                            self.homography,
                                            self._newBorders,
                                            borderValue=np.nan,
                                            flags=cv2.INTER_LANCZOS4) * f

                else:
                    #multiply with area ratio: after/before perspective warp
                    i *= self.areaRatio

                warpedU[-1].append(i)
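
        # below: the extra pixel size (factor - 1) is treated as a uniform
        # error of half-width (factor - 1)/2; the standard uncertainty of a
        # uniform distribution is its half-width divided by sqrt(3)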

        deflectionUncertX = (pxSizeFactorX - 1) / (2 * 3**0.5)
        deflectionUncertY = (pxSizeFactorY - 1) / (2 * 3**0.5)

        warpedU[0].extend((deflectionUncertX, depthOfField_blur))
        warpedU[1].extend((deflectionUncertY, depthOfField_blur))

        return tuple(warpedU)

    def pose(self):
        if self.tvec is None:
            if self.quad is not None:
                self._poseFromQuad()
            else:
                self._poseFromHomography()
        return self.tvec, self.rvec

    def _poseFromHomography(self):
        sy, sx = self.img.shape[:2]
        #image edges:
        objP = np.array([[
            [0, 0],
            [sx, 0],
            [sx, sy],
            [0, sy],
        ]],
                        dtype=np.float32)
        quad = cv2.perspectiveTransform(objP, self._Hinv)
        self._poseFromQuad(quad)

    def _poseFromQuad(self, quad=None):
        '''
        estimate the pose of the object plane using quad
            setting:
        self.rvec -> rotation vector
        self.tvec -> translation vector
        '''
        if quad is None:
            quad = self.quad
        # http://answers.opencv.org/question/1073/what-format-does-cv2solvepnp-use-for-points-in/
        # Find the rotation and translation vectors.
        retval, self.rvec, self.tvec = cv2.solvePnP(
            self.obj_points,
            quad.astype(np.float32),
            self.opts['cameraMatrix'],
            self.opts['distCoeffs'],
            #flags=cv2.CV_ITERATIVE
        )
        if not retval:
            print("Couldn't estimate pose")

    @property
    def obj_points(self):
        if self._obj_points is None:
            h = self.opts['obj_height_mm']
            w = self.opts['obj_width_mm']
            if w is None or h is None:
                w, h = 100, 100

            self._obj_points = np.array([
                [0, 0, 0],
                [w, 0, 0],
                [w, h, 0],
                [0, h, 0],
            ],
                                        dtype=np.float32)
        return self._obj_points

    def drawQuad(self, img=None, quad=None, thickness=30):
        '''
        Draw the quad into given img 
        '''
        if img is None:
            img = self.img
        if quad is None:
            quad = self.quad
        q = quad
        c = int(img.max())
        cv2.line(img, tuple(q[0]), tuple(q[1]), c, thickness)
        cv2.line(img, tuple(q[1]), tuple(q[2]), c, thickness)
        cv2.line(img, tuple(q[2]), tuple(q[3]), c, thickness)
        cv2.line(img, tuple(q[3]), tuple(q[0]), c, thickness)
        return img

    def draw3dCoordAxis(self, img=None, thickness=8):
        '''
        draw the 3d coordinate axes into given image
        if image == False:
            create an empty image
        '''
        if img is None:
            img = self.img
        elif img is False:
            img = np.zeros(shape=self.img.shape, dtype=self.img.dtype)
        else:
            img = imread(img)
        # project 3D points to image plane:
        w, h = self.opts['obj_width_mm'], self.opts['obj_height_mm']
        axis = np.float32([[0.5 * w, 0.5 * h, 0], [w, 0.5 * h, 0],
                           [0.5 * w, h, 0], [0.5 * w, 0.5 * h, -0.5 * w]])
        t, r = self.pose()
        imgpts = cv2.projectPoints(axis, r, t, self.opts['cameraMatrix'],
                                   self.opts['distCoeffs'])[0]

        mx = img.max()
        origin = tuple(imgpts[0].ravel())
        cv2.line(img, origin, tuple(imgpts[1].ravel()), (0, 0, mx), thickness)
        cv2.line(img, origin, tuple(imgpts[2].ravel()), (0, mx, 0), thickness)
        cv2.line(img, origin, tuple(imgpts[3].ravel()), (mx, 0, 0),
                 thickness * 2)
        return img

    @staticmethod
    def _calcQuadSize(corners, aspectRatio):
        '''
        return the size of a rectangle under perspective distortion in [px]
        TODO: put this back in?
            if aspectRatio is not given, it will be determined
        '''
        if aspectRatio > 1:  #x is bigger -> reduce y
            x_length = PerspectiveCorrection._quadXLength(corners)
            y = x_length / aspectRatio
            return x_length, y
        else:  # y is bigger -> reduce x
            y_length = PerspectiveCorrection._quadYLength(corners)
            x = y_length * aspectRatio
            return x, y_length

    @staticmethod
    def _quadYLength(corners):
        ll = PerspectiveCorrection._linelength
        l0 = (corners[1], corners[2])
        l1 = (corners[0], corners[3])
        return max(ll(l0), ll(l1))

    @staticmethod
    def _quadXLength(corners):
        ll = PerspectiveCorrection._linelength
        l0 = (corners[0], corners[1])
        l1 = (corners[2], corners[3])
        return max(ll(l0), ll(l1))

    @staticmethod
    def _linelength(line):
        p0, p1 = line
        x0, y0 = p0
        x1, y1 = p1
        dx = x1 - x0
        dy = y1 - y0
        return (dx**2 + dy**2)**0.5
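
# Round-trip sketch for PerspectiveCorrection.distort()/correct()
# (hypothetical file name and quad coordinates; angles in degrees):
#
#     img = imread('board.png')
#     pc = PerspectiveCorrection(img.shape, obj_width_mm=80, obj_height_mm=50)
#     pc.setReference([(100, 90), (510, 120), (520, 380), (90, 350)])
#     distorted = pc.distort(img, rotX=20, rotY=10)
#     restored = pc.correct(distorted)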