def addLens(self, lens, date=None, info='', light_spectrum='visible'):
    '''
    lens -> instance of LensDistortion or saved file
    '''
    self._registerLight(light_spectrum)
    date = _toDate(date)

    if not isinstance(lens, LensDistortion):
        l = LensDistortion()
        l.readFromFile(lens)
        lens = l

    f = self.coeffs['lens']
    if light_spectrum not in f:
        f[light_spectrum] = []
    f[light_spectrum].insert(_insertDateIndex(date, f[light_spectrum]),
                             [date, info, lens.coeffs])
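
# Usage sketch for addLens (illustrative, not part of the original file):
# 'CalibrationFile' and the file names are hypothetical stand-ins for the
# container class that provides addLens/getLens.
def _example_addLens():
    cal = CalibrationFile('camera.cal')  # hypothetical container
    # register a lens calibration, valid from the given date:
    cal.addLens('lens_coeffs.cal',       # file readable by
                                         # LensDistortion.readFromFile
                date='2015-08-01',
                info='50 mm prime, f/2.8',
                light_spectrum='visible')
    # retrieve it again (see getLens below):
    return cal.getLens('visible', date='2015-08-01')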
class ObjectVignettingSeparation(PatternRecognition):
    """
    If an imaged object is superimposed by a flat-field map (often caused
    by vignetting), the actual object signal can be separated from the
    camera's flat field using multiple images of the object at different
    positions.

    For this, the following steps are needed:

    1. Set the first given image as reference.
       For every other image ( .addImg() ):
    2. Calculate the difference in translation, rotation and shear
       through pattern recognition.
    3. Warp every image to fit the reference one.
    4. Set an initial flatField image from the local maximum of every image.

    Iterate:

    5. Divide every warped image by its flatField.
    6. Define .object as the average of all fitted and flatField-corrected
       images.
    7. Extract .flatField as the ratio of the (fitted) .object to every
       given image.

    Usage:

    >>> o = ObjectVignettingSeparation(ref_img)
    >>> for img in imgs:
    ...     o.addImg(img)
    >>> smoothed_ff, mask, flatField, obj = o.separate()
    """

    def __init__(self, img, bg=None, maxDev=1e-4, maxIter=10,
                 remove_border_size=0,
                 # feature_size=5,
                 cameraMatrix=None, distortionCoeffs=None):
        """
        Args:
            img (path or array): Reference image

        Kwargs:
            bg (path or array): Background image - same for all given images
            maxDev (float): Relative deviation between the last two iteration
                steps; stop iterative refinement if the deviation is smaller
            maxIter (int): Stop iterative refinement after maxIter steps
        """
        self.lens = None
        if cameraMatrix is not None:
            self.lens = LensDistortion()
            self.lens._coeffs['distortionCoeffs'] = distortionCoeffs
            self.lens._coeffs['cameraMatrix'] = cameraMatrix

        self.maxDev = maxDev
        self.maxIter = maxIter
        self.remove_border_size = remove_border_size
        # self.feature_size = feature_size
        img = imread(img, 'gray')

        self.bg = bg
        if bg is not None:
            self.bg = getBackground(bg)
            if not isinstance(self.bg, np.ndarray):
                self.bg = np.full_like(img, self.bg, dtype=img.dtype)
            else:
                self.bg = self.bg.astype(img.dtype)
            img = cv2.subtract(img, self.bg)

        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)

        # CREATE TEMPLATE FOR PATTERN COMPARISON:
        pos = self._findObject(img)
        self.obj_shape = img[pos].shape

        PatternRecognition.__init__(self, img[pos])

        self._ff_mma = MaskedMovingAverage(shape=img.shape,
                                           dtype=np.float64)

        self.object = None
        self.Hs = []     # Homography matrices of all fitted images
        self.Hinvs = []  # same, but inverse
        self.fits = []   # all images, fitted to the reference
        self._fit_masks = []

        self._refined = False

    # TODO: remove that property?
    @property
    def flatField(self):
        return self._ff_mma.avg

    @flatField.setter
    def flatField(self, arr):
        self._ff_mma.avg = arr

    def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
               borderWidth=3):  # borderWidth=100
        """
        Args:
            img (path or array): image containing the same object as in
                the reference image

        Kwargs:
            maxShear (float): In order to define a good fit, reject higher
                shear values between this and the reference image
            maxRot (float): Same for rotation
            minMatches (int): Minimum number of matching points found in
                both this and the reference image
        """
        try:
            fit, img, H, H_inv, nmatched = self._fitImg(img)
        except Exception as e:
            print(e)
            return

        # CHECK WHETHER FIT IS GOOD ENOUGH:
        (translation, rotation, scale, shear) = decompHomography(H)
        print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\t'
              'scale: %s\n\tshear: %s'
              % (translation, rotation, scale, shear))
        if (nmatched > minMatches
                and abs(shear) < maxShear
                and abs(rotation) < maxRot):
            print('==> img added')
            # HOMOGRAPHY:
            self.Hs.append(H)
            # INVERSE HOMOGRAPHY:
            self.Hinvs.append(H_inv)
            # IMAGES WARPED TO THE BASE IMAGE:
            self.fits.append(fit)

            # ADD IMAGE TO THE INITIAL flatField ARRAY:
            i = img > self.signal_ranges[-1][0]
            # remove borders (that might have erroneous light):
            i = minimum_filter(i, borderWidth)
            self._ff_mma.update(img, i)

            # create fit img mask:
            mask = fit < self.signal_ranges[-1][0]
            mask = maximum_filter(mask, borderWidth)
            # IGNORE BORDER:
            r = self.remove_border_size
            if r:
                mask[:r, :] = 1
                mask[-r:, :] = 1
                mask[:, -r:] = 1
                mask[:, :r] = 1
            self._fit_masks.append(mask)

            # image added
            return fit
        return False

    def error(self, nCells=15):
        '''
        calculate the standard deviation of all fitted images,
        averaged to a grid
        '''
        s0, s1 = self.fits[0].shape
        aR = s0 / s1
        if aR > 1:
            ss0 = int(nCells)
            ss1 = int(ss0 / aR)
        else:
            ss1 = int(nCells)
            ss0 = int(ss1 * aR)
        L = len(self.fits)

        arr = np.array(self.fits)
        arr[np.array(self._fit_masks)] = np.nan

        avg = np.tile(np.nanmean(arr, axis=0), (L, 1, 1))
        arr = (arr - avg) / avg

        out = np.empty(shape=(L, ss0, ss1))
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            for n, f in enumerate(arr):
                out[n] = subCell2DFnArray(f, np.nanmean, (ss0, ss1))
        return np.nanmean(out**2)**0.5

    def separate(self):
        self.flatField = self._createInitialflatField()
        # TODO: remove following
        # self.init_ff = self.flatField.copy()

        for step in self:
            print('iteration step %s/%s' % (step, self.maxIter))

        # TODO: remove smooth from here - is better done in post-processing
        smoothed_ff, mask = self.smooth()

        if self.lens is not None:
            smoothed_ff = self.lens.distortImage(smoothed_ff)
            mask = self.lens.distortImage(mask.astype(np.uint8)).astype(bool)

        return smoothed_ff, mask, self.flatField, self.object

    def smooth(self):
        # TODO: there is no NaN in the ff img, or?
        mask = self.flatField == 0
        from skimage.filters.rank import median, mean
        from skimage.morphology import disk
        ff = mean(median(self.flatField, disk(5), mask=~mask),
                  disk(13), mask=~mask)
        return ff.astype(float) / ff.max(), mask

    def __iter__(self):
        # use iteration to refine the flatField array;
        # keep track of the deviation between two iteration steps
        # for the break criterion:
        self._last_dev = None
        self.n = 0  # iteration number
        return self

    def __next__(self):
        # THE IMAGED OBJECT WILL BE AVERAGED FROM ALL
        # INDIVIDUAL IMAGES SHOWING THIS OBJECT FROM DIFFERENT POSITIONS:
        obj = MaskedMovingAverage(shape=self.obj_shape)
        with np.errstate(divide='ignore', invalid='ignore'):
            for f, h in zip(self.fits, self.Hinvs):
                warpedflatField = cv2.warpPerspective(
                    self.flatField, h, (f.shape[1], f.shape[0]))
                obj.update(f / warpedflatField, warpedflatField != 0)
        self.object = obj.avg

        # THE NEW flatField WILL BE OBTAINED FROM THE WARPED QUOTIENT
        # OF EVERY IMAGE AND THE ESTIMATED OBJECT:
        sh = self.flatField.shape
        s = MaskedMovingAverage(shape=sh)

        for f, mask, h in zip(self.fits, self._fit_masks, self.Hs):
            div = f / self.object
            # -> do not interpolate between background and image border:
            div[mask] = np.nan
            div = cv2.warpPerspective(div, h, (sh[1], sh[0]),
                                      # borderMode=cv2.BORDER_TRANSPARENT
                                      )
            div = np.nan_to_num(div)
            s.update(div, div != 0)

        new_flatField = s.avg

        # STOP ITERATION?
        # RMSE excluding NaNs:
        dev = np.nanmean((new_flatField[::10, ::10] -
                          self.flatField[::10, ::10])**2)**0.5
        print('residuum: %s' % dev)
        if self.n >= self.maxIter or (self._last_dev and (
                (self.n > 4 and dev > self._last_dev) or
                dev < self.maxDev)):
            raise StopIteration

        # remove erroneous values:
        self.flatField = np.clip(new_flatField, 0, 1)

        self.n += 1
        self._last_dev = dev
        return self.n

    def _createInitialflatField(self, downscale_size=9):
        s0, s1 = self.flatField.shape
        f = int(max(s0, s1) / downscale_size)
        every = int(f / 3.5)
        s = fastFilter(self.flatField, f, every)
        # make relative:
        s /= s.max()
        return s

    def _fitImg(self, img):
        '''
        fit perspective and size of the input image to the reference image
        '''
        img = imread(img, 'gray')
        if self.bg is not None:
            img = cv2.subtract(img, self.bg)
        if self.lens is not None:
            img = self.lens.correct(img, keepSize=True)

        (H, _, _, _, _, _, _, n_matches) = self.findHomography(img)
        H_inv = self.invertHomography(H)
        s = self.obj_shape
        fit = cv2.warpPerspective(img, H_inv, (s[1], s[0]))
        return fit, img, H, H_inv, n_matches

    def _findObject(self, img):
        '''
        Create a bounding box around the object within an image
        '''
        from imgProcessor.imgSignal import signalMinimum
        # img is scaled already:
        i = img > signalMinimum(img)  # img.max()/2.5
        # filter noise, single-time-effects etc. from the mask:
        i = minimum_filter(i, 4)
        return boundingBox(i)
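
# End-to-end usage sketch of the separation workflow described in the class
# docstring (steps 1-7). File names are illustrative; any imread-compatible
# paths or arrays work.
def _example_separation():
    o = ObjectVignettingSeparation('ref.png', bg='dark_frame.png',
                                   maxDev=1e-4, maxIter=10)
    for path in ('pos1.png', 'pos2.png', 'pos3.png'):
        fit = o.addImg(path)  # warped image, or False if the fit was rejected
        if fit is False:
            print('%s rejected (too few matches / too much shear)' % path)
    # iterative refinement (steps 5-7 of the docstring):
    smoothed_ff, mask, flatField, obj = o.separate()
    return obj, flatField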
def simulateSytematicError(N_SAMPLES=5, N_IMAGES=10,
                           SHOW_DETECTED_PATTERN=True,
                           # GRAYSCALE=False,
                           HEIGHT=500,
                           PLOT_RESULTS=True, PLOT_ERROR_ARRAY=True,
                           CAMERA_PARAM=None,
                           PERSPECTIVE=True, ROTATION=True,
                           RELATIVE_PATTERN_SIZE=0.5, POSITION=True,
                           NOISE=25, BLUR=(3, 3), PATTERNS=None):
    '''
    Simulates a lens calibration using synthetic images:

    * images are rendered at the given HEIGHT resolution
    * noise and smoothing are applied
    * perspective and position errors are applied
    * images are deformed using the given CAMERA_PARAM
    * the detected camera parameters are used to calculate the error
      relative to the given ones

    simulation
    ----------
    N_IMAGES -> number of images to take for a camera calibration
    N_SAMPLES -> number of camera calibrations of each pattern type

    output
    ------
    SHOW_DETECTED_PATTERN: show each image and detected pattern on screen
    PLOT_RESULTS: plot boxplots of the mean error and std of the
                  camera parameters
    PLOT_ERROR_ARRAY: plot the position error of the lens correction

    pattern
    -------
    this simulation tests the OpenCV standard patterns:
    chessboard, asymmetric and symmetric circles

    GRAYSCALE: whether to load the pattern as gray scale
    RELATIVE_PATTERN_SIZE: the relative size of the pattern within
                           the image (0.4 -> 40%)
    PERSPECTIVE: [True] -> enable perspective distortion
    ROTATION: [True] -> enable rotation of the pattern
    BLUR: False or (sizex, sizey), like (3, 3)
    CAMERA_PARAM: camera calibration parameters, unpacked below as
                  [fx, fy, cx, cy, k1, k2, p1, p2, k3]
    '''
    print('calculate systematic error of the implemented calibration '
          'algorithms')

    # LOCATION OF PATTERN IMAGES
    folder = MEDIA_PATH

    if PATTERNS is None:
        PATTERNS = ('Chessboard', 'Asymmetric circles', 'Symmetric circles')

    patterns = OrderedDict((
        # n of inner corners
        ('Chessboard', ((6, 9), 'chessboard_pattern_a3.svg')),
        ('Asymmetric circles', ((4, 11), 'acircles_pattern_a3.svg')),
        ('Symmetric circles', ((8, 11), 'circles_pattern_a3.svg')),
    ))
    # REMOVE PATTERNS THAT ARE NOT TO BE TESTED
    # (iterate over a copy - popping while iterating raises RuntimeError):
    for key in list(patterns):
        if key not in PATTERNS:
            patterns.pop(key)

    if SHOW_DETECTED_PATTERN:
        cv2.namedWindow('Pattern', cv2.WINDOW_NORMAL)

    # number of positively detected patterns:
    success = []
    # list[N_SAMPLES] of random camera parameters:
    fx, fy, cx, cy, k1, k2, k3, p1, p2 = [], [], [], [], [], [], [], [], []
    # list[Method, N_SAMPLES] of given-detected parameters:
    errl, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l = [
    ], [], [], [], [], [], [], [], [], []
    # list[Method, N_SAMPLES] of magnitude (difference of displacement
    # vector array):
    dxl = []
    dyl = []

    # maintain aspect ratio of DIN A4, A3, ...:
    aspect_ratio_DIN = 2.0**0.5
    width = int(round(HEIGHT / aspect_ratio_DIN))

    if CAMERA_PARAM is None:
        CAMERA_PARAM = [HEIGHT, HEIGHT,
                        width / 2, HEIGHT / 2,  # principal point near centre
                        0.0, 0.01, 0.1, 0.01, 0.001]

    # CREATE N DIFFERENT RANDOM LENS ERRORS:
    for n in range(N_SAMPLES):
        # TODO: RANDOMIZE CAMERA ERROR??
        fx.append(CAMERA_PARAM[0])  # * np.random.uniform(1, 2)
        fy.append(CAMERA_PARAM[1])  # * np.random.uniform(1, 2)
        cx.append(CAMERA_PARAM[2])  # * np.random.uniform(0.9, 1.1)
        cy.append(CAMERA_PARAM[3])  # * np.random.uniform(0.9, 1.1)
        k1.append(CAMERA_PARAM[4])  # + np.random.uniform(-1, 1)*0.1
        k2.append(CAMERA_PARAM[5])  # + np.random.uniform(-1, 1)*0.01
        p1.append(CAMERA_PARAM[6])  # + np.random.uniform(0, 1)*0.1
        p2.append(CAMERA_PARAM[7])  # + np.random.uniform(0, 1)*0.01
        k3.append(CAMERA_PARAM[8])  # + np.random.uniform(0, 1)*0.001

    L = LensDistortion()

    # FOR EVERY METHOD:
    for method, (board_size, filename) in patterns.items():
        f = folder.join(filename)

        # LOAD THE SVG FILE AND RENDER IT AT THE NEW RESOLUTION:
        svg = QtSvg.QSvgRenderer(f)
        image = QtGui.QImage(width * 4, HEIGHT * 4,
                             QtGui.QImage.Format_ARGB32)
        image.fill(QtCore.Qt.white)
        # get a QPainter that paints into the image:
        painter = QtGui.QPainter(image)
        svg.render(painter)
        # alternatively, save and reload via OpenCV:
        # f = "rendered.png"
        # image.save(f)
        # if GRAYSCALE:
        #     img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
        # else:
        #     img = cv2.imread(f)
        img = qImageToArray(image)

        success.append([])
        fxl.append([])
        errl.append([])
        fyl.append([])
        cxl.append([])
        cyl.append([])
        k1l.append([])
        k2l.append([])
        k3l.append([])
        p1l.append([])
        p2l.append([])
        dxl.append([])
        dyl.append([])

        imgHeight, imgWidth = img.shape[0], img.shape[1]

        for n in range(N_SAMPLES):
            L.calibrate(board_size, method)

            print('SET PARAMS:', fx[n], fy[n], cx[n], cy[n],
                  k1[n], k2[n], k3[n], p1[n], p2[n])
            L.setCameraParams(fx[n], fy[n], cx[n], cy[n],
                              k1[n], k2[n], k3[n], p1[n], p2[n])
            L._coeffs['shape'] = (imgHeight, imgWidth)

            hw = imgWidth * 0.5
            hh = imgHeight * 0.5

            for m in range(N_IMAGES):
                pts1 = np.float32([[hw, hh + 100],
                                   [hw - 100, hh - 100],
                                   [hw + 100, hh - 100]])
                pts2 = pts1.copy()
                if ROTATION:
                    # random angle in radians:
                    rotatePolygon(pts2, np.random.uniform(0, 2 * np.pi))
                if PERSPECTIVE:
                    # CREATE A RANDOM PERSPECTIVE:
                    pts2 += np.random.randint(-hw * 0.05, hh * 0.05,
                                              size=(3, 2))
                # MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE:
                pts2 *= RELATIVE_PATTERN_SIZE
                # MOVE TO THE CENTER:
                pts2[:, 0] += hw * (1 - RELATIVE_PATTERN_SIZE)
                pts2[:, 1] += hh * (1 - RELATIVE_PATTERN_SIZE)
                if POSITION:
                    f = ((2 * np.random.rand(2)) - 1)
                    pts2[:, 0] += hw * 0.7 * f[0] * \
                        (1 - RELATIVE_PATTERN_SIZE)
                    pts2[:, 1] += hh * 0.7 * f[1] * \
                        (1 - RELATIVE_PATTERN_SIZE)

                # APPLY PERSPECTIVE, POSITION, ROTATION:
                M = cv2.getAffineTransform(pts1, pts2)
                img_warped = cv2.warpAffine(img, M, (imgWidth, imgHeight),
                                            borderValue=(230, 230, 230))

                # DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING
                # SHOULD REDUCE THE WARPING ERROR:
                img_warped = cv2.resize(img_warped, (width, HEIGHT))

                # CREATE THE LENS DISTORTION:
                mapx, mapy = L.getDistortRectifyMap(width, HEIGHT)
                img_distorted = cv2.remap(img_warped, mapx, mapy,
                                          cv2.INTER_LINEAR,
                                          borderValue=(230, 230, 230))
                # img_distorted[img_distorted == 0] = 20
                # img_distorted[img_distorted > 100] = 230

                if BLUR:
                    img_distorted = cv2.blur(img_distorted, BLUR)
                if NOISE:
                    # soften; make black and white more gray; add noise:
                    img_distorted = img_distorted.astype(np.int16)
                    img_distorted += (np.random.rand(*img_distorted.shape)
                                      * NOISE).astype(img_distorted.dtype)
                    img_distorted = np.clip(img_distorted, 0,
                                            255).astype(np.uint8)

                found = L.addImg(img_distorted)

                if SHOW_DETECTED_PATTERN and found:
                    img_distorted = L.drawChessboard(img_distorted)
                    cv2.imshow('Pattern', img_distorted)
                    cv2.waitKey(1)

            success[-1].append(L.findCount)
            try:
                L._coeffs = None
                errl[-1].append(L.coeffs['reprojectionError'])
                L.correct(img_distorted)

                c = L.getCameraParams()
                print('GET PARAMS:', c)
                fxl[-1].append(fx[n] - c[0])
                fyl[-1].append(fy[n] - c[1])
                cxl[-1].append(cx[n] - c[2])
                cyl[-1].append(cy[n] - c[3])
                k1l[-1].append(k1[n] - c[4])
                k2l[-1].append(k2[n] - c[5])
                k3l[-1].append(k3[n] - c[6])
                p1l[-1].append(p1[n] - c[7])
                p2l[-1].append(p2[n] - c[8])

                if PLOT_ERROR_ARRAY:
                    dx = (mapx - L.mapx) / 2
                    dy = (mapy - L.mapy) / 2
                    dxl[-1].append(dx)
                    dyl[-1].append(dy)
            except NothingFound:
                print("Couldn't create a calibration because no patterns "
                      "were detected")

        del painter

    # AVERAGE SAMPLES AND GET STD:
    dx_std, dx_mean = [], []
    dy_std, dy_mean = [], []
    mag = []
    std = []
    for patterndx, patterndy in zip(dxl, dyl):
        x = np.mean(patterndx, axis=0)
        dx_mean.append(x)
        y = np.mean(patterndy, axis=0)
        dy_mean.append(y)
        # magnitude of the mean displacement:
        mag.append((x**2 + y**2)**0.5)

        x = np.std(patterndx, axis=0)
        dx_std.append(x)
        y = np.std(patterndy, axis=0)
        dy_std.append(y)
        std.append((x**2 + y**2)**0.5)

    # PLOT
    p = len(patterns)

    if PLOT_RESULTS:
        fig, axs = plt.subplots(nrows=2, ncols=5)
        axs = np.array(axs).ravel()
        for ax, typ, tname in zip(
                axs,
                (success, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l),
                ('Success rate', 'fx', 'fy', 'cx', 'cy',
                 'k1', 'k2', 'k3', 'p1', 'p2')):
            ax.set_title(tname)
            # , showmeans=True, meanline=True)  # labels=patterns.keys()
            ax.boxplot(typ, notch=0, sym='+', vert=1, whis=1.5)
            ax.set_xticklabels(patterns.keys(), rotation=40, fontsize=8)

    if PLOT_ERROR_ARRAY:
        mmin = np.min(mag)
        mmax = np.max(mag)
        smin = np.min(std)
        smax = np.max(std)

        plt.figure()
        for n, pattern in enumerate(patterns.keys()):
            plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(mag[n], origin='upper', vmin=mmin, vmax=mmax)
            if n == p - 1:
                plt.colorbar(label='Average')

            plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(std[n], origin='upper', vmin=smin, vmax=smax)
            if n == p - 1:
                plt.colorbar(label='Standard deviation')

        fig = plt.figure()
        fig.suptitle('Individually scaled')
        for n, pattern in enumerate(patterns.keys()):
            # downscale - show max 30 arrows in each dimension:
            sy, sx = dx_mean[n].shape
            ix = int(sx / 15)
            if ix < 1:
                ix = 1
            iy = int(sy / 15)
            if iy < 1:
                iy = 1
            Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix))

            plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(mag[n], origin='upper')
            plt.colorbar()
            plt.quiver(X, Y, dy_mean[n][::ix, ::iy] * 20,
                       dx_mean[n][::ix, ::iy] * 20)

            plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(std[n], origin='upper')
            plt.colorbar()
            # plt.quiver(X, Y, dx_std[n][::ix, ::iy]*50,
            #            dy_std[n][::ix, ::iy]*10)

        #############################################
        fig = plt.figure()
        fig.suptitle('Spatial uncertainty + deflection')
        for n, pattern in enumerate(patterns.keys()):
            L.calibrate(board_size, method)
            # there is a lot of additional calculation that's not necessary:
            L.setCameraParams(fx[0], fy[0], cx[0], cy[0],
                              k1[0], k2[0], k3[0], p1[0], p2[0])
            L._coeffs['shape'] = (imgHeight, imgWidth)
            L._coeffs['reprojectionError'] = np.mean(errl[n])

            # deflection_x, deflection_y = L.getDeflection(width, HEIGHT)
            # deflection_x += dx_mean[n]
            # deflection_y += dy_mean[n]

            ux, uy = L.standardUncertainties()

            plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(mag[n], origin='upper')
            plt.colorbar()

            # DEFLECTION:
            plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g')
            plt.title(pattern)
            plt.imshow(np.linalg.norm([ux, uy], axis=0), origin='upper')
            plt.colorbar()

            # DEFLECTION VECTORS:
            # downscale - show max 30 arrows in each dimension:
            sy, sx = dx_mean[n].shape
            ix = int(sx / 15)
            if ix < 1:
                ix = 1
            iy = int(sy / 15)
            if iy < 1:
                iy = 1
            Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix))
            plt.quiver(X, Y, ux[::ix, ::iy] * 20, uy[::ix, ::iy] * 20)

    if PLOT_ERROR_ARRAY or PLOT_RESULTS:
        plt.show()

    return dx_mean, dy_mean
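
# Usage sketch: a quick, low-resolution run testing only the chessboard
# pattern (parameter values are illustrative):
def _example_simulation():
    dx_mean, dy_mean = simulateSytematicError(
        N_SAMPLES=2, N_IMAGES=6, HEIGHT=300,
        SHOW_DETECTED_PATTERN=False,  # no OpenCV window needed
        PLOT_RESULTS=False, PLOT_ERROR_ARRAY=True,
        PATTERNS=('Chessboard',))
    # per-pattern mean displacement-error arrays [px]:
    return dx_mean, dy_mean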
def getLens(self, light_spectrum, date):
    d = self.getCoeff('lens', light_spectrum, date)
    if d:
        return LensDistortion(d[2])
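
# Sketch of the assumed record layout: each entry inserted by addLens above
# is a [date, info, coeffs] triple kept sorted by date, so getCoeff is
# expected to return the entry valid at the requested date, with d[2]
# holding the coefficient dict.
def _example_getLens(cal):
    entry = cal.getCoeff('lens', 'visible', '2015-08-01')
    if entry:
        date, info, coeffs = entry
        return LensDistortion(coeffs)  # equivalent to cal.getLens(...)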
class LensDistortion(Tool):
    '''
    Calibrate the camera through interpreting chessboard images
    [also works with an un-loaded image stack]
    '''
    icon = 'chessboard.svg'

    def __init__(self, imageDisplay):
        Tool.__init__(self, imageDisplay)
        _import()

        self.calFileTool = self.showGlobalTool(CalibrationFile)
        self.camera = None
        self.cItem = None
        self.key = None
        self._rois = []
        self._roi = None

        pa = self.setParameterMenu()

        btn = QtWidgets.QPushButton('Show Patterns')
        btn.clicked.connect(lambda: os.startfile(PATTERN_FILE))
        btn.setFlat(True)
        self._menu.content.header.insertWidget(2, btn)

        self.pLive = pa.addChild({
            'name': 'Live',
            'type': 'bool',
            'value': False,
            'tip': """'False': Images are taken from the stack
'True': Images are taken every time the first layer is updated -
choose this if you use a webcam"""})

        self.pLiveStopAfter = self.pLive.addChild({
            'name': 'Stop after n images',
            'type': 'int',
            'value': 20,
            'min': 0})

        self.pLiveActivateTrigger = self.pLive.addChild({
            'name': "Manual trigger",
            'type': 'bool',
            'value': True})

        self.pLiveTrigger = self.pLiveActivateTrigger.addChild({
            'name': "Trigger or KEY [+]",
            'type': 'action',
            'visible': False})

        self.pLive.sigValueChanged.connect(  # show/hide children
            lambda param, value: [ch.show(value) for ch in param.children()])
        self.pLive.sigValueChanged.emit(self.pLive, self.pLive.value())

        self.pMethod = pa.addChild({
            'name': 'Method',
            'type': 'list',
            'limits': ['Chessboard', 'Symmetric circles',
                       'Asymmetric circles', 'Manual']})
        self.pMethod.sigValueChanged.connect(self._pMethodChanged)

        self.pFit = self.pMethod.addChild({
            'name': 'Fit to corners',
            'type': 'action',
            'visible': False,
            'tip': '''Fit grid to given corner positions'''})
        self.pFit.sigActivated.connect(self._pChebXYChanged)

        self.pChessbX = self.pMethod.addChild({
            'name': 'N corners X',
            'type': 'int',
            'value': 6,
            'tip': '''Depending on the used pattern,
number of corners/circles in X''',
            'limits': [3, 100]})
        self.pChessbX.sigValueChanged.connect(self._pChebXYChanged)

        self.pChessbY = self.pMethod.addChild({
            'name': 'N corners Y',
            'type': 'int',
            'value': 8,
            'tip': '''Depending on the used pattern,
number of corners/circles in Y''',
            'limits': [3, 100]})
        self.pChessbY.sigValueChanged.connect(self._pChebXYChanged)

        pApSize = pa.addChild({
            'name': 'Aperture Size [mm]',
            'type': 'group',
            'tip': 'Physical size of the sensor'})

        self.pApertureX = pApSize.addChild({
            'name': 'Size X',
            'type': 'float',
            'value': 4,
            'tip': 'Physical width of the sensor'})

        self.pApertureY = pApSize.addChild({
            'name': 'Size Y',
            'type': 'float',
            'value': 3,
            'tip': 'Physical height of the sensor'})

        self.pDrawChessboard = pa.addChild({
            'name': 'Draw Chessboard',
            'type': 'bool',
            'value': False})

        self.pDisplacement = pa.addChild({
            'name': 'Return spatial uncertainty',
            'type': 'bool',
            'value': False})

        self.pUpdate = pa.addChild({
            'name': 'Update calibration',
            'type': 'action',
            'visible': False})
        self.pUpdate.sigActivated.connect(self.updateCalibration)

        self.pCorrect = pa.addChild({
            'name': 'Undistort',
            'type': 'action',
            'visible': False})
        self.pCorrect.sigActivated.connect(self.correct)

    def correct(self):
        out = []
        for i in self.getDataOrFilenames():
            out.append(self.camera.correct(i, keepSize=True))
        self.display.workspace.addDisplay(
            origin=self.display,
            data=out,
            title='Corrected')

    def _posSize(self):
        # TODO: as general method in WidgetBase?
        vb = self.display.widget.view.vb
        r = vb.viewRange()
        s = ((r[0][0] + r[0][1]) / 1.1,
             (r[1][0] + r[1][1]) / 1.1)
        p = [(r[0][1] - r[0][0]) * 0.1,
             (r[1][1] - r[1][0]) * 0.1]
        return p, s

    def _nCells(self):
        return (self.pChessbX.value() - 1, self.pChessbY.value() - 1)

    def _removeROIs(self):
        w = self.display.widget
        vb = w.view.vb
        if self._rois:
            vb.removeItem(self._roi)
            w.sigTimeChanged.disconnect(self._changeROI)
        self._rois = []

    def _createROIs(self):
        w = self.display.widget
        vb = w.view.vb
        p, s = self._posSize()
        n = self._nCells()
        e = None
        if self._roi:
            e = self._roi.edges()
        for i in range(len(w.image)):
            r = UnregGridROI(n, pos=p, size=s, edges=e, pen='r')
            self._rois.append(r)
            if i == w.currentIndex:
                self._roi = r
                vb.addItem(r)
        w.sigTimeChanged.connect(self._changeROI)

    def _changeROI(self, ind, time):
        w = self.display.widget
        vb = w.view.vb
        vb.removeItem(self._roi)
        self._roi = r = self._rois[ind]
        vb.addItem(r)

    def _pMethodChanged(self, param, val):
        self._removeROIs()
        if val == 'Manual':
            self._createROIs()
            self.pFit.show()
        else:
            self.pFit.hide()

    def _pChebXYChanged(self):
        # show GridROI in case manual detection is chosen:
        if self.pMethod.value() == 'Manual':
            if self._rois:
                self._removeROIs()
            self._createROIs()

    def updateCalibration(self):
        self.calFileTool.updateLens(self.camera)

    def _chooseSavePath(self):
        d = self.display.workspace.gui.dialogs.getSaveFileName(
            filter='*.%s' % LensDistortion.ftype)
        if d:
            self.pSavePath.setValue(d)

    def activate(self):
        w = self.display.widget

        self.camera = LD()
        self.camera.calibrate(
            method=self.pMethod.value(),
            board_size=(self.pChessbX.value(),
                        self.pChessbY.value()),
            max_images=self.pLiveStopAfter.value(),
            sensorSize_mm=(self.pApertureX.value(),
                           self.pApertureY.value()),
            detect_sensible=True)

        if self.pLive.value():
            if self.pDrawChessboard.value() and not self.cItem:
                self.cItem = w.addColorLayer(name='Chessboard')
            if self.pLiveActivateTrigger.value():
                self.pLiveTrigger.setOpts(visible=True)
                self.pLiveTrigger.sigActivated.connect(self._addImgStream)
                if not self.key:
                    # ACTIVATE ON KEY [+]
                    self.key = QtWidgets.QShortcut(self.display.workspace)
                    self.key.setKey(QtGui.QKeySequence(QtCore.Qt.Key_Plus))
                    self.key.setContext(QtCore.Qt.ApplicationShortcut)
                    self.key.activated.connect(self.pLiveTrigger.activate)
            else:
                # ACTIVATE WHEN IMAGE CHANGES
                w.item.sigImageChanged.connect(self._addImgStream)
        else:
            img = self.getDataOrFilenames()
            img_loaded = isinstance(img[0], np.ndarray)
            # check conditions:
            if len(img) < 10:
                print('having less than 10 images can result in '
                      'erroneous results')
            out = []
            d = self.pDrawChessboard.value()
            found_indices = []
            for n, i in enumerate(img):
                try:
                    # MANUALLY ADDED POINTS:
                    if self._rois:
                        print(self._rois[n].points())
                        self.camera.addPoints(self._rois[n].points())
                        self.camera.setImgShape(i.shape)
                    else:
                        self.camera.addImg(i)
                    # draw chessboard:
                    if d:
                        out.append(self.camera.drawChessboard(
                            False if img_loaded else None))
                        found_indices.append(n)
                except NothingFound as errm:
                    print('Layer %s: ' % n, errm)
            if d:
                if img_loaded:
                    # , indices=found_indices)
                    w.addColorLayer(np.array(out), name='chessboard')
                else:
                    self.display.addLayers(out, ['Chessboard'] * len(out))
            self._end()

    def _end(self):
        self.setChecked(False)
        i = self.camera.findCount
        print('found chessboard in %s images' % i)
        if i:
            # show calibration in a window:
            t = self.display.workspace.addTextDock(
                'Camera calibration').widgets[-1]
            t.showToolbar(False)
            t.text.setText(self.camera.getCoeffStr())
            self.pUpdate.show()
            self.pCorrect.show()
            if self.pDisplacement.value():
                self.display.workspace.addDisplay(
                    origin=self.display,
                    data=self.camera.getDisplacementArray(),
                    title='displacement')

    def _addImgStream(self):
        '''
        add a new image and draw a chessboard on it
        until there are enough images
        '''
        if self.pLiveActivateTrigger.value():
            print('click')
        if not self.camera:
            print('activate first')
        try:
            image = self.display.widget.image
            found = self.camera.addImgStream(image)
            if found and self.pDrawChessboard.value():
                chessboard = self.camera.drawChessboard(img=False)
                self.cItem.setLayer(chessboard)
            if found:
                print('found %s chessboards' % self.camera.findCount)
        except EnoughImages:
            self.deactivate()
            self._end()

    def deactivate(self):
        try:
            self.display.widget.item.sigImageChanged.disconnect(
                self._addImgStream)
        except Exception:
            pass
        try:
            self.pLiveTrigger.sigActivated.disconnect(self._addImgStream)
        except Exception:
            pass
        try:
            self.key.activated.disconnect(self.pLiveTrigger.activate)
        except Exception:
            pass
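
# For reference: a minimal, non-GUI sketch of the calibration flow this tool
# drives, using only calls that appear above (LD.calibrate / addImg /
# getCoeffStr / correct); file names are illustrative.
def _example_calibration():
    cam = LD()
    cam.calibrate(method='Chessboard', board_size=(6, 8),
                  sensorSize_mm=(4, 3), detect_sensible=True)
    for path in ('cal_01.png', 'cal_02.png', 'cal_03.png'):
        try:
            cam.addImg(path)
        except NothingFound as errm:
            print(path, errm)
    print(cam.getCoeffStr())  # fitted camera coefficients
    # undistort an image with the new calibration:
    return cam.correct('cal_01.png', keepSize=True)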