import math

import cv  # legacy OpenCV (pre-cv2) Python bindings


def Process(image, pos_var, pos_w, pos_phase, pos_psi):
    global kernel_size
    if kernel_size % 2 == 0:
        kernel_size += 1
    kernel = cv.CreateMat(kernel_size, kernel_size, cv.CV_32FC1)
    # kernelimg = cv.CreateImage((kernel_size, kernel_size), cv.IPL_DEPTH_32F, 1)
    # big_kernelimg = cv.CreateImage((kernel_size * 20, kernel_size * 20), cv.IPL_DEPTH_32F, 1)
    src = cv.CreateImage((image.width, image.height), cv.IPL_DEPTH_8U, 1)
    src_f = cv.CreateImage((image.width, image.height), cv.IPL_DEPTH_32F, 1)

    # Convert to single-channel grayscale only if the input is 3-channel BGR
    if cv.GetElemType(image) == cv.CV_8UC3:
        cv.CvtColor(image, src, cv.CV_BGR2GRAY)
    else:
        src = image
    cv.ConvertScale(src, src_f, 1.0 / 255, 0)

    dest = cv.CloneImage(src_f)
    dest_mag = cv.CloneImage(src_f)

    # Map the trackbar-style integer positions to Gabor parameters
    var = pos_var / 10.0
    w = pos_w / 10.0
    phase = pos_phase * cv.CV_PI / 180.0
    psi = cv.CV_PI * pos_psi / 180.0

    # Build the Gabor kernel: Gaussian envelope times an oriented cosine carrier
    cv.Zero(kernel)
    half = kernel_size // 2
    for x in range(-half, half + 1):
        for y in range(-half, half + 1):
            kernel_val = (math.exp(-((x * x) + (y * y)) / (2 * var)) *
                          math.cos(w * x * math.cos(phase) + w * y * math.sin(phase) + psi))
            cv.Set2D(kernel, y + half, x + half, cv.Scalar(kernel_val))
            # cv.Set2D(kernelimg, y + half, x + half, cv.Scalar(kernel_val / 2 + 0.5))

    # Convolve the image with the kernel and take the squared response magnitude
    cv.Filter2D(src_f, dest, kernel, (-1, -1))
    # cv.Resize(kernelimg, big_kernelimg)
    cv.Pow(dest, dest_mag, 2)

    # return (dest_mag, big_kernelimg, dest)
    return (dest_mag, dest)
    # cv.ShowImage("Mag", dest_mag)
    # cv.ShowImage("Kernel", big_kernelimg)
    # cv.ShowImage("Process window", dest)
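
# Hypothetical usage sketch, not part of the original file: shows one way
# Process() might be driven once kernel_size is set as the module-level global
# it expects.  The file name "input.png" and the pos_* values are illustrative
# assumptions only; they mirror the trackbar scaling above (variance = pos_var / 10,
# angles in degrees).
def demo_process(path="input.png"):
    global kernel_size
    kernel_size = 21
    img = cv.LoadImage(path, cv.CV_LOAD_IMAGE_COLOR)
    dest_mag, dest = Process(img, pos_var=50, pos_w=50, pos_phase=0, pos_psi=90)
    cv.ShowImage("Mag", dest_mag)
    cv.ShowImage("Process window", dest)
    cv.WaitKey(0)
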
def cvShiftDFT(src_arr, dst_arr):
    """Rearrange the quadrants of a DFT result so the origin lies at the image center."""
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        raise ValueError("cv.ShiftDFT: Source and Destination arrays must have equal sizes")

    if src_arr is dst_arr:
        tmp = cv.CreateMat(size[1] // 2, size[0] // 2, cv.GetElemType(src_arr))

    cx = size[0] // 2
    cy = size[1] // 2  # image center

    # Source quadrants: top-left, top-right, bottom-right, bottom-left
    q1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    q2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    q3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    q4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))
    # Destination quadrants must come from dst_arr (the original took them from src_arr)
    d1 = cv.GetSubRect(dst_arr, (0, 0, cx, cy))
    d2 = cv.GetSubRect(dst_arr, (cx, 0, cx, cy))
    d3 = cv.GetSubRect(dst_arr, (cx, cy, cx, cy))
    d4 = cv.GetSubRect(dst_arr, (0, cy, cx, cy))

    if src_arr is not dst_arr:
        if cv.GetElemType(q1) != cv.GetElemType(d1):
            raise ValueError("cv.ShiftDFT: Source and Destination arrays must have the same format")
        # Swap diagonally opposite quadrants while copying into the destination
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        # In-place: swap diagonal quadrant pairs through a temporary buffer
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
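
# Illustrative sketch, loosely modeled on OpenCV's classic DFT sample and not
# part of the original file: load an image (the path "input.png" is an
# assumption), take its forward DFT, build a log-magnitude image, and recenter
# the spectrum in place with cvShiftDFT().
def demo_shift_dft(path="input.png"):
    im = cv.LoadImage(path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    w, h = cv.GetSize(im)

    # Pack the grayscale image into the real plane of a 2-channel complex image
    real = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
    imag = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 1)
    complex_in = cv.CreateImage((w, h), cv.IPL_DEPTH_64F, 2)
    cv.ConvertScale(im, real, 1.0, 0.0)
    cv.Zero(imag)
    cv.Merge(real, imag, None, None, complex_in)

    # Forward DFT, then split back into real/imaginary planes
    cv.DFT(complex_in, complex_in, cv.CV_DXT_FORWARD, h)
    cv.Split(complex_in, real, imag, None, None)

    # log(1 + magnitude) for display
    cv.Pow(real, real, 2.0)
    cv.Pow(imag, imag, 2.0)
    cv.Add(real, imag, real, None)
    cv.Pow(real, real, 0.5)
    cv.AddS(real, cv.ScalarAll(1.0), real, None)
    cv.Log(real, real)

    # Move the zero-frequency component to the center of the spectrum (in place)
    cvShiftDFT(real, real)

    # Normalize to [0, 1] for display
    min_val, max_val, _, _ = cv.MinMaxLoc(real)
    cv.ConvertScale(real, real, 1.0 / (max_val - min_val), -min_val / (max_val - min_val))
    cv.ShowImage("DFT magnitude", real)
    cv.WaitKey(0)
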
def downsample_and_detect(self, rgb):
    """
    Downsample the input image to approximately VGA resolution and detect the
    calibration target corners in the full-size image.

    Combines these apparently orthogonal duties as an optimization. Checkerboard
    detection is too expensive on large images, so it's better to do detection on
    the smaller display image and scale the corners back up to the correct size.

    Returns (scrib, corners, downsampled_corners, board, (x_scale, y_scale)).
    """
    # Scale the input image down to ~VGA size
    (width, height) = cv.GetSize(rgb)
    scale = math.sqrt((width * height) / (640. * 480.))
    if scale > 1.0:
        scrib = cv.CreateMat(int(height / scale), int(width / scale), cv.GetElemType(rgb))
        cv.Resize(rgb, scrib)
    else:
        scrib = cv.CloneMat(rgb)
    # Due to rounding, actual horizontal/vertical scaling may differ slightly
    x_scale = float(width) / scrib.cols
    y_scale = float(height) / scrib.rows

    if self.pattern == Patterns.Chessboard:
        # Detect checkerboard
        (ok, downsampled_corners, board) = self.get_corners(scrib, refine=True)

        # Scale corners back to full size image
        corners = None
        if ok:
            if scale > 1.0:
                # Refine up-scaled corners in the original full-res image
                # TODO Does this really make a difference in practice?
                corners_unrefined = [(c[0] * x_scale, c[1] * y_scale) for c in downsampled_corners]
                # TODO It's silly that this conversion is needed, this function should just work
                # on the one-channel mono image
                mono = cv.CreateMat(rgb.rows, rgb.cols, cv.CV_8UC1)
                cv.CvtColor(rgb, mono, cv.CV_BGR2GRAY)
                radius = int(math.ceil(scale))
                corners = cv.FindCornerSubPix(mono, corners_unrefined, (radius, radius), (-1, -1),
                                              (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))
            else:
                corners = downsampled_corners
    else:
        # Circle grid detection is fast even on large images
        (ok, corners, board) = self.get_corners(rgb)
        # Scale corners to downsampled image for display
        downsampled_corners = None
        if ok:
            if scale > 1.0:
                downsampled_corners = [(c[0] / x_scale, c[1] / y_scale) for c in corners]
            else:
                downsampled_corners = corners

    return (scrib, corners, downsampled_corners, board, (x_scale, y_scale))
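
# Hypothetical usage sketch, not part of the calibrator class above: assumes a
# `calibrator` object exposing downsample_and_detect() as defined here, and that
# the returned `board` description provides n_cols/n_rows attributes (as the ROS
# camera_calibration target description does).  It overlays the downsampled
# corners on the display-sized image.
def show_detection(calibrator, rgb):
    scrib, corners, downsampled_corners, board, _ = calibrator.downsample_and_detect(rgb)
    if downsampled_corners:
        cv.DrawChessboardCorners(scrib, (board.n_cols, board.n_rows),
                                 downsampled_corners, True)
    cv.ShowImage("display", scrib)
    cv.WaitKey(10)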