def show_np(name, np_arr, w, h):
    '''
    show_np(): shows a numpy array
    inputs:
        name: string for the name of this image
        np_arr: numpy array object representing the image
        w, h: ints for the size of the image to show
    returns: nothing
    effects: makes and shows a window called name, showing the image
    '''
    act_mat = cv.fromarray(np_arr.reshape((w, h)))
    if w * 10 < 500 and h * 10 < 500:
        w, h = w * 10, h * 10
    act_mat_big = cv.CreateMat(w, h, act_mat.type)
    cv.Resize(act_mat, act_mat_big, cv.CV_INTER_NN)
    show_image(name, act_mat_big)
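# A minimal usage sketch for show_np(), assuming show_image() (called above) opens a
# HighGUI window; the 28x28 size and the random activation values are made up for
# illustration only.
import numpy as np
example_activations = np.random.rand(28 * 28).astype(np.float32)
show_np("retina activations", example_activations, 28, 28)
cv.WaitKey(0)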
def calc_transform(self, points, correction):
    src = [(0, 0), (0, 0), (0, 0), (0, 0)]
    dst = [(0, 0), (0, CAMERA_SIZE[1]), (CAMERA_SIZE[0], 0), CAMERA_SIZE]
    for i in range(4):
        minimum = sum([j**2 for j in CAMERA_SIZE])
        for j in range(4):
            distance = sum([(dst[i][k] - points[j][k])**2 for k in range(2)])
            if distance < minimum:
                minimum = distance
                src[i] = tuple(
                    [points[j][k] + correction[i][k] for k in range(2)])
    transform = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.GetPerspectiveTransform(src, dst, transform)
    return transform
def __init__(self, left_filter, right_filter, left_rect, right_rect):
    '''
    @param left_filter: is in the Fourier domain where the left eye corresponds
        to the real output and the right eye corresponds to the imaginary output
    '''
    # Check the input to this function
    r, c = left_filter.rows, left_filter.cols
    assert left_filter.width == right_filter.width
    assert left_filter.height == right_filter.height
    assert left_filter.channels == 1
    assert right_filter.channels == 1

    # Create the arrays needed for the computation
    self.left_filter = cv.CreateMat(r, c, cv.CV_32F)
    self.right_filter = cv.CreateMat(r, c, cv.CV_32F)
    self.left_filter_dft = cv.CreateMat(r, c, cv.CV_32F)
    self.right_filter_dft = cv.CreateMat(r, c, cv.CV_32F)
    self.image = cv.CreateMat(r, c, cv.CV_32F)
    self.left_corr = cv.CreateMat(r, c, cv.CV_32F)
    self.right_corr = cv.CreateMat(r, c, cv.CV_32F)

    # Populate the spatial filters
    cv.ConvertScale(left_filter, self.left_filter)
    cv.ConvertScale(right_filter, self.right_filter)

    # Compute the filters in the Fourier domain
    cv.DFT(self.left_filter, self.left_filter_dft, cv.CV_DXT_FORWARD)
    cv.DFT(self.right_filter, self.right_filter_dft, cv.CV_DXT_FORWARD)

    # Set up correlation region of interest
    self.left_rect = left_rect
    self.right_rect = right_rect
    self.left_roi = cv.GetSubRect(self.left_corr, self.left_rect)
    self.right_roi = cv.GetSubRect(self.right_corr, self.right_rect)

    # Create the look up table for the log transform
    self.lut = cv.CreateMat(256, 1, cv.CV_32F)
    for i in range(256):
        self.lut[i, 0] = math.log(i + 1)
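# A hedged usage sketch for the constructor above. The enclosing class name
# ("EyeFilterCorrelator") and the 64x64 filter size are assumptions, since neither is
# shown in this snippet; the rectangles restrict where each eye's correlation peak is sought.
left_filt = cv.CreateMat(64, 64, cv.CV_32F)
right_filt = cv.CreateMat(64, 64, cv.CV_32F)
cv.SetZero(left_filt)
cv.SetZero(right_filt)
tracker = EyeFilterCorrelator(left_filt, right_filt,
                              (0, 0, 32, 64),    # left-eye search region (x, y, w, h)
                              (32, 0, 32, 64))   # right-eye search region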
def calibrate(self):
    image_points_mat = concatenate_mats(self.image_points)
    object_points_mat = concatenate_mats(self.object_points)

    print "Image Points:"
    for row in range(image_points_mat.height):
        for col in range(image_points_mat.width):
            print image_points_mat[row, col]
    print

    print "Object Points:"
    for row in range(object_points_mat.height):
        for col in range(object_points_mat.width):
            print object_points_mat[row, col]
    print

    point_counts_mat = cv.CreateMat(1, self.number_of_scenes, cv.CV_32SC1)
    for i in range(self.number_of_scenes):
        point_counts_mat[0, i] = self.image_points[i].width

    intrinsics = cv.CreateMat(3, 3, cv.CV_32FC1)
    distortion = cv.CreateMat(4, 1, cv.CV_32FC1)
    cv.SetZero(intrinsics)
    cv.SetZero(distortion)

    size = (self.projector_info.width, self.projector_info.height)
    tvecs = cv.CreateMat(self.number_of_scenes, 3, cv.CV_32FC1)
    rvecs = cv.CreateMat(self.number_of_scenes, 3, cv.CV_32FC1)
    cv.CalibrateCamera2(object_points_mat, image_points_mat, point_counts_mat,
                        size, intrinsics, distortion, rvecs, tvecs, flags=0)

    R = cv.CreateMat(3, 3, cv.CV_32FC1)
    P = cv.CreateMat(3, 4, cv.CV_32FC1)
    cv.SetIdentity(R)
    cv.SetZero(P)
    cv.Copy(intrinsics, cv.GetSubRect(P, (0, 0, 3, 3)))

    self.projector_info = create_msg(size, distortion, intrinsics, R, P)
    rospy.loginfo(self.projector_info)

    for col in range(3):
        for row in range(self.number_of_scenes):
            print tvecs[row, col],
        print
def hsv_orange_red_threshold(input_image):
    blur_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
    cv.Smooth(input_image, blur_image, cv.CV_BLUR, 10, 10)

    proc_image = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC3)
    cv.CvtColor(blur_image, proc_image, cv.CV_BGR2HSV)

    split_image = [cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
                   cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1),
                   cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)]
    cv.Split(proc_image, split_image[0], split_image[1], split_image[2], None)

    thresh_0 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    thresh_1 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    thresh_2 = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)
    red_orange = cv.CreateMat(input_image.rows, input_image.cols, cv.CV_8UC1)

    cv.Threshold(split_image[1], thresh_0, 128, 255, cv.CV_THRESH_BINARY)      # > 50% saturation
    cv.Threshold(split_image[0], thresh_1, 220, 255, cv.CV_THRESH_BINARY)      # > Purple
    cv.Threshold(split_image[0], thresh_2, 10, 255, cv.CV_THRESH_BINARY_INV)   # < Yellow-Orange

    cv.Add(thresh_1, thresh_2, red_orange)
    cv.And(red_orange, thresh_0, red_orange)
    return red_orange
def calc_retinal_layer(input_file, retina_w, retina_h, is_random=False, detailed=False):
    '''
    calc_retinal_layer(): calculates the values for each ganglion cell in the retinal layer
        1) read in image
        2) resize if necessary
        3) run retinal layer:
            a) run difference-of-gaussian (Laplace seems to be similar/identical) on image
            b) extract given number of pixels
    NOTE: always call build_kernel before calling this function the first time
    inputs:
        input_file: the filename for the input image
    returns: numpy array of floats, such that each is the value of a ganglion cell
    effects: none
    '''
    global dog_2d_mat, dog_2d_mat_detailed
    if detailed:
        raise Exception('detailed no longer supported')

    # load image, convert to grayscale
    image = load_input(input_file)

    # do difference of gaussian filtering
    im_w, im_h = image.cols, image.rows
    dog_mat = cv.CreateMat(im_h, im_w, cv.CV_32FC1)
    dog_2d_filter = dog_2d_mat
    cv.Filter2D(image, dog_mat, dog_2d_filter)

    # crop image
    dog_mat_small = crop_image(dog_mat, retina_w, retina_h)

    # convert to numpy and cap at [-1.0, 1.0]
    dog_np = np.asarray(dog_mat_small)
    dog_np = np.minimum(dog_np, np.ones(dog_np.shape))
    dog_np = np.maximum(dog_np, -np.ones(dog_np.shape))

    # print_stats('dog', dog_np)
    # show_image('input image', image)
    # show_image('d-o-g filtered', dog_mat)
    return dog_np
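# Minimal usage sketch for calc_retinal_layer(); 'face.png' and the 64x64 retina size are
# assumptions, and build_kernel() must be called first, as the docstring requires.
build_kernel()
retina = calc_retinal_layer('face.png', 64, 64)
print retina.shape, retina.min(), retina.max()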
def redraw_stereo(self, drawable):
    width, height = cv.GetSize(drawable.lscrib)

    display = cv.CreateMat(max(480, height), 2 * width + 100, cv.CV_8UC3)
    cv.Zero(display)
    cv.Copy(drawable.lscrib, cv.GetSubRect(display, (0, 0, width, height)))
    cv.Copy(drawable.rscrib, cv.GetSubRect(display, (width, 0, width, height)))
    cv.Set(cv.GetSubRect(display, (2 * width, 0, 100, height)), (255, 255, 255))

    self.buttons(display)

    if not self.c.calibrated:
        if drawable.params:
            for i, (label, lo, hi, progress) in enumerate(drawable.params):
                (w, _), _ = cv.GetTextSize(label, self.font)
                cv.PutText(display, label, (2 * width + (100 - w) / 2, self.y(i)),
                           self.font, (0, 0, 0))
                color = (0, 255, 0)
                if progress < 1.0:
                    color = (0, int(progress * 255.), 255)
                cv.Line(display,
                        (int(2 * width + lo * 100), self.y(i) + 20),
                        (int(2 * width + hi * 100), self.y(i) + 20),
                        color, 4)
    else:
        cv.PutText(display, "epi.", (2 * width, self.y(0)), self.font, (0, 0, 0))
        if drawable.epierror == -1:
            msg = "?"
        else:
            msg = "%.2f" % drawable.epierror
        cv.PutText(display, msg, (2 * width, self.y(1)), self.font, (0, 0, 0))
        # TODO dim is never set anywhere. Supposed to be observed chessboard size?
        if drawable.dim != -1:
            cv.PutText(display, "dim", (2 * width, self.y(2)), self.font, (0, 0, 0))
            cv.PutText(display, "%.3f" % drawable.dim, (2 * width, self.y(3)),
                       self.font, (0, 0, 0))

    self.show(display)
def __init__(self, initial_val=[0], p_noise=[1e-2], m_noise=[1e-3], m_mat=[1], ecv=[1]):
    self.kalman = cv.CreateKalman(len(initial_val), len(initial_val), 0)
    self.measurement = cv.CreateMat(len(initial_val), 1, cv.CV_32FC1)
    self.prediction = None

    cv.Zero(self.measurement)
    cv.SetIdentity(self.kalman.measurement_matrix, cv.RealScalar(*m_mat))
    cv.SetIdentity(self.kalman.process_noise_cov, cv.RealScalar(*p_noise))
    cv.SetIdentity(self.kalman.measurement_noise_cov, cv.RealScalar(*m_noise))
    cv.SetIdentity(self.kalman.error_cov_post, cv.RealScalar(*ecv))

    for v in initial_val:
        self.kalman.state_post[initial_val.index(v), 0] = v
        self.kalman.state_pre[initial_val.index(v), 0] = v
def correct_horizontal_motion(image, prev_trackpts, curr_trackpts, boundary_pts):
    image_boundaries = (1, 1, image.width - 2, image.height - 2)

    motion_matrix = cv.CreateMat(3, 3, cv.CV_32FC1)
    cv.FindHomography(prev_trackpts, curr_trackpts, motion_matrix, cv.CV_RANSAC, 10)

    boundary_pts = boundary_pts.reshape((1, -1, 2)).astype(np.float32)
    warped_boundary_pts = np.zeros_like(boundary_pts)
    cv.PerspectiveTransform(boundary_pts, warped_boundary_pts, motion_matrix)

    pt11, pt12 = cvutils.line_rectangle_intersection(
        tuple(warped_boundary_pts[0, 0, :]),
        tuple(warped_boundary_pts[0, 3, :]),
        image_boundaries)
    pt21, pt22 = cvutils.line_rectangle_intersection(
        tuple(warped_boundary_pts[0, 1, :]),
        tuple(warped_boundary_pts[0, 2, :]),
        image_boundaries)
    boundary_pts = cvutils.reorder_boundary_points(np.array((pt11, pt12, pt21, pt22)))

    return boundary_pts.reshape((-1, 2)).astype(np.int), motion_matrix
def run(self):
    starttime = time.time()

    gray = cv.CreateMat(480, 640, cv.CV_8UC1)
    im = cv.LoadImage(self.image)
    rect = cv.BoundingRect(self.obj.cont)

    siftimage = siftfastpy.Image(rect[2], rect[3])
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)
    gnp = np.asarray(cv.GetSubRect(gray, rect))
    siftimage.SetData(gnp)
    print 'initialization in: %fs' % (time.time() - starttime)

    frames, desc = siftfastpy.GetKeypoints(siftimage)
    self.obj.frames = frames
    self.obj.desc = desc
    print '%d keypoints found in %fs' % (frames.shape[0], time.time() - starttime)
def scanImg(self, scanBlock, color=None, threshold=4, iterativeCalculate=True):
    self.temporary_img = cv.CreateMat(self.height, self.width, cv.CV_8UC1)
    cv.Set(self.temporary_img, 0)
    for h in range(self.height):
        for w in range(self.width):
            if iterativeCalculate and self.calculatedImg[h, w] != 0:
                continue
            for i, j in ((1, 1), (1, -1), (-1, 1), (-1, -1)):
                pixelValue = self.getIntegralPixelValue(
                    w, h, (scanBlock[0] * i, scanBlock[1] * j), i * j)
                if pixelValue < threshold:
                    self.setThisPoint(w, h, color, scanBlock, (i, j))
                    break
def extract_plate(image, cluster):
    """De-skew and extract a detected plate from an image."""
    cluster = list(cluster)
    cluster.sort(cmp=lambda x, y: cmp(x.cx, y.cx))

    o = cluster[-1].cy - cluster[0].cy
    h = cluster[0].dist_to(cluster[-1])
    angle = math.asin(o / h)

    matrix = cv.CreateMat(2, 3, cv.CV_32FC1)
    cx = (cluster[0].cx + cluster[-1].cx) / 2.0
    cy = (cluster[0].cy + cluster[-1].cy) / 2.0
    getattr(cv, '2DRotationMatrix')((cx, cy), angle * 180.0 / math.pi, 1, matrix)

    warp = cv.CreateImage(cv.GetSize(image), cv.IPL_DEPTH_8U, 3)
    cv.WarpAffine(image, warp, matrix)

    ret = cv.CreateImage((h + cluster[0].dia * 3.0, cluster[0].dia * 1.5),
                         cv.IPL_DEPTH_8U, 3)
    cv.GetRectSubPix(warp, ret, (cx, cy))
    print cv.GetSize(ret)
    return ret
def __init__(self):
    self.pub = rospy.Publisher('analglyph', Image)
    self.sub_depth = rospy.Subscriber('/kinect/depth/image_raw', Image, self.cb_depth)
    self.sub_rgb = rospy.Subscriber('/kinect/rgb/image_raw', Image, self.cb_rgb)
    self.bridge = CvBridge()

    self.img_out = cv.CreateMat(480, 640, cv.CV_8UC3)
    self.img_depth = cv.CreateMat(480, 640, cv.CV_8UC1)
    self.img_r = cv.CreateMat(480, 640, cv.CV_8UC1)
    self.img_g = cv.CreateMat(480, 640, cv.CV_8UC1)
    self.img_b = cv.CreateMat(480, 640, cv.CV_8UC1)
    self.img_rgb = cv.CreateMat(480, 640, cv.CV_8UC3)
def detectCircles(self, rect):
    """Detect circles in the picture.

    The Hough circle transform parameters min_radius and max_radius
    are adjusted as follows (for the 768x576 image):

    * The black direction marker is around 8 to 11 pixels wide.
    * The ball is around 12 to 16 pixels wide.

    The above estimates are taken with GIMP. The lower bounds are
    obtained using the darker, inner pixels and the upper bounds using
    the lighter edge pixels. Since the Hough circle transform is
    resistant against damage (it could work with only half of the
    circle present), we stick with radii obtained from these measures
    and do not try to "account for" the cases where the circles are
    somehow obscured and look smaller to the eye.
    """
    out = cv.CloneImage(rect)
    cv.Smooth(rect, rect, cv.CV_GAUSSIAN, 9, 9)
    size = cv.GetSize(rect)
    cv.CvtColor(rect, self.gray8, cv.CV_BGR2GRAY)
    #storage = cv.CreateMemStorage(0)
    storage = cv.CreateMat(7000, 1, cv.CV_32FC3)
    print "PARAMS:", self.hough_params

    #Note: circles.total denotes the number of circles
    circles = cv.HoughCircles(
        self.gray8,
        storage,
        cv.CV_HOUGH_GRADIENT,
        3,                              #dp / resolution
        2,                              #circle dist threshold
        max(1, self.hough_params[0]),   #param1
        max(1, self.hough_params[1]),   #param2
        2,                              #min_radius
        14,                             #max_radius
    )

    for x, y, radius in [storage[i, 0] for i in range(storage.rows)]:
        cv.Circle(rect, (x, y), cv.Round(radius), cv.CV_RGB(300, 1, 1))
    cv.ShowImage("BASD", rect)
    return rect
def cvShiftDFT(src_arr, dst_arr):
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        cv.Error(cv.CV_StsUnmatchedSizes, "cv.ShiftDFT",
                 "Source and Destination arrays must have equal sizes",
                 __FILE__, __LINE__)

    if src_arr is dst_arr:
        tmp = cv.CreateMat(size[1] / 2, size[0] / 2, cv.GetElemType(src_arr))

    cx = size[0] / 2
    cy = size[1] / 2  # image center

    q1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    q2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    q3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    q4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))
    d1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    d2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    d3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    d4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))

    if src_arr is not dst_arr:
        if not cv.CV_ARE_TYPES_EQ(q1, d1):
            cv.Error(cv.CV_StsUnmatchedFormats, "cv.ShiftDFT",
                     "Source and Destination arrays must have the same format",
                     __FILE__, __LINE__)
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
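# For reference, the quadrant swap implemented above is the same operation numpy exposes
# as fftshift; a small sketch of the equivalent call on an even-sized array:
import numpy as np
spectrum = np.arange(16, dtype=np.float32).reshape(4, 4)
centered = np.fft.fftshift(spectrum)   # moves the zero-frequency term to the array centre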
def find_col_x1(I, Icol, bb, K=3, AX=0.2, AY=0.2, T=0.9):
    """ Tries to find the column of marks on I, using ICOL as a ref.
    image in template matching.
    """
    roi_prev = cv.GetImageROI(I)
    shift_roi(I, bb[0], bb[1], bb[2] - bb[0], bb[3] - bb[1])
    w_A, h_A = cv.GetSize(Icol)
    w_I, h_I = cv.GetSize(I)

    M = cv.CreateMat(h_I - h_A + 1, w_I - w_A + 1, cv.CV_32F)
    cv.MatchTemplate(I, Icol, M, cv.CV_TM_CCOEFF_NORMED)

    if DEBUG_SAVEIMGS:
        M_np = np.array(M)
        import scipy.misc
        print_dbg("<><><><> Saving '_Mbase.png' <><><><>")
        cv.SaveImage("_Mbase.png", I)
        print_dbg("<><><><> Saving '_M.png' <><><><>")
        scipy.misc.imsave("_M.png", M)
        pdb.set_trace()

    cv.SetImageROI(I, roi_prev)

    i = 0
    xs = []
    _xamt, _yamt = int(round(AX * w_A)), int(round(AY * h_A))
    while i < K:
        minResp, maxResp, minLoc, maxLoc = cv.MinMaxLoc(M)
        if maxResp < T:
            break
        x, y = maxLoc
        # Find the /leftmost/ match: don't find a match in the middle
        # of a column.
        while M[y, x] >= T:
            x -= 1
        xs.append((x + bb[0]))
        _x1 = max(1, x - _xamt)
        _x2 = max(1, x + _xamt)
        _y1 = max(1, y - _yamt)
        _y2 = max(1, y + _yamt)
        M[_y1:_y2, _x1:_x2] = -1.0
        i += 1

    if not xs:
        return None
    elif len(xs) == 1:
        return xs[0]
    return np.median(xs)
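# Hedged usage sketch for find_col_x1(); the file names and bounding box below are made up.
# I is the full grayscale page image, Icol a cropped exemplar of one mark in the column.
I = cv.LoadImage("page.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
Icol = cv.LoadImage("col_mark.png", cv.CV_LOAD_IMAGE_GRAYSCALE)
x1 = find_col_x1(I, Icol, (50, 100, 400, 900), K=3, T=0.9)
if x1 is not None:
    print "column found near x =", x1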
def calibrate(cam):
    print "\n\nCamera calibration " + str(cam)

    intrinsicPath = "/var/www/MuseumVisitors/calibration/calibIntrCam" + str(
        cam) + "-R1280x800.yml"
    extrinsicPath = "/var/www/MuseumVisitors/calibration/calibExtr_" + str(
        cam) + ".yaml"

    print "Rotation matrix:"
    VectRot = cv2.cv.Load(extrinsicPath, cv.CreateMemStorage(), name="Rotation")
    MatRot = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.Rodrigues2(VectRot, MatRot)
    MatRot = np.matrix(MatRot)
    print MatRot

    print "Translation vector:"
    VectTrans = cv2.cv.Load(extrinsicPath, cv.CreateMemStorage(), name="Translation")
    VectTrans = np.matrix(VectTrans)
    print VectTrans

    print "Camera matrix:"
    MatCam = cv2.cv.Load(intrinsicPath, cv.CreateMemStorage(), name="camera_matrix")
    MatCam = np.matrix(MatCam)
    print MatCam

    RotTrans = np.concatenate((MatRot[0:3, 0:2], VectTrans), axis=1)
    print(RotTrans)

    Hw = MatCam * RotTrans
    print(Hw)

    HT = (Hw.T).I
    HI2W = Hw.I
    HW2I = (HI2W.I).T

    print "HI2W:"
    print(HI2W)
    print "HW2I:"
    print(HW2I)
def _get_corners(img, board, refine=True):
    """
    Get corners for a particular chessboard for an image
    """
    w, h = cv.GetSize(img)
    if img.channels == 3:
        mono = cv.CreateMat(h, w, cv.CV_8UC1)
        cv.CvtColor(img, mono, cv.CV_BGR2GRAY)
    else:
        mono = img
    (ok, corners) = cv.FindChessboardCorners(
        mono, (board.n_cols, board.n_rows),
        cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE |
        cv2.CALIB_CB_FAST_CHECK)

    # If any corners are within BORDER pixels of the screen edge, reject the detection by setting ok to false
    # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction
    # of the image size. See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras
    BORDER = 8
    if not all([(BORDER < x < (w - BORDER)) and (BORDER < y < (h - BORDER))
                for (x, y) in corners]):
        ok = False

    if refine and ok:
        # Use a radius of half the minimum distance between corners. This should be large enough to snap to the
        # correct corner, but not so large as to include a wrong corner in the search window.
        min_distance = float("inf")
        for row in range(board.n_rows):
            for col in range(board.n_cols - 1):
                index = row * board.n_rows + col
                min_distance = min(min_distance,
                                   _pdist(corners[index], corners[index + 1]))
        for row in range(board.n_rows - 1):
            for col in range(board.n_cols):
                index = row * board.n_rows + col
                min_distance = min(
                    min_distance,
                    _pdist(corners[index], corners[index + board.n_cols]))
        radius = int(math.ceil(min_distance * 0.5))
        corners = cv.FindCornerSubPix(
            mono, corners, (radius, radius), (-1, -1),
            (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 30, 0.1))

    return (ok, corners)
def find_homography(corners1, corners2, method=METHOD,
                    threshold=THRESHOLD, status=STATUS):
    array1 = np.mat(corners1)
    array2 = np.mat(corners2)
    homography = cv.CreateMat(3, 3, cv.CV_64F)
    cv.FindHomography(cv.fromarray(array1), cv.fromarray(array2),
                      homography, method, threshold)
    if VERBOSE:
        # TODO: find a less obnoxious way to do this
        print "Homography matrix:"
        for i in range(3):
            print homography[i, 0],
            print homography[i, 1],
            print homography[i, 2]
    #find_svd(homography)
    return homography
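# Hedged usage sketch for find_homography(); the four correspondences below are made-up
# values (at least four point pairs are needed to estimate a homography).
corners_a = [(10.0, 10.0), (200.0, 12.0), (205.0, 150.0), (8.0, 148.0)]
corners_b = [(15.0, 20.0), (210.0, 18.0), (212.0, 160.0), (12.0, 155.0)]
H = find_homography(corners_a, corners_b)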
def kalmanFilter(positions, velocities, processNoiseCov, measurementNoiseCov):
    kalman = cv.CreateKalman(6, 4)
    kalman.transition_matrix[0, 2] = 1
    kalman.transition_matrix[0, 4] = 1. / 2
    kalman.transition_matrix[1, 3] = 1
    kalman.transition_matrix[1, 5] = 1. / 2
    kalman.transition_matrix[2, 4] = 1
    kalman.transition_matrix[3, 5] = 1

    cv.SetIdentity(kalman.measurement_matrix, 1.)
    cv.SetIdentity(kalman.process_noise_cov, processNoiseCov)
    cv.SetIdentity(kalman.measurement_noise_cov, measurementNoiseCov)
    cv.SetIdentity(kalman.error_cov_post, 1.)

    p = positions[0]
    v = velocities[0]
    v2 = velocities[2]
    a = (v2 - v).multiply(0.5)
    kalman.state_post[0, 0] = p.x
    kalman.state_post[1, 0] = p.y
    kalman.state_post[2, 0] = v.x
    kalman.state_post[3, 0] = v.y
    kalman.state_post[4, 0] = a.x
    kalman.state_post[5, 0] = a.y

    filteredPositions = moving.Trajectory()
    filteredVelocities = moving.Trajectory()
    measurement = cv.CreateMat(4, 1, cv.CV_32FC1)
    for i in xrange(positions.length()):
        cv.KalmanPredict(kalman)  # no control
        p = positions[i]
        v = velocities[i]
        measurement[0, 0] = p.x
        measurement[1, 0] = p.y
        measurement[2, 0] = v.x
        measurement[3, 0] = v.y
        cv.KalmanCorrect(kalman, measurement)
        filteredPositions.addPositionXY(kalman.state_post[0, 0], kalman.state_post[1, 0])
        filteredVelocities.addPositionXY(kalman.state_post[2, 0], kalman.state_post[3, 0])

    return (filteredPositions, filteredVelocities)
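# For reference, cv.CreateKalman initialises the transition matrix to identity, so the
# assignments above amount to a constant-acceleration model with unit time step,
# state [x, y, vx, vy, ax, ay] and measurement [x, y, vx, vy]:
#   x'  = x  + vx + ax / 2
#   y'  = y  + vy + ay / 2
#   vx' = vx + ax
#   vy' = vy + ay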
def __init__(self, ns_list):
    print "Creating aggregator for ", ns_list

    self.lock = threading.Lock()

    # image
    w = 640
    h = 480
    self.image_out = cv.CreateMat(h, w, cv.CV_8UC3)
    self.pub = rospy.Publisher('aggregated_image', Image)
    self.bridge = CvBridge()
    self.image_captured = get_image(["Successfully captured checkerboard"])
    self.image_optimized = get_image(["Successfully ran optimization"])
    self.image_failed = get_image(["Failed to run optimization"], False)

    # create render windows
    layouts = [(1, 1), (2, 2), (2, 2), (2, 2), (3, 3), (3, 3), (3, 3), (3, 3), (3, 3)]
    layout = layouts[len(ns_list) - 1]
    sub_w = w / layout[0]
    sub_h = h / layout[1]
    self.windows = []
    for j in range(layout[1]):
        for i in range(layout[0]):
            self.windows.append(
                cv.GetSubRect(self.image_out,
                              (i * sub_w, j * sub_h, sub_w, sub_h)))

    # create renderers
    self.renderer_list = []
    for ns in ns_list:
        self.renderer_list.append(ImageRenderer(ns))

    # subscribers
    self.capture_time = rospy.Time(0)
    self.calibrate_time = rospy.Time(0)
    self.captured_sub = rospy.Subscriber('robot_measurement', RobotMeasurement,
                                         self.captured_cb)
    self.optimized_sub = rospy.Subscriber('camera_calibration', CameraCalibration,
                                          self.calibrated_cb)
def init(self, ident):
    ident = int(ident)
    if self.ident > (1 << self.GRID_SIZE**2 - 4) - 1 or self.ident < 0:
        return None

    size = self.GRID_SIZE + 2
    self.code = cv.CreateMat(size, size, cv.CV_8SC1)
    cv.Zero(self.code)
    for p in self.code_points:
        self.code[p[0] + 1, p[1] + 1] = ident % 2
        ident /= 2
    self.code[1, size - 2] = 1
    self.code[size - 2, 1] = 1
    self.code[size - 2, size - 2] = 1

    self.border = [0] * (4 * size)
    for i in range(size):
        self.border[i] = (0, i)
        self.border[i + size] = (i, size - 1)
        self.border[i + 2 * size] = (size - 1, size - i - 1)
        self.border[i + 3 * size] = (size - 1 - i, 0)
def project3dToPixel(self, point):
    """
    :param point:     3D point
    :type point:      (x, y, z)

    Returns the rectified pixel coordinates (u, v) of the 3D point,
    using the camera :math:`P` matrix.
    This is the inverse of :meth:`projectPixelTo3dRay`.
    """
    src = mkmat(4, 1, [point[0], point[1], point[2], 1.0])
    dst = cv.CreateMat(3, 1, cv.CV_64FC1)
    cv.MatMul(self.P, src, dst)
    x = dst[0, 0]
    y = dst[1, 0]
    w = dst[2, 0]
    if w != 0:
        return (x / w, y / w)
    else:
        return (float('nan'), float('nan'))
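# A worked sketch of the projection above: with a 3x4 matrix
#   P = [[fx, 0, cx, Tx], [0, fy, cy, Ty], [0, 0, 1, 0]]
# the homogeneous product gives u = (fx*X + cx*Z + Tx) / Z and v = (fy*Y + cy*Z + Ty) / Z.
# For example (made-up numbers), fx = fy = 500, cx = 320, cy = 240, Tx = Ty = 0 and the
# point (1.0, 0.5, 2.0) yield u = 1140 / 2 = 570.0 and v = 730 / 2 = 365.0.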
def cvShiftDFT(src_arr, dst_arr):
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        cv.Error(cv.CV_StsUnmatchedSizes, "cv.ShiftDFT",
                 "the images must have equal sizes",
                 __FILE__, __LINE__)

    if src_arr is dst_arr:
        tmp = cv.CreateMat(size[1] / 2, size[0] / 2, cv.GetElemType(src_arr))

    cx = size[0] / 2
    cy = size[1] / 2  # image center

    q1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    q2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    q3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    q4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))
    d1 = cv.GetSubRect(src_arr, (0, 0, cx, cy))
    d2 = cv.GetSubRect(src_arr, (cx, 0, cx, cy))
    d3 = cv.GetSubRect(src_arr, (cx, cy, cx, cy))
    d4 = cv.GetSubRect(src_arr, (0, cy, cx, cy))

    if src_arr is not dst_arr:
        if not cv.CV_ARE_TYPES_EQ(q1, d1):
            cv.Error(cv.CV_StsUnmatchedFormats, "cv.ShiftDFT",
                     "the images must have the same format",
                     __FILE__, __LINE__)
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
def cal_fromcorners(self, good):
    """
    :param good: Good corner positions and boards
    :type good: [(corners, ChessboardInfo)]
    """
    boards = [b for (_, b) in good]

    ipts = self.mk_image_points(good)
    opts = self.mk_object_points(boards)
    npts = self.mk_point_counts(boards)

    intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
    if self.calib_flags & cv2.CALIB_RATIONAL_MODEL:
        distortion = cv.CreateMat(8, 1, cv.CV_64FC1)  # rational polynomial
    else:
        distortion = cv.CreateMat(5, 1, cv.CV_64FC1)  # plumb bob
    cv.SetZero(intrinsics)
    cv.SetZero(distortion)
    # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio
    intrinsics[0, 0] = 1.0
    intrinsics[1, 1] = 1.0
    cv.CalibrateCamera2(opts, ipts, npts, self.size,
                        intrinsics, distortion,
                        cv.CreateMat(len(good), 3, cv.CV_32FC1),
                        cv.CreateMat(len(good), 3, cv.CV_32FC1),
                        flags=self.calib_flags)
    self.intrinsics = intrinsics
    self.distortion = distortion

    # R is identity matrix for monocular calibration
    self.R = cv.CreateMat(3, 3, cv.CV_64FC1)
    cv.SetIdentity(self.R)
    self.P = cv.CreateMat(3, 4, cv.CV_64FC1)
    cv.SetZero(self.P)

    self.mapx = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
    self.mapy = cv.CreateImage(self.size, cv.IPL_DEPTH_32F, 1)
    self.set_alpha(0.0)
def scanline_numbers_to_planes(self, scanline_numbers):
    rows = scanline_numbers.height
    cols = scanline_numbers.width

    normal_vectors_x = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Set(normal_vectors_x, -1)
    normal_vectors_y = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Set(normal_vectors_y, 0)
    normal_vectors_z = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Copy(scanline_numbers, normal_vectors_z)
    cv.ConvertScale(normal_vectors_z, normal_vectors_z, scale=self.pixels_per_scanline)
    cv.AddS(normal_vectors_z, -self.center_pixel, normal_vectors_z)
    cv.ConvertScale(normal_vectors_z, normal_vectors_z,
                    scale=1.0 / self.projector_model.fx())

    normal_vectors = cv.CreateMat(rows, cols, cv.CV_32FC3)
    cv.Merge(normal_vectors_x, normal_vectors_y, normal_vectors_z, None, normal_vectors)

    # Bring the normal vectors into camera coordinates
    cv.Transform(normal_vectors, normal_vectors,
                 self.projector_to_camera_rotation_matrix)

    normal_vectors_split = [None] * 3
    for i in range(3):
        normal_vectors_split[i] = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.Split(normal_vectors, normal_vectors_split[0], normal_vectors_split[1],
             normal_vectors_split[2], None)

    n_dot_p = cv.CreateMat(rows, cols, cv.CV_32FC1)
    cv.SetZero(n_dot_p)
    for i in range(3):
        cv.ScaleAdd(normal_vectors_split[i],
                    self.projector_to_camera_translation_vector[i],
                    n_dot_p, n_dot_p)

    planes = cv.CreateMat(rows, cols, cv.CV_32FC4)
    cv.Merge(normal_vectors_split[0], normal_vectors_split[1],
             normal_vectors_split[2], n_dot_p, planes)
    return planes
def drawDepthImage(self):
    """
    Draw the captured image to the screen.
    This is very slow, so omit this function in competition.
    """
    image = self.depthImage
    if image != 0:
        # make sure 8 bit image is displayed
        if image.depth == 16:
            newImage = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
            cv.Convert(image, newImage)
            image = newImage

        # generate pygame image
        image_rgb = cv.CreateMat(image.height, image.width, cv.CV_8UC3)
        cv.CvtColor(image, image_rgb, cv.CV_GRAY2RGB)
        pygameImage = pygame.image.frombuffer(image_rgb.tostring(),
                                              cv.GetSize(image_rgb), "RGB")

        # draw the image
        self.screen.blit(pygameImage, (0, 0))
def rotacion_y_posicion_robot(self, robo_x=200, robo_y=100, robo_th=80):
    """
    \brief draw the robot on the map canvas
    \param self not needed when calling the function; it is only listed because
           this is a class method definition
    \param robo_x x coordinate of the robot's odometry
    \param robo_y y coordinate of the robot's odometry
    \param robo_th Th (heading) value of the robot
    \return Nothing
    """
    image_mapa = cv.LoadImage(self.nombre_archivo1, cv.CV_LOAD_IMAGE_COLOR)
    dimensiones_robot = self.dimensiones_robot
    image1 = cv.CreateImage(dimensiones_robot, 8, 3)
    image_mascara = cv.CreateImage(dimensiones_robot, 8, 1)

    ## rotation
    # rotate the robot
    src_center = dimensiones_robot[0] / 2, dimensiones_robot[1] / 2
    rot_mat = cv.CreateMat(2, 3, cv.CV_32FC1)
    cv.GetRotationMatrix2D(src_center, robo_th, 1.0, rot_mat)
    cv.WarpAffine(self.robot, image1, rot_mat)

    # create a mask for black
    cv.InRangeS(image1, cv.RGB(0, 0, 0), cv.RGB(14, 14, 14), image_mascara)
    cv.Not(image_mascara, image_mascara)
    #cv.ReleaseImage(image1)

    # shrink and position
    cv.SetImageROI(
        image_mapa,
        (robo_x, robo_y, dimensiones_robot[0], dimensiones_robot[1]))
    cv.Copy(image1, image_mapa, mask=image_mascara)
    cv.ResetImageROI(image_mapa)

    font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1, 0, 3, 8)  # Creates a font
    #x = 30
    #y = 40
    #cv.PutText(image_mapa, "Robot position (x= %d,y= %d,th= %d)" % (robo_x, robo_y, robo_th), (x, y), font, 255)  # Draw the text

    cv.SaveImage(self.nombre_archivo, image_mapa)  # Saves the image
    #self.graficar(2000, image_mapa)
    return image_mapa
def doIt(args):
    try:
        img = cv.LoadImage(args[0])
    except IOError:
        print "Error: cannot load image " + args[0]
        return 1

    imgGray = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_8U, 1)
    cv.CvtColor(img, imgGray, cv.CV_RGB2GRAY)

    imgFilt = cv.CreateImage(cv.GetSize(imgGray), cv.IPL_DEPTH_8U, 1)
    kernel = cv.CreateMat(3, 3, cv.CV_32F)
    for r in xrange(3):
        for c in xrange(3):
            kernel[r, c] = 1. / 9.
##    kernel[0,0] = 1
##    kernel[0,1] = 1
##    kernel[0,2] = 1
##    kernel[1,0] = 1
##    kernel[1,1] = 1
##    kernel[1,2] = 1
##    kernel[2,0] = 1
##    kernel[2,1] = 1
##    kernel[2,2] = 1
    #kernel = numpy.array([[1, 1, 1],[1, 1, 1],[1, 1, 1]])
    cv.Filter2D(imgGray, imgFilt, kernel)

    if len(args) > 1:
        cv.SaveImage(args[1], imgFilt)
    else:
        cv.NamedWindow(args[0])
        cv.ShowImage(args[0], imgGray)
        cv.NamedWindow("filtered")
        cv.ShowImage("filtered", imgFilt)
        cv.WaitKey(0)
    return 0
def create_transform(x, y, angle, flip):
    transform = cv.CreateMat(2, 3, cv.CV_32FC1)  # A simple 2x3 matrix of floats
    angle = pi - angle    # Work on the upside-down image
    angle = angle - pi    # We want the hand to look upside down in the upside down image
    c = cos(angle)
    s = sin(angle)

    # Create rotation matrix
    transform[0, 0] = c
    transform[1, 0] = s
    transform[0, 1] = -s
    transform[1, 1] = c
    transform[0, 2] = x
    transform[1, 2] = y

    if flip:
        transform[0, 0] = -c  # Flip the x axis of the output image
        transform[1, 0] = -s

    #for i in range(2):
    #    for j in range(2):
    #        print transform[i,j]

    return transform