def normalize(img):
    """Return an 8-bit copy of *img* with its intensity range stretched.

    The image is copied into a 32-bit float buffer, rescaled to [0, 1]
    with cvNormalize(CV_MINMAX), then converted back to IPL_DEPTH_8U by
    multiplying with 255. The input image is not modified.
    """
    as_float = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_32F, 1)
    cvCopy(img, as_float)
    # Stretch the full dynamic range to [0, 1] in float space.
    cvNormalize(as_float, as_float, 1, 0, CV_MINMAX)
    as_byte = cvCreateImage(cvSize(img.width, img.height), IPL_DEPTH_8U, 1)
    cvConvertScale(as_float, as_byte, 255)
    return as_byte
def IplGrayToRGB( i_image ):
    """Return an 8-bit, 3-channel version of *i_image*.

    If the input is already depth 8 with 3 channels it is copied verbatim;
    otherwise cv.highgui.cvConvertImage performs the conversion (e.g.
    grayscale to RGB). A newly allocated image is returned in every case.
    """
    converted = cv.cvCreateImage( cv.cvSize( i_image.width, i_image.height ), 8, 3 )
    already_8u_3ch = (i_image.depth == 8) and (i_image.nChannels == 3)
    if already_8u_3ch:
        cv.cvCopy( i_image, converted )
    else:
        cv.highgui.cvConvertImage( i_image, converted )
    return converted
def IplGrayToRGB(i_image):
    """Produce an 8-bit 3-channel (RGB) copy of *i_image*.

    Inputs that are already depth-8 / 3-channel are duplicated with cvCopy;
    anything else goes through cv.highgui.cvConvertImage. The original
    image is left untouched.
    """
    o_image = cv.cvCreateImage(cv.cvSize(i_image.width, i_image.height), 8, 3)
    if i_image.depth != 8 or i_image.nChannels != 3:
        # Not in the target format yet; let highgui do the conversion.
        cv.highgui.cvConvertImage(i_image, o_image)
    else:
        cv.cvCopy(i_image, o_image)
    return o_image
def next(self):
    """Capture a low-exposure and a high-exposure stereo frame pair.

    After each exposure change a few frames are processed and discarded,
    presumably so the new exposure setting takes effect before the frame
    is used (TODO confirm against the camera driver).

    Returns (low_left, low_right, high_left, high_right).
    """
    settle_frames = 3

    # Low-exposure pass: grab frames, keep copies of the debayered pair.
    vd.set_exposure(self.camera.capture, 55)
    for _ in range(settle_frames):
        self.camera.process_frames()
    cv.cvCopy(self.camera.left_debayered, self.low_exposure_left)
    cv.cvCopy(self.camera.right_debayered, self.low_exposure_right)

    # High-exposure pass: the camera's own buffers are returned directly.
    vd.set_exposure(self.camera.capture, 255)
    for _ in range(settle_frames):
        self.camera.process_frames()

    return (self.low_exposure_left, self.low_exposure_right,
            self.camera.left_debayered, self.camera.right_debayered)
def tile_images(img_width, img_height, num_width, num_height, images, channels=3):
    """Compose *images* onto a white num_width x num_height grid of tiles.

    Tiles are consumed from *images* via pop() (back to front), so the list
    is drained as a side effect. Each tile must be an IPL image of size
    img_width x img_height with *channels* channels.

    Returns the composite image. (The original code built the mosaic but
    never returned it, which made the function useless to callers.)
    """
    w = int(img_width * num_width)
    h = int(img_height * num_height)
    image = cv.cvCreateImage(cv.cvSize(w, h), 8, channels)
    cv.cvSet(image, cv.cvScalar(255, 255, 255))
    # Guard: with a zero-cell grid the original while-loop never popped
    # anything and spun forever.
    if int(num_width) <= 0 or int(num_height) <= 0:
        return image
    while len(images) > 0:
        try:
            for y in range(int(num_height)):
                for x in range(int(num_width)):
                    small_tile = images.pop()
                    img_x = x * img_width
                    img_y = y * img_height
                    # cvGetSubRect yields a view into the mosaic; copying
                    # into it writes the tile in place.
                    cropped = cv.cvGetSubRect(
                        image, cv.cvRect(img_x, img_y, img_width, img_height))
                    cv.cvCopy(small_tile, cropped)
        except IndexError:
            # Ran out of tiles mid-grid; the rest stays white.
            break
    return image
def tile_images(img_width, img_height, num_width, num_height, images, channels=3):
    """Tile *images* into one composite image on a white background.

    The grid is num_width columns by num_height rows of img_width x
    img_height tiles; tiles are taken from the end of *images* with pop(),
    emptying (or partially draining) the list as a side effect.

    Returns the composite image — the original implementation discarded it,
    returning None. Returning it is backward compatible for callers that
    ignored the result.
    """
    w = int(img_width * num_width)
    h = int(img_height * num_height)
    image = cv.cvCreateImage(cv.cvSize(w, h), 8, channels)
    cv.cvSet(image, cv.cvScalar(255, 255, 255))
    # A degenerate grid would make the while-loop below spin forever
    # because nothing is ever popped.
    if int(num_width) <= 0 or int(num_height) <= 0:
        return image
    while len(images) > 0:
        try:
            for y in range(int(num_height)):
                for x in range(int(num_width)):
                    small_tile = images.pop()
                    cropped = cv.cvGetSubRect(
                        image,
                        cv.cvRect(x * img_width, y * img_height,
                                  img_width, img_height))
                    # Copy the tile into its cell via the sub-rect view.
                    cv.cvCopy(small_tile, cropped)
        except IndexError:
            break
    return image
def run(exposure, video=None, display=False, debug=False):
    """Main detection loop: track a laser pointer in a stereo video stream.

    Runs left/right LaserPointerDetectors on each stereo frame pair,
    triangulates the 3D position when both cameras see a detection, and
    prints the point, its distance, and the loop rate. Optionally shows
    debug/display windows via highgui. Runs until the video iterator is
    exhausted; returns nothing.

    NOTE(review): the `exposure` parameter is currently unused — the
    detectors are hard-wired to LaserPointerDetector.SUN_EXPOSURE.
    """
    # --- window setup (display/debug only) ---
    if display:
        hg.cvNamedWindow("video", 1)
        hg.cvMoveWindow("video", 0, 0)
    if debug:
        hg.cvNamedWindow('right', 1)
        hg.cvMoveWindow("right", 800, 0)
        hg.cvNamedWindow("thresholded", 1)
        hg.cvNamedWindow('motion', 1)
        hg.cvNamedWindow('intensity', 1)
        hg.cvMoveWindow("thresholded", 800, 0)
        hg.cvMoveWindow("intensity", 0, 600)
        hg.cvMoveWindow("motion", 800, 600)

    # Default input: canned stereo AVI pair (live Videre capture is the
    # commented-out alternative).
    if video is None:
        #video = cam.VidereStereo(0, gain=96, exposure=exposure)
        video = cam.StereoFile('measuring_tape_red_left.avi','measuring_tape_red_right.avi')

    # Build one detector per camera; the right detector shares the left
    # detector's classifier.
    frames = video.next()
    detector = LaserPointerDetector(frames[0], LaserPointerDetector.SUN_EXPOSURE,
                                    use_color=False, use_learning=True)
    detector_right = LaserPointerDetector(frames[1], LaserPointerDetector.SUN_EXPOSURE,
                                          use_color=False, use_learning=True,
                                          classifier=detector.classifier)
    stereo_cam = cam.KNOWN_CAMERAS['videre_stereo2']

    # Warm-up: run the detectors on 10 frames before timing/reporting,
    # presumably to let the learning/background state settle — TODO confirm.
    for i in xrange(10):
        frames = video.next()
        detector.detect(frames[0])
        detector_right.detect(frames[1])

    # Reusable 640x480 RGB buffers; each incoming frame is copied into
    # these so downstream drawing doesn't touch the source frames.
    lt = cv.cvCreateImage(cv.cvSize(640,480), 8, 3)
    rt = cv.cvCreateImage(cv.cvSize(640,480), 8, 3)

    for l, r in video:
        start_time = time.time()
        #l = stereo_cam.camera_left.undistort_img(l)
        #r = stereo_cam.camera_right.undistort_img(r)
        cv.cvCopy(l, lt)
        cv.cvCopy(r, rt)
        l = lt
        r = rt
        undistort_time = time.time()

        # Right camera first; only the detection dict and blob stats are used.
        _, _, right_cam_detection, stats = detector_right.detect(r)
        if debug:
            draw_blobs(r, stats)
            draw_detection(r, right_cam_detection)
            hg.cvShowImage('right', r)

        # Left camera detection drives the visualization windows.
        image, combined, left_cam_detection, stats = detector.detect(l)
        detect_time = time.time()
        if debug:
            motion, intensity = detector.get_motion_intensity_images()
            show_processed(l, [combined, motion, intensity],
                           left_cam_detection, stats, detector)
        elif display:
            #draw_blobs(l, stats)
            draw_detection(l, left_cam_detection)
            hg.cvShowImage('video', l)
            hg.cvWaitKey(10)

        # Triangulate only when both cameras report a detection.
        if right_cam_detection != None and left_cam_detection != None:
            x = np.matrix(left_cam_detection['centroid']).T
            xp = np.matrix(right_cam_detection['centroid']).T
            result = stereo_cam.triangulate_3d(x, xp)
            print '3D point located at', result['point'].T,
            print 'distance %.2f error %.3f' % (np.linalg.norm(result['point']),
                                                result['error'])
        triangulation_time = time.time()

        # Per-frame timing report (undistort/detect/triangulation stamps
        # above are collected but only the total is printed).
        diff = time.time() - start_time
        print 'Main: Running at %.2f fps, took %.4f s' % (1.0 / diff, diff)
def CropImage(i_ipl_image, i_ipl_roi):
    """Return a new 8-bit single-channel image containing the ROI pixels.

    *i_ipl_roi* is a cvRect. The region is copied out of *i_ipl_image*,
    so the result does not alias the source.
    """
    roi_view = cv.cvGetSubRect(i_ipl_image, i_ipl_roi)
    result = cv.cvCreateImage(
        cv.cvSize(i_ipl_roi.width, i_ipl_roi.height), 8, 1)
    cv.cvCopy(roi_view, result)
    return result
def CropImage(i_ipl_image, i_ipl_roi):
    """Crop *i_ipl_roi* (a cvRect) out of *i_ipl_image* into a fresh image.

    The returned image is 8-bit, 1-channel, sized to the ROI; pixels are
    copied, leaving the input untouched.
    """
    # cvGetSubRect gives a view; cvCopy materializes it into its own buffer.
    window = cv.cvGetSubRect(i_ipl_image, i_ipl_roi)
    out = cv.cvCreateImage(cv.cvSize(i_ipl_roi.width, i_ipl_roi.height), 8, 1)
    cv.cvCopy(window, out)
    return out
def crop(self, i_image):
    """Crop *i_image* to this object's ROI and return the copy.

    Allocates a fresh 8-bit, 1-channel image each call, stores it on
    self.__cropped_image, and returns it.
    """
    roi_view = cv.cvGetSubRect(i_image, self.__roi)
    self.__cropped_image = cv.cvCreateImage(
        cv.cvSize(self.__roi.width, self.__roi.height), 8, 1)
    # Materialize the view so the result outlives i_image's buffer.
    cv.cvCopy(roi_view, self.__cropped_image)
    return self.__cropped_image
def crop(self, i_image):
    """Return the pixels of *i_image* under self.__roi as a new image.

    The crop is copied into a newly created 8-bit single-channel image,
    which is also cached on self.__cropped_image.
    """
    sub_rect = cv.cvGetSubRect(i_image, self.__roi)
    self.__cropped_image = cv.cvCreateImage(
        cv.cvSize(self.__roi.width, self.__roi.height),
        8,
        1)
    cv.cvCopy(sub_rect, self.__cropped_image)
    return self.__cropped_image