def select_optimal_colors(self):
    # in case something else is still open
    cv.DestroyAllWindows()
    capture = cv.CaptureFromCAM(self.camera_index)
    if not capture:
        QMessageBox.information(self, "Camera Error", "Camera not found")
        return
    cv.NamedWindow("click & drag to select object", cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow("click & drag to select object", 610, 0)
    cv.SetMouseCallback("click & drag to select object", self.mouseHandler, None)
    camera_on = True
    while camera_on:
        if not self.busy_updating:
            frame = cv.QueryFrame(capture)
            if not frame:
                break
            cv.ShowImage("click & drag to select object", frame)
            self.frame = frame
            k = cv.WaitKey(1)
            # press q or escape to quit camera view
            if k == 27 or k == 113 or self.end_record:
                camera_on = False
                cv.DestroyAllWindows()
                self.end_record = False
                break
def get_tracked_region(im):
    tr = TrackedRegion(im)
    cv.NamedWindow("reference")
    cv.CreateTrackbar('xpos', 'reference', 0, im.width - 1, tr.set_x)
    cv.CreateTrackbar('ypos', 'reference', 0, im.height - 1, tr.set_y)
    cv.CreateTrackbar('size', 'reference', 0, im.width - 1, tr.set_size)
    # Show position of descriptors on reference image
    cv.ShowImage("reference", im)
    # Selecting tracked region
    while True:
        key_pressed = cv.WaitKey(100)
        if key_pressed == 32:
            # space: freeze the current selection on the reference image
            cv.Rectangle(im, (tr.xpos, tr.ypos),
                         (tr.xpos + tr.size, tr.ypos + tr.size), 255, thickness=3)
            cv.DestroyWindow("reference")
            break
        elif key_pressed == 27:
            # escape: abort selection
            cv.DestroyAllWindows()
            cv.WaitKey(100)
            return
        else:
            # redraw the candidate rectangle on a copy so the original stays clean
            im_copy = cv.CreateMat(im.height, im.width, cv.CV_8UC1)
            cv.Copy(im, im_copy)
            cv.Rectangle(im_copy, (tr.xpos, tr.ypos),
                         (tr.xpos + tr.size, tr.ypos + tr.size), 255, thickness=3)
            cv.ShowImage("reference", im_copy)
    return tr
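# A minimal sketch (an assumption, not from the original source) of the TrackedRegion
# helper that get_tracked_region() relies on: it only needs xpos/ypos/size attributes
# plus the set_x/set_y/set_size trackbar callbacks used above.
class TrackedRegion(object):
    def __init__(self, im):
        self.im = im
        self.xpos = 0
        self.ypos = 0
        self.size = 0

    def set_x(self, value):
        self.xpos = value

    def set_y(self, value):
        self.ypos = value

    def set_size(self, value):
        self.size = value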
def step(self, pause=False):
    try:
        retval, img_arr = self.cam.read()
        # cv2.imwrite(settings.base_path + 'pics/pic1.jpg', img_arr)
        assert img_arr is not None, "Camera in use by other process"
        self.add_observation(img_arr, draw=self.show_overlays)
        if settings.use_simplecv_display:
            if self.display.isDone():
                raise SystemExit("exiting")
            img = Image(cv.fromarray(img_arr))
            if self.show_overlays or self.trainable:
                self.annotate_img(img)
            img.save(self.display)
            if self.display.mouseLeft:
                self.show_overlays = not self.show_overlays
            if self.display.mouseRight:
                self.display.done = True
        else:
            cv.ShowImage("Index", cv.fromarray(img_arr))
            if pause:
                print("Press any key to continue")
                cv.WaitKey()
                cv.DestroyAllWindows()
        return True
    except KeyboardInterrupt:
        return False
def main(args):
    FO = FindObject("find_object")
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down Find Object node."
    cv.DestroyAllWindows()
def main():
    optic_flow_calculator = Optic_Flow_Calculator()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main(args):
    try:
        objectDetection()
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down vision node."
    cv.DestroyAllWindows()
def loop():
    """main video loop to render frames

    This loops until escape key is pressed, calling detect_features on
    each frame to look for True features (based on keys 1-3).
    """
    global face_on, eye_on, nose_on
    # set up window and camera
    win_name = "peekaboo"
    source = cv.CaptureFromCAM(0)
    cv.NamedWindow(win_name)
    while True:
        # grab the key press to delegate action
        key = cv.WaitKey(15)
        # escape key to exit
        if key == 27:
            break
        # keys 1-3 toggle detected features
        if key == 49:
            face_on = not face_on
        if key == 50:
            eye_on = not eye_on
        if key == 51:
            nose_on = not nose_on
        # get the frame, detect selected features, and show frame
        frame = cv.QueryFrame(source)
        detect_features(frame)
        cv.ShowImage(win_name, frame)
    cv.DestroyAllWindows()
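# A hedged sketch of the detect_features() helper that loop() assumes. It is not part
# of the original source: the FACE_CASCADE global, the cascade file name, and the
# detector parameters are illustrative, and only the face_on branch is shown.
FACE_CASCADE = cv.Load("haarcascade_frontalface_default.xml")  # hypothetical path

def detect_features(frame):
    """Outline each detected face on the frame when face_on is enabled."""
    if not face_on:
        return
    # the Haar detector works on a single-channel image
    gray = cv.CreateImage((frame.width, frame.height), 8, 1)
    cv.CvtColor(frame, gray, cv.CV_BGR2GRAY)
    cv.EqualizeHist(gray, gray)
    storage = cv.CreateMemStorage(0)
    # args: image, cascade, storage, scale_factor, min_neighbors, flags, min_size
    faces = cv.HaarDetectObjects(gray, FACE_CASCADE, storage,
                                 1.2, 3, cv.CV_HAAR_DO_CANNY_PRUNING, (40, 40))
    for (x, y, w, h), neighbors in faces:
        cv.Rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)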
def main(args):
    global pub_twist, gripping, xvel, yvel
    rospy.init_node("visual_servoing")
    gripping = Gripping()
    rospy.sleep(1)
    gripping.pre_grip()
    rospy.sleep(1)
    rospy.Subscriber("usb_cam/image_raw", Image, image_callback)
    pub_twist = rospy.Publisher("/cmd_vel", Twist)
    thread.start_new_thread(send_twist, ())
    try:
        rospy.spin()
    except KeyboardInterrupt:
        pass
    # stop the robot before shutting down
    xvel = 0
    yvel = 0
    send_twist()
    cv.DestroyAllWindows()
def main(args):
    vn = vision_node()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down vision node."
    cv.DestroyAllWindows()
def main():
    image_processor = Image_Processor()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main(args):
    rospy.init_node('ball_loc', anonymous=True)
    bl = ball_locator()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main(args):
    ic = image_converter()
    rospy.init_node('image_converter', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main(args):
    ic = face_detect()
    rospy.init_node('face_detect', anonymous=True)
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main():
    rospy.init_node('cv_sandbox')
    v = vision()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def clean_up(self):
    """Release the camera and close any open OpenCV windows."""
    if self._capture:
        self._capture.release()
    cv.DestroyAllWindows()
def main(args):
    rospy.init_node('camera_arena_calibration_test')
    cal = Calibration()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def runVideo(self):
    frameCount = 0
    x = self.xFont
    y = self.yFont
    font = self.videoFont
    tom2Coords = self.tom2Coords
    tom1Coords = self.tom1Coords
    snareCoords = self.snareCoords
    hihatCoords = self.hihatCoords
    i = 2
    while True:
        if i % 2 == 0:
            frame = cv.QueryFrame(capture)
            self.redStick.drawBoundingCircle(frame)
            self.blueStick.drawBoundingCircle(frame)
            self.redStick.appendCentersList()
            self.blueStick.appendCentersList()
            self.redStick.findDelta()
            self.blueStick.findDelta()
            self.redStick.playSounds()
            self.blueStick.playSounds()
            # draw the drum pad regions
            cv.Rectangle(frame, (tom1Coords[0], tom1Coords[1]),
                         (tom1Coords[2], tom1Coords[3]), (255, 0, 0), 0)
            cv.Rectangle(frame, (tom2Coords[0], tom2Coords[1]),
                         (tom2Coords[2], tom2Coords[3]), (0, 255, 0), 0)
            cv.Rectangle(frame, (snareCoords[0], snareCoords[1]),
                         (snareCoords[2], snareCoords[3]), (0, 0, 255), 0)
            cv.Rectangle(frame, (hihatCoords[0], hihatCoords[1]),
                         (hihatCoords[2], hihatCoords[3]), (125, 125, 0), 0)
            cv.Flip(frame, frame, 1)
            cv.PutText(frame, "Press q or esc to terminate.", (x, y), font, (0, 255, 0))
            cv.PutText(frame, "Current Velocities:", (x, y + 50), font, (0, 255, 255))
            cv.PutText(frame, str(self.redStick.delta), (x, y + 100), font, (0, 0, 255))
            cv.PutText(frame, str(self.blueStick.delta), (x + 100, y + 100), font, (255, 0, 0))
            cv.ShowImage("DrumMaster 9000!", frame)
            key = cv.WaitKey(7)
            # press q or escape to quit
            if key == 27 or key == 113:
                cv.DestroyAllWindows()
                pygame.quit()
                exit()
                break
        frameCount += 1
def main(args):
    rospy.init_node('faa_image_conditioner', anonymous=True)
    conditioner = Conditioner()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    if conditioner.display_image_window:
        cv.DestroyAllWindows()
def main(args):
    rospy.init_node('table_detector_test', anonymous=True)
    ic = table_detector()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    cv.DestroyAllWindows()
def main(args):
    rospy.init_node('faa_image_processing', anonymous=True)
    ip = ImageProcessor()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down"
    if ip.display_images:
        cv.DestroyAllWindows()
def check_angle(self):
    # in case something else is still open
    cv.DestroyAllWindows()
    capture = cv.CaptureFromCAM(self.camera_index)
    if not capture:
        QMessageBox.information(self, "Camera Error", "Camera not found")
        return
    cv.NamedWindow("Video", cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow("Video", 350, 0)
    camera_on = True
    # keep a running average for 100 reads
    total = 0
    num_reads = 0
    while camera_on:
        if not self.busy_updating:
            frame = cv.QueryFrame(capture)
            if not frame:
                break
            deg_found = self.angle(frame)
            # if an angle is detected, update the running average
            if deg_found != 0:
                total += deg_found
                num_reads += 1
                self.curr_degree = float(total) / float(num_reads)
            if num_reads > 100:
                self.found_angle = True
            if self.found_angle:
                degrees_shown = round(self.curr_degree, 2)
                to_text = str(degrees_shown) + " degrees"
                self.act_angle.setText(to_text)
            small_frame = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 3)
            cv.Resize(frame, small_frame)
            cv.ShowImage("Video", small_frame)
            k = cv.WaitKey(1)
            # press q or escape to quit camera view
            if k == 27 or k == 113 or self.end_record:
                camera_on = False
                cv.DestroyAllWindows()
                self.end_record = False
                break
def calibrate_screen(self):
    # in case something else is still open
    cv.DestroyAllWindows()
    capture = cv.CaptureFromCAM(self.camera_index)
    if not capture:
        QMessageBox.information(self, "Camera Error", "Camera not found")
        return
    cv.NamedWindow("hold up object at preferred distance from camera", cv.CV_WINDOW_AUTOSIZE)
    cv.NamedWindow("select for max visibility", cv.CV_WINDOW_AUTOSIZE)
    cv.MoveWindow("hold up object at preferred distance from camera", 320, 0)
    cv.MoveWindow("select for max visibility", 800, 82)
    cv.CreateTrackbar("Start at color", "hold up object at preferred distance from camera",
                      self.low_color, 179, self.update_low_color)
    cv.CreateTrackbar("End at color", "hold up object at preferred distance from camera",
                      self.high_color, 179, self.update_high_color)
    camera_on = True
    while camera_on:
        if not self.busy_updating:
            frame = cv.QueryFrame(capture)
            if not frame:
                break
            # convert color to hue space for easier tracking
            imgHSV = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, imgHSV, cv.CV_BGR2HSV)
            imgThresh = cv.CreateImage(cv.GetSize(frame), 8, 1)
            # interactive thresholding
            cv.InRangeS(imgHSV, cv.Scalar(self.low_color, self.MED_SV, self.MED_SV),
                        cv.Scalar(self.high_color, self.MAX_SV, self.MAX_SV), imgThresh)
            moments = cv.Moments(cv.GetMat(imgThresh))
            self.calibration_area = cv.GetCentralMoment(moments, 0, 0)
            # shrink images for display
            small_thresh = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 1)
            cv.Resize(imgThresh, small_thresh)
            small_frame = cv.CreateImage((self.fit_camera_width, self.fit_camera_height), 8, 3)
            cv.Resize(frame, small_frame)
            cv.ShowImage("hold up object at preferred distance from camera", small_frame)
            cv.ShowImage("select for max visibility", small_thresh)
            k = cv.WaitKey(1)
            # press q or escape to quit camera view
            if k == 27 or k == 113 or self.end_record:
                camera_on = False
                cv.DestroyAllWindows()
                self.end_record = False
                break
def main(args): # if len(args) != 3: # return usage() # [name, cameraName, outputName] = args name = "ClickWindowName" rospy.init_node(name) outputName = rospy.get_param("~output", "defaultClickPointOutput") gui = ClickWindow(outputName=outputName) while not rospy.is_shutdown(): gui.listen() cv.DestroyAllWindows()
def find_contours(self, img_arr, debug=True):
    if debug:
        cv.ShowImage("Index", cv.fromarray(img_arr))
        cv.WaitKey()
        cv.DestroyAllWindows()
    hsv_img = cv2.cvtColor(img_arr, cv2.COLOR_BGR2HSV)
    if debug:
        cv.ShowImage("Index", cv.fromarray(hsv_img))
        cv.WaitKey()
        cv.DestroyAllWindows()
    HSV_MIN = np.array([0, self.SAT_THRES, 20], np.uint8)
    HSV_MAX = np.array([255, 255, 255], np.uint8)
    frame_threshed = cv2.inRange(hsv_img, HSV_MIN, HSV_MAX)
    ret, thresh = cv2.threshold(frame_threshed, 127, 255, 0)
    if debug:
        cv.ShowImage("Index", cv.fromarray(thresh))
        cv.WaitKey()
        cv.DestroyAllWindows()
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    return contours
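# A small sketch (an assumption, not part of the original module) showing the same
# debug display done with cv2 alone, avoiding the mix of the legacy cv API and cv2
# used in find_contours() above. The helper name show_debug is hypothetical.
import cv2

def show_debug(window_name, img_arr):
    """Display an image, wait for a key press, then close all windows."""
    cv2.imshow(window_name, img_arr)
    cv2.waitKey(0)
    cv2.destroyAllWindows()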
def del_mem(self):
    del self.res
    del self.__res_gray
    del self.res_smooth
    del self.canny
    del self.contour_in
    del self.contour_out
    del self.contours
    del self.c_storage
    del self.a_storage
    cv.DestroyAllWindows()
def main(args): # if len(args) != 3: # return usage() # [name, cameraName, outputName] = args name = "CameraNodeName" rospy.init_node(name) cameraName = rospy.get_param("~cam", "defaultCameraNodeCamera") outputName = rospy.get_param("~output", "defaultCameraNodeOutput") gui = CameraNode(cameraName=cameraName, outputName=outputName) while not rospy.is_shutdown(): gui.listen() cv.DestroyAllWindows()
def get_dist_to_platform(self, req):
    # side = req.side
    rate = rospy.Rate(20)
    avg_move = 0
    counter_failed = 0
    res = GetDistResponse()
    res.dist = -1
    NUM_READINGS = 10
    for i in xrange(NUM_READINGS):
        # wait for a fresh depth frame
        while self.depth_frame is None:
            if rospy.is_shutdown():
                if FULL_VISUALIZE:
                    cv.DestroyAllWindows()
                return res
            rate.sleep()
        if rospy.is_shutdown() or counter_failed > MAX_COUNTER_FAILES:
            if FULL_VISUALIZE:
                cv.DestroyAllWindows()
            return res
        depth_frame = self.bridge.imgmsg_to_cv(self.depth_frame, "32FC1")
        self.depth_frame = None
        depth_frame = np.array(depth_frame, dtype=np.float32)
        depth_frame = depth_frame[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1]]
        try:
            suc = self.process_image(depth_frame)
            if not suc:
                counter_failed += 1.
                rate.sleep()
                continue
            cv.WaitKey(5)
        except CvBridgeError, e:
            print e
        dist = pixel_to_m(self.avg_dist)
        avg_move += -OFFSET_LIN - dist
        rate.sleep()
def main(args): help_message = "Hot keys: \n" \ "\tq - quit the program\n" \ "\tr - restart video from beginning\n" \ "\tspace - toggle pause/play\n" print help_message try: a2r = AVI2ROS() except KeyboardInterrupt: print "Shutting down avi2ros..." cv.DestroyAllWindows()
def get_angle_to_platform(self, req):
    rate = rospy.Rate(30)
    avg_turn = 0
    counter_failed = 0
    res = GetAngleResponse()
    res.ang = -1
    NUM_READINGS = 10
    for i in xrange(NUM_READINGS):
        # wait for a fresh depth frame
        while self.depth_frame is None:
            if rospy.is_shutdown():
                if FULL_VISUALIZE:
                    cv.DestroyAllWindows()
                return res
            rate.sleep()
        if rospy.is_shutdown() or counter_failed > MAX_COUNTER_FAILES:
            if FULL_VISUALIZE:
                cv.DestroyAllWindows()
            return res
        depth_frame = self.bridge.imgmsg_to_cv(self.depth_frame, "32FC1")
        self.depth_frame = None
        depth_frame = np.array(depth_frame, dtype=np.float32)
        depth_frame = depth_frame[crop_y[0]:crop_y[1], crop_x[0]:crop_x[1]]
        try:
            suc = self.process_image(depth_frame)
            if not suc:
                counter_failed += 1.
                rate.sleep()
                continue
            cv.WaitKey(5)
        except CvBridgeError, e:
            print e
        avg_turn += math.pi / 2. + OFFSET_TH - self.avg_th
        rate.sleep()
def main(args):
    # Display a help message if appropriate.
    help_message = ""
    print help_message
    try:
        # Fire up the node.
        ROS2OpenCV("ros2opencv")
        # Spin so our services will work
        rospy.spin()
    except KeyboardInterrupt:
        logger.info("Shutting down vision node.")
        cv.DestroyAllWindows()