def __init__(self):
    # Initialize arbitrator
    self.arbitrator = Arbitrator()
    # Initialize motobs (single object for both motors on the Zumo).
    # The list must be created before anything is appended to it.
    self.motobs = [Motob(Motors())]
    # Initialize sensors
    self.sensors = {
        'ultrasonic': Ultrasonic(0.05),
        'IR': IRProximitySensor(),
        'reflectance': ReflectanceSensors(False, 0, 900),
        'camera': Camera(),
    }
    self.active_sensors = [self.sensors['ultrasonic'], self.sensors['IR'],
                           self.sensors['reflectance']]
    # Initialize sensobs
    self.sensobs = {
        'distance': DistanceSensob([self.sensors['ultrasonic']]),
        'line_pos': LinePosSensob([self.sensors['reflectance']]),
        'proximity': ProximitySensob([self.sensors['IR']]),
        'red_search': RedSearchSensob([self.sensors['camera']]),
    }
    self.active_sensobs = [self.sensobs['distance'], self.sensobs['line_pos'],
                           self.sensobs['proximity']]
    # Give the hardware a moment to settle before the first control cycle
    time.sleep(1)
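# A minimal sketch (an assumption, not part of the original file) of how a
# BBCON-style controller typically wires these pieces together on each tick:
# sensors are polled once, sensobs derive higher-level values from them,
# behaviors recompute their requests, the arbitrator picks a winner, and the
# motob translates the winning recommendation into wheel commands. The
# `behaviors` attribute and the `run_one_timestep` name are hypothetical.
def run_one_timestep(self):
    for sensor in self.active_sensors:
        sensor.update()          # poll raw hardware once per tick
    for sensob in self.active_sensobs:
        sensob.update()          # derive higher-level readings
    for behavior in self.behaviors:
        behavior.update()        # recompute match degree / motor request
    winner = self.arbitrator.choose_action(self.behaviors)
    for motob in self.motobs:
        motob.update(winner.motor_recommendation)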
def __init__(
        self,
        config: Config,
        spec: ScenarioSpec,
) -> None:
    super(LaneScenario, self).__init__(
        config,
        spec,
    )

    Log.out("Initializing detector", {
        'detector': spec.data()['detector'],
    })

    if spec.data()['detector'] == 'lanenet':
        self._detector = LaneNet(config)

    camera = Camera.from_dict(spec.data()['camera'])

    self._image = CameraImage.from_path_and_camera(
        os.path.join(
            os.path.dirname(spec.path()),
            spec.data()['image'],
        ),
        camera,
    )
def tourist(steps=25, shots=5, speed=.25):
    ZumoButton().wait_for_press()
    rs = ReflectanceSensors()
    m = Motors()
    c = Camera()
    for i in range(steps):
        random_step(m, speed=speed, duration=0.5)
        vals = rs.update()
        if sum(vals) < 1:  # very dark area
            im = shoot_panorama(c, m, shots)
            im.dump_image('vacation_pic' + str(i) + '.jpeg')
class ColorSensob(Sensob):
    """A sensob that detects the amount of a given color in an image."""

    expensive = True

    def __init__(self, color, threshold=0.3, cut_ratio=(0.5, 0.25, 0, 0.25)):
        """
        Initialize the sensob.

        Arguments:
        color -- An RGB tuple of the color to look for
        threshold -- The amount of leeway for the color
        cut_ratio -- The amount of the image to crop before analysing.
            This is a tuple of ratios of the image, beginning at the top
            and going clockwise. For example, passing (0.1, 0.2, 0.3, 0.4)
            will cut 10% off the top, 20% off the right side, 30% off the
            bottom and 40% off the left.
        """
        super().__init__()
        self.color = color
        self.threshold = threshold
        self.cut_ratio = cut_ratio
        self.camera = Camera()

    def update(self):
        """Take a picture with the camera, and analyse its color values."""
        image = self.camera.update()
        width, height = image.size
        # Crop window derived from the (top, right, bottom, left) ratios
        start_y = floor(height * self.cut_ratio[0])
        stop_y = floor(height * (1 - self.cut_ratio[2]))
        start_x = floor(width * self.cut_ratio[3])
        stop_x = floor(width * (1 - self.cut_ratio[1]))
        # A pixel matches if every channel is within threshold*255 of the target
        lower = [self.color[i] - 255 * self.threshold for i in range(3)]
        upper = [self.color[i] + 255 * self.threshold for i in range(3)]
        num_color_pixels = 0
        num_pixels = (stop_x - start_x) * (stop_y - start_y)
        for x in range(start_x, stop_x):
            for y in range(start_y, stop_y):
                pixel = image.getpixel((x, y))
                for i in range(3):
                    if not lower[i] <= pixel[i] <= upper[i]:
                        break
                else:
                    num_color_pixels += 1
        self.value = num_color_pixels / num_pixels
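# A short usage sketch (not from the original module) illustrating what the
# default cut_ratio=(0.5, 0.25, 0, 0.25) does on a hypothetical 120x90 frame:
# the top 50% and 25% off each side are discarded, so only the bottom-center
# 60x45 window (x in [30, 90), y in [45, 90)) is scanned.
red_sensob = ColorSensob((255, 0, 0), threshold=0.3)
red_sensob.update()
# Fraction of scanned pixels within 255*0.3 = 76.5 of pure red on every channel
print(red_sensob.value)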
def __init__(
        self,
        config: Config,
        spec: ScenarioSpec,
) -> None:
    super(AtariScenario, self).__init__(
        config,
        spec,
    )

    Log.out("Initializing fusion", {
        'version': "atari",
    })

    self._atari = Atari(config, spec.data()['lane_count'])

    camera = Camera.from_dict(spec.data()['camera'])

    front_camera_dir = os.path.join(
        os.path.dirname(spec.path()),
        spec.data()['front_camera_dir'],
    )
    assert os.path.isdir(front_camera_dir)

    front_camera_paths = [
        f for f in os.listdir(front_camera_dir)
        if os.path.isfile(os.path.join(front_camera_dir, f))
    ]

    self._front_cameras = []
    for f in sorted(front_camera_paths):
        Log.out("Loading front camera image", {
            'filename': f,
        })
        self._front_cameras.append(
            CameraImage.from_path_and_camera(
                os.path.join(front_camera_dir, f),
                camera,
            ))
class CameraService(Service):
    camera = None
    img_path = None

    def __init__(self) -> None:
        super().__init__()
        self.camera = Camera()

    def run(self):
        while True:
            t = time.time()
            # Capture the image and keep its path for the upcoming upload
            self.img_path = self.camera.capture()
            # self.shareData("IMG_PATH", self.img_path)
            self.__upload(img_path=self.img_path)
            logging.debug("newest uploaded img changed to " + self.img_path)
            time.sleep(config.CAPTURE_PERIOD)

    def startService(self):
        self.start()

    def stopService(self):
        pass

    def __upload(self, img_path):
        sensorId = 2
        data = {'type': 'img'}
        url = (config.UPLOAD_URL + config.USER_ID + '/' +
               config.DEVICE_ID + '/' + str(sensorId))
        # Close the file handle after the POST instead of leaking it
        with open(img_path, 'rb') as img_file:
            response = requests.post(url=url, data=data,
                                     files={'img': img_file})
        return response
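# For illustration only (the config values below are hypothetical), this is
# the request __upload ends up making: a multipart POST with a text field
# 'type' and the captured JPEG under the file key 'img'.
import requests

url = 'http://example.com/upload/' + 'u1' + '/' + 'd1' + '/' + str(2)
with open('/tmp/capture.jpg', 'rb') as img_file:
    response = requests.post(url, data={'type': 'img'}, files={'img': img_file})
print(response.status_code)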
class Pepper_interface(object):

    def __init__(self, IP, PORT, simulation,
                 face_detection=False, publish=False,
                 camera_calibration=False, stop=True):

        self.simulation = simulation
        self.stop_ = stop
        stop = True

        try:
            rospy.init_node("playful_ros_node_pepper_interface")
        except Exception as e:
            print
            print "attempted to start ROS node but failed"
            print "with error message: ", str(e)
            print "Did you launch your ROS core or initiate another node?"
            print

        if camera_calibration:
            self.use_ir_camera = True
        else:
            self.use_ir_camera = False

        self.transforms = Transforms(configuration.frames.reference,
                                     configuration.frames.relative,
                                     configuration.frames.head,
                                     configuration.frames.camera,
                                     configuration.camera.horizontal_fov)

        self._proxies = Proxies(
            IP, PORT, simulation, face_detection,
            face_detection_subscription=configuration.face_detection.subscription,
            face_detection_period=configuration.face_detection.period)

        self._naoqi_memory = Memory(
            self.transforms, self._proxies.memory,
            configuration.memory.frequency, configuration.body.joints,
            configuration.laser.scan_num, face_detection,
            configuration.frames.relative, configuration.frames.reference,
            configuration.face_detection.detection_id)

        self.sonar = self._naoqi_memory.sonar
        self.wheels_sensor = self._naoqi_memory.wheels
        self.laser = self._naoqi_memory.laser
        self.head_touch = self._naoqi_memory.head_touch
        self.left_hand_touch = self._naoqi_memory.left_hand_touch
        self.right_hand_touch = self._naoqi_memory.right_hand_touch
        self.face_detection = self._naoqi_memory.face_detection
        self.temperatures = self._naoqi_memory.temperatures

        self.joints = Joints(self._proxies.motion,
                             configuration.body.frequency,
                             self.simulation)

        self._torso_transform = Torso_transform(
            self._proxies.motion, configuration.frames.naoqi,
            configuration.frames.naoqi_reference, self.simulation,
            configuration.body.frequency)

        self.battery = Battery(self._proxies.battery)
        self.wheels = Wheels(self._proxies.motion)
        self.tts = TTS(self._proxies.tts)

        # does not work on naoqi 2.4.3 (works on 2.5)
        try:
            self.tablet = Tablet(self._proxies.tablet)
        except:
            pass

        self.music = Music(self._proxies.audio)
        self.leds = Leds(self._proxies.leds)

        self.tracker = Tracker(self._proxies.tracker,
                               self._proxies.memory,
                               configuration.tracker.event,
                               configuration.tracker.event_type,
                               configuration.tracker.effector_id,
                               configuration.tracker.head_threshold)

        self.head = Head(configuration.head.method,
                         self._proxies.motion,
                         configuration.head.joint_yaw,
                         configuration.head.joint_pitch,
                         configuration.head.z_shift,
                         self._set_joints_values,
                         self.get_joints_values,
                         self.transforms,
                         configuration.head.smoother_time_threshold,
                         configuration.head.smoother_max_acceleration,
                         configuration.head.wheels_compensation_factor,
                         self.wheels.get)

        self.hip = Hip(configuration.hip.method,
                       configuration.hip.joints,
                       self._set_joints_values,
                       self.get_joints_values)

        self.knee = Knee(configuration.knee.method,
                         configuration.knee.joint[0],
                         self._set_joints_values,
                         self.get_joints_values)

        self.arms = Arms(configuration.arms.method,
                         self._proxies.motion,
                         configuration.arms.length,
                         configuration.arms.left_joints,
                         configuration.arms.right_joints,
                         configuration.hip.joints,
                         configuration.knee.joint,
                         configuration.frames.left_shoulder,
                         configuration.frames.right_shoulder,
                         self._set_joints_values,
                         self.get_joints_values,
                         self.transforms,
                         configuration.arms.on_exit_posture)

        self.hands = Hands(configuration.hands.method,
                           self._set_joints_values,
                           self.get_joints_values)

        self._ros_tf_and_odom = Tf_and_odometry_publisher(
            configuration.ros.tf_and_odometry_frequency,
            configuration.ros.odometry_topic,
            configuration.frames.base,
            configuration.frames.reference,
            self._torso_transform)

        self._joint_states_publisher = Joint_states_publisher(
            configuration.ros.joint_states_frequency,
            self._proxies.motion, self.joints,
            configuration.ros.joint_states_topic)

        if not self.simulation:
            self.camera = Camera(configuration.camera.frequency,
                                 self._proxies.camera,
                                 configuration.camera.camera_id,
                                 configuration.camera.resolution,
                                 configuration.camera.source,
                                 configuration.camera.color_space,
                                 configuration.camera.frame_rate)
            self.camera_rgb = Camera(configuration.camera_rgb.frequency,
                                     self._proxies.camera,
                                     configuration.camera_rgb.camera_id,
                                     configuration.camera_rgb.resolution,
                                     configuration.camera_rgb.source,
                                     configuration.camera_rgb.color_space,
                                     configuration.camera_rgb.frame_rate)
            if self.use_ir_camera:
                self.camera_ir = Camera(configuration.camera_ir.frequency,
                                        self._proxies.camera,
                                        configuration.camera_ir.camera_id,
                                        configuration.camera_ir.resolution,
                                        configuration.camera_ir.source,
                                        configuration.camera_ir.color_space,
                                        configuration.camera_ir.frame_rate)
            if publish:
                self._publish = True
                self._publishers = Publishers(
                    self, configuration.ros.publication_frequency)
            else:
                self._publish = False
        else:
            self._publish = False

    def start(self):
        self._naoqi_memory.start()
        self.joints.start()
        self._torso_transform.start()
        self._ros_tf_and_odom.start()
        self._joint_states_publisher.start()
        if not self.simulation:
            self.camera.start()
            self.camera_rgb.start()
            if self.use_ir_camera:
                self.camera_ir.start()
        if self._publish:
            self._publishers.start()
        self._proxies.motion.wakeUp()
        self.arms.set_stiffnesses(True, 1.0)
        self.arms.set_stiffnesses(False, 1.0)
        self.head.set_stiffnesses(1.0)

    def stop(self):
        if not self.stop_:
            return
        self.tracker.stop()
        self.wheels.stop()
        self.head.set(0.0, 0.0)
        self.hip.set(0.0, 0.0)
        self.knee.set(0.0)
        if (self.head.running() or self.hip.running()
                or self.knee.running()):
            time.sleep(0.5)
        self.arms.relax()
        while (self.arms.running(True) or self.arms.running(False)):
            time.sleep(0.1)
        self.head.set_stiffnesses(0.0)
        if self._publish:
            try:
                self._publishers.stop()
            except:
                pass
        try:
            self._naoqi_memory.stop()
        except:
            pass
        try:
            self.joints.stop()
        except:
            pass
        try:
            self._ros_tf_and_odom.stop()
        except:
            pass
        try:
            self._joint_states_publisher.stop()
        except:
            pass
        try:
            self._torso_transform.stop()
        except:
            pass
        if not self.simulation:
            try:
                self.camera.stop()
                self.camera_rgb.stop()
                if self.use_ir_camera:
                    self.camera_ir.stop()
            except:
                pass

    def __del__(self):
        self.stop()

    def wake_up(self):
        self._proxies.motion.wakeUp()

    def get_joints_values(self, joints=None):
        data = self.joints.get()
        if not data:
            return None
        joints_values, time_stamp = data
        if joints is None:
            return joints_values
        return {j: v for j, v in joints_values.iteritems() if j in joints}

    def _set_joints_values(self, method, joint_values, motion_manager,
                           velocity=0.1, target_time=None):
        current = self.get_joints_values(joint_values.keys())
        new_motion_manager = Motion_manager(method, self._proxies.motion,
                                            joint_values, current,
                                            velocity=velocity,
                                            target_time=target_time)
        try:
            motion_manager.stop()
        except:
            pass
        return new_motion_manager

    def get_random_motion_manager(self, center_posture,
                                  amplitudes, velocity):
        return Random_motion_manager(self._proxies.motion,
                                     self.get_joints_values,
                                     center_posture, amplitudes,
                                     velocity, synchro=False)

    def get_dance_manager(self, postures, beat,
                          initial_velocity, batch_size=4):
        return Dance_manager(self._proxies.motion, postures, beat,
                             initial_velocity, batch_size=batch_size)

    def get_repetitive_motion_manager(self, postures, velocity,
                                      initial_velocity, batch_size=5):
        return Repetitive_motion_manager(self._proxies.motion,
                                         self.get_joints_values,
                                         postures, velocity,
                                         initial_velocity,
                                         nb_batchs=batch_size)

    def get_feedforward_interpolation_manager(self, postures,
                                              starting_velocity,
                                              velocities):
        joints = postures[0].keys()
        current_posture = self.get_joints_values(joints)
        return Feedforward_interpolation(self._proxies.motion,
                                         current_posture, postures,
                                         starting_velocity, velocities)
# ... continues from inside the per-detection loop that precedes this
# fragment. The first branch of the species dispatch was cut off; the `if`
# arm below is a reconstruction implied by the surviving elif/else arms and
# the writerCanada/fileCanada handles closed further down.
    if species == category[0]:
        writerCanada.writerow(row)
    elif species == category[1]:
        writerBlue.writerow(row)
    elif species == category[2]:
        writerSnow.writerow(row)
    else:
        print "oops"
    filenameUUID = str(uuid.uuid4())
    roiPath = pathToROI + '/' + species + '_' + filenameUUID + '.jpg'
    cv2.imwrite(roiPath, roi)

fileCanada.close()
fileBlue.close()
fileSnow.close()

camera = Camera()
frameCenterX = 640.0 / 2
frameCenterY = 480.0 / 2

nestCSV = open(csvNestFilePath, 'w')
nestWriter = csv.writer(nestCSV)

# Loop through each picture
for p in range(len(imagePaths)):
    frame = cv2.imread(imagePaths[p])
    print imagePaths[p]
    csvImageGPS = open(csvImageGPSFilepath, 'r')
    reader = csv.reader(csvImageGPS, delimiter=',')
    latitude = 0
class Main:

    def __init__(self):
        self.bcm_pin = BCM_PIN()
        # Initialize motor with speed, 0 => slowest, 100 => fastest
        self.motor = Motor(50)
        # self.light = Light()
        self.ultrasonic = Ultrasonic()
        self.camera = Camera((120, 90))

    def start(self, condition=1):
        # self.printInfoStatus()
        switcher = {0: 'brightest', 1: 'distance'}
        method = getattr(self, switcher[condition], lambda: 'nothing')
        method()

    def printInfoStatus(self):
        # os.system('clear')
        print "Info: ", datetime.datetime.now()
        print ""
        # print "Light sensor: "
        # print self.light.getSensorState(1)
        # print self.light.getSensorState(2)
        print ""
        print "Ultrasonic Distance: " + str(self.ultrasonic.getDistance()) + "cm"

    def distance(self):
        self.motor.setSpeed(50)
        distance = self.ultrasonic.getDistance()
        if 10 <= distance <= 20:
            self.motor.forward()
        elif 1 <= distance <= 5:
            self.motor.reverse()
        else:
            self.motor.stop()

    def brightest(self):
        self.motor.setSpeed(50)
        self.camera.captureImage()
        width, height = self.camera.resolution
        brightest = self.camera.getBrightest()
        # If brightest in left
        if brightest['x'] < ((width / 2) - 5):
            self.motor.spinanticlockwise()
        # If brightest in right
        elif brightest['x'] > ((width / 2) + 5):
            self.motor.spinclockwise()
        # If brightest in front
        else:
            self.motor.forward()

    def stop(self):
        self.motor.stop()
        self.bcm_pin.cleanup()
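# Usage sketch (an assumed entry point, not in the original file): condition
# selects a behavior name via the switcher dict, so 0 runs one step of the
# light-seeking brightest() behavior and 1 one step of the ultrasonic
# distance() logic; unknown conditions fall through to the no-op lambda.
main = Main()
try:
    main.start(condition=0)  # steer toward the brightest point in the frame
finally:
    main.stop()              # always halt motors and release the GPIO pins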
#!/usr/bin/env python
# -*- coding:utf-8 -*-

import config
from sensors.camera import Camera
from sensors.liquid_level_sensor import LiquidLevelSensor
from sensors.switch import Switch

__author__ = 'PakhoLeung'

"""
This table registers every sensor, so they can all be managed in one place.
"""
CAMERA = Camera()
WD_INDICATOR = LiquidLevelSensor(channel=config.WDS_LIQUID_LEVEL_SENSOR_CHANNEL)
WD_PUMP = Switch(channel=config.WDS_PUMP_SWITCH_CHANNEL)
import sys
from sensors.camera import Camera
from actuators.motors import Motors
from sensors.threadSensor import ThreadSensor
from gui.threadGUI import ThreadGUI
from gui.GUI import MainWindow
from PyQt4 import QtGui
import signal

signal.signal(signal.SIGINT, signal.SIG_DFL)

if __name__ == '__main__':
    camera = Camera()
    motors = Motors()
    app = QtGui.QApplication(sys.argv)
    frame = MainWindow()
    frame.setMotors(motors)
    frame.setCamera(camera)
    frame.show()

    t1 = ThreadSensor(camera)
    t1.daemon = True
    t1.start()

    t2 = ThreadGUI(frame)
    t2.daemon = True
    t2.start()

    # Enter the Qt event loop so the window stays up while the daemon
    # threads run; without this the script would exit immediately
    sys.exit(app.exec_())
def main():
    # timer = Timer()
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    data = np.load('./matrixCalibration.npz')
    cmatrix = data['camera_matrix']
    dist = data['dist_coefs']

    detector = cv2.SimpleBlobDetector_create(initDetectorParams())
    initiateCamera = True

    while True:
        ret, img = cap.read()
        h, w = img.shape[:2]
        newcameramtx, roi = cv2.getOptimalNewCameraMatrix(
            cmatrix, dist, (w, h), 1, (w, h))
        dst = cv2.undistort(img, cmatrix, dist, None, newcameramtx)
        x, y, w, h = roi
        dst = dst[y:y + h, x:x + w]

        if initiateCamera:
            print "Height: {}\nWidth: {}".format(h, w)
            camera = Camera(w, h, 65.0)
            altitude = 102.0
            print "Full x length: {:.2f} cm".format(2 * camera.getCx(altitude))
            print "Full y length: {:.2f} cm".format(2 * camera.getCy(altitude))
            initiateCamera = False

        # keypoints, f = findEggDebug(img)
        keypoints = findEgg(dst, detector)
        cv2.circle(dst, (w / 2, h / 2), 5, (255, 0, 0), -1)

        if len(keypoints) > 0:
            # imgk = cv2.drawKeypoints(img, keypoints, np.array([]), (0, 0, 255),
            #                          cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
            # Loops through every detection, and finds the distance to the center
            distance = []
            for j in range(len(keypoints)):
                x = int(keypoints[j].pt[0])
                y = int(keypoints[j].pt[1])
                distancePixel = math.sqrt(
                    math.pow(w / 2.0 - x, 2) + math.pow(h / 2.0 - y, 2))
                distance.append(distancePixel)

            # Finds the shortest distance to the center of the frame
            shortestDistanceIndex = distance.index(min(distance))
            xShort = int(keypoints[shortestDistanceIndex].pt[0])
            yShort = int(keypoints[shortestDistanceIndex].pt[1])

            # Difference between the detection and the center, in pixels
            diffX = xShort - w / 2.0
            # Negative because the pixel coords increase downwards
            diffY = -1 * (yShort - h / 2.0)

            # Difference between the detection and the center, in centimeters
            # x: + up (egg is up relative to the center), - down
            # y: + right, - left
            altitude = 103.0
            offsetX = camera.getRealWorldDistance(altitude, diffX)
            offsetY = camera.getRealWorldDistance(altitude, diffY)
            diffR = math.sqrt(offsetX**2 + offsetY**2)
            print "offsetX:{:.2f}\toffsetY:{:.2f}\tdiff:{:.2f} cm".format(
                offsetX, offsetY, diffR)

            # Draw centroids
            for k in keypoints:
                x = int(k.pt[0])
                y = int(k.pt[1])
                cv2.circle(dst, (x, y), 10, (0, 0, 255), -1)
            if diffR < 1.0:
                cv2.circle(dst, (w / 2, h / 2), 5, (0, 255, 0), -1)
                cv2.circle(dst, (xShort, yShort), 5, (0, 255, 0), -1)

        # cv2.imshow("filtered", f)
        # cv2.imshow("mask", mask)
        # timer.sinceLastTimeLog('')
        cv2.imshow("img_keypoints", dst)

        k = cv2.waitKey(25) & 0xFF
        if k == 27:
            break

    cv2.destroyAllWindows()
    data.close()
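# The Camera(w, h, fov) helper used above is not defined in this file. Below
# is a minimal pinhole-model sketch consistent with how it is called; the
# class name and method names match the calls above, but this body is an
# assumption, not the original implementation.
import math

class Camera(object):
    def __init__(self, width, height, fov_deg):
        self.width = width                  # frame width in pixels
        self.height = height                # frame height in pixels
        self.fov = math.radians(fov_deg)    # horizontal field of view

    def getCx(self, altitude):
        # Half the real-world width visible on the ground at this altitude
        return altitude * math.tan(self.fov / 2.0)

    def getCy(self, altitude):
        # Half the visible height, scaled by the frame aspect ratio
        return self.getCx(altitude) * self.height / float(self.width)

    def getRealWorldDistance(self, altitude, pixels):
        # Convert a pixel offset to centimeters, assuming square pixels:
        # cm-per-pixel is the full visible width divided by the pixel width
        return pixels * (2.0 * self.getCx(altitude) / self.width)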
# cmatrix and dist come from the same calibration file loaded in main()
data = np.load('./matrixCalibration.npz')
cmatrix = data['camera_matrix']
dist = data['dist_coefs']

cap = cv2.VideoCapture(0)
once = True

while True:
    ret, img = cap.read()
    h, w = img.shape[:2]
    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(cmatrix, dist,
                                                      (w, h), 1, (w, h))
    # undistort
    dst = cv2.undistort(img, cmatrix, dist, None, newcameramtx)
    # crop the image
    x, y, w, h = roi
    dst = dst[y:y + h, x:x + w]

    if once:
        print "Height: {}\nWidth: {}".format(h, w)
        camera = Camera(w, h, 65.0)
        altitude = 300.0
        print "Full x length: {:.2f} cm".format(2 * camera.getCx(altitude))
        print "Full y length: {:.2f} cm".format(2 * camera.getCy(altitude))
        once = False

    cv2.imshow('distorted', img)
    cv2.imshow('undistorted', dst)

    k = cv2.waitKey(25) & 0xFF
    if k == 27:
        break

cv2.destroyAllWindows()