class noUVCCapture():
    """
    VideoCapture without uvc control
    """
    def __init__(self, src, size=(640, 480)):
        self.controls = None
        self.cvId = src
        ### add cv videocapture capabilities
        self.cap = VideoCapture(src)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self, size):
        width, height = size
        self.cap.set(3, width)
        self.cap.set(4, height)

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read_RGB(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2BGR, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2HSV, img)
        return s, img
class CameraCapture(uvc.Camera):
    """
    CameraCapture Class for Image grabbing and control
    inherits from an OS specific Camera that defines all uvc control methods
    """
    def __init__(self, cam, size=(640, 480)):
        super(CameraCapture, self).__init__(cam)
        ### add cv videocapture capabilities
        self.cap = VideoCapture(self.cvId)
        self.set_size(size)
        self.read = self.cap.read

    def set_size(self, size):
        width, height = size
        self.cap.set(3, width)
        self.cap.set(4, height)

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read_RGB(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2BGR, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2HSV, img)
        return s, img
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)

        timestamps_loc = os.path.join(src.rsplit(os.path.sep, 1)[0], 'eye_timestamps.npy')
        logger.info("trying to load timestamps with video at: %s" % timestamps_loc)
        try:
            self.timestamps = np.load(timestamps_loc).tolist()
            logger.info("loaded %s timestamps" % len(self.timestamps))
        except:
            logger.info("did not find timestamps")
            self.timestamps = None

        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        if self.timestamps:
            timestamp = self.timestamps.pop(0)
        else:
            timestamp = time()
        return Frame(timestamp, img)

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning

    def create_atb_bar(self, pos):
        return 0, 0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def set_fps(self):
        pass

    def get_fps(self):
        return None

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def get_frame(self):
        s, img = self.read()
        timestamp = time()
        return Frame(timestamp, img)

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning

    def create_atb_bar(self, pos):
        return 0, 0

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
class FileCapture():
    """
    simple file capture that can auto_rewind
    """
    def __init__(self, src):
        self.auto_rewind = True
        self.controls = None  # No UVC controls available with file capture
        # we initialize the actual capture based on cv2.VideoCapture
        self.cap = VideoCapture(src)
        self._get_frame_ = self.cap.read

    def get_size(self):
        return self.cap.get(3), self.cap.get(4)

    def read(self):
        s, img = self._get_frame_()
        if self.auto_rewind and not s:
            self.rewind()
            s, img = self._get_frame_()
        return s, img

    def read_RGB(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2BGR, img)
        return s, img

    def read_HSV(self):
        s, img = self.read()
        if s:
            cvtColor(img, COLOR_RGB2HSV, img)
        return s, img

    def rewind(self):
        self.cap.set(1, 0)  # seek to the beginning
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self, src_id, size=(640, 480), fps=None, timebase=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        ### add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

        if timebase is None:
            logger.debug("Capture will run with default system timebase")
            self.timebase = c_double(0)
        elif isinstance(timebase, c_double):
            logger.debug("Capture will run with app wide adjustable timebase")
            self.timebase = timebase
        else:
            logger.error("Invalid timebase variable type. Will use default system timebase")
            self.timebase = c_double(0)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def get_now(self):
        return time()

    def create_atb_bar(self, pos):
        size = 0, 0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
class Camera_Capture():
    """
    VideoCapture without uvc control using cv2.VideoCapture
    """
    def __init__(self, src_id, size=(640, 480), fps=None):
        self.controls = None
        self.cvId = src_id
        self.name = "VideoCapture"
        ### add cv videocapture capabilities
        self.capture = VideoCapture(src_id)
        self.set_size(size)

    def get_frame(self):
        s, img = self.capture.read()
        timestamp = time()
        return Frame(timestamp, img)

    def set_size(self, size):
        width, height = size
        self.capture.set(3, width)
        self.capture.set(4, height)

    def get_size(self):
        return self.capture.get(3), self.capture.get(4)

    def set_fps(self, fps):
        self.capture.set(5, fps)

    def get_fps(self):
        return self.capture.get(5)

    def create_atb_bar(self, pos):
        size = 0, 0
        return size

    def kill_atb_bar(self):
        pass

    def close(self):
        pass
def main_with_video(): """ This is the main entry point of the camera calibration script. IMPORTANT! Make sure the rows entered is (# of boxes in a column) - 1, cols = (# of boxes in a row) - 1 This has to do with OpenCV's computation of an "inner chessboard". This offset will be built in later, but that would require changing documentation before an implementation has been built. Once you've done that, the code will open a program that shows a picture of the checkerboard and any calibration lines the computer vision library made. Press any key other than ESCAPE to continue the calibration process. It is recommended to press ESCAPE if the camera does not see the entire board. The code will then compute many matrices to calibrate the camera and write an output in a file typically named 0.calib. """ args = get_args() # Capture vars cameras = [] # Which webcam objects you are going to use camera_ids = [] # How to find each webcam NUM_CAMERAS = args["nums"] img_points = [] # Points that the camera percieves (we cannot set these) obj_points = [] # Points in physical space (we arbitrarily set these) # Constants # Prepare object points - taken from tutorials objp = np.zeros((args["rows"] * args["cols"], 3), np.float32) objp[:, :2] = np.mgrid[0:args["cols"], 0:args["rows"]].T.reshape(-1, 2) WIN_SIZE = (11, 11) ZERO_ZONE = (-1, -1) # TODO TERM_CRITERIA_ITER used in C++, using MAX_ITER OK? TERM_CRITERIA = ( TERM_CRITERIA_EPS + TERM_CRITERIA_MAX_ITER, 30, 0.001, ) # 0.1 --> 0.001 # Make sure each camera is accessible, and set it up if it is. for i in range(NUM_CAMERAS): camera = VideoCapture(i) # OpenCV's way of knowing what a camera is img_points.insert(i, []) # List of points that camera i sees obj_points.insert(i, []) # List of points we define for camera i if VideoCapture.isOpened(camera): # Set up the camera camera.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) camera.set(cv2.CAP_PROP_FPS, 30) cameras.append(camera) camera_ids.append(i) pass else: print("Failed to open video capture device " + str(i)) print("Did you check that all cameras are plugged in and ready?") quit() # Make checkerboard points "Calibration variables" # Official docs says checkerboard size should be (cols, rows) checkerboard_size = (args["cols"], args["rows"]) frame = [] # The image (a 2D array of 3-number tuples) gray = None # The same image as Frame, but in grayscale (so 1 number per point) corners = [] # Checkerboard corners (object points?) 
# find the checkerboard for i in range(len(cameras)): frame.append(None) # placeholder for the camera's image to overwrite while True: # Loop until checkerboard corners are found # Take a picture and convert it to grayscale frame[i] = get_image(cameras[i]) gray = cv2.cvtColor(frame[i], COLOR_BGR2GRAY) # Find the checkerboard flags = (CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE + CALIB_CB_FAST_CHECK) retval, corners = cv2.findChessboardCorners( gray, (args["cols"], args["rows"]), None) if not retval: continue # No checkerboard, so keep looking if len(corners) != 0: print("Found checkerboard on " + str(i)) obj_points[i].append(objp) # Find and clean up the intersection points corners = cornerSubPix(gray, corners, WIN_SIZE, ZERO_ZONE, TERM_CRITERIA) # Store the points we found after cleanup img_points[i].append(corners) break assert frame[i].any() != None assert corners.any() != None ret = True # Draw corners / lines on the image to show the user drawChessboardCorners(frame[i], checkerboard_size, corners, ret) # Show the image and prompts to user for confirmation print("Press any key other than ESCAPE to continue") print("Press ESCAPE to abort calibration. If the camera can't see " + "the entire board, you should press ESCAPE and try again.") for i in camera_ids: print("This is what camera " + str(i) + " sees.") imshow(str(i), frame[i]) key = cv2.waitKey(0) if key == 27: print("Calibration process aborted.") exit(0) # Write the calibration file print("Beginning calibration file creation process") camera_matrix = [] dist_coeffs = [] rvecs = [] tvecs = [] for i in range(len(cameras)): if not cameras[i].isOpened(): continue if len(obj_points[i]) == 0: print("No checkerboards detected on camera" + str(i)) continue # TODO check if calib exists first # Might not do this and just overwrite each time. print("Calibrating camera " + str(camera_ids[i])) obj_points[i] = np.asarray(obj_points[i]) # See OpenCV for matrix dimension docs ret_r, mtx_r, dist_r, rvecs_r, tvecs_r = cv2.calibrateCamera( obj_points[i], img_points[i], (len(obj_points), len(obj_points[0])), None, None, ) print("Writing calibration file") calib_file = open(str(10 + camera_ids[i]) + ".calib", "w+") calib_file.write("camera_matrix =") write_matrix_to_file(mtx_r, calib_file) calib_file.write("dist_coeffs =") write_matrix_to_file(dist_r, calib_file) calib_file.close() print("Calibration file written to " + str(camera_ids[i]) + ".calib")
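# --- Hedged single-camera sketch (not the script above; illustrative only) ---
# The same OpenCV calibration pipeline in miniature: collect chessboard corner
# detections, run calibrateCamera, then undistort a frame. Board size, device
# index and the number of views are assumptions made for this example.
import numpy as np
import cv2

ROWS, COLS = 6, 9  # inner corners, i.e. squares per side minus 1
objp = np.zeros((ROWS * COLS, 3), np.float32)
objp[:, :2] = np.mgrid[0:COLS, 0:ROWS].T.reshape(-1, 2)

obj_points, img_points = [], []
cap = cv2.VideoCapture(0)
while len(img_points) < 10:  # ten good views of the board
    ok, frame = cap.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    found, corners = cv2.findChessboardCorners(gray, (COLS, ROWS), None)
    if found:
        corners = cv2.cornerSubPix(
            gray, corners, (11, 11), (-1, -1),
            (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001))
        obj_points.append(objp)
        img_points.append(corners)
cap.release()

if img_points:
    # image size is (width, height), taken from the last grayscale frame
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        obj_points, img_points, gray.shape[::-1], None, None)
    undistorted = cv2.undistort(frame, mtx, dist)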
class BaseProc(object):
    def __init__(self):
        self.cap = VideoCapture(CAMERA)
        self.cap.set(CAP_PROP_FRAME_WIDTH, RESOLUTIONS[0])
        self.cap.set(CAP_PROP_FRAME_HEIGHT, RESOLUTIONS[1])
        self.cap.set(CAP_PROP_AUTO_EXPOSURE, 1)
        self.cap.set(CAP_PROP_EXPOSURE, 1000)
        _, self.frame = self.cap.read()
        self.img_resize()
        self.morph_img = self.frame
        self.colors = ('red', 'blue', 'green', 'pink')
        self.store = {}
        self.min_ycb = [0, 0, 0]
        self.max_ycb = [255, 255, 255]
        self.iters = 1
        self.k_size = 1
        self.morph_elem = None

    def img_resize(self):
        self.frame = resize(self.frame, SCALE_RESOLUTIONS)

    def cvt_ycb(self, orig_img):
        image_ycb = cvtColor(orig_img, COLOR_BGR2YCrCb)
        mask = inRange(image_ycb, array(self.min_ycb), array(self.max_ycb))
        return bitwise_and(orig_img, orig_img, mask=mask)

    def morph_transform(self):
        self.morph_img = morphologyEx(
            cvtColor(self.cvt_ycb(self.frame), COLOR_BGR2GRAY),
            MORPH_OPEN, self.morph_elem, iterations=self.iters)
        _, self.morph_img = threshold(self.morph_img, 0, 255, THRESH_BINARY)

    def select_area(self):
        contours, _ = findContours(self.morph_img, RETR_TREE, CHAIN_APPROX_SIMPLE)
        if len(contours) <= 0:
            return None
        # keep the bounding boxes of the NUM_OF_MAX_AREAS largest contours
        return array([
            boundingRect(contours[c])
            for c in argsort([contourArea(c) for _, c in enumerate(contours)])[-NUM_OF_MAX_AREAS:]
        ])

    def draw_ctr(self, coordinate, color=(0, 255, 0), lw=2):
        x, y, w, h = coordinate
        rectangle(self.frame, (x, y), (x + w, y + h), color, lw)
        target_center = x + w // 2, y + h // 2
        circle(self.frame, target_center, 2, color, -1)

    def change_color(self, color):
        print('change color:', color)
        (self.min_ycb, self.max_ycb, self.k_size, self.iters) = self.store[color]

    def restore_config(self, cfg="config.pkl"):
        try:
            self.store = load(open(cfg, 'rb'))
            print('loaded config')
        except FileNotFoundError:
            print('config not found!')
            return False
        return True
class Main: def __init__(self): self.image = None self.loopThread: threading.Thread = None self.face_cascade: CascadeClassifier = None self.cap: VideoCapture = None self.serialArduino: serial.Serial = None self.timerCanSendData: bool = True self.timer: Timer = Timer(Settings.timerPause, lambda: self.SetCanSendData(True)) asyncio.run(self.Start()) self.loopThread = threading.Thread(target=self.MainLoop) self.loopThread.daemon = True self.loopThread.start() if (Settings.showWindow): self.cameraThread = threading.Thread(target=self.CameraOut) self.cameraThread.daemon = True self.cameraThread.start() async def Start(self) -> None: self.face_cascade = CascadeClassifier(f"Files/{Settings.cascade}") if (not self.cap): self.cap = VideoCapture(Settings.capIndex) self.cap.set(3, Settings.resolution[0]) self.cap.set(4, Settings.resolution[1]) if (Settings.port): if (self.serialArduino): self.serialArduino.close() self.serialArduino = serial.Serial(port=Settings.port, baudrate=Settings.speed) def GetImage(self): return self.image def ImageToBase64(self, image=None) -> str: retval, buffer = imencode('.jpg', image) jpg_as_text = base64.b64encode(buffer).decode("ascii") return jpg_as_text def CameraOut(self): while True: if (self.image is not None): imshow("Image", self.image) waitKey(25) def SetCanSendData(self, value: bool = False): self.timerCanSendData = value def MainLoop(self): while True: self.timer.Update() if (not self.cap and not self.face_cascade): continue _, self.image = self.cap.read() gray = cvtColor(self.image, COLOR_BGR2GRAY) faces = self.face_cascade.detectMultiScale(gray, Settings.scaleFactor, Settings.minNeighbors) if (not self.timerCanSendData): continue if (len(faces) == 0): self.timer.Restart() self.SetCanSendData(False) if (self.serialArduino): self.serialArduino.write(bytes("n", encoding="utf-8")) continue for (x, y, w, h) in faces: self.timer.Restart() self.SetCanSendData(False) if (w * h < Settings.minArea): continue else: rectangle(self.image, (x, y), (x + w, y + h), (255, 0, 0), 2) if (self.serialArduino): self.serialArduino.write(bytes("d", encoding="utf-8"))
class Camera_Capture(object): """docstring for uvcc_camera""" def __init__(self, cam, size=(640, 480), fps=30): self.src_id = cam.src_id self.uId = cam.uId self.name = cam.name self.controls = Controls(self.uId) try: self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0) except KeyError: pass self.capture = VideoCapture(self.src_id) self.set_size(size) def re_init(self, cam, size=(640, 480), fps=30): self.src_id = cam.src_id self.uId = cam.uId self.name = cam.name self.controls = Controls(self.uId) try: self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0) except KeyError: pass self.capture = VideoCapture(self.src_id) self.set_size(size) #recreate the bar with new values bar_pos = self.bar._get_position() self.bar.destroy() self.create_atb_bar(bar_pos) def re_init_cam_by_src_id(self, src_id): try: cam = Camera_List()[src_id] except KeyError: logger.warning( "could not reinit capture, src_id not valid anymore") return self.re_init(cam, self.get_size()) def get_frame(self): s, img = self.capture.read() timestamp = time() return Frame(timestamp, img) def set_size(self, size): width, height = size self.capture.set(3, width) self.capture.set(4, height) def get_size(self): return self.capture.get(3), self.capture.get(4) def set_fps(self, fps): self.capture.set(5, fps) def get_fps(self): return self.capture.get(5) def create_atb_bar(self, pos): # add uvc camera controls to a separate ATB bar size = (200, 200) self.bar = atb.Bar(name="Camera_Controls", label=self.name, help="UVC Camera Controls", color=(50, 50, 50), alpha=100, text='light', position=pos, refresh=2., size=size) sorted_controls = [c for c in self.controls.itervalues()] sorted_controls.sort(key=lambda c: c.order) cameras_enum = atb.enum( "Capture", dict([(c.name, c.src_id) for c in Camera_List()])) self.bar.add_var("Capture", vtype=cameras_enum, getter=lambda: self.src_id, setter=self.re_init_cam_by_src_id) for control in sorted_controls: name = control.atb_name if control.type == "bool": self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val) elif control.type == 'int': self.bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val) self.bar.define(definition='min=' + str(control.min), varname=name) self.bar.define(definition='max=' + str(control.max), varname=name) self.bar.define(definition='step=' + str(control.step), varname=name) elif control.type == "menu": if control.menu is None: vtype = None else: vtype = atb.enum(name, control.menu) self.bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val) if control.menu is None: self.bar.define(definition='min=' + str(control.min), varname=name) self.bar.define(definition='max=' + str(control.max), varname=name) self.bar.define(definition='step=' + str(control.step), varname=name) else: pass if control.flags == "inactive": pass # self.bar.define(definition='readonly=1',varname=control.name) self.bar.add_button("refresh", self.controls.update_from_device) self.bar.add_button("load defaults", self.controls.load_defaults) return size def kill_atb_bar(self): pass def close(self): pass
# -*- coding: utf-8 -*-
from cv2 import VideoCapture, imwrite
import cv2

# resizeWindow() requires an existing named window, so create it first
cv2.namedWindow("Tracking", cv2.WINDOW_NORMAL)
cv2.resizeWindow("Tracking", 800, 800)

capture = VideoCapture(0)
capture.set(3, 1366)
capture.set(4, 768)
ret, frame = capture.read()
imwrite("foreward" + '.jpeg', frame)
def handleSTTData(data): #handles Sub the Title content global jsonData global id global gameModes if data[0].content != b'': #checks if a video has been submitted with open(os.path.join(dataFolder, "NewContent", "sttTemp" + str(id)), 'wb+') as f: #temporarily saves the submitted video f.write(data[0].content) f.close() ffmpegArgs = [ os.path.join(dataFolder, "ModData", "ffmpeg.exe"), "-i", os.path.join(dataFolder, "NewContent", "sttTemp" + str(id)), "-movflags", "faststart", "-pix_fmt", "yuv420p", "-vf", "scale=trunc(iw/2)*2:trunc(ih/2)*2", "-y", os.path.join(dataFolder, "NewContent", "stt" + str(id) + ".mp4") ] #arguments passed to FFMPEG to convert the video to MP4 try: #converts the video to MP4, and extracts its duration and framerate from FFMPEG output. ffmpegOutput = check_output(ffmpegArgs, stderr=STDOUT).decode('utf-8') duration = re.search('Duration: ([0-9:\.]+)', ffmpegOutput, re.MULTILINE | re.IGNORECASE).group(1).split(".")[0] fps = float( re.search('([0-9\.]+) fps', ffmpegOutput, re.MULTILINE | re.IGNORECASE)[1]) durationInSeconds = sum( x * int(t) for x, t in zip([3600, 60, 1], duration.split(":"))) os.remove( os.path.join(dataFolder, "NewContent", "sttTemp" + str(id))) except Exception as e: print("Dropped request : conversion error. ({})".format(e)) return "error during conversion. Maybe try another file type." if data[1].text == '' or data[ 2].text == '': #if the time frame of the part to subtitle is not specified, it's set to the duration of the video. start = 0.05 end = durationInSeconds - 0.05 else: #if the time frame of the part to subtitle is specified, converts the beinning and end value to seconds and checks if they are correct. start = sum(x * int(t) for x, t in zip([60, 1], data[1].text.split(":"))) end = sum(x * int(t) for x, t in zip([60, 1], data[2].text.split(":"))) if start > end or end > durationInSeconds: print("Dropped request : time values are incorrect.") return "time values incorrect. Check if they do not exceed the duration of the video." if start == 0: start = 0.05 if end == durationInSeconds: end = durationInSeconds - 0.05 try: #extracts a screenshot of the video that will be used for the answers reveal. It gets the image in the middle of the time frame of the part to subtitle. screenShotTime = (start + end) / 2 screenShotFrame = screenShotTime * fps cap = VideoCapture( os.path.join(dataFolder, "NewContent", "stt" + str(id) + ".mp4")) cap.set(1, screenShotFrame) ret, frame = cap.read() imwrite( os.path.join(dataFolder, "NewContent", "sttImg" + str(id) + ".jpg"), frame) copyfile( os.path.join(dataFolder, "NewContent", "sttImg" + str(id) + ".jpg"), os.path.join(dataFolder, "NewContent", "sttImg" + str(id))) os.remove( os.path.join(dataFolder, "NewContent", "sttImg" + str(id) + ".jpg") ) #saves the image without extension, and gets rid of the original image. except Exception as e: print( "Dropped request : error while reading video. ({})".format(e)) return "error while reading video." if data[3].text == '': #check if there is an house answer houseAnswer = "No House Answer available." 
print("No house answer provided.") else: houseAnswer = data[3].text print("A house answer was provided for the question.") questionData = { "familyMode": "true", "asset": id, "id": id, "houseAnswers": [houseAnswer], "position": "0,-192.0,837,129", "start": start, "end": end } #the dictionnary which contains the data and will be dumped to the json file jsonData["packages"][0][gameModes["STT"]].append(questionData) saveJson(gameModes["STT"]) print("\"Sub the Title\" content successfully added !") else: print("Dropped request : no video submitted.") return "no image submitted."
class Camera(object):
    '''
    Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dict {propID: value}
        to check corresponding propIDs check opencv documentation under the
        term VideoCapture. They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            for propID, value in dic.iteritems():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        '''
        Read frame from camera.

        Returns
        --------
        frame : np.array
            frame from camera
        '''
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            print 'Cap is not opened.'
            return None

    def set(self, **kwargs):
        '''
        Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.iteritems():
            self.cap.set(propID, value)

    def close(self):
        '''
        Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        '''
        Reopens cap.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
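# --- Hedged usage sketch (not part of the original module) -------------------
# Minimal exercise of the Camera wrapper above, kept in the same Python 2
# register as the class (iteritems / print statement). Property ids 3 and 4 are
# OpenCV's CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT; device index 0 is an
# assumption for the example.
if __name__ == '__main__':
    cam = Camera(0, dic={3: 640, 4: 480})  # open device 0 and request 640x480
    img = cam.frame()                      # one BGR frame, or None if the device is closed
    cam.close()
    cam.reOpen(0)                          # reOpen ignores its argument and reuses self.camera
    cam.close()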
class TimeLapse:
    def __init__(self, config):
        """__init__(self):"""
        self.interval = config.interval
        self.shots = config.count
        self.output = config.output
        self.camera = VideoCapture(config.device)
        self.startFrom = config.startfrom
        self.currnetShot = 0
        self.imageName = config.name
        self.imageType = config.type
        self.imageQuality = config.quality
        self.verbose = config.verbose

    def getResolution(self):
        """getResolution"""
        width = self.camera.get(CAP_PROP_FRAME_WIDTH)
        height = self.camera.get(CAP_PROP_FRAME_HEIGHT)
        return width, height

    def setMaxResolution(self):
        """setMaxResolution"""
        self.setResolution(10000, 10000)

    def setResolution(self, width, height):
        """setResolution"""
        self.camera.set(CAP_PROP_FRAME_WIDTH, width)
        self.camera.set(CAP_PROP_FRAME_HEIGHT, height)

    def run(self):
        """run"""
        if self.startFrom:
            self.currnetShot = self.startFrom
            if self.shots:
                self.shots = self.startFrom + self.shots
        while True:
            if self.shots and self.shots <= self.currnetShot:
                break
            ok, img = self.takePhoto()
            if ok:
                self.saveImage(img)
                self.currnetShot += 1
            sleep(self.interval)

    def takePhoto(self):
        """takePhoto"""
        return self.camera.read()

    def saveImage(self, img):
        """saveImage"""
        name = '{name}_{index:02d}.{type}'.format(name=self.imageName,
                                                  index=self.currnetShot,
                                                  type=self.imageType)
        imagePath = os.path.join(self.output, name)
        if self.verbose:
            print('Save a photo {} to: {}'.format(img.shape, imagePath))
        imwrite(imagePath, img, self.imageOpts())

    def imageOpts(self):
        if self.imageType == 'jpg':
            if self.imageQuality < 0 or self.imageQuality > 100:
                self.imageQuality = 95
            return IMWRITE_JPEG_QUALITY, self.imageQuality
        elif self.imageType == 'png':
            if self.imageQuality < 0 or self.imageQuality > 9:
                self.imageQuality = 3
            return IMWRITE_PNG_COMPRESSION, self.imageQuality
        elif self.imageType == 'webp':
            if self.imageQuality < 0 or self.imageQuality > 100:
                self.imageQuality = 95
            return IMWRITE_WEBP_QUALITY, self.imageQuality

    def stop(self):
        """stop"""
        self.shots = self.currnetShot

    def __del__(self):
        self.camera.release()
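# --- Hedged usage sketch (not part of the original module) -------------------
# TimeLapse only reads plain attributes off its config argument, so a
# SimpleNamespace can stand in for whatever argument parser the project really
# uses; all values below are illustrative assumptions.
from types import SimpleNamespace

config = SimpleNamespace(interval=5, count=10, output='.', device=0, startfrom=0,
                         name='shot', type='jpg', quality=90, verbose=True)
lapse = TimeLapse(config)
lapse.setMaxResolution()  # ask the driver for the largest frame size it supports
lapse.run()               # take `count` photos, one every `interval` seconds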
def get_video_capture(self, camera_id: int) -> VideoCapture:
    cap = VideoCapture(camera_id)
    cap.set(CAP_PROP_BUFFERSIZE, 1)
    return cap
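# --- Hedged usage sketch (assumption: a camera exists at index 0) -------------
# CAP_PROP_BUFFERSIZE = 1 asks the backend to queue at most one frame, so
# read() returns a near-live image instead of a stale buffered one. Not every
# backend honours the property; in that case set() simply returns False.
from cv2 import VideoCapture, CAP_PROP_BUFFERSIZE

cap = VideoCapture(0)
if not cap.set(CAP_PROP_BUFFERSIZE, 1):
    print("backend ignored CAP_PROP_BUFFERSIZE")
ok, frame = cap.read()
if ok:
    print("live frame:", frame.shape)
cap.release()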
class Camera_Capture(object): """docstring for uvcc_camera""" def __init__(self, cam,size=(640,480),fps=30): self.src_id = cam.src_id self.uId = cam.uId self.name = cam.name self.controls = Controls(self.uId) try: self.controls['UVCC_REQ_FOCUS_AUTO'].set_val(0) except KeyError: pass if '6000' in self.name and False: #on mac we dont have enough controls to use this right. logger.info("adjusting exposure for HD-6000 camera") try: self.controls['UVCC_REQ_EXPOSURE_AUTOMODE'].set_val(1) self.controls['UVCC_REQ_EXPOSURE_ABS'].set_val(156) except KeyError: pass self.capture = VideoCapture(self.src_id) self.set_size(size) def get_frame(self): s, img = self.capture.read() timestamp = time() return Frame(timestamp,img) def set_size(self,size): width,height = size self.capture.set(3, width) self.capture.set(4, height) def get_size(self): return self.capture.get(3), self.capture.get(4) def set_fps(self,fps): self.capture.set(5,fps) def get_fps(self): return self.capture.get(5) def create_atb_bar(self,pos): # add uvc camera controls to a separate ATB bar size = (200,200) self.bar = atb.Bar(name="Camera_Controls", label=self.name, help="UVC Camera Controls", color=(50,50,50), alpha=100, text='light',position=pos,refresh=2., size=size) sorted_controls = [c for c in self.controls.itervalues()] sorted_controls.sort(key=lambda c: c.order) for control in sorted_controls: name = control.atb_name if control.type=="bool": self.bar.add_var(name,vtype=atb.TW_TYPE_BOOL8,getter=control.get_val,setter=control.set_val) elif control.type=='int': self.bar.add_var(name,vtype=atb.TW_TYPE_INT32,getter=control.get_val,setter=control.set_val) self.bar.define(definition='min='+str(control.min), varname=name) self.bar.define(definition='max='+str(control.max), varname=name) self.bar.define(definition='step='+str(control.step), varname=name) elif control.type=="menu": if control.menu is None: vtype = None else: vtype= atb.enum(name,control.menu) self.bar.add_var(name,vtype=vtype,getter=control.get_val,setter=control.set_val) if control.menu is None: self.bar.define(definition='min='+str(control.min), varname=name) self.bar.define(definition='max='+str(control.max), varname=name) self.bar.define(definition='step='+str(control.step), varname=name) else: pass if control.flags == "inactive": pass # self.bar.define(definition='readonly=1',varname=control.name) self.bar.add_button("refresh",self.controls.update_from_device) self.bar.add_button("load defaults",self.controls.load_defaults) return size def kill_atb_bar(self): pass def close(self): pass
def main(): """""" # Win32: handle drive letter ('c:', ...) source = sub(r'(^|=)([a-zA-Z]):([/\\a-zA-Z0-9])', r'\1?disk\2?\3', "0") chunks = source.split(':') chunks = [sub(r'\?disk([a-zA-Z])\?', r'\1:', s) for s in chunks] source = chunks[0] try: source = int(source) except ValueError: pass params = dict(s.split('=') for s in chunks[1:]) cam = VideoCapture(source) if 'size' in params: width, height = map(int, params['size'].split('x')) cam.set(CAP_PROP_FRAME_WIDTH, width) cam.set(CAP_PROP_FRAME_HEIGHT, height) if cam is None or not cam.isOpened(): print(f"Warning: unable to open {source}") return # https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_alt.xml # Try data/haarcascades/haarcascade_eye.xml also cascade = CascadeClassifier("haarcascade_frontalface_alt.xml") old_rects = [] image = pyvips.Image.new_from_file("laughing-man.svg", dpi=300) pil_img = Image.open(BytesIO(image)).convert('RGBA') cv_img = cvtColor(np.array(pil_img), COLOR_RGBA2BGRA) s_img = imread(cv_img, -1) #s_img = imread("laughing-man.svg", -1) time_delay = clock() while True: _, img = cam.read() # Face detection rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags=CASCADE_SCALE_IMAGE) if len(rects) == 0: rects = [] else: rects[:, 2:] += rects[:, :2] if isinstance(rects, list): rects = old_rects elif not isinstance(old_rects, list): for left_bound, lower_bound, right_bound, upper_bound in rects: x_size = right_bound - left_bound y_size = upper_bound - lower_bound rect_pyth = (y_size * y_size) + (x_size * x_size) for left_bound, lower_bound, right_bound, upper_bound in old_rects: x_size = right_bound - left_bound y_size = upper_bound - lower_bound old_pyth = (y_size * y_size) + (x_size * x_size) if old_pyth > rect_pyth: rects = ((old_rects) * 2 + (rects)) / 3 old_rects = rects else: old_rects = rects vis = img.copy() face_mask(vis, s_img, rects) time_delay = display_fps(vis, time_delay) imshow(WINDOW_TITLE, vis) character = waitKey(1) if getWindowProperty(WINDOW_TITLE, WND_PROP_VISIBLE) < 1: break # Quiting key bindings if character in [27, 1048603, 32]: # ESC or Space key to close window break # Alternatively, pressing S takes a screenshot if character in [115]: screenshot_directory = Path.cwd() / "screenshots" if not screenshot_directory.exists(): screenshot_directory.mkdir(parents=True, exist_ok=True) unix_value = int(time()) imwrite(str(screenshot_directory / f"laugh_{unix_value}.bmp"), img) print(f"screenshots/laugh_{unix_value}.bmp saved.") destroyAllWindows()
class camera(object): ''' Object containing camera information Call-able, retrieve current frame in camera buffer User accessible attributes: device system device number resolution camera resolution BGRimage image in BGR format HSVimage image in HSV format RGBimage image in RGB format FPS camera speed in FPS User accessible methods: close close camera device ''' def __init__(self, cam_num = -1, resolution = (640, 480)): ''' create camera object cam_num device number (integer) resolution image resolution (tuple width x height) ''' self.device = cam_num self.resolution = resolution self.BGRimage = [] self.HSVimage = [] self.RGBimage = [] self.FPS = [0, 0] self.__avr = 0 #assign and open device self.__capture = VideoCapture(cam_num) self.__capture.set(CV_CAP_PROP_FRAME_WIDTH,resolution[0]) self.__capture.set(CV_CAP_PROP_FRAME_HEIGHT,resolution[1]) self.__capture.open self.__flag = False t0 = time() self.__flag, self.BGRimage = self.__capture.read() self.FPS[0] = 1/(time()-t0) self.FPS[1] = self.FPS[0] self.__avr = self.FPS[0] print "camera", self.device, "ready @", self.FPS[0], "fps" return def __call__(self, frame_delay = 0, fast = False): ''' retrieve current frame in camera buffer frame_delay delay the frame decoding (integer) fast if true don't decode image to RGB format (logic) ''' #set timer to meassure fps self.__avr = self.FPS[1] t0 = time() #try to retrieve current frame while not self.__flag: if frame_delay > 0: for i in xrange(frame_delay + 1): self.__capture.grab() self.__flag, self.BGRimage = self.__capture.retrieve() del i else: self.__flag, self.BGRimage = self.__capture.read() self.__flag = False #decode bgr format to hsv self.HSVimage = cvtColor(self.BGRimage, CV_BGR2HSV) if fast: self.FPS[0] = 1/(time()-t0) self.FPS[1] = (self.FPS[0]+self.__avr)/2 return #decode bgr format to rgb self.RGBimage = cvtColor(self.BGRimage, CV_BGR2RGB) self.FPS[0] = 1/(time()-t0) self.FPS[1] = (self.FPS[0]+self.__avr)/2 return def __str__(self): ''' return camera information; device number device resolution instant speed average speed ''' tmp = "camera object @ dev "+str(self.device)+", resolution: "+str(self.resolution) tmp = tmp +", fps: "+str(self.FPS[0])+", Avr. fps: "+str(self.FPS[1]) return tmp def __del__(self): ''' when the object is deleted, it closes the device ''' self.close() return def close(self): ''' close device, making it available to use ''' #if the device is open then close it if self.__capture.isOpened(): self.__capture.release() print "camera", self.device, "closed" return
from os import listdir, chdir
from cv2 import VideoCapture, imwrite, CAP_PROP_POS_FRAMES

if __name__ == '__main__':
    # for folder in ['spectra']:
    for folder in listdir('.'):
        chdir(folder)
        for video_file in (video_file for video_file in listdir('.')
                           if video_file.endswith('.avi')):
            for fr in [0, 15, 30]:
                cap = VideoCapture(video_file)
                cap.set(CAP_PROP_POS_FRAMES, fr)
                ret, frame = cap.read()
                imwrite(f'frame_{fr}_{video_file[:-4]}.png', frame)
        chdir('..')
class DBugVideoStream(object): """ A class to interact with a camera connected using a USB """ def __init__(self, camera_device_index=DEFAULT_CAMERA_USB_INDEX, read_buffer_amount=DEFAULT_READ_BUFFER_AMOUNT): """ :param camera_device_index: The USB device index of the camera to get the pictures from :param read_buffer_amount: The amount of frames to read from the camera in each update in order to clear the buffer """ self.cam = VideoCapture(camera_device_index) self._set_cam_props() self.read_buffer_amount = read_buffer_amount def _set_cam_props(self): """ Setting the props of self.cam such as picture height, width, saturation and more """ if not self.cam.isOpened(): raise ValueError("Camera Not Found") if not self.cam.set(3, RESIZE_IMAGE_WIDTH): logger.warning("Set width failed") if not self.cam.set(4, RESIZE_IMAGE_HEIGHT): logger.warning("Set height failed") if not self.cam.set(cv.CV_CAP_PROP_BRIGHTNESS, BRIGHTNESS): logger.warning("Set brightness failed") if not self.cam.set(cv.CV_CAP_PROP_SATURATION, SATURATION): logger.warning("Set saturation failed") if not self.cam.set(cv.CV_CAP_PROP_EXPOSURE, EXPOSURE): logger.warning("Set exposure failed") if not self.cam.set(cv.CV_CAP_PROP_CONTRAST, CONTRAST): logger.warning("Set contrast failed") return self.cam def get_image(self): """ Clears the buffer of self.cam and grabs a frame. :return: The frame that was grabbed, as a DBugColorImage instance. """ got_image, new_frame = self.cam.read() if not got_image: # Can't even grab one frame - might be a problem return for _ in xrange(self.read_buffer_amount - 1): # minus one, since we already grabbed one frame. frame = new_frame got_image, new_frame = self.cam.read() # Grab another frame if not got_image: break # Seems like the buffer is empty - lets use the last frame else: # this occurs if the loop wasn't broken - lets use the new frame frame = new_frame if ROTATE_CLOCKWISE: frame = rot90(frame, 1) return DBugColorImage(cv2_image=frame) @staticmethod def get_camera_usb_device_index(): """ finds the camera usb device index from /dev return: Int, the usb device index of the camera that is currently connected. If no camera was found None. """ video_devices_names = glob("/dev/video*") if video_devices_names: first_cam_name = video_devices_names[0] cam_index_start_index = first_cam_name.find("video") return int(first_cam_name[cam_index_start_index + len("video"):]) else: return None
class Worker: ## Initialize Worker robot. def __init__(self): try: self.camera = VideoCapture(CAMERA_INDEX) self.camera.set(3,CAMERA_WIDTH) self.camera.set(4,CAMERA_HEIGHT) message = 'Success.' except Exception: message = 'Failure.' print('[Setting up Camera]...' + message) try: self.arduino = serial.Serial(DEVICE, BAUD) message = 'Success.' except Exception: message = 'Failure.' print('[Setting up Controller]...' + message) self.reset_worker() ## Capture image then identify Home. def detect_green(self): objects = [] x = 0 for cache in range(10): (success, frame) = self.camera.read() raw = Image.fromarray(frame) BGR = raw.split() B = array(BGR[0].getdata(), dtype=float32) G = array(BGR[1].getdata(), dtype=float32) R = array(BGR[2].getdata(), dtype=float32) NDI_G = (BIAS*G + MINIMUM_COLOR)/(R + B + MINIMUM_COLOR) matrix = NDI_G.reshape(CAMERA_HEIGHT,CAMERA_WIDTH) columns = matrix.sum(axis=0) high = numpy.mean(columns) + numpy.std(columns) low = numpy.mean(columns) while (x < CAMERA_WIDTH-1): if (columns[x] > high): start = x while (columns[x] > low) and (x < CAMERA_WIDTH-1): x += 1 end = x size = (end - start) offset = (start + (end - start)/2) - CAMERA_CENTER if (size > MINIMUM_SIZE): objects.append((size,offset)) else: x += 1 return objects ## Detect def detect_yellow(self): objects = [] x = 0 for cache in range(30): (success, frame) = self.camera.read() raw = Image.fromarray(frame) BGR = raw.split() B = array(BGR[0].getdata(), dtype=float32) G = array(BGR[1].getdata(), dtype=float32) R = array(BGR[2].getdata(), dtype=float32) NDI_G = BIAS*(G + R + MINIMUM_COLOR)/(2*B + MINIMUM_COLOR) matrix = NDI_G.reshape(CAMERA_HEIGHT,CAMERA_WIDTH) columns = matrix.sum(axis=0) high = numpy.mean(columns) + numpy.std(columns) low = numpy.mean(columns) while (x < CAMERA_WIDTH-1): if (columns[x] > high): start = x while (columns[x] > low) and (x < CAMERA_WIDTH-1): x += 1 end = x size = (end - start) offset = (start + (end - start)/2) - CAMERA_CENTER if (size > MINIMUM_SIZE): objects.append((size,offset)) else: x += 1 return objects ## Is Oriented? --> Boolean def is_oriented(self): for cache in range(10): (success, frame) = self.camera.read() grayscale = cv2.cvtColor(frame, cv.CV_BGR2GRAY) blurred = cv2.GaussianBlur(grayscale, (0,0), 5) # (0,0), 5 colored = cv2.cvtColor(blurred,cv2.COLOR_GRAY2BGR) (flag, thresholded) = cv2.threshold(blurred, 50, 255, cv2.THRESH_BINARY) # 50 and 255 WORKS circles = cv2.HoughCircles(thresholded,cv2.cv.CV_HOUGH_GRADIENT,DP,MIN_DISTANCE,param1=EDGE_THRESHOLD, param2=CENTER_THRESHOLD, minRadius=MIN_RADIUS,maxRadius=MAX_RADIUS) try: for target in circles[0,:]: x = target[0] y = target[1] r = target[2] if (abs(CAMERA_CENTER - x) < CAMERA_THRESHOLD): return True else: return False except TypeError: return False ## Connect to Server. def connect(self): if (self.connected_out == False) and (self.connected_in == False): try: self.socket_out = socket.socket(AF_INET, SOCK_STREAM) self.socket_out.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.socket_out.bind((ADDRESS_OUT)) self.socket_out.listen(QUEUE_MAX) (self.connection, self.address) = self.socket_out.accept() self.connected_out = True message = 'Success.' except socket.error as message: message = 'Failure.' self.connected_out = False self.socket_out.close() pass print('[Establishing Send Port]...' + str(message)) try: self.socket_in = socket.socket(AF_INET,SOCK_STREAM) self.socket_in.connect((ADDRESS_IN)) self.connected_in = True message = 'Success.' 
except socket.error as message: self.socket_in.close() self.connected_in = False pass print('[Establishing Receive Port]...' + str(message)) ## Receives COMMAND from Server. def receive_command(self): try: json_command = self.socket_in.recv(BUFFER_SIZE) dict_command = json.loads(json_command) self.command = dict_command['COMMAND'] message = str(json_command) except socket.error as message: pass #! print('----------------------------------') print('[Receiving COMMAND]...' + str(message)) ## After receiving COMMAND, determine action. def decide_action(self): if (self.command == 'START'): self.goal = 'STARTING' self.start() elif (self.command == 'CONTINUE'): if (self.gathered < 4): self.goal = 'GATHERING' self.gather() elif (not self.dumped): self.goal = 'STACKING' self.find_zone() elif (not self.returning): self.goal = 'RETURNING' self.return_home() else: self.action = 'W' elif (self.command == 'PAUSE'): self.goal = 'PAUSING' self.action = 'W' elif (self.command == 'DISCONNECT'): self.goal = 'DISCONNECTING' self.action = 'W' else: self.action = 'W' ## Start Logic def start(self): self.action = 'H' print('[Starting]...Helping Extend Rack.') ## Gather Logic def gather(self): objects = self.detect_green() #1 There is an object in view... try: (size, offset) = max(objects) #2 ...and it is small... if (size < SIZE_GRAB_RANGE) and not (self.error_number == ERROR_LOAD): #3 ...and I just tried avoiding to the left... if (self.error_number == ERROR_AVOID_RIGHT): message = 'Blocked After Avoiding Right -> Avoiding Left.' self.action = 'J' #3 ...and I just tried avoid to the right... elif (self.error_number == ERROR_AVOID_LEFT): message = 'Blocked After Avoiding Left --> Avoiding Right.' self.action = 'I' #3 ...and I just tried to turn right... elif (self.error_number == ERROR_BLOCKED_LEFT): message = 'Blocked After Turning Left --> Avoiding Right.' self.action = 'I' #3 ... and i just tried to turn left... elif (self.error_number == ERROR_BLOCKED_LEFT): message = 'Blocked After Turning Right --> Avoiding Left.' self.action = 'J' #3 ...and I just was blocked after moving... elif (self.error_number == ERROR_CLOSE): message = 'Blocked After Turning Right --> Avoiding Left.' self.action = 'I' #3 ...and I didn't make bad movements... else: #4 ...and it's to the right... if (offset > CAMERA_THRESHOLD): if (offset > 4*CAMERA_THRESHOLD): message = '(Object in View -> 4 Right.' self.action = 'T' elif (offset > 3*CAMERA_THRESHOLD): message = 'Object in View -> 3 Right.' self.action = 'S' elif (offset > 2*CAMERA_THRESHOLD): message = 'Object in View -> 2 Right.' self.action = 'R' else: message = 'Object in View -> 1 Right' self.action = 'Q' #4 ...and it's to the left. elif (offset < -CAMERA_THRESHOLD): if (offset < -4*CAMERA_THRESHOLD): message = 'Object in View -> 4 Left' self.action = 'N' elif (offset < -3*CAMERA_THRESHOLD): message = 'Object in View -> 3 Left' self.action = 'M' elif (offset < -2*CAMERA_THRESHOLD): message = 'Object in View -> 2 Left' self.action = 'L' else: message = 'Object in View -> 1 Left' self.action = 'K' #4 ...and it's in front... else: message = 'Too Small -> Out of Range' self.action = 'F' #2 ...and it is large... elif (size > SIZE_GRAB_RANGE): #3 ...and I was just trying to grab something... if (self.error_number == ERROR_LOAD): message = 'Load Failed -> Reversing.' self.action = 'B' #3 ...and I was not just trying to grab something... else: #4 ...and it is oriented... if True: #! 
(self.is_oriented()) message = 'Large Enough -> In Range, Oriented -> Grab' self.action = 'G' self.gathered += 1 #4 ...but it is not oriented.. else: #5 ...and I already tried to orbit left. if (self.error_number == ERROR_ORBIT_LEFT): message = 'Large Enough -> In Range, Not Oriented --> Orbiting Right.' self.action = 'O' #5 ...and I didn't just try to go around right. elif (self.error_number == ERROR_ORBIT_RIGHT): message = 'Large Enough -> In Range, Not Oriented & Blocked Right -> Orbiting Left.' self.action = 'P' #5 ... grab anyway. else: message = 'In Range, Not Oriented -> Attempting to Orbing .' self.action = 'O' #2 else: self.action = 'W' message = 'Confused -> Waiting.' #1 There are no objects in view. except ValueError: message = 'No Objects Detected -> Searching Right.' self.action = 'T' print('[Gathering]...' + str(objects) + '...' + message) print('[Remembering Action]...' ) self.previous_action = self.action # remember newest action ## Find Zone def find_zone(self): objects = self.detect_green() # The Zone is in view... try: (size, offset) = max(objects) #2 ... but not at the zone... if (size < SIZE_HOME_RANGE): #3 ... and it's to the right. if (offset > CAMERA_THRESHOLD): if (offset > 4*CAMERA_THRESHOLD): message = '(Zone in View -> 4 Right.' self.action = 'T' elif (offset > 3*CAMERA_THRESHOLD): message = 'Zone in View -> 3 Right.' self.action = 'S' elif (offset > 2*CAMERA_THRESHOLD): message = 'Zone in View -> 2 Right.' self.action = 'R' else: message = 'Zone in View -> 1 Right' self.action = 'Q' #3 ...and it's to the left. elif (offset < -CAMERA_THRESHOLD): if (offset < -4*CAMERA_THRESHOLD): message = 'Zone in View -> 4 Left' self.action = 'N' elif (offset < -3*CAMERA_THRESHOLD): message = 'Zone in View -> 3 Left' self.action = 'M' elif (offset < -2*CAMERA_THRESHOLD): message = 'Zone in View -> 2 Left' self.action = 'L' else: message = 'Zone in View -> 1 Left' self.action = 'K' #3 ...and it is straight ahead. else: #4 ...but it is blocked ahead. if (self.error_number == ERROR_CLOSE): message = 'Object in Way -> Avoiding Right.' self.action = 'I' #4 ...but it is blocked after turning right. elif (self.error_number == ERROR_AVOID_RIGHT): message = 'Failed to Turn Right -> Avoiding Left.' self.action = 'J' #4 ...but it is blocked after avoiding right. elif (self.error_number == ERROR_AVOID_RIGHT): message = 'Failed to Avoid Right -> Avoiding Left.' self.action = 'J' #4 ...but it is blocked to the left. elif (self.error_number == ERROR_BLOCKED_LEFT): message = 'Failed to Turn Left -> Avoiding Right.' self.action = 'I' #4 ...but it is blocked after avoiding right. elif (self.error_number == ERROR_BLOCKED_RIGHT): message = 'Failed to Avoid Left -> Avoiding Right.' self.action = 'I' #4 ...and isn't blocked. else: message = 'Zone Too Small -> Moving Forward.' self.action = 'F' #2 ..and have reached the zone. else: message = 'At Zone -> Dumping Bales.' self.action = 'D' self.dumped = True #1 The Zone is not in view... except ValueError: message = 'Cannot See Zone -> Searching Left For Zone.' self.action = 'N' print('[Finding Zone]...' + message) ## Return Home def return_home(self): objects = self.detect_yellow() #1 Home is in view... try: (size, offset) = max(objects) #2 ... but not at the home... if (size < SIZE_ZONE_RANGE): #3 ... and it's to the right. if (offset > CAMERA_THRESHOLD): if (offset > 4*CAMERA_THRESHOLD): message = 'Home in View -> 4 Right.' self.action = 'T' elif (offset > 3*CAMERA_THRESHOLD): message = 'Home in View -> 3 Right.' 
self.action = 'S' elif (offset > 2*CAMERA_THRESHOLD): message = 'Home in View -> 2 Right.' self.action = 'R' else: message = 'Home in View -> 1 Right' self.action = 'Q' #3 ...and it's to the left. elif (offset < -CAMERA_THRESHOLD): if (offset < -4*CAMERA_THRESHOLD): message = 'Home in View -> 4 Left' self.action = 'N' elif (offset < -3*CAMERA_THRESHOLD): message = 'Home in View -> 3 Left' self.action = 'M' elif (offset < -2*CAMERA_THRESHOLD): message = 'Home in View -> 2 Left' self.action = 'L' else: message = 'Home in View -> 1 Left' self.action = 'K' #3 ...and it is straight ahead. else: #4 ...but it is blocked. if ((self.error_number == ERROR_BLOCKED_LEFT) or (self.error_number == ERROR_CLOSE) or (self.error_number == ERROR_AVOID_LEFT)): message = 'Object in Way Failed to go Left -> Avoiding Right.' self.action = 'I' elif ((self.error_number == ERROR_AVOID_RIGHT) or (self.error_number == ERROR_BLOCKED_RIGHT)): message = 'Object in Way, Failed to go Right -> Avoiding Left.' self.action = 'J' else: message = 'Home Too Small -> Moving Forward.' self.action = 'F' #2 ..and have reached the zone... else: message = 'At Home -> Parking.' self.action = 'F' self.returned = True #1 Home is not in view... except ValueError: message = 'Cannot See Home -> Searching Left For Home.' self.action = 'N' print('[Finding Home]...' + message) ## Execute action with arduino. def control_arduino(self): ### Send try: self.arduino.write(self.action) message = self.action except Exception: message = 'Failure.' print('[Sending ACTION]...' + message) ### Receive try: self.error_number = int(self.arduino.readline()) message = str(self.error_number) except ValueError: self.error_number = ERROR_PARSE message = "ValueError: Failed to parse signal, retrying..." except OSError: self.error_number = ERROR_CONNECTION message = "OSError: Connection lost, retrying..." except SerialException: self.error_number = ERROR_CONNECTION message = "Serial Exception: Connection lost, retrying..." except SyntaxError: self.error_number = ERROR_PARSE message = "Syntax Error: Failed to parse signal, retrying..." except KeyError: self.error_number = ERROR_PARSE message = 'KeyError: Failed to parse signal, retrying...' print('[Receiving ERROR from Controller]...' + message) ## Handle Errors from Arduino def handle_error(self): if (self.error_number == ERROR_NONE): self.error = 'NONE' elif (self.error_number == ERROR_CLOSE): self.error = 'TOO CLOSE' elif (self.error_number == ERROR_LOAD): self.error = 'LOAD FAILED' self.gathered -= 1 elif (self.error_number == ERROR_PARSE): self.error = 'PARSE FAILED' elif (self.error_number == ERROR_ACTION): self.error = 'BAD ACTION' elif (self.error_number == ERROR_BLOCKED_RIGHT): self.error = 'BLOCKED AFTER RIGHT TURN' elif (self.error_number == ERROR_BLOCKED_LEFT): self.error = 'BLOCKED AFTER LEFT TURN' elif (self.error_number == ERROR_ORBIT_RIGHT): self.error = 'ORBIT RIGHT FAILED' elif (self.error_number == ERROR_ORBIT_LEFT): self.error = 'ORBIT LEFT FAILED' elif (self.error_number == ERROR_AVOID_RIGHT): self.error = 'AVOID RIGHT FAILED' elif (self.error_number == ERROR_AVOID_LEFT): self.error = 'AVOID LEFT FAILED' else: self.error = 'UNKNOWN ERROR' print('[Handling ERRORS]...' + self.error) ## Send RESPONSE to Server def send_response(self): try: json_response = json.dumps({'ACTION':self.action, 'ERROR':self.error, 'GATHERED':str(self.gathered), 'GOAL':self.goal}) self.connection.send(json_response) message = str(json_response) except Exception: message = "Failure." print('[Sending RESPONSE]...' 
+ message) ## Disconnect from Server. def disconnect(self): try: self.socket_in.close() self.socket_out.close() self.connection.close() self.reset_worker() message = 'Success.' except Exception: message = 'Failure.' pass print('[Disconnecting]...' + message) ## Reset Worker def reset_worker(self): try: self.connected_out = False self.connected_in = False self.command = None self.response = None self.action = None self.previous_action = None self.error = None self.error_number = None self.gathered = 0 self.dumped = False self.returned = False self.x = 0 self.y = 0 self.orientation = 0 message = 'Success.' except Exception: message = 'Failure.' print('[Resetting Worker]...' + message)
def setup(videoFile, cascadeFile, showOutput, initialBoxes=None, writeToVideo=False, outputFile="result.mp4"): """Reads in the video, requests cropping dimensions and runs the cascade to find the pucks""" global capture, writer, writeVideo, top, bottom, right, left, showOutputSeperate showOutputSeperate = showOutput #Open video file capture = VideoCapture(videoFile) #Read first frame success, frame = capture.read() if not success: return False, 0 #Get video details cols = int(capture.get(cv.CAP_PROP_FRAME_WIDTH)) rows = int(capture.get(cv.CAP_PROP_FRAME_HEIGHT)) framecount = int(capture.get(cv.CAP_PROP_FRAME_COUNT)) framerate = int(capture.get(cv.CAP_PROP_FPS)) #Select table size and crop the frame cv.namedWindow("Tisch auswaehlen und mit Enter bestaetigen", cv.WINDOW_NORMAL | cv.WINDOW_KEEPRATIO) tableBox = cv.selectROI("Tisch auswaehlen und mit Enter bestaetigen", frame, True, True) if DEBUG: print(tableBox) x, y, w, h = tableBox x, y = max(x, 0), max(y, 0) if DEBUG: print(x, y, w, h) top, bottom, left, right = y, y + h, x, x + w frame = frame[top:bottom, left:right] cv.destroyWindow("Tisch auswaehlen und mit Enter bestaetigen") openAndConfigureWindow() cv.imshow("Tracker", frame) cv.waitKey(0) #Find objects or use the boxes passed from user input and filter oversized and undersized boxes out if initialBoxes == None or len(initialBoxes) == 0: initialBoxes = cascade.findPucksInitial(cascadeFile, frame) initialBoxes[:] = (box for box in initialBoxes if utils.checkSizeOK(box)) if DEBUG: print(initialBoxes) frameCopy = frame[:, :, :] for box in initialBoxes: p1 = (int(box[0]), int(box[1])) p2 = (int(box[0] + box[2]), int(box[1] + box[3])) cv.rectangle(frameCopy, p1, p2, (0, 0, 255), 2, 1) cv.imshow("Tracker", frameCopy) cv.waitKey(0) utils.filterDuplicates(initialBoxes) for box in initialBoxes: p1 = (int(box[0]), int(box[1])) p2 = (int(box[0] + box[2]), int(box[1] + box[3])) cv.rectangle(frameCopy, p1, p2, (0, 255, 0), 2, 1) cv.imshow("Tracker", frameCopy) cv.waitKey(0) modifiedBoxes = [] for box in initialBoxes: modifiedBoxes.append(utils.shrinkBox(box)) #Create TrackablePuck objects initialObjects = [] multi_tracker.puckCountStart = len(modifiedBoxes) for i in range(0, len(modifiedBoxes)): initialObjects.append(TrackablePuck(i, modifiedBoxes[i])) #Initialize trackers and draw initial boxes and their indizes onto the frame displayFrame, positions = multi_tracker.addInitialObjects( initialObjects, frame) cv.imshow("Tracker", displayFrame) cv.waitKey(0) cv.destroyWindow("Tracker") #Reset frame counter capture.set(cv.CAP_PROP_POS_FRAMES, 0) #Initialize a video writer to write the frames with tracker marks to a file writeVideo = writeToVideo if writeToVideo: writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc(*'DIVX'), 60, (w, h)) #Returns True, the total number of frames of the video, the frame with the markers, the amount of pucks and the frame size when the setup was successful return True, capture.get( cv.CAP_PROP_FRAME_COUNT ), displayFrame, multi_tracker.puckCountStart, w, h
class VideoStream: # Custom exception to handle data errors class DataError(Exception): pass def __init__(self, camera_id: int, *, ip="0.0.0.0", port=50010): """ Constructor function used to initialise the stream by setting up the communication and camera-related values. .. warning:: You should avoid changing the ip address parameter and leave it at its default state, to ensure portability of the communication code. It is recommended that you change the `self._TIMEOUT` to adjust the delay on timeout with surface, as well as modify any `self._CAMERA` constants to adjust the resolution of streams. :param camera_id: Video Capture's ID :param ip: Raspberry Pi's IP address :param port: Raspberry Pi's port """ # Save the host and port information self._ip = ip self._port = port # Store the video capture id self._camera_id = camera_id # Create the OpenCV's camera object self._camera = VideoCapture(camera_id) # Declare the camera resolution constants for low quality stream self._CAMERA_WIDTH = 320 self._CAMERA_HEIGHT = 240 # Apply the low resolution self._camera.set(3, self._CAMERA_WIDTH) self._camera.set(4, self._CAMERA_HEIGHT) # Declare the constant for the communication timeout with the surface self._TIMEOUT = 3 # Initialise the socket for IPv4 addresses (hence AF_INET) and TCP (hence SOCK_STREAM) self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Bind the socket to the given address, inform about errors try: self._socket.bind((self._ip, self._port)) except socket.error: print("Failed to bind socket to the given address {}:{} ".format( self._ip, self._port)) # Tell the server to listen to only one connection self._socket.listen(1) # Initialise the frame-end string to mark when the frame was fully sent self._end_payload = bytes("Frame was successfully sent", encoding="ASCII") def _handle_data(self): """ Function used to process the frames and send them to surface. Any frame-related modifications should be introduced here, preferably encapsulated in another function. """ # Once connected, keep receiving and sending the data, raise exception in case of errors try: # Fetch the next frame frame = self._next_frame() # Only handle valid frames if frame is not None: # Send the frame self._client_socket.sendall(dumps(frame)) # Mark that the frame was sent self._client_socket.sendall(self._end_payload) # Wait for the acknowledgement self._client_socket.recv(128) except (ConnectionResetError, ConnectionAbortedError, timeout, PicklingError): raise self.DataError def _next_frame(self): """ Function used to fetch a frame from the registered camera. :return: OpenCV frame object (numpy array) """ # Attempt to take the next frame try: # Fetch the frame information _, frame = self._camera.read() # Encode the frame _, frame = imencode(".jpg", frame) # Return the frame return frame except OpenCvError: return None def _on_surface_disconnected(self): """ Function used to clean up any resources when the connection to surface is closed. This function should be modified with the expansion of the stream to accommodate any additional resources that must be cleaned. """ # Close the socket self._client_socket.close() # Inform that the connection has been closed print("Video stream to {} closed successfully".format( self._client_address)) def stream(self): """ Function used to run a continuous connection with the surface. Runs an infinite loop that performs re-connection to the connected client as well as exchanges data with it, via non-blocking send and receive functions. 
The data exchanged is pickled using :mod:`dill`. """ # Never stop the server once it was started while True: # Inform that the server is ready to receive a connection print("Video stream at {} is waiting for a client...".format( self._socket.getsockname())) # Wait for a connection (accept function blocks the program until a client connects to the server) self._client_socket, self._client_address = self._socket.accept() # Set a non-blocking connection to timeout on receive/send self._client_socket.setblocking(False) # Set the timeout self._client_socket.settimeout(self._TIMEOUT) # Inform that a client has successfully connected print("Client with address {} connected to video stream at {}". format(self._client_address, self._socket.getsockname())) while True: # Attempt to handle the data, break in case of errors try: self._handle_data() except self.DataError: break # Clean up self._on_surface_disconnected()
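# A minimal sketch of running the VideoStream server defined above; camera index 0
# and the default port mirror the constructor defaults, nothing here is specific
# to a real deployment.
if __name__ == "__main__":
    stream = VideoStream(camera_id=0)
    stream.stream()  # blocks forever, serving one surface client at a time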
from mind.voice import *
from mind.dialog import *
from protocol.motors import move_forward
from protocol.motors import stop_action
from cv2 import VideoCapture
from cv2 import cvtColor
from cv2 import COLOR_BGR2GRAY
from cv2 import CAP_PROP_FRAME_WIDTH
from cv2 import CAP_PROP_FRAME_HEIGHT
from cv2 import CascadeClassifier
from time import sleep

face_cascade = CascadeClassifier('assets/frontalface.xml')
cap = VideoCapture(0)
cap.set(CAP_PROP_FRAME_WIDTH, 1280)
cap.set(CAP_PROP_FRAME_HEIGHT, 720)
is_face_found = False
while True:
    ret, img = cap.read()
    if not ret:  # skip the iteration if the camera did not return a frame
        continue
    gray = cvtColor(img, COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    if len(faces) != 0:
        for (x, y, w, h) in faces:
            print('Face detected at ' + str(x) + ', ' + str(y))
        stop_action()
        if not is_face_found:
            say('Привет!')
import cv2
from cv2 import VideoCapture

cap = VideoCapture('../data/video.mp4')
print(cap.get(cv2.CAP_PROP_FPS))
# Note: CAP_PROP_FPS is effectively read-only for video files; this set() call
# does not change playback speed, the waitKey delay below does.
cap.set(cv2.CAP_PROP_FPS, 30)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # stop when the file runs out of frames
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    cv2.imshow('frame', gray)
    if cv2.waitKey(33) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
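# Because CAP_PROP_FPS cannot be changed for a file, playback speed is usually
# paced from the file's reported frame rate instead; a small sketch (the path
# is a placeholder):
import cv2

cap = cv2.VideoCapture('../data/video.mp4')
fps = cap.get(cv2.CAP_PROP_FPS) or 30        # fall back if the container reports 0
delay_ms = max(1, int(1000 / fps))           # wait roughly one frame period
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('frame', frame)
    if cv2.waitKey(delay_ms) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()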
class Robot:
    """
    The Robot class represents the *robot* agent as a whole.

    Attributes
    ----------
    _cap : VideoCapture
        OpenCV class used to read the camera stream.
    _video_on : bool
        Flag used to stop the video capture thread.
    _stop_compteur : float or None
        Last time the robot saw the line if it no longer sees it,
        or None if it currently sees the line.
    _fps_counter : float
        Keeps the current time so the elapsed time between two frames
        can be computed.
    """

    def __init__(self):
        self._cap = VideoCapture(conf("robot.device"))
        self._cap.set(CAP_PROP_FRAME_HEIGHT, conf("robot.capture.height"))
        self._cap.set(CAP_PROP_FRAME_WIDTH, conf("robot.capture.width"))
        self.init_vars()

    def init_vars(self):
        """
        Reset the *_fps_counter*, *_video_on* and *_stop_compteur* variables.
        """
        self._fps_counter = clock_gettime(CLOCK_MONOTONIC)
        self._video_on = False
        self._stop_compteur = None

    def start_video(self, analyser_virage=True):
        """
        start_video runs a loop that, as long as the private instance variable
        video_on is true, reads a frame and starts its analysis with the
        ImageClass module.

        Parameters
        ----------
        analyser_virage : bool
            If `analyser_virage` is True, the turn analysis is performed and the
            robot's autonomy is enabled; otherwise the frame is simply sent to
            the connected client without doing anything else.

        Returns
        -------
        void

        Notes
        -----
        It is recommended to run this function in parallel so as not to block
        the program.
        """
        if conf("debug.general"):
            print("start video ", self._video_on)
        while self._video_on:
            if conf("debug.FPS"):
                heure = clock_gettime(CLOCK_MONOTONIC)
                print(1 / (heure - self._fps_counter), "FPS")
                self._fps_counter = heure
            image = Image(self.lire_image())
            image.send(Serveur.VIDEO_SOCKET)
            if analyser_virage:
                self.analyser(image)
        moteur.do("stop")

    def lire_image(self):
        """
        Read a frame from the video stream.

        Returns
        -------
        ndarray
            The frame that was read.
        """
        ret, img = self._cap.read()  # Grab the stream
        if ret is False:  # In case an error occurred
            exit("Impossible de joindre la caméra")
        return img

    def start_serveur(self):
        """
        Create a server (thread) with the serveur module, start it, and join it
        (the current thread is paused until the server thread terminates).
        """
        try:
            serveur = Serveur(self)
            serveur.start()
            serveur.join()
            del (serveur)
        except OSError:
            print("Impossible de démarrer, il faut attendre...")
            for i in range(conf("network.reboot_time")):
                print("Redémarrage dans", 5 - i, "secondes")
                sleep(1)
            self.start_serveur()
        except (KeyboardInterrupt, InterruptedError):
            print("Arrêt du serveur")

    def actionOnFin(self, temps_max=conf('robot.wait_before_stop')):
        """
        Action to perform when the car does not detect any line.

        Parameters
        ----------
        temps_max : float
            Time in seconds before a full stop.

        Returns
        -------
        void
            Nothing.

        Notes
        -----
        Logic: the robot is allowed to keep driving for a while to give it a
        small chance of finding the line again. Once that time has passed, the
        robot is stopped.
        """
        maintenant = clock_gettime(CLOCK_MONOTONIC)
        if self._stop_compteur is None:
            self._stop_compteur = maintenant
        elif self._stop_compteur + temps_max > maintenant:
            if conf('debug.general'):
                print("Il reste ", temps_max + 1 - maintenant + self._stop_compteur)
        else:
            if conf('debug.general'):
                print("Arrêt du moteur")
            moteur.do("stop")
        if conf("debug.general"):
            print("Pas de ligne détéctée")

    def analyser(self, image):
        """
        Image analysis and decision making.

        Parameters
        ----------
        image: Image
            The unprocessed image.

        Returns
        -------
        void
            Nothing.
        """
        if image.isFin():
            if conf("debug.image.log"):
                image.log("isfin")
            self.actionOnFin()
        else:
            self._stop_compteur = None
            if image.isSurex():
                if conf("debug.image.surex"):
                    print("Is surex")
                if conf("debug.image.log"):
                    image.log("surex")
                image.preTraitementSurex()
            else:
                if conf("debug.image.surex"):
                    print("Not surex")
                image.preTraitementPasSurex()
            mat = SimpleImage(image.get3x3())
            angle, msg_log = prendre_decision(*mat.max_lines(), mat.mat)
            servo_dir.angle(angle)
            if conf("debug.image.log"):
                print("[{}] -> {}\t- image : {}".format(
                    msg_log, angle, image.log("MaxV3")))
            moteur.do("run")

    def __del__(self):
        # Release resources
        pass
def set_resolution(videoCaptureInstance: cv2.VideoCapture, width: int, height: int) -> tuple:
    videoCaptureInstance.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    videoCaptureInstance.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    # cv2.CV_CAP_PROP_* does not exist in the cv2 namespace; the CAP_PROP_*
    # constants are used to read back the resolution the driver actually applied.
    return str(videoCaptureInstance.get(cv2.CAP_PROP_FRAME_WIDTH)), str(
        videoCaptureInstance.get(cv2.CAP_PROP_FRAME_HEIGHT))
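# Hedged usage sketch for set_resolution() above; the camera may not support the
# requested mode, in which case the returned values differ from what was asked for.
cap = cv2.VideoCapture(0)
actual_w, actual_h = set_resolution(cap, 1280, 720)
print("Requested 1280x720, camera reports", actual_w, "x", actual_h)
cap.release()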
def draw_video(self, agent: Agent_Part, video_file: cv2.VideoCapture, video_para, traj_weights, save_path, interp=True, indexx=0): _, f = video_file.read() video_shape = (f.shape[1], f.shape[0]) frame_list = (agent.frame_list.astype(np.float)).astype(np.int) frame_list_original = frame_list if interp: frame_list = np.array( [index for index in range(frame_list[0], frame_list[-1] + 1)]) video_list = [] times = 1000 * frame_list / video_para[1] obs = self.real2pixel(agent.get_train_traj(), traj_weights) if self.social_refine: pred = self.real2pixel(agent.get_pred_traj_sr(), traj_weights) else: pred = self.real2pixel(agent.get_pred_traj(), traj_weights) # # load from npy file # pred = np.load('./figures/hotel_{}_stgcnn.npy'.format(indexx)).reshape([-1, 2]) # pred = self.real2pixel(np.column_stack([ # pred.T[0], # sr: 0,1; sgan: 1,0; stgcnn: 1,0 # pred.T[1], # ]), traj_weights) gt = self.real2pixel(agent.get_gt_traj(), traj_weights) obs_file = cv2.imread(OBS_IMAGE, -1) gt_file = cv2.imread(GT_IMAGE, -1) pred_file = cv2.imread(PRED_IMAGE, -1) fourcc = cv2.VideoWriter_fourcc(*'XVID') VideoWriter = cv2.VideoWriter(save_path, fourcc, 25.0, video_shape) for time, frame in zip(times, frame_list): video_file.set(cv2.CAP_PROP_POS_MSEC, time - 1) _, f = video_file.read() # draw observations for obs_step in range(agent.obs_length): if frame >= frame_list_original[obs_step]: f = add_png_to_source(f, obs_file, obs[obs_step]) # draw predictions if frame >= frame_list_original[agent.obs_length]: for p in pred: f = add_png_to_source(f, pred_file, p, alpha=0.5) # draw GTs for gt_step in range(agent.obs_length, agent.total_frame): if frame >= frame_list_original[gt_step]: f = add_png_to_source(f, obs_file, gt[gt_step - agent.obs_length]) video_list.append(f) VideoWriter.write(f)
class VideoClipReader(object): def __init__(self, path, fmt=None, start=1): if os.path.isdir(path): if fmt is None: try: tryfmt = autoPattern(path) if tryfmt is None: fmt = ImgVideoCapture.DEFAULT_FMT else: fmt = tryfmt except Exception as e: fmt = ImgVideoCapture.DEFAULT_FMT self.backend = ImgVideoCapture(path, fmt, start) else: self.backend = VideoCapture(path) def read(self): return self.backend.read() def isOpened(self): return self.backend.isOpened() def release(self): return self.backend.release() def get(self, code): return self.backend.get(code) def set(self, code, value): return self.backend.set(code, value) def ImgSize(self): return int(self.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)), int( self.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) def fps(self): fps = self.get(cv2.cv.CV_CAP_PROP_FPS) if math.isnan(fps): fps = 1. / self.get(cv2.cv.CV_CAP_PROP_POS_AVI_RATIO) return fps def length(self): return int(self.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) def __setattr__(self, x, y): if x == 'pos': self.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, y) else: super(VideoClipReader, self).__setattr__(x, y) def __getattribute__(self, x): if x == 'pos': return self.get(cv2.cv.CV_CAP_PROP_POS_FRAMES) else: return super(VideoClipReader, self).__getattribute__(x) def __len__(self): return int(self.length()) def __getitem__(self, ind): if ind != int(self.get(cv2.cv.CV_CAP_PROP_POS_FRAMES)): self.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, ind) s, im = self.read() if not s or im is None: raise Exception('read img error') return im
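# A small usage sketch for VideoClipReader, assuming a plain video file path and
# the OpenCV 2.x-era cv2.cv constants this class relies on; a directory of
# numbered frames would go through the ImgVideoCapture backend instead.
reader = VideoClipReader("clip.avi")
print("frames:", len(reader), "size:", reader.ImgSize(), "fps:", reader.fps())
middle_frame = reader[len(reader) // 2]   # random access via __getitem__
reader.release()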
def setFramePos(video: cv2.VideoCapture, pos): video.set(cv2.CAP_PROP_POS_FRAMES, pos)
def get_frame(videoPath, svPath, pic_cnt=10):
    cap = VideoCapture(videoPath)
    """
    The integer passed to the get() method selects one of the following
    properties (indices start at 0):
    CV_CAP_PROP_POS_MSEC       Current position of the video file in milliseconds, or capture timestamp.
    CV_CAP_PROP_POS_FRAMES     0-based index of the frame to be decoded/captured next.
    CV_CAP_PROP_POS_AVI_RATIO  Relative position in the file: 0 - start of the movie, 1 - end of the movie.
    CV_CAP_PROP_FRAME_WIDTH    Width of the frames in the video stream.
    CV_CAP_PROP_FRAME_HEIGHT   Height of the frames in the video stream.
    CV_CAP_PROP_FPS            Frame rate.
    CV_CAP_PROP_FOURCC         4-character code of the codec.
    CV_CAP_PROP_FRAME_COUNT    Number of frames in the video file.
    CV_CAP_PROP_FORMAT         Format of the Mat objects returned by retrieve().
    CV_CAP_PROP_MODE           Backend-specific value indicating the current capture mode.
    CV_CAP_PROP_BRIGHTNESS     Brightness of the image (cameras only).
    CV_CAP_PROP_CONTRAST       Contrast of the image (cameras only).
    CV_CAP_PROP_SATURATION     Saturation of the image (cameras only).
    CV_CAP_PROP_HUE            Hue of the image (cameras only).
    CV_CAP_PROP_GAIN           Gain of the image (cameras only).
    CV_CAP_PROP_EXPOSURE       Exposure (cameras only).
    CV_CAP_PROP_CONVERT_RGB    Boolean flag indicating whether images should be converted to RGB.
    CV_CAP_PROP_WHITE_BALANCE_U  U value of the white-balance setting (currently DC1394 v2.x backend only).
    CV_CAP_PROP_WHITE_BALANCE_V  V value of the white-balance setting (currently DC1394 v2.x backend only).
    CV_CAP_PROP_RECTIFICATION  Rectification flag for stereo cameras (currently DC1394 v2.x backend only).
    CV_CAP_PROP_ISO_SPEED      ISO speed of the camera (currently DC1394 v2.x backend only).
    CV_CAP_PROP_BUFFERSIZE     Number of frames stored in the internal buffer (currently DC1394 v2.x backend only).
    """
    try:
        # Get the total number of frames in the video
        frame_cnt = cap.get(CAP_PROP_FRAME_COUNT)
        if int(pic_cnt) > int(frame_cnt):
            pic_cnt = int(frame_cnt)
        numFrame = 1  # how many images to capture in total
        # How many frames to skip between two captured images
        betweenFrame = frame_cnt // pic_cnt
        # List of saved screenshots
        images = []
        for i in range(pic_cnt):
            cap.set(CAP_PROP_POS_FRAMES, numFrame)
            flag, frame = cap.read()
            if flag:
                # cv2.imshow('frame', frmae)
                numFrame += betweenFrame
                # newPath = svPath + str(numFrame) + ".jpg"
                newPath = svPath + str(i) + ".jpg"
                frame = resize_pic(frame, (256, 256))
                imencode('.jpg', frame)[1].tofile(newPath)
                images.append(newPath)
            else:
                break
        # if cap.grab():
        #     flag, frame = cap.retrieve()
        #     if not flag:
        #         continue
        #     else:
        #         # cv2.imshow('video', frame)
        #         # numFrame += int(frame_cnt // pic_cnt)  # step
        #         numFrame += 1  # step
        #         newPath = svPath + str(numFrame) + ".jpg"
        #         cv2.imencode('.jpg', frame)[1].tofile(newPath)
        # if cv2.waitKey(10) == 27:
        #     break
        return images
    except Exception as bug:
        print(bug)
        return None
    finally:
        cap.release()
def _load_cuboid(self, cuboid: tuple, prep_fn=None): """Loads a cuboid from its video frames files """ fname = cuboid[0] frames_range = cuboid[1:-1] stride = cuboid[-1] cuboid_data = None n_frames = frames_range[0] # Number of readen frames # Load the desired video frames if (self.is_shuffled() or self.__cap_opened is None or self.__cap_opened[0] != fname or frames_range[0] != self.__cap_opened[2]+1): if self.__cap_opened is not None: self.__cap_opened[1].release() vid = VideoCapture(fname) if not vid.isOpened(): raise ValueError('Cannot load frames {} from video '\ 'file {}'.format(frames_range, fname)) vid.set(1, frames_range[0]) # Store the Video Capture for retrieval of next cuboid belonging # to the same video if not self.is_shuffled(): self.__cap_opened = [fname, vid, frames_range[1]] else: vid = self.__cap_opened[1] self.__cap_opened[2] = frames_range[1] # Update the last frame taken for i in range(self.cub_frames): if n_frames <= frames_range[1]: #readen # Take consecutive frames separated by the stride ret, fr = vid.read() if not ret: raise ValueError('Failed to load frame {} from video '\ 'file {}'.format(i, fname)) n_frames += 1 # Apply preprocessing function if specified if prep_fn: fr = prep_fn(fr) # Add the frame readen to the cuboid if cuboid_data is None: cuboid_data = np.zeros((self.cub_frames, *fr.shape), dtype=fr.dtype) cuboid_data[i] = fr # Exclude the next middle frames if stride for _ in range(stride-1): if n_frames <= frames_range[1]: ret = vid.grab() n_frames += 1 else: # Repeat the last frame if a complete cuboid cannot be taken # from the remained video frames cuboid_data[i] = cuboid_data[i-1] # Check loaded images have the same format #if frames and (frames[-1].shape != fr.shape or # frames[-1].dtype != fr.dtype): # raise ValueError('Differents sizes or types for images loaded'\ # ' detected for image "{}"'.format(fn)) # Add the remaind cuboid and fill it by repetitions of last frame # until make the cuboid with the derired number of frames #if len(frames) < self.cub_frames: # rep = len(frames) - self.cub_frames # frames.extend([frames[-1]]*rep) #frames = np.array(frames) #vid.release() return cuboid_data
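# The _load_cuboid() method above keeps one frame and then discards stride-1
# frames with grab(); the same pattern in isolation might look like the sketch
# below (function name, file path and parameters are placeholders, not the
# class's API).
import numpy as np
from cv2 import VideoCapture

def read_strided_clip(path, start, n_frames, stride):
    cap = VideoCapture(path)
    cap.set(1, start)                    # property 1 == CAP_PROP_POS_FRAMES
    frames = []
    for _ in range(n_frames):
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(frame)
        for _ in range(stride - 1):      # cheap skip: grab() decodes without retrieving
            cap.grab()
    cap.release()
    return np.stack(frames) if frames else None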
class TkPlayer():
    def __init__(self, path):
        self.path = path
        # If no path is set, do not run anything further.
        if self.path == "":
            self.fps = 1
            return
        if path[-3:] in ["mp4", "avi"]:
            self.mode = "video"
            self.video = VideoCapture(path)
            if not self.video.isOpened():
                self.path = ""
                messagebox.showwarning("FreedomWall", f"{path}\nのファイルが開けないため壁紙を貼り付ける事ができませんでした。")
                return
            self.height = self.video.get(CAP_PROP_FRAME_HEIGHT)
            self.width = self.video.get(CAP_PROP_FRAME_WIDTH)
            self.fps = self.video.get(CAP_PROP_FPS)
        else:
            self.mode = "picture"
            try:
                self.picture = Image.open(path)
            except:
                self.path = ""
                messagebox.showwarning("FreedomWall", f"{path}\nのファイルが開けないため壁紙を貼り付ける事ができませんでした。")
                return
            # PIL's Image.size is (width, height), so unpack in that order.
            self.width, self.height = self.picture.size
            self.fps = 1

    # Get the current frame.
    def get_frame(self, height, width):
        # Do nothing if no path is set.
        if self.path == "":
            return None
        # If it is a picture, return the picture.
        if self.mode == "picture":
            return ImageTk.PhotoImage(image=self.picture.resize((width, height)))
        # If it is a video, return the next video frame.
        if self.video.isOpened():
            ret, frame = self.video.read()
            # If the end was reached, rewind to the start and read again.
            if not ret:
                self.video.set(CAP_PROP_POS_FRAMES, 0)
                ret, frame = self.video.read()
            if ret:
                return ImageTk.PhotoImage(image=Image.fromarray(cvtColor(frame, COLOR_BGR2RGB)).resize((width, height)))
            else:
                return None
        else:
            return None

    # Release resources.
    def __del__(self):
        if self.path == "":
            return
        if self.mode == "video":
            if self.video.isOpened():
                self.video.release()
class StreamReader: def __init__(self, index: int = 0, log_handlers: [StreamHandler] = None): self._logger = getLogger(__name__) if log_handlers: for h in log_handlers: self._logger.addHandler(h) self._logger.debug("Initializing") self.index = index self._tracker = FPSTracker() self._internal_frame_q = InternalFrameQueue() self._running = TEvent() self._running.clear() self._lock = Lock() self._closing_flag = Event() self._timeout_limit = 0 self._stream = VideoCapture(index, defs.cap_backend) self.set_resolution(self.get_resolution()) self._fps_test_status = 0 self._fps_target = 30 self._spf_target = 1 / self._fps_target self._buffer = 5 self._use_limiter = False self._finalized = False self._end = TEvent() self._end.clear() self._err_event = Event() self._num_frms = 0 self._start = time() self._loop = get_event_loop() self._logger.debug("Initialized") def cleanup(self) -> None: """ Cleanup this object and prep for app closure. :return None: """ self._closing_flag.set() self._end.set() self.stop_reading() self._stream.release() def _calc_timeout(self) -> None: """ Get small sample of reads and pick longest read time for timeout. :return None: """ tries = list() for i in range(10): start = time() self._stream.read() # Prime camera for reading. end = time() tries.append(end - start) self._timeout_limit = max( tries) + 1.5 # + 1.5 to handle possible lag spikes. def _finalize(self) -> None: self._calc_timeout() self._loop.run_in_executor(None, self._read_cam) self._finalized = True def start_reading(self) -> None: """ Reset internal frame queue so we have no leftover frames and run read_cam in thread. :return None: """ with self._lock: if not self._finalized: self._finalize() if not self._running.is_set(): self._tracker.reset() self._internal_frame_q.reset_q() self._num_frms = 0 self._start = time() self._running.set() def await_err(self) -> futures: """ Signal when there is a connect event. :return futures: If the flag is set. """ return await_event(self._err_event) def stop_reading(self) -> None: """ End any looping tasks. :return None: """ with self._lock: self._running.clear() def _read_cam(self) -> None: """ Continuously check camera for new frames and put into queue. :return None: """ while not self._end.is_set(): if self._running.is_set(): ret, frame, dt = self._get_a_frame() if not ret: break self._tracker.update_fps() metric = (time() - self._start) // self._spf_target frm_diff = int(metric - self._num_frms) if frm_diff > 0: self._internal_frame_q.add_to_q((frame, dt, frm_diff)) self._num_frms += frm_diff elif frm_diff > -self._buffer: self._internal_frame_q.add_to_q((frame, dt, 1)) self._num_frms += 1 else: tsleep(.1) def _get_a_frame(self) -> (bool, ndarray, datetime): """ Helper function for self._read_cam. Raise error event if camera fails. :return bool: If frame was read successfully. """ s = time() ret, frame = self._stream.read() e = time() dt = datetime.now() time_taken = e - s timeout = time_taken > self._timeout_limit if not ret or frame is None or timeout: self._logger.warning( "cam_stream_reader.py _read_cam(): Camera failed. " + "ret: " + str(ret) + ". Frame is None: " + str(frame is None) + ". Time taken: " + str(time_taken)) self._loop.call_soon_threadsafe(self._err_event.set) return False, None, None return True, frame, dt def get_fps_setting(self) -> float: """ Return the current fps limit for this camera. :return int: The current fps limit. """ return self._fps_target def get_fps_actual(self) -> int: """ Return the current fps this camera is reading at. 
:return int: The current fps. """ return self._tracker.get_fps() def set_fps(self, new_fps: float) -> None: """ Set simulated read speed of this camera. Reader will still read from camera at max rate. :param new_fps: The new simulated fps. :return None: """ with self._lock: self._running.clear() tsleep(.05) self._fps_target = new_fps self._spf_target = 1 / self._fps_target self._buffer = new_fps // 6 self._start = time() self._num_frms = 0 self._running.set() def get_next_new_frame(self) -> (bool, (ndarray, datetime, int)): """ Get next frame from queue if it exists and return it, else return None. :return (bool, (ndarray, datetime)): (Whether there is a frame, (frame/None, datetime/None)) """ self._logger.debug("running") ret = self._internal_frame_q.get_from_q() if ret is not None: self._logger.debug("done with next element") return True, ret self._logger.debug("done with None") return False, None def test_resolution(self, size: (float, float)) -> (bool, (float, float)): """ Test given frame size to see if camera supports it. :param size: The size to test. :return (bool, (float, float)): Whether test succeeded and what the resultant resolution is. """ ret1 = self._stream.set(CAP_PROP_FRAME_WIDTH, size[0]) ret2 = self._stream.set(CAP_PROP_FRAME_HEIGHT, size[1]) ret, frame = self._stream.read() y, x = frame.shape[0], frame.shape[1] if ret1 and ret2 and size[0] == x and size[1] == y: return True, size else: return False, (self._stream.get(CAP_PROP_FRAME_WIDTH), self._stream.get(CAP_PROP_FRAME_HEIGHT)) def get_resolution(self) -> (float, float): """ :return (float, float): The current camera frame size. """ return self._stream.get(CAP_PROP_FRAME_WIDTH), self._stream.get( CAP_PROP_FRAME_HEIGHT) def set_resolution(self, size: (float, float)) -> None: """ Handle changing frame size on this camera. :param size: The new frame size to use. :return None: """ with self._lock: self._internal_frame_q.reset_q() self._set_fourcc() self._set_resolution(size) def _set_resolution(self, size: (float, float)) -> None: """ Set camera frame size to new size. :param size: The new size to use. :return none: """ self._stream.set(CAP_PROP_FRAME_WIDTH, size[0]) self._stream.set(CAP_PROP_FRAME_HEIGHT, size[1]) def _set_fourcc(self) -> None: """ Reset fourcc on this camera. Generally done after changing frame size. :return None: """ self._stream.set(CAP_PROP_FOURCC, defs.cap_temp_codec) self._stream.set(CAP_PROP_FOURCC, defs.cap_codec) def get_fps_status(self) -> int: """ :return int: The current percentage of testing done. """ return self._fps_test_status async def calc_max_fps(self, res_to_test: (float, float), num_reads: int = 240) -> int: """ Calculate this camera's actual max fps. :return int: The max supported fps. """ self._logger.debug("running") self._fps_test_status = 0 cur_res = self.get_resolution() self.set_resolution(res_to_test) self._stream.read() divisor = num_reads / 100 s = time() for i in range(num_reads): if self._closing_flag.is_set(): return 0 self._stream.read() self._fps_test_status = int(i / divisor) await asyncsleep(0) e = time() self.set_resolution(cur_res) time_taken = e - s if time_taken > 0: ret = round(num_reads / time_taken) self.set_fps(ret) else: ret = -1 self._logger.debug("done with: " + str(ret)) return ret
def get_frames(start: int, number_of_frames_to_read: int, video: cv.VideoCapture, multichannel: bool = True, downscale: bool = False, verbose: int = 0) -> List[np.ndarray]: """Gets frames from video and returns them in a list. Gets number_of_frames_to_read number of frames starting from start from video and returns them in a list. The frames can be in colour or greyscale, and can optionally be downscaled to 480p. Can print detailed information on the process. Args: start: An int representing the frame number to start from. number_of_frames_to_read: An int representing the number of frames to read. video: An open OpenCV video capture to read frames from. multichannel: A bool for selecting to extract colour or greyscale frames. downscale: A bool for selecting to downscale extracted frames to 480p. verbose: An int controlling the printing of detailed information. If verbose >= 1 and downscale == True, the function prints how many frames are being resized and how many threads are used to do the work. If verbose >= 1 the function prints a notice if the number of frames available after start is lower than number_of_frames_to_read. Returns: A list of ndarrays with length equal to number_of_frames_to_read, containing frames from video, starting from frame number start. If the number of available frames after start is lower than number_of_frames_to_read the returned list will only contain so many frames as is available. If verbose is grater tha or equal to 1, a notice about this is printed. Notes: Uses at least 4 threads, but as most as many as the number of available logical processors, to resize images if downscale is enabled. """ # Init return variable out: List[np.ndarray] = [] # Jump to start frame video.set(cv.CAP_PROP_POS_FRAMES, start) # Read frames from the file, # if reading the frame is successful append it to the list, else stop reading frames for x in range(number_of_frames_to_read): success, frame = video.read() if success: if multichannel: out.append(frame) else: out.append(cv.cvtColor(frame, cv.COLOR_BGR2GRAY)) else: if verbose >= 1: print( "Only", len(out), "frames read, not enough frames in video after frame number", start) break if downscale: # Try to set number of jobs to the number of available CPUs. # If os.cpu_count() failed and returned None, # default to 4 jobs, as that's good enough number_of_jobs: int = os.cpu_count() if not number_of_jobs: number_of_jobs = 4 if verbose >= 1: print("Resizing", len(out), "frames, using", number_of_jobs, "threads...") with Parallel(n_jobs=number_of_jobs, prefer="threads") as parallel: resized_out = (parallel( delayed(resize_image)(frame, 480) for frame in out)) return resized_out else: return out
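# Possible call of get_frames() above: grab 60 greyscale, downscaled frames
# starting at frame 300. The clip name is a placeholder, and cv is assumed to be
# the cv2 alias used by the function itself.
video = cv.VideoCapture("clip.mp4")
frames = get_frames(300, 60, video, multichannel=False, downscale=True, verbose=1)
video.release()
print(len(frames), "frames loaded")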
def video_jump(video_cap: cv2.VideoCapture, frame_id: int): video_cap.set(cv2.CAP_PROP_POS_FRAMES, frame_id - 1)
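# video_jump() sets CAP_PROP_POS_FRAMES to frame_id - 1, i.e. it treats frame_id
# as 1-based: the very next read() then decodes that frame. A short hedged sketch
# (the path is a placeholder):
cap = cv2.VideoCapture("clip.mp4")
video_jump(cap, 120)
ok, frame = cap.read()    # the 120th frame, counting from 1
cap.release()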
from cv2 import VideoCapture, resize, flip import asciify cam = VideoCapture(0) cam.set(3, 180) cam.set(4, 240) while True: s, img = cam.read() if s: img = flip(resize(img, (240, 87)), 1) asciify.asciify(img) cam.release()
class Camera_Capture(object): """docstring for uvcc_camera""" def __init__(self, cam, size=(640, 480), fps=30): self.src_id = cam.src_id self.uId = cam.uId self.name = cam.name self.controls = Controls(self.uId) try: self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0) except KeyError: pass self.capture = VideoCapture(self.src_id) self.set_size(size) def re_init(self, cam, size=(640, 480), fps=30): self.src_id = cam.src_id self.uId = cam.uId self.name = cam.name self.controls = Controls(self.uId) try: self.controls["UVCC_REQ_FOCUS_AUTO"].set_val(0) except KeyError: pass self.capture = VideoCapture(self.src_id) self.set_size(size) # recreate the bar with new values bar_pos = self.bar._get_position() self.bar.destroy() self.create_atb_bar(bar_pos) def re_init_cam_by_src_id(self, src_id): try: cam = Camera_List()[src_id] except KeyError: logger.warning("could not reinit capture, src_id not valid anymore") return self.re_init(cam, self.get_size()) def get_frame(self): s, img = self.capture.read() timestamp = time() return Frame(timestamp, img) def set_size(self, size): width, height = size self.capture.set(3, width) self.capture.set(4, height) def get_size(self): return self.capture.get(3), self.capture.get(4) def set_fps(self, fps): self.capture.set(5, fps) def get_fps(self): return self.capture.get(5) def create_atb_bar(self, pos): # add uvc camera controls to a separate ATB bar size = (200, 200) self.bar = atb.Bar( name="Camera_Controls", label=self.name, help="UVC Camera Controls", color=(50, 50, 50), alpha=100, text="light", position=pos, refresh=2.0, size=size, ) sorted_controls = [c for c in self.controls.itervalues()] sorted_controls.sort(key=lambda c: c.order) cameras_enum = atb.enum("Capture", dict([(c.name, c.src_id) for c in Camera_List()])) self.bar.add_var("Capture", vtype=cameras_enum, getter=lambda: self.src_id, setter=self.re_init_cam_by_src_id) for control in sorted_controls: name = control.atb_name if control.type == "bool": self.bar.add_var(name, vtype=atb.TW_TYPE_BOOL8, getter=control.get_val, setter=control.set_val) elif control.type == "int": self.bar.add_var(name, vtype=atb.TW_TYPE_INT32, getter=control.get_val, setter=control.set_val) self.bar.define(definition="min=" + str(control.min), varname=name) self.bar.define(definition="max=" + str(control.max), varname=name) self.bar.define(definition="step=" + str(control.step), varname=name) elif control.type == "menu": if control.menu is None: vtype = None else: vtype = atb.enum(name, control.menu) self.bar.add_var(name, vtype=vtype, getter=control.get_val, setter=control.set_val) if control.menu is None: self.bar.define(definition="min=" + str(control.min), varname=name) self.bar.define(definition="max=" + str(control.max), varname=name) self.bar.define(definition="step=" + str(control.step), varname=name) else: pass if control.flags == "inactive": pass # self.bar.define(definition='readonly=1',varname=control.name) self.bar.add_button("refresh", self.controls.update_from_device) self.bar.add_button("load defaults", self.controls.load_defaults) return size def kill_atb_bar(self): pass def close(self): self.control = None logger.info("Capture released") pass
class Camera(object):
    '''
    Communicate with the camera.

    Class governing the communication with the camera.

    Parameters
    -----------
    camera : int
        the index of the camera, best taken from func lookForCameras,
        from eyetracker.camera.capture
    dic : dic{propID  value}
        to check corresponding propIDs check opencv documentation under the
        term VideoCapture. They will be set in the moment of object creation.

    Defines
    --------
    self.camera : index of the camera
    self.cap : capturing object
    self.frame : returns a frame from camera
    self.close : closes cap
    self.reOpen : reopens cap
    '''
    def __init__(self, camera, dic=None):
        self.camera = int(camera)
        self.cap = VideoCapture(self.camera)
        if dic:
            # dict.iteritems() is Python 2 only; items() works on Python 3.
            for propID, value in dic.items():
                self.cap.set(propID, value)
        first_frame = self.frame()

    def frame(self):
        '''
        Read frame from camera.

        Returns
        --------
        frame : np.array
            frame from camera
        '''
        # isOpened is a method; without the call the check was always truthy.
        if self.cap.isOpened():
            return self.cap.read()[1]
        else:
            print('Cap is not opened.')
            return None

    def set(self, **kwargs):
        '''
        Set camera parameters.

        Parameters
        -----------
        kwargs : {propID : value}
        '''
        for propID, value in kwargs.items():
            self.cap.set(propID, value)

    def close(self):
        '''
        Closes cap, you can reopen it with self.reOpen.
        '''
        self.cap.release()

    def reOpen(self, cameraIndex):
        '''
        Reopens cap.
        '''
        self.cap.open(self.camera)
        first_frame = self.frame()
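# Hedged usage sketch for the Camera wrapper above; cv2 is imported here only to
# supply CAP_PROP_* constants for the optional dic argument.
import cv2

cam = Camera(0, dic={cv2.CAP_PROP_FRAME_WIDTH: 640, cv2.CAP_PROP_FRAME_HEIGHT: 480})
img = cam.frame()    # single BGR frame as an ndarray, or None on failure
cam.close()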
class CameraConnector(object): endpoint = None # Server identity/endpoint identity = None alive = True # 1 if known to be alive ping_at = 0 # Next ping at this time expires = 0 # Expires at this time def __init__(self, ctx, identity, camera, **kwargs): self.thread_read = None self.identity = identity self.camera = camera self.endpoint = camera.get("endpoint") self.settings = camera.get("settings") self.logger = logging.getLogger(camera.get("name")) if isinstance(self.endpoint, str) and self.endpoint.isnumeric(): self.endpoint = int(self.endpoint) if self.endpoint == "-1": self.endpoint = -1 self.ctx = ctx self.create_camera() try: self.redirect_stderr() except: pass def redirect_stderr(self): libc = ctypes.CDLL(None) c_stderr = ctypes.c_void_p.in_dll(libc, 'stderr') original_stderr_fd = sys.stderr.fileno() f = open(os.devnull, 'w') to_fd = f.fileno() libc.fflush(c_stderr) sys.stderr.close() os.dup2(to_fd, original_stderr_fd) sys.stderr = os.fdopen(original_stderr_fd) def create_camera(self): self.logger.debug("CREATE CAMERA HERE") self.video = VideoCapture(self.endpoint) videosettings = self.settings.get("video", {}) fourcc = videosettings.get("CAP_PROP_FOURCC") if fourcc == None: fourcc = 'MJPG' else: del videosettings["CAP_PROP_FOURCC"] if len(fourcc) == 4: self.video.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fourcc)) # del videosettings["CAP_PROP_FOURCC"] for prop, val in videosettings.items(): if prop.startswith('CAP_PROP') and val != "": self.set_capture_prop(prop, val) time.sleep(0.5) if not self.video.isOpened(): self.video.release() self.video = None raise Exception( f"Could Not Open Video with Endpoint {self.endpoint}") def set_capture_prop(self, prop, val): try: self.video.set(getattr(cv2, prop), val) except: pass def list_supported_capture_properties(self): """ List the properties supported by the capture device. 
""" supported = list() for attr in dir(cv2): if attr.startswith('CAP_PROP'): supported.append(attr) return supported def run(self): self.alive = True if not self.thread_read or not self.thread_read.isAlive(): self.thread_read = threading.Thread(target=self.reader, args=(self.ctx, )) self.thread_read.daemon = True # self.thread_read.name = 'camera->reader' self.thread_read.start() def reader(self, ctx): device_collector = ctx.socket(zmq.PUSH) device_collector.connect(f"inproc://{self.identity}_collector") i = 0 retry = 0 max_retry_cnt = 10 while self.alive: try: res = self.video.read() if not res: if retry < max_retry_cnt: retry += 1 time.sleep(1) continue print(f'1 CAMERA DISCONNECTED retrycnt {retry}', flush=True) raise Exception("1 Camera Disconnected") ret, frame = res if ret: i += 1 retry = 0 # print(f'frame = {frame}') # publisher.send_multipart([self.identity, frame]) device_collector.send_multipart([ self.identity + b'.data_received', f'{i}'.encode('ascii'), pickle.dumps(frame, -1) ]) else: if retry < max_retry_cnt: retry += 1 time.sleep(0.5) continue print(f'CAMEA DISCONNECTED retrycnt {retry}', flush=True) raise Exception("Camera Disconnected") # device_collector.send(self.identity + b'.data_received', zmq.SNDMORE) # device_collector.send(f'{i}'.encode('ascii'), zmq.SNDMORE) # device_collector.send_pyobj(frame) # else: # if not self.video.isOpened(): # print("VIDEO IS NOT OPENED", flush=True) # time.sleep(2) # print('Sent frame {}'.format(i)) except Exception as e: print(f'Exception with Camera Driver: {str(e)}', flush=True) device_collector.send_multipart([ self.identity + b'.data_received', b'error', str(e).encode('ascii') ]) self.alive = False self.close_video() break device_collector.close() device_collector = None self.alive = False def open(self): try: if not self.video or not self.video.isOpened(): self.create_camera() except Exception as e: print(f'Serial Open Exception {str(e)}') def close_video(self): try: print("CLOSE VIDEO", flush=True) if self.video: self.video.release() time.sleep(0.3) self.video = None except Exception as e: print(f"Serial close {str(e)}", flush=True) def close(self): """Stop copying""" self.alive = False if self.thread_read: res = self.thread_read.join(2.0) if not self.thread_read.isAlive(): self.thread_read = None self.close_video()
def select_referene_length( movie: str, frames: int, cap: cv2.VideoCapture, ) -> Optional[float]: """select(get) reference length using GUI window Args: movie (str): movie file name frames (int): total frame of movie cap (cv2.VideoCapture): cv2 video object Returns: Optional[float]: mm per pixel """ cv2.namedWindow(movie, cv2.WINDOW_NORMAL) cv2.resizeWindow(movie, 500, 700) help_exists = False division = 100 tick = division if division < frames else frames tick_s = (int)(frames / division) + 1 cv2.createTrackbar("frame\n", movie, 0, tick - 1, no) cv2.createTrackbar("frame s\n", movie, 0, tick_s, no) cv2.createTrackbar("mm/line\n", movie, 0, 100, no) points: List[Tuple[int, int]] = [] warning_message: List[str] = [] cv2.namedWindow(movie, cv2.WINDOW_NORMAL) cv2.setMouseCallback(movie, mouse_on_select_positions, points) line_color = (255, 255, 255) print("--- measure ---") print("select line mm/pixel in GUI window!") print( "(s: save if selected, h:on/off help, c: clear, click: select, q/esc: abort)" ) while True: frame = cv2.getTrackbarPos("frame\n", movie) * tick_s frame_s = cv2.getTrackbarPos("frame s\n", movie) frame_now = frame + frame_s if frame + frame_s < frames else frames mm_per_line = cv2.getTrackbarPos("mm/line\n", movie) if mm_per_line == 0: mm_per_line = 1 cap.set(cv2.CAP_PROP_POS_FRAMES, frame_now) ret, img = cap.read() if len(points) == 1: cv2.drawMarker(img, points[0], line_color, markerSize=10) elif len(points) == 2: cv2.drawMarker(img, points[0], line_color, markerSize=10) cv2.drawMarker(img, points[1], line_color, markerSize=10) cv2.line(img, points[0], points[1], line_color, 2) elif len(points) == 3: points.clear() warning_message = ["3rd is not accepted"] if help_exists: add_texts_lower_right( img, [ "s:save if selected", "h:on/off help", "c:clear", "click:select", "q/esc:abort", ], ) add_texts_upper_left( img, [ "[measure]", "select line and mm/pixel", "frame: {0}".format(frame_now), "mm/line: {0}".format(mm_per_line), ], ) add_texts_lower_left(img, warning_message) cv2.imshow(movie, img) k = cv2.waitKey(1) & 0xFF if k == ord("s"): if len(points) == 2: cv2.destroyAllWindows() reference_length = math.sqrt((points[1][0] - points[0][0])**2 + (points[1][1] - points[0][1])**2) print("'s' is pressed. mm/pixel is saved ({0:.2f})".format( mm_per_line / reference_length)) return mm_per_line / reference_length else: print("line is not selected yet") warning_message = ["not selected yet"] continue elif k == ord("c"): print("'c' is pressed. selected points are cleared") points.clear() warning_message = ["cleared"] continue elif k == ord("h"): if help_exists: help_exists = False else: help_exists = True continue elif k == ord("q"): cv2.destroyAllWindows() print("'q' is pressed. abort") return None elif k == 27: cv2.destroyAllWindows() print("'Esq' is pressed. abort") return None
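# Possible way to drive select_referene_length() above: open the movie, read its
# frame count, and let the user mark a line of known physical length in the GUI.
# The path is a placeholder, and the helper functions the routine references are
# assumed to be importable alongside it.
movie = "experiment.mp4"
cap = cv2.VideoCapture(movie)
frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
mm_per_pixel = select_referene_length(movie, frames, cap)
cap.release()
if mm_per_pixel is not None:
    print("scale: {0:.3f} mm/pixel".format(mm_per_pixel))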