def gif_from_out_avi(self, out_file=None, color=False, brightness=100,
                     saturation=100, hue=100, delay=10, fuzz="4%",
                     layers="OptimizeTransparency", flush_map=True):
    """
    Call ImageMagick's `convert` from shell to create a GIF from `out_avi`
    """
    #clear tmp folder, for low memory environments:
    #DANGEROUS!
    #subprocess.call(['sudo', 'rm', '-r', '/tmp'])
    if not out_file:
        out_file = self.out_gif
    if not color:
        saturation = 0

    #values associated with `-modulate`; build a list (not map()) so it can
    #be iterated more than once under Python 3
    bsh = [str(v) for v in (brightness, saturation, hue)]

    try:
        if os.path.getsize(self.out_avi) < 6000:
            raise cv2.error("Didn't write any frames to AVI. "
                            "Wrong crop-size? Wrong codec?")
    except os.error:
        raise cv2.error("Temp AVI doesn't exist!")

    self.debug("Writing to " + out_file + "...")

    command = ['convert']
    if delay > 0:
        command.extend(['-delay', str(delay)])
    command.append(self.out_avi)
    if not all([v == '100' for v in bsh]):
        command.extend(['-modulate', ",".join(bsh)])
    if fuzz:
        command.extend(['-fuzz', str(fuzz)])
    if layers:
        command.extend(['-layers', str(layers)])
    if flush_map:
        command.extend(['+map'])
    command.append(out_file)

    subprocess.call(command)
    self.log("Wrote to " + out_file)
    return self
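# For reference, with the defaults above and a hypothetical out.avi/out.gif
# pair, the assembled shell command comes out roughly as:
#   convert -delay 10 out.avi -modulate 100,0,100 -fuzz 4% \
#       -layers OptimizeTransparency +map out.gif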
def frame_to_output(self, color=True, frame=-1, use_roi=False, roi_rect=None):
    """
    Write current frame (or specified `frame`) to `CvVideo.output` buffer
    Optionally in grayscale and/or cropped to `roi_rect`
    """
    if not self.output:
        raise cv2.error("No output stream for writing!")
    if use_roi and not roi_rect:
        roi_rect = self.roi_default

    #Target specific frame?
    if frame >= 0:
        self.frame = frame
        self.read()

    #dump output frames
    #cv2.imwrite('dump/'+self.out_base + str(int(frame))+'.png', self.roi)

    if use_roi and not color:
        self.output.write(self.get_roi(False, roi_rect))
    elif use_roi and color:
        self.output.write(self.get_roi(True, roi_rect))
    elif not color:
        self.output.write(self.gray)
    else:
        self.output.write(self.img)
    return self
def _oneFrame(self):
    ret, self.frame = self.video.read()
    if not ret:
        raise cv2.error('No more frames')
    #w = int(self.dw)
    #h = int(self.dh)
    cv2.imshow('Video', self.frame)
def template_check(self, templates=None, threshold=0.84,
                   method=cv2.TM_CCOEFF_NORMED, use_roi=False, roi_rect=None):
    """
    Cycle through each image in `templates` performing `cv2.matchTemplate`
    until match found (or return False)
    """
    #TODO: Enable checking against min_val for alt method
    if templates is None and hasattr(self, 'templates'):
        templates = self.templates
    elif templates is None:
        raise cv2.error("No template(s) to match against!")

    roi_rect = roi_rect if roi_rect else self.roi_default
    target = self.get_roi(False, roi_rect) if use_roi else self.gray

    #dump checked frames
    #cv2.imwrite('dump/'+ self.out_base + '/' +
    #            str(int(self.frame)) + '.png', target)

    for label, template in templates:
        res = cv2.matchTemplate(target, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if max_val >= threshold:
            #cv2.imwrite('dump/'+ self.out_base + '/' +
            #            str(int(self.frame)) + '-found.png', target)
            self.template_found = label
            #self.debug("max_val for %s was %f" % (label, max_val))
            return True
    return False
def kmeans_periodic(columns, intervals, data, *args, **kwargs):
    """Runs kmeans with periodicity in a subset of dimensions.

    Transforms columns with periodicity on the specified intervals into two
    columns with coordinates on the unit circle for kmeans. After running
    through kmeans, the centers are transformed back to the range specified
    by the intervals.

    Arguments
    ---------
    columns : sequence
        Sequence of indexes specifying the columns that have periodic data
    intervals : sequence of length-2 sequences
        Sequence of (min, max) intervals, one interval per column

    See help(cv2.kmeans) for all other arguments, which are passed through.

    Returns
    -------
    See help(cv2.kmeans) for outputs, which are passed through; except
    centers, which is modified so that it returns centers corresponding to
    the input data, instead of the transformed data.

    Raises
    ------
    cv2.error
        If len(columns) != len(intervals)
    """
    # Check each periodic column has an associated interval
    if len(columns) != len(intervals):
        raise cv2.error(
            "number of intervals must be equal to number of columns")
    ndims = data.shape[1]
    ys = []
    # transform each periodic column into two columns with the x and y
    # coordinates of the angles for kmeans; x coord at original column,
    # ys are appended
    for col, interval in zip(columns, intervals):
        a, b = min(interval), max(interval)
        width = b - a
        data[:, col] = TWO_PI * (data[:, col] - a) / width % TWO_PI
        ys.append(width * np.sin(data[:, col]))
        data[:, col] = width * np.cos(data[:, col])
    # append the ys to the end
    ys = np.array(ys).transpose()
    data = np.hstack((data, ys)).astype(np.float32)
    # run kmeans
    retval, bestLabels, centers = cv2.kmeans(data, *args, **kwargs)
    # transform the centers back to range they came from
    for i, (col, interval) in enumerate(zip(columns, intervals)):
        a, b = min(interval), max(interval)
        angles = np.arctan2(centers[:, ndims + i], centers[:, col]) % TWO_PI
        centers[:, col] = a + (b - a) * angles / TWO_PI
    centers = centers[:, :ndims]
    return retval, bestLabels, centers
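# A minimal usage sketch for kmeans_periodic (not part of the original
# module). It assumes TWO_PI = 2 * np.pi is defined at module level, as the
# function expects, and clusters points whose second column is an angle in
# degrees, periodic on (0, 360):
#
#   import numpy as np
#   import cv2
#
#   TWO_PI = 2 * np.pi
#
#   data = np.random.rand(100, 2)
#   data[:, 1] *= 360.0  # second column holds angles in degrees
#
#   criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
#   retval, labels, centers = kmeans_periodic(
#       [1], [(0, 360)], data, 3, None, criteria, 10,
#       cv2.KMEANS_RANDOM_CENTERS)
#   # centers[:, 1] is reported back in the 0..360 range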
def recursive_affine_transform(self, max_try=10):
    # the 2x3 invariant matrix
    const_mat = numpy.array([[1., 0., 0.],
                             [0., 1., 0.]])
    matcher = ImgMatcher(target_img=self.target, template=self.template,
                         crop=self.crop)
    for _ in range(max_try):
        try:
            mat = matcher.get_affine_transform_mat()
        except cv2.error:
            raise cv2.error('Matching pattern not found, try bigger crop!')
        # use large blank image to hold transformed image, then cut
        img = cv2.warpAffine(matcher.target, mat, (10000, 10000))
        img = img[:matcher.template.shape[0], :matcher.template.shape[1]]
        matcher = ImgMatcher(target_img=img, template=matcher.template,
                             crop=matcher.crop)
        if numpy.array_equal(mat, const_mat):
            print('Transform finished. Use .target to get the image')
            break
        else:
            print('Iterating... Mat=')
            print(mat)
    return matcher.target
def read(self):
    """Read a frame from the camera and return it as an RGB array."""
    try:
        ret, frame = self.cam.read()
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    except cv2.error:
        raise cv2.error("OpenCV can't find a camera!")
    return rgb
def load_cascades(data_dir):
    for k, v in PROFILES.items():
        v = os.path.join(data_dir, v)
        try:
            if not os.path.exists(v):
                raise cv2.error('no such file')
            CASCADES[k] = cv2.CascadeClassifier(v)
        except cv2.error:
            fatal("cannot load {} from {}".format(k, v))
def imencode(self, format='png', params=None):
    image = self.convert('native')
    if not format.startswith('.'):
        format = '.' + format
    # cv2.imencode expects a sequence of parameters, not None
    if params is None:
        params = []
    result, buf = cv2.imencode(format, image.array, params)
    if result:
        return buf
    else:
        raise cv2.error('imencode failed')
def load_cascades(data_dir):
    for k, v in PROFILES.items():
        v = os.path.join(data_dir, v)
        try:
            if not os.path.exists(v):
                raise cv2.error('no such file')
            CASCADES[k] = cv2.CascadeClassifier(v)
        except cv2.error:
            fatal("cannot load {} from {}".format(k, v))
def roi_rect(self, rect):
    minX, minY, maxX, maxY = rect
    invalid_dimensions = any([minX < 0, minY < 0, maxX < 0, maxY < 0,
                              maxX < minX, maxY < minY,
                              maxX > self.width, maxY > self.height])
    if invalid_dimensions:
        raise cv2.error("Invalid dimensions for crop/ROI rectangle.")
    else:
        self._roi_rect = (minX, minY, maxX, maxY)
def read(self):
    """Read a frame from the camera; return it as RGB, or collapsed to a
    single grayscale channel when `self.bw` is set."""
    try:
        ret, frame = self.cam.read()
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    except cv2.error:
        raise cv2.error("OpenCV can't find a camera!")
    if self.bw:
        return np.mean(rgb, 2).astype(rgb.dtype)
    else:
        return rgb
def affine_transform(keypoint1, keypoint2, matches):
    src = np.array([keypoint1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.array([keypoint2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    try:
        # estimateRigidTransform returns a 2x3 partial-affine matrix;
        # extend it to a full 3x3 homogeneous transform
        affine_matrix = cv2.estimateRigidTransform(src, dst, fullAffine=False)
        transform_matrix = np.vstack((affine_matrix, [0, 0, 1]))
    except ValueError as e:
        raise ValueError('Not enough feature points') from e
    except cv2.error as e:
        raise cv2.error('Image too noisy') from e
    return transform_matrix
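# Note (assumption about the target OpenCV version): cv2.estimateRigidTransform
# was removed in OpenCV 4.x. On modern builds the equivalent call would be a
# sketch along these lines, with the same np.vstack step afterwards:
#   affine_matrix, _inliers = cv2.estimateAffinePartial2D(src, dst)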
def load_cascades(self):
    """
    Loads cascades into Cascades array
    self.is_loaded = True on success, False otherwise
    """
    if not self.is_valid:
        self.is_loaded = False
        return
    for k, v in PROFILES.items():
        v = os.path.join(self.data_dir, v)
        try:
            if not os.path.exists(v):
                raise cv2.error('no such file')
            self.cascades[k] = cv2.CascadeClassifier(v)
        except cv2.error:
            # return immediately so the failure isn't overwritten below
            self.is_loaded = False
            return
    self.is_loaded = True
def _open_single_image(path, squash, dim):
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        raise cv2.error('Failed to open {}'.format(os.path.basename(path)))
    if not squash:
        # center-crop to a square (sq_dim x sq_dim) before resizing, so the
        # aspect ratio is preserved instead of being squashed
        sq_dim = min(img.shape[0], img.shape[1])
        yshift = int((img.shape[0] - sq_dim) / 2)
        xshift = int((img.shape[1] - sq_dim) / 2)
        img = img[yshift:yshift + sq_dim, xshift:xshift + sq_dim]
    return cv2.resize(img, dim)
def crop_image(image):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (3, 3), 0)
    edged = cv2.Canny(gray, 10, 250)

    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))
    closed = cv2.morphologyEx(edged, cv2.MORPH_CLOSE, kernel)

    # the [1] index assumes the OpenCV 3.x return signature of findContours
    cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)[1]
    allowed_boxes = []
    for c in cnts:
        rect = cv2.minAreaRect(c)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        # no noise condition
        SUPERPARAM = 60  # TODO KILL feriat
        if (rect[1][0] > SUPERPARAM) and (rect[1][1] > SUPERPARAM):
            statistics = get_statistics(image, box)
            if np.mean(statistics) < 100:
                allowed_boxes.append(rect)
    try:
        nec_boxes = [
            min(allowed_boxes, key=lambda x: x[0][0] + x[0][1]),
            max(allowed_boxes, key=lambda x: x[0][0] + x[0][1]),
            min(allowed_boxes, key=lambda x: x[0][0] - x[0][1]),
            max(allowed_boxes, key=lambda x: x[0][0] - x[0][1]),
        ]
    except ValueError as e:
        raise cv2.error("No rectangles found") from e
    large = sorted(image.shape)[-1]
    small = sorted(image.shape)[-2]
    pts1 = np.float32([i[0] for i in nec_boxes])
    pts2 = np.float32([[0, 0], [large, small], [0, small], [large, 0]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    result = cv2.warpPerspective(image, M, (large, small))
    result = cv2.resize(result, (WIDTH, HEIGHT))
    return result
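# Note (assumption): cv2.findContours returns (image, contours, hierarchy) on
# OpenCV 3.x but (contours, hierarchy) on 2.x/4.x, so the [1] above is only
# correct on 3.x. A version-agnostic sketch:
#   cnts = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL,
#                           cv2.CHAIN_APPROX_SIMPLE)
#   cnts = cnts[0] if len(cnts) == 2 else cnts[1]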
def capture_frame(self):
    """
    Captures next video frame from camera and returns it
    :return: returns captured frame as NumPy array
    """
    self.camera.open(0)
    rv, self.last_frame = self.camera.read()
    if rv:
        self.camera.release()
        return self.last_frame  # cv2 capture
    else:
        # Insist on capturing the frame 5 more times if something went wrong
        for _ in range(5):
            try:
                rv_, self.last_frame = self.camera.read()
                if rv_:
                    self.camera.release()
                    return self.last_frame
            except cv2.error:
                self.camera.release()
        raise cv2.error("Error capturing frame.")
def template_best(self, templates=None, threshold=0.84,
                  method=cv2.TM_CCOEFF_NORMED, use_roi=False, roi_rect=None):
    """
    Cycle through each image in `templates` performing `cv2.matchTemplate`,
    return best match (or return False)
    """
    #TODO: Enable checking against min_val for alt methods
    if templates is None and hasattr(self, 'templates'):
        templates = self.templates
    elif templates is None:
        raise cv2.error("No template(s) to match against!")

    roi_rect = roi_rect if roi_rect else self.roi_default
    target = self.get_roi(False, roi_rect) if use_roi else self.gray

    #dump checked frames
    #cv2.imwrite('dump/'+ self.out_base + '/' +
    #            str(int(self.frame)) + '.png', target)

    matches = {}  #USED TO BE: dict((label,None) for label,template in templates)
    for label, template in templates:
        res = cv2.matchTemplate(target, template, method)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        if max_val >= threshold:
            matches[label] = max_val

    if matches:
        match_best = max(matches, key=matches.get)
        self.template_found = match_best
        return match_best
    return False
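# A minimal usage sketch for the two template matchers above (not part of the
# original class; the paths and the `video` instance are hypothetical).
# Templates are (label, image) pairs, loaded as grayscale since they are
# matched against `self.gray`:
#   templates = [
#       ('play', cv2.imread('templates/play.png', cv2.IMREAD_GRAYSCALE)),
#       ('pause', cv2.imread('templates/pause.png', cv2.IMREAD_GRAYSCALE)),
#   ]
#   if video.template_check(templates, threshold=0.9):
#       print(video.template_found)
#   best = video.template_best(templates)  # label of strongest match, or False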
def apply_random_scale_and_crop(image, new_w, new_h, net_w, net_h, dx, dy):
    try:
        im_sized = cv2.resize(image, (new_w, new_h))
    except cv2.error as e:
        print('cv2.resize failed for target size:')
        print(new_w, new_h)
        raise cv2.error('{}, {} {}'.format(new_w, new_h, e))

    if dx > 0:
        im_sized = np.pad(im_sized, ((0, 0), (dx, 0), (0, 0)),
                          mode='constant', constant_values=127)
    else:
        im_sized = im_sized[:, -dx:, :]
    if (new_w + dx) < net_w:
        im_sized = np.pad(im_sized, ((0, 0), (0, net_w - (new_w + dx)), (0, 0)),
                          mode='constant', constant_values=127)

    if dy > 0:
        im_sized = np.pad(im_sized, ((dy, 0), (0, 0), (0, 0)),
                          mode='constant', constant_values=127)
    else:
        im_sized = im_sized[-dy:, :, :]
    if (new_h + dy) < net_h:
        im_sized = np.pad(im_sized, ((0, net_h - (new_h + dy)), (0, 0), (0, 0)),
                          mode='constant', constant_values=127)

    return im_sized[:net_h, :net_w, :]
def __get_text_from_image(saved_image_path, coord):
    try:
        import cv2
        import cv2.cv as cv
        import tesseract
    except ImportError:
        raise ImportError("tesseract library for python required")
    api = tesseract.TessBaseAPI()
    api.Init(".", "eng", tesseract.OEM_DEFAULT)
    api.SetPageSegMode(tesseract.PSM_AUTO)
    image0 = cv2.imread(saved_image_path, cv.CV_LOAD_IMAGE_GRAYSCALE)
    if image0 is None:
        raise cv2.error("Image for text matching was NoneType")
    # x1 = coord[0], y1 = coord[1], x2 = coord[2], y2 = coord[3]
    image1 = image0[coord[1]:coord[3], coord[0]:coord[2]]
    height1, width1 = image1.shape
    iplimage = cv.CreateImageHeader((width1, height1), cv.IPL_DEPTH_8U, 1)
    cv.SetData(iplimage, image1.tostring(), image1.dtype.itemsize * width1)
    tesseract.SetCvImage(iplimage, api)
    return api.GetUTF8Text()
def imread(fp, flags=cv2.IMREAD_UNCHANGED):
    exclusive_fp = False
    filename = ""
    if isinstance(fp, Path):
        filename = str(fp.resolve())
    elif isPath(fp):
        filename = fp

    if filename:
        fp = builtins.open(filename, "rb")
        exclusive_fp = True

    try:
        fp.seek(0)
    except (AttributeError, io.UnsupportedOperation):
        fp = io.BytesIO(fp.read())
        exclusive_fp = True

    data = fp.read()
    if exclusive_fp:
        fp.close()

    mat = cv2.imdecode(np.asarray(memoryview(data)), flags)
    if mat is None:
        raise cv2.error('imdecode failed')

    ch = _channels(mat.shape)
    target_mode = None
    if ch == 3:
        target_mode = 'BGR'
    elif ch == 4:
        target_mode = 'BGRA'
    if target_mode is not None and mat.dtype != np.uint8:
        if mat.dtype in (np.float32, np.float64):
            maxval = 1.0
        else:
            maxval = np.float32(np.iinfo(mat.dtype).max)
        mat = (mat / maxval * 255).astype(np.uint8)
    return Image(mat, target_mode)
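# A round-trip sketch for the imread/imencode helpers above, assuming they
# live in the same module and `Image` wraps the decoded matrix (the file
# paths are hypothetical):
#   img = imread('photo.jpg')     # Image wrapping a BGR/BGRA uint8 matrix
#   buf = img.imencode('png')     # encoded buffer from cv2.imencode
#   with open('photo.png', 'wb') as f:
#       f.write(buf.tobytes())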
ORANGE_MIN = np.array([150, 20, 200], np.uint8)
ORANGE_MAX = np.array([180, 255, 255], np.uint8)

winName = "Movement Indicator"
cv2.namedWindow(winName, cv2.WINDOW_AUTOSIZE)
cv2.namedWindow('brand', cv2.WINDOW_AUTOSIZE)

while s:
    cv2.imshow(winName, img)
    s, img = cam.read()
    s1, brand_img = brand_cam.read()
    try:
        cv2.imshow('brand', brand_img)
    except cv2.error as e:
        print(e)

    mod_img = img
    mod_img = cv2.blur(mod_img, (25, 30))
    mod_img = cv2.cvtColor(mod_img, cv2.COLOR_BGR2HSV)
    mod_img = cv2.inRange(mod_img, ORANGE_MIN, ORANGE_MAX)

    contours, hierarchy = cv2.findContours(mod_img, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        moments = cv2.moments(cnt)  # Calculate moments
        if moments['m00'] != 0:
            cx = int(moments['m10'] / moments['m00'])  # cx = M10/M00
            cy = int(moments['m01'] / moments['m00'])  # cy = M01/M00
            moment_area = moments['m00']  # Contour area from moment
            contour_area = cv2.contourArea(cnt)  # Contour area using in_built function

            diff_x = old_cx - cx  # Calculate movement from last saved image
            diff_x = diff_x * diff_x / 2  # Square to avoid negatives
            diff_y = old_cy - cy
def test_fps(fmt, resolution, framerate, zoom):
    # check if camera stream exists
    global device_num

    # set video format
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*fmt))
    # convert video codec number to format and check if set correctly
    fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
    codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
    log_print("Video format set to: {} ({})".format(codec, fourcc))

    # make sure format is set correctly
    if codec != fmt:
        log_print("Unable to set video format correctly.")
        reboot_device(fmt, codec)
        return -1

    # set resolution and check if set correctly
    if resolution == '4k':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '1200p':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 4800)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1200)
    elif resolution == '1080p':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '720p':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    elif resolution == '540p':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
    elif resolution == '360p':
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    log_print("Resolution set to: {} x {}".format(width, height))

    # set zoom level
    device = 'v4l2-ctl -d /dev/video{}'.format(device_num)
    log_print("Setting zoom level to: {}".format(zoom))
    subprocess.call(['{} -c zoom_absolute={}'.format(device, str(zoom))],
                    shell=True)

    # open opencv capture device and set the fps
    log_print("Setting framerate to: {}".format(framerate))
    cap.set(cv2.CAP_PROP_FPS, framerate)
    current_fps = cap.get(cv2.CAP_PROP_FPS)
    log_print("Current framerate: {}\n".format(current_fps))

    # check if device is responding to get/set commands and try rebooting if it isn't
    if width == 0 and height == 0 and current_fps == 0:
        log_print("Device not responding to get/set commands")
        reboot_device(fmt, codec)

    # set number of frames to be counted
    frames = framerate * 20
    prev_frame = 0
    fps_list, jitters = ([] for x in range(2))
    drops, count, initial_frames, initial_elapsed = (0 for x in range(4))

    # calculate fps
    start = time.time()
    # default initial value
    initial_elapsed = 30
    for i in range(0, frames):
        try:
            retval, frame = cap.read()
            current_frame = cap.get(cv2.CAP_PROP_POS_MSEC)
            if prev_frame == 0:
                prev_frame = current_frame
            diff = current_frame - prev_frame
            prev_frame = current_frame
            # save jitter between current and previous frame to average later
            jitters.append(abs(diff - (1000 / framerate)))

            if retval is False:
                drops += 1
                if drops >= 5:
                    log_print("# of dropped frames: {}".format(drops))
                    raise cv2.error("Timeout error")
                continue
            else:
                if live_view is True:
                    # switch channels for correct color output
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    cv2.imshow('frame', frame)
                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break
                count += 1
                # 2/3 frames - 1 to get the first 20 seconds
                if i == (((frames * 2) / 3) - 1):
                    initial_frames = count
                    initial_end = time.time()
                    initial_elapsed = initial_end - start
        except cv2.error as e:
            log_print("{}".format(e))
            reboot_device(fmt, codec)
            log_print("FREEZE FAIL\n")
            return -1

    end = time.time()
    total_elapsed = end - start
    elapsed = total_elapsed - initial_elapsed
    actual_frames = count
    avg_jitter = sum(jitters) / len(jitters)

    log_print("Test duration (s): {:<5}".format(total_elapsed))
    log_print("Total frames grabbed: {:<5}".format(count))
    log_print("Total frames dropped: {:<5}".format(drops))
    log_print("Average jitter (ms): {:<5}".format(avg_jitter))

    initial_fps = float(initial_frames / initial_elapsed)
    fps_list.append(initial_fps)
    log_print("Initial average fps: {:<5}".format(initial_fps))
    fps = float((actual_frames - initial_frames) / elapsed)
    fps_list.append(fps)
    log_print("Actual average fps: {:<5}\n".format(fps))

    diff = abs(float(framerate) - float(fps_list[1]))
    # success
    if diff <= 1:
        log_print("PASS\n")
        return 1
    # soft failure
    elif diff <= 3 and diff > 1:
        log_print("SOFT FAIL\n")
        return 0
    # hard failure
    else:
        log_print("HARD FAIL\n")
        return -1
def generate_thumbnails(self, raw_files):
    # Generates thumbnails from files found in the selected directory
    # Returns an array of tuples which contain (img_byte_array, creation_date, creation_time)
    # Each tuple corresponds to a thumbnail of an image or video, or if it's an SWF, a placeholder

    # The array we will add each thumbnail's tuple to
    thumbs = []

    # Format progressbar to display current action
    self.format_progressbar.emit("Generating thumbnails - %v/%m")
    self.max_progressbar.emit(self.file_count)

    for file in raw_files:
        print(f"Processing \"{file.name}\"")
        # Only process files, not directories
        if file.is_file():
            file_ext = file.name.split('.')[-1].lower()
            if file_ext in FileExts.image_exts:
                # For image files...
                # Read image from file; cv2.imread returns None on failure
                # rather than raising, so turn that into an error we can catch
                try:
                    im = cv2.imread(file.path)
                    if im is None:
                        raise cv2.error("imread returned None")
                except cv2.error as e:
                    # TODO: Add separate function to show why loading failed in grid
                    print(f"Error while loading {file.name} with error {e}")
                else:
                    # Resize image
                    im = self.proper_resize(im, self.thumb_height)
                    # Encode image into bytearray
                    img_byte_array = bytes(cv2.imencode(".png", im)[1])

                    # Open image with PIL (i die) and get creation date metadata which is under "306"
                    pil_image = Image.open(file.path)
                    exif_data = None
                    # We use try block because some images do not contain EXIF metadata.
                    try:
                        exif_data = pil_image._getexif()
                        # Get the creation date and time
                        creation_date = exif_data[306].split(' ')[0]
                        creation_time = exif_data[306].split(' ')[1]
                    except (AttributeError, KeyError, TypeError):
                        # Triggered when a key isn't in the image EXIF
                        print(f"Could not find creation_date or creation_time in {file.name}")
                        # Setting date and time to zeroes makes the image last in the list
                        creation_date = "0000-00-00"
                        creation_time = "00:00:00"
                    # Cleanup
                    im = None
            elif file_ext in FileExts.video_exts:
                # For video files...
                # Get video metadata
                metadata = self.get_video_metadata(file.path)
                # Only attempt to load the video if ffmpeg is able to get metadata
                # This prevents ffmpeg inside cv2 from printing an unhandleable error
                if metadata == False:
                    print(f"Error: Loading {file.name} failed, moov atom not found. Skipping...")
                    creation_date = False
                else:
                    creation_date = metadata[0]
                    creation_time = metadata[1]
                    # Read video file
                    try:
                        frames = cv2.VideoCapture(file.path)
                        first_frame = frames.read()[1]
                    except cv2.error as e:
                        # TODO: same as the images
                        print(f"Error while loading {file.name} with error {e}")
                        exit(1)
                    else:
                        # Resize frame | !!! .read() result is a tuple, the 2nd value is the ndarray we need.
                        resized_first_frame = self.proper_resize(first_frame, self.thumb_height)
                        # Encode first frame into bytearray
                        img_byte_array = bytes(cv2.imencode(".png", resized_first_frame)[1])
                        # Free up memory
                        frames.release()
                        cv2.destroyAllWindows()

            if isinstance(creation_date, str):
                # Only if we aren't skipping a file
                # Split dates/times by any non-number character into tuple for consistency
                creation_date = tuple(re.split("[^0-9]", creation_date))
                creation_time = tuple(re.split("[^0-9]", creation_time))
                # Add the bytearray and info to the list
                thumbs.append((img_byte_array, creation_date, creation_time))
                creation_date = None

        # Increment progressbar after every cycle
        self.increment_progressbar.emit()
    return thumbs
def frame(self, frame):
    #self.debug("Setting 'frame' to %s out of %s" % (frame, self.framecount))
    if frame < 0 or frame > self.framecount:
        raise cv2.error("Requested frame is out of bounds")
    else:
        self.stream.set(cv2.cv.CV_CAP_PROP_POS_FRAMES, frame)
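# Note (assumption about the OpenCV version): cv2.cv.CV_CAP_PROP_POS_FRAMES
# is the OpenCV 2.4 constant; on OpenCV 3+/4 the same seek would be:
#   self.stream.set(cv2.CAP_PROP_POS_FRAMES, frame)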
def saveMasks(seq_path, xml_path, out_mask_size, out_border, fixed_ar,
              save_raw_mask, show_img, out_root_path='', save_test=1,
              save_train=1, frames_reader=None, masks_per_seq=0,
              enable_out_suffix=1, train_fnames=None, test_fnames=None,
              map_to_bbox=0, out_img_dir='', enable_xml_annotations=0,
              allow_skipping_images=0):
    global _pause, _exit

    if not xml_path or not os.path.isdir(xml_path):
        raise IOError(
            'Folder containing the loaded boxes does not exist: {}'.format(
                xml_path))

    files = glob.glob(os.path.join(xml_path, '*.xml'))
    n_files = len(files)
    if n_files == 0:
        raise IOError('No loaded boxes found')

    if frames_reader is None:
        frames_reader = get_frames_reader(seq_path, save_as_bin=False)

    min_dim = max_dim = 0
    out_w, out_h = out_mask_size
    print('out_mask_size: {}'.format(out_mask_size))
    if out_w == -1 and out_h == -1:
        out_w = out_h = 0
    if out_w == -1:
        max_dim = out_h
    elif out_h == -1:
        min_dim = out_w

    if fixed_ar:
        print('Using fixed aspect ratio: {}'.format(fixed_ar))
    print('out_border: {}'.format(out_border))

    def getint(fn):
        basename = os.path.basename(fn)
        num = re.sub(r"\D", "", basename)
        try:
            return int(num)
        except:
            return 0

    if len(files) > 0:
        files = sorted(files, key=getint)

    print('Loading annotations from {:d} files'.format(n_files))

    file_id = 0
    n_boxes = 0
    seq_root_dir = os.path.dirname(seq_path)
    seq_name = os.path.basename(seq_path)
    if not out_root_path:
        out_root_path = os.path.join(seq_root_dir, 'masks')

    if not enable_out_suffix:
        out_seq_name = seq_name
    else:
        if map_to_bbox:
            out_seq_name = '{}_mapped'.format(seq_name)
        else:
            out_seq_name = '{}_{}x{}'.format(seq_name, out_w, out_h)
            if fixed_ar:
                out_seq_name = '{}_ar_{}'.format(out_seq_name, fixed_ar)
            else:
                out_seq_name = '{}_{}'.format(out_seq_name, out_border)
        out_seq_name = out_seq_name.replace('.', 'p')

    train_root_path = os.path.join(out_root_path, out_seq_name)
    if not save_test and not save_train:
        raise AssertionError('Either save_test or save_train must be on')

    # print('Saving output sequences to {}'.format(out_root_path))

    if save_train:
        out_img_root_path = train_root_path
        if out_img_dir:
            out_img_root_path = os.path.join(out_img_root_path, out_img_dir)
        out_mask_root_path = os.path.join(train_root_path, 'labels')
        print('Saving training mask sequence to {}'.format(train_root_path))
        if not os.path.isdir(out_img_root_path):
            os.makedirs(out_img_root_path)
        if not os.path.isdir(out_mask_root_path):
            os.makedirs(out_mask_root_path)
        if enable_xml_annotations:
            out_xml_path = os.path.join(out_img_root_path, 'annotations')
            print('Saving xml_annotations to {}'.format(out_xml_path))
            if not os.path.isdir(out_xml_path):
                os.makedirs(out_xml_path)

    if save_test:
        out_test_seq_name = out_seq_name + '_test'
        test_img_root_path = os.path.join(out_root_path, out_test_seq_name)
        print('Saving unlabeled testing mask sequence to {}'.format(
            test_img_root_path))
        if not os.path.isdir(test_img_root_path):
            os.makedirs(test_img_root_path)

    win_name = 'patch and mask'

    disable_resizing = 0
    scale_x = scale_y = 1.0
    if out_w == 0 and out_h == 0:
        print('Resizing disabled')
        disable_resizing = 1

    csv_raw = []
    test_csv_raw = []

    n_files = len(files)
    if save_raw_mask:
        print('Saving raw labels')
        mask_pix_val = (1, 1, 1)
    else:
        mask_pix_val = (255, 255, 255)

    n_masks = 0
    _train_fnames = []
    _test_fnames = []
    _exit_seq = 0
    disp_img = None

    for file_id, file in enumerate(files):
        xml_reader = PascalVocReader(file)

        filename = os.path.basename(xml_reader.filename)
        filename_no_ext = os.path.splitext(filename)[0]

        # file_id = int(re.sub("\D", "", filename))
        # print('filename: {}'.format(filename))
        # print('file_id: {}'.format(file_id))

        img = frames_reader.get_frame_by_name(filename, convert_to_rgb=0)
        if img is None:
            print('image {} could not be read'.format(filename))
            continue

        img_h, img_w = img.shape[:2]
        mask_img = None

        shapes = xml_reader.getShapes()
        n_shapes = len(shapes)
        if n_shapes > 1:
            print('{} boxes found for {} in {}'.format(n_shapes, filename, file))

        obj_id = 0
        img_written = 0
        for shape in shapes:
            label, points, _, _, difficult, bbox_source, id_number, score, mask, mask_img = shape
            if not mask:
                if not save_test:
                    continue
                xmin, ymin = points[0]
                xmax, ymax = points[2]
                img_root_path = test_img_root_path
            else:
                if not save_train:
                    continue
                mask_pts_list = Shape.getContourPts(mask, verbose=0)
                mask_pts = np.asarray(mask_pts_list)
                xmin, ymin = np.min(mask_pts, axis=0).astype(np.int32)
                xmax, ymax = np.max(mask_pts, axis=0).astype(np.int32)
                img_root_path = out_img_root_path

            if fixed_ar:
                w, h = xmax - xmin, ymax - ymin
                src_ar = float(w) / float(h)
                if fixed_ar > src_ar:
                    border_x = int((h * fixed_ar - w) / 2.0)
                    border_y = 0
                else:
                    border_y = int((w / fixed_ar - h) / 2.0)
                    border_x = 0
            else:
                border_x = border_y = out_border

            # start_row, start_col = max(0, ymin - border_y), max(0, xmin - border_x)
            # end_row, end_col = min(img_h - 1, ymax + border_y), min(img_w - 1, xmax + border_x)

            start_row, start_col = ymin - border_y, xmin - border_x
            end_row, end_col = ymax + border_y, xmax + border_x

            if start_row < 0 or start_col < 0 or end_row >= img_h or end_col >= img_w:
                msg = 'Invalid border {} for box {} in image {} of size {}'.format(
                    [border_x, border_y], [xmin, ymin, xmax, ymax], filename,
                    [img_w, img_h])
                if allow_skipping_images:
                    print('\n' + msg + '\n')
                    continue
                else:
                    raise AssertionError(msg)

            if mask:
                n_masks += 1

            w, h = end_col - start_col, end_row - start_row
            patch_img = img[start_row:end_row, start_col:end_col, :]

            if not disable_resizing:
                if max_dim > 0:
                    if w > h:
                        out_w = max_dim
                        out_h = 0
                    else:
                        out_h = max_dim
                        out_w = 0
                elif min_dim > 0:
                    if w < h:
                        out_w = min_dim
                        out_h = 0
                    else:
                        out_h = min_dim
                        out_w = 0
                else:
                    out_w, out_h = out_mask_size

                scale_x = float(out_w) / float(w)
                scale_y = float(out_h) / float(h)

                if scale_x == 0:
                    scale_x = scale_y
                    out_w = int(w * scale_x)
                elif scale_y == 0:
                    scale_y = scale_x
                    out_h = int(h * scale_y)

                try:
                    patch_img = cv2.resize(patch_img, (out_w, out_h))
                    # print('patch_img: {}'.format(patch_img.shape))
                except cv2.error as e:
                    print('patch_img: {}'.format(patch_img.shape))
                    print('out_size: {}, {}'.format(start_row, start_col))
                    print('out_size: {}, {}'.format(end_row, end_col))
                    print('out_size: {}, {}'.format(out_w, out_h))
                    raise cv2.error(e)
            else:
                out_w, out_h = w, h

            _label = label
            if id_number is None:
                id_number = -1
            if id_number > 0:
                _label = '{}_{}'.format(_label, id_number)

            if enable_out_suffix:
                out_fname = '{}_{}_{}'.format(filename_no_ext, obj_id, label)
            else:
                out_fname = filename_no_ext

            _xmin, _ymin = int((xmin - start_col) * scale_x), int(
                (ymin - start_row) * scale_y)
            _xmax, _ymax = int((xmax - start_col) * scale_x), int(
                (ymax - start_row) * scale_y)

            if map_to_bbox:
                if not img_written:
                    img_written = 1
                    out_img_path = os.path.join(img_root_path, filename)
                    cv2.imwrite(out_img_path, img)
                    if enable_xml_annotations:
                        imageShape = [xml_reader.height, xml_reader.width, 3]
                        xml_writer = PascalVocWriter(out_xml_path, filename,
                                                     imageShape)
                if mask:
                    if enable_xml_annotations:
                        bndbox = [xmin, ymin, xmax, ymax]
                        xml_writer.addBndBox(bndbox[0], bndbox[1], bndbox[2],
                                             bndbox[3], label, difficult,
                                             bbox_source, id_number, score,
                                             mask, mask_img)

                raw_data = {
                    'target_id': int(id_number),
                    'filename': filename,
                    'width': img_w,
                    'height': img_h,
                    'class': label,
                    'xmin': xmin,
                    'ymin': ymin,
                    'xmax': xmax,
                    'ymax': ymax
                }
                if show_img:
                    cv2.rectangle(img, (xmin, ymin), (xmax, ymax),
                                  (0, 255, 0), 2)
                    disp_img = img
            else:
                img_out_fname = out_fname + '.jpg'
                if mask:
                    out_img_path = os.path.join(img_root_path, img_out_fname)
                    cv2.imwrite(out_img_path, patch_img)
                    if enable_xml_annotations:
                        n_mask = len(mask)
                        _mask = []
                        for i in range(n_mask):
                            _mask.append([(mask[i][0] - start_col) * scale_x,
                                          (mask[i][1] - start_row) * scale_y,
                                          mask[i][2]])
                        imageShape = [xml_reader.height, xml_reader.width, 3]
                        xml_writer = PascalVocWriter(out_xml_path,
                                                     xml_reader.filename,
                                                     imageShape)
                        bndbox = [_xmin, _ymin, _xmax, _ymax]
                        xml_writer.addBndBox(_xmin, _ymin, _xmax, _ymax, label,
                                             difficult, bbox_source, id_number,
                                             score, _mask)

                raw_data = {
                    'target_id': int(id_number),
                    'filename': img_out_fname,
                    'width': out_w,
                    'height': out_h,
                    'class': label,
                    'xmin': _xmin,
                    'ymin': _ymin,
                    'xmax': _xmax,
                    'ymax': _ymax
                }
                if show_img:
                    cv2.rectangle(patch_img, (_xmin, _ymin), (_xmax, _ymax),
                                  (0, 255, 0), 2)
                    disp_img = patch_img

            if mask:
                if mask_img is None:
                    mask_img = np.zeros_like(img)

                # print('border_x: {}'.format(border_x))
                # print('border_y: {}'.format(border_y))
                # print('scale_x: {}'.format(scale_x))
                # print('scale_y: {}'.format(scale_y))
                #
                # print('xmin: {}'.format(xmin))
                # print('ymin: {}'.format(ymin))

                mask_pts = [[(x - xmin + border_x) * scale_x,
                             (y - ymin + border_y) * scale_y]
                            for x, y in mask_pts]
                curr_mask = np.zeros_like(patch_img, dtype=np.uint8)
                # print('mask_img: {}'.format(mask_img.shape))

                mask_out_fname = out_fname + '.png'

                # np.savetxt('mask_seq_mask_pts.txt', mask_pts, fmt='%.6f')

                curr_mask = cv2.fillPoly(
                    curr_mask,
                    np.array([mask_pts, ], dtype=np.int32),
                    mask_pix_val)
                # print('min: {} max: {}'.format(
                #     np.min(mask_img.flatten()),
                #     np.max(mask_img.flatten()))
                # )

                if map_to_bbox:
                    mask_img = map_mask_to_bbox((xmin, ymin, xmax, ymax),
                                                curr_mask, fixed_ar,
                                                out_border, mask_img.shape,
                                                mask_img)
                else:
                    mask_img = curr_mask
                    out_mask_path = os.path.join(out_mask_root_path,
                                                 mask_out_fname)
                    cv2.imwrite(out_mask_path, mask_img)
                    _train_fnames.append((out_img_path, out_mask_path))

                    if show_img:
                        disp_mask_img = mask_img.copy()
                        if save_raw_mask:
                            disp_mask_img[disp_mask_img > 0] = 255
                        blended_img = np.asarray(
                            Image.blend(Image.fromarray(patch_img),
                                        Image.fromarray(disp_mask_img), 0.5))
                        disp_img = np.concatenate(
                            (disp_img, disp_mask_img, blended_img), axis=1)

                csv_raw.append(raw_data)
            else:
                test_csv_raw.append(raw_data)
                if not map_to_bbox:
                    _test_fnames.append(out_img_path)

            if show_img and not map_to_bbox:
                # if _pause:
                #     print('frame {} :: {}'.format(file_id, filename))
                cv2.imshow(win_name, disp_img)
                k = cv2.waitKey(1 - _pause)
                if k == ord('q'):
                    _exit = 1
                    break
                elif k == 27:
                    _exit_seq = 1
                    break
                elif k == 32:
                    _pause = 1 - _pause

            obj_id += 1

        if map_to_bbox and img is not None:
            out_img_path = os.path.join(out_img_root_path, filename)
            if save_train and mask_img is not None:
                mask_out_fname = filename_no_ext + '.png'
                out_mask_path = os.path.join(out_mask_root_path,
                                             mask_out_fname)
                cv2.imwrite(out_mask_path, mask_img)
                if enable_xml_annotations:
                    out_xml_file = os.path.join(out_xml_path,
                                                os.path.basename(file))
                    xml_writer.save(targetFile=out_xml_file)
                _train_fnames.append((out_img_path, out_mask_path))
                if show_img:
                    disp_mask_img = mask_img
                    if save_raw_mask:
                        disp_mask_img[disp_mask_img > 0] = 255
                    blended_img = np.asarray(
                        Image.blend(Image.fromarray(img),
                                    Image.fromarray(disp_mask_img), 0.5))
                    disp_img = np.concatenate(
                        (disp_img, disp_mask_img, blended_img), axis=1)
            elif save_test:
                out_img_path = os.path.join(test_img_root_path, filename)
                if out_img_path in _test_fnames:
                    raise IOError(
                        'Duplicate out_img_path: {}'.format(out_img_path))
                _test_fnames.append(out_img_path)

            if show_img and disp_img is not None:
                cv2.imshow(win_name, disp_img)
                k = cv2.waitKey(1 - _pause)
                if k == ord('q'):
                    _exit = 1
                    break
                elif k == 27:
                    break
                elif k == 32:
                    _pause = 1 - _pause

        if _exit:
            break

        sys.stdout.write(
            '\rDone {:d}/{:d} files {:s} ({:d} masks found)'.format(
                file_id + 1, n_files, filename, n_masks))
        sys.stdout.flush()

        if masks_per_seq > 0 and n_masks >= masks_per_seq:
            break

    sys.stdout.write('\n')
    sys.stdout.flush()

    if not _exit_seq and save_train and n_masks == 0:
        raise IOError('\nNo masks found for {}\n'.format(seq_path))

    train_csv_path = test_csv_path = ''
    if csv_raw:
        print('Saved {} labeled files in training sequence'.format(
            len(csv_raw)))
        train_csv_path = os.path.join(out_img_root_path, 'annotations.csv')
        pd.DataFrame(csv_raw).to_csv(train_csv_path)

    if test_csv_raw:
        print('Saved {} unlabeled files in test sequence'.format(
            len(test_csv_raw)))
        test_csv_path = os.path.join(test_img_root_path, 'annotations.csv')
        pd.DataFrame(test_csv_raw).to_csv(test_csv_path)

    if show_img:
        cv2.destroyWindow(win_name)

    if save_train and train_fnames is not None:
        train_fnames[out_seq_name] = (_train_fnames, train_root_path,
                                      csv_raw, train_csv_path)
    if save_test and test_fnames is not None:
        test_fnames[out_test_seq_name] = (_test_fnames, test_img_root_path,
                                          test_csv_raw, test_csv_path)

    return n_masks
import argparse
import os
import time

import cv2
import imutils

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--videofile', type=str, required=True)
    args = parser.parse_args()
    videofile = args.videofile
    if not os.path.isfile(videofile):
        raise IOError('input video file does not exist')

    ### Video file ###
    cap = cv2.VideoCapture(videofile)
    if not cap.isOpened():
        raise cv2.error('Unable to open video capture')
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    start = time.perf_counter()
    while True:
        # Read new image and apply transformation
        ret, frame = cap.read()
        if not ret:
            break
        # Display frame
        cv2.imshow('frame', frame)
        if cv2.waitKey(1000 // 30) == 27:
            break

    cap.release()
    cv2.destroyAllWindows()
def test_fps(self, format_, resolution, framerate):
    # check if camera stream exists
    global device_num
    if self.cam is None:
        print('cv2.VideoCapture unsuccessful')
        sys.exit(1)

    if resolution == '4k':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '1200p':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 4800)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1200)
    elif resolution == '1080p':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '720p':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    elif resolution == '540p':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 960)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 540)
    elif resolution == '360p':
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 360)
    log_print("Resolution set to: {} x {}".format(
        int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    if format_ == 'MJPG':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
    elif format_ in ('YUYV', 'YUY2'):
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUYV'))
    elif format_ == 'I420':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'I420'))
    elif format_ == 'NV12':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'NV12'))

    fourcc = int(self.cam.get(cv2.CAP_PROP_FOURCC))
    codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
    log_print("Video format set to: {} ({})".format(codec, fourcc))

    # open opencv capture device and set the fps
    log_print("Setting framerate to: {}".format(framerate))
    self.cam.set(cv2.CAP_PROP_FPS, framerate)
    current_fps = self.cam.get(cv2.CAP_PROP_FPS)
    log_print("Current framerate: {}\n".format(current_fps))

    # calculate fps over a 30 second capture
    frames = framerate * 30
    fps_list = []
    start = time.time()
    for f in range(0, frames):
        try:
            retval, frame = self.cam.read()
            if retval is False:
                raise cv2.error("OpenCV error")
            if time.time() > start + 30:
                raise cv2.error("Timeout error")
        except cv2.error as e:
            log_print("{}".format(e))
            log_print("Panacast device crashed, rebooting...")
            os.system("adb reboot")
            time.sleep(95)
            device_num = 0
            while True:
                self.cam = cv2.VideoCapture(device_num)
                if self.cam.isOpened():
                    self.cam = cv2.VideoCapture(device_num)
                    log_print("Device back online: {}".format(device_num))
                    time.sleep(20)
                    break
                else:
                    device_num += 1
                    time.sleep(20)
            return -1

    end = time.time()
    elapsed = 30
    log_print("Test duration: {:<5} s".format(elapsed))
    log_print("Total frames counted: {:<5}".format(f))
    fps = float(f / elapsed)
    fps_list.append(f / elapsed)
    log_print("Average fps: {:<5}\n".format(fps))

    # diff5 = abs(float(framerate) - float(fps_list[0]))
    diff10 = abs(float(framerate) - float(fps_list[0]))  # change back to fps_list[1]

    # set framerate back to default
    self.cam.set(cv2.CAP_PROP_FPS, 30)
    self.cam.release()

    # success
    # add "diff5 <= 2 and" to evaluate 5 sec fps
    if diff10 <= 2:
        return 0
    # failure
    else:
        return -1
        print('Skipping bkg_box {} as having invalid normalized box: {}'.format(
            obj_id, (start_row, end_row, start_col, end_col)))
        continue

    try:
        dst_patch = cv2.resize(src_patch, (dst_width, dst_height))
    except cv2.error as e:
        print()
        print('bkg_orig_shape', bkg_orig_shape)
        print('bkg_img.shape', bkg_img.shape)
        print('bkg_bbox_orig', bkg_bbox_orig)
        print('bkg_bbox', bkg_bbox)
        print('bkg_resize_factor', bkg_resize_factor)
        print('dst_width', dst_width)
        print('dst_height', dst_height)
        raise cv2.error(e)

    if mask_type == 0:
        dst_patch_mask = np.ones(dst_patch.shape[:2], dtype=np.float64)
    elif mask_type == 1:
        dst_patch_mask = get2DGaussianErrorFunctionArray(dst_width, dst_height)
    elif mask_type == 2:
        dst_patch_mask = get2DGaussianArray(dst_width, dst_height)
    elif mask_type == 3:
        dst_patch_mask = get2DGaussianArray2(dst_width, dst_height)
    elif mask_type == 4:
        dst_patch_mask = cv2.resize(mask_img, (dst_width, dst_height))

    dst_patch_mask_rgb = np.dstack((dst_patch_mask, dst_patch_mask,
                                    dst_patch_mask))
    # mask_img = np.zeros_like(bkg_img, dtype=np.float64)
def test_fps(self, format_, resolution, framerate):
    # check if camera stream exists
    if self.cam is None:
        log_print('cv2.VideoCapture unsuccessful')
        sys.exit(1)

    if resolution == '4k':
        # log_print("Resolution to be set to: 3840 x 1080")
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 3840)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '1080p':
        # log_print("Resolution to be set to: 1920 x 1080")
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    elif resolution == '720p':
        # log_print("Resolution to be set to: 1280 x 720")
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
    elif resolution == '480p':
        # log_print("Resolution to be set to: 640 x 480")
        self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
    log_print("Resolution set to: {} x {}".format(
        int(self.cam.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(self.cam.get(cv2.CAP_PROP_FRAME_HEIGHT))))

    if format_ == 'MJPG':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
        log_print("Video format set to: MJPG")
    elif format_ == 'YUYV':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUYV'))
        log_print("Video format set to: YUYV")
    elif format_ == 'YUY2':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'YUY2'))
        log_print("Video format set to: YUY2")
    elif format_ == 'I420':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'I420'))
        log_print("Video format set to: I420")
    elif format_ == 'NV12':
        self.cam.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'NV12'))
        log_print("Video format set to: NV12")

    # fourcc = self.cam.get(cv2.CAP_PROP_FOURCC)
    # fourcc = int(fourcc)
    # codec = "".join([chr((fourcc >> 8 * i) & 0xFF) for i in range(4)])
    # log_print("Video format set to: {} ({})".format(codec, fourcc))

    # open opencv capture device and set the fps
    log_print("Setting framerate to: {}".format(framerate))
    self.cam.set(cv2.CAP_PROP_FPS, framerate)
    current_fps = self.cam.get(cv2.CAP_PROP_FPS)
    log_print("Current framerate: {}\n".format(current_fps))

    # check fps for 1, 5, and 10 second streams
    start = time.time()
    one = start + 1
    five = start + 5
    ten = start + 10
    count, skipped = (0 for i in range(2))
    five_yes, ten_yes = (False for i in range(2))

    # calculate fps
    while True:
        try:
            retval, frame = self.cam.read()
            if retval is True:
                count += 1
            else:
                raise cv2.error("OpenCV error")
        except cv2.error as e:
            skipped += 1
            log_print("{}".format(e))
            log_print("Panacast device crashed, rebooting...")
            os.system("adb reboot")
            time.sleep(20)
            return -1

        if time.time() >= five and five_yes is False:
            duration = time.time() - start
            log_print("Test duration: 5 s")
            log_print("Duration: {:<5} s".format(duration))
            log_print("Total frames counted: {:<5}".format(count))
            log_print("Total frames skipped: {:<5}".format(skipped))
            fps5 = count / duration
            log_print("Average fps: {:<5}\n".format(fps5))
            five_yes = True
        elif time.time() >= ten and ten_yes is False:
            duration = time.time() - start
            log_print("Test duration: 10 s")
            log_print("Duration: {:<5} s".format(duration))
            log_print("Total frames counted: {:<5}".format(count))
            log_print("Total frames skipped: {:<5}".format(skipped))
            fps10 = count / duration
            log_print("Average fps: {:<5}\n".format(fps10))
            ten_yes = True
            break

    diff5 = abs(float(framerate) - float(fps5))
    diff10 = abs(float(framerate) - float(fps10))

    # set framerate back to default
    self.cam.set(cv2.CAP_PROP_FPS, 30)

    # success
    if diff5 <= 2 and diff10 <= 2:
        # log_print("Success.")
        return 0
    # failure
    else:
        # log_print("Failure.")
        return -1