#!/usr/bin/python3
"""Preview the default webcam and record the stream to output.avi; quit with 'q'."""
import numpy as np
import cv2

cam = cv2.VideoCapture(0)

# Deciding video format. FourCC tags are case-sensitive: the registered Xvid
# codec tag is 'XVID'; lower-case 'xvid' produces an unusable writer.
vid_format = cv2.VideoWriter_fourcc(*'XVID')
# filename, video format, FPS, (video W, H)
out = cv2.VideoWriter('output.avi', vid_format, 5.0, (640, 480))

# Frame size is a property of the capture object, not of the cv2 module
# (cv2.get(...) raises AttributeError).
print(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))

while cam.isOpened():
    status, frame = cam.read()
    if not status:
        # Read failed (camera unplugged / end of stream); frame would be None
        # and imshow/write would crash on it.
        break
    cv2.imshow('frame', frame)
    out.write(frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cam.release()
out.release()  # finalize the container so the recorded file is playable
cv2.destroyAllWindows()
"""Plain webcam preview (ESC quits), then a preview annotated with frame size
and the current date/time ('q' quits)."""
import datetime

import cv2

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    # Test the boolean status flag: truth-testing the numpy frame array
    # ("if not frame") raises ValueError.
    if not ret:
        break
    cv2.imshow('video', frame)
    # waitKey(1) polls without blocking; waitKey() with no argument would
    # freeze the stream until a key is pressed.
    key = cv2.waitKey(1)
    if key == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()  # was destroyAllWindow(): no such function

''' Time and Date '''
cap = cv2.VideoCapture(0)
# Size properties belong to the capture object (3 = CAP_PROP_FRAME_WIDTH,
# 4 = CAP_PROP_FRAME_HEIGHT); cv2.get(...) does not exist.
print(cap.get(3))
print(cap.get(4))
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        font = cv2.FONT_HERSHEY_SIMPLEX
        text = 'Width:' + str(cap.get(3)) + 'Height:' + str(cap.get(4))
        # putText signature: (img, text, org, font, scale, color, thickness,
        # lineType). The original misspelled cv2.LINE_AA as cv2.Line_AA and
        # passed it in the thickness slot; scale 5 also pushed the text off
        # screen, so use a readable scale/thickness.
        frame = cv2.putText(frame, text, (10, 50), font, 1,
                            (0, 0, 255), 2, cv2.LINE_AA)
        date = str(datetime.datetime.now())
        # Draw the date on its own baseline so it does not overwrite the
        # size text (both were at (10, 50)).
        frame = cv2.putText(frame, date, (10, 100), font, 1,
                            (0, 0, 255), 2, cv2.LINE_AA)
        # The original loop never displayed the frame and had no exit
        # condition, so it spun invisibly forever.
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
cap.release()
cv2.destroyAllWindows()
''' Arithmetic operation on images'''
def getFrame(self, vid_file, frame_index, gray=False, flatten=False,
             dtype=None, resize_shape=None, reshape_image=None,
             augmentation=False):
    """Read a single frame from a video file, with optional preprocessing.

    Args:
        vid_file: path of the video file to open.
        frame_index: zero-based index of the frame to seek to and read.
        gray: if True, convert the frame to single-channel grayscale.
        flatten: if True, flatten the frame to 1-D.
        dtype: if given, cast the returned array to this dtype.
        resize_shape: if given, resize via self.resize_image.
        reshape_image: if given, reshape the frame to this shape.
        augmentation: if True, apply a random rotation/brightness transform
            using self.augmentation_object and the self.*_index bounds.

    Returns:
        The (possibly transformed) frame as a numpy array.

    Exits the process (non-zero status) if seeking or reading fails.
    """
    cap = cv2.VideoCapture(vid_file)
    # Property id 1 is cv2.CAP_PROP_POS_FRAMES — use the named constant.
    if not cap.set(cv2.CAP_PROP_POS_FRAMES, frame_index):
        print(
            "ERROR in PrepareDataset/getFrame: Setting video file pointer failed"
        )
        print("vid_file =", vid_file)
        print("frame_index =", frame_index)
        # The frame count is a property of the capture object; the original
        # called cv2.get(...), which raises AttributeError on the module.
        print("total frames =", cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()  # don't leak the capture handle on the error path
        exit(1)  # non-zero: this is a failure (original exited with 0)
    ret, frame = cap.read()
    if not ret:
        print(
            "ERROR in PrepareDataset/getFrame: Reading video file frame failed"
        )
        print("vid_file =", vid_file)
        print("frame_index =", frame_index)
        print("total frames =", cap.get(cv2.CAP_PROP_FRAME_COUNT))
        cap.release()
        exit(1)
    cap.release()

    if augmentation:
        # Rotation drawn uniformly from either the negative or positive band
        # [lower, higher) scaled by 1/100.
        rotation_list = [
            np.random.randint(-self.rotation_higher_index,
                              -self.rotation_lower_index) / 100,
            np.random.randint(self.rotation_lower_index,
                              self.rotation_higher_index) / 100
        ]
        params = {
            'theta': np.random.choice(rotation_list),
            'brightness': np.random.randint(self.brightness_lower_index,
                                            self.brightness_higher_index) / 100
        }
        # Cast back: apply_transform may promote the dtype.
        frame = self.augmentation_object.apply_transform(
            x=frame, transform_parameters=params).astype(frame.dtype)
    if gray:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
    if resize_shape is not None:
        frame = self.resize_image(frame, resize_shape, print_it=False)
    if flatten and reshape_image is not None:
        print(
            "Warning: flatten and reshape used together. Make sure you know what you are doing"
        )
    if flatten:
        frame = frame.flatten()
    if reshape_image is not None:
        frame = frame.reshape(reshape_image)
    if dtype is None:
        return frame
    return frame.astype(dtype)
"""Drowsiness-detector setup: alarm sound, Haar cascades, CNN model, webcam.

NOTE(review): the original fragment is truncated mid detectMultiScale call;
the call is closed with the arguments that are visible.
"""
import os

sound = mixer.Sound('alarm.wav')
# Raw strings: '\h' is an invalid escape sequence in a plain string literal
# (DeprecationWarning today, SyntaxError in future Python). The raw form
# yields byte-identical paths.
face = cv2.CascadeClassifier(
    r'haar cascade files\haarcascade_frontalface_alt.xml')
leye = cv2.CascadeClassifier(
    r'haar cascade files\haarcascade_lefteye_2splits.xml')
reye = cv2.CascadeClassifier(
    r'haar cascade files\haarcascade_righteye_2splits.xml')

lbl = ['Close', 'Open']

model = load_model('cnnCat2.h5')
path = os.getcwd()
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_COMPLEX_SMALL
# The original used Java-OpenCV syntax (Videoio.CAP_PROP_FRAME_COUNT) and
# called cv2.get(...), neither of which exists in Python; the property is
# read from the capture object. NOTE(review): for a live camera this is
# typically 0/-1 — confirm nbFrames is actually meaningful here.
nbFrames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
count = 0
score = 0
thicc = 2
rpred = [99]
lpred = [99]

while True:
    ret, frame = cap.read()
    height, width = frame.shape[:2]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face.detectMultiScale(gray, minNeighbors=5, scaleFactor=1.1)
    # ... (remainder of the loop body is truncated in the source fragment)
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 14:54:25 2020

@author: krypton

Grayscale webcam preview; press 'q' to quit.
"""
#%% imports
import cv2

#%%
cap = cv2.VideoCapture(0)
# Frame size is a property of the capture object; cv2.get(...) does not
# exist on the module and raises AttributeError.
width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)

#%%
while True:
    ret, frame = cap.read()
    if not ret:
        # Read failed; cvtColor would crash on a None frame.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("video", gray)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
"""Green-object tracking setup: reads from a webcam (imutils VideoStream)
or a video file, depending on the --video argument."""
args = vars(ap.parse_args())

# HSV bounds for "green"
greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args['buffer'])

if not args.get("video", False):
    cap = VideoStream(src=0).start()
else:
    cap = cv2.VideoCapture(args["video"])
time.sleep(2.0)  # let the camera sensor warm up

# Globals
thickness_amt = 5
# The original called cv2.get(cv2.CAP_PROP_FRAME_*), which does not exist on
# the module — and imutils' VideoStream has no .get() either. The size is
# taken from the first captured frame instead, which works for both sources.
height = None
width = None

while True:
    # Take each frame. VideoCapture.read() returns (ret, frame), so the
    # tuple must be unpacked BEFORE any processing — the original flipped
    # the tuple itself, which fails on the file-input path.
    frame = cap.read()
    frame = frame[1] if args.get("video", False) else frame
    if frame is None:
        break
    frame = cv2.flip(frame, 1)
    if height is None:
        height, width = frame.shape[:2]
        print(height)
        print(width)
"""Report the driver FPS of the default camera, then estimate the real FPS
by timing the capture of 120 frames."""
import cv2
import time

if __name__ == '__main__':
    # Start default camera
    video = cv2.VideoCapture(0)

    # FPS as reported by the driver (may be 0 for some cameras).
    fps = video.get(cv2.CAP_PROP_FPS)
    # Message updated to name the API actually called (the original cited
    # the long-removed cv2.cv.CV_CAP_PROP_FPS constant).
    print("Frames per second using video.get(cv2.CAP_PROP_FPS): {0}".format(fps))

    num_frames = 120
    print("Capturing {0} frames".format(num_frames))

    # Start time. The original called cv2.get(), which does not exist;
    # wall-clock timing belongs to the time module.
    start = time.time()

    # Grab a few frames
    for i in range(num_frames):
        ret, frame = video.read()

    # End time
    end = time.time()

    # Time elapsed
    seconds = end - start
    print("Time taken : {0} seconds".format(seconds))

    # Calculate frames per second
    fps = num_frames / seconds
    print("Estimated frames per second : {0}".format(fps))

    # Release the camera handle (missing in the original).
    video.release()