def acquire_capture(config):
    capture = cv2.VideoCapture(config.camera)
    # VideoCapture does not raise on failure, so check isOpened() instead of try/except
    if not capture.isOpened():
        print("ERROR: Unable to open capture. Exiting.")
        sys.exit(1)
    return capture
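cv2.VideoCapture returns an unopened object rather than raising when the device is unavailable, which is why the isOpened() check above is the reliable guard. A minimal usage sketch (config is a stand-in for any object with a camera attribute; SimpleNamespace is just for illustration):

import cv2
import sys
from types import SimpleNamespace

config = SimpleNamespace(camera=0)  # hypothetical config; 0 = default webcam
cap = acquire_capture(config)
ok, frame = cap.read()
cap.release()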
Example #2
    def __init__(self, raw_video):
        if os.path.exists(raw_video):
            self.raw_video = cv2.VideoCapture(raw_video)
            self.fragment_cnt = 0
            self.face_detector = dlib.get_frontal_face_detector()
            self.shape_predictor = dlib.shape_predictor('../models/shape_predictor_68_face_landmarks.dat')
            self.face_encoder = dlib.face_recognition_model_v1('../models/dlib_face_recognition_resnet_model_v1.dat')

            # variables to record broadcasting room info
            self.hosts = []
            self.left_face = None
            self.right_face = None
            self.mid_face_region = None
            self.background = None


            # cv2 default parameters for video process control
            self.CV_CAP_PROP_POS_MSEC = 0
            self.CV_CAP_PROP_POS_FRAMES = 1
            self.CV_CAP_PROP_POS_AVI_RATIO = 2
            self.CV_CAP_PROP_FRAME_WIDTH = 3
            self.CV_CAP_PROP_FRAME_HEIGHT = 4
            self.CV_CAP_PROP_FPS = 5
            self.CV_CAP_PROP_FOURCC = 6
            self.CV_CAP_PROP_FRAME_COUNT = 7
            self.CV_CAP_PROP_FORMAT = 8
            self.CV_CAP_PROP_MODE = 9
            print('Loaded video successfully')
        else:
            raise IOError('Video file not found: {}'.format(raw_video))
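The hard-coded indices above mirror the legacy C API; current cv2 builds expose the same properties directly as cv2.CAP_PROP_* constants. A minimal sketch (the video path is a placeholder):

import cv2

cap = cv2.VideoCapture('raw_video.mp4')  # placeholder path
fps = cap.get(cv2.CAP_PROP_FPS)
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
cap.release()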
Example #3
def faceCheckLoop():
    cam = cv2.VideoCapture(0)  # the original passed no source; 0 selects the default camera

    while True:
        ret, frame = cam.read()

        if not ret:
            raise IOError("Error getting camera frame...")

        # press f to exit the application
        key = cv2.waitKey(1)
        if key == ord('f'):
            print('[EXIT] Exiting Application...')
            break

        cv2.imwrite('img.jpg', frame)
        face = faceRecognise('img.jpg')
        if face is not None:
            userAlert(f'{face} is trying to enter your home')
        # the original saved img.jpg but checked img.png; use one name consistently
        if os.path.exists('img.jpg'):
            os.remove('img.jpg')
        else:
            raise IOError('The file was not saved properly...')

    cam.release()
Example #4
    def main(self):
        camera_feed = cv2.VideoCapture(0)  # the original passed no source; 0 selects the default camera

        while True:
            # read a frame from the camera feed
            ret, frame = camera_feed.read()
            # convert the frame from BGR to HSV
            convert_to_hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # convert the frame to grayscale, blur it, and detect edges
            convert_to_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            convert_to_gray = cv2.GaussianBlur(convert_to_gray, (5, 5), 0)
            edged = cv2.Canny(convert_to_gray, 35, 125)
Example #5
def main():
	videoCapture = cv2.VideoCapture("/Users/Haoyang/MyOutAvi.")

	# cv2.cv.CV_CAP_PROP_* is the removed OpenCV 2 namespace; use cv2.CAP_PROP_*
	fps = videoCapture.get(cv2.CAP_PROP_FPS)

	size = (int(videoCapture.get(cv2.CAP_PROP_FRAME_WIDTH)),
			int(videoCapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))

	success, frame = videoCapture.read()
	cv2.namedWindow("Test")
	while success:
		cv2.imshow("Test", frame)
		cv2.waitKey(1)  # imshow needs a waitKey call to refresh the window
		success, frame = videoCapture.read()
Example #6
def take_snapshot():
	number = random.randint(0, 100)
	videoCaptureObject = cv2.VideoCapture(0)
	result = True
	while result:
		ret, frame = videoCaptureObject.read()
		img_name = "img" + str(number) + ".png"
		cv2.imwrite(img_name, frame)
		start_time = time.time()  # the original forgot the call parentheses
		result = False
	videoCaptureObject.release()
	cv2.destroyAllWindows()
	print("snapshot taken")  # was dead code after the original's early return
	return img_name
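A hedged usage sketch, assuming the cv2, random, and time imports the function relies on:

import cv2
import random
import time

path = take_snapshot()
print('saved to', path)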
Example #7
def face_recognition_realtime(predict_func, video_path):
    assert os.path.exists(video_path), 'video_path does not exist'
    cap = cv2.VideoCapture(video_path)
    frame_idx = 0
    if not cap.isOpened():  # isOpened is a method; the original forgot the call
        cap.open(video_path)

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    videoWrite = cv2.VideoWriter("face_recognition_result.mp4",
                                 cv2.VideoWriter_fourcc(*'mp4v'), 30,
                                 (width, height))

    while cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break
Example #8
def takeImages():
    Id = input("Enter Your Id: ")
    name = input("Enter Your Name: ")

    if is_number(Id) and name.isalpha():
        cam = cv2.VideoCapture(0)
        harcascadePath = "haarcascade_default.xml"
        detector = cv2.CascadeClassifier(harcascadePath)
        sampleNum = 0

        while True:
            ret, img = cam.read()
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            faces = detector.detectMultiScale(gray,
                                              1.3,
                                              5,
                                              minSize=(30, 30),
                                              flags=cv2.CASCADE_SCALE_IMAGE)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (10, 159, 255), 2)
                sampleNum = sampleNum + 1
                # saving the captured face in the dataset folder TrainingImage
                cv2.imwrite(
                    "TrainingImage" + os.sep + name + "." + Id + '.' +
                    str(sampleNum) + ".jpg", gray[y:y + h, x:x + w])
                cv2.imshow('frame', img)
            if cv2.waitKey(100) & 0xFF == ord('q'):
                break
            elif sampleNum > 100:
                break
        cam.release()
        cv2.destroyAllWindows()
        res = "Images Saved for ID : " + Id + " Name : " + name
        print(res)
        row = [Id, name]
        with open("StudentDetails" + os.sep + "StudentDetails.csv",
                  'a+') as csvFile:
            writer = csv.writer(csvFile)
            writer.writerow(row)
        # the with-block closes the file; the original's extra close() was redundant
    else:
        if is_number(Id):
            print("Enter Alphabetical Name")
        if name.isalpha():
            print("Enter Numeric ID")
Example #9
def solve_sudoku_with_video(path_to_model):
    video = cv2.VideoCapture(0)
    end_frame = None
    live = True
    while end_frame is None:  # the original compared with != None, so the loop never ran
        ret, image = video.read()
        if not ret:
            print("Can't get input")
            break
        if live:
            cv2.imshow("Display", image)
        key = cv2.waitKey(1) & 0xFF
        if key == ord('d'):
            live = False
        elif key == ord('a'):
            live = True
        elif key == ord('s'):
            end_frame = image
    video.release()
    if end_frame is None:  # guard against writing a None frame after a read failure
        return
    frame_name = os.path.join("images", "Sudoku_frame.jpg")
    cv2.imwrite(frame_name, end_frame)
    solve_sudoku(frame_name, path_to_model)
Example #10
    def __init__(self, src=0):  # initialize the video stream and read the first frame
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()

        self.stopped = False
Example #11
parser.add_argument('name',
                    nargs=1,
                    help='video file or input video channel to process')
parser.add_argument('-s',
                    '--start',
                    type=int,
                    default=0,
                    help='start frame to save from, default 0')
parser.add_argument('-f',
                    '--frames',
                    type=int,
                    default=1,
                    help='number of frames to save, default 1')
# process command-line parameters
args = parser.parse_args()
if args.name[0] in ("0", "1", "2", "3"):
    cap = cv2.VideoCapture(int(args.name[0]))  # open camera stream
    fname = 'channel{}'.format(args.name[0])
else:
    cap = cv2.VideoCapture(args.name[0])  # open video file
    fname = os.path.splitext(args.name[0])[0]  # remove extension
n = args.start
m = args.frames
if not cap.isOpened():
    print("Error opening video file")
else:
    # process video
    i = 0
    while i < (n + m):
        ret, frame = cap.read()  # get next frame
        if ret:
            if n <= i < n + m:
Example #12
def video_capture(cube, idcam):
    capture = cv2.VideoCapture(idcam)
    while True:
        ret, frame = capture.read()  # the original referenced self.capture, but there is no self here
        cube.video_frame(idcam, frame)
Example #13
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import cv2
import sys
import thread  # Python 2 module; use _thread on Python 3
import time
from time import localtime

if __name__ == "__main__":
    while True:
        capture = cv2.VideoCapture(0)
        if not capture.isOpened():  # isOpen() does not exist; the method is isOpened()
            continue
        ret, image = capture.read()
        if not ret:
            continue
Example #14
import cv2

imgcapture = cv2.VideoCapture(0)
result = True

while result:
    ret, frame = imgcapture.read()
    cv2.imwrite("test.jpg", frame)
    result = False  # Python's boolean literal is False, not false
    print("Image Captured.....")

imgcapture.release()
Example #15
import cv2

face_cascade = cv2.CascadeClassifier(r'haarcascade_frontalface_default.xml')
#li=['a.jpg','b.jpg','d.jpg']
#for i in li:

video = cv2.VideoCapture('abcd.mp4')  # the stray second argument is unnecessary here
while True:
    check, frame = video.read()
    if not check:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    faces = face_cascade.detectMultiScale(gray,
                                          scaleFactor=1.05,
                                          minNeighbors=5)

    for x, y, w, h in faces:
        rect = cv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)

    print(faces)

    cv2.imshow("my image", gray)
    if cv2.waitKey(0) & 0xFF == ord('q'):
        break

# release once, outside the loop; the original released inside it
video.release()
cv2.destroyAllWindows()
Example #16
import cv2

# car image
#img_file ='car.jpg'

# video on which we are working (path left empty in the original)
video = cv2.VideoCapture('')


# pre-trained car classifier and human classifier
classifier_file_cars = 'cars.xml'
classifier_file_humans = 'haarcascade_fullbody.xml'

# create car and human classifiers
car_tracker = cv2.CascadeClassifier(classifier_file_cars)
human_tracker = cv2.CascadeClassifier(classifier_file_humans)

# run until video stops
while True:

    # create opencv image
    #img = cv2.imread('img_file')

    # read current frame
    (read_successful, frame) = video.read()


    # safe coding
    if read_successful:
        # convert to grayscale
        grayscaled_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
Example #17
if args.method in range(6):
    method = methods[args.method]
elif args.method == 99:
    spec = True
else:
    print('Invalid method id: {}'.format(args.method))
    sys.exit(3)
# open template and convert to grayscale
templ_gray = cv2.imread(args.template, cv2.IMREAD_GRAYSCALE)
if templ_gray is None:  # imread returns None on failure instead of raising
    print('Error opening template image {}'.format(args.template))
    sys.exit(1)
templ_h, templ_w = templ_gray.shape
if args.name[0] in ("0", "1", "2", "3"):
    cap = cv2.VideoCapture(int(args.name[0]))  # open camera stream; the original read sys.argv[3] here
    if args.fps:
        fps = 25
    t = datetime.datetime.now()
else:
    fn = args.name[0]
    if re.match('([a-zA-Z])*[0-9]_[0-9]{8}_[0-9]{6}', fn):
        l = fn.split('_')
        t = datetime.datetime(int(l[-2][0:4]), int(l[-2][4:6]),
                              int(l[-2][6:8]), int(l[-1][0:2]),
                              int(l[-1][2:4]), int(l[-1][4:6]))
        tformat = '%Y-%m-%d %H:%M:%S.%f'
    else:
        t = datetime.datetime(1970, 1, 1, 0, 0, 0)
        tformat = '%H:%M:%S.%f'
    cap = cv2.VideoCapture(fn)  # open video file
Example #18
    def openCameraUSB(self, num):
        self.cam = cv2.VideoCapture(num)
Example #19
import cv2
import numpy as np
import argparse
import imutils

#ap=argparse.ArgumentParser()
#ap.add_argument("-i", "--video", required=True, help="path to the input image")
#args= vars(ap.parse_args())
video = cv2.VideoCapture(0)
objekti_leidmine = cv2.createBackgroundSubtractorMOG2(history=200,
                                                      varThreshold=100)
while True:
    ret, frame = video.read()
    mask = objekti_leidmine.apply(frame)
    _, mask = cv2.threshold(mask, 245, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    detect = []
    for cnt in contours:
        pnd = cv2.contourArea(cnt)
        x, y, w, h = cv2.boundingRect(cnt)
        keskel = (x + w // 2, y + h // 2)  # contour centre; cv2.center does not exist

    cv2.imshow("video", frame)
    cv2.waitKey(1)  # imshow needs a waitKey call to refresh the window
Example #20
import zbar
from PIL import Image
import cv2
import numpy as np


scanner = zbar.ImageScanner()
scanner.parse_config('enable')

cam = cv2.VideoCapture(0)


while True:
    _, frame = cam.read()

    # zbar scans its own Image type, not a raw numpy array
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    height, width = gray.shape
    image = zbar.Image(width, height, 'Y800', gray.tobytes())
    scanner.scan(image)

    for symbol in image:  # iterate over the scanned image, not the frame
        tl, tr, bl, br = [item for item in symbol.location]
        rec = cv2.minAreaRect(np.array(symbol.location, dtype=np.float32))
Example #21
                else:
                    pt2 = (keypoints[i + 1][0], keypoints[i + 1][1])

                cv2.line(face_roi_bgr,
                         pt1,
                         pt2,
                         color,
                         thickness=5,
                         lineType=8,
                         shift=0)

        return face_roi_bgr


if __name__ == '__main__':
    cap = cv2.VideoCapture(cam_src)  # cam_src is defined elsewhere in the source

    while cap.isOpened():
        ret, frame = cap.read()

        if ret:
            detected_keypts = detect_keypoints(frame)
            detected_keypts = cv2.cvtColor(detected_keypts, cv2.COLOR_RGB2BGR)
            cv2.imshow("feed", detected_keypts)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break

    print("[INFO] Ending stream..")
    cap.release()
    cv2.destroyAllWindows()
Example #22
    def __init__(self, path):
        """Initialize the parameters of the Camera.

        :param path: path to the video file.
        """
        self.path = path
        self.capture = cv2.VideoCapture(self.path)  # the original hard-coded device 0 despite taking a path
Example #23
import numpy
import cv2

cam = cv2.VideoCapture(0)

while True:
	ret, b = cam.read()
	cv2.imshow('kilogram', b)
	if cv2.waitKey(10) & 0xFF == ord('q'):
		break
cam.release()  # release the capture, not the frame
cv2.destroyAllWindows()
Example #24
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 16:15:44 2020

@author: abasel
"""
import cv2 as cv
import numpy as np

cap = cv.VideoCapture(0)
cap2 = cv.VideoCapture(1)  # second camera; the original opened device 0 twice
i = 0
while True:
    # capture frame by frame
    ret1, image1 = cap.read()
    ret2, image2 = cap2.read()



    if ret1:
        cv.imshow("Cam 1", image1)
        if cv.waitKey(1) & 0xFF == ord('a'):
            cv.imwrite(r"C:\POC_Hand_Detection\datasets\stereo_hands\left\frame_" + str(i) + '.jpg', image1)
    if ret2:
        cv.imshow("Cam 0", image2)
        if cv.waitKey(1) & 0xFF == ord('a'):
            cv.imwrite(r"C:\POC_Hand_Detection\datasets\stereo_hands\right\frame_" + str(i) + '.jpg', image2)
            i += 1
    elif cv.waitKey(1) & 0xFF == ord('q'):
        break
# do not forget to release the stream and close the window
Example #25
import cv2
import numpy

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    cv2.imwrite("test.jpg", frame)  # imwrite needs a file extension, and img was undefined
cap.release()
cv2.destroyAllWindows()
Example #26
def FindRelativeDistance(contours):
	area = cv.contourArea(contours)  # the original referenced an undefined c and recomputed the area
	if area < 500:
		relativeDistance = 2.021 * math.exp(-0.0021 * area) + 0.07472 * math.exp(0.0031 * area)
	else:
		relativeDistance = 0.9576 * math.exp(-0.001674 * area) + 0.6411 * math.exp(-0.0001057 * area)
	return relativeDistance
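# Hedged usage sketch (assumes mask is a binary image produced earlier in the pipeline):
# cnts, _ = cv.findContours(mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
# print(FindRelativeDistance(max(cnts, key=cv.contourArea)))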

test.Start()

start_time = time.time()

frame_cnt = 0

#----------------------------------OBJECT DETECTION----------------------------------
cap = cv.VideoCapture(0)  # open the camera once; the original re-opened it inside the loop
while True:
	ret, frame = cap.read()
	BGR = cv.cvtColor(frame, cv.COLOR_BAYER_GB2BGR)
	src = BGR
	
	# Check if image is loaded fine
	if src is None:
		print('Error opening image!')
		print('Usage: simple_code.py [oCam -- default ' + default_file + '] \n')
		exit()
	
	# Define the lower and upper boundaries of the "green"
	# ball in the HSV color space
	GreenLower = (29, 86, 6)
	GreenUpper = (64, 255, 255)

	# blur the frame and convert it to the HSV color space
Example #27
# CNN OBJECT DETECTION (implementing a real-time algorithm from darkflow, using their dataset and pretrained CNN)
import cv2
from darkflow.net.build import TFNet  # importing the pre-trained CNN from the YOLO9000 creator
import numpy as np
import time

options = {  # options dictionary; the threshold determines the number of boxes
    'model': 'cfg/yolo.cfg',  # or 'cfg/tiny-yolo-voc-fs.cfg'
    'load': 'bin/yolo.weights',
    'threshold': 0.2,
    'gpu': 0.8
}

tfnet = TFNet(options)  # creating the TFNet object + passing options
colors = [tuple(240 * np.random.rand(3)) for _ in range(10)]  # random colors (10 of them) for the bounding boxes
capture = cv2.VideoCapture(0)  # capture object


while True:
    starttime = time.time()  # how long each frame takes
    ret, frame = capture.read()  # read() is a method; the original forgot the call
    if ret:  # if the capture device is still recording, continue to make predictions
        results = tfnet.return_predict(frame)  # predict only on valid frames
        for color, result in zip(colors, results):  # one color per result, looping over predictions
            # pull out the top-left and bottom-right coordinates and the confidence
            label = result['label']
            confidence = result['confidence']
            font = '{}: {:.0f}%'.format(label, confidence * 100)  # display text; confidence must be read first
            top_left = (result['topleft']['x'], result['topleft']['y'])
            bottom_right = (result['bottomright']['x'], result['bottomright']['y'])
            frame = cv2.rectangle(frame, top_left, bottom_right, color, 5)
Example #28
    show_image(thresh, "thresh")
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
    closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    closed = cv2.erode(closed, None, iterations=4)
    closed = cv2.dilate(closed, None, iterations=4)

    closed_gray = cv2.cvtColor(closed, cv2.COLOR_BGR2GRAY)
    show_image(closed_gray, "gray_closed")
    (img2, cnts, _) = cv2.findContours(closed_gray.copy(), cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
    c = sorted(cnts, key=cv2.contourArea, reverse=True)[0]
    rect = cv2.minAreaRect(c)
    box = np.int0(cv2.boxPoints(rect))
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 3)
    cv2.imshow("Frame", frame)
    cv2.waitKey(0)


if __name__ == "__main__":
    ret = True
    if VIDEO_PATH:  # check if not empty and valid path
        while ret:
            vid = cv2.videoCapture(VIDEO_PATH)
            ret, frame = vid.read()
            detect_barcode(frame)
    elif IMG_PATH:
        frame = cv2.imread(IMG_PATH, 1)
        detect_barcode(frame)
    else:
        print("Image and video path not valid")
Example #29
from collections import deque
import argparse
import time

import cv2
import imutils
import numpy as np
from imutils.video import VideoStream

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-b", "--buffer", type=int, default=64, help="max buffer size")
args = vars(ap.parse_args())

greenLower = (29, 86, 6)
greenUpper = (64, 255, 255)
pts = deque(maxlen=args["buffer"])

if not args.get("video", False):
    vs = VideoStream(src=0).start()

else:
    vs = cv2.VideoCapture(args["video"])

time.sleep(2.0)

while True:
    frame = vs.read()
    # VideoCapture.read() returns (ret, frame); VideoStream.read() returns the frame itself,
    # so unwrap before touching the pixels
    frame = frame if not args.get("video", False) else frame[1]

    if frame is None:
        break

    frame = cv2.flip(frame, 1)
    #frame = np.uint8(np.clip((frame - 5), 0, 255))  # brightness reduction; the original notes this step was buggy

    frame = imutils.resize(frame, width=600)
    blurred = cv2.GaussianBlur(frame, (11, 11), 0)
Example #30
import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
images_path = r'D:\Files\python\Face Recognition\venv\images\Jun_Ji_Hyun'  # raw string so the backslashes survive
#Read the input image

#img = cv2.imread('images/Jun_ji_Hyun/2.jpg')
cap = cv2.VideoCapture("filename")

while cap.isOpened():
    ret, img = cap.read()  # read() returns a (ret, frame) pair
    if not ret:
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.5, 5)

    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)

    #Display the output
    cv2.imshow('img', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()