def get_video_rtsp(rtsp):
    """Display an RTSP video stream until it ends or Esc is pressed.

    Args:
        rtsp: RTSP URL of the stream to open.

    Example:
        >>> rtsp = "rtsp://192.168.1.168:80/0"
        >>> get_video_rtsp(rtsp)
    """
    import cv2
    # Bug fix: the original opened an undefined name `filename` through the
    # non-existent cv2.videocapture; open the `rtsp` argument instead.
    capture = cv2.VideoCapture(rtsp)
    try:
        while True:
            ret, frame = capture.read()
            if frame is None:  # stream ended or read failed
                break
            cv2.imshow('img', frame)
            if cv2.waitKey(1) == 27:  # Esc quits
                break
    finally:
        # Release the capture and windows even if imshow/read raises.
        capture.release()
        cv2.destroyAllWindows()
import cv2
import numpy as  np
# Collect up to 60 face samples from the default camera and save the
# grayscale crops under dataset/ as user.<id>.<n>.jpg.
faceDetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cam = cv2.VideoCapture(0)          # bug fix: cv2.videocapture does not exist
user_id = input('enter user id')   # bug fix: raw_input is Python 2 only; avoid shadowing builtin id()
sampleNum = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # bug fix: cvtcolor -> cvtColor
    faces = faceDetect.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        sampleNum = sampleNum + 1  # bug fix: was dedented out of the for body (IndentationError)
        # Bug fix: imwrite was called without an image argument; save the
        # gray face crop (the standard pattern for this tutorial snippet).
        cv2.imwrite("dataset/user." + str(user_id) + "." + str(sampleNum) + ".jpg",
                    gray[y:y + h, x:x + w])
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        cv2.waitKey(100)  # brief pause between saved samples
    cv2.imshow("face", img)
    cv2.waitKey(10)
    if sampleNum > 60:  # stop after 60 samples
        break
cam.release()
cv2.destroyAllWindows()
# Example #3
import cv2
# Load the Haar cascade face classifier.
casclf = cv2.CascadeClassifier('face.xml')
# Open camera index 2 (third attached camera).
cap = cv2.VideoCapture(2)  # bug fix: cv2.videocapture does not exist
while cap.isOpened():
    status, frame = cap.read()
    # Apply the classifier to the live frame.
    # Bug fix: detectMultiscale -> detectMultiScale.
    face = casclf.detectMultiScale(frame, 1.13,
                                   5)  # classifier tuning parameters
    # print(face)
    # Bug fix: detectMultiScale yields (x, y, w, h); the original unpacked
    # them as (x, y, h, w), drawing rectangles with swapped extents.
    for x, y, w, h in face:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        # only the face region:
        # facedata = frame[y:y + h, x:x + w]
        # cv2.imshow('f', facedata)
    cv2.imshow('face', frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):  # bug fix: waitkey -> waitKey
        break

cv2.destroyAllWindows()
cap.release()
# Example #4
                # NOTE(review): fragment — the enclosing function's `def`
                # (and where x, y, w, h, cX, cY, centerX, centerY, font,
                # frame, contours come from) is not visible in this chunk.
                # Put the coordinates of contour on the screen and have them move with the object
                cv2.putText(frame, "X: " + format(x),
                            (int(x) - 60, int(y) + 50), font, 0.6,
                            (155, 250, 55), 2, cv2.LINE_AA)
                cv2.putText(frame, "Y: " + format(y),
                            (int(x) - 60, int(y) + 70), font, 0.6,
                            (155, 255, 155), 2, cv2.LINE_AA)
                cv2.putText(frame, "W: " + format(w),
                            (int(x) - 60, int(y) + 90), font, 0.6,
                            (215, 250, 55), 2, cv2.LINE_AA)
                cv2.putText(frame, "H: " + format(h),
                            (int(x) - 60, int(y) + 110), font, 0.6,
                            (155, 250, 155), 2, cv2.LINE_AA)

                # Accumulate contour centers into the running totals.
                cX = cX + centerX
                cY = cY + centerY

        # With exactly two contours, report the midpoint of their centers.
        if len(contours) == 2:
            cX = cX / 2
            cY = cY / 2

        return len(contours), cX, cY


if __name__ == '__main__':  # bug fix: was 'main', which never matches __name__
    # Bug fix: cv2.videocapture does not exist, and a VideoCapture object
    # cannot be tuple-unpacked — frames come from .read(), which returns
    # (ret, frame) in that order (the original also swapped them).
    cap = cv2.VideoCapture('../video/GOPRO281.mp4')
    ret, frame = cap.read()
    tasks = Tasks(frame)
    data = tasks.findObject("qualGate")
# img, (x, y), (width, height), (color in BGR), border_thickness
# NOTE(review): `img` and `font` are defined in a part of this snippet that
# is not shown here — confirm they exist before running this fragment.
cv2.rectangle(img, (10, 10), (150, 150), (255, 0, 0), 4)

# img, text, (x, y), font face, scale (font size), color in BGR, thickness
cv2.putText(img, "Bahubali", (20, 20), font, 1, (255, 0, 0), 2)

while True:
    cv2.imshow('image', img)
    # 1 ms of delay for window switching
    if cv2.waitKey(1) == 27:  # Esc quits
        break

cv2.destroyAllWindows()

# Capture frames from the default camera and save each one to disk.
capture = cv2.VideoCapture(0)  # bug fix: cv2.videocapture does not exist

i = 0

while True:
    ret, img = capture.read()
    if ret:
        cv2.imshow('result', img)
        i += 1
        # Bug fix: imwrite needs a file extension to select an encoder.
        cv2.imwrite('img_{}.jpg'.format(i), img)
        if cv2.waitKey(1) == 27:  # bug fix: waitkey -> waitKey; Esc quits
            break
    else:
        print('Camera not Working')

cv2.destroyAllWindows()
# Example #6
import cv2

# Record roughly 10 seconds of webcam video to MyOutputVid.avi.
cameracapture = cv2.VideoCapture(0)  # bug fix: cv2.videocapture does not exist
fps = 30
# Bug fix: the CAP_PROP_* constants live on the cv2 module; the bare names
# raised NameError.
size = (int(cameracapture.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cameracapture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# Bug fix: cv2.videoWriter -> cv2.VideoWriter.
videowriter = cv2.VideoWriter('MyOutputVid.avi',
                              cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps,
                              size)
success, frame = cameracapture.read()
frames_remaining = 10 * fps - 1  # fixed frame budget: ~10 s at `fps`
while success and frames_remaining > 0:
    videowriter.write(frame)
    success, frame = cameracapture.read()
    frames_remaining = frames_remaining - 1

videowriter.release()  # bug fix: the writer was never released, so the file may not be flushed
cameracapture.release()
# an appropriate timer for capturing the frames is the best way to proceed
# Example #7
# READING AND UNDERSTANDING A QR CODE
import cv2
from pyzbar.pyzbar import decode

capture = cv2.VideoCapture(0)  # bug fix: cv2.videocapture does not exist; default camera

NETDATA = None  # last decoded payload (None until a code is seen)
while True:
    _, instantpic = capture.read()

    decodeddata = decode(instantpic)
    # Bug fix: decode() returns an empty list for frames without a code, so
    # unconditional indexing raised IndexError. Field 0 of a Decoded result
    # is its data payload (the text part only).
    if decodeddata:
        NETDATA = decodeddata[0][0]

    cv2.imshow("qr", instantpic)
    key = cv2.waitKey(5)

    # Bug fix: `ord(POWER HANDLE)` was a SyntaxError. 'q' quits here;
    # TODO(review): wire this to the actual "power handle" red switch input.
    if key == ord('q'):
        break

# to read out the decoded text

from tkinter import *  # bug fix: original imported the private _tkinter C module
import pyttsx3

engine = pyttsx3.init()
engine.say(NETDATA)

engine.runAndWait()

# FOR CAPTURING EMERGENCY PIC AND SAVING
import time
emergencypic = cv2.VideoCapture(0)  # bug fix: cv2.videocapture does not exist
# Example #8
    # NOTE(review): fragment — the enclosing function's `def` (and the
    # definitions of natural_sort, threaded_flow, folder, row_stride,
    # col_stride, ThreadPool, tqdm) are not visible in this chunk.
    files = natural_sort(glob.glob(folder))

    assert len(files) > 0, "Folder empty, make sure to provide a correct path"

    print(f'loading {len(files)} files')
    # Pair each file with its successor; the stride arguments are repeated
    # once per pair so each worker gets a self-contained task tuple.
    map_length = len(files[1:])
    mapped = list(
        zip(files, files[1:], [row_stride] * map_length,
            [col_stride] * map_length))
    # Fan the pairs out over 8 worker threads, with a progress bar.
    with ThreadPool(processes=8) as p:
        result = list(tqdm(p.imap(threaded_flow, mapped), total=len(mapped)))
        return result


if __name__ == '__main__':
    # Play back the training video and accumulate per-frame average flow.
    cap = cv.VideoCapture('video/train.mp4')  # bug fix: cv.videocapture does not exist
    ret, frame1 = cap.read()

    flows = []
    while cap.isOpened():  # bug fix: isopened -> isOpened
        ret, frame2 = cap.read()
        if not ret:
            break

        # NOTE(review): frame1 is never advanced, so every flow is computed
        # against the very first frame — confirm whether a trailing
        # `frame1 = frame2` was intended.
        flow = calc_flow(frame1, frame2)
        im = draw_flow(frame1, flow)
        avg_flow = calc_avg_flow(flow)
        im = draw_avg_flow(frame1, avg_flow)
        flows.append(avg_flow)
        cv.imshow('avg flow', im)
        cv.waitKey(30)  # bug fix: waitkey -> waitKey