Code example #1
0
import os
import sys
import json
from androidhelper.sl4a import Android
import glob
import time
import numpy as np
import urllib
import cv as cv2

import numpy as np
import cv as cv2
#from PIL import Image as img
#Initialize Android
droid = Android()
faceCascade = cv2.CascadeClassifier("/sdcard/cas.xml")
import subprocess


def draw_rectangle(img, rect):
    """Draw a 2px green bounding box for *rect* = (x, y, w, h) onto *img* in place."""
    x, y, w, h = rect
    top_left = (x, y)
    bottom_right = (x + w, y + h)
    cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), 2)


def draw_text(img, text, x, y):
    """Render *text* at pixel (x, y) on *img* in a plain green Hershey font."""
    anchor = (x, y)
    green = (0, 255, 0)
    cv2.putText(img, text, anchor, cv2.FONT_HERSHEY_PLAIN, 1.5, green, 2)


def detect_face(path, layout):
    """Load the image at *path* for face detection.

    NOTE(review): this function appears truncated by the scrape -- the visible
    body only loads the image and reads its dimensions; the detection logic
    that presumably follows is missing, and *layout* is never used in the
    portion shown here.
    """
    image = cv2.imread(path)
    height, width = image.shape[:2]
# Face Recognition

# Importing the libraries
import cv as cv2
# Loading the cascades
# NOTE(review): paths are relative to the working directory, and load
# failures are not checked here -- a missing XML file only surfaces later
# when detectMultiScale is called.
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')


# Defining a function that will do the detections
def detect(gray, frame):
    """Draw face (blue) and eye (green) boxes on *frame* and return it.

    *gray* is the grayscale copy used for detection; rectangles are drawn on
    the colour *frame* in place. Eyes are searched only inside each detected
    face region.
    """
    for (fx, fy, fw, fh) in face_cascade.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), (255, 0, 0), 2)
        face_gray = gray[fy:fy + fh, fx:fx + fw]
        face_color = frame[fy:fy + fh, fx:fx + fw]
        for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(face_gray, 1.1, 3):
            cv2.rectangle(face_color, (ex, ey), (ex + ew, ey + eh),
                          (0, 255, 0), 2)
    return frame


# Doing some Face Recognition with the webcam.
# FIX(review): the scraped snippet was cut off after the `if` on the last
# line, leaving it with no body (a syntax error). Completed here with the
# conventional break-on-'q' plus camera release and window cleanup.
video_capture = cv2.VideoCapture(0)
while True:
    _, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    canvas = detect(gray, frame)
    cv2.imshow('Video', canvas)
    # waitKey also pumps the GUI event loop; quit when 'q' is pressed.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
Code example #3
0
import cv as cv
import numpy as np
from helpers import butter_bandpass_filter
from helpers import detect_face
from helpers import feature_extraction
from scipy.interpolate import interp1d
from sklearn.decomposition import PCA


# Load the video and the face cascade; both failures are reported explicitly.
cap = cv.VideoCapture('somevideo.mp4')

# FIX: `not cap.isOpened()` replaces the non-idiomatic `== False` comparison
# (and drops the redundant parentheses / trailing whitespace).
if not cap.isOpened():
    print("Error opening video stream or file")

face_cascade = cv.CascadeClassifier()
if not face_cascade.load('./haarcascade_frontalface_alt.xml'):
    print('--(!)Error loading face cascade')
    exit(0)


# Shi-Tomasi corner-detection parameters (passed to goodFeaturesToTrack).
feature_params = {
    'maxCorners': 100,
    'qualityLevel': 0.3,
    'minDistance': 7,
    'blockSize': 7,
}

# Parameters for Lucas-Kanade optical flow.
lk_params = {
    'winSize': (15, 15),
    'maxLevel': 2,
    'criteria': (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03),
}
# First frame
Code example #4
0
File: main.py  Project: w4llcroft/qpython
# Start the camera for interactive image capture (SL4A `droid` facade).
cam_cap = '/sdcard/capture.jpg'
droid.cameraInteractiveCapturePicture(cam_cap)

# Resize the image for faster processing.
# NOTE(review): `Image` (PIL) and `sleep` are not imported in the visible
# portion of this snippet -- presumably the scrape dropped those imports;
# confirm against the original main.py.
img = Image.open(cam_cap)
reduced_percent = 0.1
out = img.resize([int(reduced_percent * s) for s in img.size])
imagePath = "/sdcard/image.jpg"
out.save(imagePath)
sleep(3)

# Set the haarcascade file path
cascPath = "/sdcard/haarcascade_frontalface_default.xml"

# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)

# Read the image and convert to grayscale for detection.
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Detect faces in the image.
# FIX: `cv2.cv.CV_HAAR_SCALE_IMAGE` is the OpenCV 1.x constant; the `cv2.cv`
# namespace was removed in OpenCV 3, where it is spelled
# `cv2.CASCADE_SCALE_IMAGE`.
faces = faceCascade.detectMultiScale(gray,
                                     scaleFactor=1.2,
                                     minNeighbors=5,
                                     minSize=(30, 30),
                                     flags=cv2.CASCADE_SCALE_IMAGE)

print("Found {0} faces!".format(len(faces)))

# Draw a rectangle around the faces
Code example #5
0
import cv
import time
import numpy as np

# Car detection on a video file with a Haar cascade.
cv_car = cv.CascadeClassifier(
    r'C:\Users\DEBIPRASAD\Desktop\Projetc Work\ComputerVision-Projects-master\CarPedestrianDetection\cascades\haarcascade_car.xml'
)
capture = cv.VideoCapture(
    r'C:\Users\DEBIPRASAD\Desktop\Projetc Work\ComputerVision-Projects-master\CarPedestrianDetection\files\cars.avi'
)

while capture.isOpened():
    response, frame = capture.read()
    if response:
        gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        cars = cv_car.detectMultiScale(gray, 1.2, 3)
        for (x, y, w, h) in cars:
            cv.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 0), 3)
        # Show the frame once per iteration, after every box is drawn
        # (the original re-displayed the window inside the rectangle loop
        # and skipped frames with no detections).
        cv.imshow('cars', frame)
        # FIX: the method is `cv.waitKey`; `cv.waitkey` raises AttributeError
        # on the first frame, so the original loop could never run.
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
capture.release()
cv.destroyAllWindows()