Code Example #1
def main():
    '''
    Arguments to be set:
        showCam : whether to show the camera preview window.
    '''
    print("Enter main() function")
    
    if args.testImage is not None:
        img = cv2.imread(args.testImage)
        faceCoordinates = fd.face_detect(img)
        face_img = fd.face_crop(img, faceCoordinates, face_shape=face_shape)
        # cv2.imshow(windowName, face_img)
        cv2.imwrite('testing.png', face_img)
        im = image.load_img('testing.png', target_size=(128, 128))
        im = image.img_to_array(im)
        im = np.expand_dims(im, axis=0)

        result = model.predict(im)
        index = np.argmax(result)
        print(emotions[index])
        sys.exit(0)

    showCam = 1

    capture = getCameraStream()

    if showCam:
        cv2.startWindowThread()
        cv2.namedWindow(windowName, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(windowName, cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    
    display_and_classify(capture)
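
getCameraStream() is not defined in this snippet. A minimal sketch of what it plausibly does, assuming it simply wraps cv2.VideoCapture on the default device (the error check is an addition, not the original code):

def getCameraStream():
    # Open the default camera (device 0); fail fast if none is available.
    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        raise RuntimeError("Could not open camera device 0")
    return capture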
Code Example #2
def main():
    image = "testing-pictures/close_left.jpg"
    image2 = "testing-pictures/close_right.jpg"

    #far_left2 + far_right2: 0.368
    #everything else: 0.127
    camDist = 0.127
    picLength = 600
    picHeight = 1134
    LRangle = 30.26
    UDangle = 30.47

    photoOneCoords = face_detection.face_detect(image)
    photoTwoCoords = face_detection.face_detect(image2)
    combinedCoords = []
    print(photoOneCoords)
    print(photoTwoCoords)

    for i in range(len(photoOneCoords)):
        combinedCoords.append(
            calcDist.positionToCam(photoOneCoords[i], photoTwoCoords[i],
                                   camDist, picLength, picHeight, LRangle,
                                   UDangle))

    print(
        "These are the 3D positions of the people in this picture with respect to the cameras:"
    )
    xyz = ["x", "y", "z"]
    for i in range(len(combinedCoords)):
        print("Person " + str(i + 1) + ": ")
        for j in range(len(combinedCoords[i])):
            print(xyz[j] + ": " + str(combinedCoords[i][j]))
    distance = input("Enter a distance for social distancing (in meters): ")
    distance = float(distance)

    # Map each face's image coordinates to its 3D position. Tuples are used
    # as keys because lists are not hashable, and the name avoids shadowing
    # the built-in dict.
    coordsByFace = {}
    for i in range(len(photoOneCoords)):
        coordsByFace[tuple(photoOneCoords[i])] = combinedCoords[i]
    notDistanced = calcDist.checkAll(coordsByFace, distance)

    if notDistanced:
        print("Not distanced:")
        face_detection.draw_Rect(image)
    else:
        print("Distanced!")
Code Example #3
    def videoLoop(self):
        try:
            # keep looping over frames until we are instructed to stop
            i = 0
            while not self.stopEvent.is_set():
                # grab the frame from the video stream and resize it to
                # have a maximum width of 300 pixels
                flag, self.frame = vs.read()

                if i == 0:
                    # Skip face detection on the very first frame.
                    i += 1
                    faceCoordinates = None
                else:
                    try:
                        faceCoordinates = fd.face_detect(self.frame)
                        startX = faceCoordinates[0]
                        startY = faceCoordinates[1]
                        endX = faceCoordinates[2]
                        endY = faceCoordinates[3]
                        if startX is not None:
                            image = fd.draw_rect(self.frame, startX, startY,
                                                 endX, endY)
                            face_img = fd.face_crop(self.frame,
                                                    faceCoordinates,
                                                    face_shape=(128, 128))
                            im = img_to_array(face_img)
                            im = np.expand_dims(im, axis=0)
                            result = self.model.predict(im)
                            # Update the seven per-emotion probability labels;
                            # use j so the frame counter i is not clobbered.
                            for j in range(7):
                                self.resultLabel[j].config(
                                    text=str(round(result[0][j], 4)))
                    except Exception:
                        # Detection failed for this frame; just show the video.
                        pass

                self.frame = imutils.resize(self.frame, width=300)
                self.frame = cv2.flip(self.frame, 1)
                # OpenCV represents images in BGR order; however PIL
                # represents images in RGB order, so we need to swap
                # the channels, then convert to PIL and ImageTk format
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(image)
                image = ImageTk.PhotoImage(image)

                # if the panel is not None, we need to initialize it
                if self.panelA is None:
                    self.panelA = tki.Label(image=image)
                    self.panelA.image = image
                    self.panelA.pack(side="left", padx=10, pady=10)
                    self.panelA.place(x=20, y=20)
                # otherwise, simply update the panel
                else:
                    self.panelA.configure(image=image)
                    self.panelA.image = image

        except RuntimeError:
            print("[INFO] caught a RuntimeError")
Code Example #4
def call_processing():
    try:
        result_c_img = face_detection.face_detect()
    except Exception as e:
        print('Error:', e)
    else:
        print("finish (no error)")
Code Example #5
File: main.py  Project: sammiee5311/facial_expression
    def __init__(self):
        super(Webcam, self).__init__()
        self._stop_event = threading.Event()
        self.is_running = True
        self.model = load_model('save_model1.h5')

        self.FACE = face_detect("./file/haarcascade_frontalface_alt.xml")  # haarcascade_frontalface_alt path
        self.face_expression = ['angry', 'disgust', 'scary', 'happy', 'neutral', 'sad', 'surprise']
        self.face_expression_cnt = defaultdict(int)
        self.mood = 'None'
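
Only __init__ is shown. Since the class pairs a threading.Event with an is_running flag, its run/stop methods presumably look something like the sketch below (the bodies are assumptions modelled on Code Example #7, assuming cv2 is imported):

    def stop(self):
        # Ask the capture loop to finish cleanly.
        self.is_running = False
        self._stop_event.set()

    def run(self):
        cap = cv2.VideoCapture(0)
        while self.is_running and not self._stop_event.is_set():
            success, img = cap.read()
            if not success:
                break
            faces, flag = self.FACE.detect_face(img)
            # ... classify each face with self.model and update
            # self.face_expression_cnt / self.mood ...
        cap.release()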
Code Example #6
def display_and_classify(capture):
    i = 1
    while True:
        flag, frame = capture.read()
        """
        cv2.imshow(windowName+"s", frame)
        if cv2.waitKey(1) == 27:
                break
        """
        if i == 1:
            # First frame: no prediction yet, start with a default index.
            index = 4
            i -= 1
            faceCoordinates = None
        
        try:
            faceCoordinates = fd.face_detect(frame)
            startX = faceCoordinates[0]
            startY = faceCoordinates[1]
            endX = faceCoordinates[2]
            endY = faceCoordinates[3]
            refreshFrame(frame, startX, startY, endX, endY, index)
        except Exception:
            # No face found: redraw the frame with the "no face" index.
            refreshFrame(frame, None, None, None, None, 7)
        
        if faceCoordinates is not None:
            face_img = fd.face_crop(frame, faceCoordinates, face_shape=face_shape)
            # cv2.imshow(windowName, face_img)
            cv2.imwrite('testing.png', face_img)
            im = image.load_img('testing.png', target_size=(128, 128))
            im = image.img_to_array(im)
            im = np.expand_dims(im, axis=0)

            result = model.predict(im)
            index = np.argmax(result)
            print(emotions[index])
            print("")

        # Poll for Esc on every frame, not only when a face was found,
        # so the preview window stays responsive.
        if cv2.waitKey(1) == 27:
            break  # esc to quit
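
refreshFrame is not defined in this snippet. A minimal sketch of what it plausibly does, drawing the detection box and emotion label onto the frame (the signature comes from the calls above; the body, the emotions/windowName globals from Code Example #1, and treating index 7 as the "no face" case are assumptions):

def refreshFrame(frame, startX, startY, endX, endY, index):
    if startX is not None:
        # Box the detected face and label it with the current emotion.
        cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0), 2)
        if index < len(emotions):
            cv2.putText(frame, emotions[index], (startX, startY - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.imshow(windowName, frame)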
Code Example #7
File: main.py  Project: youmn327/dataset
import cv2
import csv
import numpy as np
from collections import defaultdict
from keras.models import load_model

#############################################
from face_detection import face_detect
from music_player import Music_Player
from weather import Weather_Api

#############################################

model = load_model('./file/save_model1.h5')

FACE = face_detect("./file/haarcascade_frontalface_alt.xml")  # path to the haarcascade_frontalface_alt file
WTH = Weather_Api()

face_expression = ['화남', '역겨움', '무서움', '행복', '무표정', '우울', '놀람']  # angry, disgust, fear, happy, neutral, sad, surprise
face_expression_cnt = defaultdict(int)

cap = cv2.VideoCapture("./file/video.mp4")  # video path

while True:
    success, img = cap.read()
    if not success:
        break
    faces, flag = FACE.detect_face(img)
    for person in faces:
        # cv2.imshow('person', person)
        person = np.asarray(person)
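
The snippet breaks off inside the per-face loop. A plausible continuation, modelled on the fields set up above (the BGR crop and the 48x48 grayscale input shape are assumptions about the model):

        # Resize the face crop to the model's input, normalise, classify,
        # and tally the predicted expression.
        person = cv2.cvtColor(person, cv2.COLOR_BGR2GRAY)
        person = cv2.resize(person, (48, 48))
        person = person.reshape(1, 48, 48, 1) / 255.0
        pred = model.predict(person)
        face_expression_cnt[face_expression[np.argmax(pred)]] += 1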
Code Example #8
import cv2
import numpy as np

from vid2photos import vid2photos
from face_detection import face_detect

#from pixel2degree import pixel2degree

if __name__ == "__main__":
    frameCount = vid2photos()
    # initializing the list for the centroids
    centroidPerFrame = [0] * (frameCount - 1)

    for i in range(1, frameCount - 1):
        centroidPerFrame[i] = face_detect("image" + str(i) + ".jpg")
    # list.remove() mutates in place and returns None, so slice off the
    # unused leading slot instead of assigning the return value.
    centroidPerFrame = centroidPerFrame[1:]
    print(centroidPerFrame)
    # Drop frames where no face was detected.
    cleanList = [x for x in centroidPerFrame if x != [[], []]]
    print(cleanList)

    #degree = pixel2degree (cleanList, "image1.jpg")
    #print degree

    # use the value of centroidPerFrame and parse it (need some parser function)
    # then use the parsed value, rearrange it prettily, and convert it to some
    # angle given the distance (CONST) (need a function for this too)
    # then
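
pixel2degree is commented out above. A minimal sketch of such a conversion, assuming each centroid entry is a pair of x/y coordinate lists (inferred from the [[], []] check) and a known horizontal field of view; the horizontal_fov value and all names here are assumptions:

def pixel2degree(centroids, image_path, horizontal_fov=60.0):
    # Map each centroid's horizontal offset from the image centre onto
    # the camera's field of view, assuming a linear pixel-to-angle model.
    width = cv2.imread(image_path).shape[1]
    degrees = []
    for xs, ys in centroids:
        degrees.append([(x - width / 2) / width * horizontal_fov for x in xs])
    return degrees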
Code Example #9
import sys
sys.path.append('path to site-packages')
from skimage import data, io, filters, color, img_as_ubyte
from sklearn import preprocessing
import numpy as np
import matplotlib.pyplot as plt
import scipy
import Cython

# Import the shared .so library built from C++
import face_detection

input_path = '/media/docker_shared/mono_develop/img/getImg.jpg'
output_path = '/media/docker_shared/mono_develop/img/kansei.jpg'
# /Users/ruimac/opt/anaconda3/bin/python

try:
    result_c_img = face_detection.face_detect()
except Exception as e:
    print('Error:', e)
else:
    print("finish (no error)")
Code Example #10
def result():
    if request.method == 'POST':
        result = request.form
        img = face_detect(result['url'])
        img.save("templates/img.jpg")
        return render_template("result.html", result=result)
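
This view is shown without its surrounding Flask scaffolding. A minimal sketch of the missing context; the route path, the run block, and the face_detection import location are assumptions:

from flask import Flask, request, render_template
from face_detection import face_detect

app = Flask(__name__)

@app.route('/result', methods=['POST'])  # hypothetical route path
def result():
    if request.method == 'POST':
        result = request.form
        # face_detect is assumed to return a PIL image for the given URL.
        img = face_detect(result['url'])
        img.save("templates/img.jpg")
        return render_template("result.html", result=result)

if __name__ == '__main__':
    app.run(debug=True)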