Code Example #1
File: main.py Project: tanmayshankar/InViSyBlE
def runInViSyBlE():
    SIFTObjectDetector.loadDatabase("/home/venkat/Documents/Projects/InViSyBle/ObjectDatabase/")
    FaceRecognizer.loadDatabase("/home/venkat/Documents/Projects/InViSyBle/FaceDatabase/")

    #cap = cv2.VideoCapture(0)
    #getFrame = GetFrame()
    #getBWFrame = GetBWFrame()
    scheduler = Scheduler()
    scheduler.updateComputationList([GetFrame, SIFTObjectDetector.SIFTObjectDetector, FaceDetector, FaceRecognizer.FaceRecognizer])

    while(True):#cap.isOpened()):
        #ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #frame = getFrame((0,0), None)
        #frame, frameId = getBWFrame(frame, None)
        res = scheduler.compute()
        if None in res:
            continue
        frame, frameId = res[0]
        detectedObjects = res[1]
        detectedFaces = res[3]

        #draw face rectangles
        faces = res[2]
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)

        cv2.imshow('frame',frame)
        print detectedObjects, detectedFaces
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    #cap.release()
    cv2.destroyAllWindows()
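Note: Scheduler, GetFrame, SIFTObjectDetector, FaceDetector, and FaceRecognizer are InViSyBlE's own classes and are not shown here. As a rough, hypothetical sketch of the contract the loop above relies on (each stage is a callable and compute() returns one result slot per stage, so callers can check "None in res" and index res[0]..res[3]):

class PipelineScheduler(object):
    """Hypothetical stand-in for the project's Scheduler, for illustration only."""

    def __init__(self):
        self.stages = []

    def updateComputationList(self, stages):
        self.stages = stages

    def compute(self):
        # Run each stage on the previous stage's output and collect every
        # intermediate result, one slot per stage.
        results, value = [], None
        for stage in self.stages:
            value = stage(value)
            results.append(value)
            if value is None:  # a failed stage makes "None in res" true
                break
        return results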
Code Example #2
def updateRecognizer():
    #    with open(FaceRecognizer.pklpath, 'rb') as f:
    #        FaceRecognizer.persons = pickle.load(f)
    #    saveData()

    if len(FaceProcessor.persons) <= 1:
        return
    FaceRecognizer.le = FaceRecognizer.LabelEncoder().fit(
        list(FaceProcessor.persons.keys()))
    FaceRecognizer.clf = FaceRecognizer.train()
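Note: FaceRecognizer.LabelEncoder behaves like scikit-learn's LabelEncoder (an assumption based on the fit(list(...keys())) call above). A minimal sketch of what the fitted encoder provides:

# Assumes scikit-learn; FaceRecognizer.LabelEncoder is presumably this class.
from sklearn.preprocessing import LabelEncoder

le = LabelEncoder().fit(['alice', 'bob', 'carol'])
print(le.classes_)                 # ['alice' 'bob' 'carol']
print(le.transform(['bob']))       # [1]  -- integer label fed to the classifier
print(le.inverse_transform([2]))   # ['carol']  -- back to the person's name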
Code Example #3
def train(file):
    print('C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\data\\' + file +
          '\\')
    faces, faceID = fr.labels_for_training_data(
        'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\data\\' + file +
        '\\')
    print(faces)
    face_recognizer = fr.train_classifier(faces, faceID)

    face_recognizer.save(
        'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\trainedyml\\' +
        file + '.yml')
    print('C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\data\\' + file +
          '\\')
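Note: the same long Windows path is concatenated three times above. A small refactor sketch using os.path.join (DATA_ROOT is a hypothetical constant; fr is the same helper module):

import os

DATA_ROOT = 'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize'

def train(file):
    data_dir = os.path.join(DATA_ROOT, 'data', file) + '\\'
    print(data_dir)
    faces, faceID = fr.labels_for_training_data(data_dir)
    face_recognizer = fr.train_classifier(faces, faceID)
    face_recognizer.save(os.path.join(DATA_ROOT, 'trainedyml', file + '.yml'))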
Code Example #4
def TrainManyTestOne():
    """
    Training:
    Train with many images of a subject.
    Many subjects will be in the model.
    Testing:
    Test with one image of each subject.
    :return:
    """
    os.remove(learnedData)
    faceRecognizer = FaceRecognizer.Recognizer(learnedData)

    train_images, test_images = [], []
    for subject in os.listdir(path):
        images = os.listdir(posixpath.join(path, subject))
        mappedId = int(subject[-2:])
        train_images = images[1:]
        test_images.append(
            posixpath.join(posixpath.join(path, subject), images[0]))

        for image in train_images:
            print "train", image, mappedId
            faces = getFaces(
                posixpath.join(posixpath.join(path, subject), image))
            faceRecognizer.update(faces, [mappedId])

    print "Prediction starts: "
    for image in test_images:
        print image
        faces = getFaces(image)
        faceRecognizer.predict(faces)
Code Example #5
def recognizeFace():
    npImg = getTmpImg()

    msg = FaceRecognizer.recognize(npImg)

    if msg is None:
        return MSG['EMPTY']

    identity = msg['identity']
    name = msg['name']
    confidence = msg['confidence']
    box = msg['box']

    print(name, confidence)

    if confidence < 0.7:
        identity, name = -1, 'unknown'

    msg = MSG['RECOGNIZED_FACE']
    msg['box'] = {
        'left': box.left(),
        'top': box.top(),
        'right': box.right(),
        'bottom': box.bottom()
    }
    msg['label'] = {'identity': identity, 'name': name}
    return msg
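Note: the left/top/right/bottom accessors on box match dlib's rectangle type. A minimal sketch of drawing the returned result with OpenCV, assuming npImg is a NumPy BGR image:

import cv2

def draw_recognized(npImg, msg):
    # msg is the dict returned by recognizeFace() above
    b = msg['box']
    cv2.rectangle(npImg, (b['left'], b['top']), (b['right'], b['bottom']),
                  (0, 255, 0), 2)
    cv2.putText(npImg, msg['label']['name'], (b['left'], b['top'] - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return npImg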
Code Example #6
def from_video_recognition(video_path, output_path, name_list,
                           detection_classifier, trained_recognizer):
    cap = cv2.VideoCapture(video_path)

    count = 0
    while cap.isOpened():
        ret_code, frame = cap.read()
        if ret_code:
            ret_frame, skip_code = FaceRecognizer.get_recognition(
                frame, name_list, detection_classifier, trained_recognizer)

            if skip_code is None: continue
            if count % 1 == 0:
                write_path = output_path + str(count) + ".jpg"
                cv2.imwrite(write_path, ret_frame)
                print(count)
            count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # waitKey is the time (ms) between frames; press q to exit
                break
        else:
            break

    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
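Note: count % 1 == 0 is always true, so every recognized frame is written; it reads like a frame-skip knob left at 1. Making that explicit (SAVE_EVERY is a hypothetical name):

SAVE_EVERY = 1  # write every Nth recognized frame; 1 keeps the original behavior

if count % SAVE_EVERY == 0:
    cv2.imwrite(output_path + str(count) + ".jpg", ret_frame)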
Code Example #7
File: Main.py Project: Sarika312/Computer-Vision
def main():

    selectCascades()

    trainingData = FaceRecognizer.ImageFeatureSet()
    # Call the below method in a loop for every file in the training data set
    processImage('.\\Images\\test.jpg', trainingData)
Code Example #8
def TrainAllSubjectsExceptOneAndPredictThatSubject():
    os.remove(learnedData)
    faceRecognizer = FaceRecognizer.Recognizer(learnedData)

    subjects = os.listdir(path)
    train_subjects = subjects[:-1]
    test_subjec = subjects[-1]

    print train_subjects
    print test_subjec

    for subject in train_subjects:
        # print subject
        mappedId = int(subject[-2:])
        for image in os.listdir(posixpath.join(path, subject)):
            print image
            faces = getFaces(
                posixpath.join(posixpath.join(path, subject), image))
            faceRecognizer.update(faces, [mappedId])

    print "Prediction of images of ", test_subjec, "starts"
    for image in os.listdir(posixpath.join(path, test_subjec)):
        print image
        faces = getFaces(
            posixpath.join(posixpath.join(path, test_subjec), image))
        faceRecognizer.predict(faces)
Code Example #9
File: Main.py Project: samuel-72/Computer-Vision
def main():

    selectCascades()

    trainingData = FaceRecognizer.ImageFeatureSet()
    testingData = FaceRecognizer.ImageFeatureSet()
    test = {}
    train = {}
    delta = 2.3
    # Call the below method in a loop for every file in the training data set
    for files in os.walk(
            'C:\\Users\\Marc Nipuna\\Desktop\\Computer-Vision\\Codebase\\TrainingImages\\'
    ):
        for filename in files[2]:
            processImage(
                'C:\\Users\\Marc Nipuna\\Desktop\\Computer-Vision\\Codebase\\TrainingImages\\'
                + str(filename), trainingData, "Training Data")

    # Call the below method for extracting the features of the test image
    print "gonna call test"
    for files in os.walk(
            'C:\\Users\\Marc Nipuna\\Desktop\\Computer-Vision\\Codebase\\TestImage\\'
    ):
        print files
        for filename in files[2]:
            processImage(
                'C:\\Users\\Marc Nipuna\\Desktop\\Computer-Vision\\Codebase\\TestImage\\'
                + str(filename), testingData, "Testing Data")

    print "\n\n\nPrinting the training data: \n******************************\n "
    trainingData.printDataSet("Training Data")

    print "\n\n\nPrinting the test data: \n******************************\n"
    testingData.printDataSet("Testing Data")

    train = trainingData.trainingDataSet
    test = testingData.testDataSet
    #Scoring the test data with the training dataset

    #print "train",train
    #print "test",test

    score = trainingData.scoring(train, test, delta)

    for key_train, value_train in train.iteritems():
        testing(key_train, value_train)
Code Example #10
def init_vision(self):
    # if self.video is None or not self.video.isOpened():
    #     if self.use_pipe:
    #         self.video = open_camera(self.video, None, "outpipe")
    #     else:
    #         self.video = open_camera(self.video)
    self.face_detector = FaceDetect.FaceDetector()
    self.face_detector.select_trained_cascade_model(
        "library/vision/Vision/cascade_models/"
        "haarcascade_frontalface_alt2.xml")
    self.face_recognizer = FaceRecognizer.FaceRecognizer(
        'library/vision/face_recognition/trained_face_model.pkl')
Code Example #11
def recognize(file, conn, fac_id, time, subject, sem):

    test_img = cv2.imread(
        'C:\\Users\\Admin\\Desktop\\I-attendence\\attendence\\' + file +
        '.jpg')
    face_detected, gray_img = fr.faceDetection(test_img)
    print("Face Detected : ", face_detected)

    face_recognizer = cv2.face.LBPHFaceRecognizer_create()
    face_recognizer.read(
        'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\trainedyml\\' +
        file + '.yml')

    print(fac_id, "----------------------")
    print(file, "********************")
    for face in face_detected:
        userdata = {
            "facid": fac_id,
            "data": file,
            "time": time,
            "subject": subject,
            "semester": sem
        }
        resp = requests.post(
            'http://127.0.0.1/attendence/student_attendence.php',
            params=userdata)
        print(resp, userdata)

        (x, y, w, h) = face
        roi_gray = gray_img[y:y + h, x:x + w]
        label, confidence = face_recognizer.predict(roi_gray)
        global confident
        confident = confidence
        print("Label : ", label, " Confidence : ", confidence)
        b = bytes(repr(confidence), 'utf-8')
        conn.send(b)
        fr.draw_rect(test_img, face)
        predicted_name = file
        fr.put_text(test_img, predicted_name, x, y)
        resized_img = cv2.resize(test_img, (700, 600))
Code Example #12
File: Main.py Project: samuel-72/Computer-Vision
def main():
    
    selectCascades()
    
    trainingData = FaceRecognizer.ImageFeatureSet()
    testingData = FaceRecognizer.ImageFeatureSet()

    # Call the below method in a loop for every file in the training data set
    '''for files in os.walk('.\\TrainingImages\\'):
        for filename in files[2]:
            processImage('.\\TrainingImages\\'+str(filename),trainingData,"Training Data")'''

    # Call the below method for extracting the features of the test image
    '''for files in os.walk('.\\TestImage\\'):
        for filename in files[2]:
            processImage('.\\TestImage\\'+str(filename),testingData,"Testing Data")'''
    
    import Files
    reload(Files)
    train = Files.Files('.\\TrainingImages\\')
    for filename in train:
        processImage('.\\TrainingImages\\' + str(filename), trainingData, "Training Data")

    test = Files.Files('.\\TestImage\\')
    for filename in test:
        print "Name", filename
        processImage('.\\TestImage\\' + str(filename), testingData, "Testing Data")

    print "\n\n\nPrinting the training data: \n******************************\n "
    trainingData.printDataSet("Training Data")    

    print "\n\n\nPrinting the test data: \n******************************\n"
    testingData.printDataSet("Testing Data")
Code Example #13
    def __init__(self, image_buffer, state_variable_stop_processing,
                 shape_predictor_68_datfile_location,
                 torch_neuralnetwork_model_location,
                 classifiermodel_picklefile_location):
        """ Constructor """
        threading.Thread.__init__(self)
        self.__image_buffer = image_buffer
        self.__state_varibale_stop_processing = state_variable_stop_processing
        self.__face_detector = fd.FaceDetector(
            shape_predictor_68_datfile_location,
            torch_neuralnetwork_model_location)
        self.__face_recognizer = frec.FaceRecognizer(
            classifiermodel_picklefile_location)

        self.__Windowname = "Mustie vision"
Code Example #14
def TrainAllTestAll():
    os.remove(learnedData)
    faceRecognizer = FaceRecognizer.Recognizer(learnedData)
    # Training starts
    subjectId = 1
    for subject in os.listdir(path):
        # print subject
        for images in os.listdir(posixpath.join(path, subject)):
            # print images
            mappedIds = [subjectId]
            faces = getFaces(
                posixpath.join(posixpath.join(path, subject), images))
            faceRecognizer.update(faces, mappedIds)

        subjectId += 1

    # Testing starts
    for subject in os.listdir(path):
        # print subject
        for images in os.listdir(posixpath.join(path, subject)):
            faces = getFaces(
                posixpath.join(posixpath.join(path, subject), images))
            faceRecognizer.predict(faces)
Code Example #15
import cv2
import os
import numpy as np
import FaceRecognizer as fr

test_img = cv2.imread(
    'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\data\\87.jpg')
face_detected, gray_img = fr.faceDetection(test_img)
print("Face Detected : ", face_detected)

#face_recognizer = cv2.face.LBPHFaceRecognizer_create()
#face_recognizer.read('C:\\Users\\Admin\\Desktop\\I-attendence\\recognizetrainingData.yml')

faces, faceID = fr.labels_for_training_data(
    'C:\\Users\\Admin\\Desktop\\I-attendence\\recognize\\data\\')
print(faces)
face_recognizer = fr.train_classifier(faces, faceID)

face_recognizer.save(
    'C:\\Users\\Admin\\Desktop\\I-attendence\\recognizetrainingData.yml')
name = {0: 'Arjun', 1: 'Viral'}

for face in face_detected:
    (x, y, w, h) = face
    roi_gray = gray_img[y:y + h, x:x + w]
    label, confidence = face_recognizer.predict(roi_gray)
    print("Label : ", label, " Confidence : ", confidence)
    fr.draw_rect(test_img, face)
    predicted_name = name[label]  # look up the predicted label, not a fixed index
    fr.put_text(test_img, predicted_name, x, y)
    resized_img = cv2.resize(test_img, (700, 600))
Code Example #16

##########
# Main
##########
initLogger()

if len(sys.argv) < 2:
  logging.error('ERROR: config file path required as argv[1]')
  sys.exit()

# Read config
configs = loadConfig(sys.argv[1])

# Face Detector
faceDetector = FaceDetector.FaceDetector()
faceDetector.initialize(configs)

# Face Recognition
faceRecognizer = FaceRecognizer.FaceRecognizer()
faceRecognizer.initialize(configs)

# Widget Communicator
widgetCommunicator = WidgetCommunicator.WidgetCommunicator()
widgetCommunicator.initialize(configs)

# Start Process
startProcess(configs, faceDetector, faceRecognizer, widgetCommunicator)

terminateProcess(configs, faceDetector, faceRecognizer, widgetCommunicator)
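Note: initLogger, loadConfig, startProcess, and terminateProcess are project functions not shown in this snippet. A hypothetical sketch of a loader with the same shape, assuming the config file is JSON:

import json
import logging

def loadConfig(path):
    # Hypothetical implementation; the project's real loadConfig() is not shown.
    with open(path) as f:
        configs = json.load(f)
    logging.info('loaded config from %s', path)
    return configs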
Code Example #17
            if cv2.waitKey(1) & 0xFF == ord('q'):
                # waitKey is the time (ms) between frames; press q to exit
                break
        else:
            break

    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()


#main for recognition

video_path = "vid_test_data/HP_troll_trim_end.mp4"
output_path = "vid_train_data/HP_troll_start2/"
opencv_classifier_path = "opencv/sources/data/haarcascades/haarcascade_frontalface_default.xml"
opencv_classifier = Initializer.load_detection_classifier(
    opencv_classifier_path)

#from_video_detection(video_path,output_path,opencv_classifier)

training_data_path = "vid_train_data/HP_troll_start2/HP_troll_start/"
name_list = FaceRecognizer.get_name_list(training_data_path)
face_list, label_list = FaceRecognizer.preprocess(training_data_path,
                                                  opencv_classifier)
trained_recognizer = FaceRecognizer.train_recognizer(
    face_list, label_list, opencv_recognizer_type="LBPH")

from_video_recognition(video_path, output_path, name_list, opencv_classifier,
                       trained_recognizer)
Code Example #18
    # cascPath = 'haarcascade_frontalface_default.xml'
    # faceCascade = cv2.CascadeClassifier(cascPath)

    video_capture = cv2.VideoCapture(0)

    video_capture.set(3, 1280)
    video_capture.set(4, 720)

    facenet_model_dir = './pretrained_models/FaceNet/'
    mtcnn_model_dir = './pretrained_models/MTCNN/'
    database_verbose = False
    db_load_path = './data/database/database.pkl'
    db_save_path = './data/database/database.pkl'
    FR = FaceRecognizer(facenet_model_dir,
                        mtcnn_model_dir,
                        db_load_path=db_load_path,
                        database_verbose=database_verbose,
                        resize_factor=0.2)
    # Build the model
    tic = time.clock()
    FR.build()
    toc = time.clock()
    build_time = toc - tic
    print('build time: {}'.format(build_time))
    cv2.namedWindow('Camera', cv2.WINDOW_NORMAL)
    cv2.setMouseCallback('Camera', clickEvent)
    cv2.moveWindow('Camera', 0, 0)
    tableDialog = Table()

    while True:
        # Capture frame-by-frame, (height,width) = 720,1280
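Note: time.clock(), used for the build timing above, was deprecated in Python 3.3 and removed in Python 3.8; on current Python the same measurement reads:

import time

tic = time.perf_counter()
FR.build()
toc = time.perf_counter()
print('build time: {:.3f}s'.format(toc - tic))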
Code Example #19
    MODEL_PATH_TPU = "../pretrained_model/edgetpu_v1/facedetection_320_240_edgetpu.tflite"
    REC_MODEL_PATH_TPU = "../pretrained_model/edgetpu_v2/model_with_mask_clf_quant_edgetpu.tflite"
    MODEL_PATH = "../pretrained_model/training_model/ulffd_landmark.tflite"
    REC_MODEL_PATH = "../pretrained_model/training_model/inference_model_993_quant.tflite"
    DATABASE_PATH = "../pretrained_model/db.npy"
    LABEL_PATH = "../pretrained_model/label.json"

    if args.w == 320:
        W, H = 320, 240
    else:
        W, H = 640, 640

    # load model
    if args.coral_tpu:
        face_detector = FaceDetector(MODEL_PATH_TPU, tpu=args.coral_tpu)
        face_recognizer = FaceRecognizer(REC_MODEL_PATH_TPU,
                                         tpu=args.coral_tpu)
    else:
        face_detector = FaceDetector(MODEL_PATH, tpu=args.coral_tpu)
        face_recognizer = FaceRecognizer(REC_MODEL_PATH, tpu=args.coral_tpu)

    # load database
    rec_db = np.load(DATABASE_PATH)
    label = json.load(open(LABEL_PATH))

    # Quene
    # q_flipped = multiprocessing.Manager().Queue(1)
    # q_rgbf = multiprocessing.Manager().Queue(1)

    # video capture
    # cap = cv2.VideoCapture(0)
    cap = FileVideoStream(0).start()
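Note: the FaceDetector and FaceRecognizer wrappers above load .tflite models; their implementation is not shown. A generic sketch of running such a model with the standard TensorFlow Lite API (not this project's wrapper; input shape, dtype, and any quantization parameters depend on the concrete model):

import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path=MODEL_PATH)
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

def run_model(image):
    # image must already be resized/normalized to the model's input spec
    interpreter.set_tensor(inp['index'], image[np.newaxis, ...].astype(inp['dtype']))
    interpreter.invoke()
    return interpreter.get_tensor(out['index'])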
Code Example #20
import cv2
import os
import func
import FaceRecognizer
from threading import Thread
from mail_test import send_mail


def hi():
    print("hi")


base_user_path = os.getcwd() + "\\user"
dc = FaceRecognizer.FaceRecognizer()
dc.load_users(os.getcwd() + "\\user")
c = cv2.VideoCapture(0)
while c.isOpened():
    s, f = c.read()
    if not s:
        break
    color_image = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
    faces = dc.face_detect(color_image)
    faces = dc.recognize(color_image, multi_detect=1)
    f = dc.draw_faces(f, faces)
    names = func.print_usr_name(faces)
    for name in names:
        thread = Thread(target=send_mail, args=("{}".format(name), ))
        thread.start()
        # TODO : Read user info and send mail to user and user's parent
    cv2.imshow("test", f)
    key = cv2.waitKey(1)
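    # Note: key is read above but never checked, so the loop has no clean
    # exit; a small addition in the same style would be:
    if key & 0xFF == ord('q'):  # press q to stop the capture loop
        break

c.release()
cv2.destroyAllWindows()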
Code Example #21
from detect import *
from FaceRecognizer import *
import glob

data_dir = '../recognition/orl_faces/'

fr = FaceRecognizer()

for directory in glob.glob(data_dir + '*')[0:2]:  # [0:(len(glob.glob(data_dir + '*')) / 10)]
    print directory
    person_name = directory.split('/')[-1]

    for imfile in glob.glob(directory + '/*'):
        print imfile
        im_id = int(imfile.split('/')[-1].split('.')[0])
        if (im_id <= 6):
            print im_id
            img, score = face_detect(imfile)
            fr.add_face(img, person_name)

print 'FaceRecognizer -- Initialized with training data!'
fr.train()

TEST_SIZE = 0
CORRECT = 0.0

for directory in glob.glob(data_dir + '*')[0:2]:  # [0:(len(glob.glob(data_dir + '*')) / 10)]
    person_name = directory.split('/')[-1]
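    # Hypothetical continuation -- the original listing is truncated here.
    # The TEST_SIZE and CORRECT counters suggest accuracy is computed over
    # the held-out images (7..10 of each ORL subject, since 1..6 trained
    # above); fr.predict is an assumed method name.
    for imfile in glob.glob(directory + '/*'):
        im_id = int(imfile.split('/')[-1].split('.')[0])
        if im_id > 6:
            img, score = face_detect(imfile)
            predicted = fr.predict(img)  # assumed API
            TEST_SIZE += 1
            if predicted == person_name:
                CORRECT += 1

print 'Accuracy: ', CORRECT / TEST_SIZE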
Code Example #22
def updateRecognizer():
    FaceRecognizer.updateRecognizer()
Code Example #23
from detect import *
from FaceRecognizer import *
import glob
import json
import pickle

data_dir = '../data/lynk/'

with open('../data/labels.json') as f:
    label_map = json.load(f)

fr = FaceRecognizer()

count = 1
for file_name in glob.glob(data_dir + '*'):
    print 'Processing -- ', file_name, ' -- ', count
    count += 1
    image_uid = file_name.split('/')[-1].split('.')[0]
    person_name = label_map[image_uid]

    img, score = face_detect(file_name)
    fr.add_face(img, person_name)

print 'FaceRecognizer -- Initialized with training data!'
fr.train()
print 'Done Training'

with open('FRmod_v1.obj', 'wb') as filehandler:
    pickle.dump(fr, filehandler)

print 'Saved Model!'
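Note: loading the pickled recognizer back is the symmetric operation; a minimal sketch (binary mode matches the 'wb' write above):

import pickle

with open('FRmod_v1.obj', 'rb') as filehandler:
    fr = pickle.load(filehandler)
print 'Loaded Model!'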
Code Example #24
File: Main.py Project: samuel-72/Computer-Vision
def processImage(pathToImageFile,dataSet,typeOfDataSet):
    global face_cascade, eye_cascade, mouth_cascade, nose_cascade
    
    #Get the filename from the path - This will be the key for the DS storing the image features
    filename = pathToImageFile.split("\\")[len(pathToImageFile.split("\\"))-1].strip()
    print "File Being Processed : ", filename
    
    # Selecting the image and other setup for cv2
    img = cv2.imread(pathToImageFile)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    cv2.imshow('Input Image',img)
    # Identify the face in the image
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    """
    print "Face is : ",faces
    """
    # Capture the cascade data in detectMultiScale, then draw the rectangles
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = img[y:y+h, x:x+w]
        # Identify the eye, mouth and nose in the image
        eyes = eye_cascade.detectMultiScale(roi_gray, 1.1, minNeighbors=10, minSize=(20,28))
        mouth = mouth_cascade.detectMultiScale(roi_gray,1.1, minNeighbors=10, minSize=(20,28))
        nose = nose_cascade.detectMultiScale(roi_gray,1.1, minNeighbors=10, minSize=(20,28))
        """
        print "Eyes are : ", eyes
        """
        for (ex,ey,ew,eh) in eyes:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)        
        
        for (ex,ey,ew,eh) in mouth:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,0,255),2)
            
        for (ex,ey,ew,eh) in nose:
            cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(50,50,50),2)  
        
        # Store the Face, Eye, Nose and Mouth in their corresponding objects
        face     = FaceRecognizer.FR_Face(faces)
        leftEye  = FaceRecognizer.FR_Eye(eyes[1])
        rightEye = FaceRecognizer.FR_Eye(eyes[0])
        nose     = FaceRecognizer.FR_Nose(nose)
        mouth    = FaceRecognizer.FR_Mouth(mouth)
        """
        # Printing Debug information
        print "Leye Upper Left:        ", leftEye.upperLeftPoint
        print "Leye Upper Right:        ", leftEye.upperRightPoint
        print "Leye Lower Left:        ", leftEye.lowerLeftPoint
        print "Leye Lower Right:        ", leftEye.lowerRightPoint                        

        print "Reye Upper Left:        ", rightEye.upperLeftPoint
        print "Reye Upper Right:        ", rightEye.upperRightPoint
        print "Reye Lower Left:        ", rightEye.lowerLeftPoint
        print "Reye Lower Right:        ", rightEye.lowerRightPoint                        
        """
        # Calculating features
        distLeftRightEyeCenter  = calculateDistance(leftEye.center, rightEye.center)
        distLefEyeNoseCenter    = calculateDistance(leftEye.center, nose.center)
        distRightEyeNoseCenter  = calculateDistance(rightEye.center, nose.center)
        distLefEyeMouthCenter   = calculateDistance(leftEye.center, mouth.center)
        distRightEyeMouthCenter = calculateDistance(rightEye.center, mouth.center)
        
        #Storing features in object of class ImageFeatureSet
        if (typeOfDataSet == "Training Data"):
            dataSet.addToTrainingDataSet(filename,leftEye.center,rightEye.center,nose.center,mouth.center,distLeftRightEyeCenter,distLefEyeNoseCenter,distRightEyeNoseCenter,distLefEyeMouthCenter,distRightEyeMouthCenter,face.width)
        elif (typeOfDataSet == "Testing Data"):
            dataSet.addToTestDataSet(filename,leftEye.center,rightEye.center,nose.center,mouth.center,distLeftRightEyeCenter,distLefEyeNoseCenter,distRightEyeNoseCenter,distLefEyeMouthCenter,distRightEyeMouthCenter,face.width)
                    
        cv2.imshow("Image with identification", img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
Code Example #25

import os
import time

import cv2

import utils
from FaceRecognizer import FaceRecognizer  # assumed import; the class is used below

facenet_model_dir = './pretrained_models/FaceNet/'
mtcnn_model_dir = './pretrained_models/MTCNN/'
image_dir = './data/people'  # you should make sure you do have images in this directory
database_verbose = False
db_load_path = None
database_save_path = 'test_johnson.pkl'
image_dir = os.path.abspath(os.path.expanduser(image_dir))
image_name_dict = utils.parse_img_dir(image_dir)
print(image_name_dict)
# print(len(image_name_dict['image']))
# sys.exit()

FR = FaceRecognizer(facenet_model_dir,
                    mtcnn_model_dir,
                    db_load_path=db_load_path,
                    database_verbose=database_verbose)

tic = time.clock()
FR.build()
toc = time.clock()
build_time = toc - tic
print('build time: {}'.format(build_time))

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
for i in range(len(image_name_dict['image'])):
    # read image
    image = cv2.imread(os.path.join(image_dir, image_name_dict['image'][i]))

    # inference
    tic = time.clock()
Code Example #26
File: Main.py Project: teyfikavkan/RoboticSpeech
    # recordTextToSpeech(myresponse) function takes the argument "myresponse" and
    # it records bot's response as a mp3 type and it returns the name of tape.
    tapeName = toSpeech.recordTextToSpeech(myresponse, cnt)

    # playTape(mystring) function takes the argument "tapeName"
    # and it plays the tape which has already been recorded.
    PlayTape.playTape(tapeName)

BotConnection.deleteAllRecords()

entrysentence = unicode("Merhaba  kartınızı okutabilir misiniz", "utf-8")
PlayTape.playTape(toSpeech.recordTextToSpeech(entrysentence, cnt))
id = raw_input("Merhaba ID kartınizi okutabilir misiniz?")
verifiedName = checkDB.copyToTextFile(id)
print(verifiedName)
recognizedName = FaceRecognizer.FaceRecognize()
if verifiedName == recognizedName:
    print("Erişime izin verildi")
    # sentence=unicode("Hoş geldin ")+unicode(recognizedName)+unicode(" nasıl yardımcı olabilirim")
    # sentence=sentence.encode("utf-8")
    sentence = unicode("Hoş geldin " + recognizedName + " nasıl yardımcı olabilirim", "utf-8")
    PlayTape.playTape(toSpeech.recordTextToSpeech(sentence, cnt+1))

else:
    print("")
    PlayTape.playTape(toSpeech.recordTextToSpeech("kim oldugunu bilemedim", cnt+1))

while True:
    pass  # snippet truncated here in the original listing
Code Example #27

# cv2.destroyAllWindows()
####################################################################

####################### test FaceRecognizer v4 #######################
facenet_model_dir = './pretrained_models/FaceNet/'
mtcnn_model_dir = './pretrained_models/MTCNN/'
image_dir = './data/test_people' # you should make sure you do have images in this directory
database_verbose = False
db_load_path = 'test_johnson.pkl'
save_img_dir = './data/result'
save_img_dir = utils.check_dir(save_img_dir)
image_dir = os.path.abspath(os.path.expanduser(image_dir))
image_list = [f for f in os.listdir(image_dir) if not f.startswith('.')]

FR = FaceRecognizer(facenet_model_dir,
                    mtcnn_model_dir,
                    db_load_path=db_load_path,
                    database_verbose=database_verbose)

tic = time.clock()
FR.build()
toc = time.clock()
build_time = toc - tic
print('build time: {}'.format(build_time))

FR.rename_identity('YC','BurBurBur')

cv2.namedWindow('image', cv2.WINDOW_NORMAL)
for i, image_name in enumerate(image_list):
    # read image
    image = cv2.imread(os.path.join(image_dir, image_name))
    # inference