Example #1
    def __init__(self, video_src=None, modelType=None, threshold=None):
        QtGui.QMainWindow.__init__(self)
        self.recognition = rec.Recognition(True, modelType, threshold)
        self.setWindowTitle("Authorization system - Face")
        cWidget = QtGui.QWidget(self)
        mainLayout = QtGui.QVBoxLayout()

        # Title
        # titleLabel = QtGui.QLabel("Show your credential!")
        # titleLabel.setAlignment(QtCore.Qt.AlignCenter)

        # Webcam
        self.imgLabel = QtGui.QLabel()
        self.webcamSampling = vs.VideoSampling(video_src)
        self.update()

        # Button
        self.startButton = QtGui.QPushButton("Authenticate")
        self.cancelButton = QtGui.QPushButton("Cancel")
        self.connect(self.startButton, QtCore.SIGNAL("clicked()"),
                     self.startAuthentication)
        self.connect(self.cancelButton, QtCore.SIGNAL("clicked()"),
                     QtCore.SLOT("close()"))

        # mainLayout.addWidget(titleLabel)
        mainLayout.addWidget(self.imgLabel)
        mainLayout.addWidget(self.startButton)
        mainLayout.addWidget(self.cancelButton)

        mainLayout.setAlignment(QtCore.Qt.AlignCenter)
        cWidget.setLayout(mainLayout)
        self.setCentralWidget(cWidget)
        self.center()
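
Example #1 shows only the constructor. Below is a minimal, hedged launch sketch; it assumes the __init__ above lives in a QMainWindow subclass (given the hypothetical name AuthWindow here) and that PyQt4 plus the vs/rec modules are importable. The constructor arguments are illustrative.

import sys
from PyQt4 import QtGui

# class AuthWindow(QtGui.QMainWindow):
#     ... the __init__ from Example #1 goes here ...

if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    # hypothetical class name and illustrative arguments
    window = AuthWindow(video_src=0, modelType="LBPH", threshold=100)
    window.show()
    sys.exit(app.exec_())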
Example #2
    def saveFrame(self, userId):
        """ write frame to disk and add row to file config
        Return:
        detection - face is detected
        newUser - created new user
        """
        imgBasePath = ""
        imgBasePath = self.faceDir + "/" + "face_" + userId
        recognition = rec.Recognition(False)
        # faceImg, detection, x, y, h, w = recognition.detectFace(
        #     cv2.cvtColor(self.currentFrame, cv2.COLOR_RGB2GRAY))
        # faceImg2, detection2 = recognition.getCroppedImageByEyes(
        #     cv2.cvtColor(self.currentFrame, cv2.COLOR_RGB2GRAY), (0.2, 0.2))
        #faceImg, detection = recognition.getCroppedImageByEyes(
        faceImg, detection, x, y, h, w = recognition.detectFace(
            cv2.cvtColor(self.currentFrame, cv2.COLOR_RGB2GRAY))

        if detection:
            writer = rwi.ReadWriteIndex()
            faceImg = cv2.resize(faceImg, (92, 112))
            fileExtension = "." + self.config.get("Cam", "imgExtension")
            filePath = (imgBasePath + "#" +
                        str(writer.getCountUserElem(userId)) + fileExtension)
            cv2.imwrite(self.currentDir + filePath, faceImg)
            newUser = writer.checkUser(userId)
            writer.addRow(userId, filePath)  # add new line to file index
            return detection, not newUser

        return detection, False
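
A hedged sketch of how saveFrame might be driven during enrollment follows; it assumes saveFrame belongs to the video sampling class from Example #1, that the class keeps the latest grabbed frame in self.currentFrame, and that an update()-style grab method exists. All of these names are assumptions, not taken from the snippet.

# Illustrative enrollment loop (hypothetical names throughout).
sampler = vs.VideoSampling(0)          # open the default webcam (assumed signature)
for _ in range(10):                    # collect up to 10 face samples
    sampler.update()                   # grab the next frame (assumed method)
    detected, createdNewUser = sampler.saveFrame("user42")
    if detected:
        print("Sample stored; new user created:", createdNewUser)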
Example #3
import keyboard
from Frame import Framework
from ChessInformation import chessInformation
import json

from Recognition import *

picture_path = "D:/MyProject/python/testLOL/picture/"

frame = Framework()
imgHandle = chessInformation()
cnt = 0
with open(r'D:\MyProject\python\testLOL\project\scr\information.json',
          'r') as f:
    config = json.load(f)
    rec = Recognition(config)

    def Id2Name(id):
        if id in config:
            return config[id]["HeroName"]
        else:
            return "None"

    while True:
        # print("get " + str(cnt))
        keyboard.wait("f9")
        for i in range(5):
            img = imgHandle.getChessImg(i, frame)
            print(Id2Name(rec.get(img)))

    # time.sleep(20)
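
The lookup config[id]["HeroName"] in Id2Name implies that information.json maps recognizer ids to hero records. The structure below is an inferred illustration only; the real file's keys and any additional fields are not shown in the snippet.

# Inferred, illustrative shape of information.json (not the actual contents).
example_config = {
    "1": {"HeroName": "Garen"},
    "2": {"HeroName": "Ashe"},
}

assert example_config["1"]["HeroName"] == "Garen"  # what Id2Name would return for id "1"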
Example #4
"""
USAGE
python main.py -i videos/sj.mp4 -d dataset
"""

import Recognition
import Encoder
import argparse

# read arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d",
                "--dataset",
                type=str,
                default="dataset",
                help="input face dataset path")
ap.add_argument("-i", "--input", type=str, help="path to input video")
args = vars(ap.parse_args())

# start encoding
print("Encoding data set...")
encoder = Encoder.Encoder(args['dataset'])
encode_file_dir = encoder.encode()
print("Completely encode the data set")

# start detection and recognition
print("Detecting the face...")
input_video_dir = args['input']
recognition = Recognition.Recognition(encode_file_dir, input_video_dir)
mosaic_list = recognition.recognize()
print("Completely detected faces from input video")