Code Example #1
    def __init__(self, LogPath):
        """ Initializes the Facenet class. """

        # Class settings
        self.Known = []
        
        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(self.Helpers.confs["Server"]["Logs"]+"/Facenet")
        
        # OpenCV settings 
        self.OpenCV = OpenCV(self.Helpers)
        
        # Dlib settings 
        self.Detector   = dlib.get_frontal_face_detector()
        self.Predictor  = dlib.shape_predictor(self.Helpers.confs["Classifier"]["Dlib"])
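
The constructor above reads its paths from the shared Helpers configuration. For orientation, a hypothetical excerpt of the configuration this snippet assumes is sketched below; only the key names ("Server" > "Logs", "Classifier" > "Dlib") come from the code, the values are illustrative placeholders.

# Hypothetical configuration excerpt; key names are taken from the snippet above,
# the values are placeholders rather than the project's real settings.
confs = {
    "Server": {
        "Logs": "Logs"  # directory the Facenet log file is written to
    },
    "Classifier": {
        "Dlib": "Model/shape_predictor_68_face_landmarks.dat"  # dlib landmark model
    }
}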
Code Example #2
File: TassAI.py Project: amirunpri2018/TassAI
    def connect(self):
        """ Connects to the Foscam IP camera. """

        self.camera = OpenCV("rtsp://" +
                             self.Helpers.confs["Foscam"]["RTSPuser"] + ":" +
                             self.Helpers.confs["Foscam"]["RTSPpass"] + "@" +
                             self.Helpers.confs["Foscam"]["RTSPip"] + ":" +
                             self.Helpers.confs["Foscam"]["RTSPport"] + "/" +
                             self.Helpers.confs["Foscam"]["RTSPendpoint"])

        self.Helpers.logger.info("Connected To Camera")
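
The RTSP URL assembled in connect() therefore takes the form rtsp://user:pass@ip:port/endpoint. A hypothetical "Foscam" configuration block matching the keys read above is shown below; the values are placeholders, not the project's settings.

# Hypothetical "Foscam" configuration block; key names come from the snippet,
# values are placeholders. All values are strings because they are concatenated.
confs = {
    "Foscam": {
        "RTSPuser": "admin",
        "RTSPpass": "secret",
        "RTSPip": "192.168.1.20",
        "RTSPport": "554",
        "RTSPendpoint": "videoMain"
    }
}
# Resulting URL: rtsp://admin:secret@192.168.1.20:554/videoMain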
Code Example #3
    def __init__(self, LogPath, Configuration):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        #
        ###############################################################

        # Class settings
        self.Known = []
        self.Configuration = Configuration

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Configuration.AiCore["Logs"] + "/Facenet")

        # OpenCV settings
        self.OpenCV = OpenCV(self.Helpers)

        # Dlib settings
        self.Detector = dlib.get_frontal_face_detector()
        self.Predictor = dlib.shape_predictor(
            self.Configuration.Classifier["Dlib"])
Code Example #4
    def __init__(self):
        """ Initializes the AML/ALL Detection System Movidius NCS1 Classifier Class. """

        self.Helpers = Helpers("ClassifierServer")
        self.confs = self.Helpers.confs

        self.OpenCV = OpenCV()

        self.Movidius = Movidius()
        self.Movidius.checkNCS()
        self.Movidius.loadInception()

        self.Helpers.logger.info(
            "Classifier server class initialization complete.")
Code Example #5
class Facenet():

    ###############################################################
    #
    # Facenet helper functions
    #
    ###############################################################

    def __init__(self, LogPath, Configuration):

        ###############################################################
        #
        # Sets up all default requirements and placeholders
        #
        ###############################################################

        # Class settings
        self.Known = []
        self.Configuration = Configuration

        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(
            self.Configuration.AiCore["Logs"] + "/Facenet")

        # OpenCV settings
        self.OpenCV = OpenCV(self.Helpers)

        # Dlib settings
        self.Detector = dlib.get_frontal_face_detector()
        self.Predictor = dlib.shape_predictor(
            self.Configuration.Classifier["Dlib"])

    def PreprocessKnown(self, ValidDir, Graph):

        ###############################################################
        #
        # Preprocesses known images
        #
        ###############################################################

        for validFile in os.listdir(ValidDir):
            if os.path.splitext(validFile)[1] in self.Configuration.Classifier["ValidIType"]:
                self.Known.append({
                    "File": validFile,
                    "Score": self.Infer(
                        cv2.resize(cv2.imread(ValidDir + validFile), (640, 480)),
                        Graph)
                })

        self.Helpers.logMessage(self.LogFile, "Facenet", "STATUS",
                                str(len(self.Known)) + " known images found.")

    def ProcessFrame(self, Frame):

        ###############################################################
        #
        # Processes frame
        #
        ###############################################################

        Known = []

        Frame = cv2.resize(cv2.imdecode(Frame, cv2.IMREAD_UNCHANGED),
                           (640, 480))
        RawFrame = Frame.copy()
        Gray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)

        Path = "Data/Captured/" + datetime.now().strftime(
            "%Y-%m-%d") + "/" + datetime.now().strftime("%H") + "/"
        FileName = datetime.now().strftime('%M-%S') + ".jpg"
        FileNameGray = datetime.now().strftime('%M-%S') + "-Gray.jpg"

        self.OpenCV.SaveFrame(Path + "/", FileName, Frame)
        self.OpenCV.SaveFrame(Path + "/", FileNameGray, Gray)

        return Frame

    def LoadGraph(self):

        ###############################################################
        #
        # Loads Facenet graph
        #
        ###############################################################

        with open(self.Configuration.Classifier["NetworkPath"] +
                  self.Configuration.Classifier["Graph"],
                  mode='rb') as f:
            graphFile = f.read()

        self.Helpers.logMessage(self.LogFile, "Facenet", "Status",
                                "Loaded TASS Graph")

        return graphFile

    def Infer(self, face, graph):

        ###############################################################
        #
        # Runs the image through Facenet
        #
        ###############################################################

        graph.LoadTensor(self.PreProcess(face).astype(np.float16), None)
        output, userobj = graph.GetResult()

        return output

    def PreProcess(self, src):

        ###############################################################
        #
        # Preprocesses an image
        #
        ###############################################################

        NETWORK_WIDTH = 160
        NETWORK_HEIGHT = 160
        preprocessed_image = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))
        preprocessed_image = cv2.cvtColor(preprocessed_image,
                                          cv2.COLOR_BGR2RGB)
        preprocessed_image = self.OpenCV.whiten(preprocessed_image)

        return preprocessed_image

    def Compare(self, face1, face2):

        ###############################################################
        #
        # Determines whether two images are a match
        #
        ###############################################################

        if (len(face1) != len(face2)):
            self.Helpers.logMessage(self.LogFile, "Facenet", "!ERROR!",
                                    "Distance Mismatch")
            # Embeddings of different lengths cannot be compared
            return False, None

        tdiff = 0
        for index in range(0, len(face1)):
            diff = np.square(face1[index] - face2[index])
            tdiff += diff

        if (tdiff < 1.3):
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification",
                                    "Calculated Match: " + str(tdiff))
            return True, tdiff
        else:
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification",
                                    "Calculated Mismatch: " + str(tdiff))
            return False, tdiff

    def addFrameBB(self, frame, x, y, w, h):

        ###############################################################
        #
        # Adds bounding box to the passed frame
        #
        ###############################################################

        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        return frame

    def addFrameFeatures(self, frame, shape, x, y):

        ###############################################################
        #
        # Adds facial features to the passed frame
        #
        ###############################################################

        for (x, y) in shape:
            cv2.circle(frame, (x, y), 1, (0, 255, 0), -1)

        return frame
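
To show how these methods fit together, here is a minimal, hypothetical usage sketch. The paths, the Configuration object and the allocated NCS graph are assumptions; only the Facenet method names come from the class above.

import cv2

# Hypothetical usage sketch; paths, Configuration and the graph object are
# placeholders. The NCS graph is allocated elsewhere in the project
# (NCSDK 1.x device.AllocateGraph on the bytes returned by LoadGraph).
facenet = Facenet("Logs/", Configuration)        # Configuration: project config object
graphFile = facenet.LoadGraph()                  # raw graph bytes read from disk

facenet.PreprocessKnown("Data/Known/", graph)    # graph: allocated NCS graph object
frame = cv2.imread("Data/Test/example.jpg")      # any BGR test image
match, distance = facenet.Compare(facenet.Known[0]["Score"],
                                  facenet.Infer(frame, graph))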
Code Example #6
	def connect(self):
		""" Connects to the USB camera. """

		self.camera = OpenCV(self.Helpers.confs["Camera"]["Id"])
Code Example #7
    def connect(self):
        """ Connects to the USB camera. """

        self.camera = OpenCV(self.Helpers.confs["Camera"]["Id"])

        self.Helpers.logger.info("Connected To Camera")
Code Example #8
class Facenet():
    """ ALL Detection System 2019 Facenet Class

    Facenet helper functions for the ALL Detection System 2019 Facial Authentication Server project. 
    """
    
    def __init__(self, LogPath):
        """ Initializes the Facenet class. """

        # Class settings
        self.Known = []
        
        self.Helpers = Helpers()
        self.LogFile = self.Helpers.setLogFile(self.Helpers.confs["Server"]["Logs"]+"/Facenet")
        
        # OpenCV settings 
        self.OpenCV = OpenCV(self.Helpers)
        
        # Dlib settings 
        self.Detector   = dlib.get_frontal_face_detector()
        self.Predictor  = dlib.shape_predictor(self.Helpers.confs["Classifier"]["Dlib"])

    def PreprocessKnown(self, ValidDir, Graph):
        """ Preprocesses known images. """
        
        for validFile in os.listdir(ValidDir):
            if os.path.splitext(validFile)[1] in self.Helpers.confs["Classifier"]["ValidIType"]:
                self.Known.append({"File": validFile, "Score": self.Infer(cv2.resize(cv2.imread(ValidDir+validFile), (640, 480)), Graph)})
        
        self.Helpers.logMessage(self.LogFile,
                                "Facenet",
                                "STATUS",
                                str(len(self.Known)) + " known images found.") 

    def ProcessFrame(self, Frame):
        """ Preprocesses frame. """

        Known = []

        Frame = cv2.resize(cv2.imdecode(Frame, cv2.IMREAD_UNCHANGED), (640, 480)) 
        RawFrame = Frame.copy()
        Gray = cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)

        Path = "Data/Captured/" + datetime.now().strftime("%Y-%m-%d") + "/" + datetime.now().strftime("%H") + "/"
        FileName = datetime.now().strftime('%M-%S') + ".jpg"
        FileNameGray = datetime.now().strftime('%M-%S') + "-Gray.jpg"

        self.OpenCV.SaveFrame(Path + "/", FileName, Frame)
        self.OpenCV.SaveFrame(Path + "/", FileNameGray, Gray)

        return Frame

    def LoadGraph(self):
        """ Loads Facenet graph. """

        with open(self.Helpers.confs["Classifier"]["Graph"], mode='rb') as f:
            graphFile = f.read()
            
        self.Helpers.logMessage(self.LogFile, "Facenet", "Status", "Loaded TASS Graph")

        return graphFile
        
    def Infer(self, face, graph):
        """ Runs the image through Facenet. """
        
        graph.LoadTensor(self.PreProcess(face).astype(np.float16), None)
        output, userobj = graph.GetResult()

        return output

    def PreProcess(self, src):
        """ Preprocesses an image. """
        
        NETWORK_WIDTH = 160
        NETWORK_HEIGHT = 160
        
        preprocessed_image = cv2.resize(src, (NETWORK_WIDTH, NETWORK_HEIGHT))
        preprocessed_image = cv2.cvtColor(preprocessed_image, cv2.COLOR_BGR2RGB)
        preprocessed_image = self.OpenCV.whiten(preprocessed_image)
        
        return preprocessed_image

    def Compare(self, face1, face2):
        """ Determines whether two images are a match. """

        if (len(face1) != len(face2)):
            self.Helpers.logMessage(self.LogFile, "Facenet", "!ERROR!", "Distance Mismatch")
            # Embeddings of different lengths cannot be compared
            return False, None

        tdiff = 0
        for index in range(0, len(face1)):
            diff = np.square(face1[index] - face2[index])
            tdiff += diff

        if (tdiff < 1.3):
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification", "Calculated Match: " + str(tdiff))
            return True, tdiff
        else:
            self.Helpers.logMessage(self.LogFile, "Facenet", "Classification", "Calculated Mismatch: " + str(tdiff))
            return False, tdiff
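
The loop in Compare accumulates the squared Euclidean (L2) distance between the two Facenet embeddings and declares a match when it falls below 1.3. For reference, a vectorized NumPy equivalent of that distance (a sketch, not part of the project):

import numpy as np

def squared_l2_distance(face1, face2):
    """ Squared Euclidean distance between two embedding vectors,
        equivalent to the accumulation loop in Compare. """
    return float(np.sum(np.square(np.asarray(face1) - np.asarray(face2))))

# A match, by the threshold used above, is squared_l2_distance(a, b) < 1.3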