Example #1
    def runCapture(self):

        imgdir = self._imagePath
        cam = self._cam
        cascade = self._cascade

        faceSize = (90, 90)
        model = cv2.createLBPHFaceRecognizer(threshold=70.0)

        images, labels, namess = utils.retrain(imgdir, model, faceSize)
        #print "New state:", len(images), "images", len(namess), "people"

        self.isSomebody = False

        ret, img = cam.read()

        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # histogram equalization, not sure this is necessary
        gray = cv2.equalizeHist(gray)
        # face detection
        rects = cascade.detectMultiScale(
            gray,
            scaleFactor=1.2,
            minNeighbors=5,
            minSize=(40, 40),
            flags=cv2.cv.CV_HAAR_SCALE_IMAGE)  # flags=cv2.CASCADE_SCALE_IMAGE on newer OpenCV

        # keep only the region of interest (the face)
        roi = None
        if len(rects) > 0:
            (x, y, w, h) = rects[0]
            # crop & resize it
            roi = cv2.resize(gray[y:y + h, x:x + w], faceSize)
            # compute the centre of the detected rectangle
            xCentre1 = (2 * x + w) / 2
            yCentre1 = (2 * y + h) / 2
            xCentre = str(xCentre1)
            yCentre = str(yCentre1)
            centre = "(" + xCentre + ":" + yCentre + ")"

            cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)

            if len(images) > 0:

                [p_label, p_confidence] = model.predict(np.asarray(roi))
                name = "unknown"
                if p_label != -1:
                    name = namess[p_label]
                    self.isSomebody = True
                cv2.putText(img,
                            "%s %.2f %.2f" % (name, p_confidence, p_label),
                            (x + 10, y + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                            (0, 255, 0))
                self.name = name
                self.xPosition = xCentre1
                self.yPosition = yCentre1

        while True:

            cv2.imshow('Face Recognition For Poppy', img)

            k = cv2.waitKey(5) & 0xFF

            # quit with Escape
            if k == 27: break
Example #2
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # expose a motion sensor service and its MotionDetected characteristic
        serv_motion = self.add_preload_service('MotionSensor')
        self.char_detected = serv_motion.configure_char('MotionDetected')
        # classification engine backed by the TFLite model
        self.engine = ClassificationEngine("./models/classify.tflite")
        self.is_trained = retrain()
        self.labels = get_labels()
        self.is_running = True
        logging.info(self.setup_message())
Example #3
    def run(self):
        # simple state machine driven by app_state.last_state
        while self.is_running:

            # "shutdown": stop the loop and terminate the application
            if app_state.last_state == "shutdown":
                self.is_running = False
                os.system('kill $PPID')

            # "run": classify the current camera frame and report a detection
            if (app_state.last_state == "run") and self.is_trained:
                detection = False
                img = camera.returnPIL()
                output = self.engine.ClassifyWithImage(img)
                print("Detection: ", output[0][0])
                if output[0][0] == int(self.labels["detection"]):
                    detection = True
                    logging.info("detection triggered")
                self._detected(detection)

            # "retrain": imprint new weights, then reload the engine
            if app_state.last_state == "retrain":
                logging.info("imprinting weights")
                self.is_trained = retrain()
                self.labels = get_labels()

                if self.is_trained:
                    self.engine = ClassificationEngine(
                        "./models/classify.tflite")
                    app_state.last_state = "run"
                    logging.info("finished imprinting")

                else:
                    app_state.last_state = "collect"
                    logging.warning(
                        "could not imprint weights. Please provide enough pictures"
                    )

            # "collect_background" / "collect_detection": grab training pictures
            if app_state.last_state == "collect_background":
                camera.collect("background")
                app_state.last_state = "collect"

            if app_state.last_state == "collect_detection":
                camera.collect("detection")
                app_state.last_state = "collect"
Example #4
    def _runCaptureLoop2(self):

        print "  press 'Esc' to quit"
        print "  press 'a' to add an image to the database"
        print "  press 't' to retrain the model"

        # create the image folder if it does not exist yet
        imgdir = self._imagePath
        try:
            os.mkdir(imgdir)
        except OSError:
            pass  # the path already exists

        # open the webcam
        cam = self._cam

        if not cam.isOpened():
            print "camera not detected!"
            sys.exit()
        print "camera detected!"

        # load the cascade file
        cascadePath = self._cascadePath
        cascade = cv2.CascadeClassifier(cascadePath)
        if cascade.empty():
            print "no cascade specified!"
            sys.exit()
        print "cascade:", cascadePath

        # size of the images stored in the database
        faceSize = (90, 90)
        model = cv2.createLBPHFaceRecognizer(threshold=70.0)

        self.compteur = 0

        images, labels, namess = utils.retrain(imgdir, model, faceSize)
        print "New state:", len(images), "images", len(namess), "people"

        compteur = 0

        while compteur < 10:

            if self.compteur > 10:
                self.isSomebody = False
                compteur = 0

            ret, img = cam.read()

            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # histogram equalization, not sure this is necessary
            #gray = cv2.equalizeHist(gray)
            # face detection
            rects = cascade.detectMultiScale(
                gray,
                scaleFactor=1.1,
                minNeighbors=5,
                minSize=(30, 30),
                flags=cv2.cv.CV_HAAR_SCALE_IMAGE
            )  # flags=cv2.CASCADE_SCALE_IMAGE on newer OpenCV

            # keep only the region of interest (the face)
            roi = None
            for x, y, w, h in rects:
                self.isSomebody = True

                # crop & resize it
                roi = cv2.resize(gray[y:y + h, x:x + w], faceSize)
                # compute the centre of the detected rectangle
                xCentre1 = (2 * x + w) / 2
                yCentre1 = (2 * y + h) / 2
                xCentre = str(xCentre1)
                yCentre = str(yCentre1)
                centre = "(" + xCentre + ":" + yCentre + ")"

                cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)
                if len(images) > 0:

                    [p_label, p_confidence] = model.predict(np.asarray(roi))
                    name = "unknown"
                    if p_label != -1:
                        name = namess[p_label]

                    cv2.putText(img,
                                "%s %.2f %.2f" % (name, p_confidence, p_label),
                                (x + 10, y + 20), cv2.FONT_HERSHEY_PLAIN, 1.5,
                                (0, 255, 0))
                    self.name = name
                    self.xPosition = xCentre1
                    self.yPosition = yCentre1
                break  # use only the first detected face

            self.compteur += 1
            cv2.imshow('Face Recognition For Poppy', img)

            k = cv2.waitKey(5) & 0xFF

            # quit with Escape
            if k == 27: break

            # 'a' pressed: add this person to the database
            if (k == 97) and (roi is not None):
                print "Enter the person's name: "
                name = sys.stdin.readline().strip('\r').strip('\n')
                # create a folder for this person
                dirname = os.path.join(imgdir, name)
                try:
                    os.mkdir(dirname)
                except OSError:
                    pass  # keep going if the folder already exists
                # save the image
                path = os.path.join(dirname,
                                    "%d.png" % (rand.uniform(0, 10000)))
                print "added:", path
                cv2.imwrite(path, roi)

            # 't' pressed: update the model
            if k == 116:
                images, labels, namess = utils.retrain(imgdir, model, faceSize)
                print "New state:", len(images), "images", len(namess), "people"

            compteur = compteur + 1