Example #1
def test():
    f = Face()
    pic = "/home/gustavo/Documents/unb/das/joserobertoarruda.jpg"
    img = cv2.imread(pic)
    img = f.detect(img)
    cv2.imshow('frame', img)
    cv2.waitKey()
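
Note: the snippets on this page are shown without their imports. A minimal header that would let Example #1 run on its own might look like the sketch below; the Face import path is an assumption, since the project's module layout is not shown here.

# Assumed imports for Example #1 (the Face import path is a guess).
import cv2
from face import Face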
Example #2
def demo_video(video_file):
    import time
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    cap = common.VideoStream(video_file, queueSize=4).start()
    time.sleep(1)
    total_t, counter = 0, 0
    t = common.clock()

    while not cap.stopped:
        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            ids = range(len(detections))

            # temp = mtracker.update(imgcv, to_cvbox(detections))
            # cvboxes, ids = [], []
            # for tid,tracker in mtracker.trackers.items():
            #     if tracker.visible_count > 3 and tracker.consecutive_invisible_count<10:
            #         cvboxes.append(tracker.bbox)
            #         ids.append(tid)
            # detections = to_bbox(cvboxes)

            print(detections)
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)  # running average frames per second
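
A hypothetical entry point for this demo, assuming DETECTOR and the common helpers are defined at module level; the file name is purely illustrative.

if __name__ == '__main__':
    demo_video('crowd.mp4')  # illustrative file name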
Example #3
class Capture:
    def __init__(self, label, clf):
        self.frame = None
        self.thread1 = None
        self.thread2 = None
        self.stopEvent1 = None
        self.stopEvent2 = None
        self.started = None
        self.captured = None
        self.label = label
        self.vs = VideoStream().start()
        self.fa = Face()
        self.emotion = Emotion(clf)
        self.outputPath = None

    def start(self):
        # Mark the video thread as started; a bare None flag cannot be
        # distinguished from a cleared one in close() and exit().
        self.started = True

        # Clear any previous stop event so videoLoop can loop again.
        self.stopEvent1 = None

        # If the capture thread is running, stop it first.
        if self.captured:
            self.stopEvent2.set()

        # Initialize and launch the video thread.
        self.stopEvent1 = threading.Event()
        self.thread1 = threading.Thread(target=self.videoLoop, args=())
        self.thread1.start()

    def close(self):

        # Check which threads have been started and stop them.
        if self.started:
            self.stopEvent1.set()
        if self.captured:
            self.stopEvent2.set()
        # self.vs.stop()

    def exit(self):
        if self.started:
            self.stopEvent1.set()
            print("closing thread 1")
        if self.captured:
            self.stopEvent2.set()
            print("closing thread 2")
        self.vs.stop()

    def capture(self, emotion):
        # Mark the capture thread as started, for the same reason as self.started.
        self.captured = True
        # self.emo = emotion
        self.outputPath = "Dataset" + os.sep + emotion
        if not os.path.exists(self.outputPath):
            os.mkdir(self.outputPath)

        # Clear any previous stop event so captureLoop can loop again.
        self.stopEvent2 = None

        # Stop the video thread if it is running.
        if self.started:
            self.stopEvent1.set()

        # Initialize and launch the capture thread.
        self.stopEvent2 = threading.Event()
        self.thread2 = threading.Thread(target=self.captureLoop, args=())
        self.thread2.start()

    def snapshot(self):
        ts = datetime.datetime.now()
        filename = "{}.jpg".format(ts.strftime("%Y-%m-%d_%H-%M-%S"))
        output = os.path.sep.join((self.outputPath, filename))
        cv2.imwrite(output, self.frame.copy())
        self.showInfo()

    def showInfo(self):
        info.show()

    def videoLoop(self):
        try:
            # Loop until the stop event is set.
            while not self.stopEvent1.is_set():
                self.frame = self.vs.read()

                gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                image = cv2.cvtColor(self.frame, cv2.COLOR_BGR2RGB)

                faces = self.fa.detect(gray)

                image = self.emotion.getLandmarks(image, faces)

                image = Image.fromarray(image)

                image = ImageTk.PhotoImage(image)

                self.label.configure(image=image)
                self.label.image = image

        except RuntimeError as e:
            print("Error: {}".format(e))
Example #4
class confusionMatrix:
    def __init__(self):
        self.predictor = dlib.shape_predictor("predictor.dat")
        self.clf = SVC(kernel = 'linear', probability=True, tol=1e-3)
        self.emotions = ['Angry','Happy','Neutral','Sad','Shocked']
        self.face = Face()

    def getconfusion(self):
        # predictor = dlib.shape_predictor("predictor.dat")
        # clf = SVC(kernel = 'linear', probability=True, tol=1e-3)
        # emotions = ['Angry','Happy','Neutral','Sad','Shocked']
        # face = Face()
      
        mat = []
        training_data,training_labels,prediction_data,prediction_labels = self.make_sets()
        npar_train = np.array(training_data)
        npar_pred = np.array(prediction_data)
        self.clf.fit(npar_train, training_labels)
        print("training...")
        print(len(prediction_data))
        for index, pred in enumerate(prediction_data):
            prediction = self.clf.predict(npar_pred[index].reshape(1, -1))
            # print(self.emotions[int(prediction)])
            pred = int(prediction)
            actual = int(prediction_labels[index])
            if pred != actual:
                print("{} predicted as {}".format(self.emotions[actual],
                                                  self.emotions[pred]))
                # Record the misclassification as an "_<actual><predicted>_" cell.
                cell = "_" + str(actual) + str(pred) + "_"
                mat.append(cell)
        print("Successful")
        print(mat)

        def count_mat(mat):
            new_mat = []
            count = {}
            for n in mat:
                if n not in new_mat:
                    new_mat.append(n)
                    count[n] = 1
                else:
                    count[n] += 1
            return new_mat,count

        New_mat, count = count_mat(mat)
        for mats in New_mat:
            print("{} : {}\n".format(mats, count[mats]))

    def get_images(self, emotion):
        print("Getting {} images".format(emotion))
        files = glob.glob("Dataset/%s/*" % emotion)
        random.shuffle(files)
        # 70/30 train/test split of the shuffled files.
        training = files[:int(len(files) * 0.7)]
        prediction = files[-int(len(files) * 0.3):]
        print(len(files))
        return training, prediction

    def get_landmarks(self, image):
        image = modules.resize(image, width=400)
        cv2.imshow("image", image)
        cv2.waitKey(10)
        faces = self.face.detect(image)

        for x, y, w, h in faces:
            roi = image[y:y + h, x:x + w]

            # Enlarging the ROI would give the shape predictor more context:
            # ROI = image[(y - 10):(y + h + 10), (x - 10):(x + w + 10)]

            # Resize the ROI before running the shape predictor on it.
            resized_roi = modules.resize(roi, width=800, inter=cv2.INTER_CUBIC)

            # Bottom and right corners of the resized image.
            bottom, right = resized_roi.shape[:2]

            # Convert the OpenCV rectangle to dlib rectangles.
            rect = dlib.rectangle(x, y, x + w, y + h)
            resized_rect = dlib.rectangle(0, 0, right, bottom)

            shape = self.predictor(image, rect)
            resized_shape = self.predictor(resized_roi, resized_rect)

            xlist = []
            ylist = []
            landmarks_vectorised = []

            # Landmarks 36-67 cover the eyes, nose and mouth.
            for i in range(36, 68):
                xlist.append(float(resized_shape.part(i).x))
                ylist.append(float(resized_shape.part(i).y))

            xmean = np.mean(xlist)
            ymean = np.mean(ylist)

            # Coordinates relative to the landmark centroid.
            xcentral = [(x - xmean) for x in xlist]
            ycentral = [(y - ymean) for y in ylist]

            for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):
                landmarks_vectorised.append(x)
                landmarks_vectorised.append(y)

            # After the loop, w and z hold the last landmark's absolute
            # coordinates, so only that landmark's distance to the centroid
            # is appended.
            meannp = np.asarray((ymean, xmean))
            coornp = np.asarray((z, w))
            dist = np.linalg.norm(coornp - meannp)
            landmarks_vectorised.append(dist)
        if len(faces) < 1:
            landmarks_vectorised = "error"
        return landmarks_vectorised


    def make_sets(self):
        training_data = []
        training_labels = []
        prediction_data = []
        prediction_labels = []
        for emotion in self.emotions:
            training, prediction = self.get_images(emotion)
            for item in training:
                image = cv2.imread(item, 0)
                landmarks_vectorised = self.get_landmarks(image)
                if landmarks_vectorised != "error":
                    training_data.append(landmarks_vectorised)
                    training_labels.append(self.emotions.index(emotion))
            for item in prediction:
                image = cv2.imread(item, 0)
                landmarks_vectorised = self.get_landmarks(image)
                if landmarks_vectorised != "error":
                    prediction_data.append(landmarks_vectorised)
                    prediction_labels.append(self.emotions.index(emotion))
        cv2.destroyAllWindows()
        return training_data, training_labels, prediction_data, prediction_labels
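
The string cells built in getconfusion() above amount to an ad-hoc confusion matrix. An equivalent sketch using a dense NumPy array, assuming integer labels 0-4 as indices into self.emotions:

import numpy as np

def confusion_from_pairs(actual_labels, predicted_labels, n_classes=5):
    # Rows are actual classes, columns are predicted classes.
    cm = np.zeros((n_classes, n_classes), dtype=int)
    for a, p in zip(actual_labels, predicted_labels):
        cm[a, p] += 1
    return cm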
Example #5
class Demography(object):
    def __init__(self, face_method='dlib', device='cpu', age_method='mobilenetv2',
                 gender_method='mobilenetv2', gpu_config=0.3,
                 model_file='age_gender_frozen_trt_mobilenetv2.pb'):
        global config
        if gpu_config is not None and config is not None:
            config.gpu_options.per_process_gpu_memory_fraction = gpu_config
        
        if age_method == 'mobilenetv2' and gender_method == "mobilenetv2":
            self.gender_age_estimator = AgeGenderEstimate_mobilenetv2(gpu_frac=gpu_config, model_file=model_file)
            self.mixed_model = True
        else:
            self.mixed_model = False
            if age_method == 'inception':
                self.age_estimator = AgeEstimate()
            elif age_method == 'coral':
                self.age_estimator = AgeEstimate_Coral()
            elif age_method == "mobilenetv2":
                self.age_estimator = AgeGenderEstimate_mobilenetv2(gpu_frac=gpu_config)

            if gender_method == 'inception':
                self.gender_estimator = GenderEstimate()
            elif gender_method == 'mobilenetv2':
                self.gender_estimator = AgeGenderEstimate_mobilenetv2(gpu_frac=gpu_config)
                
        if face_method:
            self.face_detect = Face(detector_method=face_method, recognition_method=None)

    def run(self, imgcv, face_detect=False):
        results = []
        if face_detect:
            faces = self.face_detect.detect(imgcv)
            for face in faces:
                results.append(self.run_face(imgcv, face['box']))
        else:
            results.append(self.run_face(imgcv, None))
        return results

    def run_face(self, imgcv, face_box):
        if face_box is not None:
            face_image = common.subImage(imgcv, face_box)
        else:
            face_image = imgcv
            
        if self.mixed_model:
            face_image = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
            gender, age = self.gender_age_estimator.run(face_image)
        else:
            if isinstance(self.gender_estimator, AgeGenderEstimate_mobilenetv2):
                gender = self.gender_estimator.run(face_image)[0]
            else:
                gender = self.gender_estimator.run(face_image)

            if isinstance(self.age_estimator, AgeGenderEstimate_mobilenetv2):
                age = self.age_estimator.run(face_image)[1]
            else:
                if isinstance(self.age_estimator, AgeEstimate):
                    age = self.age_estimator.run(face_image)
                elif isinstance(self.age_estimator, AgeEstimate_Coral):
                    face_image = common.subImage(imgcv, face_box, padding_type='coral')
                    age = self.age_estimator.run(face_image)

        return self._format_results(face_box, age, gender)

    def _format_results(self, face_box, age, gender):
        out = {
                'box': face_box,
                'classes': [{'name': 'face',
                             'prob': None,
                             'meta': {'age': age,
                                      'gender': gender
                                      }
                            }]
            }
        return out
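
A hypothetical usage of Demography; the image path is illustrative, and the exact box format in the results depends on the project's Face detector.

import cv2

demography = Demography(face_method='dlib')
img = cv2.imread('people.jpg')  # illustrative path
for result in demography.run(img, face_detect=True):
    meta = result['classes'][0]['meta']
    print(result['box'], meta['age'], meta['gender'])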
Example #6
class dataCreator(Label):
    def __init__(self, label, vs):
        self.face = Face()
        self.label = label
        self.vs = vs
        self.emotions = ['Angry', 'Happy', 'Neutral', 'Sad', 'Shocked']
        self.target = "Dataset"

    def create(self):
        for emotion in self.emotions:
            print("please look {}. Press Capture button when ready".format(
                emotion))
            start_time = time.time()
            elapsed_time = time.time() - start_time
            sample = 0
            if not os.path.exists(self.target + os.sep + emotion):
                os.mkdir(self.target + os.sep + emotion)

            while True:
                while elapsed_time < 3.0:
                    image = self.vs.read()
                    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    elapsed_time = int(time.time() - start_time)

                    text = "Starting in {} seconds".format(3 - elapsed_time)

                    cv2.putText(image, str(text), (20, 40),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

                    cv2.waitKey(100)
                    img = Image.fromarray(image)

                    img = ImageTk.PhotoImage(img)

                    self.label.configure(image=img)
                    self.label.image = img

                image = self.vs.read()

                if sample > 20:
                    break
                else:
                    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
                    image_copy = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                    faces = self.face.detect(gray)
                    for x, y, w, h in faces:
                        sample += 1
                        #frame = image[y-10:y+h+10,x-10:x+w+10]
                        cv2.rectangle(image_copy, (x, y), (x + w, y + h),
                                      (255, 0, 0), 1)
                        cv2.imwrite(
                            self.target + os.sep + emotion + os.sep +
                            str(time.time()) + ".jpg", image)

                        cv2.waitKey(100)

                    img = Image.fromarray(image_copy)

                    img = ImageTk.PhotoImage(img)

                    self.label.configure(image=img)

                    self.label.image = img
Example #7
def demo_video(video_file):
    facedemo = Face(detector_method=DETECTOR, recognition_method=None)
    mtracker = MultiTracker(SingleTrackerType=CorrelationTracker)
    # mtracker = MultiTracker(SingleTrackerType=CorrelationTracker,
    #                         removalConfig=removalConfig)
    # mtracker = MultiTracker(SingleTrackerType = cv2.TrackerKCF_create)

    cap = common.VideoStream(video_file, queueSize=4).start()
    cv2.waitKey(500)
    Outcount, Incount = 0, 0

    total_t, counter = 0, 0
    t = common.clock()

    while not cap.stopped:

        imgcv = cap.read()
        if imgcv is not None:
            counter += 1
            detections = facedemo.detect(imgcv, upsamples=0)
            mtracker.update(imgcv, common.toCvbox(detections))
            cvboxes, ids = [], []

            for tid, tracker in mtracker.trackers.items():
                if tracker.visible_count > 3 and tracker.consecutive_invisible_count < 10:
                    state_current = get_pos(tracker.bbox)
                    try:
                        # Count a crossing whenever the tracked box switches
                        # sides of the counting line; the parity of
                        # statechange gives the crossing direction.
                        if state_current != tracker.regionside:
                            tracker.statechange += 1
                            print(state_current, tracker.regionside, tracker.statechange)
                            if state_current == 'Positive':
                                if tracker.statechange % 2:
                                    Incount += 1
                                else:
                                    Outcount -= 1
                            else:
                                if tracker.statechange % 2:
                                    Outcount += 1
                                else:
                                    Incount -= 1
                            tracker.regionside = state_current
                    except AttributeError:
                        # First sighting of this tracker: initialize its side.
                        tracker.regionside = state_current
                        tracker.statechange = 0

                    cvboxes.append(tracker.bbox)
                    ids.append(tid)

            detections = to_bbox(cvboxes)
            print(Incount, Outcount)
            cv2.line(imgcv, (LINE['x1'], LINE['y1']), (LINE['x2'], LINE['y2']),
                     (0, 0, 255), 4)
            imgcv = common.drawLabel(imgcv,
                                     "IN:%d  OUT:%d" % (Incount, Outcount),
                                     (10, 10),
                                     color=(0, 0, 255))
            common.showImage(common.drawObjects(imgcv, detections, ids))

        key = cv2.waitKey(1) & 0xFF
        if key == 27:
            break

        t1 = common.clock()
        dt = t1 - t
        t = t1
        total_t += dt
        print(counter / total_t)  # running average frames per second
Example #8
class Trainer:
    def __init__(self):
        self.predictor = dlib.shape_predictor("predictor.dat")
        self.clf = SVC(kernel='linear', probability=True, tol=1e-3)
        self.emotions = ['Angry', 'Happy', 'Neutral', 'Sad', 'Shocked']
        self.face = Face()

    def train(self):
        # predictor = dlib.shape_predictor("predictor.dat")
        # clf = SVC(kernel = 'linear', probability=True, tol=1e-3)
        # emotions = ['Angry','Happy','Neutral','Sad','Shocked']
        # face = Face()

        training_data, training_labels = self.make_sets()
        npar_train = np.array(training_data)
        self.clf.fit(npar_train, training_labels)
        print "training"
        joblib.dump(self.clf, 'CustomSVM.pkl')
        print "SVM trained successfully"
        mbox.showinfo("FER", "SVM training completed")

    def accuracy(self):
        test_data, test_labels = self.make_sets()
        npar_test = np.array(test_data)
        self.clf = joblib.load('Universal.pkl')
        accuracy = self.clf.score(npar_test, test_labels)
        print(accuracy)

    def get_images(self, emotion):
        print("Getting {} images".format(emotion))
        files = glob.glob("Dataset/%s/*" % emotion)
        print(len(files))
        return files

    def get_landmarks(self, image):
        image = modules.resize(image, width=400)
        cv2.imshow("SVM training", image)
        cv2.waitKey(500)
        faces = self.face.detect(image)

        for x, y, w, h in faces:
            roi = image[y:y + h, x:x + w]

            # Enlarging the ROI would give the shape predictor more context:
            # ROI = image[(y - 10):(y + h + 10), (x - 10):(x + w + 10)]

            #resizing the roi
            resized_roi = modules.resize(roi, width=800, inter=cv2.INTER_CUBIC)

            #getting the bottom and the right corner of the resized image
            bottom, right = resized_roi.shape[:2]

            #converting cv2 rectangle to dlib rectangle
            rect = dlib.rectangle(x, y, x + w, y + h)
            resized_rect = dlib.rectangle(0, 0, right, bottom)

            shape = self.predictor(image, rect)
            resized_shape = self.predictor(resized_roi, resized_rect)

            xlist = []
            ylist = []
            landmarks_vectorised = []

            for i in range(36, 68):
                xlist.append(float(resized_shape.part(i).x))
                ylist.append(float(resized_shape.part(i).y))

            xmean = np.mean(xlist)
            ymean = np.mean(ylist)

            xcentral = [(x - xmean) for x in xlist]
            ycentral = [(y - ymean) for y in ylist]

            for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):
                landmarks_vectorised.append(x)
                landmarks_vectorised.append(y)

            # As in confusionMatrix.get_landmarks, w and z hold the last
            # landmark's absolute coordinates after the loop above, so only
            # that landmark's distance to the centroid is appended.
            meannp = np.asarray((ymean, xmean))
            coornp = np.asarray((z, w))
            dist = np.linalg.norm(coornp - meannp)
            landmarks_vectorised.append(dist)
        if len(faces) < 1:
            landmarks_vectorised = "error"
        return landmarks_vectorised

    def make_sets(self):
        training_data = []
        training_labels = []
        for emotion in self.emotions:
            training = self.get_images(emotion)
            for item in training:
                image = cv2.imread(item, 0)
                landmarks_vectorised = self.get_landmarks(image)
                if landmarks_vectorised != "error":
                    training_data.append(landmarks_vectorised)
                    training_labels.append(self.emotions.index(emotion))
        cv2.destroyAllWindows()
        return training_data, training_labels
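
Once Trainer.train() has saved CustomSVM.pkl, the model can be reloaded and applied to a new landmark vector. A minimal sketch, assuming the 65-element feature vector produced by get_landmarks (32 landmarks x 2 coordinates, plus one centroid distance):

import numpy as np
import joblib  # older scikit-learn exposed this as sklearn.externals.joblib

clf = joblib.load('CustomSVM.pkl')
emotions = ['Angry', 'Happy', 'Neutral', 'Sad', 'Shocked']

# Placeholder with the expected dimensionality; a real vector would come
# from Trainer.get_landmarks().
features = np.zeros((1, 65))
print(emotions[int(clf.predict(features)[0])])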