Example #1
 def __init__(self, thresholds):
     print "INITIALIZING..."
     cascade = pickle.load(open('cascade.pickle'))
     print cascade.classifiers[2].oob_score_
     cascade.thresholds = [thresholds]
     face_detector = FaceDetector(cascade)
     face_detector.window_scales = [1]
     self.classifier = face_detector
Example #2
def detect(request):
	tmp_filename = 'media/' + ''.join(
		random.choice(string.ascii_uppercase + string.digits)
		for _ in range(64)) + '.jpg'
	if request.method == 'POST':
		handle_uploaded_file(request.FILES['image'], tmp_filename)
	elif request.method == 'GET':
		url = request.GET['url']
		try:
			urllib.request.urlretrieve(url, tmp_filename)
		except Exception:
			return JsonResponse({'code': config.CODE_INV_URL})
	else:
		return JsonResponse({'code':config.CODE_MTHD_NOT_SPRT})

	try:
		faces = FaceDetector.detectFace(tmp_filename)

		res = {}
		res['code'] = config.CODE_SUCCESS
		res['num'] = len(faces)
		res['coordinates'] = []
		for (x, y, w, h) in faces:
			res['coordinates'].append("%d,%d,%d,%d"%(x, y, w, h))
		res["url"] = '/' + tmp_filename
	except Exception:
		res = {'code': config.CODE_SYS_ERR}
	return JsonResponse(res)
Example #3
def get_num_discarded(data_path, detection_classifier):
    discard_total = 0
    image_total = 0
    dirs = os.listdir(data_path)  # get names of everything one level below data_path
    for dir in dirs:
        dir_path = Path(data_path, dir)
        if os.path.isdir(dir_path):  # only keep directories
            images = os.listdir(str(dir_path))
            for image in images:
                image_path = Path(dir_path, image)
                if imghdr.what(image_path) is not None:  # check to make sure it's an image
                    img = Initializer.load_image(image_path)
                    image_total += 1
                    face_tuple = FaceDetector.get_faces(img, detection_classifier)
                    if len(face_tuple) <= 0:  # no face detected -> count it as discarded
                        discard_total += 1

    return discard_total, image_total
Example #4
def from_video_detection(video_path, output_path, opencv_classifier):
    cap = cv2.VideoCapture(video_path)

    count = 0
    while cap.isOpened():
        ret_code, frame = cap.read()
        if ret_code:
            ret_frame_list = FaceDetector.detect_faces(frame, opencv_classifier)
            if len(ret_frame_list) <= 0:
                continue
            if count % 11 == 0:
                write_path = output_path + str(count) + ".jpg"
                cv2.imwrite(write_path, frame)
                print(count)
            count += 1
            if cv2.waitKey(1) & 0xFF == ord('q'):  # waitKey delay is the time (ms) between frames; press q to exit
                break
        else:
            break

    # Release everything if job is finished
    cap.release()
    cv2.destroyAllWindows()
Example #5
def send():
    
    cap = cv2.VideoCapture(0) #open the camera
    global fourcc, out
    fourcc = cv2.VideoWriter_fourcc(*config.H264_FOURCC)
    if config.IS_TEST:
        out = cv2.VideoWriter(config.H264_PIPELINE, fourcc, config.CAP_PROP_FPS_TEST,
                              (config.CAP_PROP_FRAME_WIDTH_TEST, config.CAP_PROP_FRAME_HEIGHT_TEST), True)  # output GStreamer pipeline
    else:  # meaning that we are in prod
        out = cv2.VideoWriter(config.H264_PIPELINE, fourcc, config.CAP_PROP_FPS_PROD,
                              (config.CAP_PROP_FRAME_WIDTH_PROD, config.CAP_PROP_FRAME_HEIGHT_PROD), True)  # output GStreamer pipeline


    # Face detector
    faceDetector = FaceDetector.FaceDetector(config.caffe_model, config.prototxt_file, config.detection_threshold)

    if not out.isOpened():
        print('VideoWriter not opened')
        exit(1)

    while cap.isOpened():
        ret, frame = cap.read()

        if ret:
            frame = faceDetector.recognition(frame)


            # Write to pipeline
            out.write(frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    cap.release()
    out.release()
Example #6
    def __init__(self):
        # self.t0 = time.time()  # timer used for debugging the video's fps
        signal.signal(signal.SIGINT, self.sigint_handler)
        # reduce the video fps since the Coral's processing slows down the counting
        # POSSIBLE alternative solution: first save the videos, nightly - process them.
        self.video_writer = VideoWriter(output_path='/home/mendel/mnt/resources/videos',
                                        fps=15.0)
        self.recording_last_face_seen_timestamp = 0

        self.face_detector = FaceDetector(model_path='/home/mendel/mnt/cameraSamples/examples-camera/all_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')
        self.face_classifier = Classifier(using_model='/home/mendel/mnt/cameraSamples/examples-camera/imprinting_classification/retrained_imprinting_model.tflite',
                                          label_file='/home/mendel/mnt/cameraSamples/examples-camera/imprinting_classification/retrained_imprinting_model.txt')

        self.who = dict()
        self.counter = 0
        self.counter_up_down = False  # on off switch. False for human not visible (thus-down). True for face up.
        self.counting_prev_face_seen_timestamp = 0
Example #7
    def __init__(self):
        # Initializing path for saving face frames
        self.face_img_saving_path = "Dataset/TestingDataset/Unknown/"
        
        # Creating object of Face Detector
        self.FD = FaceDetector()
        
        # Initializing paths for 'test images dataset'
        self.test_path = "Dataset/TestingDataset/"        
        # Initializing path for the test dataset images
        self.unknown_img_path = self.test_path + "Unknown/"
                
        # Loading 'YAML' and 'creating model'
        yaml_file = open('TrainedEntities/custom_vgg16_model.yaml', 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        self.predictor_model = model_from_yaml(loaded_model_yaml)

        # Loading 'weights' into the newly created model
        self.predictor_model.load_weights("TrainedEntities/custom_vgg16_model_weights.h5")
        print("Loaded model from disk")    
Example #8
def startApp():

    # Show the UI
    app = QtWidgets.QApplication(sys.argv)
    window = FaceDetector.FaceDetector()

    # Serial-port device
    thread = threading.Thread(target=LockDevice.lockDevice, args=(window, ))
    thread.start()

    window.show()

    sys.exit(app.exec_())
Example #9
        def t():
            # take the constructors out of the thread
            fd = FaceDetector()

            # classes = ['angry','disgusted','fearful','happy','sad','surprised','neutral']
            er = EmotionRecognizer("models/simple_fer2013_named/model.h5", 48,
                                   48)

            while self.state > -1:
                '''call the emotion-recognition class with the recorded
                imgs in an array'''
                # img = cv2.resize(self.lastImage, None, fx=0.5, fy=0.5)
                # convert the PIL image to an array
                # print(self.lastImage)
                img = cv2.resize(np.array(self.lastImage),
                                 None,
                                 fx=0.5,
                                 fy=0.5)

                faces = fd.detect([img])

                # print(faces)
                # print(len(faces))

                # faces would be [[]] because it is a vector prepared to receive a batch of imgs
                # with detected faces and, for each face, 4 corners. If it receives one image per
                # instant and no face is detected, len(faces) will be 1, but len(faces[0]) will be 0
                if faces != [[]]:
                    faceAoi = img[faces[0][0][1]:faces[0][0][3],
                                  faces[0][0][0]:faces[0][0][2]]
                    # cv2.imshow("viz", faceAoi)
                    # cv2.waitKey(0)

                    ### TRY SETTING getOnlyTheBiggest = True
                    emotion = er.detect(faceAoi)
                    # returns a vector with the probability that the input image shows each of the above emotions
                    print(self.classes[np.argmax(emotion)])
                    # print emotion
                    self.emotionsArray.append(np.argmax(emotion))
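
The comments above describe the batch-shaped return value of fd.detect: faces[0] holds the boxes for the single input image, and the slicing reads each box as [x1, y1, x2, y2]. A minimal sketch of that unpacking as a helper, assuming exactly that structure (crop_first_face is a hypothetical name, not part of the original code):

def crop_first_face(img, faces):
    # faces is batch-shaped: faces[0] is the list of boxes for the one input image
    boxes = faces[0]
    if not boxes:  # no face detected in this frame
        return None
    x1, y1, x2, y2 = boxes[0]  # assumed corner order, matching the slicing above
    return img[y1:y2, x1:x2]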
Example #10
 def __init__(self, camName, camURL, cameraFunction, dlibDetection,
              fpsTweak):
     logger.info("Loading Stream From IP Camera: " + camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Used to choose detection method for camera (dlib - True vs opencv - False)
     self.fpsTweak = fpsTweak  # used to know if we should apply the FPS work around when you have many cameras
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     self.peopleDictLock = threading.Lock()  # Used to block concurrent access to people dictionary
     self.video = cv2.VideoCapture(camURL)  # VideoCapture object used to capture frames from IP camera
     logger.info("We are opening the video feed.")
     self.url = camURL
     self.camName = camName
     if not self.video.isOpened():
         # raise Exception("could not open camera or channel input " + camURL)
         self.video.open(camURL)
     logger.info("Video feed open.")
     self.dump_video_info()  # logging every specs of the video feed
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread ' + camURL,
                                           target=self.get_frame)
     self.captureThread.daemon = True
     self.captureThread.stop = False
     self.captureThread.start()
     #RdL Load Params from HSConfig.cfg
     hsconfigparser = SafeConfigParser()
     hsconfigparser.read('HSConfig.cfg')
     self.param_cameramode = hsconfigparser.get('MACHINERY', 'cameramode')
     logger.info('Video Feed opened in mode: ' + self.param_cameramode)
     print('Video Feed opened in mode: ' + self.param_cameramode)
Example #11
def get_recognition_stats(img, name_list, detection_classifier,
                          trained_recognizer):
    data = FaceDetector.get_faces(img, detection_classifier)  # get all faces/coords in image and try to detect all
    if len(data) <= 0:
        print("No faces detected in test image -> skipped")
        return (None, None)

    face, coord = data[0]  # take 1st detected face
    label, confidence = trained_recognizer.predict(face)
    # if confidence > 125: name = unknown
    face_name = name_list[label]
    return (face_name, confidence)
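
The commented-out threshold above hints at open-set handling: an LBPH-style confidence is a distance, so a larger value means a weaker match. A minimal sketch of how that cutoff might be applied, assuming the 125 cutoff and the "Unknown" label from the comment (both are assumptions, not part of the original code):

def get_recognition_name(img, name_list, detection_classifier, trained_recognizer,
                         unknown_threshold=125):
    # Hypothetical variant of get_recognition_stats that maps
    # high-distance predictions to "Unknown" instead of a known name.
    data = FaceDetector.get_faces(img, detection_classifier)
    if len(data) <= 0:
        return None
    face, _ = data[0]  # take 1st detected face
    label, confidence = trained_recognizer.predict(face)
    if confidence > unknown_threshold:  # larger distance = weaker match
        return "Unknown"
    return name_list[label]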
Example #12
    def __init__(self, image_buffer, state_variable_stop_processing,
                 shape_predictor_68_datfile_location,
                 torch_neuralnetwork_model_location,
                 classifiermodel_picklefile_location):
        """ Constructor """
        threading.Thread.__init__(self)
        self.__image_buffer = image_buffer
        self.__state_variable_stop_processing = state_variable_stop_processing
        self.__face_detector = fd.FaceDetector(
            shape_predictor_68_datfile_location,
            torch_neuralnetwork_model_location)
        self.__face_recognizer = frec.FaceRecognizer(
            classifiermodel_picklefile_location)

        self.__Windowname = "Mustie vision"
Example #13
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        ret, frame = self.cap.read()

        self.timer = CvTimer()

        self.doCanny = False

        self.mainWindow = cv2.namedWindow("FaceOff", cv2.WINDOW_AUTOSIZE | cv2.WINDOW_OPENGL)

        # Milliseconds of delay used by waitKey() at the end of every frame
        self.delayAtEndOfFrame = 5

        self.faceDetector = FaceDetector()

        self.detectFaces = True
Example #14
 def __init__(self, camURL, cameraFunction, dlibDetection, fpsTweak):
     logger.info("Loading Stream From IP Camera: " + camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Used to choose detection method for camera (dlib - True vs opencv - False)
     self.fpsTweak = fpsTweak  # used to know if we should apply the FPS work around when you have many cameras
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     self.peopleDictLock = threading.Lock()  # Used to block concurrent access to people dictionary
     uri = camURL
     latency = 100
     width = 1280
     height = 720  #738
     framerate = 25
     #gst_str = ("rtspsrc location={} latency={} ! rtph264depay ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw, width=(int){}, height=(int){}, framerate={}/1, format=(string)BGRx ! videoconvert ! appsink").format(uri, latency, width, height, framerate)
     #gst_str = ("rtspsrc location={} latency={} ! queue ! rtph264depay ! queue ! h264parse ! omxh264dec ! nvvidconv ! video/x-raw,format=BGRx ! videoconvert ! video/x-raw,format=BGR ! appsink").format(uri, latency)
     gst_str = "rtspsrc location={} ! application/x-rtp, media=video ! rtph264depay ! h264parse ! nvv4l2decoder ! nvvidconv ! video/x-raw, format=BGRx ! videoconvert ! video/x-raw, format=BGR ! appsink".format(
         uri)
     self.video = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
     #self.video = cv2.VideoCapture(camURL) # VideoCapture object used to capture frames from IP camera
     logger.info("We are opening the video feed.")
     self.url = camURL
     logger.info("Video feed open.")
     self.dump_video_info()  # logging every specs of the video feed
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread',
                                           target=self.get_frame)
     self.captureThread.daemon = True
     self.captureThread.stop = False
     self.captureThread.start()
Example #15
def preprocess(training_data_path, detection_classifier):
    face_list = []
    label_list = []
    dirs = os.listdir(training_data_path)  # get names of everything one level below training_data_path
    label = 0
    for dir in dirs:
        dir_path = Path(training_data_path, dir)
        if os.path.isdir(dir_path):  # only keep directories
            images = os.listdir(str(dir_path))
            discard_count = 0
            image_count = 0
            for image in images:
                image_path = Path(dir_path, image)
                if imghdr.what(image_path) is not None:  # check to make sure it's an image
                    img = Initializer.load_image(image_path)
                    image_count += 1
                    face_tuple = FaceDetector.get_faces(img, detection_classifier)
                    if len(face_tuple) == 1:  # if one face detected, keep in training data, else skip
                        face_tuple = face_tuple[0]
                    else:
                        #print("No faces or more than one face found in "+ str(image_path) + " so not used")
                        discard_count += 1
                        continue
                    if face_tuple[0] is not None:  # face_tuple[0] = face image in grayscale
                        face_list.append(face_tuple[0])
                        label_list.append(label)
                    else:
                        print("No face was detected in " + str(image_path) +
                              " so the image is not used")
            print(str(discard_count) + "/" + str(image_count) + " images discarded for " + dir)
            label += 1
    if len(face_list) <= 0:
        print("No faces detected in training data -> cannot proceed")
        raise SystemExit

    return face_list, label_list
Example #16
def get_recognition(img, name_list, detection_classifier, trained_recognizer):
    img_copy = img.copy()
    data = FaceDetector.get_faces(img, detection_classifier)  # get all faces/coords in image and try to detect all
    if len(data) <= 0:
        print("No faces detected in test image -> skipped")
        return (img_copy, None)

    for face, coord in data:  # perform recognition on every detected face
        return_code = 25
        label, confidence = trained_recognizer.predict(face)
        if confidence > 110.0:  # < 45% match
            return_code = None
            continue

        face_name = name_list[label]
        label_image(img_copy, coord, face_name, confidence)
    return (img_copy, return_code)
Example #17
def detectFace():
    with open(tmpImgPath, 'rb') as f:
        npImg = comm.bytes2NpImg(f.read())

    if npImg is None:
        return MSG['EMPTY']

    npImg = scaledImg(npImg)
    hasFace = FaceDetector.markAllFacesByDlib(npImg)

    content = comm.npImg2Bytes(npImg)
    #    msg = {
    #        "type": "proceeded",
    #        'detail': 'faceDetected' if hasFace else 'noFace',
    #        "content": content
    #    }
    msg = dict(MSG['PROCEEDED'])  # copy so the shared message template is not mutated
    msg['detail'] = 'faceDetected' if hasFace else 'noFace'
    msg['content'] = content
    return msg
Example #18
	def __init__(self, camURL, cameraFunction="detect_recognise_track", dlibDetection=True, fpsTweak=False):
		logger.info("Loading Stream From IP Camera: " + camURL)
		self.motionDetector = MotionDetector.MotionDetector()
		self.faceDetector = FaceDetector.FaceDetector()
		self.processing_frame = None
		self.tempFrame = None
		self.captureFrame  = None
		self.streamingFPS = 0 # Streaming frame rate per second
		self.processingFPS = 0
		self.FPSstart = time.time()
		self.FPScount = 0
		self.motion = False # Used for alerts and transition between system states, i.e. from motion detection to face detection
		self.people = {} # Holds person ID and corresponding person object 
		self.trackers = [] # Holds all alive trackers
		self.cameraFunction = cameraFunction 
		self.dlibDetection = dlibDetection # Used to choose detection method for camera (dlib - True vs opencv - False)
		self.fpsTweak = fpsTweak # used to know if we should apply the FPS work around when you have many cameras
		self.rgbFrame = None
		self.faceBoxes = None
		self.captureEvent = threading.Event()
		self.captureEvent.set()	
		self.peopleDictLock = threading.Lock() # Used to block concurrent access to people dictionary
		
		if camURL == 'w':
			self.video = cv2.VideoCapture(0)
			camURL = 0		
		else:
			self.video = cv2.VideoCapture(camURL) # VideoCapture object used to capture frames from IP camera
		
		self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
		self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
		self.url = camURL
		logger.info("We are opening the video feed.")
		logger.info("Video feed open.")
		# Start a thread to continuously capture frames.
		# The capture thread ensures the frames being processed are up to date and are not old
		self.captureLock = threading.Lock() # Sometimes used to prevent concurrent access
		self.captureThread = threading.Thread(name='video_captureThread',target=self.get_frame)
		self.captureThread.daemon = True
		self.captureThread.stop = False
		self.captureThread.start()
Example #19
def detectFace():
    npImg = getTmpImg()

    if npImg is None:
        return MSG['EMPTY']

    npImg = comm.scaledImg(npImg)
    faces = FaceDetector.getAllFaceBoxes(npImg)

    boxes = []

    for face in faces:
        box = {
            'left': face.left(),
            'top': face.top(),
            'right': face.right(),
            'bottom': face.bottom()
        }
        boxes.append(box)

    msg = dict(MSG['DETECTED_FACES'])  # copy so the shared message template is not mutated
    msg['boxes'] = boxes
    return msg
Example #20
 def __init__(self, camURL, cameraFunction, dlibDetection):
     print("Loading Stream From IP Camera ", camURL)
     self.motionDetector = MotionDetector.MotionDetector()
     self.faceDetector = FaceDetector.FaceDetector()
     self.processing_frame = None
     self.tempFrame = None
     self.captureFrame = None
     self.streamingFPS = 0  # Streaming frame rate per second
     self.processingFPS = 0
     self.FPSstart = time.time()
     self.FPScount = 0
     self.motion = False  # Used for alerts and transition between system states, i.e. from motion detection to face detection
     self.people = {}  # Holds person ID and corresponding person object
     self.trackers = []  # Holds all alive trackers
     self.cameraFunction = cameraFunction
     self.dlibDetection = dlibDetection  # Used to choose detection method for camera (dlib - True vs opencv - False)
     self.rgbFrame = None
     self.faceBoxes = None
     self.captureEvent = threading.Event()
     self.captureEvent.set()
     self.peopleDictLock = threading.Lock()  # Used to block concurrent access to people dictionary
     self.video = cv2.VideoCapture(camURL)  # VideoCapture object used to capture frames from IP camera
     self.url = camURL
     if not self.video.isOpened():
         self.video.open(camURL)
     # Start a thread to continuously capture frames.
     # The capture thread ensures the frames being processed are up to date and are not old
     self.captureLock = threading.Lock()  # Sometimes used to prevent concurrent access
     self.captureThread = threading.Thread(name='video_captureThread',
                                           target=self.get_frame)
     self.captureThread.daemon = True
     self.captureThread.start()
Example #21
import FaceDetector as fd
import threading, cv2

d = fd.FaceDetector("/home/pixiepro/Demos/FTF/faceDetector/weights.txt",
                    'img.jpg')
t = threading.Thread(target=d.detectFace)
t.start()
Example #22
import telebot
import os
import subprocess
import cv2
import re
import FaceDetector

token = "1311274358:AAGcCioU_6hBzedZa42FJ8bfDOuX7DEFB7s"
detector = FaceDetector.FaceDetector()
bot = telebot.TeleBot(token)


def downloadFile(fileId):
    return bot.download_file(bot.get_file(fileId).file_path)


@bot.message_handler(content_types=['photo'])
def getPhoto(message):
    bot.send_message(message.chat.id, 'You sent a photo')

    userDir, photoFilesList = getFilesList('./photo/', message.from_user.id)

    photoBytes = downloadFile(message.photo[-1].file_id)
    tmpFilePath = userDir + 'test.jpg'
    with open(tmpFilePath, 'wb') as file:
        file.write(photoBytes)

    image, faceLocations = detector.detectFaceWithMTCNN(tmpFilePath)
    # alternative: detector.detectFaceWithCascades(tmpFilePath)
    faceQuantity = len(faceLocations)
    cv2.imwrite(userDir + 'testWithMarker.jpg', image)
Example #23
class FacePredictor:

    def __init__(self):
        # Initializing path for saving face frames
        self.face_img_saving_path = "Dataset/TestingDataset/Unknown/"
        
        # Creating object of Face Detector
        self.FD = FaceDetector()
        
        # Initializing paths for 'test images dataset'
        self.test_path = "Dataset/TestingDataset/"        
        # Initializing path for the test dataset images
        self.unknown_img_path = self.test_path + "Unknown/"
                
        # Loading 'YAML' and 'creating model'
        yaml_file = open('TrainedEntities/custom_vgg16_model.yaml', 'r')
        loaded_model_yaml = yaml_file.read()
        yaml_file.close()
        self.predictor_model = model_from_yaml(loaded_model_yaml)

        # Loading 'weights' into the newly created model
        self.predictor_model.load_weights("TrainedEntities/custom_vgg16_model_weights.h5")
        print("Loaded model from disk")    
             
    
    
    def ParameterPreparation(self):
        # Preparing test image batches
        self.test_batches = ImageDataGenerator().flow_from_directory(self.test_path, target_size=(224,224), classes=None, batch_size=10)
        # Finding number of testing images
        self.no_of_images = len(os.listdir(self.unknown_img_path))
           
    
    def TestFaceDatasetDeletor(self):
        # Deleting the test dataset images
        for file in os.listdir(self.unknown_img_path):
            if file.split('.')[-1] == "jpg":
                os.remove(os.path.join(self.unknown_img_path, file))
                
                
    def Predictor(self, test_image):
        self.test_image = test_image
        # Saving faces found in the image
        self.FD.Detector(test_image, (self.face_img_saving_path + "User_"), 200, 1)        
        
        # Prepares the parameters for face prediction
        self.ParameterPreparation()
        
        # Predicting Ids of Dataset
        predictions = self.predictor_model.predict_generator(self.test_batches, steps=int(self.no_of_images/10)+1, verbose=0)
        
        # Extracting Ids from predictions
        Ids = []
        for i in range(len(predictions)):
            Ids.append(int(np.argmax(predictions[i])))
        
        
        # Deleting the dataset formed for testing images
        self.TestFaceDatasetDeletor()
        
        return Ids
Example #24
class FaceOffCore:
    def __init__(self):
        self.cap = cv2.VideoCapture(0)
        ret, frame = self.cap.read()

        self.timer = CvTimer()

        self.doCanny = False

        self.mainWindow = cv2.namedWindow("FaceOff", cv2.WINDOW_AUTOSIZE | cv2.WINDOW_OPENGL)

        # Milliseconds of delay used by waitKey() at the end of every frame
        self.delayAtEndOfFrame = 5

        self.faceDetector = FaceDetector()

        self.detectFaces = True

    def Main(self):
        framerate = 0.0
        previousTime = time.time()
        displayFrameRate = 0.0

        while True:
            self.timer.mark_new_frame()

            framerate += 1
            if time.time() - previousTime >= 1:
                previousTime = time.time()
                displayFrameRate = framerate
                framerate = 0.0

            # Capture frame-by-frame
            ret, frame = self.cap.read()
            if not ret:  # camera read failed
                break
            frameHeight, frameWidth = frame.shape[:2]

            # Make a grayscale copy of the frame for processing
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if self.doCanny:
                # Our operations on the frame come here

                # Blur the image a bit to reduce noise
                # (9, 9) controls the blur amount; kernel sizes must be odd
                gray = cv2.GaussianBlur(gray, (9, 9), 0)
                # Perform Canny
                edges = cv2.Canny(gray, 20, 50)

                # Adjust Canny line width
                kernel = np.ones((2, 2), np.uint8)
                dilation = cv2.dilate(edges, kernel, iterations=1)

                # Make an empty image the size of the source frame filled with zeros
                colorMask = np.zeros((frameHeight, frameWidth, 3), np.uint8)
                # Set every pixel of colorMask to be red (colors are unsigned ints)
                colorMask[:] = (0, 0, 255)

                # Apply the colored image (colorMask) to the input frame using edges as a mask
                cv2.bitwise_and(colorMask, frame, frame, mask=dilation)

            if self.detectFaces:
                self.faceDetector.processFrame(frame, gray)

            # Draw program FPS to screen
            self.timer.drawToFrame(frame, 10, 15)

            # Draw camera FPS to screen
            cv2.putText(
                frame, "camera fps=%s" % (displayFrameRate), (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)
            )

            cv2.putText(
                frame,
                "Face detection [f] %s" % (self.detectFaces),
                (10, 55),
                cv2.FONT_HERSHEY_SIMPLEX,
                0.5,
                (0, 255, 0),
            )
            cv2.putText(
                frame, "Canny detection [c] %s" % (self.doCanny), (10, 75), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0)
            )

            # Display the resulting frame
            cv2.imshow("FaceOff", frame)

            key = cv2.waitKey(self.delayAtEndOfFrame) & 0xFF
            if key == 27 or key == ord("q"):  # Escape or q quits
                # When everything done, release the capture
                self.cap.release()
                cv2.destroyAllWindows()
                break
            elif key == ord("c"):
                self.doCanny = not self.doCanny
            elif key == ord("f"):
                self.detectFaces = not self.detectFaces
Example #25
class Main:
    def __init__(self):
        # self.t0 = time.time()  # timer used for debugging the video's fps
        signal.signal(signal.SIGINT, self.sigint_handler)
        # reduce the video fps since the Coral's processing slows down the counting
        # POSSIBLE alternative solution: first save the videos, nightly - process them.
        self.video_writer = VideoWriter(output_path='/home/mendel/mnt/resources/videos',
                                        fps=15.0)
        self.recording_last_face_seen_timestamp = 0

        self.face_detector = FaceDetector(model_path='/home/mendel/mnt/cameraSamples/examples-camera/all_models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')
        self.face_classifier = Classifier(using_model='/home/mendel/mnt/cameraSamples/examples-camera/imprinting_classification/retrained_imprinting_model.tflite',
                                          label_file='/home/mendel/mnt/cameraSamples/examples-camera/imprinting_classification/retrained_imprinting_model.txt')

        self.who = dict()
        self.counter = 0
        self.counter_up_down = False  # on off switch. False for human not visible (thus-down). True for face up.
        self.counting_prev_face_seen_timestamp = 0

    def _record(self, image, face_rois_in_image: List[List[int]]) -> Tuple[CONSTANTS.RECORD_STATUS, Union[None, str]]:
        seeing_a_face = len(face_rois_in_image) > 0

        if self.video_writer.is_video_recording_in_progress():
            # print("{}".format(time.time() - self.t0))  # timer used for debugging the video's fps
            # self.t0 = time.time()  # timer used for debugging the video's fps
            self.video_writer.add_image(numpy.array(image))

            if time.time() - self.recording_last_face_seen_timestamp >= CONSTANTS.NO_FACE_THRESHOLD_SEC:
                video_path = self.video_writer.video_name  # create a backup since stop_video_recording messes this up
                self.video_writer.stop_video_recording()
                self.recording_last_face_seen_timestamp = 0
                return CONSTANTS.RECORD_STATUS.JUST_STOPPED, video_path

            if seeing_a_face:
                self.recording_last_face_seen_timestamp = time.time()
                self.video_writer.save_image_at_same_path(numpy.array(image.crop(face_rois_in_image[0])))

            return CONSTANTS.RECORD_STATUS.ON, self.video_writer.video_name

        elif seeing_a_face:
            self.recording_last_face_seen_timestamp = time.time()
            self.video_writer.start_video_recording(numpy.array(image))
            self.video_writer.save_image_at_same_path(numpy.array(image.crop(face_rois_in_image[0])))
            return CONSTANTS.RECORD_STATUS.JUST_STARTED, self.video_writer.video_name

        return CONSTANTS.RECORD_STATUS.OFF, None

    def _count(self, face_rois_in_image: List[List[int]]) -> int:
        seeing_a_face = len(face_rois_in_image) > 0

        # if seeing a face and was not seeing a face before
        if seeing_a_face and not self.counter_up_down:
            self.counter_up_down = True  # up

            # if at least MIN_SEC_PER_PULLUP have passed, it means there was a pullup done
            # and the coral camera did not just lose focus
            if time.time() - self.counting_prev_face_seen_timestamp >= CONSTANTS.MIN_SEC_PER_PULLUP:
                self.counting_prev_face_seen_timestamp = time.time()
                self.counter += 1

        elif not seeing_a_face:
            self.counter_up_down = False  # down

        return self.counter

    def _whothis(self, image_of_face: Image) -> str:
        who_prediction = self.face_classifier.classify(image=image_of_face, top_k=len(self.face_classifier.labels))
        who_prediction = {str(k): v for k, v in who_prediction}
        for k in who_prediction:
            self.who[k] = self.who.get(k, 0.0) + who_prediction[k]
        maxid = max(self.who.items(), key=operator.itemgetter(1))[0]
        # print('who_now', who_prediction)
        # print('who_all', self.who)
        # print('who idx: ', maxid, int(maxid))
        # print('labels:  ', self.face_classifier.labels)
        return self.face_classifier.labels[int(maxid)]

    def _write_number_on_photo(self, image: Image, number: int):
        ImageDraw.Draw(image).text((10, 8),
                                   text=str(number),
                                   fill=(255, 0, 0),
                                   font=ImageFont.truetype(font=CONSTANTS.font_path, size=24))

    def _save(self, who: str, pullup_counts: int, evidence_path: str):
        with open(CONSTANTS.db_path, "a+") as track_file:
            # when,who,how_many,evidence
            track_file.write('{},{},{},{}\n'.format(time.time(), who, pullup_counts, evidence_path))

    def _reset_session(self):
        self.counter = 0
        self.who = dict()

    def _callback(self, image, svg_canvas):
        face_rois_in_image = self.face_detector.predict(image)

        counts = self._count(face_rois_in_image=face_rois_in_image)
        self._write_number_on_photo(image, number=counts)

        record_status, video_path = self._record(image=image,
                                                 face_rois_in_image=face_rois_in_image)

        if len(face_rois_in_image) > 0:
            self._whothis(image_of_face=image.crop(face_rois_in_image[0]))

        if record_status == CONSTANTS.RECORD_STATUS.JUST_STOPPED:
            self._save(who=self.face_classifier.labels[int(max(self.who.items(), key=operator.itemgetter(1))[0])],
                       pullup_counts=self.counter,
                       evidence_path=video_path)
            self._reset_session()


    def sigint_handler(self, signum, frame):
        self.video_writer.stop_video_recording()

    def start(self):
        _ = gstreamer.run_pipeline(self._callback, appsink_size=(320, 240))
        self.video_writer.stop_video_recording()
Example #26
import FaceDetector as fd
import threading, cv2

d = fd.FaceDetector("/home/pixiepro/Desktop/ftf-iot-demo/faceDetector/weights.txt",'img.jpg')
t = threading.Thread(target=d.detectFace)
t.start()
Example #27
def load_model(opt):
    model = create_model(opt)
    model.setup(opt)  # regular setup: load and print networks; create schedulers
    if opt.eval:
        model.eval()

    return model


def image_to_cycleGAN_data(image):
    data = {"A": None, "A_paths": None}
    image = np.array([image])
    image = image.transpose([0, 3, 1, 2])
    data['A'] = torch.Tensor(image)
    return data


if __name__ == '__main__':
    face_detect = fd.FaceDetector(
        'face-detection-model/deploy.prototxt.txt',
        'face-detection-model/opencv_face_detector.caffemodel')
    face = face_detect.detect_face_from_image('imgs/face_before.jpg')
    image = np.asarray(Image.open('imgs/yosemite.jpg'))

    model = CycleGAN.CycleGAN('style_vangogh_pretrained')

    model.set_model_input(face)

    image = model.run_inference()

    torchvision.utils.save_image(image['fake'], 'output.jpg')
Example #28
# set model paths
faceModel = "models/face_model/res10_300x300_ssd_iter_140000.caffemodel"
faceProto = "models/face_model/deploy.prototxt"
genderModel = "models/gender_model/gender_net.caffemodel"
genderProto = "models/gender_model/gender_deploy.prototxt"
ageProto = "models/age_model/age_deploy.prototxt"
ageModel = "models/age_model/age_net.caffemodel"
bodyProto = "models/body_model/mobilenet.prototxt"
bodyModel = "models/body_model/mobilenet.caffemodel"
maskModel = "models/mask_model/mnv2_mask_classifier_v4.pth"

warn = MaskWarning(cooldown=5)
brightOpt = BrightnessOptimizer()
# initialize detectors
face_detector = FaceDetector(faceProto, faceModel)
FACE_CONFID_THRESH = 0.3
age_gender_detector = AgeGenderDetector(ageProto, ageModel, genderProto, genderModel)
body_detector = BodyDetector(bodyProto, bodyModel)
BODY_CONFID_THRESH = 0.5
face_mask_classifier = FaceMaskClassifier(maskModel)

# initialize distance measurement
'''
FOCAL = (P x D) / W
P = height of the reference object in the picture, in pixels
D = distance of the reference object to the camera when the photo was taken, in cm
W = actual height of the reference object
'''
DIST_REF = 22
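
Given a calibrated focal length, the pinhole relation above can be inverted to estimate distance as D = (W x FOCAL) / P. A minimal sketch under that assumption (estimate_distance and the example numbers are hypothetical, not part of the original code; how DIST_REF = 22 maps onto these symbols is not shown in the snippet):

def estimate_distance(focal_px, real_height_cm, pixel_height):
    # Invert FOCAL = (P x D) / W from the calibration note above:
    # D = (W x FOCAL) / P
    return (real_height_cm * focal_px) / pixel_height

# hypothetical usage: object with real height W = 22 cm, calibrated focal
# length FOCAL = 600 px, detected box height P = 110 px
# distance_cm = estimate_distance(600, 22, 110)  # -> 120.0 cm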
Example #29
import json

from flask import Flask, render_template
from flask_cors import CORS
from flask_socketio import SocketIO

from FaceDetector import FaceDetector

from keras.models import load_model

from PIL import Image

import cv2

from scipy.misc import imresize

app = Flask(__name__)
socketio = SocketIO(app)

CORS(app)

fd = FaceDetector("model.h5")
fd.load_model()

model = load_model("emotemodel.h5")

cur_face_profile = None
cur_emote_profile = None


@app.route("/game")
def serve_main_page():
    return render_template("index.html")

Example #30
from __future__ import print_function
import cv2
import argparse
from FaceDetector import *

ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", required=True,
                help="path to where the face cascade resides")
ap.add_argument("-i", "--image", required=True,
                help="path to where the image file resides")
args = vars(ap.parse_args())

image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

fd = FaceDetector(args['face'])
faceRects = fd.detect(gray, scaleFactor=1.2, minNeighbors=5, minSize=(30, 30))

print("I found {} face(s)".format(len(faceRects)))

for (x, y, w, h) in faceRects:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

cv2.imshow("Faces", image)
cv2.waitKey(0)