Example #1
    frame = video_getter.frame.copy()
    frame = imutils.resize(frame, width=500)
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    (H, W) = frame.shape[:2]
    rects = []

    if cpt % skipped_frames == 0:
        recon = []
        fotos = []
        ps = []
        trackers = []
        devices = []

        detections = get_faces(detector, embedder, sp, frame, D_PROB, fa)
        # [(face, vector, coordinates, full_image)]
        face_data = [(*face, *recognize(face[1], recognizer, le, R_PROB)) for face in detections]
        # [(face, vector, coordinates, full_image, name, prob)]
        for item in face_data:
            # Parallel lists with the recognized names, face crops and probabilities
            recon.append(item[4])   # recognized name
            fotos.append(item[0])   # face crop
            ps.append(item[5])      # recognition probability
            devices.append(1)       # tag the detection with its source device
        for face in detections:
            (startX, startY, endX, endY) = face[2]
            tracker = dlib.correlation_tracker()
            rect = dlib.rectangle(startX, startY, endX, endY)
            tracker.start_track(rgb, rect)
            trackers.append(tracker)
            # loop over the trackers
            for item in face_data:
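
The `(*face, *recognize(...))` comprehension above relies on `get_faces` returning 4-tuples and `recognize` returning `(name, prob)` pairs, as the comments suggest. A minimal sketch with stand-in stubs (the stub functions and their values are illustrative, not the project's real helpers) shows how the 6-tuples are assembled and why `item[4]` and `item[5]` are the name and probability:

def get_faces_stub(frame):
    # Assumed shape, mirroring the comments above: one 4-tuple
    # (face_crop, embedding_vector, box, full_frame) per detection.
    return [("face_crop", [0.1, 0.2, 0.3], (10, 20, 110, 120), frame)]

def recognize_stub(embedding):
    # Assumed shape: (predicted_name, probability).
    return ("alice", 0.91)

detections = get_faces_stub("frame")
face_data = [(*face, *recognize_stub(face[1])) for face in detections]
# Each item is (face, vector, box, full_image, name, prob), so item[4] is the
# name and item[5] the probability, matching the indices used above.
print(face_data[0][4], face_data[0][5])   # -> alice 0.91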
Example #2
	rects_esp = []

	#print(cpt)
	if cpt % skipped_frames == 0:
		recon_pi = []
		fotos_pi = []
		ps_pi = []
		trackers_pi = []
		trackers_esp32 = []
		recon_esp32 = []
		fotos_esp32 = []
		ps_esp32 = []

		detections = get_faces(detector, embedder, frame, 0.5, fa)
		# [(face, vector, coordinates, full_image)]
		face_data = [(*face, *recognize(face[1], recognizer, le, 0.65)) for face in detections]
		# [(face, vector, coordinates, full_image, name, prob)]
		for item in face_data:
			# Parallel lists with the recognized names, face crops and probabilities
			recon_pi.append(item[4])
			fotos_pi.append(item[0])
			ps_pi.append(item[5])
			#[devices.append(1) for i in range(len(face_data))]
		for face in detections:
			(startX, startY, endX, endY) = face[2]
			tracker = dlib.correlation_tracker()
			rect = dlib.rectangle(startX, startY, endX, endY)
			tracker.start_track(rgb, rect)
			trackers_pi.append(tracker)
			# loop over the trackers
		frame_esp32 = esp32_frame('grupo14.duckdns.org', 1228)
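In this variant a second frame is fetched from an ESP32 camera. A minimal sketch of how that frame could go through the same resize/convert/detect steps, assuming (as the commented-out lines in Example #3 suggest) that `esp32_frame` returns an image or `None` when the fetch fails, and that the same `detector`, `embedder`, and `fa` objects are in scope:

# Illustrative only; not the project's actual ESP32 handling.
frame_esp32 = esp32_frame('grupo14.duckdns.org', 1228)
if frame_esp32 is not None:
    frame_esp32 = imutils.resize(np.array(frame_esp32), width=500)
    rgb_esp32 = cv2.cvtColor(frame_esp32, cv2.COLOR_BGR2RGB)
    detections_esp32 = get_faces(detector, embedder, frame_esp32, 0.5, fa)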
Example #3
def main_core(args, frame_queue, pframe_queue):

    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join([args["detector"],
                                  "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    predictor = dlib.shape_predictor(args["shape_predictor"])
    fa = FaceAligner(predictor, desiredFaceWidth=256)
    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # load the face embeddings
    print("[INFO] loading face embeddings...")
    data = pickle.loads(open(args["embeddings"], "rb").read())

    recognizer, le = train(data)
    time.sleep(1.0)

    # initialize the video stream, then allow the camera sensor to warm up

    ## Set Threading to start filming
    video_getter = VideoGet(frame_queue=frame_queue, src=0, name='Video Getter')
    time.sleep(1.0)
    # print('[INFO] Starting VideoGet...')
    video_getter.start()
    time.sleep(1.0)


    ct = CentroidTracker(maxDisappeared=25, maxDistance=75)
    trackers = []
    trackableObjects = {}
    out = 0
    skipped_frames = 2
    out_prev = 0

    cpt = 0
    exitbool = False
    fps_count = FPS().start()
    while True:
        frame = video_getter.frame.copy()
        # frame = esp32_frame("grupo14.duckdns.org", 1228)
        # if frame is None:
        #     continue
        # frame = np.array(frame)
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        (H, W) = frame.shape[:2]
        rects = []

        # print(cpt)
        if cpt % skipped_frames == 0:
            recon = []
            fotos = []
            ps = []
            trackers = []
            detections = get_faces(detector, embedder, frame, 0.5, fa)
            # [(face, vector, coordinates, full_image)]
            face_data = [(*face, *recognize(face[1], recognizer, le, 0.65)) for face in detections]
            # [(face, vector, coordinates, full_image, name, prob)]
            for item in face_data:
                # Parallel lists with the recognized names, face crops and probabilities
                recon.append(item[4])
                fotos.append(item[0])
                ps.append(item[5])
            for face in detections:
                (startX, startY, endX, endY) = face[2]
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)
                # loop over the trackers
        else:
            for tracker in trackers:
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()
                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        objects, names, images, probabilities = ct.update(rects, recon, fotos, ps)
        # loop over the tracked objects
        for (objectID, centroid), (ID, name), (I, im), (D, prob) in zip(
                objects.items(), names.items(), images.items(), probabilities.items()):

            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid, name, im, prob)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                # If the person has left the area
                # check to see if the object has been counted or not
                if not to.counted and direction > 0 and centroid[1] > H - 50:
                    to.out = True
                    to.counted = True
            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # Assemble the packet to send
            ## Sending emails
            if not to.sent:
                paquete = [to.prob, to.pic, to.reconocido, to.out]
                if to.reconocido:
                    # send email
                    paquete.append(to.name)
                else:
                    paquete.append('unknown {}'.format(objectID))
                    # send emails
                ## Packet to send
                to.sent = True

        for item in face_data:
            print('Recognized ', item[4])

        #   frame = draw_frame(frame, item)
        fps_count.update()
        cpt += 1
        out_prev = out
        # if cpt > 250:
        # 	video_getter.stop()
        # 	break
        exitbool = 0  # show_frame(frame)
        if exitbool or cpt > 50:
            # SV.stop()
            fps_count.stop()
            print("[INFO] elapsed time fps processed: {:.2f}".format(fps_count.elapsed()))
            print("[INFO] approx. processed FPS: {:.2f}".format(fps_count.fps()))
            time.sleep(1)
            video_getter.stop()
            # db_client.close()
            break
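
The comments above mark where an email should be sent for each `paquete`, but the sending itself is not shown. Below is a minimal sketch of that step using only the standard library; the `send_alert` helper name, the SMTP host, port, and addresses are placeholders, not values from the project.

import smtplib
from email.message import EmailMessage

def send_alert(paquete):
    # paquete = [prob, pic, reconocido, out, label] as built above;
    # attaching the face image (pic) is left out of this sketch.
    prob, pic, reconocido, left, label = paquete
    msg = EmailMessage()
    msg["Subject"] = "Face event: {}".format(label)
    msg["From"] = "camera@example.com"        # placeholder sender
    msg["To"] = "operator@example.com"        # placeholder recipient
    msg.set_content("recognized={}, probability={:.2f}, left_area={}".format(
        reconocido, prob, left))
    with smtplib.SMTP("smtp.example.com", 587) as server:  # placeholder server
        server.starttls()
        # server.login("user", "password")    # credentials omitted
        server.send_message(msg)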
Example #4
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    (H, W) = frame.shape[:2]
    rects = []

    #print(cpt)
    if cpt % skipped_frames == 0:
        recon = []
        fotos = []
        ps = []
        trackers = []
        trackers_esp32 = []
        devices = []

        detections = get_faces(detector, embedder, frame, 0.5, fa)
        # [(face, vector, coordinates, full_image)]
        face_data = [(*face, *recognize(face[1], recognizer, le, 0.65))
                     for face in detections]
        # [(face, vector, coordinates, full_image, name, prob)]
        for item in face_data:
            # Parallel lists with the recognized names, face crops and probabilities
            recon.append(item[4])   # recognized name
            fotos.append(item[0])   # face crop
            ps.append(item[5])      # recognition probability
            devices.append(1)       # tag the detection with its source device
        for face in detections:
            (startX, startY, endX, endY) = face[2]
            tracker = dlib.correlation_tracker()
            rect = dlib.rectangle(startX, startY, endX, endY)
            tracker.start_track(rgb, rect)
            trackers.append(tracker)
            # loop over the trackers
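
The `devices` list keeps one entry per detection so results can later be told apart by source camera. A short, illustrative sketch of how a second source could extend the same parallel lists before a single tracker update; the tag values and the `*_esp32` names are assumptions, not part of the snippet above.

# Illustrative only: 1 = local camera, 2 = ESP32 camera (assumed tags).
devices = [1] * len(face_data)
# A second source would append its own detections with a different tag, e.g.:
# recon += recon_esp32
# fotos += fotos_esp32
# ps += ps_esp32
# devices += [2] * len(face_data_esp32)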
Example #5
for name in imagePaths:
    Coincidencias = 0
    Caras = 0
    print('Person to evaluate: {}'.format(name))
    print('Get the image ready')
    input('Press any key')
    for j in range(4):
        for i in range(frames_per_person):
            frame = video_getter.frame.copy()
            #frame = imutils.resize(frame, width=500)
            detections = get_faces(detector, embedder, sp, frame,
                                   confianza_dec, fa)
            # [(face, vector, coordinates, full_image)]
            face_data = [(*face,
                          *recognize(face[1], recognizer, le, confianza_recon))
                         for face in detections]
            # [(face, vector, coordinates, full_image, name, prob)]
            for item in face_data:
                print(item[4])
                if item[4] == name:
                    Coincidencias += 1
                Caras += 1
            show_frame(frame)
            fps_count.update()
        input('Change the photo')
    calculos[name] = (Coincidencias, Caras)

print(calculos)

fps_count.stop()

    print('Person to evaluate: {}'.format(name))
    while True:
        (grabbed, frame) = stream.read()

        # if the frame was not grabbed, then we have reached the
        # end of the stream
        if not grabbed:
            stream.release()
            time.sleep(2.0)
            break

        frame = imutils.resize(frame, width=450)
        detections = get_faces(detector, embedder, sp, frame, confianza_dec,
                               fa)
        # [(face, vector, coordinates, full_image)]
        face_data = [(*face, *recognize(face[1], model, le, confianza_recon))
                     for face in detections]
        # [(face, vector, coordinates, full_image, name, prob)]
        for item in face_data:
            x_test.append(item[1])
            y_test.append(item[4])
            y_train.append(name)
            prob.append(item[5])
            if item[4] == name:
                Coincidencias += 1
            elif item[4] != 'unknown':
                Falsas_coincidencias += 1
            Caras += 1
        fps_count.update()
    calculos[name] = (Coincidencias, Caras, Falsas_coincidencias,
                      np.mean(prob))
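
A small follow-up sketch for turning the `(Coincidencias, Caras, Falsas_coincidencias, mean_prob)` tuples collected in `calculos` into per-person rates; the metric names below are assumptions made for this illustration only.

# Illustrative summary of the evaluation results gathered above.
for name, (hits, faces, false_hits, mean_prob) in calculos.items():
    recall = hits / faces if faces else 0.0
    false_rate = false_hits / faces if faces else 0.0
    print('{}: recall={:.2%}  false-match rate={:.2%}  mean prob={:.2f}'.format(
        name, recall, false_rate, mean_prob))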