Example no. 1
0
# load our serialized face detector (Caffe SSD) from disk
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join(
    [args["detector"], "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

#predictor = dlib.shape_predictor(args["shape_predictor"])
#fa = FaceAligner(predictor, desiredFaceWidth=256)
# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

# load the face embeddings
print("[INFO] loading face embeddings...")
# NOTE(review): pickle is only safe on trusted files — do not point this
# at untrusted input.
with open(args["embeddings"], "rb") as f:  # close the handle (was leaked)
    data = pickle.loads(f.read())

recognizer, le = train(data)


def main_core(detector, embedder, recognizer, le, frame_queue, pframe_queue):
    """Worker entry point: start a VideoGet camera thread feeding
    `frame_queue`, using the pre-loaded detector/embedder/recognizer.

    NOTE(review): this snippet is truncated by the scrape — the body
    continues beyond what is visible here.
    """

    print('[INFO] Starting:', mp.current_process().name)
    # time.sleep(1.0)

    # initialize the video stream, then allow the camera sensor to warm up

    ## Set threading to start filming
    video_getter = VideoGet(frame_queue=frame_queue,
                            src=0,
                            name='Video Getter')
    time.sleep(1.0)
    # print('[INFO] Starting VideoGet...')
Example no. 2
0
def main_core(args, frame_queue, pframe_queue):
    """Worker process: load the detection/recognition models, pull frames
    from a background VideoGet thread, run face recognition on each frame,
    and print processed-FPS statistics after a fixed number of frames.

    Parameters
    ----------
    args : dict
        Must provide the "detector", "embedding_model" and "embeddings"
        filesystem paths.
    frame_queue :
        Queue handed to VideoGet for raw camera frames.
    pframe_queue :
        Queue for processed frames (currently unused in this body).
    """

    print('[INFO] Starting:', mp.current_process().name)

    # load our serialized face detector (Caffe SSD) from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join(
        [args["detector"], "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    #predictor = dlib.shape_predictor(args["shape_predictor"])
    #fa = FaceAligner(predictor, desiredFaceWidth=256)
    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # load the face embeddings
    print("[INFO] loading face embeddings...")
    # NOTE(review): pickle is only safe on trusted files.
    with open(args["embeddings"], "rb") as f:  # close the handle (was leaked)
        data = pickle.loads(f.read())

    recognizer, le = train(data)
    time.sleep(1.0)

    # initialize the video stream, then allow the camera sensor to warm up

    ## Set threading to start filming
    video_getter = VideoGet(frame_queue=frame_queue,
                            src=0,
                            name='Video Getter')
    time.sleep(1.0)
    # print('[INFO] Starting VideoGet...')
    video_getter.start()
    time.sleep(1.0)

    cpt = 0           # processed-frame counter
    exitbool = False  # kept for parity with the (disabled) show_frame() exit
    fps_count = FPS().start()

    while True:

        frame = video_getter.frame.copy()

        # detection confidence 0.5, recognition threshold 0.65
        face_data = acquire_frame(detector, embedder, frame, recognizer, le,
                                  0.5, 0.65)  #,fa)
        # pframe_queue.put(face_data)
        for item in face_data:
            # print(item[2:])
            frame = draw_frame(frame, item)
        fps_count.update()

        cpt += 1

        #       exitbool = show_frame(frame)

        # stop after 80 frames (benchmark run) or on an explicit exit request
        if exitbool or cpt == 80:
            fps_count.stop()
            print("[INFO] elasped time fps processed: {:.2f}".format(
                fps_count.elapsed()))
            print("[INFO] approx. processed FPS: {:.2f}".format(
                fps_count.fps()))
            time.sleep(1)
            video_getter.stop()
            time.sleep(2)
            print('[INFO] Exiting :', mp.current_process().name)
            break
Example no. 3
0
def main_core(args, frame_queue, pframe_queue):
    """Worker process: detect, align, recognize and centroid-track faces
    from a camera stream, counting objects that cross the bottom of the
    frame and assembling a notification package per tracked face.

    Parameters
    ----------
    args : dict
        Must provide "detector", "shape_predictor", "embedding_model" and
        "embeddings" filesystem paths.
    frame_queue :
        Queue handed to VideoGet for raw camera frames.
    pframe_queue :
        Queue for processed frames (currently unused in this body).

    Fixes vs. original: indentation mixed tabs and spaces (a TabError in
    Python 3) — normalized to 4 spaces; stray semicolon removed; the
    embeddings file handle is now closed. Runtime strings are unchanged.
    """
    # load our serialized face detector from disk
    print("[INFO] loading face detector...")
    protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
    modelPath = os.path.sep.join(
        [args["detector"], "res10_300x300_ssd_iter_140000.caffemodel"])
    detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

    predictor = dlib.shape_predictor(args["shape_predictor"])
    fa = FaceAligner(predictor, desiredFaceWidth=256)
    # load our serialized face embedding model from disk
    print("[INFO] loading face recognizer...")
    embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

    # load the face embeddings
    print("[INFO] loading face embeddings...")
    # NOTE(review): pickle is only safe on trusted files.
    with open(args["embeddings"], "rb") as f:  # close the handle (was leaked)
        data = pickle.loads(f.read())

    recognizer, le = train(data)
    time.sleep(1.0)

    # initialize the video stream, then allow the camera sensor to warm up

    ## Set threading to start filming
    video_getter = VideoGet(frame_queue=frame_queue, src=0, name='Video Getter')
    time.sleep(1.0)
    # print('[INFO] Starting VideoGet...')
    video_getter.start()
    time.sleep(1.0)

    # centroid tracker: forget objects unseen for 25 frames or >75px away
    ct = CentroidTracker(maxDisappeared=25, maxDistance=75)
    trackers = []
    trackableObjects = {}
    out = 0
    skipped_frames = 2  # run the (expensive) detector only every 2nd frame
    out_prev = 0

    cpt = 0
    exitbool = False
    fps_count = FPS().start()
    while True:
        frame = video_getter.frame.copy()
        # frame = esp32_frame("grupo14.duckdns.org", 1228)
        # if frame is None:
        #     continue
        # frame = np.array(frame)
        frame = imutils.resize(frame, width=500)
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # dlib trackers use RGB
        (H, W) = frame.shape[:2]
        rects = []

        # print(cpt)
        if cpt % skipped_frames == 0:
            # detection frame: re-run the detector and rebuild the trackers
            recon = []
            fotos = []
            ps = []
            trackers = []
            detections = get_faces(detector, embedder, frame, 0.5, fa)
            # [(face, vector, coordinates, full_image)]
            face_data = [(*face, *recognize(face[1], recognizer, le, 0.65))
                         for face in detections]
            # [(face, vector, coordinates, full_image, name, prob)]
            for item in face_data:
                # lists with the names of the recognized faces
                recon.append(item[4])
                fotos.append(item[0])
                ps.append(item[5])
            for face in detections:
                (startX, startY, endX, endY) = face[2]
                tracker = dlib.correlation_tracker()
                rect = dlib.rectangle(startX, startY, endX, endY)
                tracker.start_track(rgb, rect)
                # add the tracker to our list of trackers so we can
                # utilize it during skip frames
                trackers.append(tracker)
                # loop over the trackers
        else:
            for tracker in trackers:
                # update the tracker and grab the updated position
                tracker.update(rgb)
                pos = tracker.get_position()
                # unpack the position object
                startX = int(pos.left())
                startY = int(pos.top())
                endX = int(pos.right())
                endY = int(pos.bottom())
                # add the bounding box coordinates to the rectangles list
                rects.append((startX, startY, endX, endY))

        # NOTE(review): recon/fotos/ps (and face_data below) carry over from
        # the most recent detection frame while on a skip frame.
        objects, names, images, probabilities = ct.update(rects, recon, fotos, ps)
        # loop over the tracked objects
        for (objectID, centroid), (ID, name), (I, im), (D, prob) in zip(
                objects.items(), names.items(), images.items(),
                probabilities.items()):

            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid, name, im, prob)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                # whether it went out
                # check to see if the object has been counted or not
                if not to.counted and direction > 0 and centroid[1] > H - 50:
                    to.out = True
                    to.counted = True
            # store the trackable object in our dictionary
            trackableObjects[objectID] = to

            # assemble the outgoing package
            ## mail sending
            if not to.sent:
                paquete = [to.prob, to.pic, to.reconocido, to.out]
                if to.reconocido:
                    # send mail
                    paquete.append(to.name)
                else:
                    paquete.append('unknown {}'.format(objectID))
                    # send mails
                ## package to send
                to.sent = True

        for item in face_data:
            print('Reconocido ', item[4])

        #   frame = draw_frame(frame, item)
        fps_count.update()
        cpt += 1
        out_prev = out
        # if cpt > 250:
        #     video_getter.stop()
        #     break
        exitbool = 0  # show_frame(frame)
        if exitbool or cpt > 50:
            # SV.stop()
            fps_count.stop()
            print("[INFO] elasped time fps processed: {:.2f}".format(fps_count.elapsed()))
            print("[INFO] approx. processed FPS: {:.2f}".format(fps_count.fps()))
            time.sleep(1)
            video_getter.stop()
            # db_client.close()
            break
Example no. 4
0
# load our serialized face embedding model from disk
print("[INFO] loading face model...")
embedder = dlib.face_recognition_model_v1(args["embedding_model"])
sp = dlib.shape_predictor(args["shape_pred"])
# load the face embeddings
print("[INFO] loading face embeddings...")
# NOTE(review): pickle is only safe on trusted files.
with open(args["embeddings"], "rb") as f:  # close the handle (was leaked)
    data = pickle.loads(f.read())

# grab the top-level class directories of the dataset; the first tuple of
# os.walk() is all we need (the old loop walked the entire tree just to
# keep that first result)
imagePaths = next(os.walk(args["dataset"]))[1]

model, le = train(data)

calculos = {}
fps_count = FPS().start()
x_test = []
y_test = []
y_train = []
nombres = set(data["names"])
nombres.add('unknown')  # recognizer may also output the 'unknown' label
# Evaluate each identity against its pre-recorded clip.
# NOTE(review): loop body is truncated by the scrape — it continues
# beyond what is visible here.
for name in imagePaths:
    # emilia_clarke's clip is stored as .m4v; every other identity is .avi
    if name == 'emilia_clarke':
        stream = cv2.VideoCapture("videos_eff/{}.m4v".format(name))
    else:
        stream = cv2.VideoCapture("videos_eff/{}.avi".format(name))