def __init__(self, vs, args):
    """Bootstrap the main application window.

    Wires up shared state and the UI, opens the main view, labels the
    active compute backend, and registers window-manager callbacks.
    """
    setup_definitions(self, vs, args)
    init_ui(self)
    self.open_main_view()

    print("self.cpu_type : ", self.cpu_type)

    # Map the numeric backend selector to its display label; values other
    # than 0/1/2 leave the label untouched (same as the original if-chain).
    backend_labels = {0: "CPU", 1: "VPU", 2: "GPU"}
    if self.cpu_type in backend_labels:
        self.top_label_cpu_text.set(backend_labels[self.cpu_type])

    self.root.wm_title("ThinkerFarm")
    self.root.wm_protocol("WM_DELETE_WINDOW", self.on_close)
    print("cuda list :", cuda.get_num_devices())
def __init__(self):
    """Print dlib's CUDA build flag and the number of visible CUDA devices."""
    cuda_build_flag = dlib.DLIB_USE_CUDA
    device_count = cuda.get_num_devices()
    print(cuda_build_flag)
    print(device_count)
import face_recognition from PIL import Image import matplotlib.pyplot as plt import matplotlib.image as mpimg from time import time import dlib import dlib.cuda as cuda print(cuda.get_num_devices()) dlib.DLIB_USE_CUDA = 1 dlib.USE_AVX_INSTRUCTIONS = 1 image = face_recognition.load_image_file("baseDatos.jpeg") start = time() face_locations = face_recognition.face_locations(image, model="cnn") print("tiempo: ", time() - start) print(format(len(face_locations))) i = 0 for face_location in face_locations: top, right, bottom, left = face_location #print("bla {}bla{} lba{} right{}", format(top, left, bottom, right)) face_image = image[top:bottom, left:right] pil_image = Image.fromarray(face_image) pil_image.save("out/face-{}.png".format(i)) i = i + 1 """ my_list = [] #almacena las imagenes leidas por face:recognition for j in range (0, i): my_list.append(face_recognition.load_image_file("out/face-{}.png".format(j)))
import logging
import os
import random
from argparse import ArgumentParser
import coloredlogs
import cv2
import numpy as np
import matplotlib.pyplot as plt
import dlib

# Face-location backend: default to the CPU 'hog' detector and upgrade to
# 'cnn' only when a CUDA-capable dlib build reports at least one device.
LOCATION_MODEL = 'hog'
try:
    import dlib.cuda as cuda
    if cuda.get_num_devices() > 0 and dlib.DLIB_USE_CUDA:
        LOCATION_MODEL = 'cnn'
except ImportError:
    # dlib built without CUDA support -- keep the 'hog' fallback.
    pass

import face_recognition as fr

logger = logging.getLogger()


def load_types(basedir, human_prob):
    # Scan `basedir` and collect one image per "type", keyed by filename stem.
    # NOTE(review): this function continues beyond the visible chunk; the
    # loop body below is incomplete here.
    type_names = []
    type_imgs = {}
    for fname in os.listdir(basedir):
        full_path = os.path.join(basedir, fname)
        type_name = os.path.splitext(fname)[0]
# python recognize_faces_video.py --encodings encodings.pickle # python recognize_faces_video.py --encodings encodings.pickle --output output/jurassic_park_trailer_output.avi --display 0 # import the necessary packages from imutils.video import VideoStream import face_recognition import argparse import imutils import pickle import time import cv2 import dlib.cuda as cuda import dlib print("CUDA devices number : " + str(cuda.get_num_devices())) dlib.DLIB_USE_CUDA = True print("dlib.DLIB_USE_CUDA = " + str(dlib.DLIB_USE_CUDA)) # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-e", "--encodings", required=True, help="path to serialized db of facial encodings") ap.add_argument("-o", "--output", type=str, help="path to output video") ap.add_argument("-y", "--display", type=int,
import os
import numpy as np
import bz2
import shutil
import urllib.request
from PIL import Image
import cv2
import math
from . import utils

# Enable dlib's CNN face detector only when a CUDA-capable dlib build
# reports at least one CUDA device; otherwise stay on the default path.
DLIB_USE_CNN = False
try:
    # BUG FIX: `import dlib.cuda as cuda` binds only the name `cuda`, not
    # `dlib`, so `dlib.DLIB_USE_CUDA` below (and every `dlib.*` reference in
    # MyDlib) raised NameError, which the broad except silently printed and
    # CNN detection could never be enabled. Import dlib explicitly first.
    import dlib
    import dlib.cuda as cuda
    if cuda.get_num_devices() >= 1:
        if dlib.DLIB_USE_CUDA:
            DLIB_USE_CNN = True
except Exception as e:
    print(e)


class MyDlib:
    def __init__(self):
        """Load the dlib face detector, fetching CNN weights on first use."""
        print("Dlib is loading")
        if DLIB_USE_CNN:
            # Weights file lives next to the project root; download it once.
            dlib_cnn_face_detector_path = os.path.join(
                utils.ROOT_DIR, "mmod_human_face_detector.dat")
            if not os.path.exists(dlib_cnn_face_detector_path):
                self.download_cnn_face_detector(dlib_cnn_face_detector_path)
            self.face_detector = dlib.cnn_face_detection_model_v1(dlib_cnn_face_detector_path)