Example #1
args = vars(ap.parse_args())

# load our serialized face detector from disk
print("[INFO] loading face detector...")
protoPath = os.path.sep.join([args["detector"], "deploy.prototxt"])
modelPath = os.path.sep.join(
    [args["detector"], "res10_300x300_ssd_iter_140000.caffemodel"])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

# load our serialized face embedding model from disk
print("[INFO] loading face recognizer...")
embedder = cv2.dnn.readNetFromTorch(args["embedding_model"])

# grab the paths to the input images in our dataset
print("[INFO] quantifying faces...")
datasetPaths = list(path.list_images(args["dataset"]))
print(datasetPaths)

# initialize our lists of extracted facial embeddings and
# corresponding people names
dataset_embeddings = []
dataset_known_names = []
dataset_collection = []
# initialize the total number of faces processed
total = 0

# loop over the image paths
for (i, imagePath) in enumerate(datasetPaths):
    # display progress, then extract the person name from the image path
    print("[INFO] processing image {}/{}".format(i + 1, len(datasetPaths)))
    name = imagePath.split(os.path.sep)[-2]
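The snippet breaks off inside the loop. Judging from the detector and embedder loaded above, the rest of the body would typically detect the single most confident face, compute its 128-d embedding, and store it alongside the name; the following is only a sketch of that continuation (it assumes numpy is imported as np, imutils is available, and a 0.5 confidence threshold is acceptable):

    # load the image, resize it to a width of 600 pixels, and grab its dimensions
    image = cv2.imread(imagePath)
    image = imutils.resize(image, width=600)
    (h, w) = image.shape[:2]

    # localize faces with the SSD detector loaded earlier
    imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
        (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)
    detector.setInput(imageBlob)
    detections = detector.forward()

    if len(detections) > 0:
        # keep only the single most confident detection
        j = np.argmax(detections[0, 0, :, 2])
        confidence = detections[0, 0, j, 2]
        if confidence > 0.5:
            # extract the face ROI from the image
            box = detections[0, 0, j, 3:7] * np.array([w, h, w, h])
            (startX, startY, endX, endY) = box.astype("int")
            face = image[startY:endY, startX:endX]

            # compute the 128-d embedding for the face ROI
            faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96),
                (0, 0, 0), swapRB=True, crop=False)
            embedder.setInput(faceBlob)
            vec = embedder.forward()

            dataset_known_names.append(name)
            dataset_embeddings.append(vec.flatten())
            total += 1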
Example #2
# load the previously serialized embeddings for the dataset and eval images
with open('output/dataset_embeddings.pickle', 'rb') as dataset_embedding_file:
    dataset_embeddings = pickle.load(dataset_embedding_file)

with open('output/eval_embeddings.pickle', 'rb') as eval_embeddings_file:
    eval_embeddings = pickle.load(eval_embeddings_file)

distances = []

# load the actual face recognition model along with the label encoder
recognizer = pickle.loads(open(args["recognizer"], "rb").read())
le = pickle.loads(open(args["le"], "rb").read())

# inside the loop below, each image is loaded, resized to have a width of
# 600 pixels (while maintaining the aspect ratio), and its dimensions grabbed

imagePaths = list(path.list_images(".\\Dataset\\"))
if not evalmode:
    for (idx, imagePath) in enumerate(imagePaths):
        # display progress and extract the image file name from the path
        print("[INFO] processing image {}/{}".format(idx + 1, len(imagePaths)))
        pic_name = imagePath.split(os.path.sep)[-1]

        # convert the image to RGB, then load and resize it
        convert_to_rgb(imagePath)
        img = cv2.imread(imagePath)
        image = imutils.resize(img, width=600)
        (h, w) = image.shape[:2]

        # construct a blob from the image
        imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)),
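The call above is cut off mid-statement; a conventional completion, assuming the usual 300x300 SSD preprocessing with the standard Caffe mean values, would be:

        imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
            (300, 300), (104.0, 177.0, 123.0), swapRB=False, crop=False)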
Example #3
def process_dataset_images():
    # refresh the list of dataset images, recompute their embeddings, and
    # remember which paths those embeddings were computed from
    global new_dataset_paths, reference_data, data_state
    #convert_images_to_RGB_jpeg(dataset_paths, 'dataset')
    new_dataset_paths = list(path.list_images(".\\Dataset\\"))
    reference_data = extract_embeddings(new_dataset_paths)
    data_state = new_dataset_paths
Example #4
def process_eval_images():
    # refresh the list of evaluation images, recompute their embeddings, and
    # remember which paths those embeddings were computed from
    global new_eval_paths, eval_data, eval_state
    #convert_images_to_RGB_jpeg(eval_paths, 'evaluation')
    new_eval_paths = list(path.list_images(".\\Eval\\"))
    eval_data = extract_embeddings(new_eval_paths)
    eval_state = new_eval_paths
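extract_embeddings is not shown in any of these examples; going by the "[label, Tensor] tuple" comment in Example #5, a minimal sketch might look like the following (compute_face_embedding is a hypothetical helper standing in for the detection and embedding steps of Examples #1 and #2):

def extract_embeddings(image_paths):
    # build a list of [label, embedding] pairs, one per image with a detectable face
    data = []
    for image_path in image_paths:
        label = image_path.split(os.path.sep)[-1]
        embedding = compute_face_embedding(image_path)  # hypothetical helper
        if embedding is not None:
            data.append([label, embedding])
    return data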
Example #5
# Same as above, but for the ./Eval/* directory
eval_state = []

# container for storing the [label, Tensor] tuple calculated from images in the Dataset directory
reference_data = []
# same as above, but for the Eval directory
eval_data = []

# baked in path strings for serialized data
pickled_dataset_path = './dataset.pickle'
pickled_data_state_path = './data_state.pickle'
pickled_eval_path = './eval.pickle'
pickled_eval_state_path = './eval_state.pickle'

# fill appropriate containers with current images in specified directories
dataset_paths = list(path.list_images(".\\Dataset\\"))
eval_paths = list(path.list_images(".\\Eval\\"))

# check states from previous runs, and initialize operative data accordingly
pre_flight_check()
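pre_flight_check is not included in the snippet; given the pickled_* paths defined above, it presumably restores state serialized by earlier runs when those files exist. A sketch under that assumption (os and pickle imports assumed):

def pre_flight_check():
    # restore embeddings and directory states from previous runs, if present
    global reference_data, data_state, eval_data, eval_state
    if os.path.exists(pickled_dataset_path):
        with open(pickled_dataset_path, 'rb') as f:
            reference_data = pickle.load(f)
    if os.path.exists(pickled_data_state_path):
        with open(pickled_data_state_path, 'rb') as f:
            data_state = pickle.load(f)
    if os.path.exists(pickled_eval_path):
        with open(pickled_eval_path, 'rb') as f:
            eval_data = pickle.load(f)
    if os.path.exists(pickled_eval_state_path):
        with open(pickled_eval_state_path, 'rb') as f:
            eval_state = pickle.load(f)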

######## MAIN EXECUTION #########

if test_mode:
    filename = 'new_version_test_output.csv'
    file = open(filename, 'w+')

    if n_to_n_eval_mode:
        distances_between_people = calculate_distances(reference_data,
                                                       reference_data)
        for row in distances_between_people:
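The loop is cut off here; a plausible continuation simply writes each row of distances to the CSV opened above and then closes the file (an assumption about the original code):

            file.write(','.join(str(value) for value in row) + '\n')

    file.close()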