def extractFaces1(filename):
    """Detect faces with the OpenCV Haar cascade and save each crop to disk."""
    global faceCnt
    preprocessImage(filename)
    # detectMultiScale returns (x, y, w, h) boxes; changeboxtype (defined
    # elsewhere) presumably converts them to the box format cropImage expects.
    face_locations = changeboxtype(
        face_cascade.detectMultiScale(cv2.imread(tempFile),
                                      scaleFactor=1.7, minNeighbors=5))
    print(face_locations)
    for face_location in face_locations:
        cropImage(tempFile, face_location, True, savePath + str(faceCnt) + '.jpg')
        faceCnt += 1
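# The face_cascade detector used above is assumed to be initialized elsewhere
# in the module. A minimal sketch of that setup, assuming the frontal-face
# model bundled with opencv-python (the model actually used by this project
# may differ):
import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')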
def recognizeFacesInImage(filename):
    """Recognize every face in an image using the trained KNN classifier."""
    global data, names
    preprocessImage(filename)
    img = cv2.imread(tempFile)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, boxes)
    faces = []
    if len(encodings) != 0:
        faces = knn.predict(encodings)
        faces = list(set(faces))  # drop duplicate names
    print(faces)
    return faces
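# The knn classifier used above is assumed to be trained elsewhere on the
# stored face encodings. A minimal sketch of that training step, assuming
# scikit-learn and that data['encodings'] / data['names'] hold one 128-d
# encoding and its label per known face (the neighbour count is illustrative,
# not taken from the original project):
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(data['encodings'], data['names'])  # one row per stored encoding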
def recognizeFacesInImage(filename):
    """Alternative version: recognize faces by voting over compare_faces matches.

    Defining it again here overrides the KNN-based version above.
    """
    global data, names, totalFreq
    preprocessImage(filename)
    img = cv2.imread(tempFile)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb)
    encodings = face_recognition.face_encodings(rgb, boxes)
    faces = []
    for encoding in encodings:
        matches = face_recognition.compare_faces(data['encodings'], encoding)
        # Count how many stored encodings of each person matched this face.
        freq = {name: 0 for name in names}
        for i, matched in enumerate(matches):
            if matched:
                freq[data['names'][i]] += 1
        # Express the count as a percentage of that person's stored encodings.
        percent = {name: round(freq[name] / totalFreq[name] * 100, 2)
                   for name in names}
        # Accept the best candidate only if at least 90% of their encodings agree.
        curMax = 90
        curName = 'Unknown'
        for name in names:
            if percent[name] >= curMax:
                curMax = percent[name]
                curName = name
        if curName not in faces:
            faces.append(curName)
    print(faces)
    return faces
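# totalFreq is assumed to be built elsewhere from the stored labels; the
# percentage above only makes sense if totalFreq[name] is the number of saved
# encodings for that person. A minimal sketch under that assumption:
from collections import Counter

totalFreq = Counter(data['names'])   # encodings stored per person
names = list(totalFreq.keys())       # unique person names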
def recognizeSingleFace(filename):
    """Recognize the first face in an image using the trained KNN classifier."""
    global knn, data, names
    preprocessImage(filename)
    img = cv2.imread(tempFile)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb)
    encoding = face_recognition.face_encodings(rgb, boxes)
    if len(encoding) == 0:
        return 'Unknown'
    encoding = encoding[0]  # use only the first detected face
    face = knn.predict([encoding])[0]
    # prob = knn.predict_proba([encoding])[0]  # optional: per-class confidence
    return face
def recognizeSingleFace(filename):
    """Alternative version: recognize one face by voting over compare_faces matches.

    Defining it again here overrides the KNN-based version above.
    """
    global data, names, totalFreq
    preprocessImage(filename)
    img = cv2.imread(tempFile)
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    boxes = face_recognition.face_locations(rgb)
    encoding = face_recognition.face_encodings(rgb, boxes)
    if len(encoding) == 0:
        return 'Unknown'
    encoding = encoding[0]  # use only the first detected face
    matches = face_recognition.compare_faces(data['encodings'], encoding)
    # Same voting scheme as recognizeFacesInImage: count matches per person,
    # then express them as a percentage of that person's stored encodings.
    freq = {name: 0 for name in names}
    for i, matched in enumerate(matches):
        if matched:
            freq[data['names'][i]] += 1
    percent = {name: round(freq[name] / totalFreq[name] * 100, 2)
               for name in names}
    curMax = 90
    curName = 'Unknown'
    for name in names:
        if percent[name] >= curMax:
            curMax = percent[name]
            curName = name
    return curName
def extractFaces2(filename):
    """Detect faces with face_recognition's detector and save each crop to disk."""
    global faceCnt
    preprocessImage(filename)
    # The image could optionally be downscaled here to speed up detection.
    image = face_recognition.load_image_file(tempFile)
    # face_recognition returns boxes as (top, right, bottom, left).
    face_locations = face_recognition.face_locations(image)
    print(face_locations)
    for face_location in face_locations:
        cropImage(tempFile, face_location, True, savePath + str(faceCnt) + '.jpg')
        faceCnt += 1
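# A minimal usage sketch: extractFaces1/extractFaces2 save face crops into
# savePath, and the recognize* functions return recognized names. The file
# names below are placeholders, not files from the original project:
if __name__ == '__main__':
    extractFaces2('group_photo.jpg')                    # crop faces from a group photo
    people = recognizeFacesInImage('group_photo.jpg')   # names of everyone recognized
    print('Recognized:', people)
    print('Single face:', recognizeSingleFace('portrait.jpg'))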