def computeMeanMain(args):
    align = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)

    imgs = list(iterImgs(args.inputDir))
    if args.numImages > 0:
        imgs = random.sample(imgs, args.numImages)

    facePoints = []
    for img in imgs:
        rgb = img.getRGB()
        bb = align.getLargestFaceBoundingBox(rgb)
        alignedPoints = align.align(rgb, bb)
        if alignedPoints:
            facePoints.append(alignedPoints)

    facePointsNp = np.array(facePoints)
    mean = np.mean(facePointsNp, axis=0)
    std = np.std(facePointsNp, axis=0)

    write(mean, "{}/mean.csv".format(args.modelDir))
    write(std, "{}/std.csv".format(args.modelDir))

    # Only import in this mode.
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    ax.scatter(mean[:, 0], -mean[:, 1], color='k')
    ax.axis('equal')
    for i, p in enumerate(mean):
        ax.annotate(str(i), (p[0] + 0.005, -p[1] + 0.005), fontsize=8)
    plt.savefig("{}/mean.png".format(args.modelDir))
def alignMain(args):
    openface.helper.mkdirP(args.outputDir)

    imgs = list(iterImgs(args.inputDir))

    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)

    align = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)

    nFallbacks = 0
    for imgObject in imgs:
        outDir = os.path.join(args.outputDir, imgObject.cls)
        imgName = "{}/{}.png".format(outDir, imgObject.name)
        openface.helper.mkdirP(outDir)

        if not os.path.isfile(imgName):
            rgb = imgObject.getRGB(cache=False)
            out = align.alignImg(args.method, args.size, rgb)

            if args.fallbackLfw and out is None:
                nFallbacks += 1
                deepFunneled = "{}/{}.jpg".format(
                    os.path.join(args.fallbackLfw, imgObject.cls),
                    imgObject.name)
                shutil.copy(
                    deepFunneled,
                    "{}/{}.jpg".format(
                        os.path.join(args.outputDir, imgObject.cls),
                        imgObject.name))

            if out is not None:
                io.imsave(imgName, out)

    print('nFallbacks:', nFallbacks)
def main(args):
    align = NaiveDlib(args.dlibFacePredictor)

    bgrImg = cv2.imread(args.img)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(args.img))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    bb = align.getLargestFaceBoundingBox(rgbImg)
    if bb is None:
        raise Exception("Unable to find a face: {}".format(args.img))

    landmarks = align.align(rgbImg, bb)
    if landmarks is None:
        raise Exception("Unable to align image: {}".format(args.img))
    # alignedFace = align.alignImg("affine", args.size, rgbImg, bb, landmarks)

    bl = (bb.left(), bb.bottom())
    tr = (bb.right(), bb.top())
    cv2.rectangle(bgrImg, bl, tr, color=(153, 255, 204), thickness=3)
    for landmark in landmarks:
        cv2.circle(bgrImg, center=landmark, radius=3,
                   color=(102, 204, 255), thickness=-1)

    print("Saving image to 'annotated.png'")
    cv2.imwrite("annotated.png", bgrImg)
def detectAlignImages(args):
    pool = ThreadPool(5)
    imgs = list(iterImgs(args.input))
    imgs.sort(key=lambda x: x.path)
    print("All images are listed")
    aligner = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)

    # Assign a sequential class number to each directory (class) of images.
    bundles = []
    current_class = None
    class_num = -1
    for imgObject in imgs:
        if imgObject.cls != current_class:
            class_num += 1
            current_class = imgObject.cls
        # descr.write(os.path.join(imgObject.cls, imgObject.name) + " " + str(class_num) + "\n")
        bundles.append((args, imgObject, aligner, class_num))
    # print("Description is generated")

    exists = pool.map(alignImage, bundles)
    pool.close()
    pool.join()

    with open(os.path.join(args.output, 'description.txt'), 'w') as descr:
        for (exist, obj) in zip(exists, bundles):
            if exist:
                descr.write(
                    os.path.join(obj[1].cls, obj[1].name) + " " +
                    str(obj[3]) + "\n")
def process(code, div):
    import openface
    import openface.helper
    import dlib
    from openface.alignment import NaiveDlib  # Depends on dlib.

    code = int(code)
    div = int(div)
    dlibModelDir = os.path.join(fileDir, "./openface/models/dlib")
    dlibFaceMean = os.path.join(dlibModelDir, "mean.csv")
    dlibFacePredictor = os.path.join(dlibModelDir,
                                     "shape_predictor_68_face_landmarks.dat")
    align = NaiveDlib(dlibFaceMean, dlibFacePredictor)
    dataset = data.Dataset()
    last = time.time()
    count = 0
    for model, key, img in dataset.get_images(BUCKET_NAME):
        # Shard the work: this worker only handles keys whose hash maps to its code.
        if hash(key) % div == code:
            bb = align.getLargestFaceBoundingBox(img)
            aligned = align.alignImg("affine", 224, img, bb)
            # print(time.time() - last)
            last = time.time()
            count += 1
            if aligned is not None:
                # print(model, key, img.shape, bb, aligned.shape)
                cv2.imwrite(
                    "output/face_{}".format(
                        key.replace('/', '_').replace('models', '')),
                    aligned)
                # cv2.imshow("test", aligned)
                # cv2.waitKey(0)
                # cv2.destroyAllWindows()
                # break
            if count % 20 == 0 and code == 0:
                local(
                    'aws s3 mv output/ s3://aub3data/output/ --recursive '
                    '--storage-class "REDUCED_REDUNDANCY" --region "us-east-1"')
def alignMain(args):
    openface.helper.mkdirP(args.outputDir)

    imgs = list(iterImgs(args.inputDir))

    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)

    if args.landmarks == 'outerEyesAndNose':
        landmarkIndices = NaiveDlib.OUTER_EYES_AND_NOSE
    elif args.landmarks == 'innerEyesAndBottomLip':
        landmarkIndices = NaiveDlib.INNER_EYES_AND_BOTTOM_LIP
    else:
        raise Exception("Landmarks unrecognized: {}".format(args.landmarks))

    align = NaiveDlib(args.dlibFacePredictor)

    nFallbacks = 0
    for imgObject in imgs:
        outDir = os.path.join(args.outputDir, imgObject.cls)
        openface.helper.mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + ".png"

        if not os.path.isfile(imgName):
            rgb = imgObject.getRGB()
            if rgb is not None:
                print(imgName, type(rgb), rgb.shape)
                outRgb = align.alignImg('affine', args.size, rgb,
                                        landmarkIndices=landmarkIndices)
            else:
                outRgb = None

            if args.fallbackLfw and outRgb is None:
                nFallbacks += 1
                deepFunneled = "{}/{}.jpg".format(
                    os.path.join(args.fallbackLfw, imgObject.cls),
                    imgObject.name)
                shutil.copy(
                    deepFunneled,
                    "{}/{}.jpg".format(
                        os.path.join(args.outputDir, imgObject.cls),
                        imgObject.name))

            if outRgb is not None:
                outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
                cv2.imwrite(imgName, outBgr)

    if args.fallbackLfw:
        print('nFallbacks:', nFallbacks)
def detectAlignImages(args):  # input_dir, output_dir
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        imgs = list(iterImgs(args.input))
        aligner = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)

        futures = []
        for imgObject in imgs:
            future = executor.submit(alignImage, args, imgObject, aligner)
            futures.append(future)

        count_success = 0
        n = len(futures)
        # for future in concurrent.futures.as_completed(futures):
        for future in futures:
            future.result()
            count_success += 1
            if count_success % 1000 == 0:
                print("{} photos resized out of {}".format(count_success, n))
def process():
    dlibModelDir = os.path.join(fileDir, "./openface/models/dlib")
    dlibFaceMean = os.path.join(dlibModelDir, "mean.csv")
    dlibFacePredictor = os.path.join(dlibModelDir,
                                     "shape_predictor_68_face_landmarks.dat")
    align = NaiveDlib(dlibFaceMean, dlibFacePredictor)
    dataset = data.Dataset()
    for model, key, img in dataset.get_images(BUCKET_NAME):
        bb = align.getLargestFaceBoundingBox(img)
        aligned = align.alignImg("affine", 224, img, bb)
        if aligned is not None:
            print(model, key, img.shape, bb, aligned.shape)
            cv2.imwrite("test/face_{}".format(key.replace('/', '_')), aligned)
            # cv2.imshow("test", aligned)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            # break
        else:
            print("No face found")
                    type=int,
                    help="Default image dimension.",
                    default=96)
parser.add_argument('--cuda', type=bool, default=False)
parser.add_argument('--unknown', type=bool, default=False,
                    help='Try to predict unknown people')
args = parser.parse_args()

sys.path.append(args.dlibRoot)
import dlib
from openface.alignment import NaiveDlib  # Depends on dlib.

align = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)
net = openface.TorchWrap(args.networkModel, imgDim=args.imgDim,
                         cuda=args.cuda)


class Face:

    def __init__(self, rep, identity):
        self.rep = rep
        self.identity = identity

    def __repr__(self):
        return "{{id: {}, rep[0:5]: {}}}".format(
            str(self.identity),
            self.rep[0:5])


class OpenFaceServerProtocol(WebSocketServerProtocol):

    def __init__(self):
parser = argparse.ArgumentParser()
parser.add_argument('imgDir', type=str, help="Input image directory.")
parser.add_argument('--numImages', type=int, default=1000)
parser.add_argument('--model', type=str, help="TODO",
                    default="./models/openface/nn4.v1.t7")
parser.add_argument('--outputFile', type=str,
                    help="Output file, stored in numpy serialized format.",
                    default="./unknown.npy")
parser.add_argument('--imgDim', type=int,
                    help="Default image size.",
                    default=96)
args = parser.parse_args()

align = NaiveDlib("models/dlib/",
                  "shape_predictor_68_face_landmarks.dat")
openface = openface.TorchWrap(args.model, imgDim=args.imgDim, cuda=False)

allImgs = list(iterImgs(args.imgDir))
imgObjs = random.sample(allImgs, args.numImages)

reps = []
for imgObj in imgObjs:
    rep = openface.forward(imgObj.path)
    rep = np.array(rep)
    reps.append(rep)

np.save(args.outputFile, np.row_stack(reps))
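As a quick, hypothetical follow-up to the script above, the serialized representations can be read back with standard NumPy; the path matches the script's default `--outputFile`.

import numpy as np

# One row per sampled image, one column per embedding dimension.
reps = np.load("./unknown.npy")
print(reps.shape)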
import openface.helper
from openface.alignment import NaiveDlib  # Depends on dlib.

from subprocess import Popen, PIPE

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')

dlibFacePredictor = os.path.join(dlibModelDir,
                                 "shape_predictor_68_face_landmarks.dat")
networkModel = os.path.join(openfaceModelDir, 'nn4.v1.t7')
imgDim = 96

align = NaiveDlib(dlibFacePredictor)
net = openface.TorchWrap(networkModel, imgDim=imgDim)


def test_pipeline():
    imgPath = os.path.join(fileDir, 'images', 'examples', 'lennon-1.jpg')
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    assert np.isclose(norm(rgbImg), 11.1355)

    bb = align.getLargestFaceBoundingBox(rgbImg)
    assert bb.left() == 341
    assert bb.right() == 1006
    assert bb.top() == 193
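For reference, a minimal sketch of the detect → align → embed flow that the snippets above compose, reusing the module-level `align` and `net` objects from the test setup; the function name, the temporary output path, and the use of a file round-trip before `net.forward` are assumptions for illustration, not part of any of the original scripts.

def embedFace(imgPath, alignedPath="aligned.png", imgDim=96):
    # Load with OpenCV (BGR) and convert to RGB, as in the snippets above.
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)

    # Detect the largest face and produce an affine-aligned crop.
    bb = align.getLargestFaceBoundingBox(rgbImg)
    if bb is None:
        return None
    alignedRgb = align.alignImg("affine", imgDim, rgbImg, bb)
    if alignedRgb is None:
        return None

    # The TorchWrap.forward call used above takes an image path,
    # so write the aligned crop to disk before computing the representation.
    cv2.imwrite(alignedPath, cv2.cvtColor(alignedRgb, cv2.COLOR_RGB2BGR))
    return np.array(net.forward(alignedPath))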