# Sample up to --numImages images from a directory, run each through the
# OpenFace network, and save the stacked embeddings in NumPy .npy format.

parser = argparse.ArgumentParser()
parser.add_argument('imgDir', type=str, help="Input image directory.")
parser.add_argument('--numImages', type=int, default=1000)
parser.add_argument('--model', type=str,
                    help="Path to the Torch network model.",
                    default="./models/openface/nn4.v1.t7")
parser.add_argument('--outputFile', type=str,
                    help="Output file, stored in numpy serialized format.",
                    default="./unknown.npy")
parser.add_argument('--imgDim', type=int, help="Default image size.",
                    default=96)
args = parser.parse_args()

# NOTE(review): `align` is constructed but never used below -- images are fed
# to the network unaligned. Confirm whether alignment was intended here.
align = NaiveDlib("models/dlib/", "shape_predictor_68_face_landmarks.dat")

# Bug fix: the original rebound the module name `openface` to the wrapper
# instance (`openface = openface.TorchWrap(...)`), shadowing the import.
# Use `net`, consistent with the project's other scripts.
net = openface.TorchWrap(args.model, imgDim=args.imgDim, cuda=False)

allImgs = list(iterImgs(args.imgDir))
# Robustness: don't raise ValueError when the directory holds fewer images
# than requested -- just use everything available.
imgObjs = random.sample(allImgs, min(args.numImages, len(allImgs)))

# Forward each sampled image and collect its embedding vector.
reps = [np.array(net.forward(imgObj.path)) for imgObj in imgObjs]

# np.vstack replaces np.row_stack, which was deprecated and removed in
# NumPy 2.0; behavior is identical for 1-D row inputs.
np.save(args.outputFile, np.vstack(reps))
# NOTE(review): this chunk begins mid-way through a `parser.add_argument`
# call whose opening lies outside this view; the fragment is kept verbatim.
help="Default image dimension.", default=96)
# NOTE(review): argparse `type=bool` does not parse command-line strings as
# expected (`bool('False')` is True) -- confirm flags are only ever used
# with their defaults or as store_true-style switches.
parser.add_argument('--cuda', type=bool, default=False)
parser.add_argument('--unknown', type=bool, default=False,
                    help='Try to predict unknown people')
args = parser.parse_args()

# dlib is importable only after its install root is appended to sys.path.
sys.path.append(args.dlibRoot)
import dlib

from openface.alignment import NaiveDlib  # Depends on dlib.

# Face aligner and the Torch network wrapper used to compute embeddings.
align = NaiveDlib(args.dlibFaceMean, args.dlibFacePredictor)
net = openface.TorchWrap(args.networkModel, imgDim=args.imgDim,
                         cuda=args.cuda)


class Face:
    """Pair a face embedding (`rep`) with the identity it was assigned."""

    def __init__(self, rep, identity):
        self.rep = rep
        self.identity = identity

    def __repr__(self):
        # Literal braces are doubled; shows the identity and the first five
        # embedding components for compact debug output.
        return "{{id: {}, rep[0:5]: {}}}".format(
            str(self.identity),
            self.rep[0:5])


class OpenFaceServerProtocol(WebSocketServerProtocol):
    # NOTE(review): the remainder of this class continues past this chunk;
    # only the start of __init__ is visible here.

    def __init__(self):
        # Per-connection image store; keys/values not visible in this chunk.
        self.images = {}
from openface.alignment import NaiveDlib # Depends on dlib. from subprocess import Popen, PIPE fileDir = os.path.dirname(os.path.realpath(__file__)) modelDir = os.path.join(fileDir, 'models') dlibModelDir = os.path.join(modelDir, 'dlib') openfaceModelDir = os.path.join(modelDir, 'openface') dlibFacePredictor = os.path.join(dlibModelDir, "shape_predictor_68_face_landmarks.dat") networkModel = os.path.join(openfaceModelDir, 'nn4.v1.t7') imgDim = 96 align = NaiveDlib(dlibFacePredictor) net = openface.TorchWrap(networkModel, imgDim=imgDim) def test_pipeline(): imgPath = os.path.join(fileDir, 'images', 'examples', 'lennon-1.jpg') bgrImg = cv2.imread(imgPath) if bgrImg is None: raise Exception("Unable to load image: {}".format(imgPath)) rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB) assert np.isclose(norm(rgbImg), 11.1355) bb = align.getLargestFaceBoundingBox(rgbImg) assert bb.left() == 341 assert bb.right() == 1006 assert bb.top() == 193 assert bb.bottom() == 859