def main(args):
    """Extract faces from every video under ``args.inputDir``.

    Builds one dlib aligner and one Torch face network, then writes the
    extracted faces into a per-class directory beneath ``args.outputDir``.
    Relies on module-level helpers: mkdirP, iterVids, extract_faces and the
    model paths dlibFacePredictor / faceDetectionModel.
    """
    mkdirP(args.outputDir)
    videos = list(iterVids(args.inputDir))
    align = openface.AlignDlib(dlibFacePredictor)
    net = openface.TorchNeuralNet(faceDetectionModel, imgDim=96, cuda=False)
    # TODO - consider shuffling so parallel runs process different videos.
    # (Removed: a commented-out per-class/per-name filter that was dead code.)
    for videoObject in videos:
        print("=== {} ===".format(videoObject.path))
        classDir = os.path.join(args.outputDir, videoObject.cls)
        mkdirP(classDir)
        extract_faces(args, videoObject, classDir, net, align,
                      videoObject.name, args.multiple)
def align(args, multi):
    """Align face images found under ``trainingDir`` into ``alignedDir``.

    Skips images whose aligned PNG already exists. Relies on module-level
    globals: trainingDir, alignedDir, landmarks, imgSize, dlibFacePredictor.

    :param args: parsed CLI options; only ``args.verbose`` is read here.
    :param multi: when True, allow multi-face alignment (skipMulti=False).
    :raises Exception: if the module-level ``landmarks`` name is unrecognized.
    """
    mkdirP(alignedDir)
    imgs = list(iterImgs(trainingDir))
    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)
    landmarkMap = {
        'outerEyesAndNose': openface.AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP,
    }
    if landmarks not in landmarkMap:
        # TODO: Avoid exceptions, find way to silently fail and skip image
        raise Exception("Landmarks unrecognized: {}".format(landmarks))
    landmarkIndices = landmarkMap[landmarks]
    # Renamed from `align`: the original local shadowed this function's own name.
    aligner = openface.AlignDlib(dlibFacePredictor)
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        outDir = os.path.join(alignedDir, imgObject.cls)
        mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        # TODO: output is still PNG?
        imgName = outputPrefix + ".png"
        if os.path.isfile(imgName):
            if args.verbose:
                print(" + Already found, skipping.")
            continue
        rgb = imgObject.getRGB()
        if rgb is None:
            if args.verbose:
                print(" + Unable to load.")
            outRgb = None
        else:
            outRgb = aligner.align(imgSize, rgb,
                                   landmarkIndices=landmarkIndices,
                                   skipMulti=not multi)
            if outRgb is None and args.verbose:
                print(" + Unable to align.")
        if outRgb is not None:
            if args.verbose:
                print(" + Writing aligned file to disk.")
            outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
            cv2.imwrite(imgName, outBgr)
def batchAlign(imgs):
    """Align a batch of image objects into ``alignedDir``.

    Worker variant of ``align`` for pre-divided image lists: uses the
    module-level ``align_pred`` predictor and always runs single-face
    (skipMulti=True). Images whose aligned PNG already exists are skipped.

    :param imgs: iterable of image objects with .path/.cls/.name/.getRGB().
    :raises Exception: if the module-level ``landmarks`` name is unrecognized.
    """
    multi = False  # single-face mode; skipMulti=not multi == True below
    landmarkMap = {
        'outerEyesAndNose': openface.AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': openface.AlignDlib.INNER_EYES_AND_BOTTOM_LIP,
    }
    if landmarks not in landmarkMap:
        # TODO: Avoid exceptions, find way to silently fail and skip image
        raise Exception("Landmarks unrecognized: {}".format(landmarks))
    landmarkIndices = landmarkMap[landmarks]
    # (Removed: unused timing variable, unused nFallbacks counter, and a
    # dead code fragment left inside a triple-quoted string.)
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        outDir = os.path.join(alignedDir, imgObject.cls)
        mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + ".png"
        if not os.path.isfile(imgName):
            rgb = imgObject.getRGB()
            if rgb is None:
                print(" + Unable to load.")
                outRgb = None
            else:
                outRgb = align_pred.align(imgSize, rgb,
                                          landmarkIndices=landmarkIndices,
                                          skipMulti=not multi)
                if outRgb is None:
                    print(" + Unable to align.")
            if outRgb is not None:
                print(" + Writing aligned file to disk.")
                outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
                cv2.imwrite(imgName, outBgr)
def main(args):
    """Run face extraction for each input video, one output folder per video.

    Output layout: ``args.outputDir/<class>/<video name>/``.
    """
    mkdirP(args.outputDir)
    # TODO - consider shuffling
    for video in list(iterVids(args.inputDir)):
        print("=== {} ===".format(video.path))
        classDir = os.path.join(args.outputDir, video.cls)
        destination = os.path.join(classDir, video.name)
        mkdirP(classDir)
        mkdirP(destination)
        extract_faces(args, video, destination)
def prebatchAlign(num_processes, args, multi):
    """Shuffle the training images and split them into ``num_processes`` chunks.

    :param num_processes: number of sub-arrays to divide the image list into.
    :param args: CLI options (not read here; kept for a uniform signature).
    :param multi: multi-face flag (not read here; kept for a uniform signature).
    :return: list of image-object chunks, one per worker process.
    """
    mkdirP(alignedDir)
    pool = list(iterImgs(trainingDir))
    np.random.shuffle(pool)
    return divideArray(pool, num_processes)
def eventDetector(args):
    """Watch a video feed and collect face frames into session folders.

    Frames containing a detected face are buffered under collectBufferDir;
    once NEEDED_IMAGES have been saved, the buffer is moved to a timestamped
    folder under sessionsDir and recognition runs in a background thread.
    After IDLE_WAIT_COUNT consecutive face-less frames an incomplete buffer
    is discarded. Press ESC in the preview window to quit.

    :param args: CLI options; reads ``args.feed`` (empty means webcam) and
        ``args.verbose``.
    """
    # Make the data folders.
    if args.verbose:
        print("Creating {} if it does not exist already".format(savedDataDir))
        print("Creating {} if it does not exist already".format(collectBufferDir))
        print("Creating {} if it does not exist already".format(sessionsDir))
    mkdirP(savedDataDir)
    mkdirP(collectBufferDir)
    mkdirP(sessionsDir)

    # First load the feed.
    if len(args.feed) == 0:
        if args.verbose:
            print("Loading feed from webcam")
        # NOTE(review): device index 1 skips the default camera 0 -- confirm intended.
        cap = cv2.VideoCapture(1)
    else:
        if args.verbose:
            print("Loading feed from {}".format(args.feed))
        cap = cv2.VideoCapture(args.feed)

    processedFrames = 1
    imageCount = 0
    idleCount = 0
    while cap.isOpened():
        ret = False
        time.sleep(SLEEP_TIME)
        # NOTE(review): spins forever if the feed ends and read() keeps failing.
        while not ret:
            ret, frame = cap.read()
        processedFrames = processedFrames + 1

        # Detect a face.
        faceDetected = detectFace(frame)
        if faceDetected:
            idleCount = 0
            if imageCount < NEEDED_IMAGES:
                # Save the frame into the collection buffer.
                savedFacePath = os.path.join(collectBufferDir,
                                             "{}.jpg".format(imageCount))
                if args.verbose:
                    print("Saving image to {}".format(savedFacePath))
                cv2.imwrite(savedFacePath, frame)
            if imageCount == NEEDED_IMAGES:
                # TODO(review): the *folder* name carries a ".jpg" suffix --
                # looks unintended; confirm before changing (testImages may rely on it).
                newFolderName = time.strftime("%m%d%Y%H%M%S.jpg")
                newFolderPath = os.path.join(sessionsDir, newFolderName)
                mvP(collectBufferDir, newFolderPath)
                if args.verbose:
                    # Fixed: format() was given three args for two placeholders.
                    print("Saved enough images, moving {} to {}".format(
                        collectBufferDir, newFolderPath))
                mkdirP(collectBufferDir)
                # Test the moved images on a background thread.
                testingThread = threading.Thread(target=testImages,
                                                 args=(args, newFolderPath))
                testingThread.start()
            imageCount += 1
        else:
            # No face detected, count idle frames.
            idleCount += 1
            if idleCount >= IDLE_WAIT_COUNT:
                idleCount = 0
                if imageCount < NEEDED_IMAGES:
                    if args.verbose:
                        print("Detected no movement for awhile, deleting everything in the folder")
                    rmdirP(collectBufferDir)
                    mkdirP(collectBufferDir)
                    imageCount = 0
        cv2.imshow('frame', frame)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC quits
            break
    # Update optical flow info
    cv2.destroyAllWindows()
    cap.release()
def alignMain(args):
    # Align every image under args.inputDir and write the result as a PNG
    # into a per-class folder under args.outputDir. When alignment fails and
    # args.fallbackLfw is set, copy the pre-aligned deep-funneled LFW image
    # for the same class/name instead.
    util.mkdirP(args.outputDir)
    imgs = list(iterImgs(args.inputDir))
    # Shuffle so multiple versions can be run at once.
    random.shuffle(imgs)
    # Map the CLI landmark-set name to dlib landmark index groups.
    landmarkMap = {
        'outerEyesAndNose': AlignDlib.OUTER_EYES_AND_NOSE,
        'innerEyesAndBottomLip': AlignDlib.INNER_EYES_AND_BOTTOM_LIP,
        'outerEyesAndJaw': AlignDlib.OUTER_EYES_AND_JAW,
        'innerEyesAndNose': AlignDlib.INNER_EYES_AND_NOSE
    }
    if args.landmarks not in landmarkMap:
        raise Exception("Landmarks unrecognized: {}".format(args.landmarks))
    landmarkIndices = landmarkMap[args.landmarks]
    align = AlignDlib(args.dlibFacePredictor)
    nFallbacks = 0  # count of images replaced by deep-funneled LFW copies
    for imgObject in imgs:
        print("=== {} ===".format(imgObject.path))
        outDir = os.path.join(args.outputDir, imgObject.cls)
        util.mkdirP(outDir)
        outputPrefix = os.path.join(outDir, imgObject.name)
        imgName = outputPrefix + ".png"
        if os.path.isfile(imgName):
            # Aligned output already exists from a previous run; skip it.
            if args.verbose:
                print(" + Already found, skipping.")
        else:
            rgb = imgObject.getRGB()
            if rgb is None:
                if args.verbose:
                    print(" + Unable to load.")
                outRgb = None
            else:
                outRgb = align.align(args.size, rgb,
                                     landmarkIndices=landmarkIndices,
                                     skipMulti=args.skipMulti)
                if outRgb is None and args.verbose:
                    print(" + Unable to align.")
            if args.fallbackLfw and outRgb is None:
                # Alignment failed: substitute the deep-funneled LFW jpg for
                # this class/name into the output tree.
                nFallbacks += 1
                deepFunneled = "{}/{}.jpg".format(os.path.join(args.fallbackLfw,
                                                               imgObject.cls),
                                                  imgObject.name)
                shutil.copy(deepFunneled,
                            "{}/{}.jpg".format(os.path.join(args.outputDir,
                                                            imgObject.cls),
                                               imgObject.name))
            if outRgb is not None:
                if args.verbose:
                    print(" + Writing aligned file to disk.")
                # OpenCV writes BGR, so convert from the aligner's RGB output.
                outBgr = cv2.cvtColor(outRgb, cv2.COLOR_RGB2BGR)
                cv2.imwrite(imgName, outBgr)
    if args.fallbackLfw:
        print('nFallbacks:', nFallbacks)