num_label_images.append(cnt)
cnt = 0
print("new label: " + name)
label = name
labels.append(label)
os.mkdir(os.path.join(dst, label))
num_labels += 1

image = dlib.load_rgb_image(os.path.join(path, f))
print("processing " + f + " ..." + " (" + label + ")")

# Scale the image down so its longest side is 800 pixels.
# Note that dlib.resize_image() takes (rows, cols) and shape[0] is the row count.
div = max(image.shape[0], image.shape[1]) / 800
new_rows = int(image.shape[0] / div)
new_cols = int(image.shape[1] / div)
image = dlib.resize_image(image, new_rows, new_cols)

dets = face_detector(image, 1)
if len(dets) == 0:
    print("#### Failed to extract face: no face detected ####\n")
    failed_paths = failed_paths + str(fail_cnt) + ". " + \
        os.path.join(path, f) + " -- no face detected. \n"
    fail_cnt = fail_cnt + 1
elif len(dets) > 1:
    print("#### Failed to extract face: too many faces in this picture ####\n")
    failed_paths = failed_paths + str(fail_cnt) + ". " + \
        os.path.join(path, f) + " -- too many faces in this picture. \n"
    fail_cnt = fail_cnt + 1
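# The fragment above only shows the two failure branches; a plausible sketch
# of the success branch (exactly one face found), assuming a dlib shape
# predictor `sp` is loaded elsewhere in the script and that the aligned face
# chip is what gets saved into the label directory (the 256-pixel chip size
# is illustrative only):
else:
    shape = sp(image, dets[0])
    chip = dlib.get_face_chip(image, shape, size=256)  # hypothetical chip size
    dlib.save_image(chip, os.path.join(dst, label, f))
    cnt += 1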
# to find face landmarks so we can precisely localize the face, and finally the
# face recognition model.
detector = dlib.get_frontal_face_detector()
sp = dlib.shape_predictor(predictor_path)
facerec = dlib.face_recognition_model_v1(face_rec_model_path)

descriptors = []
images = []

# Now find all the faces and compute 128D face descriptors for each face.
for f in glob.glob(os.path.join(faces_folder_path, "*.jpg")) + \
        glob.glob(os.path.join(faces_folder_path, "*.JPG")):
    print("Processing file: {}".format(f))
    img = dlib.load_rgb_image(f)
    # Downscale large images so detection runs faster.
    if img.shape[0] > 800:
        scale = 800.0 / img.shape[0]
        img = dlib.resize_image(img, scale)

    # Ask the detector to find the bounding boxes of each face. The 1 in the
    # second argument indicates that we should upsample the image 1 time. This
    # will make everything bigger and allow us to detect more faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))

    # Now process each face we found.
    for k, d in enumerate(dets):
        # Get the landmarks/parts for the face in box d.
        shape = sp(img, d)

        # Compute the 128D vector that describes the face in img identified by
        # shape.
        face_descriptor = facerec.compute_face_descriptor(img, shape)
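        # The loop presumably stores each result in the two lists declared
        # above; the usual next step (as in dlib's face clustering example)
        # is to group the 128D vectors with chinese_whispers_clustering,
        # 0.5 being the conventional threshold for this face model:
        descriptors.append(face_descriptor)
        images.append((img, shape))

# Cluster all the faces found across the input images.
labels = dlib.chinese_whispers_clustering(descriptors, 0.5)
num_classes = len(set(labels))
print("Number of clusters: {}".format(num_classes))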
import dlib


def main(config):
    detector_ori = dlib.get_frontal_face_detector()

    # Open the image file.
    try:
        img = dlib.load_rgb_image(config.input_path)
    except Exception as e:
        print("While processing, " + str(e) + '\n')
        exit()

    # If the resolution is less than 128x128 then skip.
    img_height = img.shape[0]
    img_width = img.shape[1]
    if img_height < 128 or img_width < 128:
        print("While processing, image size too small" + '\n')
        exit()

    # Find the one detection with the highest confidence and use it to
    # decide the crop size.
    max_object = None
    dets, score, idx = detector_ori.run(img, 1, -1)
    max_confi = 0.6
    if len(dets) == 0:
        print("While processing, face not detected" + '\n')
        exit()
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))
        if max_confi < score[i]:
            max_confi = score[i]
            max_object = d
    d = max_object
    if d is None:
        print("While processing, face not detected" + '\n')
        exit()

    # Expand the detection box by half its width/height on every side.
    d_width = int((d.right() - d.left() + 1) // 2)
    d_height = int((d.bottom() - d.top() + 1) // 2)
    crop_top = d.top() - d_height
    crop_bottom = d.bottom() + d_height
    crop_left = d.left() - d_width
    crop_right = d.right() + d_width
    img_out_length = min(crop_top, crop_left,
                         img_height - crop_bottom, img_width - crop_right)
    if img_out_length < -d_width / 2:
        print("While processing, face image over index" + '\n')
        exit()
    if img_out_length < 0:
        # Shrink the crop window back inside the image bounds.
        crop_top = crop_top - img_out_length
        crop_bottom = crop_bottom + img_out_length
        crop_left = crop_left - img_out_length
        crop_right = crop_right + img_out_length

    # Make the cropped and resized image from the original one.
    img = img[crop_top:crop_bottom, crop_left:crop_right]
    if img.shape[0] != img.shape[1]:
        final_size = min(img.shape[0], img.shape[1])
        img = img[:final_size, :final_size]
    img = dlib.resize_image(img, config.image_size / img.shape[0])
    dlib.save_image(img, config.save_path)
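# A minimal, hypothetical driver for main() above. The argument names mirror
# the config attributes the function reads (input_path, save_path,
# image_size); the default size here is illustrative only.
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Crop and resize one face image')
    parser.add_argument('--input_path', type=str, required=True)
    parser.add_argument('--save_path', type=str, required=True)
    parser.add_argument('--image_size', type=int, default=128)
    config = parser.parse_args()
    main(config)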
def f(img):
    # Get a proportional width x height that fits the target size.
    wx, wy = _better_proportion(img_x, img_y, img.shape[0], img.shape[1])
    # Resize proportionally and add black padding up to img_x x img_y.
    return padding(img_x, img_y, dlib.resize_image(img, wx, wy))
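# Neither helper used by f() is shown in this fragment; plausible sketches,
# assuming _better_proportion() scales (x, y) to fit inside (tx, ty) while
# preserving aspect ratio, and padding() centers the resized image on a
# black canvas of the target size:
import numpy as np

def _better_proportion(tx, ty, x, y):
    # Largest scale factor that keeps the image entirely inside the target box.
    s = min(tx / x, ty / y)
    return int(x * s), int(y * s)

def padding(tx, ty, img):
    # Black canvas matching the image dtype; works for gray or color images.
    canvas = np.zeros((tx, ty) + img.shape[2:], dtype=img.dtype)
    ox = (tx - img.shape[0]) // 2  # vertical offset to center
    oy = (ty - img.shape[1]) // 2  # horizontal offset to center
    canvas[ox:ox + img.shape[0], oy:oy + img.shape[1]] = img
    return canvas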
try:
    exif = {
        ExifTags.TAGS[k]: v
        for k, v in img._getexif().items()
        if k in ExifTags.TAGS
    }
except Exception:
    exif = {'Orientation': 1}
if 'Orientation' not in exif:
    exif['Orientation'] = 1
degree = dic_exif[exif['Orientation']]
# Rotate the image; expand must be set, otherwise the rotated image
# ends up with black borders.
img_clip = img.rotate(degree, expand=1)
# Convert the PIL image to an OpenCV-style numpy image.
img = np.array(img_clip)
# Convert RGB to BGR
# img = img[:, :, ::-1].copy()

img = dlib.resize_image(img, scale=1024 / img.shape[1])

# The 1 in the second argument indicates that we should upsample the image
# 1 time. This will make everything bigger and allow us to detect more
# faces.
dets = detector(img, 1)
print("Number of faces detected: {}".format(len(dets)))
for i, d in enumerate(dets):
    print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
        i, d.left(), d.top(), d.right(), d.bottom()))

win.clear_overlay()
win.set_image(img)
win.add_overlay(dets)
dlib.hit_enter_to_continue()

# Finally, if you really want to you can ask the detector to tell you the score
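# dic_exif is defined earlier in this script and not shown in the fragment
# above; a plausible sketch, assuming it maps EXIF Orientation values to the
# counterclockwise degrees expected by PIL's Image.rotate():
dic_exif = {1: 0, 3: 180, 6: 270, 8: 90}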
import dlib
from skimage import io  # assumed source of the io.imread() call below


def crop_image(file):
    detector_ori = dlib.get_frontal_face_detector()

    # Open the image file.
    try:
        img = io.imread(file)
    except Exception as e:
        message = "While processing, " + str(e)
        return message

    # If the resolution is less than 128x128 then skip.
    img_height = img.shape[0]
    img_width = img.shape[1]
    if img_height < 128 or img_width < 128:
        message = "While processing, image size too small"
        return message

    # Find the one detection with the highest confidence and use it to
    # decide the crop size.
    max_object = None
    dets, score, idx = detector_ori.run(img, 1, -1)
    max_confi = 0.6
    if len(dets) == 0:
        message = "While processing, face not detected"
        return message
    for i, d in enumerate(dets):
        if max_confi < score[i]:
            max_confi = score[i]
            max_object = d
    d = max_object
    if d is None:
        message = "While processing, face not detected"
        return message

    # Expand the detection box by half its width/height on every side.
    d_width = int((d.right() - d.left() + 1) // 2)
    d_height = int((d.bottom() - d.top() + 1) // 2)
    crop_top = d.top() - d_height
    crop_bottom = d.bottom() + d_height
    crop_left = d.left() - d_width
    crop_right = d.right() + d_width
    img_out_length = min(crop_top, crop_left,
                         img_height - crop_bottom, img_width - crop_right)
    if img_out_length < -d_width / 2:
        message = "While processing, face image over index"
        return message
    if img_out_length < 0:
        # Shrink the crop window back inside the image bounds.
        crop_top = crop_top - img_out_length
        crop_bottom = crop_bottom + img_out_length
        crop_left = crop_left - img_out_length
        crop_right = crop_right + img_out_length

    # Make the cropped and resized image from the original one.
    img = img[crop_top:crop_bottom, crop_left:crop_right]
    if img.shape[0] != img.shape[1]:
        final_size = min(img.shape[0], img.shape[1])
        img = img[:final_size, :final_size]
    img = dlib.resize_image(img, 128 / img.shape[0])
    return img
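# Hypothetical usage of crop_image(): it returns an error string on failure
# and a numpy image on success, so the caller must check the result type.
# The file names here are illustrative only.
result = crop_image('example_face.jpg')
if isinstance(result, str):
    print(result)  # e.g. "While processing, face not detected"
else:
    dlib.save_image(result, 'example_face_cropped.jpg')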
import os
import platform
import sys
import time

import cv2
import dlib
import matplotlib.pyplot as plt
import numpy as np

import AUmaps  # project-local module providing the AUdetector wrapper


def main():
    verbose = True
    if len(sys.argv) > 1 and sys.argv[1] == '-q':
        verbose = False

    print(' ** Loading model ... ')
    AUdetector = AUmaps.AUdetector('shape_predictor_68_face_landmarks.dat',
                                   enable_cuda=True)
    cam = cv2.VideoCapture(0)

    fig = plt.figure(figsize=plt.figaspect(2.5))
    fig.canvas.mpl_connect('close_event', handle_close)
    axs = fig.subplots(5, 2)

    # Init subplots and images within
    implots = []
    autitles = ['AU06', 'AU10', 'AU12', 'AU14', 'AU17']
    axc = 0
    axt = 0
    for ax in axs.reshape(-1):
        ax.axis('off')
        ax.set_title(autitles[axc])
        # Advance the title index after every second subplot, so each AU
        # labels one face/heatmap pair.
        if (axt % 2) != 0:
            axc += 1
        axt += 1
        implots.append(ax.imshow(np.zeros((256, 256))))

    clearscr = True
    try:
        global keep
        tstart_time = time.time()
        nframes = 0
        while keep:
            start_time = time.time()
            _, img = cam.read()
            # Downscale webcam image 2x to speed things up
            img = cv2.resize(img, None, fx=0.5, fy=0.5)
            # Optionally flip webcam image, probably not relevant
            # img = cv2.flip(img, 1)
            # Optionally display webcam image with opencv
            # cv2.imshow('Action Unit Heatmaps - Press Q to exit!', img)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            nframes += 1
            # au_maps holds one heatmap per action unit (renamed from `map`
            # to avoid shadowing the builtin).
            pred, au_maps, img = AUdetector.detectAU(img)

            if len(pred) == 0:
                sys.stdout.write(
                    "\r | FALSE | -- | -- | -- | -- | -- | -- |")
                sys.stdout.flush()
                continue

            if clearscr:
                clearscr = False
                if platform.system() == 'Windows':
                    os.system('cls')
                else:
                    os.system('clear')
                # Print AU description: https://www.cs.cmu.edu/~face/facs.htm
                print('')
                print(' ** AU06 : Cheek Raiser')
                print(' ** AU10 : Upper Lip Raiser')
                print(' ** AU12 : Lip Corner Puller (Smile)')
                print(' ** AU14 : Dimpler')
                print(' ** AU17 : Chin Raiser')
                # Print the table header
                sys.stdout.write(
                    " _______________________________________________________________________ \n")
                sys.stdout.write(
                    " | Face Found | AU06 | AU10 | AU12 | AU14 | AU17 | FPS Elapsed |\n")

            if verbose:
                for j in range(0, 5):
                    resized_map = dlib.resize_image(
                        au_maps[j, :, :].cpu().data.numpy(), rows=256, cols=256)
                    # Update face image subplot
                    implots[2 * j].set_data(img)
                    # Update heatmap subplot
                    implots[2 * j + 1].set_data(resized_map)
                    # Set correct heatmap limits
                    resized_map_flat = resized_map.flatten()
                    implots[2 * j + 1].set_clim(min(resized_map_flat),
                                                max(resized_map_flat))
                    # To plot heatmaps the original way - looks identical to the new way!
                    # ax = fig.add_subplot(5,2,2*j+1)
                    # ax.imshow(resized_map)
                    # ax.axis('off')
                plt.pause(0.001)
                # plt.show(block=False)
                plt.draw()

            elapsed_time = 1.0 / (time.time() - start_time)
            sys.stdout.write(
                "\r | TRUE | %6.3f | %6.3f | %6.3f | %6.3f | %6.3f | %7.3f |"
                % (pred[0], pred[1], pred[2], pred[3], pred[4], elapsed_time))
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass

    # Close camera
    cam.release()
    # If webcam images shown with opencv, close window
    # cv2.destroyAllWindows()
    telapsed_time = time.time() - tstart_time
    print('\n\n ** Mean FPS Elapsed: {0:.3f} \n'.format(
        1.0 / (telapsed_time / nframes)))
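# A minimal sketch of the pieces main() assumes but that are not part of the
# fragment above: a global `keep` flag and the matplotlib close handler that
# stops the capture loop when the figure window is closed.
keep = True

def handle_close(evt):
    # Called by matplotlib when the figure window is closed.
    global keep
    keep = False

if __name__ == '__main__':
    main()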