def __init__(self, *args, **kwargs):
    # Pull our own keyword argument off before forwarding the rest to the
    # base class, and don't pass self twice to super().__init__.
    self.train_size = kwargs.pop('train_size', -1)
    super(FacialFeatureTrainer, self).__init__(*args, **kwargs)
    self.images = utils.load_samples(paths.TRAINING_IMAGE_PATH, ".png", allow_consecutive=False)
    self.landmarks = utils.load_samples(paths.TRAINING_LANDMARK_PATH, ".txt", allow_consecutive=False)
    # A negative train_size means "use every available training image".
    if self.train_size < 0:
        self.train_size = len(self.images)
    self.train()
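# Example usage (a sketch; any positional arguments accepted by the base
# class are not shown here, so treat them as assumptions):
#
#     trainer = FacialFeatureTrainer(train_size=200)
#
# Omitting train_size (or passing a negative value) trains on every sample
# found under paths.TRAINING_IMAGE_PATH; train() runs at the end of __init__.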
def generate_features_from_session(self, session, auid):
    images_dir = os.path.join(paths.TRAINING_IMAGE_PATH, session)
    landmarks_dir = os.path.join(paths.TRAINING_LANDMARK_PATH, session)
    images = utils.load_samples(images_dir, ".png")
    landmarks = utils.load_samples(landmarks_dir, ".txt")
    if not images or not landmarks:
        print "Invalid session %s on AU %d" % (session, auid)
        exit()

    # A session's features come from its neutral (first) and peak (last) frames.
    initial_image = cv2.imread(images[0])
    final_image = cv2.imread(images[-1])

    gray, face, init_scale = utils.preprocess_face_image(initial_image)
    if not face:
        if auid == 1 or not MULTITHREAD:
            self.say("\t%d faces found. Discarding." % (init_scale,))
        return None
    gray, face, final_scale = utils.preprocess_face_image(final_image)
    if not face:
        if auid == 1 or not MULTITHREAD:
            self.say("\t%d faces found. Discarding." % (final_scale,))
        return None

    all_initial_landmarks = utils.load_landmarks(landmarks[0], init_scale)
    all_final_landmarks = utils.load_landmarks(landmarks[-1], final_scale)

    # Keep only the landmark indices that are examined for this AU.
    initial_landmarks = []
    final_landmarks = []
    for i in range(len(all_initial_landmarks)):
        if i in EXAMINED_POINTS:
            initial_landmarks.append(all_initial_landmarks[i])
            final_landmarks.append(all_final_landmarks[i])

    # The feature vector is the per-point distance between the neutral
    # and peak frames.
    distances = utils.distances(initial_landmarks, final_landmarks)
    return distances
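# Example usage (a sketch; "S052_004" is a hypothetical Cohn-Kanade session
# directory name and AU id 12 is only illustrative):
#
#     features = trainer.generate_features_from_session("S052_004", 12)
#     if features is not None:
#         # One distance per examined landmark: how far each point moved
#         # between the neutral first frame and the peak last frame.
#         training_rows.append(features)
#
# A None return means no usable face was detected in one of the two frames.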
def set_sequence(self, sequence, type=".png"): """ Accepts a list of numpy arrays (good for webcam data) or a directory name containing the images in the sequence (good for testing with Cohn-Kanade) """ self.sequence = [] if not sequence: return if isinstance(sequence, basestring): files = utils.load_samples(sequence, type) images = [] for image in files: image = cv2.imread(image) images.append(image) self.sequence = images elif isinstance(sequence[0], np.array): self.sequence = sequence