def test(self):
    """Run the trained SDM cascade on the test images.

    Loads the per-stage regression matrices ``R``, intercepts ``b`` and the
    mean shape ``i`` saved by ``train_landmarks`` from
    ``./data/train_sdm.mat``. For each gray image: detects a face with dlib,
    extends and crops the bounding box, resizes the crop, applies the learned
    descent update(s) starting from the mean shape, then maps the predicted
    landmarks back into original-image coordinates.

    Returns:
        Shape: the predicted landmark shape (for the last processed image),
        or ``None`` when ``./data/train_sdm.mat`` does not exist.
    """
    if os.path.exists("./data/train_sdm.mat"):
        mat = io.loadmat("./data/train_sdm.mat")
        R = mat['R']        # stacked regression matrices, one per cascade stage
        b = mat['b']        # stacked intercept vectors, one per cascade stage
        shape_x = mat['i']  # mean shape vector, the initial estimate
        # accuracy = 0
        for idx, img in enumerate(self.grays):
            img = copy.deepcopy(self.grays[idx])
            # restart every image from the mean shape
            shape_x = mat['i']
            # first detected face only — assumes one face per image;
            # NOTE(review): raises IndexError when no face is found — confirm
            # inputs are guaranteed to contain a detectable face.
            faces = self.__get_dlib_rect(img)[0]
            rect = [
                faces.left(),
                faces.top(),
                faces.right(),
                faces.bottom()
            ]
            # get extended ground-truth bounding box
            new_rect = self.__crop(img, rect, self.extend_rate)
            # crop image to the extended box (rows = y range, cols = x range)
            cropped = img[int(new_rect[1]):int(new_rect[3]),
                          int(new_rect[0]):int(new_rect[2])]
            # resize image to the resolution used during training
            img = self.__resize(cropped, self.new_size)
            # self.__recompute_shape(cropped, self.shapes[idx], rect)
            # NOTE(review): only ONE descent stage is applied here even though
            # training produces self.iterates stages — confirm range(1) is
            # intentional and not a leftover from debugging.
            for i in range(1):
                shape = Shape.turn_back_to_point(shape_x)
                hog_descriptor = HOG(img, shape)
                hog_x = hog_descriptor.extract()
                # descent update: x <- x + H(x) @ R_i + b_i
                shape_x = shape_x + np.matmul(hog_x, R[i, :]) + b[i, :]
            # accuracy += self.evaluate(shape_x, self.shapes[idx].get_vector())
            # print("Already completed", idx + 1, "images.")
            # map the prediction back from resized-crop coordinates to the
            # original image frame: undo the resize scaling, then undo the
            # extended-crop offset.
            height, width = cropped.shape[:2]
            scale_x = self.new_size[0] / width
            scale_y = self.new_size[1] / height
            shape = Shape.turn_back_to_point(shape_x)
            for pt in shape.pts:
                pt.x = pt.x / scale_x - (
                    rect[2] - rect[0]) / self.extend_rate + rect[0]
                pt.y = pt.y / scale_y - (
                    rect[3] - rect[1]) / self.extend_rate + rect[1]
        # print("Accuracy:", accuracy/len(self.shapes))
        # NOTE(review): only the shape of the LAST image survives the loop;
        # earlier predictions are discarded (they were presumably consumed by
        # the commented-out accuracy accumulation) — confirm this is intended.
        return shape
    # model file missing: nothing to test, implicitly returns None
    return
def train_landmarks(self): """ Training face landmarks with Lasso function """ # crop and resize training images orders, detect_box = self.__get_detection() self.__reload(orders) self.__crop_and_resize(detect_box) hog = [] shapes = [] for idx in range(len(self.grays)): print("Calculating ", idx, "th HOG features of training images...") # get hog features hog_descriptor = HOG(self.grays[idx], self.shapes[idx]) h = hog_descriptor.extract() hog.append(h) # get shape vector list s = Shape.get_vector(self.shapes[idx]) shapes.append(s) # true hog features and true shapes hog_star = np.array(hog) shapes_star = np.array(shapes) # get mean shape as x0 #pdm = PointDistributionModel(self.shapes) #x0 = pdm.mean x0 = self.__get_mean_shape().get_vector() shape_x = np.array([x0.tolist()] * len(self.grays)) # parameters we need R = [] b = [] # training for i in range(self.iterates): # delta shape vector delta_x = shapes_star - shape_x # hog features of computed shapes hog_x = np.zeros_like(hog_star) for j in range(len(self.grays)): # get hog features hog_descriptor = HOG(self.grays[j], Shape.turn_back_to_point(shape_x[j])) h = hog_descriptor.extract() hog_x[j, :] = h # linear regression if self.alpha == 0: reg = LinearRegression(fit_intercept=False) else: #reg = LinearRegression() #reg = SVR() #reg = Ridge(alpha=self.alpha) reg = Lasso(alpha=self.alpha) print("Calculating with Linear Regression...") reg.fit(hog_x, delta_x) R.append(reg.coef_.T) b.append(reg.intercept_.T) shape_x = shape_x + np.matmul(hog_x, R[i]) + b[i] # x0 = x0.tolist() io.savemat("./data/train_sdm", {"R": R, "b": b, "i": x0})