# Imports assumed by these snippets (the extract does not show them):
# numpy, menpo's PointCloud and print_dynamic, plus the project-local
# util module providing the shape helpers used below.
# CascadedShapeRegressor is assumed to be defined elsewhere in this project.
import numpy as np
from menpo.shape import PointCloud
from menpo.visualize import print_dynamic

import util


def build(self, images, gt_shapes, boxes):
    self.mean_shape = util.centered_mean_shape(gt_shapes)
    self.n_landmarks = self.mean_shape.n_points

    # Generate initial shapes by fitting the mean shape to each bounding box.
    print_dynamic('Generating initial shapes')
    shapes = np.array([util.fit_shape_to_box(self.mean_shape, box)
                       for box in boxes])

    # Augment the training set with perturbed initial estimates.
    print_dynamic('Perturbing initial estimates')
    if self.n_perturbations > 1:
        images, shapes, gt_shapes, boxes = util.perturb_shapes(
            images, shapes, gt_shapes, boxes, self.n_perturbations,
            mode='mean_shape')

    assert len(boxes) == len(images)
    assert len(shapes) == len(images)
    assert len(gt_shapes) == len(images)

    print('\nSize of augmented dataset: {} images.\n'.format(len(images)))

    weak_regressors = []
    for j in range(self.n_stages):
        # Calculate normalized targets: residuals mapped into the
        # mean-shape reference frame.
        deltas = [gt_shapes[i].points - shapes[i].points
                  for i in range(len(images))]
        targets = np.array([
            util.transform_to_mean_shape(shapes[i], self.mean_shape)
                .apply(deltas[i])
                .reshape((2 * self.n_landmarks,))
            for i in range(len(images))
        ])

        weak_regressor = self.weak_builder.build(
            images, targets, (shapes, self.mean_shape, j))

        # Update the current shape estimates with the predicted offsets.
        for i in range(len(images)):
            offset = weak_regressor.apply(images[i], shapes[i])
            shapes[i].points += offset.points
        weak_regressors.append(weak_regressor)
        print('\nBuilt outer regressor {}\n'.format(j))

    return CascadedShapeRegressor(self.n_landmarks, weak_regressors,
                                  self.mean_shape)

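# A minimal fitting sketch (not from the source): it mirrors the per-stage
# update rule used inside build() above, assuming the trained
# CascadedShapeRegressor stores its constructor arguments as attributes of
# the same names (mean_shape, weak_regressors) and the same util helpers
# are available.
def fit_sketch(regressor, image, box):
    # Initialise from the mean shape placed inside the detection box.
    shape = util.fit_shape_to_box(regressor.mean_shape, box)
    for weak_regressor in regressor.weak_regressors:
        # Each stage predicts an additive offset in the image frame.
        offset = weak_regressor.apply(image, shape)
        shape.points += offset.points
    return shape
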
def apply(self, image, shape):
    mean_to_shape = util.transform_to_mean_shape(
        shape, self.mean_shape).pseudoinverse()
    shape_indexed_features = self.feature_extractor.extract_features(
        image, shape, mean_to_shape)
    # Accumulate the offsets predicted by each primitive regressor in the
    # mean-shape frame (which normalises out scale and rotation), then map
    # the total back to the image frame.
    res = PointCloud(np.zeros((self.n_landmarks, 2)), copy=False)
    for r in self.regressors:
        offset = r.apply(shape_indexed_features, self.extra)
        res.points += offset.reshape((self.n_landmarks, 2))
    return mean_to_shape.apply(res)

def apply(self, image, shape):
    mean_to_shape = util.transform_to_mean_shape(
        shape, self.mean_shape).pseudoinverse()
    # Look up the active leaf indices and sum the corresponding rows of the
    # global regression matrix; this is equivalent to multiplying the matrix
    # by the sparse local binary feature vector.
    indices = self.feature_extractor.get_indices(image, shape, mean_to_shape)
    offsets = self.regression_matrix[indices].sum(axis=0)
    return mean_to_shape.apply(
        PointCloud(offsets.reshape((self.n_landmarks, 2)), copy=False))

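# Why indexed row-summing works: with a binary feature vector phi that is
# one exactly at `indices` and zero elsewhere, phi @ W equals
# W[indices].sum(axis=0). A self-contained toy check (hypothetical sizes,
# not from the source):
def _check_lbf_matvec():
    import numpy as np
    W = np.arange(12.0).reshape((6, 2))   # 6 binary features -> 2 outputs
    indices = np.array([1, 4])            # active features
    phi = np.zeros(6)
    phi[indices] = 1.0
    assert np.allclose(W[indices].sum(axis=0), phi @ W)
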
def apply(self, img, shape):
    n_landmarks = len(self.forests)
    n_trees = len(self.forests[0].regressors)
    n_leaves = len(self.forests[0].regressors[0].leaves)
    local_binary_features = np.zeros(n_landmarks * n_trees * n_leaves)
    mean_to_shape = util.transform_to_mean_shape(
        shape, self.mean_shape).pseudoinverse()
    # Each (landmark, tree) pair contributes exactly one active bit: the
    # index of the leaf that the shape-indexed pixels fall into.
    for landmark_i, f in enumerate(self.forests):
        pixels = f.feature_extractor.extract_features(img, shape,
                                                      mean_to_shape)
        for tree_i, tree in enumerate(f.regressors):
            leaf = tree.get_leaf_index(pixels)
            local_binary_features[landmark_i * n_trees * n_leaves
                                  + tree_i * n_leaves + leaf] = 1
    return local_binary_features

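# The flat index used above is the row-major flattening of the triple
# (landmark_i, tree_i, leaf). A toy check against np.ravel_multi_index
# (hypothetical sizes, not from the source):
def _check_lbf_index():
    import numpy as np
    n_landmarks, n_trees, n_leaves = 68, 5, 16
    landmark_i, tree_i, leaf = 3, 2, 7
    flat = landmark_i * n_trees * n_leaves + tree_i * n_leaves + leaf
    assert flat == np.ravel_multi_index(
        (landmark_i, tree_i, leaf), (n_landmarks, n_trees, n_leaves))
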
def to_mean(self, shape):
    return util.transform_to_mean_shape(shape, self.mean_shape)