def compute_cord_warp_batch(self, pair_df):
    """Build the warp-module inputs for one batch of (from, to) image pairs.

    Depending on ``self._warp_skip`` the batch holds:
      * 'full' — one uniform affine transform per pair, shape (B, 1, 8);
      * 'mask' — 10 per-part affine transforms (B, 10, 8) plus the
        corresponding pose masks (B, 10, H, W);
      * otherwise ('stn') — the 36 source + 36 target keypoint coordinates
        concatenated into a flat (B, 72) vector.
    """
    mode = self._warp_skip
    if mode == 'full':
        batch = [np.empty([self._batch_size, 1, 8])]
    elif mode == 'mask':
        batch = [np.empty([self._batch_size, 10, 8]),
                 np.empty([self._batch_size, 10] + list(self._image_size))]
    else:  # 'stn'
        batch = [np.empty([self._batch_size, 72])]
    for i, (_, pair) in enumerate(pair_df.iterrows()):
        src_row = self._annotations_file.loc[pair['from']]
        dst_row = self._annotations_file.loc[pair['to']]
        src_kp = pose_utils.load_pose_cords_from_strings(src_row['keypoints_y'], src_row['keypoints_x'])
        dst_kp = pose_utils.load_pose_cords_from_strings(dst_row['keypoints_y'], dst_row['keypoints_x'])
        if mode == 'mask':
            batch[0][i] = pose_transform.affine_transforms(src_kp, dst_kp)
            batch[1][i] = pose_transform.pose_masks(dst_kp, self._image_size)
        elif mode == 'full':
            batch[0][i] = pose_transform.estimate_uniform_transform(src_kp, dst_kp)
        else:  # 'stn': raw keypoints, source first then target
            batch[0][i][:36] = src_kp.reshape(-1)
            batch[0][i][36:] = dst_kp.reshape(-1)
    return batch
def compute_pose_map_batch(self, pair_df, direction):
    """Build a batch of pose representations for one side of each image pair.

    ``direction`` selects which image of the pair ('from' or 'to') to render.
    The representation is an 18-channel heat map when
    ``self._pose_rep_type == 'hm'`` and a 3-channel stickman image otherwise.
    When ``self._cache_pose_rep`` is set, renders are memoised as .npy files
    under ``self._tmp_pose``.
    """
    assert direction in ['to', 'from']
    depth = 18 if self._pose_rep_type == 'hm' else 3
    batch = np.empty([self._batch_size] + list(self._image_size) + [depth])

    def render(row):
        # Decode the keypoint strings and rasterise them in the configured form.
        cords = pose_utils.load_pose_cords_from_strings(row['keypoints_y'], row['keypoints_x'])
        if self._pose_rep_type == 'hm':
            return pose_utils.cords_to_map(cords, self._image_size)
        return pose_transform.make_stickman(cords, self._image_size)

    for i, (_, pair) in enumerate(pair_df.iterrows()):
        row = self._annotations_file.loc[pair[direction]]
        if not self._cache_pose_rep:
            batch[i] = render(row)
            continue
        cache_path = self._tmp_pose + pair[direction] + self._pose_rep_type + '.npy'
        if os.path.exists(cache_path):
            batch[i] = np.load(cache_path)
        else:
            pose = render(row)
            np.save(cache_path, pose)
            batch[i] = pose
    return batch
def compute_cord_warp_batch(self, pair_df, validation=False):
    """Build warp inputs for a batch of pairs, in both directions.

    Returns a 6-element list:
      [0] from->to affine transforms, shape (B, num_mask, 8)
      [1] target-pose masks,          shape (B, num_mask, H, W)
      [2] identity affine transforms  (same shape as [0])
      [3] copy of the target masks [1]
      [4] to->from affine transforms
      [5] source-pose masks

    ``validation`` only selects which image directory the pre-computed
    .npy body-mask files are looked up in.
    """
    transform_shape = [self._batch_size, self.num_mask, 8]
    mask_shape = [self._batch_size, self.num_mask] + list(self._image_size)
    batch = [np.empty(transform_shape), np.empty(mask_shape),
             np.empty(transform_shape), np.empty(mask_shape),
             np.empty(transform_shape), np.empty(mask_shape)]
    # Loop-invariant: identity affine rows [1,0,0,0, 1,0,0,0] for 10 masks
    # (was rebuilt on every iteration).
    identity = np.c_[np.ones([10, 1]), np.zeros([10, 3]),
                     np.ones([10, 1]), np.zeros([10, 3])]
    # The two original branches differed only in this directory — dedupe.
    images_dir = self._images_dir_test if validation else self._images_dir_train
    for i, (_, p) in enumerate(pair_df.iterrows()):
        fr = self._annotations_file.loc[p['from']]
        to = self._annotations_file.loc[p['to']]
        kp_array1 = pose_utils.load_pose_cords_from_strings(fr['keypoints_y'], fr['keypoints_x'])
        kp_array2 = pose_utils.load_pose_cords_from_strings(to['keypoints_y'], to['keypoints_x'])
        # Pre-computed masks live next to the images: foo.jpg -> foo.npy.
        npy_path_from = os.path.join(images_dir, p['from'])[:-3] + 'npy'
        npy_path_to = os.path.join(images_dir, p['to'])[:-3] + 'npy'
        batch[0][i] = pose_transform.affine_transforms(kp_array1, kp_array2, self._image_size, self.use_body_mask)
        batch[1][i] = pose_transform.pose_masks(kp_array2, self._image_size, self.use_body_mask, self.use_mask, npy_path_to, self.fat)
        batch[2][i] = identity
        batch[3][i] = batch[1][i]
        batch[4][i] = pose_transform.affine_transforms(kp_array2, kp_array1, self._image_size, self.use_body_mask)
        batch[5][i] = pose_transform.pose_masks(kp_array1, self._image_size, self.use_body_mask, self.use_mask, npy_path_from, self.fat)
    return batch
def get_gaussian_mask(self, P2_name, img_size):
    """Return the Gaussian limb mask (BP2 mask) for the target image ``P2_name``."""
    anno = self.annos.loc[P2_name]
    cords = load_pose_cords_from_strings(anno['keypoints_y'], anno['keypoints_x'])
    return make_gaussain_limb_masks(cords, img_size)
def create_masked_image(names, images, annotation_file):
    """Mask each image with the pose mask of its target (``name[1]``) annotation.

    ``annotation_file`` is a ':'-separated CSV with 'name', 'keypoints_y'
    and 'keypoints_x' columns; returns the list of masked images.
    """
    import pose_utils
    annotations = pd.read_csv(annotation_file, sep=':')
    result = []
    for name, image in zip(names, images):
        target_name = name[1]
        row = annotations[annotations['name'] == target_name].iloc[0]
        cords = pose_utils.load_pose_cords_from_strings(row['keypoints_y'], row['keypoints_x'])
        mask = pose_utils.produce_ma_mask(cords, image.shape[:2])
        result.append(image * mask[..., np.newaxis])
    return result
def check_valid(x):
    """Return True when the annotation row has a valid pose and is not a
    distractor image (names starting with '-1' or '0000')."""
    cords = pose_utils.load_pose_cords_from_strings(x['keypoints_y'], x['keypoints_x'])
    valid_pose = pose_check_valid(cords)
    is_distractor = x['name'].startswith(('-1', '0000'))
    return valid_pose and not is_distractor
# NOTE(review): the "test" annotation file and image dir deliberately point at
# the *train* split here — confirm this is intentional.
args.annotations_file_test = 'data/fasion-annotation-train.csv'
args.images_dir_test = 'data/fasion-dataset/train'
# Hoisted out of the loop: the annotation table is loop-invariant
# (it was re-read from disk on every iteration).
df = pd.read_csv(args.annotations_file_test, sep=':')
for n, img_pair in enumerate(os.listdir(in_folder)):
    # Pair files are named "<from>.jpg_<to>.jpg". Dots are escaped so '.'
    # matches only a literal dot (the original pattern matched any character).
    m = re.match(r'([A-Za-z0-9_]*\.jpg)_([A-Za-z0-9_]*\.jpg)', img_pair)
    fr, to = m.groups()
    gen_img = imread(os.path.join(in_folder, img_pair))
    # Keep only the third panel of the horizontally stacked output image.
    gen_img = gen_img[:, (2 * args.image_size[1]):]
    ano_fr = df[df['name'] == fr].iloc[0]
    ano_to = df[df['name'] == to].iloc[0]
    kp_fr = pose_utils.load_pose_cords_from_strings(ano_fr['keypoints_y'], ano_fr['keypoints_x'])
    kp_to = pose_utils.load_pose_cords_from_strings(ano_to['keypoints_y'], ano_to['keypoints_x'])
    # Union of the per-part boolean masks -> one float mask for the target pose.
    mask = pose_transform.pose_masks(kp_to, img_size=args.image_size).astype(bool)
    mask = np.array(reduce(np.logical_or, list(mask)))
    mask = mask.astype('float')
    pose_fr, _ = pose_utils.draw_pose_from_cords(kp_fr, args.image_size)
    pose_to, _ = pose_utils.draw_pose_from_cords(kp_to, args.image_size)
    cur_folder = os.path.join(out_folder, str(n))
    if not os.path.exists(cur_folder):
        os.makedirs(cur_folder)