import numpy as np
import torch

import augmentation_transforms  # project-local AutoAugment-style image ops
import found_policies           # project-local sets of augmentation policies


def random_augmentation(x):
    """Apply a randomly chosen augmentation policy (plus cutout) to each image in a batch."""
    aug_policies = found_policies.randaug_policies()
    x_augm = torch.zeros_like(x)
    for i in range(x.size(0)):
        chosen_policy = aug_policies[np.random.choice(len(aug_policies))]
        # The policy functions operate on HWC numpy arrays, so convert from CHW tensors.
        aug_image = augmentation_transforms.apply_policy(
            chosen_policy, x[i].permute(1, 2, 0).cpu().numpy())
        aug_image = augmentation_transforms.cutout_numpy(aug_image)
        tmp = torch.tensor(aug_image).permute(2, 0, 1)
        # Average over channels; the single-channel result is broadcast back into x_augm[i].
        x_augm[i] = torch.mean(tmp, dim=0, keepdim=True)
    return x_augm
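# Usage sketch (an assumption, not part of the original code): augment a batch
# of CHW images. In practice the batch would come from a DataLoader and would
# already be normalized with the dataset mean/std; the random tensor below is
# only a placeholder. Note that the channel averaging means every output
# channel carries the same grayscale content.
x_batch = torch.rand(4, 3, 32, 32)    # placeholder batch of 3-channel 32x32 images
x_batch_aug = random_augmentation(x_batch)
print(x_batch_aug.shape)              # torch.Size([4, 3, 32, 32])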
import os

import tensorflow as tf

# Relies on project-level pieces defined elsewhere: FLAGS,
# resampling_unsup_by_labels, _float_feature, save_tfrecord, and
# format_unsup_filename.


def proc_and_dump_unsup_data(sub_set_data, aug_copy_num, used_labels):
    """Normalize, augment, and serialize unsupervised images into a TFRecord file."""
    ori_images = sub_set_data["images"].copy()
    ori_images = resampling_unsup_by_labels(
        ori_images, sub_set_data["labels"].copy(), used_labels)
    print("unsup ori_images", ori_images.shape)

    # Shuffle the images before augmentation.
    image_idx = np.arange(len(ori_images))
    np.random.shuffle(image_idx)
    ori_images = ori_images[image_idx]

    # Scale to [0, 1], then standardize with the dataset mean and std.
    ori_images = ori_images / 255.0
    mean, std = augmentation_transforms.get_mean_and_std()
    ori_images = (ori_images - mean) / std

    # Both tasks currently map to the same RandAugment policy set.
    if FLAGS.task_name == "cifar10":
        aug_policies = found_policies.randaug_policies()
    elif FLAGS.task_name == "svhn":
        aug_policies = found_policies.randaug_policies()

    example_list = []
    for image in ori_images:
        chosen_policy = aug_policies[np.random.choice(len(aug_policies))]
        aug_image = augmentation_transforms.apply_policy(chosen_policy, image)
        aug_image = augmentation_transforms.cutout_numpy(aug_image)
        # Store the original/augmented pair as one tf.train.Example.
        example = tf.train.Example(features=tf.train.Features(
            feature={
                "ori_image": _float_feature(image.reshape(-1)),
                "aug_image": _float_feature(aug_image.reshape(-1)),
            }))
        example_list += [example]

    out_path = os.path.join(
        FLAGS.output_base_dir,
        format_unsup_filename(aug_copy_num),
    )
    save_tfrecord(example_list, out_path)
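# The helpers _float_feature and save_tfrecord are not shown in the original.
# A minimal sketch, assuming the conventional TensorFlow serialization pattern
# (both implementations below are assumptions, not the project's actual code):
def _float_feature(value):
    """Wrap a flat iterable of floats as a tf.train.Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=list(value)))


def save_tfrecord(example_list, out_path):
    """Write a list of tf.train.Example protos to a single TFRecord file."""
    with tf.io.TFRecordWriter(out_path) as writer:
        for example in example_list:
            writer.write(example.SerializeToString())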
def __call__(self, img):
    """Apply one randomly chosen policy from self.policies to a single image."""
    policy = self.policies[np.random.choice(len(self.policies))]
    final_img = augmentation_transforms.apply_policy(policy, img)
    return final_img
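# Sketch of an enclosing transform class (assumed; the class around __call__
# is not shown in the original). RandAugPolicy is a hypothetical name that
# holds self.policies and applies one randomly chosen policy per HWC numpy
# image, so it can be composed with other numpy-based transforms.
class RandAugPolicy(object):
    def __init__(self, policies=None):
        # Load the fixed policy set once unless a custom set is supplied.
        self.policies = policies if policies is not None else found_policies.randaug_policies()

    def __call__(self, img):
        policy = self.policies[np.random.choice(len(self.policies))]
        return augmentation_transforms.apply_policy(policy, img)


# Example use (also assumed): transform = RandAugPolicy(); aug = transform(image_hwc)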