Code example #1
def proc_and_dump_sup_data(sub_set_data, split, sup_size=-1):
    """Normalize a labeled subset and dump it to a TFRecord file.

    Args:
        sub_set_data: dict holding "images" and "labels" arrays.
        split: split name used in the output filename and log message.
        sup_size: number of supervised examples to keep; -1 keeps all.
    """
    images = sub_set_data["images"]
    labels = sub_set_data["labels"]
    if sup_size == -1:
        sel_images, sel_labels = images, labels
    else:
        sel_images, sel_labels = get_data_by_size_lim(images, labels, sup_size)

    # Scale pixels to [0, 1], then standardize with the dataset statistics.
    sel_images = sel_images / 255.0
    mean, std = augmentation_transforms.get_mean_and_std()
    sel_images = (sel_images - mean) / std

    # One tf.train.Example per (image, label) pair, flattened to 1-D features.
    example_list = [
        tf.train.Example(features=tf.train.Features(
            feature={
                "image": _float_feature(image.reshape(-1)),
                "label": _int64_feature(label.reshape(-1))
            }))
        for image, label in zip(sel_images, sel_labels)
    ]

    out_path = os.path.join(FLAGS.output_base_dir,
                            format_sup_filename(split, sup_size))
    tf.logging.info(">> saving {} {} examples to {}".format(
        len(example_list), split, out_path))
    save_tfrecord(example_list, out_path)
Code example #2
def data_augmentation(unsup):
    """Normalize unlabeled images and produce one augmented copy of each.

    Args:
        unsup: array of raw unlabeled images with pixel values in [0, 255].

    Returns:
        Tuple of (augmented images as an array, normalized input images).
    """
    # Scale pixels to [0, 1], then standardize with the dataset statistics.
    normalized = unsup / 255.0
    mean, std = augmentation_transforms.get_mean_and_std()
    normalized = (normalized - mean) / std

    policies = found_policies.cifar10_policies()
    augmented = []
    for img in normalized:
        # Pick a random augmentation policy for every image.
        policy = policies[np.random.choice(len(policies))]
        aug_img = augmentation_transforms.apply_policy(policy, img)
        aug_img = augmentation_transforms.cutout_numpy(aug_img)
        augmented.append(aug_img)
    return np.array(augmented), normalized
Code example #3 — file: preprocess.py, project: chritter/translator_UDA
def proc_and_dump_unsup_data(sub_set_data, aug_copy_num):
  """Shuffle, normalize and augment unlabeled images, then dump to TFRecord.

  Each record stores the normalized original image plus one augmented copy
  produced by a randomly chosen AutoAugment policy followed by cutout.

  Args:
    sub_set_data: dict holding an "images" array of raw pixels in [0, 255].
    aug_copy_num: index of this augmentation copy; used in the filename.

  Raises:
    ValueError: if FLAGS.task_name is neither "cifar10" nor "svhn".
  """
  ori_images = sub_set_data["images"].copy()

  # Shuffle so examples are written to the tfrecord in random order.
  image_idx = np.arange(len(ori_images))
  np.random.shuffle(image_idx)
  ori_images = ori_images[image_idx]

  # Scale pixels to [0, 1], then standardize with the dataset statistics.
  ori_images = ori_images / 255.0
  mean, std = augmentation_transforms.get_mean_and_std()
  ori_images = (ori_images - mean) / std

  if FLAGS.task_name == "cifar10":
    aug_policies = found_policies.cifar10_policies()
  elif FLAGS.task_name == "svhn":
    aug_policies = found_policies.svhn_policies()
  else:
    # Fail fast with a clear message instead of hitting a NameError on
    # aug_policies inside the loop below.
    raise ValueError("Unsupported task_name: {}".format(FLAGS.task_name))

  example_list = []
  for image in ori_images:
    # One randomly chosen policy per image.
    chosen_policy = aug_policies[np.random.choice(len(aug_policies))]
    aug_image = augmentation_transforms.apply_policy(chosen_policy, image)
    aug_image = augmentation_transforms.cutout_numpy(aug_image)

    # Write both the normalized original and its augmented version.
    example = tf.train.Example(features=tf.train.Features(
        feature={
            "ori_image": _float_feature(image.reshape(-1)),
            "aug_image": _float_feature(aug_image.reshape(-1)),
        }))
    example_list.append(example)

  out_path = os.path.join(
      FLAGS.output_base_dir,
      format_unsup_filename(aug_copy_num),
  )
  save_tfrecord(example_list, out_path)