Пример #1
0
    def data_loader(self):
        """Build one-shot iterators over the training/validation/test data.

        Returns:
            (train iterator, validation iterator, test iterator,
             number of training examples).
        """
        hps = self.params

        loader = ImageData(load_size=hps.image_size,
                           channels=3,
                           data_path=hps.data_path,
                           ids=hps.ids)
        loader.preprocess()

        train_dataset_num = len(loader.train_images)
        test_dataset_num = len(loader.test_images)

        print("train dataset ", train_dataset_num)
        print("test dataset ", test_dataset_num)

        train_slices = (loader.train_images, loader.train_angles_r,
                        loader.train_labels, loader.train_images_t,
                        loader.train_angles_g)
        test_slices = (loader.test_images, loader.test_angles_r,
                       loader.test_labels, loader.test_images_t,
                       loader.test_angles_g)

        train_dataset = tf.data.Dataset.from_tensor_slices(train_slices)
        test_dataset = tf.data.Dataset.from_tensor_slices(test_slices)

        # All three streams share the same map-and-batch transformation.
        batcher = map_and_batch(loader.image_processing,
                                hps.batch_size,
                                num_parallel_batches=8)

        # Train/valid streams are shuffled and repeated; test is a plain pass.
        train_stream = train_dataset.apply(
            shuffle_and_repeat(train_dataset_num)).apply(batcher)
        valid_stream = test_dataset.apply(
            shuffle_and_repeat(test_dataset_num)).apply(batcher)
        test_stream = test_dataset.apply(batcher)

        return (train_stream.make_one_shot_iterator(),
                valid_stream.make_one_shot_iterator(),
                test_stream.make_one_shot_iterator(),
                train_dataset_num)
Пример #2
0
    def make_dataset(self):
        """Build the TFRecord input pipeline and return one fetched batch op.

        Stores the dataset and its one-shot iterator on `self` and returns
        the `get_next()` op, which yields dicts {'data': ..., 'label': ...}.
        """
        self.filename = 'Dataset/cond_dataset.tfrecord'
        self.dataset = tf.data.TFRecordDataset(self.filename, num_parallel_reads=8)
        def _parse(example_proto):
            # Per-record parser: each example holds a scalar int64 label and a
            # raw byte string of bit-packed data.
            feature = {
                'label': tf.FixedLenFeature([], tf.int64), 
                'data': tf.FixedLenFeature([], tf.string)
            }
            parsed = tf.parse_single_example(example_proto, feature)
            data = tf.decode_raw(parsed['data'], tf.uint8)
            label = tf.cast(parsed['label'], tf.uint8)
            # Expand each packed byte into its 8 individual bits.
            data = tf.py_func(func=np.unpackbits, inp=[data], Tout=tf.uint8)
            label = tf.py_func(func=np.unpackbits, inp=[label], Tout=tf.uint8)
            data = tf.cast(data, tf.float32)
            label = tf.cast(label, tf.float32)
            data = tf.reshape(data, [self.channel_num, self.class_num, self.input_length])
            # One byte unpacks to exactly 8 bits; only the first
            # `channel_num` of them carry label information.
            label.set_shape([8])
            label = label[:self.channel_num]
            # Shape label to [channel_num, 1, 1] so it broadcasts over data.
            label = tf.expand_dims(tf.expand_dims(label, axis=-1), axis=-1)
            # Map bits {0, 1} to {-1, +1}.
            data = data * 2 - 1
            return {'data': data, 'label': label}

        self.dataset = self.dataset.apply(data.shuffle_and_repeat(buffer_size=16384))
        self.dataset = self.dataset.apply(
            data.map_and_batch(_parse, batch_size=256, num_parallel_batches=32, drop_remainder=True)
        )
        self.dataset = self.dataset.prefetch(48)
        # Stage batches directly into GPU memory.
        self.dataset = self.dataset.apply(data.prefetch_to_device('/gpu:0'))
        self.iterator = self.dataset.make_one_shot_iterator()
        batch = self.iterator.get_next()

        return batch
def load_mask(args):
    """Build a one-shot input pipeline yielding batches of masks."""
    # Mask file list: one path per line.
    with open(args.TRAIN_MASK_FLIST) as f:
        fnames = f.read().splitlines()

    inputs = tf.data.Dataset.from_tensor_slices(fnames)

    # Prefetch straight onto the GPU only in the single-GPU case.
    device = '/gpu:0' if args.NUM_GPUS == 1 else '/cpu:0'

    dataset_num = len(fnames)
    Image_Data_Class = ImageData(args=args)

    inputs = inputs.apply(shuffle_and_repeat(dataset_num))
    # Mask decoding runs in Python via py_func.
    inputs = inputs.map(
        lambda filename: tf.py_func(
            Image_Data_Class.mask_processing2, [filename], [tf.float32]),
        num_parallel_calls=3)
    inputs = inputs.batch(1, drop_remainder=True)
    inputs = inputs.apply(prefetch_to_device(device, 1))

    # Each get_next() call produces one batch of masks.
    masks = inputs.make_one_shot_iterator().get_next()
    return masks
Пример #4
0
 def _get_dataset_from_path(self):
   """Return a shuffled, repeated stream of training TFRecords."""
   file_names = tf.data.Dataset.list_files(self._train_data_path)
   shuffled = file_names.apply(
       contrib_data.shuffle_and_repeat(buffer_size=1000))
   # Interleave reads across many files; sloppy=True trades determinism
   # for throughput.
   return shuffled.apply(
       contrib_data.parallel_interleave(
           tf.data.TFRecordDataset, cycle_length=20, sloppy=True))
Пример #5
0
  def make_source_dataset(self, index, num_hosts):
    """Return the source dataset of raw TFRecords for this host.

    See base class. When no data_dir is configured, returns an infinite
    null-input dataset instead.
    """
    if not self.data_dir:
      tf.logging.info('Undefined data_dir implies null input')
      return tf.data.Dataset.range(1).repeat().map(self._get_null_input)

    # Shuffle the filenames to ensure better randomization.
    file_pattern = os.path.join(
        self.data_dir, 'train-*' if self.is_training else 'validation-*')

    # For multi-host training, we want each hosts to always process the same
    # subset of files.  Each host only sees a subset of the entire dataset,
    # allowing us to cache larger datasets in memory.
    dataset = tf.data.Dataset.list_files(file_pattern, shuffle=False)
    dataset = dataset.shard(num_hosts, index)

    # When caching, repetition is handled below via shuffle_and_repeat so
    # the cache is filled on the first epoch.
    if self.is_training and not self.cache:
      dataset = dataset.repeat()

    def fetch_dataset(filename):
      # One TFRecordDataset per file, with a large read buffer.
      buffer_size = 8 * 1024 * 1024  # 8 MiB per file
      dataset = tf.data.TFRecordDataset(filename, buffer_size=buffer_size)
      return dataset

    # Read the data from disk in parallel
    dataset = dataset.apply(
        contrib_data.parallel_interleave(
            fetch_dataset, cycle_length=64, sloppy=True))

    if self.cache:
      # Cache decoded records in memory, then shuffle/repeat indefinitely.
      dataset = dataset.cache().apply(
          contrib_data.shuffle_and_repeat(1024 * 16))
    else:
      dataset = dataset.shuffle(1024)
    return dataset
Пример #6
0
def get_dataset(train, src='data'):
    """Build a tf.data pipeline of (image, image, similarity-label) pairs.

    Similar pairs (label 1.0) are drawn from within a single character's
    folder; dissimilar pairs (label 0.0) pair files from two *different*
    characters.

    Args:
        train: passed through to get_files to select the split.
        src: root directory of the data.

    Returns:
        A batched, shuffled, repeating tf.data.Dataset.
    """
    files = get_files(train, src)
    pairs = []
    for l in files:  # for each folder of a character
        pairs.extend(itertools.combinations(l, 2))  # take all possible pairs
    # Sub-sample the similar pairs and append label=1.0 to each.
    pairs = random.sample(pairs, k=FLAGS.n_samples)
    pairs = [pair + (1.0, ) for pair in pairs]

    for _ in range(len(pairs)):  # sample an equal number of dissimilar pairs
        # BUGFIX: random.choices samples WITH replacement, so a "dissimilar"
        # pair could come from the same character yet be labeled 0.0.
        # random.sample guarantees two distinct characters.
        c1, c2 = random.sample(files, k=2)
        f1, f2 = random.choice(c1), random.choice(c2)  # sample from each
        pairs.append((f1, f2, 0.0))

    # Shuffle similar and dissimilar pairs together.
    f1, f2, labels = map(np.array, zip(*pairs))
    shuffle_idxs = np.arange(len(labels))
    np.random.shuffle(shuffle_idxs)
    f1 = f1[shuffle_idxs]
    f2 = f2[shuffle_idxs]
    labels = labels[shuffle_idxs]

    dataset = tf.data.Dataset.from_tensor_slices((f1, f2, labels)) \
                             .map(_parse_function, num_parallel_calls=8) \
                             .apply(shuffle_and_repeat(buffer_size=10_000)) \
                             .batch(FLAGS.batch_size) \
                             .prefetch(FLAGS.batch_size)
    return dataset
Пример #7
0
    def build_model(self):
        """Build the full GAN training graph.

        Sets up the GPU-prefetched input pipeline, discriminator/generator
        losses (with gradient penalty for WGAN/DRAGAN variants), Adam
        optimizers (moving-averaged for G), test-time sampling, and loss
        summaries.
        """
        """ Graph Input """
        # images
        Image_Data_Class = ImageData(self.img_size, self.c_dim, self.custom_dataset)
        inputs = tf.data.Dataset.from_tensor_slices(self.data)

        # Shuffle/repeat, preprocess+batch, then stage batches on the GPU.
        gpu_device = '/gpu:0'
        inputs = inputs.\
            apply(shuffle_and_repeat(self.dataset_num)).\
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        inputs_iterator = inputs.make_one_shot_iterator()

        self.inputs = inputs_iterator.get_next()

        # noises: one z vector per batch element, shaped for conv input.
        self.z = tf.truncated_normal(shape=[self.batch_size, 1, 1, self.z_dim], name='random_z')

        """ Loss Function """
        # output of D for real images
        real_logits = self.discriminator(self.inputs)

        # output of D for fake images
        fake_images = self.generator(self.z)
        fake_logits = self.discriminator(fake_images, reuse=True)

        # Gradient penalty only for WGAN-family and DRAGAN losses.
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan':
            GP = self.gradient_penalty(real=self.inputs, fake=fake_images)
        else:
            GP = 0

        # get loss for discriminator
        self.d_loss = discriminator_loss(self.gan_type, real=real_logits, fake=fake_logits) + GP

        # get loss for generator
        self.g_loss = generator_loss(self.gan_type, fake=fake_logits)

        """ Training """
        # divide trainable variables into a group for D and a group for G
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        g_vars = [var for var in t_vars if 'generator' in var.name]

        # optimizers — run after any pending UPDATE_OPS (e.g. batch norm).
        with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
            self.d_optim = tf.train.AdamOptimizer(self.d_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)

            # G uses a moving-average wrapper around Adam.
            self.opt = MovingAverageOptimizer(tf.train.AdamOptimizer(self.g_learning_rate, beta1=self.beta1, beta2=self.beta2), average_decay=self.moving_decay)

            self.g_optim = self.opt.minimize(self.g_loss, var_list=g_vars)

        """" Testing """
        # for test
        self.fake_images = self.generator(self.z, is_training=False, reuse=True)

        """ Summary """
        self.d_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.g_sum = tf.summary.scalar("g_loss", self.g_loss)
Пример #8
0
    def build_model(self):
        """Build the GAN training graph.

        Input comes either from a GPU-prefetched tf.data pipeline (custom
        dataset) or from a placeholder. Defines D/G losses (with gradient
        penalty for WGAN/DRAGAN), Adam optimizers, test-time sampling, and
        loss summaries.
        """
        """ Graph Input """
        # images
        if self.custom_dataset :
            Image_Data_Class = ImageData(self.img_size, self.c_dim)
            inputs = tf.data.Dataset.from_tensor_slices(self.data)

            # Shuffle/repeat, preprocess+batch, then stage batches on the GPU.
            gpu_device = '/gpu:0'
            inputs = inputs.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).apply(prefetch_to_device(gpu_device, self.batch_size))

            inputs_iterator = inputs.make_one_shot_iterator()

            self.inputs = inputs_iterator.get_next()

        else :
            # Built-in datasets are fed through a placeholder instead.
            self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.img_size, self.img_size, self.c_dim], name='real_images')

        # noises
        self.z = tf.placeholder(tf.float32, [self.batch_size, self.z_dim], name='z')

        """ Loss Function """
        # output of D for real images
        real_logits = self.discriminator(self.inputs)

        # output of D for fake images
        fake_images = self.generator(self.z)
        fake_logits = self.discriminator(fake_images, reuse=True)

        # Gradient penalty only for WGAN-family and DRAGAN losses.
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :
            GP = self.gradient_penalty(real=self.inputs, fake=fake_images)
        else :
            GP = 0

        # get loss for discriminator
        self.d_loss = discriminator_loss(self.gan_type, real=real_logits, fake=fake_logits) + GP

        # get loss for generator
        self.g_loss = generator_loss(self.gan_type, fake=fake_logits)

        """ Training """
        # divide trainable variables into a group for D and a group for G
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        g_vars = [var for var in t_vars if 'generator' in var.name]

        # optimizers
        self.d_optim = tf.train.AdamOptimizer(self.d_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)
        self.g_optim = tf.train.AdamOptimizer(self.g_learning_rate, beta1=self.beta1, beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)

        """" Testing """
        # for test
        self.fake_images = self.generator(self.z, is_training=False, reuse=True)

        """ Summary """
        self.d_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.g_sum = tf.summary.scalar("g_loss", self.g_loss)
Пример #9
0
 def _input_fn():
     """Build the (train or eval) input dataset from the TFRecord file.

     Closes over `self`, `batch_size`, `num_parallel_calls`, `training`,
     and `epochs` from the enclosing scope.
     """
     dataset = tf.data.TFRecordDataset(self.record_path)
     dataset = dataset.prefetch(batch_size * 4)
     dataset = dataset.map(self.decode_feature, num_parallel_calls=num_parallel_calls)
     if training:
         dataset = dataset.apply(map_and_batch(self.preprocess_for_train, batch_size=batch_size,
                                               num_parallel_calls=num_parallel_calls))
         # NOTE(review): shuffle_and_repeat is applied AFTER batching, so it
         # shuffles whole batches (buffer of `batch_size` batches), not
         # individual examples — confirm this is intended. Also note the
         # first positional argument is the shuffle buffer_size; passing
         # `batch_size` here gives a very small shuffle window.
         dataset = dataset.apply(shuffle_and_repeat(batch_size, epochs))
     else:
         dataset = dataset.apply(map_and_batch(self.preprocess_for_test, batch_size=batch_size,
                                               num_parallel_calls=num_parallel_calls))
     dataset = dataset.prefetch(AUTOTUNE)
     return dataset
Пример #10
0
def input_fn(params, sequence_schema, context_schema, part_files):
    """Input pipeline over shuffled, gzip-compressed TFRecord part files."""
    parse = partial(parse_example, context_schema, sequence_schema)

    dataset = Dataset.from_tensor_slices(part_files).shuffle(len(part_files))
    # Read several part files concurrently; sloppy ordering for throughput.
    dataset = dataset.apply(
        parallel_interleave(
            lambda file: TFRecordDataset(file, compression_type='GZIP'),
            cycle_length=params['cycle_length'],
            sloppy=True))
    dataset = dataset.map(parse, num_parallel_calls=cpu_count())
    dataset = dataset.apply(
        shuffle_and_repeat(params['buffer_size'], count=params['epochs']))
    return dataset.batch(params['batch_size'])
Пример #11
0
def load_dataset(opt, train=True):
    """Collect image/landmark/mask paths and return a one-shot iterator."""
    data_path = opt.data_path if train else opt.val_data_path

    image_path_all = []
    lm_path_all = []
    mask_path_all = []

    for dataset in data_path:
        image_path = sorted(glob.glob(dataset + '/' + '*.png'))

        # Landmark text files and mask images mirror the image filenames.
        lm_path_ = sorted(
            os.path.join(dataset, 'lm',
                         f.split('/')[-1].replace('png', 'txt'))
            for f in image_path)
        mask_path = sorted(
            os.path.join(dataset, 'mask', f.split('/')[-1])
            for f in image_path)

        # check if landmark binary files exist
        check_lm_bin(dataset, lm_path_)

        lm_path = sorted(
            os.path.join(dataset, 'lm_bin',
                         f.split('/')[-1].replace('png', 'bin'))
            for f in image_path)

        image_path_all += image_path
        mask_path_all += mask_path
        lm_path_all += lm_path

    dataset_num = len(image_path_all)

    dataset = tf.data.Dataset.from_tensor_slices(
        (image_path_all, lm_path_all, mask_path_all))
    dataset = dataset.apply(shuffle_and_repeat(dataset_num))
    dataset = dataset.apply(
        map_and_batch(_parse_function, opt.batch_size,
                      num_parallel_batches=4, drop_remainder=True))
    # buffer_size=None lets prefetch_to_device pick an optimal buffer size.
    dataset = dataset.apply(prefetch_to_device('/gpu:0', None))

    return dataset.make_one_shot_iterator()
Пример #12
0
def load_op(batch_size, iteration_count):
    """Build a shuffled, batched dataset of normal/shadow patch pairs.

    Returns an InitializerHook holding the initializable iterator, the feed
    placeholders, and the backing numpy matrices.
    """
    neighborhood = 0
    # NOTE(review): hard-coded local Windows path — parameterize for reuse.
    loader = GRSS2013DataLoader('C:/GoogleDriveBack/PHD/Tez/Source')
    data_set = loader.load_data(neighborhood, )

    # NOTE(review): relies on the loader's private _load_shadow_map; the
    # last channel of concrete_data is excluded before the call.
    shadow_map, shadow_ratio = loader._load_shadow_map(
        neighborhood,
        data_set.concrete_data[:, :, 0:data_set.concrete_data.shape[2] - 1])

    # normal_data_as_matrix, shadow_data_as_matrix = GRSS2013DataLoader.get_targetbased_shadowed_normal_data(data_set,
    #                                                                                     loader,
    #                                                                                     shadow_map,
    #                                                                                     loader.load_samples(0.1))

    normal_data_as_matrix, shadow_data_as_matrix = get_data_from_scene(
        data_set, loader, shadow_map)

    # normal_data_as_matrix, shadow_data_as_matrix = GRSS2013DataLoader.get_all_shadowed_normal_data(
    #     data_set,
    #     loader,
    #     shadow_map)

    # Strip the last channel from both data cubes.
    normal_data_as_matrix = normal_data_as_matrix[:, :, :,
                                                  0:normal_data_as_matrix.
                                                  shape[3] - 1]
    shadow_data_as_matrix = shadow_data_as_matrix[:, :, :,
                                                  0:shadow_data_as_matrix.
                                                  shape[3] - 1]

    # Placeholders so the (potentially large) arrays are fed at
    # iterator-initialization time rather than baked into the graph.
    normal = tf.placeholder(dtype=normal_data_as_matrix.dtype,
                            shape=normal_data_as_matrix.shape,
                            name='x')
    shadow = tf.placeholder(dtype=shadow_data_as_matrix.dtype,
                            shape=shadow_data_as_matrix.shape,
                            name='y')

    # Number of passes over the data implied by the requested iterations.
    epoch = int(
        (iteration_count * batch_size) / normal_data_as_matrix.shape[0])
    data_set = tf.data.Dataset.from_tensor_slices(
        (normal,
         shadow)).apply(shuffle_and_repeat(buffer_size=10000,
                                           count=epoch)).batch(batch_size)
    data_set_itr = data_set.make_initializable_iterator()

    return InitializerHook(data_set_itr, normal, shadow, normal_data_as_matrix,
                           shadow_data_as_matrix)
Пример #13
0
    def __init__(self,
                 file_pattern,
                 num_epochs,
                 batch_size,
                 image_size,
                 shuffle_buff_size=5000):
        """Build a shuffled, repeated, parsed-and-batched TFRecord dataset."""
        file_list = tf.data.Dataset.list_files(file_pattern)
        records = tf.data.TFRecordDataset(file_list, num_parallel_reads=4)

        records = records.apply(
            shuffle_and_repeat(shuffle_buff_size, num_epochs))
        # Parse each record (at the requested image size) and batch.
        records = records.apply(
            map_and_batch(lambda x: self._parse_fn(x, image_size),
                          batch_size,
                          num_parallel_batches=4))

        # Expose the finished pipeline on the instance.
        self.dataset = records
Пример #14
0
def load_op(batch_size, iteration_count, loader_name, path):
    """Build a shadow-augmented dataset of normal/shadow patch pairs.

    Returns an InitializerHook holding the initializable iterator, the feed
    placeholders, and the backing numpy matrices.
    """
    neighborhood = 0
    loader = get_class(loader_name + '.' + loader_name)(path)
    data_set = loader.load_data(neighborhood, True)

    shadow_map, shadow_ratio = loader.load_shadow_map(neighborhood, data_set)

    # normal_data_as_matrix, shadow_data_as_matrix = GRSS2013DataLoader.get_targetbased_shadowed_normal_data(data_set,
    #                                                                                     loader,
    #                                                                                     shadow_map,
    #                                                                                     loader.load_samples(0.1))

    if FLAGS.use_target_map:
        normal_data_as_matrix, shadow_data_as_matrix = get_data_from_scene(
            data_set, loader, shadow_map)
    else:
        normal_data_as_matrix, shadow_data_as_matrix = get_all_shadowed_normal_data(
            data_set, loader, shadow_map, multiply_shadowed_data=False)

    # normal_data_as_matrix, shadow_data_as_matrix = create_dummy_shadowed_normal_data(data_set, loader)

    # Strip the last channel from both data cubes.
    hsi_channel_len = normal_data_as_matrix.shape[3] - 1
    normal_data_as_matrix = normal_data_as_matrix[:, :, :, 0:hsi_channel_len]
    shadow_data_as_matrix = shadow_data_as_matrix[:, :, :, 0:hsi_channel_len]

    # Feed the arrays through placeholders at iterator-initialization time.
    normal = tf.placeholder(dtype=normal_data_as_matrix.dtype,
                            shape=normal_data_as_matrix.shape,
                            name='x')
    shadow = tf.placeholder(dtype=shadow_data_as_matrix.dtype,
                            shape=shadow_data_as_matrix.shape,
                            name='y')

    # Number of passes over the data implied by the requested iterations.
    epoch = int(
        (iteration_count * batch_size) / normal_data_as_matrix.shape[0])

    pipeline = tf.data.Dataset.from_tensor_slices((normal, shadow))
    pipeline = pipeline.apply(
        shuffle_and_repeat(buffer_size=10000, count=epoch))
    pipeline = pipeline.map(
        lambda param_x, param_y_: perform_shadow_augmentation_random(
            param_x, param_y_, shadow_ratio[0:hsi_channel_len]),
        num_parallel_calls=4)
    pipeline = pipeline.batch(batch_size)
    iterator = pipeline.make_initializable_iterator()

    return InitializerHook(iterator, normal, shadow, normal_data_as_matrix,
                           shadow_data_as_matrix)
Пример #15
0
    def make_source_dataset(self, index=0, num_hosts=1):
        """Return the source dataset of raw TFRecords for this split.

        See base class. When no data_dir is configured, returns an infinite
        null-input dataset instead.
        """

        if not self.data_dir:
            tf.logging.info("Undefined data_dir implies null input")
            return tf.data.Dataset.range(1).repeat().map(self._get_null_input)

        # Resolve filenames for the configured split.
        get_filenames = get_filenames_func()
        filenames, _ = get_filenames(self.dataset_split)
        dataset = tf.data.Dataset.from_tensor_slices(filenames)

        # When caching, repetition is handled below via shuffle_and_repeat
        # so the cache is filled on the first epoch.
        if self.is_training and not self.cache:
            if filenames is not None:
                dataset = dataset.shuffle(len(filenames))
            dataset = dataset.repeat()

        def fetch_dataset(filename):
            # One TFRecordDataset per file, with a large read buffer.
            buffer_size = 8 * 1024 * 1024  # 8 MB per file
            dataset = tf.data.TFRecordDataset(filename,
                                              buffer_size=buffer_size)
            return dataset

        cycle_length = 64
        shuffle_size = 1024

        # Read the data from disk in parallel. Evaluation keeps a
        # deterministic, single-file order (cycle_length=1, sloppy=False).
        if self.is_training:
            dataset = dataset.apply(
                contrib_data.parallel_interleave(fetch_dataset,
                                                 cycle_length=cycle_length,
                                                 sloppy=True))
        else:
            dataset = dataset.apply(
                contrib_data.parallel_interleave(fetch_dataset,
                                                 cycle_length=1,
                                                 sloppy=False))

        if self.cache:
            # Cache records in memory, then shuffle/repeat indefinitely.
            dataset = dataset.cache().apply(
                contrib_data.shuffle_and_repeat(shuffle_size))
        else:
            if self.is_training:
                dataset = dataset.shuffle(shuffle_size)
        return dataset
def training_nn_iterator(data_set, augmentation_info, batch_size, num_epochs, device):
    """Shuffle, augment, batch, and prefetch; return an initializable iterator."""
    pipeline = data_set.apply(shuffle_and_repeat(buffer_size=10000, count=num_epochs))

    # "Offline" mode augments individual samples before batching.
    if augmentation_info.offline_or_online is False:
        pipeline = add_augmentation_graph(pipeline, augmentation_info,
                                          perform_rotation_augmentation_random,
                                          perform_shadow_augmentation_random,
                                          perform_reflection_augmentation_random)

    pipeline = pipeline.batch(batch_size)

    # "Online" mode augments whole batches after batching.
    if augmentation_info.offline_or_online is True:
        pipeline = add_augmentation_graph(pipeline, augmentation_info,
                                          perform_rotation_augmentation,
                                          perform_shadow_augmentation,
                                          perform_reflection_augmentation)

    pipeline = pipeline.apply(prefetch_to_device(device, 10000))
    return pipeline.make_initializable_iterator()
Пример #17
0
    def build_model(self):
        """Build the GAN graph: input pipeline, losses, and variable groups.

        Sets up the GPU-prefetched tf.data input, the latent noise tensor,
        and the discriminator/generator losses, then partitions the
        trainable variables into D and G groups.
        """
        """ Graph Input """
        # images
        Image_Data_Class = ImageData(self.img_size, self.c_dim,
                                     self.custom_dataset)
        inputs = tf.data.Dataset.from_tensor_slices(self.data)

        # Shuffle/repeat, preprocess+batch, then stage batches on the GPU.
        gpu_device = '/gpu:0'
        inputs = inputs.\
            apply(shuffle_and_repeat(self.dataset_num)).\
            apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        inputs_iterator = inputs.make_one_shot_iterator()

        self.inputs = inputs_iterator.get_next()

        # noises: one z vector per batch element, shaped for conv input.
        self.z = tf.random_normal(shape=[self.batch_size, 1, 1, self.z_dim],
                                  name='random_z')
        """ Loss Function """
        # output of D for real images
        real_logits = self.discriminator(self.inputs)

        # output of D for fake images
        fake_images = self.generator(self.z)
        fake_logits = self.discriminator(fake_images, reuse=True)

        # get loss for discriminator
        self.d_loss = discriminator_loss(self.gan_type,
                                         real=real_logits,
                                         fake=fake_logits)

        # get loss for generator
        self.g_loss = generator_loss(self.gan_type, fake=fake_logits)

        # Divide trainable variables into a group for D and a group for G.
        # BUGFIX: the original loops rebound `d_vars = [var]` on every match,
        # keeping only the LAST matching variable; collect all of them
        # (matches the comprehension style used by the sibling build_model
        # implementations).
        t_vars = tf.trainable_variables()
        d_vars = [var for var in t_vars if 'discriminator' in var.name]
        g_vars = [var for var in t_vars if 'generator' in var.name]
Пример #18
0
def load_op(batch_size, iteration_count):
    """Build a dataset of matched GRSS2013/GRSS2018 patch pairs.

    Returns an InitializerHook wrapping the initializable iterator.
    """
    # Spatial/spectral repeat factors and the 2013->2018 scale ratio used by
    # the matched-patch generator below.
    hsi_2013_spatial_repeat = 10
    hsi_2018_spectral_repeat = 3

    hsi_2013_scale_diff = 2.5
    neighborhood = 0
    band_size = 144

    grss2013_data_set = GRSS2013DataLoader(FLAGS.path).load_data(neighborhood, False)
    grss2018_data_set = GRSS2018DataLoader(FLAGS.path).load_data(neighborhood, False)
    # Joint min/max over both sensors so both cubes share one normalizer.
    # The last channel of the 2013 concrete_data is excluded.
    hsi2013_global_minimum, hsi2018_global_minimum, hsi2013_global_maximum, hsi2018_global_maximum = \
        extract_common_normalizer(grss2013_data_set.concrete_data[:, :, 0:-1], grss2018_data_set.casi)

    # NOTE(review): hard-coded crop offsets — presumably these align the two
    # scenes' overlapping region; verify against the source imagery.
    hsi_grss2013 = grss2013_data_set.concrete_data[7:347, 256 + 8:1894 + 8, 0:-1]
    hsi_grss2018 = grss2018_data_set.casi[0 + 265:-350 + 265, 0:-75, :].astype(numpy.float32)

    # Optional visual sanity check of the scene alignment.
    debug_data = False
    if debug_data:
        test_match(band_size, hsi_2013_scale_diff,
                   hsi_2013_spatial_repeat, hsi_2018_spectral_repeat,
                   hsi_grss2013, hsi_grss2018)

    # In-place min/max normalization with the shared normalizer.
    hsi_grss2013 -= hsi2013_global_minimum
    hsi_grss2018 -= hsi2018_global_minimum
    hsi_grss2013 /= hsi2013_global_maximum.astype(numpy.float32)
    hsi_grss2018 /= hsi2018_global_maximum.astype(numpy.float32)

    # Both generator outputs share this patch shape.
    tensor_output_shape = [hsi_2013_spatial_repeat, hsi_2013_spatial_repeat, band_size]

    tensor_type_info = (tf.float32, tf.float32)
    # Number of passes over the data implied by the requested iterations.
    epoch = int((iteration_count * batch_size) / hsi_grss2013.shape[0])
    data_set = tf.data.Dataset.from_generator(
        lambda: _matched_data_generator(hsi_grss2013, hsi_grss2018, hsi_2013_spatial_repeat,
                                        hsi_2018_spectral_repeat, hsi_2013_scale_diff,
                                        0, 0, hsi_grss2013.shape[0], hsi_grss2013.shape[1],
                                        ceil(hsi_2013_scale_diff / 2)), tensor_type_info,
        (tensor_output_shape, tensor_output_shape))
    data_set = data_set.apply(shuffle_and_repeat(buffer_size=10000, count=epoch))
    data_set = data_set.batch(batch_size)
    data_set_itr = data_set.make_initializable_iterator()

    return InitializerHook(data_set_itr)
def load_img_scale_edge(args):
    """
    Load image data
    """
    # Training file list (index 0 of the dataset's flist pair): one image
    # path per line.
    with open(args.DATA_FLIST[args.DATASET][0]) as f:
        fnames = f.read().splitlines()

    inputs = tf.data.Dataset.from_tensor_slices(fnames)

    # Prefetch straight onto the GPU only in the single-GPU case.
    device = '/gpu:0' if args.NUM_GPUS == 1 else '/cpu:0'

    dataset_num = len(fnames)
    Image_Data_Class = ImageData(args=args)

    inputs = inputs.apply(shuffle_and_repeat(dataset_num))
    # Image/edge/scale processing runs in Python via py_func and yields four
    # float32 tensors per example.
    inputs = inputs.map(
        lambda filename: tf.py_func(
            Image_Data_Class.image_edge_scale_processing, [filename],
            [tf.float32, tf.float32, tf.float32, tf.float32]),
        num_parallel_calls=3)

    # One global batch spans all GPUs.
    global_batch = args.BATCH_SIZE * args.NUM_GPUS
    inputs = inputs.batch(global_batch, drop_remainder=True)
    inputs = inputs.apply(prefetch_to_device(device, global_batch))

    # Each get_next() call produces one batch of images/edges/scales.
    images_edges = inputs.make_one_shot_iterator().get_next()
    return images_edges
def create_dataset(role):
    """Build the shuffled, repeated, batched image dataset for `role`.

    Relies on module-level `filenames_dict`, `labels_dict`, `read_image`,
    `num_epochs`, `seed_train`, and `batch_size`.
    """
    filenames = filenames_dict[role]
    labels = labels_dict[role]
    num_images = len(filenames)

    # Decode images from their file paths.
    dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
    dataset = dataset.map(read_image, num_parallel_calls=2)

    # Deterministically shuffled (fixed seed) and repeated for num_epochs.
    dataset = dataset.apply(
        shuffle_and_repeat(buffer_size=num_images,
                           count=num_epochs,
                           seed=seed_train))
    dataset = dataset.batch(batch_size).prefetch(num_images // batch_size)

    return dataset
Пример #21
0
def load_op(batch_size, iteration_count, loader, data_set, shadow_map,
            shadow_ratio, reg_support_rate):
    """Create the shadow-augmentation input pipeline and its initializer hook.

    Gathers paired (normal, shadowed) patch matrices from the scene, feeds
    them through placeholders into a shuffled/repeated tf.data pipeline with
    random shadow augmentation, and returns an InitializerHook bundling the
    initializable iterator with the feed arrays.
    """
    # Source the paired normal/shadow matrices, either from an explicit
    # target map or from every shadowed pixel in the scene.
    if FLAGS.use_target_map:
        normal_mat, shadow_mat = get_data_from_scene(data_set, loader,
                                                     shadow_map)
    else:
        normal_mat, shadow_mat = get_all_shadowed_normal_data(
            data_set, loader, shadow_map, multiply_shadowed_data=False)

    # Drop the trailing channel and keep only the leading HSI bands
    # (the last channel is assumed to be non-spectral — TODO confirm).
    band_count = normal_mat.shape[3] - 1
    normal_mat = normal_mat[:, :, :, 0:band_count]
    shadow_mat = shadow_mat[:, :, :, 0:band_count]

    # Placeholders let the (potentially large) arrays be fed at iterator
    # initialization time instead of being embedded in the graph proto.
    normal_holder = tf.placeholder(dtype=normal_mat.dtype,
                                   shape=normal_mat.shape,
                                   name='x')
    shadow_holder = tf.placeholder(dtype=shadow_mat.dtype,
                                   shape=shadow_mat.shape,
                                   name='y')

    # Number of passes over the data implied by the requested iterations.
    epoch_count = int((iteration_count * batch_size) / normal_mat.shape[0])

    pipeline = tf.data.Dataset.from_tensor_slices(
        (normal_holder, shadow_holder))
    pipeline = pipeline.apply(
        shuffle_and_repeat(buffer_size=10000, count=epoch_count))
    pipeline = pipeline.map(
        lambda x_item, y_item: perform_shadow_augmentation_random(
            x_item, y_item, shadow_ratio[0:band_count], reg_support_rate),
        num_parallel_calls=4)
    pipeline = pipeline.batch(batch_size)
    iterator = pipeline.make_initializable_iterator()

    return InitializerHook(iterator, normal_holder, shadow_holder,
                           normal_mat, shadow_mat)
Пример #22
0
    def build_model(self):
        """Construct the training or testing graph.

        Train phase: builds the input pipelines for domains A and B, the
        A<->B generators and discriminators, the adversarial + cycle +
        identity + CAM losses, the Adam optimizers and the TensorBoard
        summaries.  Any other phase: builds only the test placeholders and
        the two one-way translation outputs.
        """
        if self.phase == 'train':
            self.lr = tf.placeholder(tf.float32, name='learning_rate')
            """ Input Image"""
            Image_Data_Class = ImageData(self.img_size, self.img_ch,
                                         self.augment_flag)

            trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
            trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)

            gpu_device = '/gpu:0'
            # Shuffle over the whole dataset, decode/augment and batch in a
            # fused op, then stage batches on the GPU ahead of consumption.
            trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(
                map_and_batch(Image_Data_Class.image_processing,
                              self.batch_size,
                              num_parallel_batches=16,
                              drop_remainder=True)).apply(
                                  prefetch_to_device(gpu_device, None))
            trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(
                map_and_batch(Image_Data_Class.image_processing,
                              self.batch_size,
                              num_parallel_batches=16,
                              drop_remainder=True)).apply(
                                  prefetch_to_device(gpu_device, None))

            trainA_iterator = trainA.make_one_shot_iterator()
            trainB_iterator = trainB.make_one_shot_iterator()

            # Each session run of these tensors pulls one fresh batch.
            self.domain_A = trainA_iterator.get_next()
            self.domain_B = trainB_iterator.get_next()
            """ Define Generator, Discriminator """
            x_ab, cam_ab = self.generate_a2b(self.domain_A)  # real a
            x_ba, cam_ba = self.generate_b2a(self.domain_B)  # real b

            # Cycle reconstructions: A -> B -> A and B -> A -> B.
            x_aba, _ = self.generate_b2a(x_ab, reuse=True)  # real b
            x_bab, _ = self.generate_a2b(x_ba, reuse=True)  # real a

            # Identity mappings: running an image through the generator that
            # targets its own domain should leave it unchanged.
            x_aa, cam_aa = self.generate_b2a(self.domain_A,
                                             reuse=True)  # fake b
            x_bb, cam_bb = self.generate_a2b(self.domain_B,
                                             reuse=True)  # fake a

            real_A_logit, real_A_cam_logit, real_B_logit, real_B_cam_logit = self.discriminate_real(
                self.domain_A, self.domain_B)
            fake_A_logit, fake_A_cam_logit, fake_B_logit, fake_B_cam_logit = self.discriminate_fake(
                x_ba, x_ab)
            """ Define Loss """
            # Gradient penalty is only defined for WGAN-family / DRAGAN
            # objectives.  NOTE(review): 'gradient_panalty' (sic) matches the
            # method name defined elsewhere on this class.
            if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan':
                GP_A, GP_CAM_A = self.gradient_panalty(real=self.domain_A,
                                                       fake=x_ba,
                                                       scope="discriminator_A")
                GP_B, GP_CAM_B = self.gradient_panalty(real=self.domain_B,
                                                       fake=x_ab,
                                                       scope="discriminator_B")
            else:
                GP_A, GP_CAM_A = 0, 0
                GP_B, GP_CAM_B = 0, 0

            # Adversarial losses combine the per-patch logits with the CAM
            # (attention) logits.
            G_ad_loss_A = (generator_loss(self.gan_type, fake_A_logit) +
                           generator_loss(self.gan_type, fake_A_cam_logit))
            G_ad_loss_B = (generator_loss(self.gan_type, fake_B_logit) +
                           generator_loss(self.gan_type, fake_B_cam_logit))

            D_ad_loss_A = (
                discriminator_loss(self.gan_type, real_A_logit, fake_A_logit) +
                discriminator_loss(self.gan_type, real_A_cam_logit,
                                   fake_A_cam_logit) + GP_A + GP_CAM_A)
            D_ad_loss_B = (
                discriminator_loss(self.gan_type, real_B_logit, fake_B_logit) +
                discriminator_loss(self.gan_type, real_B_cam_logit,
                                   fake_B_cam_logit) + GP_B + GP_CAM_B)

            reconstruction_A = L1_loss(x_aba, self.domain_A)  # reconstruction
            reconstruction_B = L1_loss(x_bab, self.domain_B)  # reconstruction

            identity_A = L1_loss(x_aa, self.domain_A)
            identity_B = L1_loss(x_bb, self.domain_B)

            cam_A = cam_loss(source=cam_ba, non_source=cam_aa)
            cam_B = cam_loss(source=cam_ab, non_source=cam_bb)

            # NOTE(review): generator A's cycle term is weighted from
            # reconstruction_B (and vice versa).  This cross pairing is kept
            # as written — confirm against the reference implementation.
            Generator_A_gan = self.adv_weight * G_ad_loss_A
            Generator_A_cycle = self.cycle_weight * reconstruction_B
            Generator_A_identity = self.identity_weight * identity_A
            Generator_A_cam = self.cam_weight * cam_A

            Generator_B_gan = self.adv_weight * G_ad_loss_B
            Generator_B_cycle = self.cycle_weight * reconstruction_A
            Generator_B_identity = self.identity_weight * identity_B
            Generator_B_cam = self.cam_weight * cam_B

            Generator_A_loss = Generator_A_gan + Generator_A_cycle + Generator_A_identity + Generator_A_cam
            Generator_B_loss = Generator_B_gan + Generator_B_cycle + Generator_B_identity + Generator_B_cam

            Discriminator_A_loss = self.adv_weight * D_ad_loss_A
            Discriminator_B_loss = self.adv_weight * D_ad_loss_B

            self.Generator_loss = Generator_A_loss + Generator_B_loss + regularization_loss(
                'generator')
            self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss + regularization_loss(
                'discriminator')
            """ Result Image """
            self.fake_A = x_ba
            self.fake_B = x_ab

            self.real_A = self.domain_A
            self.real_B = self.domain_B
            """ Training """
            # Split trainables by scope-name so each optimizer only updates
            # its own network.
            t_vars = tf.trainable_variables()
            G_vars = [var for var in t_vars if 'generator' in var.name]
            D_vars = [var for var in t_vars if 'discriminator' in var.name]

            self.G_optim = tf.train.AdamOptimizer(self.lr,
                                                  beta1=0.5,
                                                  beta2=0.999).minimize(
                                                      self.Generator_loss,
                                                      var_list=G_vars)
            self.D_optim = tf.train.AdamOptimizer(self.lr,
                                                  beta1=0.5,
                                                  beta2=0.999).minimize(
                                                      self.Discriminator_loss,
                                                      var_list=D_vars)
            """" Summary """
            self.all_G_loss = tf.summary.scalar("Generator_loss",
                                                self.Generator_loss)
            self.all_D_loss = tf.summary.scalar("Discriminator_loss",
                                                self.Discriminator_loss)

            self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss)
            self.G_A_gan = tf.summary.scalar("G_A_gan", Generator_A_gan)
            self.G_A_cycle = tf.summary.scalar("G_A_cycle", Generator_A_cycle)
            self.G_A_identity = tf.summary.scalar("G_A_identity",
                                                  Generator_A_identity)
            self.G_A_cam = tf.summary.scalar("G_A_cam", Generator_A_cam)

            self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss)
            self.G_B_gan = tf.summary.scalar("G_B_gan", Generator_B_gan)
            self.G_B_cycle = tf.summary.scalar("G_B_cycle", Generator_B_cycle)
            self.G_B_identity = tf.summary.scalar("G_B_identity",
                                                  Generator_B_identity)
            self.G_B_cam = tf.summary.scalar("G_B_cam", Generator_B_cam)

            self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss)
            self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss)

            # Track histogram / min / max / mean of every 'rho' variable.
            self.rho_var = []
            for var in tf.trainable_variables():
                if 'rho' in var.name:
                    self.rho_var.append(tf.summary.histogram(var.name, var))
                    self.rho_var.append(
                        tf.summary.scalar(var.name + "_min",
                                          tf.reduce_min(var)))
                    self.rho_var.append(
                        tf.summary.scalar(var.name + "_max",
                                          tf.reduce_max(var)))
                    self.rho_var.append(
                        tf.summary.scalar(var.name + "_mean",
                                          tf.reduce_mean(var)))

            g_summary_list = [
                self.G_A_loss, self.G_A_gan, self.G_A_cycle, self.G_A_identity,
                self.G_A_cam, self.G_B_loss, self.G_B_gan, self.G_B_cycle,
                self.G_B_identity, self.G_B_cam, self.all_G_loss
            ]

            g_summary_list.extend(self.rho_var)
            d_summary_list = [self.D_A_loss, self.D_B_loss, self.all_D_loss]

            self.G_loss = tf.summary.merge(g_summary_list)
            self.D_loss = tf.summary.merge(d_summary_list)

        else:
            """ Test """
            # Single-image placeholders for inference-time translation.
            self.test_domain_A = tf.placeholder(
                tf.float32, [1, self.img_size, self.img_size, self.img_ch],
                name='test_domain_A')
            self.test_domain_B = tf.placeholder(
                tf.float32, [1, self.img_size, self.img_size, self.img_ch],
                name='test_domain_B')

            self.test_fake_B, _ = self.generate_a2b(self.test_domain_A)
            self.test_fake_A, _ = self.generate_b2a(self.test_domain_B)
Пример #23
0
    def build_model(self):
        """Construct the identity/shape translation GAN graph.

        Builds the input pipelines, the shared generator (identity image +
        shape image -> output), the conditional discriminator, the L1 and
        adversarial losses, the Adam optimizers and the summaries.
        """
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        """ Input Image"""
        Image_Data_Class = ImageData(self.img_size, self.img_ch,
                                     self.augment_flag)

        trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
        trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)

        gpu_device = '/gpu:0'
        # Shuffle over the whole dataset, decode/augment and batch in a
        # fused op, then stage batches on the GPU ahead of consumption.
        trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(
            map_and_batch(Image_Data_Class.image_processing,
                          self.batch_size,
                          num_parallel_batches=16,
                          drop_remainder=True)).apply(
                              prefetch_to_device(gpu_device, self.batch_size))
        trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(
            map_and_batch(Image_Data_Class.image_processing,
                          self.batch_size,
                          num_parallel_batches=16,
                          drop_remainder=True)).apply(
                              prefetch_to_device(gpu_device, self.batch_size))

        trainA_iterator = trainA.make_one_shot_iterator()
        trainB_iterator = trainB.make_one_shot_iterator()

        # Three successive get_next() calls draw three *independent* batches
        # from domain A: one as the identity source, one as the shape source,
        # and one as an unrelated real sample for the discriminator.
        self.identity_A = trainA_iterator.get_next()
        self.shape_A = trainA_iterator.get_next()
        self.other_A = trainA_iterator.get_next()

        self.shape_B = trainB_iterator.get_next()

        # Single-image placeholders for inference-time translation.
        self.test_identity_A = tf.placeholder(
            tf.float32, [1, self.img_size, self.img_size, self.img_ch],
            name='test_identity_A')
        self.test_shape_B = tf.placeholder(
            tf.float32, [1, self.img_size, self.img_size, self.img_ch],
            name='test_shape_B')
        """ Define Generator, Discriminator """
        self.fake_same = self.generator(x_identity=self.identity_A,
                                        x_shape=self.shape_A)
        self.fake_diff = self.generator(x_identity=self.identity_A,
                                        x_shape=self.shape_B,
                                        reuse=True)
        # Cycle-style consistency: re-generating with the fake output as
        # shape (or identity) input should reproduce the expected image.
        fake_diff_shape = self.generator(x_identity=self.shape_B,
                                         x_shape=self.fake_diff,
                                         reuse=True)
        fake_diff_identity = self.generator(x_identity=self.fake_diff,
                                            x_shape=self.shape_B,
                                            reuse=True)

        real_logit = self.discriminator(x_identity=self.identity_A,
                                        x=self.other_A)
        fake_logit = self.discriminator(x_identity=self.identity_A,
                                        x=self.fake_diff,
                                        reuse=True)
        """ Define Loss """
        # NOTE(review): the fixed factor 64 rescales the adversarial term
        # relative to the L1 terms — presumably hand-tuned; confirm.
        g_identity_loss = self.adv_weight * generator_loss(
            self.gan_type, fake_logit) * 64
        g_shape_loss_same = self.L1_weight * L1_loss(self.fake_same,
                                                     self.shape_A)
        g_shape_loss_diff_shape = self.L1_weight * L1_loss(
            fake_diff_shape, self.shape_B) + self.L1_weight * L1_loss(
                self.fake_diff, self.shape_B)
        g_shape_loss_diff_identity = self.L1_weight * L1_loss(
            fake_diff_identity, self.fake_diff)

        self.Generator_loss = g_identity_loss + g_shape_loss_same + g_shape_loss_diff_shape + g_shape_loss_diff_identity
        self.Discriminator_loss = self.adv_weight * discriminator_loss(
            self.gan_type, real=real_logit, fake=fake_logit)
        """ Result Image """
        self.test_fake = self.generator(x_identity=self.test_identity_A,
                                        x_shape=self.test_shape_B,
                                        reuse=True)
        """ Training """
        # Split trainables by scope-name so each optimizer only updates its
        # own network.
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'generator' in var.name]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]

        self.G_optim = tf.train.AdamOptimizer(
            self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss,
                                                      var_list=G_vars)
        self.D_optim = tf.train.AdamOptimizer(
            self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss,
                                                      var_list=D_vars)
        """" Summary """
        self.G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss)
        self.D_loss = tf.summary.scalar("Discriminator_loss",
                                        self.Discriminator_loss)

        self.G_identity = tf.summary.scalar("G_identity", g_identity_loss)
        self.G_shape_loss_same = tf.summary.scalar("G_shape_loss_same",
                                                   g_shape_loss_same)
        self.G_shape_loss_diff_shape = tf.summary.scalar(
            "G_shape_loss_diff_shape", g_shape_loss_diff_shape)
        self.G_shape_loss_diff_identity = tf.summary.scalar(
            "G_shape_loss_diff_identity", g_shape_loss_diff_identity)

        self.G_loss_merge = tf.summary.merge([
            self.G_loss, self.G_identity, self.G_shape_loss_same,
            self.G_shape_loss_diff_shape, self.G_shape_loss_diff_identity
        ])
        self.D_loss_merge = tf.summary.merge([self.D_loss])
Пример #24
0
    def _input_fn(params=None):
        """Input function compatible with `Experiment` object.

    Pipeline proceeds as:
    -> generation examples from json or tfrecords
    -> limit to first N examples (if limit is not None)
    -> encode any _tokens fields as bytes
    -> cache the results to avoid recomputing
    -> Lookup tokens in GLOVE embeddings
    -> Shuffle & repeat

    Args:
      params: Params passed to the estimator. Contains 'batch_size'.

    Returns:
      A tuple of feature tensors and target tensors.

    Raises:
      ValueError: If filtering by length is set during eval mode.
    """
        # TPU execution is only supported for training.
        if not is_training:
            assert not is_tpu
        tf.logging.info('Data pipeline given params:\n%s' % params)
        # NOTE(review): batch_size is only bound when params is truthy —
        # callers must supply params or a NameError is raised at the
        # padded_batch calls below; confirm intended contract.
        if params:
            if is_training:
                batch_size = params['train_batch_size']
            else:
                batch_size = params['eval_batch_size']

        if use_generator:
            # Build examples on the fly from raw json via a Python generator.
            tf.logging.info('Building generator data pipeline.')
            if tokenizer == 'word':
                tf.logging.info('Using word split encoder.')
                tokenizer_fn = word_tokenize
            elif tokenizer == 'nltk':
                tf.logging.info('Using NLTK encoder.')
                tokenizer_fn = build_nltk_tokenizer()
            elif tokenizer == 'subword':
                tokenizer_fn = build_subword_tokenizer(vocab_path=vocab_path)
            else:
                raise ValueError('Unknown tokenizer %s' % tokenizer)
            ds = build_generator_pipeline(data_path=data_path,
                                          split=split,
                                          tokenizer_fn=tokenizer_fn,
                                          sort_by_length=sort_by_length,
                                          is_subword=tokenizer == 'subword')
        else:
            # Pre-tokenized examples stored as sharded TFRecords.
            tf.logging.info('Loading TFRecords from %s' % data_path)
            filenames = tf.gfile.Glob(os.path.join(data_path, '%s_*' % split))
            tf.logging.info(filenames)
            ds = build_tfrecord_pipeline(filenames=filenames)

        if max_length:
            if not is_training:
                raise ValueError(
                    'Unable to filter or resample examples at eval time.')
            if resample_too_long:

                tf.logging.info('Resampling with max length %s', max_length)

                def _resample(x):
                    # Re-draw a window of at most max_length from the example.
                    return resample_example(x, max_length=max_length)

                ds = ds.map(_resample, num_parallel_calls=16)
            else:
                # Filter out examples over our max length to avoid an error downstream.
                tf.logging.info('Filtering out examples over max length %s',
                                max_length)

                def _not_too_long(x):
                    return tf.greater_equal(tf.to_int32(max_length),
                                            tf.to_int32(x['context_length']))

                ds = ds.filter(_not_too_long)

        if limit:
            # Take the first N examples
            ds = ds.take(limit)

        if include_bytes:
            # Add fixed-width byte encodings of each *_tokens field.
            tokens_to_bytes = lambda x: _tokens_to_bytes(x, bytes_per_word)
            ds = ds.map(tokens_to_bytes, num_parallel_calls=16)

        if cache:
            # Cache dataset to avoid hitting the python generator after first epoch
            ds = ds.cache()

        # Subset that we should actually pass back to the caller
        # This is required to filter out tf.string fields which are not TPU
        # compatible
        # Specifically: id, context, question, context_tokens and question_tokens
        # are all string fields that will be removed.
        shapes, _ = get_shapes_and_types(is_tpu=is_tpu,
                                         max_length=max_length,
                                         include_bytes=include_bytes,
                                         bytes_per_word=bytes_per_word,
                                         include_ids=include_ids)

        if do_embedding:
            # Embed tokens with pretrained word vectors

            # Add in shape info before batching
            shapes['context_vecs'] = [max_length if is_tpu else None, 300]
            shapes['question_vecs'] = [max_length if is_tpu else None, 300]

            def lookup(words):
                # Do embedding lookups on a tensor of words
                # We use a py_func so we can check for both upper and lowercase.
                # TODO(ddohan): Revert to embedding_lookup for TPU support

                def embed_words(words):
                    def embed_word(word):
                        # Try raw / lowercase / decoded variants before 'UNK'.
                        # NOTE(review): implicitly returns None when no
                        # candidate (including 'UNK') is present — assumes
                        # 'UNK' always exists in embeddings; confirm.
                        utf_word = word.decode('utf-8')
                        for key in [
                                word,
                                word.lower(), utf_word,
                                utf_word.lower(), 'UNK'
                        ]:
                            if key in embeddings:
                                return embeddings[key]

                    emb = [embed_word(word) for word in words]
                    emb = np.array(emb, dtype=np.float32)
                    return emb

                embedded = tf.py_func(embed_words,
                                      inp=[words],
                                      Tout=[tf.float32],
                                      stateful=False)
                embedded = tf.reshape(embedded, [-1, 300])
                return embedded

            def lookup_fields(d):
                d['context_vecs'] = lookup(d['context_tokens'])
                d['question_vecs'] = lookup(d['question_tokens'])
                return d

            ds = ds.map(lookup_fields, num_parallel_calls=16)

        # Fused shuffle+repeat when both are requested; otherwise each alone.
        repeats = num_repeats if num_repeats else None
        if shuffle and repeats != 1:
            tf.logging.info('Shuffle and repeat size: %s' %
                            shuffle_buffer_size)
            ds = ds.apply(
                contrib_data.shuffle_and_repeat(
                    buffer_size=shuffle_buffer_size, count=repeats))
        elif repeats != 1:
            tf.logging.info('Repeating')
            ds = ds.repeat(count=repeats)
        elif shuffle:
            tf.logging.info('Shuffle size: %s' % shuffle_buffer_size)
            ds = ds.shuffle(buffer_size=shuffle_buffer_size)

        def filter_fields(example):
            # Keep only the fields declared in `shapes`.
            out = {}
            for k in shapes:
                out[k] = example[k]
            return out

        ds = ds.map(filter_fields, num_parallel_calls=16)

        if is_training:
            ds = ds.padded_batch(batch_size,
                                 padded_shapes=shapes,
                                 drop_remainder=True)
        else:
            # Never want to ignore values at eval time
            ds = ds.padded_batch(batch_size, padded_shapes=shapes)
        ds = ds.prefetch(
            tf.data.experimental.AUTOTUNE)  # Buffer a few batches ahead
        if do_embedding:
            iterator = ds.make_initializable_iterator()
            # Must be initialized when the graph is initialized and before the
            # dataset tensors are evaluated.
            # Run `tf.tables_initializer()` before getting first batch
            tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
                                 iterator.initializer)
        else:
            iterator = ds.make_one_shot_iterator()
        batch = iterator.get_next()

        if legacy_rename:
            batch = do_renames(batch)
        # Features and targets are the same dict (estimator convention here).
        return batch, batch
Пример #25
0
    def build_model(self):
        """Construct the segmap-to-image translation (SPADE-style) graph.

        Builds the (image, segmap) training pipeline and the test-segmap
        pipeline, the encoder/generator/discriminator, the adversarial +
        KL + VGG + feature-matching losses, the (optionally TTUR) Adam
        optimizers and the summaries.
        """
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        """ Input Image"""
        img_class = Image_data(self.img_height, self.img_width, self.img_ch,
                               self.segmap_ch, self.dataset_path,
                               self.augment_flag)
        img_class.preprocess()

        self.dataset_num = len(img_class.image)
        self.test_dataset_num = len(img_class.segmap_test)

        img_and_segmap = tf.data.Dataset.from_tensor_slices(
            (img_class.image, img_class.segmap))
        segmap_test = tf.data.Dataset.from_tensor_slices(img_class.segmap_test)

        gpu_device = '/gpu:0'
        # Shuffle over the whole dataset, decode/augment and batch in a
        # fused op, then stage batches on the GPU ahead of consumption.
        img_and_segmap = img_and_segmap.apply(
            shuffle_and_repeat(self.dataset_num)).apply(
                map_and_batch(img_class.image_processing,
                              self.batch_size,
                              num_parallel_batches=16,
                              drop_remainder=True)).apply(
                                  prefetch_to_device(gpu_device,
                                                     self.batch_size))

        # NOTE(review): the test-segmap shuffle buffer uses self.dataset_num
        # rather than self.test_dataset_num — harmless if dataset_num >=
        # test_dataset_num, but confirm this is intentional.
        segmap_test = segmap_test.apply(shuffle_and_repeat(
            self.dataset_num)).apply(
                map_and_batch(img_class.test_image_processing,
                              batch_size=self.batch_size,
                              num_parallel_batches=16,
                              drop_remainder=True)).apply(
                                  prefetch_to_device(gpu_device,
                                                     self.batch_size))

        img_and_segmap_iterator = img_and_segmap.make_one_shot_iterator()
        segmap_test_iterator = segmap_test.make_one_shot_iterator()

        self.real_x, self.real_x_segmap, self.real_x_segmap_onehot = img_and_segmap_iterator.get_next(
        )
        self.real_x_segmap_test, self.real_x_segmap_test_onehot = segmap_test_iterator.get_next(
        )
        """ Define Generator, Discriminator """
        fake_x, x_mean, x_var = self.image_translate(
            segmap_img=self.real_x_segmap_onehot, x_img=self.real_x)
        real_logit, fake_logit = self.image_discriminate(
            segmap_img=self.real_x_segmap_onehot,
            real_img=self.real_x,
            fake_img=fake_x)

        # Gradient penalty is only defined for WGAN-family / DRAGAN losses.
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan':
            GP = self.gradient_penalty(real=self.real_x,
                                       segmap=self.real_x_segmap_onehot,
                                       fake=fake_x)
        else:
            GP = 0
        """ Define Loss """
        g_adv_loss = self.adv_weight * generator_loss(self.gan_type,
                                                      fake_logit)
        # KL term regularizes the encoder's latent distribution.
        g_kl_loss = self.kl_weight * kl_loss(x_mean, x_var)
        g_vgg_loss = self.vgg_weight * VGGLoss()(self.real_x, fake_x)
        g_feature_loss = self.feature_weight * feature_loss(
            real_logit, fake_logit)
        g_reg_loss = regularization_loss('generator') + regularization_loss(
            'encoder')

        d_adv_loss = self.adv_weight * (
            discriminator_loss(self.gan_type, real_logit, fake_logit) + GP)
        d_reg_loss = regularization_loss('discriminator')

        self.g_loss = g_adv_loss + g_kl_loss + g_vgg_loss + g_feature_loss + g_reg_loss
        self.d_loss = d_adv_loss + d_reg_loss
        """ Result Image """
        self.fake_x = fake_x
        # Same segmap, but style sampled from the prior instead of encoded.
        self.random_fake_x, _, _ = self.image_translate(
            segmap_img=self.real_x_segmap_onehot,
            random_style=True,
            reuse=True)
        """ Test """
        self.test_segmap_image = tf.placeholder(tf.float32, [
            1, self.img_height, self.img_width,
            len(img_class.color_value_dict)
        ])
        self.random_test_fake_x, _, _ = self.image_translate(
            segmap_img=self.test_segmap_image, random_style=True, reuse=True)

        # Guide image supplies the style for guided test-time translation.
        self.test_guide_image = tf.placeholder(
            tf.float32, [1, self.img_height, self.img_width, self.img_ch])
        self.guide_test_fake_x, _, _ = self.image_translate(
            segmap_img=self.test_segmap_image,
            x_img=self.test_guide_image,
            reuse=True)
        """ Training """
        t_vars = tf.trainable_variables()
        G_vars = [
            var for var in t_vars
            if 'encoder' in var.name or 'generator' in var.name
        ]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]

        # TTUR (two time-scale update rule): slower generator, faster
        # discriminator, with the betas fixed to (0.0, 0.9).
        if self.TTUR:
            beta1 = 0.0
            beta2 = 0.9

            g_lr = self.lr / 2
            d_lr = self.lr * 2

        else:
            beta1 = self.beta1
            beta2 = self.beta2
            g_lr = self.lr
            d_lr = self.lr

        self.G_optim = tf.train.AdamOptimizer(
            g_lr, beta1=beta1, beta2=beta2).minimize(self.g_loss,
                                                     var_list=G_vars)
        self.D_optim = tf.train.AdamOptimizer(
            d_lr, beta1=beta1, beta2=beta2).minimize(self.d_loss,
                                                     var_list=D_vars)
        """" Summary """
        self.summary_g_loss = tf.summary.scalar("g_loss", self.g_loss)
        self.summary_d_loss = tf.summary.scalar("d_loss", self.d_loss)

        self.summary_g_adv_loss = tf.summary.scalar("g_adv_loss", g_adv_loss)
        self.summary_g_kl_loss = tf.summary.scalar("g_kl_loss", g_kl_loss)
        self.summary_g_vgg_loss = tf.summary.scalar("g_vgg_loss", g_vgg_loss)
        self.summary_g_feature_loss = tf.summary.scalar(
            "g_feature_loss", g_feature_loss)

        g_summary_list = [
            self.summary_g_loss, self.summary_g_adv_loss,
            self.summary_g_kl_loss, self.summary_g_vgg_loss,
            self.summary_g_feature_loss
        ]
        d_summary_list = [self.summary_d_loss]

        self.G_loss = tf.summary.merge(g_summary_list)
        self.D_loss = tf.summary.merge(d_summary_list)
Пример #26
0
def _initialize_tf_dataset(file_names,
                           augment,
                           over_sample,
                           shuffle,
                           target_size,
                           normalize,
                           nb_workers=8,
                           batch_size=64,
                           shuffle_buffer_size=3000,
                           input_type="img",
                           nr_epochs=1):
  """Build a batched ``tf.data.Dataset`` over TFRecord image files.

  Each record is expected to hold ``label`` (int64 scalar), ``shape``
  (raw int64 bytes) and ``image`` (raw uint8 bytes). Images are optionally
  augmented (random flips, rotation, shift via pad+crop, zoom) and
  normalized with fixed per-channel statistics, then decoded and batched
  in parallel with the remainder dropped.

  Args:
    file_names: list of TFRecord file paths.
    augment: if True, apply random geometric augmentation.
    over_sample: unused; kept for interface compatibility.
    shuffle: if True, shuffle-and-repeat for ``nr_epochs`` epochs.
    target_size: [height, width] of decoded images (any sequence accepted).
    normalize: if True, standardize with the hard-coded channel mean/std.
    nb_workers: number of parallel decode batches; also the prefetch depth.
    batch_size: examples per batch.
    shuffle_buffer_size: shuffle buffer length in examples.
    input_type: ".jpeg" selects 3-channel reshape, anything else reshapes
      to ``target_size`` only. NOTE(review): the default "img" never equals
      ".jpeg", so the 3-channel path is off by default — confirm the
      intended sentinel values.
    nr_epochs: number of dataset repeats when ``shuffle`` is True.

  Returns:
    A ``tf.data.Dataset`` yielding ``({'shape': ..., 'image': ...}, label)``.
  """
  # Imported lazily so the module can be imported without TensorFlow.
  import numpy as np
  import tensorflow as tf
  from tensorflow.contrib.data import map_and_batch
  from tensorflow.contrib.data import shuffle_and_repeat

  # Accept tuples (or any sequence) as well as lists for target_size,
  # since it is concatenated with [3] below.
  if not isinstance(target_size, list):
    target_size = list(target_size)

  with tf.name_scope('input_pipeline'):
    dataset = tf.data.TFRecordDataset(file_names)
    if shuffle:
      dataset = dataset.apply(
          shuffle_and_repeat(shuffle_buffer_size, nr_epochs))

    def _decode_and_augment_image(example_proto):
      """Parse one serialized example; augment/normalize the image."""
      keys_to_features = {
          'label': tf.FixedLenFeature([], tf.int64),
          'shape': tf.FixedLenFeature([], tf.string),
          'image': tf.FixedLenFeature([], tf.string),
      }
      tfrecord_features = tf.parse_single_example(example_proto,
                                                  keys_to_features)

      image = tf.decode_raw(tfrecord_features['image'], tf.uint8)
      shape = tf.decode_raw(tfrecord_features['shape'], tf.int64)
      if input_type == ".jpeg":
        image = tf.reshape(image, target_size + [3])
      else:
        image = tf.reshape(image, target_size)
      label = tfrecord_features['label']

      if augment:
        # Random flips plus a uniform rotation in [-180, 180) degrees.
        image = tf.image.random_flip_left_right(image)
        image = tf.image.random_flip_up_down(image)
        degrees = tf.random_uniform((), minval=-180, maxval=180)
        image = tf.contrib.image.rotate(image, degrees)

        # Random shift of up to 5% of the image size, implemented as
        # symmetric pad followed by a random crop back to target_size.
        width_shift = tf.random_uniform((), minval=0, maxval=0.05)
        height_shift = tf.random_uniform((), minval=0, maxval=0.05)

        # NOTE(review): horizontal_pad is derived from target_size[0] but
        # ends up padding the first (row) axis below — confirm the
        # height/width pairing is intentional.
        horizontal_pad = tf.cast(
            tf.ceil(width_shift * target_size[0]), tf.int32)
        vertical_pad = tf.cast(tf.ceil(height_shift * target_size[1]), tf.int32)

        padding = tf.stack([
            horizontal_pad, horizontal_pad, vertical_pad, vertical_pad,
            tf.constant(0),
            tf.constant(0)
        ])
        padding = tf.reshape(padding, (3, 2))

        image = tf.pad(image, padding)
        image = tf.random_crop(image, target_size + [3])

        # Random zoom of +/-10%: crop/pad to the zoomed square, then
        # resize back to target_size.
        zoom = tf.random_uniform((), minval=-0.1, maxval=0.1)
        new_dim = tf.cast(tf.ceil((1 - zoom) * target_size[0]), dtype=tf.int32)

        image = tf.image.resize_image_with_crop_or_pad(image, new_dim, new_dim)

        image = tf.image.resize_images(
            image, target_size, method=tf.image.ResizeMethod.BILINEAR)

      if normalize:
        # Fixed per-channel RGB statistics, presumably computed on the
        # training set — TODO confirm against the dataset actually used.
        std = tf.constant(
            np.array([70.53946096, 51.71475228, 43.03428563]), dtype=tf.float32)
        std = tf.expand_dims(tf.expand_dims(std, axis=0), axis=0)

        mean = tf.constant(
            np.array([108.64628601, 75.86886597, 54.34005736]),
            dtype=tf.float32)
        mean = tf.expand_dims(tf.expand_dims(mean, axis=0), axis=0)

        image = (tf.cast(image, dtype=tf.float32) - mean) / std

      label = tf.reshape(label, [1])
      if input_type == ".jpeg":
        image = tf.reshape(image, target_size + [3])
      else:
        image = tf.reshape(image, target_size)

      return {'shape': shape, 'image': image}, label

    dataset = dataset \
        .apply(map_and_batch(_decode_and_augment_image, batch_size=batch_size, num_parallel_batches=nb_workers,
                             drop_remainder=True)) \
        .prefetch(nb_workers)

    return dataset
Пример #27
0
# Merge every summary op in the default graph; keep at most the 3 most
# recent checkpoints on disk.
merged_summary_op = tf.summary.merge_all()
saver = tf.train.Saver(max_to_keep=3)

# Training examples come from a single TFRecord file of packed bit data.
dataset = tf.data.TFRecordDataset('Dataset/dataset_1.tfrecord')
def _parse(example_proto):
    """Decode one serialized example into a [CLASS_NUM, 600] tensor.

    The stored bytes are bit-unpacked, cast to float32, and rescaled
    from {0, 1} to {-1, +1}.
    """
    spec = {'roll' : tf.FixedLenFeature([], tf.string)}
    example = tf.parse_single_example(example_proto, spec)
    raw_bytes = tf.decode_raw(example['roll'], tf.uint8)
    bits = tf.py_func(func=np.unpackbits, inp=[raw_bytes], Tout=tf.uint8)
    roll = tf.reshape(tf.cast(bits, tf.float32), [CLASS_NUM, 600])
    return roll * 2 - 1

# Shuffle with a 60k-example buffer, repeat 3 passes, then decode and batch
# with 2 parallel batches; `data` is presumably tf.contrib.data — TODO
# confirm against the file's imports.
dataset = dataset.apply(data.shuffle_and_repeat(buffer_size=60000, count=3))
dataset = dataset.apply(data.map_and_batch(_parse, batch_size=batch_size, num_parallel_batches=2))
iterator = dataset.prefetch(batch_size).make_one_shot_iterator()
real_input_next_element = iterator.get_next()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    
    if os.path.exists(model_path):
        # NOTE(review): tf.train.Saver.restore returns None, so this prints
        # "Model restored from file: None" — printing model_path was likely
        # intended.
        load_path = saver.restore(sess, model_path)
        print("Model restored from file: %s" % load_path)
        
    for i in range(total_batch):
        tfdata = sess.run(real_input_next_element)
        reshape_tfdata = tfdata.reshape([-1, CLASS_NUM, 600])    # restore per-example layout
        cuted_tfdata = reshape_tfdata[:, :, :INPUT_LENGTH]
Пример #28
0
    def build_model(self, A):
        """Build the train/test graph: data pipeline, G/D, losses, optimizers, summaries.

        `A` is an auxiliary image tensor that is warped by the generator's
        predicted flow fields (`w_fake`, `w_recon`) to form a warp-cycle
        consistency loss. Exact semantics of `A` depend on the caller —
        TODO confirm.
        """
        self.lr = tf.placeholder(tf.float32, name='learning_rate')

        """ Input Image"""
        Image_data_class = ImageData(load_size=self.img_size, channels=self.img_ch, data_path=self.dataset_path, selected_attrs=self.selected_attrs, augment_flag=self.augment_flag)
        Image_data_class.preprocess()

        train_dataset_num = len(Image_data_class.train_dataset)
        test_dataset_num = len(Image_data_class.test_dataset)

        train_dataset = tf.data.Dataset.from_tensor_slices((Image_data_class.train_dataset, Image_data_class.train_dataset_label, Image_data_class.train_dataset_fix_label))
        test_dataset = tf.data.Dataset.from_tensor_slices((Image_data_class.test_dataset, Image_data_class.test_dataset_label, Image_data_class.test_dataset_fix_label))

        # Shuffle over the full dataset, decode/batch in parallel, and
        # prefetch directly to GPU memory.
        gpu_device = '/gpu:0'
        train_dataset = train_dataset.\
            apply(shuffle_and_repeat(train_dataset_num)).\
            apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        test_dataset = test_dataset.\
            apply(shuffle_and_repeat(test_dataset_num)).\
            apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        train_dataset_iterator = train_dataset.make_one_shot_iterator()
        test_dataset_iterator = test_dataset.make_one_shot_iterator()


        self.x_real, label_org, label_fix_list = train_dataset_iterator.get_next() # Input image / Original domain labels
        label_trg = tf.random_shuffle(label_org) # Target domain labels
        label_fix_list = tf.transpose(label_fix_list, perm=[1, 0, 2])

        self.x_test, test_label_org, test_label_fix_list = test_dataset_iterator.get_next()  # Input image / Original domain labels
        test_label_fix_list = tf.transpose(test_label_fix_list, perm=[1, 0, 2])

        self.custom_image = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='custom_image') # Custom Image
        custom_label_fix_list = tf.transpose(create_labels(self.custom_label, self.selected_attrs), perm=[1, 0, 2])

        """ Define Generator, Discriminator """
        # binary label transformation
        # hat_c samples per-attribute deltas in {-1, 0, +1} with
        # probabilities [0.25, 0.5, 0.25]; -hat_c undoes the edit for the
        # reconstruction pass.
        dist = tf_contrib.distributions.Categorical(probs=[0.25, 0.5, 0.25])
        hat_c = tf.cast(dist.sample([self.batch_size, self.c_dim]) - 1, dtype ='float32')

        x_fake, w_fake = self.generator(self.x_real, hat_c) # real a
        x_recon, w_recon = self.generator(x_fake, -hat_c, reuse=True) # real b

        real_logit, real_cls = self.discriminator(self.x_real)
        fake_logit, fake_cls = self.discriminator(x_fake, reuse=True)

        # warp cycle
        A_fake = tf_contrib.image.dense_image_warp(A, w_fake)
        A_cycle = tf_contrib.image.dense_image_warp(A_fake, w_recon)

        """ Define Loss """
        # Gradient penalty only for WGAN-style / DRAGAN objectives.
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :
            GP = self.gradient_panalty(real=self.x_real, fake=x_fake)
        else :
            GP = 0

        g_adv_loss = generator_loss(loss_func=self.gan_type, fake=fake_logit)
        g_cls_loss = binary_label_loss(hat_c, fake_cls, self.c_dim)

        # Mean squared error between A and its warp-then-unwarp cycle.
        warp_cycle_loss = tf.reduce_mean((A_cycle - A)** 2, axis=[1,2,3])
        g_rec_loss = tf.reduce_mean(warp_cycle_loss)

        # Total-variation penalty keeps the predicted flow field smooth.
        smooth_loss = tf.reduce_mean(total_variation(w_fake))
        d_adv_loss = discriminator_loss(loss_func=self.gan_type, real=real_logit, fake=fake_logit)
        d_cls_loss = classification_loss(logit=real_cls, label=label_org)

        self.d_loss = self.adv_weight * d_adv_loss + self.gp_weight * GP + self.cls_weight * d_cls_loss
        self.g_loss = self.adv_weight * g_adv_loss + self.cls_weight * g_cls_loss + self.rec_weight * g_rec_loss + self.smooth_weight * smooth_loss


        """ Result Image """
        # One translated image per fixed target label.
        self.x_fake_list = tf.map_fn(lambda x : self.generator(self.x_real, x, reuse=True)[0], label_fix_list, dtype=tf.float32)


        """ Test Image """
        self.x_test_fake_list = tf.map_fn(lambda x : self.generator(self.x_test, x, reuse=True)[0], test_label_fix_list, dtype=tf.float32)
        self.custom_fake_image = tf.map_fn(lambda x : self.generator(self.custom_image, x, reuse=True)[0], custom_label_fix_list, dtype=tf.float32)


        """ Training """
        # Optimize generator and discriminator variables separately.
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'generator' in var.name]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]

        self.g_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.g_loss, var_list=G_vars)
        self.d_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.d_loss, var_list=D_vars)


        """" Summary """
        self.Generator_loss = tf.summary.scalar("Generator_loss", self.g_loss)
        self.Discriminator_loss = tf.summary.scalar("Discriminator_loss", self.d_loss)

        self.g_adv_loss = tf.summary.scalar("g_adv_loss", g_adv_loss)
        self.g_cls_loss = tf.summary.scalar("g_cls_loss", g_cls_loss)
        self.g_rec_loss = tf.summary.scalar("g_rec_loss", g_rec_loss)

        self.d_adv_loss = tf.summary.scalar("d_adv_loss", d_adv_loss)
        self.d_cls_loss = tf.summary.scalar("d_cls_loss", d_cls_loss)

        self.g_summary_loss = tf.summary.merge([self.Generator_loss, self.g_adv_loss, self.g_cls_loss, self.g_rec_loss])
        self.d_summary_loss = tf.summary.merge([self.Discriminator_loss, self.d_adv_loss, self.d_cls_loss])
Пример #29
0
    def build_model(self):
        """Build the graph for the current phase.

        'train': multi-GPU training graph (data pipeline, per-example G/D
        losses, optimizers with EMA shadow updates, summaries, sample
        images). 'refer_test': translate a custom image using styles
        encoded from a reference image. otherwise: translate a custom
        image using styles from random mapping-network codes.
        """

        # EMA shadows of G/E/F weights are what gets used at test time.
        self.ema = tf.train.ExponentialMovingAverage(decay=self.ema_decay)

        if self.phase == 'train' :
            """ Input Image"""
            img_class = Image_data(self.img_height, self.img_width, self.img_ch, self.dataset_path, self.label_list,
                                   self.augment_flag)
            img_class.preprocess()

            dataset_num = len(img_class.image)
            print("Dataset number : ", dataset_num)

            self.lr = tf.placeholder(tf.float32, name='learning_rate')
            # Weight for the style-diversification loss; annealed by the
            # training loop via this placeholder.
            self.ds_weight_placeholder = tf.placeholder(tf.float32, name='ds_weight')


            img_and_label = tf.data.Dataset.from_tensor_slices((img_class.image, img_class.label))

            # One global batch of size batch_size * gpu_num, prefetched to GPU,
            # later split evenly across devices.
            gpu_device = '/gpu:0'
            img_and_label = img_and_label.apply(shuffle_and_repeat(dataset_num)).apply(
                map_and_batch(img_class.image_processing, self.batch_size * self.gpu_num, num_parallel_batches=16,
                              drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))

            img_and_label_iterator = img_and_label.make_one_shot_iterator()

            self.x_real, label_org = img_and_label_iterator.get_next() # [bs, 256, 256, 3], [bs, 1]
            # label_trg = tf.random_shuffle(label_org)  # Target domain labels
            label_trg = tf.random_uniform(shape=tf.shape(label_org), minval=0, maxval=self.c_dim, dtype=tf.int32) # Target domain labels

            """ split """
            x_real_gpu_split = tf.split(self.x_real, num_or_size_splits=self.gpu_num, axis=0)
            label_org_gpu_split = tf.split(label_org, num_or_size_splits=self.gpu_num, axis=0)
            label_trg_gpu_split = tf.split(label_trg, num_or_size_splits=self.gpu_num, axis=0)

            g_adv_loss_per_gpu = []
            g_sty_recon_loss_per_gpu = []
            g_sty_diverse_loss_per_gpu = []
            g_cyc_loss_per_gpu = []
            g_loss_per_gpu = []

            d_adv_loss_per_gpu = []
            d_loss_per_gpu = []

            # Build the towers: variables are created on gpu 0 and reused
            # (reuse=True) on all later GPUs.
            for gpu_id in range(self.gpu_num):
                with tf.device(tf.DeviceSpec(device_type="GPU", device_index=gpu_id)):
                    with tf.variable_scope(tf.get_variable_scope(), reuse=(gpu_id > 0)):

                        # Process the tower's batch one example at a time so
                        # each example can draw its own style codes.
                        x_real_split = tf.split(x_real_gpu_split[gpu_id], num_or_size_splits=self.batch_size, axis=0)
                        label_org_split = tf.split(label_org_gpu_split[gpu_id], num_or_size_splits=self.batch_size, axis=0)
                        label_trg_split = tf.split(label_trg_gpu_split[gpu_id], num_or_size_splits=self.batch_size, axis=0)

                        g_adv_loss = None
                        g_sty_recon_loss = None
                        g_sty_diverse_loss = None
                        g_cyc_loss = None

                        d_adv_loss = None
                        d_simple_gp = None
                        d_gp = None

                        for each_bs in range(self.batch_size) :
                            """ Define Generator, Discriminator """
                            x_real_each = x_real_split[each_bs] # [1, 256, 256, 3]
                            label_org_each = tf.squeeze(label_org_split[each_bs], axis=[0, 1]) # [1, 1] -> []
                            label_trg_each = tf.squeeze(label_trg_split[each_bs], axis=[0, 1])

                            random_style_code = tf.random_normal(shape=[1, self.style_dim])
                            random_style_code_1 = tf.random_normal(shape=[1, self.style_dim])
                            random_style_code_2 = tf.random_normal(shape=[1, self.style_dim])

                            # Mapping network emits one style per domain;
                            # gather picks the target domain's style.
                            random_style = tf.gather(self.mapping_network(random_style_code), label_trg_each)
                            random_style_1 = tf.gather(self.mapping_network(random_style_code_1), label_trg_each)
                            random_style_2 = tf.gather(self.mapping_network(random_style_code_2), label_trg_each)

                            x_fake = self.generator(x_real_each, random_style) # for adversarial objective
                            x_fake_1 = self.generator(x_real_each, random_style_1) # for style diversification
                            x_fake_2 = self.generator(x_real_each, random_style_2) # for style diversification

                            x_real_each_style = tf.gather(self.style_encoder(x_real_each), label_org_each) # for cycle consistency
                            x_fake_style = tf.gather(self.style_encoder(x_fake), label_trg_each) # for style reconstruction

                            x_cycle = self.generator(x_fake, x_real_each_style) # for cycle consistency

                            real_logit = tf.gather(self.discriminator(x_real_each), label_org_each)
                            fake_logit = tf.gather(self.discriminator(x_fake), label_trg_each)

                            """ Define loss """
                            if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan':
                                GP = self.gradient_panalty(real=x_real_each, fake=x_fake, real_label=label_org_each)
                            else:
                                GP = tf.constant([0], tf.float32)

                            # Per-example losses are accumulated by concat
                            # along axis 0 and averaged after the loop.
                            if each_bs == 0 :
                                g_adv_loss = self.adv_weight * generator_loss(self.gan_type, fake_logit)
                                g_sty_recon_loss = self.sty_weight * L1_loss(random_style, x_fake_style)
                                g_sty_diverse_loss = self.ds_weight_placeholder * L1_loss(x_fake_1, x_fake_2)
                                g_cyc_loss = self.cyc_weight * L1_loss(x_real_each, x_cycle)

                                d_adv_loss = self.adv_weight * discriminator_loss(self.gan_type, real_logit, fake_logit)
                                d_simple_gp = self.adv_weight * simple_gp(real_logit, fake_logit, x_real_each, x_fake, r1_gamma=self.r1_weight, r2_gamma=0.0)
                                d_gp = self.adv_weight * GP

                            else :
                                g_adv_loss = tf.concat([g_adv_loss, self.adv_weight * generator_loss(self.gan_type, fake_logit)], axis=0)
                                g_sty_recon_loss = tf.concat([g_sty_recon_loss, self.sty_weight * L1_loss(random_style, x_fake_style)], axis=0)
                                g_sty_diverse_loss = tf.concat([g_sty_diverse_loss, self.ds_weight_placeholder * L1_loss(x_fake_1, x_fake_2)], axis=0)
                                g_cyc_loss = tf.concat([g_cyc_loss, self.cyc_weight * L1_loss(x_real_each, x_cycle)], axis=0)

                                d_adv_loss = tf.concat([d_adv_loss, self.adv_weight * discriminator_loss(self.gan_type, real_logit, fake_logit)], axis=0)
                                d_simple_gp = tf.concat([d_simple_gp, self.adv_weight * simple_gp(real_logit, fake_logit, x_real_each, x_fake, r1_gamma=self.r1_weight, r2_gamma=0.0)], axis=0)
                                d_gp = tf.concat([d_gp, self.adv_weight * GP], axis=0)


                        g_adv_loss = tf.reduce_mean(g_adv_loss)
                        g_sty_recon_loss = tf.reduce_mean(g_sty_recon_loss)
                        g_sty_diverse_loss = tf.reduce_mean(g_sty_diverse_loss)
                        g_cyc_loss = tf.reduce_mean(g_cyc_loss)

                        d_adv_loss = tf.reduce_mean(d_adv_loss)
                        d_simple_gp = tf.reduce_mean(tf.reduce_sum(d_simple_gp, axis=[1, 2, 3]))
                        d_gp = tf.reduce_mean(d_gp)

                        # Diversification loss is *subtracted*: the generator
                        # is pushed to maximize style diversity.
                        g_loss = g_adv_loss + g_sty_recon_loss - g_sty_diverse_loss + g_cyc_loss
                        d_loss = d_adv_loss + d_simple_gp + d_gp

                        g_adv_loss_per_gpu.append(g_adv_loss)
                        g_sty_recon_loss_per_gpu.append(g_sty_recon_loss)
                        g_sty_diverse_loss_per_gpu.append(g_sty_diverse_loss)
                        g_cyc_loss_per_gpu.append(g_cyc_loss)

                        d_adv_loss_per_gpu.append(d_adv_loss)

                        g_loss_per_gpu.append(g_loss)
                        d_loss_per_gpu.append(d_loss)

            # Average tower losses into the final scalars.
            g_adv_loss = tf.reduce_mean(g_adv_loss_per_gpu)
            g_sty_recon_loss = tf.reduce_mean(g_sty_recon_loss_per_gpu)
            g_sty_diverse_loss = tf.reduce_mean(g_sty_diverse_loss_per_gpu)
            g_cyc_loss = tf.reduce_mean(g_cyc_loss_per_gpu)
            self.g_loss = tf.reduce_mean(g_loss_per_gpu)

            d_adv_loss = tf.reduce_mean(d_adv_loss_per_gpu)
            self.d_loss = tf.reduce_mean(d_loss_per_gpu)


            """ Training """
            t_vars = tf.trainable_variables()
            G_vars = [var for var in t_vars if 'generator' in var.name]
            E_vars = [var for var in t_vars if 'encoder' in var.name]
            F_vars = [var for var in t_vars if 'mapping' in var.name]
            D_vars = [var for var in t_vars if 'discriminator' in var.name]

            # Mapping network F trains at 1/100 of the base learning rate.
            if self.gpu_num == 1 :
                prev_g_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.g_loss, var_list=G_vars)
                prev_e_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.g_loss, var_list=E_vars)
                prev_f_optimizer = tf.train.AdamOptimizer(self.lr * 0.01, beta1=0, beta2=0.99).minimize(self.g_loss, var_list=F_vars)

                self.d_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.d_loss, var_list=D_vars)

            else :
                prev_g_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.g_loss, var_list=G_vars,
                                                                                                 colocate_gradients_with_ops=True)
                prev_e_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.g_loss,
                                                                                                 var_list=E_vars,
                                                                                                 colocate_gradients_with_ops=True)
                prev_f_optimizer = tf.train.AdamOptimizer(self.lr * 0.01, beta1=0, beta2=0.99).minimize(self.g_loss,
                                                                                                        var_list=F_vars,
                                                                                                        colocate_gradients_with_ops=True)

                self.d_optimizer = tf.train.AdamOptimizer(self.lr, beta1=0, beta2=0.99).minimize(self.d_loss,
                                                                                                 var_list=D_vars,
                                                                                                 colocate_gradients_with_ops=True)

            # Running g/e/f "optimizers" applies the Adam step first, then
            # updates the EMA shadow variables.
            with tf.control_dependencies([prev_g_optimizer, prev_e_optimizer, prev_f_optimizer]):
                self.g_optimizer = self.ema.apply(G_vars)
                self.e_optimizer = self.ema.apply(E_vars)
                self.f_optimizer = self.ema.apply(F_vars)

            """" Summary """
            self.Generator_loss = tf.summary.scalar("g_loss", self.g_loss)
            self.Discriminator_loss = tf.summary.scalar("d_loss", self.d_loss)

            self.g_adv_loss = tf.summary.scalar("g_adv_loss", g_adv_loss)
            self.g_sty_recon_loss = tf.summary.scalar("g_sty_recon_loss", g_sty_recon_loss)
            self.g_sty_diverse_loss = tf.summary.scalar("g_sty_diverse_loss", g_sty_diverse_loss)
            self.g_cyc_loss = tf.summary.scalar("g_cyc_loss", g_cyc_loss)

            self.d_adv_loss = tf.summary.scalar("d_adv_loss", d_adv_loss)

            g_summary_list = [self.Generator_loss, self.g_adv_loss, self.g_sty_recon_loss, self.g_sty_diverse_loss, self.g_cyc_loss]
            d_summary_list = [self.Discriminator_loss, self.d_adv_loss]

            self.g_summary_loss = tf.summary.merge(g_summary_list)
            self.d_summary_loss = tf.summary.merge(d_summary_list)

            """ Result Image """
            def return_g_images(generator, image, code):
                # Thin wrapper so tf.map_fn can close over the generator.
                x = generator(image, code)
                return x

            # Translate the first real image of the batch into every domain,
            # once per sampled style.
            self.x_fake_list = []
            first_x_real = tf.expand_dims(self.x_real[0], axis=0)

            label_fix_list = tf.constant([idx for idx in range(self.c_dim)])

            for _ in range(self.num_style):
                random_style_code = tf.truncated_normal(shape=[1, self.style_dim])
                self.x_fake_list.append(tf.map_fn(
                    lambda c: return_g_images(self.generator,
                                              first_x_real,
                                              tf.gather(self.mapping_network(random_style_code), c)),
                    label_fix_list, dtype=tf.float32))

        elif self.phase == 'refer_test':
            """ Test """
            # Styles come from encoding a user-supplied reference image.

            def return_g_images(generator, image, code):
                x = generator(image, code)
                return x

            self.custom_image = tf.placeholder(tf.float32, [1, self.img_height, self.img_width, self.img_ch], name='custom_image')
            self.refer_image = tf.placeholder(tf.float32, [1, self.img_height, self.img_width, self.img_ch], name='refer_image')


            label_fix_list = tf.constant([idx for idx in range(self.c_dim)])

            self.refer_fake_image = tf.map_fn(
                lambda c : return_g_images(self.generator,
                                           self.custom_image,
                                           tf.gather(self.style_encoder(self.refer_image), c)),
                label_fix_list, dtype=tf.float32)

        else :
            """ Test """
            # Styles come from a random code pushed through the mapping network.

            def return_g_images(generator, image, code):
                x = generator(image, code)
                return x

            self.custom_image = tf.placeholder(tf.float32, [1, self.img_height, self.img_width, self.img_ch], name='custom_image')
            label_fix_list = tf.constant([idx for idx in range(self.c_dim)])

            random_style_code = tf.truncated_normal(shape=[1, self.style_dim])
            self.custom_fake_image = tf.map_fn(
                lambda c : return_g_images(self.generator,
                                           self.custom_image,
                                           tf.gather(self.mapping_network(random_style_code), c)),
                label_fix_list, dtype=tf.float32)
Пример #30
0
    def build_model(self):
        """Build the StarGAN-style graph: pipelines, G/D, losses, optimizers, summaries."""
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        """ Input Image"""
        Image_data_class = ImageData(load_size=self.img_size,
                                     channels=self.img_ch,
                                     data_path=self.dataset_path,
                                     dataset_name=self.dataset_name,
                                     selected_attrs=self.selected_attrs,
                                     augment_flag=self.augment_flag)
        Image_data_class.preprocess()

        train_dataset_num = len(Image_data_class.train_dataset)
        test_dataset_num = len(Image_data_class.test_dataset)

        train_dataset = tf.data.Dataset.from_tensor_slices(
            (Image_data_class.train_dataset,
             Image_data_class.train_dataset_label,
             Image_data_class.train_dataset_fix_label))
        test_dataset = tf.data.Dataset.from_tensor_slices(
            (Image_data_class.test_dataset,
             Image_data_class.test_dataset_label,
             Image_data_class.test_dataset_fix_label))

        # Shuffle over the full dataset, decode/batch in parallel, and
        # prefetch directly to GPU memory.
        gpu_device = '/gpu:0'
        train_dataset = train_dataset.\
            apply(shuffle_and_repeat(train_dataset_num)).\
            apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        test_dataset = test_dataset.\
            apply(shuffle_and_repeat(test_dataset_num)).\
            apply(map_and_batch(Image_data_class.image_processing, self.batch_size, num_parallel_batches=8, drop_remainder=True)).\
            apply(prefetch_to_device(gpu_device, self.batch_size))

        train_dataset_iterator = train_dataset.make_one_shot_iterator()
        test_dataset_iterator = test_dataset.make_one_shot_iterator()

        self.x_real, label_org, label_fix_list = train_dataset_iterator.get_next(
        )  # Input image / Original domain labels
        # Target labels are a batch-wise shuffle of the originals.
        label_trg = tf.random_shuffle(label_org)  # Target domain labels
        label_fix_list = tf.transpose(label_fix_list, perm=[1, 0, 2])

        self.label_org = label_org
        self.label_trg = label_trg
        self.label_fix_list = label_fix_list

        self.x_test, test_label_org, test_label_fix_list = test_dataset_iterator.get_next(
        )  # Input image / Original domain labels
        test_label_fix_list = tf.transpose(test_label_fix_list, perm=[1, 0, 2])

        self.custom_image = tf.placeholder(
            tf.float32, [1, self.img_size, self.img_size, self.img_ch],
            name='custom_image')  # Custom Image
        custom_label_fix_list = tf.transpose(create_labels(
            self.custom_label, self.selected_attrs, self.dataset_name),
                                             perm=[1, 0, 2])
        """ Define Generator, Discriminator """
        # Translate to target label, then translate back for reconstruction.
        x_fake = self.generator(self.x_real, label_trg)  # real a
        x_recon = self.generator(x_fake, label_org, reuse=True)  # real b

        real_logit, real_cls = self.discriminator(self.x_real)
        fake_logit, fake_cls = self.discriminator(x_fake, reuse=True)
        """ Define Loss """
        # Gradient penalty only for WGAN-style / DRAGAN objectives.
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan':
            GP = self.gradient_panalty(real=self.x_real, fake=x_fake)
        else:
            GP = 0

        g_adv_loss = generator_loss(loss_func=self.gan_type, fake=fake_logit)
        g_cls_loss = classification_loss(logit=fake_cls, label=label_trg)
        g_rec_loss = L1_loss(self.x_real, x_recon)

        d_adv_loss = discriminator_loss(
            loss_func=self.gan_type, real=real_logit, fake=fake_logit) + GP
        d_cls_loss = classification_loss(logit=real_cls, label=label_org)

        self.d_loss = self.adv_weight * d_adv_loss + self.cls_weight * d_cls_loss
        self.g_loss = self.adv_weight * g_adv_loss + self.cls_weight * g_cls_loss + self.rec_weight * g_rec_loss
        """ Result Image """
        # One translated image per fixed target-label vector.
        self.x_fake_list = tf.map_fn(
            lambda x: self.generator(self.x_real, x, reuse=True),
            label_fix_list,
            dtype=tf.float32)
        """ Test Image """
        self.x_test_fake_list = tf.map_fn(
            lambda x: self.generator(self.x_test, x, reuse=True),
            test_label_fix_list,
            dtype=tf.float32)
        self.custom_fake_image = tf.map_fn(
            lambda x: self.generator(self.custom_image, x, reuse=True),
            custom_label_fix_list,
            dtype=tf.float32)
        """ Training """
        # Optimize generator and discriminator variables separately.
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'generator' in var.name]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]

        self.g_optimizer = tf.train.AdamOptimizer(self.lr,
                                                  beta1=0.5,
                                                  beta2=0.999).minimize(
                                                      self.g_loss,
                                                      var_list=G_vars)
        self.d_optimizer = tf.train.AdamOptimizer(self.lr,
                                                  beta1=0.5,
                                                  beta2=0.999).minimize(
                                                      self.d_loss,
                                                      var_list=D_vars)
        """" Summary """
        self.Generator_loss = tf.summary.scalar("Generator_loss", self.g_loss)
        self.Discriminator_loss = tf.summary.scalar("Discriminator_loss",
                                                    self.d_loss)

        self.g_adv_loss = tf.summary.scalar("g_adv_loss", g_adv_loss)
        self.g_cls_loss = tf.summary.scalar("g_cls_loss", g_cls_loss)
        self.g_rec_loss = tf.summary.scalar("g_rec_loss", g_rec_loss)

        self.d_adv_loss = tf.summary.scalar("d_adv_loss", d_adv_loss)
        self.d_cls_loss = tf.summary.scalar("d_cls_loss", d_cls_loss)

        self.g_summary_loss = tf.summary.merge([
            self.Generator_loss, self.g_adv_loss, self.g_cls_loss,
            self.g_rec_loss
        ])
        self.d_summary_loss = tf.summary.merge(
            [self.Discriminator_loss, self.d_adv_loss, self.d_cls_loss])
Пример #31
0
    def build_model(self):
        """Build the TF1 graph for a progressive-growing GAN.

        Train phase: for every resolution from ``self.start_res`` to the end of
        ``self.resolutions``, constructs an input pipeline, a generator /
        discriminator pair replicated across ``self.gpu_num`` GPUs, Adam
        optimizers, and scalar summaries. Results are stored in the
        per-resolution dicts created below, keyed by resolution.

        Test phase: builds a single generator forward pass and stores its
        output in ``self.fake_images``.
        """
        if self.phase == 'train':
            # Per-resolution graph handles, keyed by resolution value.
            self.d_loss_per_res = {}
            self.g_loss_per_res = {}
            self.generator_optim = {}
            self.discriminator_optim = {}
            self.alpha_summary_per_res = {}
            self.d_summary_per_res = {}
            self.g_summary_per_res = {}
            self.train_fake_images = {}

            # Build one sub-graph per resolution, starting at start_res.
            for res in self.resolutions[self.resolutions.index(self.start_res
                                                               ):]:
                g_loss_per_gpu = []
                d_loss_per_gpu = []
                train_fake_images_per_gpu = []

                # Per-resolution batch size; falls back to the base value.
                batch_size = self.batch_sizes.get(res, self.batch_size_base)
                # Separate (float) step counter per resolution; advanced by
                # the generator optimizer below.
                global_step = tf.get_variable(
                    'global_step_{}'.format(res),
                    shape=[],
                    dtype=tf.float32,
                    initializer=tf.initializers.zeros(),
                    trainable=False,
                    aggregation=tf.VariableAggregation.ONLY_FIRST_TOWER)
                # alpha_const ramps the fade-in coefficient over the first
                # half of the iterations; zero_constant pins it to 0.
                # NOTE(review): exact schedule lives in get_alpha_const —
                # confirm there.
                alpha_const, zero_constant = get_alpha_const(
                    self.iteration // 2, batch_size * self.gpu_num,
                    global_step)

                # smooth transition variable
                do_train_trans = self.train_with_trans[res]

                # Fade-in blend factor between the previous and current
                # resolution; starts at 1 when transitioning, else fixed 0.
                alpha = tf.get_variable(
                    'alpha_{}'.format(res),
                    shape=[],
                    dtype=tf.float32,
                    initializer=tf.initializers.ones()
                    if do_train_trans else tf.initializers.zeros(),
                    trainable=False,
                    aggregation=tf.VariableAggregation.ONLY_FIRST_TOWER)

                if do_train_trans:
                    alpha_assign_op = tf.assign(alpha, alpha_const)
                else:
                    alpha_assign_op = tf.assign(alpha, zero_constant)

                # Force the alpha update to run before every forward pass
                # built inside this context.
                with tf.control_dependencies([alpha_assign_op]):
                    for gpu_id in range(self.gpu_num):
                        with tf.device(
                                tf.DeviceSpec(device_type="GPU",
                                              device_index=gpu_id)):
                            # reuse=(gpu_id > 0): GPU 0 creates the
                            # variables, later towers share them.
                            with tf.variable_scope(tf.get_variable_scope(),
                                                   reuse=(gpu_id > 0)):
                                # images
                                gpu_device = '/gpu:{}'.format(gpu_id)
                                image_class = ImageData(res)
                                inputs = tf.data.Dataset.from_tensor_slices(
                                    self.dataset)

                                inputs = inputs. \
                                    apply(shuffle_and_repeat(self.dataset_num)). \
                                    apply(map_and_batch(image_class.image_processing, batch_size, num_parallel_batches=16, drop_remainder=True)). \
                                    apply(prefetch_to_device(gpu_device, None))
                                # When using dataset.prefetch, use buffer_size=None to let it detect optimal buffer size

                                inputs_iterator = inputs.make_one_shot_iterator(
                                )

                                real_img = inputs_iterator.get_next()
                                # Latent noise, one vector per batch element.
                                z = tf.random_normal(
                                    shape=[batch_size, self.z_dim])

                                fake_img = self.generator(z, alpha, res)
                                # Blend real images between resolutions so
                                # D sees the same fade-in as G's output.
                                real_img = smooth_crossfade(real_img, alpha)

                                real_logit = self.discriminator(
                                    real_img, alpha, res)
                                fake_logit = self.discriminator(
                                    fake_img, alpha, res)

                                # compute loss
                                d_loss, g_loss = compute_loss(
                                    real_img, real_logit, fake_logit)

                                d_loss_per_gpu.append(d_loss)
                                g_loss_per_gpu.append(g_loss)
                                train_fake_images_per_gpu.append(fake_img)

                print("Create graph for {} resolution".format(res))

                # prepare appropriate training vars
                d_vars, g_vars = filter_trainable_variables(res)

                # Average the tower losses into a single scalar each.
                d_loss = tf.reduce_mean(d_loss_per_gpu)
                g_loss = tf.reduce_mean(g_loss_per_gpu)

                d_lr = self.d_learning_rates.get(res, self.learning_rate_base)
                g_lr = self.g_learning_rates.get(res, self.learning_rate_base)

                # Only colocate gradients with their ops when multiple GPUs
                # are in play (keeps each tower's grads on its own device).
                if self.gpu_num == 1:
                    colocate_grad = False
                else:
                    colocate_grad = True

                d_optim = tf.train.AdamOptimizer(
                    d_lr, beta1=0.0, beta2=0.99, epsilon=1e-8).minimize(
                        d_loss,
                        var_list=d_vars,
                        colocate_gradients_with_ops=colocate_grad)

                # Only the generator step advances global_step, so each
                # G update ticks the alpha fade-in schedule once.
                g_optim = tf.train.AdamOptimizer(
                    g_lr, beta1=0.0, beta2=0.99, epsilon=1e-8).minimize(
                        g_loss,
                        var_list=g_vars,
                        global_step=global_step,
                        colocate_gradients_with_ops=colocate_grad)

                self.discriminator_optim[res] = d_optim
                self.generator_optim[res] = g_optim

                self.d_loss_per_res[res] = d_loss
                self.g_loss_per_res[res] = g_loss

                # All towers' fakes stacked along the batch axis.
                self.train_fake_images[res] = tf.concat(
                    train_fake_images_per_gpu, axis=0)
                """ Summary """
                self.alpha_summary_per_res[res] = tf.summary.scalar(
                    "alpha_{}".format(res), alpha)

                self.d_summary_per_res[res] = tf.summary.scalar(
                    "d_loss_{}".format(res), self.d_loss_per_res[res])
                self.g_summary_per_res[res] = tf.summary.scalar(
                    "g_loss_{}".format(res), self.g_loss_per_res[res])

        else:
            # NOTE(review): the stray 4th quote makes the next line the no-op
            # string '" Testing ' rather than a comment — harmless typo.
            """" Testing """
            test_z = tf.random_normal(shape=[self.batch_size, self.z_dim])
            # alpha fixed at 0: inference always uses the fully-grown network.
            alpha = tf.constant(0.0, dtype=tf.float32, shape=[])
            self.fake_images = self.generator(test_z,
                                              alpha=alpha,
                                              target_img_size=self.img_size,
                                              is_training=False)
# Пример #32 (Example #32) — scraped-snippet separator, not part of the code
# 0  (snippet score from the scrape)
    def build_model(self):
        """Build the TF1 graph for a text-to-image GAN.

        Train phase: wires an image/caption input pipeline, an RNN sentence
        encoder, a multi-scale (64/128/256) generator and discriminator,
        adversarial + KL losses, Adam optimizers, and summaries.

        Test phase: builds an inference pipeline over the test split and
        stores the 256x256 real/fake image tensors on ``self``.
        """
        """ Input Image"""
        img_data_class = Image_data(self.img_height, self.img_width, self.img_ch, self.dataset_path, self.augment_flag)
        train_captions, train_images, test_captions, test_images, idx_to_word, word_to_idx = img_data_class.preprocess()
        """
        train_captions: (8855, 10, 18), test_captions: (2933, 10, 18)
        train_images: (8855,), test_images: (2933,)
        idx_to_word : 5450 5450
        """

        if self.phase == 'train' :
            # Learning rate is fed at run time so it can be decayed.
            self.lr = tf.placeholder(tf.float32, name='learning_rate')

            self.dataset_num = len(train_images)


            img_and_caption = tf.data.Dataset.from_tensor_slices((train_images, train_captions))

            gpu_device = '/gpu:0'
            img_and_caption = img_and_caption.apply(shuffle_and_repeat(self.dataset_num)).apply(
                map_and_batch(img_data_class.image_processing, batch_size=self.batch_size, num_parallel_batches=16,
                              drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))


            img_and_caption_iterator = img_and_caption.make_one_shot_iterator()
            real_img_256, caption = img_and_caption_iterator.get_next()
            # Each image carries 10 captions; pick one at random per step.
            target_sentence_index = tf.random_uniform(shape=[], minval=0, maxval=10, dtype=tf.int32)
            caption = tf.gather(caption, target_sentence_index, axis=1)

            # Bidirectional LSTM encoder: per-word embeddings, a sentence
            # embedding, and a padding mask.
            word_emb, sent_emb, mask = self.rnn_encoder(caption, n_words=len(idx_to_word),
                                                        embed_dim=self.embed_dim, drop_rate=0.5, n_hidden=128, n_layers=1,
                                                        bidirectional=True, rnn_type='lstm')

            noise = tf.random_normal(shape=[self.batch_size, self.z_dim], mean=0.0, stddev=1.0)
            # mu/logvar come from the conditioning-augmentation head and feed
            # the KL term below.
            fake_imgs, _, mu, logvar = self.generator(noise, sent_emb, word_emb, mask)

            # Downscale the real 256px image to match the generator's
            # intermediate output resolutions.
            real_img_64, real_img_128 = resize(real_img_256, target_size=[64, 64]), resize(real_img_256, target_size=[128, 128])
            fake_img_64, fake_img_128, fake_img_256 = fake_imgs[0], fake_imgs[1], fake_imgs[2]

            # Three logit sets: real images w/ matching text, real images
            # paired with shifted (mismatched) text, and fake images.
            uncond_real_logits, cond_real_logits = self.discriminator([real_img_64, real_img_128, real_img_256], sent_emb)
            _, cond_wrong_logits = self.discriminator([real_img_64[:(self.batch_size - 1)], real_img_128[:(self.batch_size - 1)], real_img_256[:(self.batch_size - 1)]], sent_emb[1:self.batch_size])
            uncond_fake_logits, cond_fake_logits = self.discriminator([fake_img_64, fake_img_128, fake_img_256], sent_emb)

            # Accumulate adversarial losses over the three scales.
            self.g_adv_loss, self.d_adv_loss = 0, 0
            for i in range(3):
                self.g_adv_loss += self.adv_weight * (generator_loss(self.gan_type, uncond_fake_logits[i]) + generator_loss(self.gan_type, cond_fake_logits[i]))

                uncond_real_loss, uncond_fake_loss = discriminator_loss(self.gan_type, uncond_real_logits[i], uncond_fake_logits[i])
                cond_real_loss, cond_fake_loss = discriminator_loss(self.gan_type, cond_real_logits[i], cond_fake_logits[i])
                _, cond_wrong_loss = discriminator_loss(self.gan_type, None, cond_wrong_logits[i])

                # Real terms averaged over 2 sources, fake terms over 3
                # (fake, mismatched-text, and conditional-fake).
                self.d_adv_loss += self.adv_weight * (((uncond_real_loss + cond_real_loss) / 2) + (uncond_fake_loss + cond_fake_loss + cond_wrong_loss) / 3)

            # KL regularizer on the conditioning-augmentation distribution.
            self.g_kl_loss = self.kl_weight * kl_loss(mu, logvar)

            self.g_loss = self.g_adv_loss + self.g_kl_loss
            self.d_loss = self.d_adv_loss

            self.real_img = real_img_256
            self.fake_img = fake_img_256


            """ Training """
            # Split trainable variables by scope-name substring.
            t_vars = tf.trainable_variables()
            G_vars = [var for var in t_vars if 'generator' in var.name]
            D_vars = [var for var in t_vars if 'discriminator' in var.name]

            self.g_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.g_loss, var_list=G_vars)
            self.d_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.d_loss,var_list=D_vars)


            # NOTE(review): the stray 4th quote makes the next line the no-op
            # string '" Summary ' rather than a comment — harmless typo.
            """" Summary """
            self.summary_g_loss = tf.summary.scalar("g_loss", self.g_loss)
            self.summary_d_loss = tf.summary.scalar("d_loss", self.d_loss)

            self.summary_g_adv_loss = tf.summary.scalar("g_adv_loss", self.g_adv_loss)
            self.summary_g_kl_loss = tf.summary.scalar("g_kl_loss", self.g_kl_loss)

            self.summary_d_adv_loss = tf.summary.scalar("d_adv_loss", self.d_adv_loss)


            g_summary_list = [self.summary_g_loss,
                              self.summary_g_adv_loss, self.summary_g_kl_loss]

            d_summary_list = [self.summary_d_loss,
                              self.summary_d_adv_loss]

            self.summary_merge_g_loss = tf.summary.merge(g_summary_list)
            self.summary_merge_d_loss = tf.summary.merge(d_summary_list)

        else :
            """ Test """
            self.dataset_num = len(test_captions)

            gpu_device = '/gpu:0'
            img_and_caption = tf.data.Dataset.from_tensor_slices((test_images, test_captions))

            # Same pipeline as training but over the test split.
            img_and_caption = img_and_caption.apply(
                shuffle_and_repeat(self.dataset_num)).apply(
                map_and_batch(img_data_class.image_processing, batch_size=self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(
                prefetch_to_device(gpu_device, None))

            img_and_caption_iterator = img_and_caption.make_one_shot_iterator()
            real_img_256, caption = img_and_caption_iterator.get_next()
            target_sentence_index = tf.random_uniform(shape=[], minval=0, maxval=10, dtype=tf.int32)
            caption = tf.gather(caption, target_sentence_index, axis=1)

            # Encoder in inference mode (is_training=False disables dropout
            # — presumably; confirm in rnn_encoder).
            word_emb, sent_emb, mask = self.rnn_encoder(caption, n_words=len(idx_to_word),
                                                        embed_dim=self.embed_dim, drop_rate=0.5, n_hidden=128,
                                                        n_layers=1,
                                                        bidirectional=True, rnn_type='lstm',
                                                        is_training=False)

            noise = tf.random_normal(shape=[self.batch_size, self.z_dim], mean=0.0, stddev=1.0)
            fake_imgs, _, _, _ = self.generator(noise, sent_emb, word_emb, mask, is_training=False)

            self.test_real_img = real_img_256
            # Keep only the highest-resolution (256px) generator output.
            self.test_fake_img = fake_imgs[2]