Exemplo n.º 1
0
    def build_DCGAN(self):
        """Build the DCGAN training graph.

        Wires a generator and a weight-shared discriminator into two
        training ops -- 'DISC' (discriminator) and 'GEN' (stacked
        generator->discriminator) -- then stores the resulting
        ``tflearn.DNN`` in ``self.model`` and the generator output
        tensor in ``self.gen_net``.
        """
        # Noise stream for the generator branch trained by the 'GEN' op.
        gen_input = input_data(shape=[None, self.z_dim], name='input_gen_noise')
        # Separate noise stream used to produce fakes for the 'DISC' op.
        input_disc_noise = input_data(shape=[None, self.z_dim], name='input_disc_noise')

        # Real single-channel images, img_size x img_size.
        input_disc_real = input_data(shape=[None, self.img_size, self.img_size, 1], name='input_disc_real')

        # First discriminator call creates the variables; every later call
        # passes reuse=True so all paths share the same weights.
        disc_fake = self.discriminator(self.generator(input_disc_noise))
        disc_real = self.discriminator(input_disc_real, reuse=True)
        # Fake and real outputs are stacked along the batch axis so a single
        # regression op can train on both halves.
        disc_net = tf.concat([disc_fake, disc_real], axis=0)

        # Stacked generator->discriminator path used to train the generator.
        gen_net = self.generator(gen_input, reuse=True)
        stacked_gan_net = self.discriminator(gen_net, reuse=True)

        disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')

        # Two target placeholders (fake/real labels) for the concatenated
        # discriminator batch.
        disc_target = tflearn.multi_target_data(['target_disc_fake', 'target_disc_real'],
                                                shape=[None, 2])

        adam = Adam(learning_rate=self.learning_rate, beta1=self.beta)
        # 'DISC' op updates only discriminator-scope variables.
        # NOTE(review): disc_model itself is unused below; tflearn appears to
        # collect the registered train op from the graph — confirm.
        disc_model = regression(disc_net, optimizer=adam,
                                placeholder=disc_target,
                                loss='categorical_crossentropy',
                                trainable_vars=disc_vars,
                                name='target_disc', batch_size=self.batch_size,
                                op_name='DISC')

        gen_vars = tflearn.get_layer_variables_by_scope('Generator')
        # 'GEN' op trains generator variables to fool the discriminator.
        gan_model = regression(stacked_gan_net, optimizer=adam,
                               loss='categorical_crossentropy',
                               trainable_vars=gen_vars,
                               name='target_gen', batch_size=self.batch_size,
                               op_name='GEN')

        self.model = tflearn.DNN(gan_model, tensorboard_dir='log',
                          checkpoint_path=self.data_folder + 'checkpoints/' + self.model_name + '/')
        self.gen_net = gen_net
Exemplo n.º 2
0
# Noise input for the generator and a 784-dim input (presumably a
# flattened 28x28 image — TODO confirm against the data pipeline) for
# the discriminator.
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_noise')
disc_input = tflearn.input_data(shape=[None, 784], name='disc_input')

gen_sample = generator(gen_input)
disc_real = discriminator(disc_input)
# reuse=True shares the discriminator weights with the call above.
disc_fake = discriminator(gen_sample, reuse=True)

# Define Loss
# Standard minimax GAN losses (generator uses the non-saturating form):
#   discriminator: -E[log D(x)] - E[log(1 - D(G(z)))]
#   generator:     -E[log D(G(z))]
disc_loss = -tf.reduce_mean(tf.log(disc_real) + tf.log(1. - disc_fake))
gen_loss = -tf.reduce_mean(tf.log(disc_fake))

# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope) and set
# 'placeholder=None' because we do not need to feed any target.
gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gen_model = tflearn.regression(gen_sample,
                               placeholder=None,
                               optimizer='adam',
                               loss=gen_loss,
                               trainable_vars=gen_vars,
                               batch_size=64,
                               name='target_gen',
                               op_name='GEN')
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
disc_model = tflearn.regression(disc_real,
                                placeholder=None,
                                optimizer='adam',
                                loss=disc_loss,
                                trainable_vars=disc_vars,
                                batch_size=64,
Exemplo n.º 3
0
# Three inputs: noise for the trained generator path, a separate noise
# stream for the discriminator's fake batch, and real 28x28x1 images.
gen_input = tflearn.input_data(shape=[None, z_dim], name='input_gen_noise')
input_disc_noise = tflearn.input_data(shape=[None, z_dim], name='input_disc_noise')
input_disc_real = tflearn.input_data(shape=[None, 28, 28, 1], name='input_disc_real')

# Build Discriminator
# First discriminator call creates the variables; later calls pass
# reuse=True to share them. Fake and real outputs are concatenated along
# the batch axis so one op trains on both.
disc_fake = discriminator(generator(input_disc_noise))
disc_real = discriminator(input_disc_real, reuse=True)
disc_net = tf.concat([disc_fake, disc_real], axis=0)
# Build Stacked Generator/Discriminator
gen_net = generator(gen_input, reuse=True)
stacked_gan_net = discriminator(gen_net, reuse=True)

# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope).
disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')
# We need 2 target placeholders, for both the real and fake image target.
disc_target = tflearn.multi_target_data(['target_disc_fake', 'target_disc_real'],
                                        shape=[None, 2])
# 'DISC' op updates only discriminator-scope variables.
disc_model = tflearn.regression(disc_net, optimizer='adam',
                                placeholder=disc_target,
                                loss='categorical_crossentropy',
                                trainable_vars=disc_vars,
                                batch_size=64, name='target_disc',
                                op_name='DISC')

gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gan_model = tflearn.regression(stacked_gan_net, optimizer='adam',
                               loss='categorical_crossentropy',
                               trainable_vars=gen_vars,
                               batch_size=64, name='target_gen',
Exemplo n.º 4
0
    def __init__(self, sess, state_dim, learning_rate, scope):
        """Build an LSGAN (least-squares GAN) generator/discriminator pair.

        Args:
            sess: TensorFlow session owned by the caller.
            state_dim: dimensionality of the network input state.
            learning_rate: RMSProp learning rate shared by both networks.
            scope: name prefix; variables are created under
                '<scope>-gan-g' (generator) and '<scope>-gan-d'
                (discriminator).
        """
        self.reuse_gan = False
        self.reuse_disc = False
        self.sess = sess
        self.s_dim = state_dim
        self.lr_rate = learning_rate
        self.scope = scope
        #self.dual = dual
        #self.critic = critic
        self.inputs_g, self.gan_inputs, self.generate = self.create_generate_network(
        )
        self.inputs_d_real, self.disc_real = self.create_discriminator_network_real(
        )
        # Fake branch scores the generator output with shared disc weights.
        self.inputs_d_fake, self.disc_fake = self.create_discriminator_network(
            self.generate)

        # https://arxiv.org/pdf/1611.04076.pdf
        # L2 GAN LOSS (LSGAN).
        # Generator: 0.5 * E[(D(G(z)) - 1)^2] -- push fakes towards "real".
        self.gen_loss = 0.5 * \
            tf.reduce_mean(tflearn.mean_square(self.disc_fake, 1.))
        #-tf.reduce_mean(tf.log(self.disc_fake))
        # Discriminator: 0.5 * (E[(D(x) - 1)^2] + E[(D(G(z)) - 0)^2]).
        # BUG FIX: the second term previously reused self.disc_real, so the
        # discriminator loss never saw the fake samples at all; per the
        # LSGAN paper it must push D(fake) towards 0.
        self.disc_loss = 0.5 * (
            tf.reduce_mean(tflearn.mean_square(self.disc_real, 1.)) +
            tf.reduce_mean(tflearn.mean_square(self.disc_fake, 0.)))
        #-(tf.reduce_mean(tf.log(self.disc_real)) + tf.reduce_mean(tf.log(1. - self.disc_fake)))
        #tflearn.mean_square(tf.log(self.discriminator), self.out)

        # Each optimizer only updates its own network's variables.
        self.gen_vars = tflearn.get_layer_variables_by_scope(self.scope +
                                                             '-gan-g')
        self.disc_vars = tflearn.get_layer_variables_by_scope(self.scope +
                                                              '-gan-d')
        self.gen_op = tf.train.RMSPropOptimizer(self.lr_rate).minimize(
            self.gen_loss, var_list=self.gen_vars)
        self.disc_op = tf.train.RMSPropOptimizer(self.lr_rate).minimize(
            self.disc_loss, var_list=self.disc_vars)

        # Get all network parameters
        self.network_params_g = \
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                              scope=self.scope + '-gan-g')
        self.network_params_d = \
            tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                              scope=self.scope + '-gan-d')
        # Set all network parameters
        # One placeholder per parameter tensor so external weights can be
        # pushed into the networks via the assign ops built below.
        self.input_network_params_g = []
        self.input_network_params_d = []
        for param in self.network_params_g:
            self.input_network_params_g.append(
                tf.placeholder(tf.float32, shape=param.get_shape()))
        for param in self.network_params_d:
            self.input_network_params_d.append(
                tf.placeholder(tf.float32, shape=param.get_shape()))

        self.set_network_params_op_g = []
        self.set_network_params_op_d = []
        for idx, param in enumerate(self.input_network_params_g):
            self.set_network_params_op_g.append(
                self.network_params_g[idx].assign(param))
        for idx, param in enumerate(self.input_network_params_d):
            self.set_network_params_op_d.append(
                self.network_params_d[idx].assign(param))
Exemplo n.º 5
0
def main():
    """Train a GAN over 45-dim feature rows, or sample from a saved model.

    stage == 'train': loads 'ben_original_feature.npy', builds the TFLearn
    GAN graph (discriminator op 'DISC', generator ops 'GEN'), fits for 10
    epochs and saves the model to ``model_path``.
    stage == 'gen': rebuilds the same graph, loads the saved weights, draws
    ``gen_amount`` samples from the generator, filters/renormalizes them and
    saves the result to 'result2_<gen_amount>.npy'.
    """
    stage = 'train'
    # stage = 'gen'
    model_path = 'gan_model/gan_paper_3_private_loss.tfl'

    if stage == 'train':
        X = np.load('ben_original_feature.npy')
        # Drop the first 500 rows; keep the first 45 features.
        X_1 = X[500:, :45]
        X = X[500:, :]
        X = X[:, :45]
        # Treat each 45-dim row as a 9x5 single-channel "image".
        X = np.reshape(X, newshape=[-1, 9, 5, 1])
        z_dim = 20  # Noise data points
        total_samples = len(X)

        # Input Data
        gen_input = tflearn.input_data(shape=[None, z_dim],
                                       name='input_gen_noise')
        input_disc_noise = tflearn.input_data(shape=[None, z_dim],
                                              name='input_disc_noise')
        input_disc_real = tflearn.input_data(shape=[None, 9, 5, 1],
                                             name='input_disc_real')

        #Output Data of Generator

        # Build Discriminator
        # First call creates the variables; later calls reuse them.
        disc_fake = discriminator(generator(input_disc_noise))
        disc_real = discriminator(input_disc_real, reuse=True)
        disc_net = tf.concat([disc_fake, disc_real], axis=0)

        # Build Stacked Generator/Discriminator
        gen_net = generator(gen_input, reuse=True)
        stacked_gan_net = discriminator(gen_net, reuse=True)

        # Build Training Ops for both Generator and Discriminator.
        # Each network optimization should only update its own variable, thus we need
        # to retrieve each network variables (with get_layer_variables_by_scope).
        disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')

        # We need 2 target placeholders, for both the real and fake image target.
        disc_target = tflearn.multi_target_data(
            ['target_disc_fake', 'target_disc_real'], shape=[None, 2])
        # NOTE(review): gen_target is never used below; multi_target_data has
        # graph side effects, so it is kept as-is — confirm it can be removed.
        gen_target = tflearn.multi_target_data(
            ['target_disc_fake', 'target_disc_real'], shape=[None, 2])
        disc_model = tflearn.regression(disc_net,
                                        optimizer='adam',
                                        placeholder=disc_target,
                                        loss='categorical_crossentropy',
                                        trainable_vars=disc_vars,
                                        batch_size=64,
                                        name='target_disc',
                                        op_name='DISC')

        gen_vars = tflearn.get_layer_variables_by_scope('Generator')
        gan_model = tflearn.regression(stacked_gan_net,
                                       optimizer='adam',
                                       loss='categorical_crossentropy',
                                       trainable_vars=gen_vars,
                                       batch_size=64,
                                       name='target_gen',
                                       op_name='GEN')

        # Extra generator op: fit the generator output directly to the real
        # features using the custom private_loss.
        gen_to_train = tflearn.regression(gen_net,
                                          optimizer='adam',
                                          loss=private_loss,
                                          trainable_vars=gen_vars,
                                          batch_size=64,
                                          name='gen_train',
                                          op_name='GEN')

        # Define GAN model
        gan = tflearn.DNN(gan_model)

        # Training
        # Prepare input data to feed to the discriminator
        disc_noise = np.random.uniform(-1., 1., size=[total_samples, z_dim])
        # Prepare target data to feed to the discriminator (0: fake image, 1: real image)
        y_disc_fake = np.zeros(shape=[total_samples])
        y_disc_real = np.ones(shape=[total_samples])
        y_disc_fake = tflearn.data_utils.to_categorical(y_disc_fake, 2)
        y_disc_real = tflearn.data_utils.to_categorical(y_disc_real, 2)

        # Prepare input data to feed to the stacked generator/discriminator
        gen_noise = np.random.uniform(-1., 1., size=[total_samples, z_dim])
        # Prepare target data to feed to the discriminator
        # Generator tries to fool the discriminator, thus target is 1 (e.g. real images)
        y_gen = np.ones(shape=[total_samples])
        y_gen = tflearn.data_utils.to_categorical(y_gen, 2)

        gan.fit(X_inputs={
            'input_gen_noise': gen_noise,
            'input_disc_noise': disc_noise,
            'input_disc_real': X,
        },
                Y_targets={
                    'target_gen': y_gen,
                    'target_disc_fake': y_disc_fake,
                    'target_disc_real': y_disc_real,
                    'gen_train': X_1
                },
                n_epoch=10)

        print('--GAN training finish--')

        # gen_dnn = tflearn.DNN(gen_net,session=gan.session)
        # gen_dnn.fit(X_inputs={'gen_input':gen_noise},Y_targets={'gen_train':X},n_epoch=10)

        gan.save(model_path)

    if stage == 'gen':
        gen_amount = 13000
        z_dim = 20  # Noise data points

        # Input Data
        gen_input = tflearn.input_data(shape=[None, z_dim],
                                       name='input_gen_noise')
        input_disc_noise = tflearn.input_data(shape=[None, z_dim],
                                              name='input_disc_noise')
        input_disc_real = tflearn.input_data(shape=[None, 9, 5, 1],
                                             name='input_disc_real')

        # Build Discriminator
        disc_fake = discriminator(generator(input_disc_noise))
        disc_real = discriminator(input_disc_real, reuse=True)
        disc_net = tf.concat([disc_fake, disc_real], axis=0)
        # Build Stacked Generator/Discriminator
        gen_net = generator(gen_input, reuse=True)
        stacked_gan_net = discriminator(gen_net, reuse=True)

        # Build Training Ops for both Generator and Discriminator.
        # Each network optimization should only update its own variable, thus we need
        # to retrieve each network variables (with get_layer_variables_by_scope).
        disc_vars = tflearn.get_layer_variables_by_scope('Discriminator')

        # We need 2 target placeholders, for both the real and fake image target.
        disc_target = tflearn.multi_target_data(
            ['target_disc_fake', 'target_disc_real'], shape=[None, 2])
        disc_model = tflearn.regression(disc_net,
                                        optimizer='adam',
                                        placeholder=disc_target,
                                        loss='categorical_crossentropy',
                                        trainable_vars=disc_vars,
                                        batch_size=64,
                                        name='target_disc',
                                        op_name='DISC')

        gen_vars = tflearn.get_layer_variables_by_scope('Generator')
        gan_model = tflearn.regression(stacked_gan_net,
                                       optimizer='adam',
                                       loss='categorical_crossentropy',
                                       trainable_vars=gen_vars,
                                       batch_size=64,
                                       name='target_gen',
                                       op_name='GEN')

        # Define GAN model, that output the generated images.
        gan = tflearn.DNN(gan_model)
        gan.load(model_path)

        # Create another model from the generator graph to generate some samples
        # for testing (re-using same session to re-use the weights learnt).
        gen = tflearn.DNN(gen_net, session=gan.session)
        # Dummy first row so np.concatenate has a seed; stripped below.
        g = np.zeros((1, 45))
        for i in range(int(gen_amount / 10)):
            z = np.random.uniform(-1., 1., size=[10, z_dim])
            g_part = np.array(gen.predict({'input_gen_noise': z}))
            g = np.concatenate((g, g_part), axis=0)

        # np.save('gen_result.npy', g[1:,:])
        # data = np.load('gen_result.npy')

        data = g[1:, :]
        sample_num = len(data)
        print('sample_number:{}'.format(sample_num))
        result2 = []
        data = np.abs(data)
        for i in range(len(data)):
            # if np.sum(data[i]) <= 1 and np.sum(data[i]) >= 0.9:
            if np.sum(data[i]) <= 100000:
                result2.append(data[i])
        result2 = np.array(result2)
        print('get:{}'.format(len(result2)))
        # Renormalize all but the last feature of each kept sample.
        # BUG FIX: iterate over the filtered array's own length; the original
        # used range(sample_num), which raises IndexError whenever the filter
        # above drops any row.
        for i in range(len(result2)):
            result2[i, :-1] = result2[i, :-1] / np.sum(result2[i, :-1])

        np.save('result2_' + str(gen_amount) + '.npy', result2)
Exemplo n.º 6
0
# Real single-channel 28x28 images for the discriminator.
input_disc_real = tflearn.input_data(shape=[None, 28, 28, 1],
                                     name='input_disc_real')

# Build Discriminator
disc_fake = discriminator(
    generator(input_disc_noise))  # [n,200]->[n,28,28,1]->[n,2]
disc_real = discriminator(input_disc_real, reuse=True)  # [n,28,28,1]->[n,2]
disc_net = tf.concat([disc_fake, disc_real], axis=0)  # [2n,2]~~[n,2]
# Build Stacked Generator/Discriminator
gen_net = generator(gen_input, reuse=True)  # [n,200]->[n,28,28,1]
stacked_gan_net = discriminator(gen_net, reuse=True)  # [n,28,28,1]->[n,2]

# Build Training Ops for both Generator and Discriminator.
# Each network optimization should only update its own variable, thus we need
# to retrieve each network variables (with get_layer_variables_by_scope).
disc_vars = tflearn.get_layer_variables_by_scope(
    'Discriminator')  # fetch all trainable variables under this scope
# We need 2 target placeholders, for both the real and fake image target.
disc_target = tflearn.multi_target_data(
    ['target_disc_fake', 'target_disc_real'], shape=[None, 2])
# 'DISC' op: trains only discriminator variables on the concatenated
# fake+real batch.
disc_model = tflearn.regression(disc_net,
                                optimizer='adam',
                                placeholder=disc_target,
                                loss='categorical_crossentropy',
                                trainable_vars=disc_vars,
                                batch_size=64,
                                name='target_disc',
                                op_name='DISC')

gen_vars = tflearn.get_layer_variables_by_scope('Generator')
gan_model = tflearn.regression(stacked_gan_net,
                               optimizer='adam',
Exemplo n.º 7
0
# Control-net input: image features (presumably — img_out is defined
# upstream of this chunk) concatenated with the previous control signal.
ctrl_in = tf.concat([img_out, last_ctrl], 1)

ctrl = ctrl_net(ctrl_in)

# Value network scores the (state, control) pair.
value_in = tf.concat([ctrl_in, ctrl], 1)
value = value_net(value_in)

# Score (higher is better)
y = tf.placeholder(tf.float32, [None, 1])

# Value net regresses to the observed score; control net maximizes the
# predicted value (minimizes 1 - value).
value_loss = tf.reduce_mean(tf.square(y - value))
#ctrl_loss = tf.reduce_mean(-tf.log(value))
ctrl_loss = tf.reduce_mean(1 - value)

# Control op updates only 'ctrl' variables; the value op also trains the
# shared 'image' feature extractor.
ctrl_vars = tfl.get_layer_variables_by_scope(
    'ctrl')  # + tfl.get_layer_variables_by_scope('image')
value_vars = tfl.get_layer_variables_by_scope(
    'value') + tfl.get_layer_variables_by_scope('image')

# placeholder=None: the control loss needs no external target to feed.
ctrl_model = tfl.regression(ctrl,
                            placeholder=None,
                            optimizer='adam',
                            loss=ctrl_loss,
                            trainable_vars=ctrl_vars,
                            batch_size=64,
                            name='target_ctrl',
                            op_name='ctrl_model')

value_model = tfl.regression(value,
                             placeholder=y,
                             optimizer='adam',
Exemplo n.º 8
0
# Example (supervised) control input used to train the value network.
input_example_ctrl = tfl.input_data([None, 2])

# Merge left/right observations into a 5x4 single-channel map.
input_map = tf.concat([input_left, input_right], 2)
input_map = tf.reshape(input_map, [-1, 5, 4, 1])

ctrl = ctrl_net(input_map, input_state)
# Two value-net evaluations sharing weights (reuse=True): one scoring the
# example control, one scoring the control net's own output.
value_example = value_net(input_map, input_state, input_example_ctrl)
value = value_net(input_map, input_state, ctrl, reuse=True)

# Score (higher is better)
y_example = tf.placeholder(tf.float32, [None, 1])

# Value net regresses to the example score; control net pushes the
# predicted value of its own output towards 1.
value_loss = tf.reduce_mean(tf.square(y_example - value_example))
ctrl_loss = tf.reduce_mean(tf.square(1 - value))

value_vars = tfl.get_layer_variables_by_scope('value')
value_reg = tfl.regression(value_example,
                           placeholder=y_example,
                           optimizer='adam',
                           loss=value_loss,
                           trainable_vars=value_vars,
                           batch_size=512,
                           name='target_value',
                           op_name='value_reg')

ctrl_vars = tfl.get_layer_variables_by_scope('ctrl')
ctrl_reg = tfl.regression(value,
                          placeholder=None,
                          optimizer='adam',
                          loss=ctrl_loss,
                          trainable_vars=ctrl_vars,