Code example #1
    def Discriminator(self, input, output_dim, name='discriminator'):
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            # Reshape the flat input back into a 28x28 grayscale image.
            input = tf.reshape(input, [-1, 28, 28, 1])

            l0 = tf.layers.conv2d(input, 1, [5, 5], strides=(1, 1), padding='same')

            # Two strided convolutions downsample 28x28 -> 14x14 -> 7x7.
            l1 = tf.layers.conv2d(l0, self.init_filter_size // 4, [5, 5], strides=(2, 2),
                                  padding='same', activation=tf.nn.leaky_relu)
            l1 = tf.layers.batch_normalization(l1)

            l2 = tf.layers.conv2d(
                l1,
                self.init_filter_size // 2,
                [5, 5],
                strides=(2, 2),
                padding='same',
                activation=tf.nn.leaky_relu
            )
            l2 = tf.layers.batch_normalization(l2)
            l2 = tf.layers.flatten(l2)
            # Cache the flattened features so other heads (e.g. Q) can reuse them.
            self.D_L2 = l2
            l3 = tf_utils.Dense(l2, 64, 'l3', activation=tf.nn.leaky_relu)

            logits = tf_utils.Dense(l3, output_dim, 'logits')
        return logits
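
A minimal usage sketch, not taken from the project: because the scope is opened with reuse=tf.AUTO_REUSE, calling the method on a real and a generated batch shares one set of discriminator weights. TensorFlow 1.x is assumed, and model, real_images, and fake_images are hypothetical names.

    import tensorflow as tf

    # Hypothetical setup: `model` exposes the Discriminator method above,
    # `real_images` / `fake_images` are [batch, 784] float tensors.
    real_logits = model.Discriminator(real_images, output_dim=1)
    fake_logits = model.Discriminator(fake_images, output_dim=1)  # weights shared via AUTO_REUSE

    # Standard GAN discriminator loss over the two logit sets.
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_logits),
                                                logits=real_logits)
        + tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logits),
                                                  logits=fake_logits))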
Code example #2
    def Q(self, h, disc_name='discriminator'):
        h = tf.identity(h)
        # The discriminator call is made for its side effect: it caches its
        # flattened convolutional features in self.D_L2, which Q consumes.
        D = self.Discriminator(h, self.num_classes, disc_name)
        features = self.D_L2
        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
            l3 = tf_utils.Dense(features, 128, 'q_l3', activation=tf.nn.leaky_relu)
            l3 = tf.layers.batch_normalization(l3, name='q_l3_batch_norm')

            l4 = tf_utils.Dense(l3, 64, 'q_l4', activation=tf.nn.elu)
            out = tf_utils.Dense(l4, self.noise_dim, 'q_dis_logits')
            out = tf.nn.tanh(out)
        return out
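
A reading of the code above, not documented in the project: placing the Q head inside the 'generator' variable scope means a scope-filtered variable collection trains it together with the generator. A sketch under that assumption; g_loss and the optimizer choice are hypothetical.

    import tensorflow as tf

    # Variables created under 'generator' include the q_* layers above, so the
    # Q head is updated by the generator's optimizer (g_loss is hypothetical).
    g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    g_optim = tf.train.AdamOptimizer(1e-4).minimize(g_loss, var_list=g_vars)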
Code example #3
    def Generator(self, z, output_dim, name='generator'):
        # A small MLP generator: bottleneck to half the noise width, expand
        # back, then project to output_dim with a tanh squashing at the end.
        L1 = tf_utils.Dense(z,
                            z.shape[1] // 2,
                            name=name + '/L1',
                            activation=tf.nn.relu)
        L2 = tf_utils.Dense(L1,
                            z.shape[1],
                            name=name + '/L2',
                            activation=tf.nn.relu)
        L3 = tf_utils.Dense(L2, output_dim, name=name + '/L3', activation=None)
        return tf.tanh(L3)
Code example #4
    def Generator(self, z, output_dim, name='generator', final=False):
        # Note: output_dim is unused; the output width is fixed by the deconv stack.
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            # Project the noise vector to a small feature map, then upsample it
            # with two stride-2 transposed convolutions.
            l0 = tf_utils.Dense(
                z,
                self.init_filter_size * self.init_kernel_size * self.init_kernel_size,
                'l0',
                activation=tf.nn.relu
            )
            l0 = tf.reshape(l0, [-1, self.init_kernel_size, self.init_kernel_size, self.init_filter_size])
            l0 = tf.layers.batch_normalization(l0)

            l1 = tf.layers.conv2d_transpose(
                l0,
                self.init_filter_size // 2,
                kernel_size=[5, 5],
                strides=(2, 2),
                padding='same',
                activation=tf.nn.relu
            )
            l1 = tf.layers.batch_normalization(l1)

            # Only the final generator in a stack squashes its output with tanh.
            activ = tf.nn.tanh if final else None
            fc = tf.layers.conv2d_transpose(l1, 1, [5, 5], strides=(2, 2), padding='same', activation=activ)
            fc = tf.layers.flatten(fc)
        return fc
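
For the 28x28 inputs used by the discriminators in these examples, the two stride-2 transposed convolutions imply init_kernel_size = 7 (7 -> 14 -> 28). A shape-check sketch with hypothetical sizes; model and the noise width are assumptions.

    import tensorflow as tf

    # Assuming init_kernel_size = 7 (hypothetical), the spatial path is
    # 7x7 -> 14x14 -> 28x28, so the flattened output is 784-wide.
    z = tf.placeholder(tf.float32, shape=[None, 100])
    fake = model.Generator(z, output_dim=784, final=True)  # output_dim is unused here
    print(fake.shape)  # (?, 784)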
Code example #5
    def Discriminator(self, input, output_dim, name='discriminator'):
        # Scope under the `name` argument so custom discriminator names are honored.
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            L1 = tf_utils.Dense(input,
                                256,
                                name=name + '/L1',
                                activation=tf.nn.leaky_relu)
            L2 = tf_utils.Dense(L1,
                                256,
                                name=name + '/L2',
                                activation=tf.nn.leaky_relu)
            # Cache the penultimate features for reuse by other heads.
            self.D_L2 = L2
            L3 = tf_utils.Dense(L2,
                                output_dim,
                                name=name + '/L3',
                                activation=None)
        return L3
Code example #6
    def encoder(self, h, hidden_dim, output_dim, name='encoder', recon_layer=-1):
        # recon_layer selects how much of the encoder to run:
        #   -1 (default): full encoder (conv trunk + all stacked dense layers)
        #    0 or 1:      conv trunk included, return dense layer recon_layer
        #    2+:          skip the conv trunk, feed h into the stack, return layer recon_layer
        dense_dim = hidden_dim

        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            self.encoder_actives = [tf.nn.leaky_relu, tf.nn.elu, None]
            input = h
            if recon_layer <= 1:
                # Convolutional trunk: 28x28 -> 14x14 -> 7x7, then flatten.
                input = tf.reshape(input, [-1, 28, 28, 1])
                input = tf.layers.conv2d(input, 1, [5, 5], strides=(1, 1), padding='same')

                input = tf.layers.conv2d(input, self.init_filter_size // 4, [5, 5], strides=(2, 2),
                                         padding='same', activation=tf.nn.leaky_relu)
                input = tf.layers.batch_normalization(input)
                input = tf.layers.conv2d(
                    input,
                    self.init_filter_size // 2,
                    [5, 5],
                    strides=(2, 2),
                    padding='same',
                    activation=tf.nn.leaky_relu
                )
                input = tf.layers.batch_normalization(input)
                input = tf.layers.flatten(input)
                input = tf_utils.Dense(input, dense_dim, name='reconst', activation=tf.nn.elu)
            for i in range(self.num_stack):
                # When reconstructing from a deeper layer, substitute h for the
                # running activation at that point in the stack.
                if recon_layer > 1 and recon_layer - 1 == i:
                    input = h

                if i == self.num_stack - 1:
                    dense_dim = output_dim
                input = tf_utils.Dense(input, dense_dim, activation=self.encoder_actives[i], name='l' + str(i))

                # Stop early and return the requested intermediate layer.
                if i == recon_layer:
                    return input
        return input
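
A sketch of how the recon_layer switch might be called; the tensors and sizes are hypothetical, not taken from the project.

    # Full encoding: conv trunk plus every stacked dense layer.
    code = model.encoder(x, hidden_dim=128, output_dim=10)

    # Early exit: include the conv trunk and return dense layer 0.
    l0_out = model.encoder(x, hidden_dim=128, output_dim=10, recon_layer=0)

    # Deep reconstruction: skip the conv trunk, feed h into the stack, return layer 2.
    l2_out = model.encoder(h, hidden_dim=128, output_dim=10, recon_layer=2)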
Code example #7
File: Network.py Project: jason9693/ComposerAi
    def __build_net__(self):
        self.X = tf.placeholder(dtype=tf.float32, shape=[None] + self.input_shape)
        self.y = tf.placeholder(dtype=tf.int32, shape=(None,))
        self.dropout = tf.placeholder(dtype=tf.float32, shape=())
        self.batch_size = tf.placeholder(dtype=tf.int32, shape=())

        self.Y = tf.one_hot(self.y, depth=self.num_classes, axis=-1)

        # Make the input time-major: [time, batch, features].
        x = tf.transpose(self.X, [1, 0, 2])
        # Use comma-separated context managers: `with A and B:` would only
        # enter B, silently dropping the name scope.
        with tf.name_scope('LSTM'), tf_utils.set_device_mode(self.gpu_mode):
            L0, _ = tf_utils.LSTMlayer(x, 256, self.batch_size, 0, self.gpu_mode)
            L0 = tf.nn.dropout(L0, keep_prob=1 - self.dropout)

            L1, _ = tf_utils.LSTMlayer(L0, 512, self.batch_size, 1, self.gpu_mode)
            L1 = tf.nn.dropout(L1, keep_prob=1 - self.dropout)

            L2, _ = tf_utils.LSTMlayer(L1, 256, self.batch_size, 2, self.gpu_mode)
            L2 = tf.nn.dropout(L2, keep_prob=1 - self.dropout)
            # Keep only the last time step for classification.
            L2 = L2[-1]
        with tf.name_scope('MLP'), tf_utils.set_device_mode(self.gpu_mode):
            MLP1 = tf_utils.Dense(L2, 256, name='M1', activation=None)
            MLP1 = tf.nn.dropout(MLP1, keep_prob=1 - self.dropout)

            MLP2 = tf_utils.Dense(MLP1, 128, name='M2', activation=None)
            MLP2 = tf.nn.dropout(MLP2, keep_prob=1 - self.dropout)

            self.logit = tf_utils.Dense(MLP2, self.num_classes, 'logit')
            self.out = tf.nn.softmax(self.logit)

            self.loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logit,
                labels=self.Y,
            )
        self.loss = tf.reduce_mean(self.loss)
        self.optim = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
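
A minimal training-step sketch against the placeholders defined above; the session wiring and the batch names are assumptions, not part of Network.py.

    import tensorflow as tf

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # batch_x: [batch, time, features], batch_y: [batch] int labels (hypothetical data).
        loss, _ = sess.run([net.loss, net.optim], feed_dict={
            net.X: batch_x,
            net.y: batch_y,
            net.dropout: 0.3,           # drop rate; each layer keeps 1 - 0.3 = 0.7
            net.batch_size: len(batch_x),
        })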
Code example #8
    def __fc_layer__(self, input, cycle, name, hidden_dims, sub=None):
        # Stacks `cycle` dense layers; the last one is linear. `hidden_dims`
        # may be a single width or a per-layer list. If `sub` is given, only
        # layer `sub` is built and returned.
        for i in range(cycle):
            activation = tf.nn.leaky_relu
            if isinstance(hidden_dims, int):
                output_shape = hidden_dims
            else:
                output_shape = hidden_dims[i]
            if i == cycle - 1:
                activation = None

            # Compare against None explicitly so that sub=0 selects layer 0.
            if sub is not None and sub != i:
                continue

            input = tf_utils.Dense(input, output_shape, name=name + '/fc' + str(i), activation=activation)
            if sub is not None:
                return input

        return input
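
A usage sketch from inside the class (tensors hypothetical): with a list of widths the helper stacks one dense layer per entry and leaves the last one linear; passing sub builds and returns only that single layer.

    # Three layers: 256 -> 128 -> 10, the final one linear.
    logits = self.__fc_layer__(x, cycle=3, name='head', hidden_dims=[256, 128, 10])

    # Build only layer 1 (128-wide, leaky ReLU) and return it immediately.
    mid = self.__fc_layer__(x, cycle=3, name='head', hidden_dims=[256, 128, 10], sub=1)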
Code example #9
    def Discriminator(self, input, output_dim, name='discriminator'):
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            input = tf.reshape(input, [-1, 28, 28, 1])

            l0 = tf.layers.conv2d(input, 1, [5, 5], strides=(1, 1), padding='same')

            l1 = tf.layers.conv2d(l0, self.init_filter_size // 4, [5, 5], strides=(2, 2),
                                  padding='same', activation=tf.nn.leaky_relu)
            l1 = tf.layers.batch_normalization(l1)

            l2 = tf.layers.conv2d(
                l1,
                self.init_filter_size // 2,
                [5, 5],
                strides=(2, 2),
                padding='same',
                activation=tf.nn.leaky_relu
            )
            l2 = tf.layers.batch_normalization(l2)

            l3 = tf.layers.flatten(l2)
            l3 = tf_utils.Dense(l3, 64, 'l3', activation=tf.nn.leaky_relu)

            # Real/fake logits for the adversarial loss.
            logits = tf_utils.Dense(l3, output_dim, 'logits')

        # InfoGAN-style Q head: lives in the generator scope so it is trained
        # together with the generator's variables.
        with tf.variable_scope('generator', reuse=tf.AUTO_REUSE):
            l3 = tf_utils.Dense(l3, 128, 'q_l3', activation=tf.nn.leaky_relu)
            l3 = tf.layers.batch_normalization(l3, name='q_l3_batch_norm')

            # Logits over the discrete latent codes.
            logits_dis = tf_utils.Dense(l3, 60, 'q_dis_logits', activation=tf.nn.leaky_relu)
            logits_dis = tf_utils.Dense(logits_dis, self.num_discrete, 'q_dis_logits_final')
            # Mean and (softplus-positive) stddev for the continuous codes.
            logits_cont = tf_utils.Dense(l3, self.num_continuous * 2, 'q_cont_logits')
            mu = logits_cont[:, :self.num_continuous]
            sigma = logits_cont[:, self.num_continuous:]
            sigma = 1e-8 + tf.nn.softplus(sigma)
        return logits, logits_dis, (mu, sigma)
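
The (mu, sigma) pair is the shape an InfoGAN-style mutual-information term consumes. A hedged sketch of the continuous-code loss, assuming c_cont holds the codes that were fed to the generator; the names and the weighting are not from the project.

    import numpy as np
    import tensorflow as tf

    logits, logits_dis, (mu, sigma) = model.Discriminator(fake_images, output_dim=1)

    # Negative Gaussian log-likelihood of the sampled continuous codes under
    # the predicted (mu, sigma); minimizing it pushes Q to recover the codes.
    cont_ll = -0.5 * tf.log(2.0 * np.pi * tf.square(sigma)) \
              - tf.square(c_cont - mu) / (2.0 * tf.square(sigma))
    q_cont_loss = -tf.reduce_mean(tf.reduce_sum(cont_ll, axis=1))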
Code example #10
    def encoder(self, h, hidden_dim, output_dim, name='encoder/'):
        # Prefix every layer with `name` so separate encoder instances do not collide.
        l0 = tf_utils.Dense(h, hidden_dim, activation=tf.nn.leaky_relu, name=name + '_l0')
        l1 = tf_utils.Dense(l0, hidden_dim, activation=tf.nn.elu, name=name + '_l1')
        out = tf_utils.Dense(l1, output_dim, activation=None, name=name + 'final')
        return out
Code example #11
File: GAN.py Project: jason9693/GANcompare_tfv1_v2
    def Generator(self, z, output_dim, name='generator'):
        L1 = tf_utils.Dense(z, z.shape[1] // 2, name=name + '/L1', activation=tf.nn.relu)
        L2 = tf_utils.Dense(L1, z.shape[1], name=name + '/L2', activation=tf.nn.relu)
        L3 = tf_utils.Dense(L2, output_dim, name=name + '/L3', activation=None)
        return tf.tanh(L3)