Example #1
    def __build_net__(self):
        with tf_utils.set_device_mode(par.gpu_mode):
            # Placeholders for the real data batch and the generator's noise input.
            self.X = tf.placeholder(shape=[None] + self.input_shape, dtype=tf.float32, name='X')
            self.Z = tf.placeholder(shape=[None, self.noise_dim], dtype=tf.float32, name='random_z')

            # Generator maps noise to samples with the same feature size as X.
            self.G = self.Generator(self.Z, self.X.shape[1])

            # Discriminator is applied to both real data and generated samples.
            self.D = self.Discriminator(self.X, self.num_classes)
            self.D_G = self.Discriminator(self.G, self.num_classes)

            self.__set_loss_and_optim__()
        return
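A minimal usage sketch, not part of the example: it assumes the object built above (here called `gan`) exposes the `noise_dim`, `Z` and `G` attributes from `__build_net__` and that the caller owns a tf.Session; the helper name is illustrative.

import numpy as np

def sample_generator(sess, gan, n_samples):
    """Draw n_samples noise vectors and return the generator's outputs."""
    z = np.random.normal(size=(n_samples, gan.noise_dim)).astype(np.float32)
    return sess.run(gan.G, feed_dict={gan.Z: z})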
Example #2
    def __build_net__(self):
        # Placeholders: sequence batch, integer labels, dropout rate and batch size.
        self.X = tf.placeholder(dtype=tf.float32, shape=[None] + self.input_shape)
        self.y = tf.placeholder(dtype=tf.int32, shape=(None,))
        self.dropout = tf.placeholder(dtype=tf.float32, shape=())
        self.batch_size = tf.placeholder(dtype=tf.int32, shape=())

        self.Y = tf.one_hot(self.y, depth=self.num_classes, axis=-1)

        # Switch to time-major layout [time, batch, features] for the LSTM layers.
        x = tf.transpose(self.X, [1, 0, 2])
        with tf.name_scope('LSTM'), tf_utils.set_device_mode(self.gpu_mode):
            # Three stacked LSTM layers with dropout; keep only the last time step.
            L0, _ = tf_utils.LSTMlayer(x, 256, self.batch_size, 0, self.gpu_mode)
            L0 = tf.nn.dropout(L0, keep_prob=1 - self.dropout)

            L1, _ = tf_utils.LSTMlayer(L0, 512, self.batch_size, 1, self.gpu_mode)
            L1 = tf.nn.dropout(L1, keep_prob=1 - self.dropout)

            L2, _ = tf_utils.LSTMlayer(L1, 256, self.batch_size, 2, self.gpu_mode)
            L2 = tf.nn.dropout(L2, keep_prob=1 - self.dropout)
            L2 = L2[-1]
        with tf.name_scope('MLP'), tf_utils.set_device_mode(self.gpu_mode):
            # Two fully connected layers with dropout, then the classification head.
            MLP1 = tf_utils.Dense(L2, 256, name='M1', activation=None)
            MLP1 = tf.nn.dropout(MLP1, keep_prob=1 - self.dropout)

            MLP2 = tf_utils.Dense(MLP1, 128, name='M2', activation=None)
            MLP2 = tf.nn.dropout(MLP2, keep_prob=1 - self.dropout)

            self.logit = tf_utils.Dense(MLP2, self.num_classes, 'logit')
            self.out = tf.nn.softmax(self.logit)

            self.loss = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logit,
                labels=self.Y,
            )
        self.loss = tf.reduce_mean(self.loss)
        self.optim = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss)
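A minimal training-step sketch, assuming the class exposes the attributes built above and the caller owns a tf.Session; it feeds `dropout` as the dropped fraction since keep_prob = 1 - dropout. The `train_step` helper and its defaults are illustrative, not from the source.

def train_step(sess, model, batch_x, batch_y, dropout_rate=0.5):
    """Run one RMSProp step; batch_x is [batch, time, features], batch_y is [batch]."""
    feed = {
        model.X: batch_x,
        model.y: batch_y,
        model.dropout: dropout_rate,
        model.batch_size: batch_x.shape[0],
    }
    _, loss_value = sess.run([model.optim, model.loss], feed_dict=feed)
    return loss_value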
Example #3
    def __build_net__(self):
        with tf_utils.set_device_mode(par.gpu_mode):
            # Real data, generator noise, and the discrete/continuous latent codes.
            self.X = tf.placeholder(shape=[None] + self.input_shape, dtype=tf.float32, name='X')
            self.Z = tf.placeholder(shape=[None, self.noise_dim], dtype=tf.float32, name='random_z')
            self.c_dis = tf.placeholder(shape=[None], dtype=tf.uint8, name='input_c_disc')
            self.c_cont = tf.placeholder(shape=[None, self.num_continuous], dtype=tf.float32, name='input_c_cont')
            
        with tf_utils.set_device_mode(False):
            # Encode the discrete latent code, then concatenate all latent parts.
            self.C_DISC = tf.one_hot(indices=self.c_dis, depth=self.num_discrete, dtype=tf.float32, axis=-1)
            latent = tf.concat([self.C_DISC, self.Z, self.c_cont], 1)
            
        with tf_utils.set_device_mode(par.gpu_mode):
            self.G = self.Generator(latent, self.X.shape[1])

            # Discriminator returns the real/fake output plus the Q heads for the
            # discrete and continuous latent codes.
            self.D, _, _ = self.Discriminator(self.X, self.num_classes)
            self.D_G, self.Qdis, self.Qcont_params = self.Discriminator(self.G, self.num_classes)

            self.__set_loss_and_optim__()
        return
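A sketch of how the three latent placeholders above might be fed; the sampling distributions and the `sample_latent_feed` helper are assumptions, while the attribute names follow the example.

import numpy as np

def sample_latent_feed(model, n_samples):
    """Build a feed_dict for the noise, discrete-code and continuous-code placeholders."""
    return {
        model.Z: np.random.normal(size=(n_samples, model.noise_dim)).astype(np.float32),
        model.c_dis: np.random.randint(0, model.num_discrete, size=n_samples).astype(np.uint8),
        model.c_cont: np.random.uniform(-1.0, 1.0, size=(n_samples, model.num_continuous)).astype(np.float32),
    }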