Example #1
    def train_gen_step(self, dynamic_X, static_X, targ, pretrain=False):

        with tf.GradientTape() as tape:
            predictions = self._generator(dynamic_X, static_X)

            if pretrain:
                ## Pretraining: plain supervised loss against the targets
                loss = loss_function(tf.expand_dims(targ, 2), predictions)
            else:
                ## Adversarial training: the loss pushes the discriminator to misclassify the generated samples
                fake_Y = tf.reshape(predictions, (predictions.shape[0], predictions.shape[1]))
                fake_output = self._discriminator(dynamic_X, static_X, fake_Y)

                ## To keep the gradient signal from vanishing, the adversarial loss depends on the training mode
                loss = generator_loss(fake_output, self._mode)

        variables = self._generator.trainable_variables
        gradients = tape.gradient(loss, variables)

        if pretrain:
            self._gen_pre_optimizer.apply_gradients(zip(gradients, variables))
        else:
            self._gen_optimizer.apply_gradients(zip(gradients, variables))

        return loss
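
The step above calls loss_function and generator_loss, which are not shown in this example. The following is a minimal sketch of what such helpers could look like, assuming an MSE pretraining loss and the usual GAN / WGAN generator objectives; the names and signatures are taken from the call sites above, everything else is an assumption.

    import tensorflow as tf

    # Hypothetical helpers; the real implementations are not shown in the source.
    mse = tf.keras.losses.MeanSquaredError()
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def loss_function(real, pred):
        ## Supervised regression loss used for pretraining
        return mse(real, pred)

    def generator_loss(fake_output, mode):
        ## GAN: push the discriminator scores on generated samples towards the "real" label
        if mode == 'GAN':
            return bce(tf.ones_like(fake_output), fake_output)
        ## WGAN / WGAN-GP: maximise the critic score on fakes, i.e. minimise its negative
        return -tf.reduce_mean(fake_output)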
Example #2
    def train_discriminator_step(self,
                                 dynamic_X,
                                 static_X,
                                 real_Y,
                                 fake_Y,
                                 pretrain=False):

        ## Train once all inputs (real and generated sequences) are available
        with tf.GradientTape() as tape:

            ## Score both the generated and the real sequences with the discriminator
            fake_output = self._discriminator(dynamic_X, static_X, fake_Y)
            real_output = self._discriminator(dynamic_X, static_X, real_Y)

            dis_loss = discriminator_loss(real_output, fake_output, self._mode)

            ## Add an MSE term to keep the optimisation direction consistent
            if self._mode == 'GAN':
                dis_loss += loss_function(real_Y, fake_Y)

            if self._mode == 'WGAN-GP':
                ## Add the gradient penalty
                dis_loss += self._penalty_weight * self.gradient_penalty(
                    dynamic_X, static_X, real_Y, fake_Y)

        gradients = tape.gradient(dis_loss,
                                  self._discriminator.trainable_variables)

        if pretrain:
            self._dis_pre_optimizer.apply_gradients(
                zip(gradients, self._discriminator.trainable_variables))
        else:
            self._dis_optimizer.apply_gradients(
                zip(gradients, self._discriminator.trainable_variables))

        if self._mode == 'WGAN':
            ## Plain WGAN: clip the critic weights to enforce the Lipschitz constraint
            for p in self._discriminator.trainable_variables:
                p.assign(tf.clip_by_value(p, -0.1, 0.1))

        return dis_loss
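
For context, here is a hedged sketch of discriminator_loss and of a WGAN-GP style gradient_penalty written as a method of the same class, since both are called above but not shown. The discriminator call signature (dynamic_X, static_X, Y) follows the step above; the interpolation-based penalty is the standard WGAN-GP formulation, not necessarily the author's exact code.

    import tensorflow as tf

    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

    def discriminator_loss(real_output, fake_output, mode):
        if mode == 'GAN':
            ## Standard GAN: real samples labelled 1, generated samples labelled 0
            return (bce(tf.ones_like(real_output), real_output)
                    + bce(tf.zeros_like(fake_output), fake_output))
        ## WGAN / WGAN-GP critic loss: drive real scores up and fake scores down
        return tf.reduce_mean(fake_output) - tf.reduce_mean(real_output)

    def gradient_penalty(self, dynamic_X, static_X, real_Y, fake_Y):
        ## Interpolate between real and generated sequences
        alpha = tf.random.uniform([tf.shape(real_Y)[0], 1], 0.0, 1.0, dtype=real_Y.dtype)
        interpolated = alpha * real_Y + (1.0 - alpha) * fake_Y
        with tf.GradientTape() as gp_tape:
            gp_tape.watch(interpolated)
            scores = self._discriminator(dynamic_X, static_X, interpolated)
        grads = gp_tape.gradient(scores, interpolated)
        ## Penalise deviations of the gradient norm from 1
        norm = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=1) + 1e-12)
        return tf.reduce_mean((norm - 1.0) ** 2)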
Example #3
    def train_step(self, dynamic_features, static_features, targ, enc_hidden):

        with tf.GradientTape() as tape:

            enc_output, enc_hidden = self._encoder(dynamic_features, enc_hidden)

            ## enc_output holds the encoded sequence; the sequence features are fed in parallel
            # print('Shape of enc output:{}'.format(enc_output.shape))
            # print('Shape of enc hidden:{}'.format(enc_hidden.shape))
            dec_input = tf.cast(tf.expand_dims([0] * self._batch_sz, 1), tf.float64)
            loss = 0

            dec_hidden = enc_hidden

            for t in range(0, targ.shape[1]):

                predictions, dec_hidden = self._decoder(dec_input, dec_hidden, enc_output, static_features)

                loss += loss_function(tf.expand_dims(targ[:, t], 1), predictions)

                ## With teacher forcing, the ground truth at time t would become the input at t+1:
                # dec_input = tf.expand_dims(targ[:, t], 1)
                ## Without teacher forcing, feed the prediction back in
                dec_input = predictions

            batch_loss = (loss / int(targ.shape[1]))

        variables = self._encoder.trainable_variables + self._decoder.trainable_variables
        gradients = tape.gradient(loss, variables)
        self._optimizer.apply_gradients(zip(gradients, variables))

        return batch_loss
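
A minimal sketch of how this seq2seq step could be driven over a dataset is shown below. The names model, train_dataset, EPOCHS and the initialize_hidden_state() helper are assumptions for illustration and are not part of the original code.

    EPOCHS = 10

    for epoch in range(EPOCHS):
        ## Fresh encoder state at the start of each epoch (hypothetical helper)
        enc_hidden = model._encoder.initialize_hidden_state()
        total_loss = 0.0

        for batch, (dynamic_X, static_X, targ) in enumerate(train_dataset):
            batch_loss = model.train_step(dynamic_X, static_X, targ, enc_hidden)
            total_loss += batch_loss

        print('Epoch {} loss {:.4f}'.format(epoch + 1, total_loss / (batch + 1)))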