Loss: SGAN-style loss; the discriminator outputs a probability, so it needs a sigmoid.
Architecture: MLP; the first MLP layer concatenates a one-hot label as the condition.
Data format: no convolutions, no depth/channel axis; images are scaled into [0, 1].
Generator: sigmoid output, mapped into [0, 1] to match the data format.
Discriminator: sigmoid output, mapped into [0, 1] to satisfy the loss formula's constraint.
Initialization: Xavier init, i.e. Glorot uniform, which accounts for input/output dimensions.
Training: discriminator and generator are trained simultaneously (synchronously),
without favoring either side.
"""
import my_mnist
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import time

# Quick sanity check of the dataset: load normalized MNIST, show the first label
# and image.
# NOTE(review): the keyword is spelled 'detype' (not 'dtype') — presumably the
# signature of the project-local my_mnist.load_data; confirm against that module.
(train_images, train_labels), (_, _) = my_mnist.load_data(get_new=False,
                                                          normalization=True,
                                                          one_hot=True,
                                                          detype=np.float32)
# Flatten the sample axis back to (N, 28, 28); no channel dimension (MLP input).
train_images = train_images.reshape(train_images.shape[0], 28, 28).astype('float32')
print(train_labels[0])
plt.imshow(train_images[0, :, :], cmap='gray')
plt.show()


class Dense(layers.Layer):
    # Hand-rolled fully-connected layer: creates its own weight variable with
    # Glorot-uniform ("Xavier") initialization, per the module header.
    # NOTE(review): this class definition is truncated by the chunk boundary —
    # the tf.Variable(...) call below continues outside this view.
    def __init__(self, input_dim, units):
        super(Dense, self).__init__()
        initializer = tf.initializers.glorot_uniform()
        # initializer = tf.initializers.glorot_normal()
        self.w = tf.Variable(initial_value=initializer(shape=(input_dim, units),
conv_out = tf.nn.conv2d( input=x, filters=self.w, strides=self.strides, padding=self.pandding_way, data_format='NHWC', dilations=None, name=None) #dilations是空洞卷积的一个系数 相当于对卷积核做上采样同时部分置零 这里不进行空洞卷积 l_out = conv_out + self.b return l_out if __name__ == "__main__": (train_images, train_labels), (_, _) = my_mnist.load_data(get_new=False, normalization=False, one_hot=True, detype=np.float32) train_images = (train_images.astype('float32') - 127.5) / 127.5 train_labels = (train_labels.astype('float32') - 0.5) / 0.5 train_images = train_images.reshape(train_images.shape[0], 28, 28, 1) print(train_labels[0]) plt.imshow(train_images[0, :, :, 0], cmap='gray') plt.show() x = tf.random.normal(shape=(64, 784)) a = Dense(28 * 28, 128) print(a(x)) print(len(a.trainable_variables)) y = train_images[0:1, :, :, 0:1] C1 = Conv2D([28, 28, 1], 2, [5, 5],