def __init__(self, shape, nhid=16):
    """Build the MLP decoder of the VAE.

    Args:
        shape: output sample shape (e.g. (1, 28, 28) for MNIST); flattened
            size is derived from it for the final MLP layer.
        nhid: dimensionality of the latent code fed to the decoder.
    """
    super(VAEMLPDecoder, self).__init__()
    flattened_size = torch.Size(shape).numel()
    self.shape = shape
    # Sigmoid keeps the reconstruction in [0, 1] (normalized pixel range).
    self.decode = nn.Sequential(
        MLP([nhid, 64, 128, 256, flattened_size], last_activation=False),
        nn.Sigmoid(),
    )
    # Inverse of the MNIST Normalize((0.1307,), (0.3081,)):
    # x_orig = x * std + mean  ==  Normalize(-mean/std, 1/std).
    # BUG FIX: the original applied the *forward* normalization here,
    # which does not undo the dataset transform despite the name invTrans.
    self.invTrans = transforms.Compose(
        [transforms.Normalize((-0.1307 / 0.3081,), (1.0 / 0.3081,))])
def get_data():
    """Return a DataLoader over the MNIST training set.

    Pixels are mapped to [-1, 1] via Normalize(mean=0.5, std=0.5) so the
    input distribution is roughly symmetric around zero, which makes
    training easier to converge.

    Returns:
        torch.utils.data.DataLoader yielding shuffled training batches
        (incomplete final batch dropped).
    """
    data_tf = transforms.Compose([
        # BUG FIX: was transforms.Totensor() (AttributeError at runtime).
        transforms.ToTensor(),
        # BUG FIX: Normalize requires both mean and std; the original
        # passed a single 2-element list. MNIST is single-channel.
        transforms.Normalize([0.5], [0.5]),
    ])
    train_dataset = datasets.MNIST(root='./data',
                                   train=True,
                                   transform=data_tf,
                                   download=True)
    # NOTE(review): batch_size is a module-level global defined elsewhere
    # in this file — confirm it is set before this function is called.
    train_loader = DataLoader(train_dataset,
                              shuffle=True,
                              batch_size=batch_size,
                              drop_last=True)
    return train_loader