Example 1
    def compute_errors(self, epoch):
        filename_model = os.path.join(self.dir_models,
                                      'epoch_{}.pth'.format(epoch))
        g = Generator(self.nb_channels_first_layer, self.dim)
        g.cuda()
        g.load_state_dict(torch.load(filename_model))
        g.eval()

        criterion = torch.nn.MSELoss()

        def _compute_error(dir_z, dir_x, train_test):
            dataset = EmbeddingsImagesDataset(dir_z, dir_x)
            dataloader = DataLoader(dataset,
                                    batch_size=512,
                                    num_workers=4,
                                    pin_memory=True)

            error = 0

            for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                g_z = g.forward(z)

                error += criterion(g_z, x).data.cpu().numpy()

            error /= len(dataloader)

            print('Error for {}: {}'.format(train_test, error))

        _compute_error(self.dir_z_train, self.dir_x_train, 'train')
        _compute_error(self.dir_z_test, self.dir_x_test, 'test')
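Note: the loop above accumulates the per-batch MSE and divides by the number of batches, so the result is a mean of batch means. A minimal sketch of the same evaluation in current PyTorch (no Variable wrapper, gradients disabled); Generator and EmbeddingsImagesDataset are assumed to have the interfaces used above:

    import torch
    from torch.utils.data import DataLoader

    def mean_mse(g, dir_z, dir_x, batch_size=512):
        # Mean of per-batch MSE, matching compute_errors above.
        dataset = EmbeddingsImagesDataset(dir_z, dir_x)
        dataloader = DataLoader(dataset, batch_size=batch_size,
                                num_workers=4, pin_memory=True)
        criterion = torch.nn.MSELoss()
        error = 0.0
        with torch.no_grad():  # evaluation only, no autograd graph needed
            for batch in dataloader:
                x = batch['x'].float().cuda()
                z = batch['z'].float().cuda()
                error += criterion(g(z), x).item()
        return error / len(dataloader)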
Example 2
    def analyze_model(self, epoch):
        filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
        g = Generator(self.nb_channels_first_layer, self.dim)
        g.cuda()
        g.load_state_dict(torch.load(filename_model))
        g.eval()

        nb_samples = 50
        batch_z = np.zeros((nb_samples, 32 * self.nb_channels_first_layer, 4, 4))
        # batch_z = np.maximum(5*np.random.randn(nb_samples, 32 * self.nb_channels_first_layer, 4, 4), 0)
        # batch_z = 5 * np.random.randn(nb_samples, 32 * self.nb_channels_first_layer, 4, 4)

        for i in range(4):
            for j in range(4):
                batch_z[:, :, i, j] = create_path(nb_samples)
        # batch_z[:, :, 0, 0] = create_path(nb_samples)
        # batch_z[:, :, 0, 1] = create_path(nb_samples)
        # batch_z[:, :, 1, 0] = create_path(nb_samples)
        # batch_z[:, :, 1, 1] = create_path(nb_samples)
        batch_z = np.maximum(batch_z, 0)

        z = Variable(torch.from_numpy(batch_z)).type(torch.FloatTensor).cuda()
        temp = g.main._modules['4'].forward(z)
        for i in range(5, 10):
            temp = g.main._modules['{}'.format(i)].forward(temp)

        g_z = temp.data.cpu().numpy().transpose((0, 2, 3, 1))

        folder_to_save = os.path.join(self.dir_experiment, 'epoch_{}_path_after_linear_only00_path'.format(epoch))
        create_folder(folder_to_save)

        for idx in range(nb_samples):
            filename_image = os.path.join(folder_to_save, '{}.png'.format(idx))
            Image.fromarray(np.uint8((g_z[idx] + 1) * 127.5)).save(filename_image)
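The loop above feeds batch_z through modules '4' to '9' of g.main only, skipping the first four layers. Assuming g.main is an nn.Sequential, the same tail can be expressed by slicing; a sketch:

    import torch.nn as nn

    # Slice off the first four layers of g.main and run the remaining ones
    # ('4' through '9') in a single call; equivalent to the manual loop above.
    tail = nn.Sequential(*list(g.main.children())[4:10])
    g_z = tail(z)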
Example 3
 def get_generator(self, epoch_to_load):
     filename_model = self.dir_models / 'epoch_{}.pth'.format(epoch_to_load)
     g = Generator(self.nb_channels_first_layer, self.dim)
     g.load_state_dict(torch.load(filename_model))
     g.cuda()
     g.eval()
     return g
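A hypothetical usage of get_generator (the name trainer and the epoch number are illustrative; dir_models is presumably a pathlib.Path, given the / join):

    g = trainer.get_generator(epoch_to_load=100)
    with torch.no_grad():
        z = torch.randn(16, trainer.dim).cuda()
        samples = g(z)  # inference with gradients disabled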
Example 4
        def compute_errors(epoch):
            filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
            print("model file name is {}".format(filename_model))
            g = Generator(self.nb_channels_first_layer, self.dim)
            g.cuda()
            g.load_state_dict(torch.load(filename_model))
            g.eval()
            criterion = torch.nn.MSELoss()

            def _compute_error(dir_z, dir_x, train_test):
                fileCount = len(os.listdir(dir_x + "/"))
                dataset = EmbeddingsImagesDataset(dir_z, dir_x)
                dataloader = DataLoader(dataset, batch_size=32, pin_memory=True)
                error = 0
                ssim = 0
                psnr = 0
                for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                    x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                    z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                    g_z = g.forward(z)
                    error += criterion(g_z, x).data.cpu().numpy()

                    for idx_ii in range(g_z.data.cpu().numpy().shape[0]):
                        out = g_z.data.cpu().numpy()[idx_ii].transpose((1, 2, 0))
                        x_out = x.data.cpu().numpy()[idx_ii].transpose((1, 2, 0))
                        ssim += compare_ssim(x_out, out, multichannel=True)  # per-image SSIM
                        psnr += compare_psnr(x_out, out)

                error /= len(dataloader)
                ssim /= fileCount
                psnr /= fileCount  # average over all images
                print('Error for {}: {}'.format(train_test, error))
                return error, psnr, ssim

            return _compute_error(self.dir_z_train, self.dir_x_train, 'train')
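compare_ssim and compare_psnr were removed from skimage.measure in recent scikit-image releases; the equivalents live in skimage.metrics, with channel_axis replacing multichannel. A sketch for images scaled to [-1, 1] (peak-to-peak range 2):

    from skimage.metrics import peak_signal_noise_ratio, structural_similarity

    def image_metrics(x_out, out):
        # data_range=2 because the images here are scaled to [-1, 1].
        ssim = structural_similarity(x_out, out, channel_axis=-1, data_range=2)
        psnr = peak_signal_noise_ratio(x_out, out, data_range=2)
        return ssim, psnr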
Example 5
    def fusion_image(self, path1, epoch1, path2, epoch2):

        filename_model1 = os.path.join(path1, 'epoch_{}.pth'.format(epoch1))
        filename_model2 = os.path.join(path2, 'epoch_{}.pth'.format(epoch2))

        models_1 = torch.load(filename_model1)
        models_2 = torch.load(filename_model2)

        g1 = Generator(self.nb_channels_first_layer, self.dim)
        g2 = Generator(self.nb_channels_first_layer, self.dim)
        g1.cuda()
        g2.cuda()
        g1.load_state_dict(models_1)
        g2.load_state_dict(models_2)
        g1.eval()
        g2.eval()

        criterion = torch.nn.MSELoss()

        def _compute_error(dir_z, dir_x, train_test):
            dataset = EmbeddingsImagesDataset(dir_z, dir_x)
            dataloader = DataLoader(dataset, batch_size=16, num_workers=4, pin_memory=True)

            error = 0
            error1 = 0
            error2 = 0
            for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                g1_z = g1.forward(z)
                g2_z = g2.forward(z)

                error1 += criterion(g1_z, x).data.cpu().numpy()
                error2 += criterion(g2_z, x).data.cpu().numpy()
                error += criterion((g1_z + g2_z) / 2.0, x).data.cpu().numpy()

            error1 /= len(dataloader)
            error2 /= len(dataloader)
            error /= len(dataloader)
            psnr1 = 10 * np.log10(255 * 255 / error1) / 3.0
            psnr2 = 10 * np.log10(255 * 255 / error2) / 3.0
            psnr = 10 * np.log10(255 * 255 / error) / 3.0
            print('MSE1 and PSNR1 for {}: {},{}'.format(train_test, error1, psnr1))
            print('MSE2 and PSNR2 for {}: {},{}'.format(train_test, error2, psnr2))
            print('MSE_f and PSNR_f for {}: {},{}'.format(train_test, error, psnr))

        _compute_error(self.dir_z_train, self.dir_x_train, 'train')
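For reference, the standard definition is PSNR = 10 * log10(MAX^2 / MSE), with MAX the peak signal value; the /3.0 factor above is particular to this snippet. A minimal helper for images scaled to [-1, 1], where the peak-to-peak range is 2 (an assumption about the data convention):

    import numpy as np

    def psnr_from_mse(mse, peak=2.0):
        # PSNR = 10 * log10(MAX^2 / MSE); MAX defaults to the peak-to-peak
        # range of images scaled to [-1, 1].
        return 10.0 * np.log10(peak ** 2 / mse)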
Example 6

def visualizeArchi():
    net = Generator(32, 4096)
    x = Variable(torch.randn(1, 4096))
    y = net(x)
    g = make_dot(y)
    g.view()
Example 7

def make_dot(var):
    # NOTE: the beginning of this function was truncated in the source; the
    # body below is reconstructed in the style of the standard pytorchviz
    # make_dot helper (requires: from graphviz import Digraph).
    dot = Digraph(node_attr=dict(style='filled', shape='box'))
    seen = set()

    def add_nodes(var):
        if var not in seen:
            dot.node(str(id(var)), str(type(var).__name__))
            seen.add(var)
            if hasattr(var, 'next_functions'):
                for u in var.next_functions:
                    if u[0] is not None:
                        dot.edge(str(id(u[0])), str(id(var)))
                        add_nodes(u[0])
            if hasattr(var, 'saved_tensors'):
                for t in var.saved_tensors:
                    dot.edge(str(id(t)), str(id(var)))
                    add_nodes(t)

    add_nodes(var.grad_fn)
    return dot


def visualizeArchi():
    net = Generator(32, 4096)
    x = Variable(torch.randn(1, 4096))
    y = net(x)
    g = make_dot(y)
    g.view()


def modelSummary(model, inputsize):
    summary(model, inputsize)


if __name__ == "__main__":
    net = Generator(32, 4096)
    x = Variable(torch.randn(1, 4096))
    y = net(x)
    g = make_dot(y)
    g.view()

    summary(net, (1, 4096))
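Besides g.view(), the Digraph returned by make_dot can be written straight to disk; a small sketch using graphviz's render (the filename is illustrative):

    g = make_dot(y)
    g.render('generator_graph', format='png', cleanup=True)  # writes generator_graph.png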
Example 8
    def train(self, epoch_to_restore=0):
        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = os.path.join(
                self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset,
                                self.batch_size,
                                shuffle=True,
                                num_workers=4,
                                pin_memory=True)
        fixed_dataloader = DataLoader(dataset, 16)
        fixed_batch = next(iter(fixed_dataloader))

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)

        try:
            epoch = epoch_to_restore
            while True:
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(
                            tqdm(dataloader)):
                        g.zero_grad()
                        x = Variable(current_batch['x']).type(
                            torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(
                            torch.FloatTensor).cuda()
                        g_z = g.forward(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    writer.add_scalar('train_loss', loss, epoch)

                z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                g.eval()
                g_z = g.forward(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models,
                                        'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
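writer.add_scalar('train_loss', loss, epoch) above records only the last batch's loss and passes a raw tensor to TensorBoard. A sketch that accumulates a running mean and logs a plain float once per epoch, assuming the same names as in the loop above:

    def run_epoch(g, dataloader, criterion, optimizer):
        # One training epoch; returns the mean loss as a Python float.
        epoch_loss = 0.0
        for current_batch in tqdm(dataloader):
            g.zero_grad()
            x = current_batch['x'].float().cuda()
            z = current_batch['z'].float().cuda()
            loss = criterion(g(z), x)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()  # .item() detaches to a float
        return epoch_loss / len(dataloader)

    # writer.add_scalar('train_loss', run_epoch(g, dataloader, criterion, optimizer), epoch)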
Example 9
    def generate_from_model(self, epoch):
        filename_model = os.path.join(self.dir_models,
                                      'epoch_{}.pth'.format(epoch))
        g = Generator(self.nb_channels_first_layer, self.dim)
        g.load_state_dict(torch.load(filename_model))
        g.cuda()
        g.eval()

        def _generate_from_model(dir_z, dir_x, train_test):
            dataset = EmbeddingsImagesDataset(dir_z, dir_x)
            fixed_dataloader = DataLoader(dataset, 16)
            fixed_batch = next(iter(fixed_dataloader))

            z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
            g_z = g.forward(z)
            filename_images = os.path.join(
                self.dir_experiment,
                'epoch_{}_{}.png'.format(epoch, train_test))
            temp = make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose(
                (1, 2, 0))
            Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)

        _generate_from_model(self.dir_z_train, self.dir_x_train, 'train')
        _generate_from_model(self.dir_z_test, self.dir_x_test, 'test')

        def _generate_path(dir_z, dir_x, train_test):
            dataset = EmbeddingsImagesDataset(dir_z, dir_x)
            fixed_dataloader = DataLoader(dataset, 2, shuffle=True)
            fixed_batch = next(iter(fixed_dataloader))

            z0 = fixed_batch['z'][[0]].numpy()
            z1 = fixed_batch['z'][[1]].numpy()

            batch_z = np.copy(z0)

            nb_samples = 100

            interval = np.linspace(0, 1, nb_samples)
            for t in interval:
                if t > 0:
                    zt = normalize((1 - t) * z0 + t * z1)
                    batch_z = np.vstack((batch_z, zt))

            z = Variable(torch.from_numpy(batch_z)).type(
                torch.FloatTensor).cuda()
            g_z = g.forward(z)

            # filename_images = os.path.join(self.dir_experiment, 'path_epoch_{}_{}.png'.format(epoch, train_test))
            # temp = make_grid(g_z.data, nrow=nb_samples).cpu().numpy().transpose((1, 2, 0))
            # Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)

            g_z = g_z.data.cpu().numpy().transpose((0, 2, 3, 1))

            folder_to_save = os.path.join(
                self.dir_experiment,
                'epoch_{}_{}_path'.format(epoch, train_test))
            create_folder(folder_to_save)

            for idx in range(nb_samples):
                filename_image = os.path.join(folder_to_save,
                                              '{}.png'.format(idx))
                Image.fromarray(np.uint8(
                    (g_z[idx] + 1) * 127.5)).save(filename_image)

        _generate_path(self.dir_z_train, self.dir_x_train, 'train')
        _generate_path(self.dir_z_test, self.dir_x_test, 'test')

        def _generate_random():
            nb_samples = 16
            z = np.random.randn(nb_samples, self.dim)
            norms = np.sqrt(np.sum(z**2, axis=1))
            norms = np.expand_dims(norms, axis=1)
            norms = np.repeat(norms, self.dim, axis=1)
            z /= norms

            z = Variable(torch.from_numpy(z)).type(torch.FloatTensor).cuda()
            g_z = g.forward(z)
            filename_images = os.path.join(self.dir_experiment,
                                           'epoch_{}_random.png'.format(epoch))
            temp = make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose(
                (1, 2, 0))
            Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)

        _generate_random()
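_generate_path renormalizes a linear interpolation back onto the unit sphere at every step. Spherical linear interpolation (slerp) follows the great circle directly and is a common alternative for unit-norm embeddings; a sketch, assuming z0 and z1 are unit-norm arrays as above:

    import numpy as np

    def slerp(z0, z1, t):
        # Spherical linear interpolation between unit-norm latent codes.
        omega = np.arccos(np.clip(np.dot(z0.ravel(), z1.ravel()), -1.0, 1.0))
        if np.isclose(omega, 0.0):
            return z0  # endpoints (nearly) coincide
        return (np.sin((1 - t) * omega) * z0 + np.sin(t * omega) * z1) / np.sin(omega)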
Example 10
    def train(self, epoch_to_restore=0):
        g = Generator(self.nb_channels_first_layer, self.dim)  # generator network
        if epoch_to_restore > 0:
            filename_model = os.path.join(
                self.dir_models,
                'epoch_{}.pth'.format(epoch_to_restore))  # load a previously trained model
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)  # otherwise initialize the network weights

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train,
                                          self.dir_x_train)  # build the embeddings dataset
        dataloader = DataLoader(dataset,
                                self.batch_size,
                                shuffle=True,
                                num_workers=4,
                                pin_memory=True)

        fixed_dataloader = DataLoader(dataset, 16)  # data used as a validation set

        # idx = iter(fixed_dataloader)
        # next(idx)
        # next(idx)

        fixed_batch = next(iter(fixed_dataloader))  # all values scaled by /127.5 - 1
        # fixed_batch1 = next(iter(dataloader))
        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)  # log writer; the argument is the log folder
        # lr = np.arange(1e-5,1.0,(1.0-1e-5)/512.)
        try:
            epoch = epoch_to_restore
            while True:

                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1
                    print("epoch is %d" % epoch)
                    for idx_batch, current_batch in enumerate(
                            tqdm(dataloader)):
                        # print idx_batch
                        # optimizer = optim.Adam(g.parameters(), lr=lr[idx_batch])
                        g.zero_grad()  # zero the gradients
                        x = Variable(current_batch['x']).type(
                            torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(
                            torch.FloatTensor).cuda()
                        g_z = g.forward(z)  # forward pass

                        loss = criterion(g_z, x)  # compute the loss
                        loss.backward()  # backward pass
                        optimizer.step()  # update the weights

                        # writer.add_scalar('train_loss', loss, idx_batch + (epoch-1)*512)  # log the training loss
                        # writer.add_scalar('lr', loss, lr[idx_batch]*100000 + 100000*0.5*(epoch - 1))
                        if idx_batch % 4 == 0:
                            writer.add_scalar('train_loss_batch', loss,
                                              idx_batch +
                                              (epoch - 1) * 512)  # log the per-batch training loss
                        # print("loss is %f ",np.float(loss.cpu()))

                writer.add_scalar('train_loss_epoch', loss,
                                  epoch)  # log the per-epoch training loss

                # break

                z = Variable(fixed_batch['z']).type(
                    torch.FloatTensor).cuda()  # fixed test batch
                g.eval()
                g_z = g.forward(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)

                images_tmp = images.cpu().numpy().transpose((1, 2, 0))
                Image.fromarray(np.uint8(
                    (images_tmp + 1) * 127.5)).save('/home/jains/test/' +
                                                    str(epoch) + '.jpg')

                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models,
                                        'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
Example 11
    def train(self, epoch_to_restore=0):
        if epoch_to_restore == 0:
            self.make_dirs()

        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = self.dir_models / 'epoch_{}.pth'.format(epoch_to_restore)
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset_train = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader_train = DataLoader(dataset_train, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)
        dataset_test = EmbeddingsImagesDataset(self.dir_z_test, self.dir_x_test)
        dataloader_test = DataLoader(dataset_test, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer_train = SummaryWriter(str(self.dir_logs_train))
        writer_test = SummaryWriter(str(self.dir_logs_test))

        try:
            epoch = epoch_to_restore
            while True:
                start_time = time.time()

                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(dataloader_train):
                        g.zero_grad()
                        x = Variable(current_batch['x']).float().cuda()
                        z = Variable(current_batch['z']).float().cuda()
                        g_z = g.forward(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                g.eval()
                with torch.no_grad():
                    train_l1_loss = AverageMeter()
                    for idx_batch, current_batch in enumerate(dataloader_train):
                        if idx_batch == 32:
                            break
                        x = current_batch['x'].float().cuda()
                        z = current_batch['z'].float().cuda()
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        train_l1_loss.update(loss)

                    writer_train.add_scalar('l1_loss', train_l1_loss.avg, epoch)

                    test_l1_loss = AverageMeter()
                    for idx_batch, current_batch in enumerate(dataloader_test):
                        if idx_batch == 32:
                            break
                        x = current_batch['x'].float().cuda()
                        z = current_batch['z'].float().cuda()
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        test_l1_loss.update(loss)

                    writer_test.add_scalar('l1_loss', test_l1_loss.avg, epoch)
                    images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                    writer_test.add_image('generations', images, epoch)

                if epoch % self.nb_epochs_to_save == 0:
                    filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                    torch.save(g.state_dict(), filename)

                end_time = time.time()
                print("[*] Finished epoch {} in {}".format(epoch, get_hms(end_time - start_time)))

        finally:
            print('[*] Closing Writer.')
            writer_train.close()
            writer_test.close()
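AverageMeter is used but not defined in this snippet; a minimal implementation in the usual PyTorch-examples style, assumed compatible with the .update()/.avg calls above:

    class AverageMeter:
        """Tracks a running sum and count and exposes the mean as .avg."""

        def __init__(self):
            self.sum = 0.0
            self.count = 0

        def update(self, val, n=1):
            self.sum += float(val) * n  # float() also handles 0-dim tensors
            self.count += n

        @property
        def avg(self):
            return self.sum / max(self.count, 1)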
Example 12
    def train(self, epoch_to_restore=0, epoch_train=50):
        print("------------train start------------------")

        '''Start of training'''
        g = Generator(self.nb_channels_first_layer, self.dim, last_activate=self.last_activate)

        # read the csv file that records all training metrics
        from utilss.tools import read_run_data, save_run_data
        df = read_run_data(self.dir_experiment + '/')

        # decide whether to load a previously trained model
        if epoch_to_restore > 0:
            filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        # decide whether to use CUDA
        if (self.cuda):
            g.cuda()
        # switch to training mode
        g.train()

        # training-set dataloader
        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset, self.batch_size, shuffle=True, num_workers=self.cfg['num_workers'],
                                pin_memory=True)

        # '''Inspect the network graph'''
        # batch = next(iter(dataloader))
        # z = batch['z'].type(torch.FloatTensor).cuda()
        # with SummaryWriter(comment='Net1')as w:
        #    w.add_graph(g, (z,))

        # validation set's dataloader
        fixed_dataloader = DataLoader(dataset, self.batch_size)  # data used as a validation set
        fixed_batch = next(iter(fixed_dataloader))  # all values scaled by /127.5 - 1; iter() gets the iterator, next() returns the next batch

        # test-set dataloader
        testdataset = EmbeddingsImagesDataset(self.dir_z_test, self.dir_x_test)
        testdataloader = DataLoader(dataset=testdataset, batch_size=self.batch_size, shuffle=True,
                                    num_workers=self.cfg['num_workers'], pin_memory=True)
        testfixed_batch = next(iter(testdataloader))

        # define the loss function and optimizer
        criterion = torch.nn.L1Loss()
        optimizer = optim.Adam(g.parameters())

        # define the tensorboard writer
        writer = SummaryWriter(self.dir_logs)

        # start training
        try:
            epoch = epoch_to_restore
            while epoch < epoch_train:
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1
                    print("epoch is : ", epoch)
                    for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                        g.zero_grad()
                        if (self.cuda):
                            x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                            z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                        else:
                            x = Variable(current_batch['x']).type(torch.FloatTensor)
                            z = Variable(current_batch['z']).type(torch.FloatTensor)
                        # forward pass, compute the loss, backpropagate
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    # report the loss for each epoch
                    print("----------------{}-------------".format(loss))
                    writer.add_scalar('train_loss', loss, epoch)

                # compute PSNR and SSIM
                from skimage.measure import compare_ssim, compare_nrmse, compare_psnr, compare_mse
                if (epoch % 1 == 0):
                    test_psnr = 0
                    test_ssim = 0
                    val_psnr = 0
                    val_ssim = 0
                    if (self.cuda):
                        test_z = Variable(testfixed_batch['z']).type(torch.FloatTensor).cuda()
                        val_z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                    else:
                        test_z = Variable(testfixed_batch['z']).type(torch.FloatTensor)
                        val_z = Variable(fixed_batch['z']).type(torch.FloatTensor)
                    g.eval()
                    gtest_x = g.forward(test_z)
                    if (net_config['train_x'] == '(-1,1)'):
                        gtest_x = np.uint8((gtest_x.data.cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                        test_x = np.uint8(((testfixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1))) + 1) * 127.5)
                    elif (net_config['train_x'] == '(0,1)'):
                        gtest_x = np.uint8((gtest_x.data.cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                        test_x = np.uint8(((testfixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1)))) * 255)
                    for ii in range(self.batch_size):
                        test_psnr += compare_psnr(gtest_x[ii], test_x[ii])
                        test_ssim += compare_ssim(gtest_x[ii], test_x[ii], multichannel=True)
                    test_ssim /= self.batch_size
                    test_psnr /= self.batch_size

                    gval_x = g.forward(val_z)
                    if (net_config['train_x'] == '(-1,1)'):
                        gval_x = np.uint8((gval_x.data.cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                        val_x = np.uint8((fixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                    elif (net_config['train_x'] == '(0,1)'):
                        gval_x = np.uint8((gval_x.data.cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                        val_x = np.uint8((fixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                    for ii in range(self.batch_size):
                        val_psnr += compare_psnr(gval_x[ii], val_x[ii])
                        val_ssim += compare_ssim(gval_x[ii], val_x[ii], multichannel=True)
                    val_psnr /= self.batch_size
                    val_ssim /= self.batch_size

                    # record all metrics (loss, psnr, ssim) and save them as a csv file
                    df.loc[epoch] = [epoch, loss.cpu().data.numpy(), val_psnr, test_psnr, val_ssim, test_ssim]
                    save_run_data(self.dir_experiment + '/', df=df)

                    # add the results to tensorboard
                    writer.add_scalars('psnr', {'val_psnr': val_psnr, 'test_psnr': test_psnr}, epoch)
                    writer.add_scalars('ssim', {'val_ssim': val_ssim, 'test_ssim': test_ssim}, epoch)

                '''Save the validation-set images'''
                # images_tmp = np.uint8((make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose(
                #     (1, 2, 0)) + 1) * 127.5)  # map the image data into the 0-255 range
                # writer.add_image('generations', images_tmp, epoch)  # show each epoch's generated images in tensorboard
                # Image.fromarray(images_tmp).save(self.dir_valimg + '/' + str(epoch) + '.jpg')  # save each epoch's generated images locally

                '''Add metric scores to tensorboard'''
                # writer.add_scalars('mse_score', {'train_mse': mse, 'test_mse': test_mse}, epoch)
                # writer.add_scalars('psnr_score', {'train_psnr': psnr, 'test_psnr': test_psnr},
                #                    epoch)  # display each epoch's metrics in tensorboard
                # writer.add_scalars('ssim_score', {'train_ssim': ssim, 'test_ssim': test_ssim}, epoch)

                filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('*********************************Closing Writer**********************************.')
            writer.close()
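read_run_data and save_run_data are imported from utilss.tools, which is not shown. A hypothetical pandas-based equivalent consistent with how they are called above (df.loc[epoch] = [...] with six columns); the run_data.csv filename is an assumption:

    import os
    import pandas as pd

    COLUMNS = ['epoch', 'loss', 'val_psnr', 'test_psnr', 'val_ssim', 'test_ssim']

    def read_run_data(dir_experiment, filename='run_data.csv'):
        # Load the metrics table if it exists, otherwise start an empty one.
        path = os.path.join(dir_experiment, filename)
        if os.path.exists(path):
            return pd.read_csv(path)
        return pd.DataFrame(columns=COLUMNS)

    def save_run_data(dir_experiment, df, filename='run_data.csv'):
        df.to_csv(os.path.join(dir_experiment, filename), index=False)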