Пример #1
0
        def _compute_error(dir_z, dir_x, train_test):
            """Report MSE and PSNR of g1, g2 and their averaged output.

            Iterates the whole (z, x) dataset, accumulates the criterion value
            for each generator and for the mean of their two outputs, then
            prints the per-batch-averaged errors with the derived PSNRs.
            """
            loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x),
                                batch_size=16, num_workers=4, pin_memory=True)

            sum_err1, sum_err2, sum_err = 0, 0, 0
            for current_batch in tqdm(loader):
                x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                out1 = g1.forward(z)
                out2 = g2.forward(z)

                sum_err1 += criterion(out1, x).data.cpu().numpy()
                sum_err2 += criterion(out2, x).data.cpu().numpy()
                sum_err += criterion((out1 + out2) / 2.0, x).data.cpu().numpy()

            nb_batches = len(loader)
            error1 = sum_err1 / nb_batches
            error2 = sum_err2 / nb_batches
            error = sum_err / nb_batches

            # PSNR assuming an 8-bit dynamic range; the /3.0 presumably
            # averages over the three colour channels -- TODO confirm.
            def _psnr(mse):
                return 10 * np.log10(255 * 255 / mse) / 3.0

            print('MSE1 and PSNR1 for {}: {},{}'.format(train_test, error1, _psnr(error1)))
            print('MSE2 and PSNR2 for {}: {},{}'.format(train_test, error2, _psnr(error2)))
            print('MSE_f and PSNR_f for {}: {},{}'.format(train_test, error, _psnr(error)))
Пример #2
0
            def _generate_path(dir_z, dir_x, train_test):
                """Render a linear interpolation path between two embeddings.

                Draws two random samples, linearly interpolates between their
                z codes over 100 steps, decodes every step with the generator
                and saves each frame as an individual PNG.
                """
                loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x), 2, shuffle=True)
                batch = next(iter(loader))

                z_start = batch['z'][[0]].numpy()
                z_end = batch['z'][[1]].numpy()

                nb_samples = 100
                steps = [np.copy(z_start)]
                for t in np.linspace(0, 1, nb_samples):
                    if t > 0:
                        # zt = normalize((1 - t) * z_start + t * z_end)
                        steps.append((1 - t) * z_start + t * z_end)
                batch_z = np.vstack(steps)

                g_z = g.forward(torch.from_numpy(batch_z).float().cuda())
                frames = g_z.data.cpu().numpy().transpose((0, 2, 3, 1))

                folder_to_save = dir_to_save / 'epoch_{}_{}_path'.format(epoch_to_load, train_test)
                create_folder(folder_to_save)

                # Map [-1, 1] network output back to 8-bit pixel values.
                for idx in range(nb_samples):
                    target = os.path.join(folder_to_save, '{}.png'.format(idx))
                    Image.fromarray(np.uint8((frames[idx] + 1) * 127.5)).save(target)
Пример #3
0
        def _save_originals(dir_z, dir_x, train_test):
            """Save a 4x4 grid of the first 16 ground-truth images."""
            loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x), 16)
            batch = next(iter(loader))

            grid = make_grid(batch['x'], nrow=4).numpy().transpose((1, 2, 0))

            # Images are stored in [-1, 1]; rescale to 8-bit before saving.
            target = os.path.join(self.dir_experiment, 'originals_{}.png'.format(train_test))
            Image.fromarray(np.uint8((grid + 1) * 127.5)).save(target)
Пример #4
0
            def _generate_from_model(dir_z, dir_x, train_test):
                """Decode the first 16 embeddings and save them as a 4x4 grid."""
                batch = next(iter(DataLoader(EmbeddingsImagesDataset(dir_z, dir_x), 16)))

                g_z = g.forward(batch['z'].float().cuda())
                grid = make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose((1, 2, 0))
                # Rescale [-1, 1] output to 8-bit pixel values.
                target = dir_to_save / 'epoch_{}_{}.png'.format(epoch_to_load, train_test)
                Image.fromarray(np.uint8((grid + 1) * 127.5)).save(target)
Пример #5
0
        def _save_originals(dir_z, dir_x, train_test):
            """Save self.images_generate ground-truth images as a square grid."""
            loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x), self.images_generate)
            batch = next(iter(loader))

            # Square layout: e.g. images_generate == 9 gives a 3x3 grid.
            side = int(math.sqrt(self.images_generate))
            grid = make_grid(batch['x'], nrow=side).numpy().transpose((1, 2, 0))

            target = os.path.join(self.dir_experiment, 'originals_{}.jpg'.format(train_test))
            Image.fromarray(np.uint8((grid + 1) * 127.5)).save(target)
Пример #6
0
        def _generate_from_model(dir_z, dir_x, train_test):
            """Decode the first 16 embeddings with g and save a 4x4 grid."""
            batch = next(iter(DataLoader(EmbeddingsImagesDataset(dir_z, dir_x), 16)))

            z = Variable(batch['z']).type(torch.FloatTensor).cuda()
            g_z = g.forward(z)
            target = os.path.join(
                self.dir_experiment,
                'epoch_{}_{}.png'.format(epoch, train_test))
            grid = make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose((1, 2, 0))
            # Rescale [-1, 1] output to 8-bit pixel values.
            Image.fromarray(np.uint8((grid + 1) * 127.5)).save(target)
Пример #7
0
        def _save_originals(dir_z, dir_x, train_test):
            """Save a 4x4 grid of the first 16 original images of a split.

            Args:
                dir_z: directory with the embedding files.
                dir_x: directory with the image files.
                train_test: split label used in the output filename.
            """
            # FIX: the previous version immediately overwrote dir_z, dir_x and
            # train_test with hard-coded debug paths, so the arguments were
            # silently ignored; the overrides have been removed.
            dataset = EmbeddingsImagesDataset(dir_z, dir_x)
            fixed_dataloader = DataLoader(dataset, 16)
            fixed_batch = next(iter(fixed_dataloader))

            temp = make_grid(fixed_batch['x'], nrow=4).numpy().transpose(
                (1, 2, 0))

            # Images are stored in [-1, 1]; rescale to 8-bit before saving.
            filename_images = os.path.join(
                self.dir_experiment, 'originals_{}.png'.format(train_test))
            Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)
Пример #8
0
        def _generate_from_model(dir_z, dir_x, train_test):
            """Save a grid of originals and the generator's reconstructions."""
            loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x),
                                self.images_generate, shuffle=True)
            batch = next(iter(loader))
            side = int(math.sqrt(self.images_generate))

            # Ground-truth grid, saved for visual comparison.
            originals_path = os.path.join(self.dir_experiment, '{}_originals.jpg'.format(train_test))
            grid_x = make_grid(batch['x'], nrow=side).numpy().transpose((1, 2, 0))
            Image.fromarray(np.uint8((grid_x + 1) * 127.5)).save(originals_path)

            # Reconstructions from the same batch's embeddings.
            z = Variable(batch['z']).type(torch.FloatTensor).cuda()
            g_z = g.forward(z)
            generated_path = os.path.join(self.dir_experiment, 'epoch_{}_{}.jpg'.format(epoch, train_test))
            grid_g = make_grid(g_z.data[:self.images_generate],
                               nrow=side).cpu().numpy().transpose((1, 2, 0))
            Image.fromarray(np.uint8((grid_g + 1) * 127.5)).save(generated_path)
Пример #9
0
            def _compute_error(dir_z, dir_x, train_test):
                """Print the mean per-batch reconstruction error on one split."""
                loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x),
                                    batch_size=512, num_workers=4, pin_memory=True)

                total = 0
                for batch in tqdm(loader):
                    x = batch['x'].float().cuda()
                    z = batch['z'].float().cuda()
                    total += criterion(g.forward(z), x).data.cpu().numpy()

                # Average over the number of batches, not samples.
                error = total / len(loader)
                print('Error for {}: {}'.format(train_test, error))
Пример #10
0
            def _generate_random(dir_z, dir_x):
                """Resample part of one embedding and save original vs. modified.

                Saves the image at index ``idx_image`` as ``original.png``, then
                repeats its embedding 16 times, replaces the coordinate slice
                ``z_initial_idx:z_end_idx`` with fresh standard-normal noise,
                decodes the batch and saves the result as a 4x4 grid
                ``modified.png`` in ``dir_to_save``.
                """
                dataset = EmbeddingsImagesDataset(dir_z, dir_x)
                fixed_dataloader = DataLoader(dataset, idx_image + 1, shuffle=False)
                fixed_batch = next(iter(fixed_dataloader))

                x = fixed_batch['x'][[idx_image]]
                # FIX: dropped no-op str.format() calls on literals that have
                # no placeholders ('original.png'.format(epoch_to_load)).
                filename_images = os.path.join(dir_to_save, 'original.png')
                temp = make_grid(x.data, nrow=1).cpu().numpy().transpose((1, 2, 0))
                Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)

                z0 = fixed_batch['z'][[idx_image]].numpy()
                nb_samples = 16
                batch_z = np.repeat(z0, nb_samples, axis=0)
                # Overwrite the chosen coordinate range with Gaussian noise.
                batch_z[:, z_initial_idx:z_end_idx] = np.random.randn(nb_samples, z_end_idx - z_initial_idx)
                z = torch.from_numpy(batch_z).float().cuda()

                g_z = g.forward(z)
                filename_images = os.path.join(dir_to_save, 'modified.png')
                temp = make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose((1, 2, 0))
                Image.fromarray(np.uint8((temp + 1) * 127.5)).save(filename_images)
Пример #11
0
            def _compute_error(dir_z, dir_x, train_test):
                """Return (mean batch loss, mean PSNR, mean SSIM) for a split.

                The criterion is averaged per batch, while PSNR and SSIM are
                averaged per image over the number of files in dir_x.
                """
                nb_files = len(os.listdir(dir_x + "/"))
                loader = DataLoader(EmbeddingsImagesDataset(dir_z, dir_x),
                                    batch_size=32, pin_memory=True)
                total_error, total_ssim, total_psnr = 0, 0, 0
                for batch in tqdm(loader):
                    x = Variable(batch['x']).type(torch.FloatTensor).cuda()
                    z = Variable(batch['z']).type(torch.FloatTensor).cuda()
                    g_z = g.forward(z)
                    total_error += criterion(g_z, x).data.cpu().numpy()

                    generated = g_z.data.cpu().numpy()
                    reference = x.data.cpu().numpy()
                    for idx in range(generated.shape[0]):
                        out = generated[idx].transpose((1, 2, 0))
                        x_out = reference[idx].transpose((1, 2, 0))
                        # Per-image similarity scores.
                        total_ssim += compare_ssim(x_out, out, multichannel=True)
                        total_psnr += compare_psnr(x_out, out)

                error = total_error / len(loader)
                ssim = total_ssim / nb_files
                psnr = total_psnr / nb_files
                print('Error for {}: {}'.format(train_test, error))
                return error, psnr, ssim
Пример #12
0
    def train(self, epoch_to_restore=0):
        """Train the generator until interrupted, checkpointing periodically.

        Every ``self.nb_epochs_to_save`` epochs the weights are saved to
        ``self.dir_models`` and a 4x4 grid of generations for a fixed batch
        is logged to TensorBoard.

        Args:
            epoch_to_restore: if > 0, resume from the checkpoint saved for
                that epoch instead of applying the weight initialisation.
        """
        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = os.path.join(
                self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset,
                                self.batch_size,
                                shuffle=True,
                                num_workers=4,
                                pin_memory=True)
        # Fixed batch reused every cycle so generations stay comparable.
        fixed_dataloader = DataLoader(dataset, 16)
        fixed_batch = next(iter(fixed_dataloader))

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)

        try:
            epoch = epoch_to_restore
            while True:
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(
                            tqdm(dataloader)):
                        g.zero_grad()
                        x = Variable(current_batch['x']).type(
                            torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(
                            torch.FloatTensor).cuda()
                        g_z = g.forward(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    # NOTE(review): only the last batch's loss is logged here.
                    writer.add_scalar('train_loss', loss, epoch)

                z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                g.eval()
                g_z = g.forward(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models,
                                        'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
Пример #13
0
    elif isinstance(layer, nn.BatchNorm2d):
        layer.weight.data.normal_(1.0, 0.02)
        layer.bias.data.fill_(0)


if __name__ == '__main__':
    # Smoke test: run one forward pass of the generator on a batch of
    # scattering embeddings and save the first 16 outputs as an image grid.
    dir_datasets = os.path.expanduser('~/datasets')
    dataset = 'celebA'
    dataset_attribute = '256'
    embedding_attribute = 'ScatJ4'

    dir_x_train = os.path.join(dir_datasets, dataset,
                               '{0}'.format(dataset_attribute))
    dir_z_train = os.path.join(
        dir_datasets, dataset, '{0}_{1}'.format(dataset_attribute,
                                                embedding_attribute))

    dataset = EmbeddingsImagesDataset(dir_z_train, dir_x_train)
    fixed_dataloader = DataLoader(dataset, batch_size=128)
    fixed_batch = next(iter(fixed_dataloader))

    nb_channels_first_layer = 16

    # Embedding dimension is fixed at 512 here -- presumably matching the
    # ScatJ4 projection; TODO confirm against the Generator definition.
    input_tensor = Variable(fixed_batch['z']).cuda()
    g = Generator(nb_channels_first_layer, 512)
    g.cuda()
    g.train()

    output = g.forward(input_tensor)
    save_image(output[:16].data, 'temp.png', nrow=4)
Пример #14
0
import numpy as np
from torch.utils.data import DataLoader

from EmbeddingsImagesDataset import EmbeddingsImagesDataset

# Load a batch of 256 (embedding, image) pairs from the diracs dataset in
# preparation for a pairwise-distance search over the embeddings.
dir_datasets = os.path.expanduser('./datasets')
dataset = 'diracs'
dataset_attribute = '1024'
embedding_attribute = 'ScatJ4'

dir_x_train = os.path.join(dir_datasets, dataset, '{0}'.format(dataset_attribute))
dir_z_train = os.path.join(dir_datasets, dataset, '{0}_{1}'.format(dataset_attribute, embedding_attribute))

# print(dir_x_train)

dataset = EmbeddingsImagesDataset(dir_z_train, dir_x_train, nb_channels=3)  # dataset of training z and x embeddings
# fixed_dataloader = DataLoader(dataset, batch_size=256)  # load the dataset
fixed_dataloader = DataLoader(dataset, batch_size=256)
fixed_batch = next(iter(fixed_dataloader))

x = fixed_batch['x'].numpy()
z = fixed_batch['z'].numpy()

min_distance = np.inf  # start with infinity so any real distance is smaller
i_tilde = 0
j_tilde = 0

distances = list()
for i in range(256):
    for j in range(256):
        if i < j:
Пример #15
0
    def train(self, epoch_to_restore=0):
        """Train the generator until interrupted, logging and checkpointing.

        In addition to TensorBoard logging, a grid of generations for a
        fixed validation batch is written to disk after every save cycle.

        Args:
            epoch_to_restore: if > 0, resume from that epoch's checkpoint.
        """
        g = Generator(self.nb_channels_first_layer, self.dim)  # generator network
        if epoch_to_restore > 0:
            filename_model = os.path.join(
                self.dir_models,
                'epoch_{}.pth'.format(epoch_to_restore))  # load a previously trained model
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)  # otherwise initialise the network weights

        g.cuda()
        g.train()

        dataset = EmbeddingsImagesDataset(self.dir_z_train,
                                          self.dir_x_train)  # embedding dataset
        dataloader = DataLoader(dataset,
                                self.batch_size,
                                shuffle=True,
                                num_workers=4,
                                pin_memory=True)

        fixed_dataloader = DataLoader(dataset, 16)  # data used as a validation set

        # idx = iter(fixed_dataloader)
        # next(idx)
        # next(idx)

        fixed_batch = next(iter(fixed_dataloader))  # all values are /127.5 - 1
        # fixed_batch1 = next(iter(dataloader))
        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer = SummaryWriter(self.dir_logs)  # log writer; the argument is the folder name
        # lr = np.arange(1e-5,1.0,(1.0-1e-5)/512.)
        try:
            epoch = epoch_to_restore
            while True:

                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1
                    print("epoch is %d" % epoch)
                    for idx_batch, current_batch in enumerate(
                            tqdm(dataloader)):
                        # print idx_batch
                        # optimizer = optim.Adam(g.parameters(), lr=lr[idx_batch])
                        g.zero_grad()  # reset gradients to zero
                        x = Variable(current_batch['x']).type(
                            torch.FloatTensor).cuda()
                        z = Variable(current_batch['z']).type(
                            torch.FloatTensor).cuda()
                        g_z = g.forward(z)  # forward pass

                        loss = criterion(g_z, x)  # compute the loss
                        loss.backward()  # backward pass
                        optimizer.step()  # update the weights

                        # writer.add_scalar('train_loss', loss, idx_batch + (epoch-1)*512)  # log the training loss
                        # writer.add_scalar('lr',loss,lr[idx_batch]*100000 + 100000*0.5*(epoch - 1))
                        if idx_batch % 4 == 0:
                            writer.add_scalar('train_loss_batch', loss,
                                              idx_batch +
                                              (epoch - 1) * 512)  # log the per-batch training loss
                        # print("loss is %f ",np.float(loss.cpu()))

                # NOTE(review): this logs only the last batch's loss.
                writer.add_scalar('train_loss_epoch', loss,
                                  epoch)  # log the loss for this epoch

                # break

                z = Variable(fixed_batch['z']).type(
                    torch.FloatTensor).cuda()  # validation batch
                g.eval()
                g_z = g.forward(z)
                images = make_grid(g_z.data[:16], nrow=4, normalize=True)

                # NOTE(review): the output directory below is hard-coded.
                images_tmp = images.cpu().numpy().transpose((1, 2, 0))
                Image.fromarray(np.uint8(
                    (images_tmp + 1) * 127.5)).save('/home/jains/test/' +
                                                    str(epoch) + '.jpg')

                writer.add_image('generations', images, epoch)
                filename = os.path.join(self.dir_models,
                                        'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('[*] Closing Writer.')
            writer.close()
Пример #16
0
    def train(self, epoch_to_restore=0):
        """Train the generator, logging train/test L1 loss each save cycle.

        Runs until interrupted. After every ``self.nb_epochs_to_save``
        training epochs the model is evaluated without gradients on up to 32
        batches of each split, the average losses and a grid of generations
        are written to TensorBoard, and a checkpoint is saved.

        Args:
            epoch_to_restore: if > 0, resume from that epoch's checkpoint;
                otherwise create the experiment directories and initialise
                the weights.
        """
        if epoch_to_restore == 0:
            self.make_dirs()

        g = Generator(self.nb_channels_first_layer, self.dim)

        if epoch_to_restore > 0:
            filename_model = self.dir_models / 'epoch_{}.pth'.format(epoch_to_restore)
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        g.cuda()
        g.train()

        dataset_train = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader_train = DataLoader(dataset_train, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)
        dataset_test = EmbeddingsImagesDataset(self.dir_z_test, self.dir_x_test)
        dataloader_test = DataLoader(dataset_test, self.batch_size, shuffle=True, num_workers=4, pin_memory=True)

        criterion = torch.nn.L1Loss()

        optimizer = optim.Adam(g.parameters())
        writer_train = SummaryWriter(str(self.dir_logs_train))
        writer_test = SummaryWriter(str(self.dir_logs_test))

        try:
            epoch = epoch_to_restore
            while True:
                start_time = time.time()

                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1

                    for idx_batch, current_batch in enumerate(dataloader_train):
                        g.zero_grad()
                        x = Variable(current_batch['x']).float().cuda()
                        z = Variable(current_batch['z']).float().cuda()
                        g_z = g.forward(z)

                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                g.eval()
                with torch.no_grad():
                    # Evaluate on at most 32 batches of the training split.
                    train_l1_loss = AverageMeter()
                    for idx_batch, current_batch in enumerate(dataloader_train):
                        if idx_batch == 32:
                            break
                        x = current_batch['x'].float().cuda()
                        z = current_batch['z'].float().cuda()
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        train_l1_loss.update(loss)

                    writer_train.add_scalar('l1_loss', train_l1_loss.avg, epoch)

                    # Same bounded evaluation on the test split.
                    test_l1_loss = AverageMeter()
                    for idx_batch, current_batch in enumerate(dataloader_test):
                        if idx_batch == 32:
                            break
                        x = current_batch['x'].float().cuda()
                        z = current_batch['z'].float().cuda()
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        test_l1_loss.update(loss)

                    writer_test.add_scalar('l1_loss', test_l1_loss.avg, epoch)
                    # Grid built from the last evaluated test batch.
                    images = make_grid(g_z.data[:16], nrow=4, normalize=True)
                    writer_test.add_image('generations', images, epoch)

                if epoch % self.nb_epochs_to_save == 0:
                    filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                    torch.save(g.state_dict(), filename)

                end_time = time.time()
                print("[*] Finished epoch {} in {}".format(epoch, get_hms(end_time - start_time)))

        finally:
            print('[*] Closing Writer.')
            writer_train.close()
            writer_test.close()
Пример #17
0
    def train(self, epoch_to_restore=0, epoch_train=50):
        """Train the generator for up to ``epoch_train`` epochs.

        Each save cycle also computes PSNR/SSIM on a fixed validation batch
        and a fixed test batch, records all metrics to a CSV file and to
        TensorBoard, and writes a checkpoint.

        Args:
            epoch_to_restore: if > 0, resume from that epoch's checkpoint.
            epoch_train: stop once the epoch counter reaches this value.
        """
        print("------------train start------------------")

        '''训练开始'''
        g = Generator(self.nb_channels_first_layer, self.dim, last_activate=self.last_activate)

        # Read the CSV file that records all training metrics.
        from utilss.tools import read_run_data, save_run_data
        df = read_run_data(self.dir_experiment + '/')

        # Decide whether to load a previously trained model.
        if epoch_to_restore > 0:
            filename_model = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch_to_restore))
            g.load_state_dict(torch.load(filename_model))
        else:
            g.apply(weights_init)

        # Move to GPU when CUDA is enabled.
        if (self.cuda):
            g.cuda()
        # Switch to training mode.
        g.train()

        # Training-set dataloader.
        dataset = EmbeddingsImagesDataset(self.dir_z_train, self.dir_x_train)
        dataloader = DataLoader(dataset, self.batch_size, shuffle=True, num_workers=self.cfg['num_workers'],
                                pin_memory=True)

        # '''inspect the network graph'''
        # batch = next(iter(dataloader))
        # z = batch['z'].type(torch.FloatTensor).cuda()
        # with SummaryWriter(comment='Net1')as w:
        #    w.add_graph(g, (z,))

        # validation set's dataloader
        fixed_dataloader = DataLoader(dataset, self.batch_size)  # data used as a validation set
        fixed_batch = next(iter(fixed_dataloader))  # all values are /127.5 - 1; iter/next grabs one batch

        # Test-set dataloader.
        testdataset = EmbeddingsImagesDataset(self.dir_z_test, self.dir_x_test)
        testdataloader = DataLoader(dataset=testdataset, batch_size=self.batch_size, shuffle=True,
                                    num_workers=self.cfg['num_workers'], pin_memory=True)
        testfixed_batch = next(iter(testdataloader))

        # Loss function and optimiser.
        criterion = torch.nn.L1Loss()
        optimizer = optim.Adam(g.parameters())

        # TensorBoard writer.
        writer = SummaryWriter(self.dir_logs)

        # Training loop.
        try:
            epoch = epoch_to_restore
            while epoch < epoch_train:
                g.train()
                for _ in range(self.nb_epochs_to_save):
                    epoch += 1
                    print("epoch is : ", epoch)
                    for idx_batch, current_batch in enumerate(tqdm(dataloader)):
                        g.zero_grad()
                        if (self.cuda):
                            x = Variable(current_batch['x']).type(torch.FloatTensor).cuda()
                            z = Variable(current_batch['z']).type(torch.FloatTensor).cuda()
                        else:
                            x = Variable(current_batch['x']).type(torch.FloatTensor)
                            z = Variable(current_batch['z']).type(torch.FloatTensor)
                        # Forward pass, loss, backward pass.
                        g_z = g.forward(z)
                        loss = criterion(g_z, x)
                        loss.backward()
                        optimizer.step()

                    # Log the (last batch's) loss for this epoch.
                    print("----------------{}-------------".format(loss))
                    writer.add_scalar('train_loss', loss, epoch)

                # Compute PSNR and SSIM.
                from skimage.measure import compare_ssim, compare_nrmse, compare_psnr, compare_mse
                if (epoch % 1 == 0):
                    test_psnr = 0
                    test_ssim = 0
                    val_psnr = 0
                    val_ssim = 0
                    if (self.cuda):
                        test_z = Variable(testfixed_batch['z']).type(torch.FloatTensor).cuda()
                        val_z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                    else:
                        # NOTE(review): val_z still calls .cuda() on the
                        # non-CUDA branch -- looks like a copy/paste slip.
                        test_z = Variable(testfixed_batch['z']).type(torch.FloatTensor)
                        val_z = Variable(fixed_batch['z']).type(torch.FloatTensor).cuda()
                    g.eval()
                    gtest_x = g.forward(test_z)
                    # net_config presumably comes from module scope -- TODO confirm.
                    if (net_config['train_x'] == '(-1,1)'):
                        gtest_x = np.uint8((gtest_x.data.cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                        test_x = np.uint8(((testfixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1))) + 1) * 127.5)
                    elif (net_config['train_x'] == '(0,1)'):
                        gtest_x = np.uint8((gtest_x.data.cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                        test_x = np.uint8(((testfixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1)))) * 255)
                    for ii in range(self.batch_size):
                        test_psnr += compare_psnr(gtest_x[ii], test_x[ii])
                        test_ssim += compare_ssim(gtest_x[ii], test_x[ii], multichannel=True)
                    test_ssim /= self.batch_size
                    test_psnr /= self.batch_size

                    gval_x = g.forward(val_z)
                    if (net_config['train_x'] == '(-1,1)'):
                        gval_x = np.uint8((gval_x.data.cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                        val_x = np.uint8((fixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1)) + 1) * 127.5)
                    elif (net_config['train_x'] == '(0,1)'):
                        gval_x = np.uint8((gval_x.data.cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                        val_x = np.uint8((fixed_batch['x'].cpu().numpy().transpose((0, 2, 3, 1))) * 255)
                    for ii in range(self.batch_size):
                        val_psnr += compare_psnr(gval_x[ii], val_x[ii])
                        val_ssim += compare_ssim(gval_x[ii], val_x[ii], multichannel=True)
                    val_psnr /= self.batch_size
                    val_ssim /= self.batch_size

                    # Record all metrics (loss, PSNR, SSIM) and save them as a CSV file.
                    df.loc[epoch] = [epoch, loss.cpu().data.numpy(), val_psnr, test_psnr, val_ssim, test_ssim]
                    save_run_data(self.dir_experiment + '/', df=df)

                    # Add the results to TensorBoard.
                    writer.add_scalars('psnr', {'val_psnr': val_psnr, 'test_psnr': test_psnr}, epoch)
                    # NOTE(review): the second key is 'test_psnr' but the value
                    # is test_ssim -- likely a mislabeled dict key.
                    writer.add_scalars('ssim', {'val_ssim': val_ssim, 'test_psnr': test_ssim}, epoch)

                '''把验证集中的图片保存下来'''
                # images_tmp = np.uint8((make_grid(g_z.data[:16], nrow=4).cpu().numpy().transpose(
                #     (1, 2, 0)) + 1) * 127.5)  # rescale pixel values to 0-255
                # writer.add_image('generations', images_tmp, epoch)  # show each epoch's generations in TensorBoard
                # Image.fromarray(images_tmp).save(self.dir_valimg + '/' + str(epoch) + '.jpg')  # save each epoch's generations locally

                '''添加指标得分到tensorboard'''
                # writer.add_scalars('mse_score', {'train_mse': mse, 'test_mse': test_mse}, epoch)
                # writer.add_scalars('psnr_score', {'train_psnr': psnr, 'test_psnr': test_psnr},
                #                    epoch)  # show each epoch's metric scores in TensorBoard
                # writer.add_scalars('ssim_score', {'train_ssim': ssim, 'test_ssim': test_ssim}, epoch)

                filename = os.path.join(self.dir_models, 'epoch_{}.pth'.format(epoch))
                torch.save(g.state_dict(), filename)

        finally:
            print('*********************************Closing Writer**********************************.')
            writer.close()
from torch.utils.data import DataLoader

from EmbeddingsImagesDataset import EmbeddingsImagesDataset

# Load a batch of 256 (embedding, image) pairs from the single-channel
# diracs dataset in preparation for a pairwise-distance search.
dir_datasets = os.path.expanduser('~/datasets')
dataset = 'diracs'
dataset_attribute = '1024'
embedding_attribute = 'ScatJ4'

dir_x_train = os.path.join(dir_datasets, dataset,
                           '{0}'.format(dataset_attribute))
dir_z_train = os.path.join(
    dir_datasets, dataset, '{0}_{1}'.format(dataset_attribute,
                                            embedding_attribute))

dataset = EmbeddingsImagesDataset(dir_z_train, dir_x_train, nb_channels=1)
fixed_dataloader = DataLoader(dataset, batch_size=256)
fixed_batch = next(iter(fixed_dataloader))

x = fixed_batch['x'].numpy()
z = fixed_batch['z'].numpy()

min_distance = np.inf  # start with infinity so any real distance is smaller
i_tilde = 0
j_tilde = 0

distances = list()
for i in range(256):
    for j in range(256):
        if i < j:
            temp = (z[i] - z[j])**2