Example #1
# Generator, Discriminator, weights_init, train_model, make_datapath_list,
# GAN_Img_Dataset, and ImageTransform are assumed to come from the
# project's own modules (they are not part of torch).
import os

import torch


def main():
    os.makedirs('checkpoints', exist_ok=True)

    # create models
    G = Generator(z_dim=20, image_size=64)
    D = Discriminator(z_dim=20, image_size=64)
    G.apply(weights_init)
    D.apply(weights_init)
    print('*** initialized weights')

    # load data
    train_img_list = make_datapath_list()
    print('*** num of data:', len(train_img_list))

    mean = (0.5, )
    std = (0.5, )
    train_dataset = GAN_Img_Dataset(file_list=train_img_list,
                                    transform=ImageTransform(mean, std))

    batch_size = 64
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True)

    # train, then save the trained weights
    num_epochs = 300
    G_update, D_update = train_model(G, D, train_dataloader, num_epochs)

    torch.save(G_update.state_dict(), 'checkpoints/G.pt')
    torch.save(D_update.state_dict(), 'checkpoints/D.pt')
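
Both models above are initialized with a weights_init helper that is not shown on this page. A minimal sketch of a DCGAN-style initializer, assuming the project follows the common convention (the body below is an assumption, not the project's own code):

import torch.nn as nn


def weights_init(m):
    # assumed DCGAN-style init: N(0, 0.02) for conv weights,
    # N(1, 0.02) for BatchNorm scales, zeros for BatchNorm biases
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
        if m.bias is not None:
            nn.init.constant_(m.bias.data, 0)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)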
Example #2
# Generator, Discriminator, make_datapath_list, GAN_Img_Dataset, and
# ImageTransform come from the project's own modules; `experiment` is a
# comet_ml Experiment created elsewhere.
import matplotlib.pyplot as plt
import torch


def main():
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # load model
    G = Generator(z_dim=20, image_size=64)
    D = Discriminator(z_dim=20, image_size=64)
    # map_location lets GPU-saved weights load on a CPU-only machine
    G.load_state_dict(torch.load('checkpoints/G.pt', map_location=device))
    D.load_state_dict(torch.load('checkpoints/D.pt', map_location=device))
    G.to(device)
    D.to(device)
    G.eval()

    # fixed latent vectors, reshaped to (batch_size, z_dim, 1, 1)
    batch_size = 8
    z_dim = 20
    fixed_z = torch.randn(batch_size, z_dim)
    fixed_z = fixed_z.view(fixed_z.size(0), fixed_z.size(1), 1, 1)

    # generate fake images; the self-attention Generator also returns
    # two attention maps (am1, am2)
    with torch.no_grad():
        fake_images, am1, am2 = G(fixed_z.to(device))

    # real images
    train_img_list = make_datapath_list()
    mean = (0.5, )
    std = (0.5, )
    train_dataset = GAN_Img_Dataset(file_list=train_img_list,
                                    transform=ImageTransform(mean, std))
    batch_size = 64
    train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True)
    images = next(iter(train_dataloader))

    # top row: real images, bottom row: generated images
    plt.figure(figsize=(15, 6))
    for i in range(0, 5):
        plt.subplot(2, 5, i + 1)
        plt.imshow(images[i][0].cpu().detach().numpy(), 'gray')

        plt.subplot(2, 5, 5 + i + 1)
        plt.imshow(fake_images[i][0].cpu().detach().numpy(), 'gray')
    plt.savefig('results.png')
    # figure=None logs the current pyplot figure to Comet ML
    experiment.log_figure(figure_name='results.png',
                          figure=None,
                          overwrite=True)

    plt.figure(figsize=(15, 6))
    for i in range(0, 5):
        plt.subplot(2, 5, i + 1)
        plt.imshow(fake_images[i][0].cpu().detach().numpy(), 'gray')

        plt.subplot(2, 5, 5 + i + 1)
        # am1 holds one attention row per query pixel of the 16x16 feature
        # map; reshape to (16, 16, 16, 16) and show where the query pixel
        # at (7, 7) attends
        am = am1[i].view(16, 16, 16, 16)
        am = am[7][7]
        plt.imshow(am.cpu().detach().numpy(), 'Reds')
    experiment.log_figure(figure_name='attention_map.png',
                          figure=None,
                          overwrite=True)
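
The 16x16 attention map plotted above is four times smaller than the 64x64 generated image. A small helper for upsampling it to image resolution so it can be overlaid on the output (the helper name and this overlay step are illustrative additions, not part of the example):

import torch.nn.functional as F


def upsample_attention(am_row, image_size=64):
    # am_row: (16, 16) attention of one query pixel, e.g. am[7][7] above
    am = am_row.view(1, 1, 16, 16)
    am = F.interpolate(am, size=(image_size, image_size),
                       mode='bilinear', align_corners=False)
    return am[0, 0]

Overlaying is then a matter of drawing the generated image in gray and the upsampled map on top with, e.g., plt.imshow(..., 'Reds', alpha=0.5).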
Example #3
                        # excerpt from train_model: accumulate the
                        # validation loss, then report per-epoch statistics
                        epoch_val_loss += loss.item() * batch_multiplier

        t_epoch_end = time.time()
        duration = t_epoch_end - t_epoch_start
        print('Epoch {} || Epoch_train_loss: {:.6f} || Epoch_val_loss: {:.6f}'.format(
            epoch + 1, epoch_train_loss / num_train_imgs,
            epoch_val_loss / num_val_imgs))
        print('Duration {:.6f} sec'.format(duration))
        t_epoch_start = time.time()

        # checkpoint after every epoch
        torch.save(model.state_dict(), 'pspnet50_' + str(epoch) + '.pth')



if __name__ == '__main__':
    # make_datapath_list, MyDataset, and DataTransform come from the
    # project's own modules; `data` is torch.utils.data
    rootpath = "./data/VOCdevkit/VOC2012/"
    train_img_list, train_anno_list, val_img_list, val_anno_list = \
        make_datapath_list(rootpath)

    # ImageNet channel statistics used for input normalization
    color_mean = (0.485, 0.456, 0.406)
    color_std = (0.229, 0.224, 0.225)

    train_dataset = MyDataset(train_img_list, train_anno_list, phase="train",
                              transform=DataTransform(input_size=475,
                                                      color_mean=color_mean,
                                                      color_std=color_std))
    val_dataset = MyDataset(val_img_list, val_anno_list, phase="val",
                            transform=DataTransform(input_size=475,
                                                    color_mean=color_mean,
                                                    color_std=color_std))

    batch_size = 12
    train_dataloader = data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    val_dataloader = data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False)

    dataloader_dict = {
        "train": train_dataloader,
        "val": val_dataloader
    }
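
The excerpt stops after building dataloader_dict; the model, loss, and the call into the train_model loop shown above are not included. A minimal sketch of the missing setup, assuming a PSPNet-style network as the checkpoint name pspnet50_*.pth suggests (every name and signature below is an assumption, not confirmed project code):

    import torch.nn as nn
    import torch.optim as optim

    # PSPNet and train_model are assumed to come from the project's
    # modules; 21 classes matches Pascal VOC (20 objects + background)
    net = PSPNet(n_classes=21)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=1e-3,
                          momentum=0.9, weight_decay=1e-4)

    # the exact train_model signature is not shown in the excerpt
    num_epochs = 30
    train_model(net, dataloader_dict, criterion, optimizer, num_epochs)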