def load_dataset_test(data_dir, dataset, batch_size):
    """Load the test split of *dataset* plus a per-sample label array.

    Args:
        data_dir: root directory that contains a ``Datasets`` folder.
        dataset: one of 'mnist', 'fashion', 'cifar10', 'celebA',
            'timagenet'.
        batch_size: forwarded only to loaders that accept it (celebA).

    Returns:
        (dataset_test, list_classes_test): the test dataset object and a
        numpy array holding the class label of every sample in it.

    Raises:
        UnboundLocalError: if *dataset* is not one of the known names
            (``dataset_test`` is never assigned).
    """
    path = os.path.join(data_dir, 'Datasets', dataset)

    if dataset == 'mnist':
        dataset_test = datasets.MNIST(path,
                                      train=False,
                                      download=True,
                                      transform=transforms.Compose(
                                          [transforms.ToTensor()]))
    elif dataset == 'fashion':
        # Uses the project-local `fashion` dataset class rather than
        # torchvision's FashionMNIST.  (A dead, flag-guarded DataLoader
        # variant that could never run was removed here.)
        dataset_test = fashion(path,
                               train=False,
                               download=True,
                               transform=transforms.ToTensor())

    elif dataset == 'cifar10':
        # Normalize each RGB channel from [0, 1] to [-1, 1].
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        dataset_test = datasets.CIFAR10(root=path,
                                        train=False,
                                        download=True,
                                        transform=transform)

    elif dataset == 'celebA':
        # NOTE(review): `path + 'celebA'` concatenates without a path
        # separator (yields '.../Datasets/celebAcelebA').  Kept as-is to
        # preserve behavior, but this looks like a bug — confirm against
        # utils.load_celebA before changing.
        dataset_test = utils.load_celebA(path + 'celebA',
                                         transform=transforms.Compose([
                                             transforms.CenterCrop(160),
                                             transforms.Scale(64),
                                             transforms.ToTensor()
                                         ]),
                                         batch_size=batch_size)
    elif dataset == 'timagenet':
        # Tiny-ImageNet: restrict the test set to the first 10 classes.
        dataset_test, labels = get_test_image_folders(path)
        list_classes_test = np.asarray(
            [labels[i] for i in range(len(dataset_test))])
        dataset_test = Subset(dataset_test,
                              np.where(list_classes_test < 10)[0])
        # (A dead re-assignment of list_classes_test was removed here:
        # the value is unconditionally recomputed below.)

    # Recompute labels from the (possibly subsetted) dataset itself so the
    # array always matches dataset_test index-for-index.
    list_classes_test = np.asarray(
        [dataset_test[i][1] for i in range(len(dataset_test))])

    return dataset_test, list_classes_test
Example #2
0
    def __init__(self, args):
        """Set up an EBGAN trainer from parsed CLI arguments.

        Stores hyper-parameters, builds the generator/discriminator and
        their Adam optimizers, moves them to GPU when requested, builds
        the training data loader and a fixed noise batch for sampling.
        """
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # EBGAN parameters
        self.pt_loss_weight = 0.1
        self.margin = max(1, self.batch_size / 64.)  # margin for loss function
        # usually margin of 1 is enough, but for large batch size it must be larger than 1

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        # NOTE(review): self.data_loader is only assigned for 'mnist',
        # 'fashion-mnist' and 'celebA'; any other dataset name leaves it
        # undefined and later training code will raise AttributeError.
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST('data/mnist', train=True, download=True,
                                                         transform=transforms.Compose(
                                                             [transforms.ToTensor()])),
                                          batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                    [transforms.ToTensor()])),
                batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA('data/celebA', transform=transforms.Compose(
                [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
                                                 shuffle=True)
        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
    def __init__(self, args):
        """Set up an EBGAN trainer from parsed CLI arguments.

        Stores hyper-parameters, builds the generator/discriminator and
        their Adam optimizers, moves them to GPU when requested, builds
        the training data loader and a fixed noise batch for sampling.
        """
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # EBGAN parameters
        self.pt_loss_weight = 0.1
        self.margin = max(1, self.batch_size / 64.)  # margin for loss function
        # usually margin of 1 is enough, but for large batch size it must be larger than 1

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.MSE_loss = nn.MSELoss().cuda()
        else:
            self.MSE_loss = nn.MSELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        # NOTE(review): self.data_loader is only assigned for 'mnist',
        # 'fashion-mnist' and 'celebA'; any other dataset name leaves it
        # undefined and later training code will raise AttributeError.
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST('data/mnist', train=True, download=True,
                                                         transform=transforms.Compose(
                                                             [transforms.ToTensor()])),
                                          batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                    [transforms.ToTensor()])),
                batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA('data/celebA', transform=transforms.Compose(
                [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
                                                 shuffle=True)
        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
    def __init__(self, args):
        """Set up a BEGAN trainer from parsed CLI arguments.

        Stores hyper-parameters (including BEGAN's gamma/lambda and the
        adaptive balancing term k), builds G/D with Adam optimizers,
        moves them to GPU when requested, builds the training data
        loader and a fixed noise batch for sampling.
        """
        # parameters
        self.epoch = args.epoch
        self.sample_num = 64
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # BEGAN parameters
        self.gamma = 0.75      # diversity ratio target
        self.lambda_ = 0.001   # learning rate for the k update
        self.k = 0.            # adaptive balancing term, starts at zero

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            # self.L1_loss = torch.nn.L1loss().cuda()   # BEGAN does not work well when using L1loss().
        # else:
        #     self.L1_loss = torch.nn.L1loss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        # NOTE(review): self.data_loader is only assigned for 'mnist',
        # 'fashion-mnist' and 'celebA'; other names leave it undefined.
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST('data/mnist', train=True, download=True,
                                                         transform=transforms.Compose(
                                                             [transforms.ToTensor()])),
                                          batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                    [transforms.ToTensor()])),
                batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA('data/celebA', transform=transforms.Compose(
                [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
                                                 shuffle=True)
        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #5
0
    def __init__(self, args):
        """Set up a WGAN-GP trainer with hard-coded hyper-parameters.

        Unlike the sibling trainers, every setting (epochs, batch size,
        learning rates, lambda, n_critic) is fixed inline; the *args*
        parameter is accepted for interface compatibility but unused.
        """
        # parameters (all hard-coded; `args` is ignored)
        self.epoch = 2
        self.sample_num = 64
        self.batch_size = 20000  # unusually large batch; whole-epoch-sized for MNIST
        self.save_dir = './save'
        self.result_dir = './result'
        self.dataset ='mnist'
        self.log_dir = './logs'
        self.gpu_mode = False
        self.model_name = 'wgan-gp'
        self.lrG=1e-4
        self.lrD=1e-4
        self.lambda_ = 0.25  # gradient-penalty weight
        self.n_critic = 2               # the number of iterations of the critic per generator iteration

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=self.lrG, betas=(0.5,0.9))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=self.lrD, betas=(0.5,0.9))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset ('mnist' is the only branch that can run given
        # the hard-coded self.dataset above)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST('data/mnist', train=True, download=True,
                                                         transform=transforms.Compose(
                                                             [transforms.ToTensor()])),
                                          batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                    [transforms.ToTensor()])),
                batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA('data/celebA', transform=transforms.Compose(
                [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]), batch_size=self.batch_size,
                                                 shuffle=True)
        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)).cuda(), volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand((self.batch_size, self.z_dim)), volatile=True)
Example #6
0
    def __init__(self, args, test_only=False):
        """Set up a GAN trainer, optionally in inference-only mode.

        When *test_only* is True, only the generator is built: the
        discriminator, its optimizer, and the training data loader are
        all skipped, so the instance can only be used for sampling.
        """
        # parameters
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        self.test_only = test_only
        self.gan_type = args.gan_type

        # networks init (D and its optimizer only exist when training)
        self.G = generator(self.dataset)
        if not test_only: self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        if not test_only:
            self.D_optimizer = optim.Adam(self.D.parameters(),
                                          lr=args.lrD,
                                          betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            if not test_only: self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        if not test_only: utils.print_network(self.D)
        print('-----------------------------------------------')

        if not test_only:
            # load dataset
            # NOTE(review): only assigned for 'mnist', 'fashion-mnist'
            # and 'celebA'; other names leave data_loader undefined.
            if self.dataset == 'mnist':
                self.data_loader = DataLoader(datasets.MNIST(
                    'data/mnist',
                    train=True,
                    download=True,
                    transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=self.batch_size,
                                              shuffle=True)
            elif self.dataset == 'fashion-mnist':
                self.data_loader = DataLoader(datasets.FashionMNIST(
                    'data/fashion-mnist',
                    train=True,
                    download=True,
                    transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=self.batch_size,
                                              shuffle=True)
            elif self.dataset == 'celebA':
                self.data_loader = utils.load_celebA(
                    'data/celebA',
                    transform=transforms.Compose([
                        transforms.CenterCrop(160),
                        transforms.Scale(64),
                        transforms.ToTensor()
                    ]),
                    batch_size=self.batch_size,
                    shuffle=True)

        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)
Example #7
0
    def __init__(self):
        """Set up the SimpleGAN trainer with hard-coded hyper-parameters.

        Takes no arguments: epochs, batch size, directories, learning
        rates and the dataset name are all fixed inline.  Builds G/D
        with Adam optimizers, moves them to GPU (gpu_mode is True),
        builds the MNIST data loader and a fixed noise batch.
        """
        # parameters (all hard-coded)
        self.epoch = 35
        self.sample_num = 16
        self.batch_size = 64
        self.save_dir = 'simple gan'
        self.result_dir = 'results'
        self.dataset = 'mnist'
        self.log_dir = 'logs'
        self.gpu_mode = True
        self.model_name = 'SimpleGAN'

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=0.0002,
                                      betas=(0.5, 0.999))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=0.0002,
                                      betas=(0.5, 0.999))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset ('mnist' is the only branch that can run given
        # the hard-coded self.dataset above)
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                'data/mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                'data/fashion-mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA(
                'data/celebA',
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        self.z_dim = 62

        # fixed noise for periodic sample images (volatile=True is the
        # legacy pre-0.4 PyTorch way of disabling autograd for inference)
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)
Example #8
0
    def __init__(self, args):
        """Set up a four-network model (Generator, Encoder,
        Discriminator, Feature extractor) from parsed CLI arguments.

        Loads train/validation splits for the chosen dataset, derives
        the image height/width/channel count from the raw data tensors,
        then builds the networks and their Adam optimizers (D and FC
        share one optimizer via ``chain``).  Networks are moved to GPU
        whenever CUDA is available.
        """
        # parameters
        self.root = args.root
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.z_dim = args.z_dim
        self.model_name = args.model_name + '_7'
        self.load_model = args.load_model
        self.args = args

        # load dataset
        if self.dataset == 'mnist':
            dset = datasets.MNIST('data/mnist', train=True, download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            valid_dset = datasets.MNIST('data/mnist', train=False, download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(dset, batch_size=self.batch_size, shuffle=True)
            # validation uses a fixed batch size of 64, unlike training
            self.valid_loader = DataLoader(valid_dset, batch_size=64, shuffle=True)
        elif self.dataset == 'emnist':
            dset = datasets.EMNIST('data/emnist', split='balanced', train=True, download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            valid_dset = datasets.EMNIST('data/emnist', split='balanced', train=False, download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(dset, batch_size=self.batch_size, shuffle=True)
            self.valid_loader = DataLoader(valid_dset, batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'cifar10':
            # NOTE(review): CIFAR10 is stored under root='data/mnist' —
            # looks like a copy-paste slip ('data/cifar10' expected), but
            # existing downloads may already live there; confirm before
            # changing the path.
            dset = datasets.CIFAR10(root='data/mnist', train=True,
                                        download=True, transform=transforms.Compose([transforms.ToTensor()]))
            valid_dset = datasets.CIFAR10(root='data/mnist', train=False, download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(dset, batch_size=self.batch_size, shuffle=True)
            self.valid_loader = DataLoader(valid_dset, batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'svhn':
            dset = datasets.SVHN(root='data/svhn', split='train',
                                        download=True, transform=transforms.Compose([transforms.ToTensor()]))
            valid_dset = datasets.SVHN(root='data/svhn', split='test', download=True,
                                    transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(dset, batch_size=self.batch_size, shuffle=True)
            self.valid_loader = DataLoader(valid_dset, batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'fashion-mnist':
            dset = datasets.FashionMNIST('data/fashion-mnist', train=True, download=True, transform=transforms.Compose(
                [transforms.ToTensor()]))
            valid_dset = datasets.FashionMNIST('data/fashion-mnist', train=False, download=True, transform=transforms.Compose(
                [transforms.ToTensor()]))
            self.data_loader = DataLoader(
                dset,
                batch_size=self.batch_size, shuffle=True)
            self.valid_loader = DataLoader(
                valid_dset,
                batch_size=self.batch_size, shuffle=True)
        elif self.dataset == 'celebA':
            # TODO: add test data
            dset = utils.load_celebA('data/celebA', transform=transforms.Compose(
                [transforms.CenterCrop(160), transforms.Scale(64), transforms.ToTensor()]))
            self.data_loader = DataLoader(dset, batch_size=self.batch_size,
                                                 shuffle=True)

        # image dimensions, inferred from the raw data arrays
        # (SVHN stores data as (N, C, H, W); the MNIST-family datasets
        # expose the legacy `.train_data` attribute with shape (N, H, W)
        # or (N, H, W, C))
        if self.dataset == 'svhn':
            self.height, self.width = dset.data.shape[2:4]
            self.pix_level = dset.data.shape[1]
        else:
            self.height, self.width = dset.train_data.shape[1:3]
            if len(dset.train_data.shape) == 3:
                self.pix_level = 1
            # elif self.dataset == 'cifar10':
            #     self.height = 2* self.height
            #     self.width = 2 * self.width
            #     self.pix_level = dset.train_data.shape[3]
            elif len(dset.train_data.shape) == 4:
                self.pix_level = dset.train_data.shape[3]

        print("Data shape is height:{}, width:{}, and pixel level:{}\n".format(self.height, self.width, self.pix_level))

        # networks init
        self.G = Generator(self.dataset, self.z_dim, self.height, self.width, self.pix_level)
        self.E = Encoder(self.dataset, self.z_dim, self.height, self.width, self.pix_level)
        self.D = Discriminator(self.dataset, self.height, self.width, self.pix_level)
        self.FC = Feature(self.dataset, self.height, self.width, self.pix_level)
        # D and FC are updated jointly through a single optimizer
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1* 1.2, args.beta2))
        self.D_optimizer = optim.Adam(chain(self.D.parameters(), self.FC.parameters()), lr=args.lrD, betas=(args.beta1* 1.2, args.beta2))
        self.E_optimizer = optim.Adam(self.E.parameters(), lr=args.lrE, betas=(args.beta1* 1.2, args.beta2))

        if torch.cuda.is_available():
            self.G.cuda()
            self.D.cuda()
            self.E.cuda()
            self.FC.cuda()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        utils.print_network(self.E)
        utils.print_network(self.FC)
        print('-----------------------------------------------')
Example #9
0
    def __init__(self, args):
        """Set up a conditional WGAN-style trainer (RMSprop critic).

        Stores hyper-parameters, builds G/D with RMSprop optimizers,
        builds the data loader, and prepares fixed noise plus a fixed
        one-hot label batch for conditional sample generation.
        """
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        self.lambda_ = 0.25
        self.n_critic = 5  # the number of iterations of the critic per generator iteration

        self.lambda_cl = 0.2  # classification-loss weight
        self.c = 0.01         # weight-clipping bound (WGAN-style)

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        # self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        # self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))
        self.G_optimizer = optim.RMSprop(self.G.parameters(), lr=args.lrG)
        self.D_optimizer = optim.RMSprop(self.D.parameters(), lr=args.lrD)

        # NOTE(review): losses are only created in GPU mode; CPU runs
        # would hit AttributeError on self.BCE_loss / self.CE_loss.
        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset
        # NOTE(review): only 'mnist' and 'celebA' set z_dim/y_dim; the
        # 'fashion-mnist' branch leaves them unset, so the fixed-noise
        # code below would raise AttributeError for that dataset.
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(datasets.MNIST(
                'data/mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.z_dim = 62
            self.y_dim = 10
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(datasets.FashionMNIST(
                'data/fashion-mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()])),
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA(
                'data/celebA',
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
            from load_attr import load_attr
            attr = load_attr()
            self.attr = torch.FloatTensor(attr)
            self.z_dim = 62
            self.y_dim = 1

        # fixed noise
        # NOTE(review): the GPU branch samples sample_num rows while the
        # CPU branch samples batch_size rows — likely unintended
        # inconsistency; confirm which size the sampling code expects.
        if self.gpu_mode:
            self.sample_z_ = Variable(torch.rand(
                (self.sample_num, self.z_dim)).cuda(),
                                      volatile=True)
        else:
            self.sample_z_ = Variable(torch.rand(
                (self.batch_size, self.z_dim)),
                                      volatile=True)

        # fixed labels: for MNIST, a one-hot block per digit so that the
        # sample grid shows every class
        if self.dataset == 'mnist':
            temp = torch.zeros((10, 1))
            for i in range(self.y_dim):
                temp[i, 0] = i

            temp_y = torch.zeros((self.sample_num, 1))
            for i in range(10):
                temp_y[i * self.y_dim:(i + 1) * self.y_dim] = temp

            self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
            self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        elif self.dataset == 'celebA':
            # first half of the samples get attribute 1, the rest 0
            self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
            self.sample_y_[:50, 0] = 1
            # self.sample_y_[25:75, 1] = 1
        if self.gpu_mode:
            self.sample_y_ = Variable(self.sample_y_.cuda(), volatile=True)
        else:
            # NOTE(review): sample_z_ is already a Variable here, so this
            # wraps it a second time (harmless in legacy PyTorch, but
            # redundant).
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_,
                                                      volatile=True), Variable(
                                                          self.sample_y_,
                                                          volatile=True)
Example #10
0
        type=str,
        default=None,
        help=
        f'path to a saved model for resuming training and/or performing testing (default: None)'
    )
    parser.add_argument(
        '--save_examples',
        action='store_true',
        default=False,
        help=
        'whether to save example images to files during training (default: False)'
    )
    args = parser.parse_args()

    if args.train or args.test:
        train_loader, test_loader = utils.load_celebA(
            data_path=args.data_path, batch_size=args.batch_size)
    else:
        print('Neither --train nor --test specified. Quitting.')
        exit(0)

    if args.train:
        vae.train(
            train_loader,
            model_path=args.model_path,
            num_epochs=args.epochs,
            seed=args.seed,
            report_freq=args.report_freq,
            save_examples=args.save_examples,
        )

    if args.test:
Example #11
0
    def __init__(self, args):
        # parameters
        self.root = args.root
        self.epoch = args.epoch
        self.sample_num = 16
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.log_dir = args.log_dir
        self.z_dim = args.z_dim
        # Run/bookkeeping configuration taken from the parsed CLI arguments.
        self.model_name = args.model_name
        self.load_model = args.load_model  # whether to resume from a checkpoint
        self.dataset = args.dataset

        # load dataset
        # Each branch builds a (train, valid) dataset pair under data/<name>
        # and wraps both in shuffling DataLoaders at the configured batch
        # size.  MNIST / Fashion-MNIST stay at their native size; CIFAR-10
        # and SVHN are resized to 64x64 and normalized to [-1, 1].
        # NOTE(review): transforms.Scale is a deprecated alias of
        # transforms.Resize in newer torchvision -- confirm the pinned version.
        if self.dataset == 'mnist':
            dset = datasets.MNIST('data/mnist',
                                  train=True,
                                  download=True,
                                  transform=transforms.Compose(
                                      [transforms.ToTensor()]))
            valid_dset = datasets.MNIST('data/mnist',
                                        train=False,
                                        download=True,
                                        transform=transforms.Compose(
                                            [transforms.ToTensor()]))
            self.data_loader = DataLoader(dset,
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.valid_loader = DataLoader(valid_dset,
                                           batch_size=self.batch_size,
                                           shuffle=True)
        elif self.dataset == 'cifar10':
            dset = datasets.CIFAR10(root='data/cifar10',
                                    train=True,
                                    download=True,
                                    transform=transforms.Compose([
                                        transforms.Scale(64),
                                        transforms.ToTensor(),
                                        transforms.Normalize(mean=(0.5, 0.5,
                                                                   0.5),
                                                             std=(0.5, 0.5,
                                                                  0.5))
                                    ]))
            valid_dset = datasets.CIFAR10(root='data/cifar10',
                                          train=False,
                                          download=True,
                                          transform=transforms.Compose([
                                              transforms.Scale(64),
                                              transforms.ToTensor(),
                                              transforms.Normalize(
                                                  mean=(0.5, 0.5, 0.5),
                                                  std=(0.5, 0.5, 0.5))
                                          ]))
            self.data_loader = DataLoader(dset,
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.valid_loader = DataLoader(valid_dset,
                                           batch_size=self.batch_size,
                                           shuffle=True)
        elif self.dataset == 'svhn':
            # load SVHN dataset (73257, 3, 32, 32)
            dset = datasets.SVHN(root='data/svhn',
                                 split='train',
                                 download=True,
                                 transform=transforms.Compose([
                                     transforms.Scale(64),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=(0.5, 0.5, 0.5),
                                                          std=(0.5, 0.5, 0.5))
                                 ]))
            valid_dset = datasets.SVHN(root='data/svhn',
                                       split='test',
                                       download=True,
                                       transform=transforms.Compose([
                                           transforms.Scale(64),
                                           transforms.ToTensor(),
                                           transforms.Normalize(mean=(0.5, 0.5,
                                                                      0.5),
                                                                std=(0.5, 0.5,
                                                                     0.5))
                                       ]))
            self.data_loader = DataLoader(dset,
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.valid_loader = DataLoader(valid_dset,
                                           batch_size=self.batch_size,
                                           shuffle=True)
        elif self.dataset == 'fashion-mnist':
            dset = datasets.FashionMNIST('data/fashion-mnist',
                                         train=True,
                                         download=True,
                                         transform=transforms.Compose(
                                             [transforms.ToTensor()]))
            valid_dset = datasets.FashionMNIST('data/fashion-mnist',
                                               train=False,
                                               download=True,
                                               transform=transforms.Compose(
                                                   [transforms.ToTensor()]))
            self.data_loader = DataLoader(dset,
                                          batch_size=self.batch_size,
                                          shuffle=True)
            self.valid_loader = DataLoader(valid_dset,
                                           batch_size=self.batch_size,
                                           shuffle=True)
        elif self.dataset == 'celebA':
            # TODO: add test data
            dset = utils.load_celebA('data/celebA',
                                     transform=transforms.Compose([
                                         transforms.CenterCrop(160),
                                         transforms.Scale(64),
                                         transforms.ToTensor()
                                     ]))
            self.data_loader = DataLoader(dset,
                                          batch_size=self.batch_size,
                                          shuffle=True)

        # image dimensions
        # Derive (height, width, channels) from the raw dataset arrays.
        if self.dataset == 'svhn':
            # SVHN stores its array as (N, C, H, W): H/W come from axes 2-3,
            # channel count from axis 1.
            self.height, self.width = dset.data.shape[2:4]
            self.pix_level = dset.data.shape[1]
        else:
            # torchvision MNIST/CIFAR expose the UN-transformed images via
            # .train_data: (N, H, W) for grayscale, (N, H, W, C) for color.
            # NOTE(review): the celebA branch also falls through to here and
            # assumes utils.load_celebA's return has .train_data -- verify.
            self.height, self.width = dset.train_data.shape[1:3]
            if len(dset.train_data.shape) == 3:
                # 3-D array => single-channel (grayscale) images
                self.pix_level = 1
            elif self.dataset == 'cifar10':
                # train_data still holds the raw 32x32 images; Scale(64)
                # resizes at load time, so override the H/W read above to
                # match what the DataLoader actually yields.
                self.height = 64
                self.width = 64
                self.pix_level = dset.train_data.shape[3]
            elif len(dset.train_data.shape) == 4:
                self.pix_level = dset.train_data.shape[3]

        # networks init
        # Generator/Encoder share the latent size; all three networks see the
        # same channel count.  Each gets its own Adam optimizer with a
        # separately configurable learning rate.
        # NOTE(review): self.z_dim is read here but not assigned anywhere in
        # this visible span -- presumably set earlier in __init__; confirm.
        self.G = Generator(self.z_dim, self.pix_level)
        self.E = Encoder(self.z_dim, self.pix_level)
        self.D = Discriminator(self.pix_level)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=(args.beta1, args.beta2))
        self.E_optimizer = optim.Adam(self.E.parameters(),
                                      lr=args.lrE,
                                      betas=(args.beta1, args.beta2))

        # Move networks and the BCE criterion to GPU when one is available.
        if torch.cuda.is_available():
            self.G.cuda()
            self.D.cuda()
            self.E.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        utils.print_network(self.E)
        print('-----------------------------------------------')

        # load in saved model
        if self.load_model:
            self.load()
        download=True),
                              batch_size=batch_size,
                              shuffle=True)
elif dset == 'fashion-mnist':
    train_loader = DataLoader(dataset=datasets.FashionMNIST(
        root='./data/',
        train=True,
        download=True,
        transform=transforms.Compose([transforms.ToTensor()])),
                              batch_size=batch_size,
                              shuffle=True)
elif dset == 'celebA':
    train_loader = utils.load_celebA('./data/',
                                     transform=transforms.Compose([
                                         transforms.CenterCrop(160),
                                         transforms.Scale(64),
                                         transforms.ToTensor()
                                     ]),
                                     batch_size=batch_size,
                                     shuffle=True)
elif dset == 'cifar10':
    train_loader = DataLoader(dataset=datasets.CIFAR10(
        root='./data/',
        train=True,
        transform=transforms.ToTensor(),
        download=True),
                              batch_size=batch_size,
                              shuffle=True)
"""Generator"""


class Generator(nn.Module):
    # Example #13 (score: 0) -- scrape artifact separating snippets
    def __init__(self, args):
        """Configure a BEGAN trainer from parsed command-line arguments.

        Stores run configuration and the BEGAN equilibrium hyper-parameters,
        builds the generator/discriminator pair with their Adam optimizers,
        prints both architectures, constructs the training DataLoader for the
        selected dataset, and draws a fixed noise batch for sampling.

        NOTE(review): the enclosing class header in this snippet reads
        ``nn.Module``; if that is accurate, ``super().__init__()`` should be
        called first -- confirm upstream.
        """
        # run configuration copied from the CLI arguments
        self.epoch = args.epoch
        self.sample_num = 64              # fixed samples drawn for visualisation
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # BEGAN equilibrium hyper-parameters (Berthelot et al., 2017)
        self.gamma = 0.75     # target diversity ratio
        self.lambda_ = 0.001  # learning rate for the control variable k
        self.k = 0.           # balance term, updated every training step

        # networks and their optimizers (shared Adam betas, per-net lr)
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        adam_betas = (args.beta1, args.beta2)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=adam_betas)
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=adam_betas)

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            # No reconstruction criterion is created: BEGAN does not work
            # well when using an L1 loss.

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset -- build the shuffled training loader for the chosen set
        if self.dataset == 'mnist':
            mnist_set = datasets.MNIST(
                'data/mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(mnist_set,
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'fashion-mnist':
            fashion_set = datasets.FashionMNIST(
                'data/fashion-mnist',
                train=True,
                download=True,
                transform=transforms.Compose([transforms.ToTensor()]))
            self.data_loader = DataLoader(fashion_set,
                                          batch_size=self.batch_size,
                                          shuffle=True)
        elif self.dataset == 'celebA':
            celeb_transform = transforms.Compose([
                transforms.CenterCrop(160),
                transforms.Scale(64),
                transforms.ToTensor()
            ])
            self.data_loader = utils.load_celebA('data/celebA',
                                                 transform=celeb_transform,
                                                 batch_size=self.batch_size,
                                                 shuffle=True)
        self.z_dim = 62

        # fixed latent noise, reused across epochs so samples are comparable
        sample_noise = torch.rand((self.batch_size, self.z_dim))
        if self.gpu_mode:
            sample_noise = sample_noise.cuda()
        self.sample_z_ = Variable(sample_noise, volatile=True)
# Example #14 (score: 0) -- scrape artifact separating snippets
    def __init__(self, args):
        """Configure a WGAN trainer from parsed command-line arguments.

        Stores run configuration and the WGAN-specific settings (weight
        clipping value, critic iterations, dk multiplier), builds the
        generator/critic pair with their Adam optimizers, prints both
        architectures, constructs the training DataLoader for the selected
        dataset, and draws a fixed noise batch for sampling.
        """
        # run configuration copied from the CLI arguments
        self.epoch = args.epoch
        self.sample_num = 64              # fixed samples drawn for visualisation
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type
        self.dk = args.dk
        self.c = 0.01      # weight clipping value
        self.n_critic = 5  # critic iterations per generator iteration
        if args.dk != 1:
            # a non-default dk gets its own model name so checkpoints and
            # result folders do not collide with the plain WGAN run
            self.model_name = "WGAN_DK_" + str(args.dk)
            print(self.model_name)

        # networks and their optimizers (shared Adam betas, per-net lr)
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        adam_betas = (args.beta1, args.beta2)
        self.G_optimizer = optim.Adam(self.G.parameters(),
                                      lr=args.lrG,
                                      betas=adam_betas)
        self.D_optimizer = optim.Adam(self.D.parameters(),
                                      lr=args.lrD,
                                      betas=adam_betas)

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load dataset -- build the shuffled training loader for the chosen set
        if self.dataset == 'mnist':
            self.data_loader = DataLoader(
                datasets.MNIST(
                    'data/mnist',
                    train=True,
                    download=True,
                    transform=transforms.Compose([transforms.ToTensor()])),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'fashion-mnist':
            self.data_loader = DataLoader(
                datasets.FashionMNIST(
                    'data/fashion-mnist',
                    train=True,
                    download=True,
                    transform=transforms.Compose([transforms.ToTensor()])),
                batch_size=self.batch_size,
                shuffle=True)
        elif self.dataset == 'celebA':
            self.data_loader = utils.load_celebA(
                'data/celebA',
                transform=transforms.Compose([
                    transforms.CenterCrop(160),
                    transforms.Scale(64),
                    transforms.ToTensor()
                ]),
                batch_size=self.batch_size,
                shuffle=True)
        self.z_dim = 62

        # fixed latent noise, reused across epochs so samples are comparable
        sample_noise = torch.rand((self.batch_size, self.z_dim))
        if self.gpu_mode:
            sample_noise = sample_noise.cuda()
        self.sample_z_ = Variable(sample_noise, volatile=True)