if args.gradient_penalty_mode != 'none':
    args.experiment_name += '_%s' % args.gradient_penalty_mode
output_dir = py.join('output', '%s_BN%d_DPG%d' % (args.experiment_name, args.batch_size, args.n_d))
py.mkdir(output_dir)

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)


# ==============================================================================
# =                               data and model                               =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    dataset, shape, len_dataset = data.make_32x32_dataset(args.dataset, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 3

elif args.dataset == 'celeba':  # 64x64
    img_paths = py.glob('data/img_align_celeba', '*.jpg')
    dataset, shape, len_dataset = data.make_celeba_dataset(img_paths, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 4

elif args.dataset == 'anime':  # 64x64
    img_paths = py.glob('data/faces', '*.jpg')
    dataset, shape, len_dataset = data.make_anime_dataset(img_paths, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = 4

elif args.dataset == 'custom':
    # ======================================
    # =               custom               =
    # ======================================
    img_paths = ...  # image paths of the custom dataset
    dataset, shape, len_dataset = data.make_custom_dataset(img_paths, args.batch_size)
    n_G_upsamplings = n_D_downsamplings = ...  # 3 for 32x32, 4 for 64x64


Example #2

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")


# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
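    # imb_index / imb_ratio appear to select which class(es) to subsample and the
    # imbalance ratio inside data.make_32x32_dataset (a class-imbalance setting)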
    data_loader, shape = data.make_32x32_dataset(
        args.dataset, args.batch_size, args.imb_index, args.imb_ratio, pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3
elif args.dataset == 'imagenet':
    # ======================================
    # =               custom               =
    # ======================================
    img_paths = 'data/imagenet_small/train'
    data_loader, shape = data.make_custom_dataset(
        img_paths, args.batch_size, resize=32, pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3  # 3 for 32x32 and 4 for 64x64
    # ======================================
    # =               custom               =
    # ======================================
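
# Why 3 vs. 4 up/down-samplings: each stride-2 (transposed) convolution halves or
# doubles the spatial size, so a 4x4 base feature map needs 3 doublings for 32x32
# outputs and 4 for 64x64 (4 * 2**3 = 32, 4 * 2**4 = 64). The sketch below is only
# an illustration of such a generator in plain PyTorch; build_generator and its
# defaults are assumptions, not the module code used by these examples.
import torch.nn as nn

def build_generator(z_dim=128, n_upsamplings=3, out_channels=3, base_channels=256):
    layers = [
        # project the (z_dim, 1, 1) latent tensor to a 4x4 feature map
        nn.ConvTranspose2d(z_dim, base_channels, kernel_size=4, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(base_channels),
        nn.ReLU(inplace=True),
    ]
    channels = base_channels
    for _ in range(n_upsamplings - 1):
        # each stride-2 transposed convolution doubles the spatial resolution
        layers += [
            nn.ConvTranspose2d(channels, channels // 2, kernel_size=4, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(channels // 2),
            nn.ReLU(inplace=True),
        ]
        channels //= 2
    # final doubling maps to image channels; tanh keeps outputs in [-1, 1]
    layers += [nn.ConvTranspose2d(channels, out_channels, kernel_size=4, stride=2, padding=1), nn.Tanh()]
    return nn.Sequential(*layers)

# build_generator(n_upsamplings=3) yields 32x32 images, n_upsamplings=4 yields 64x64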


Example #3

# save settings
py.args_to_yaml(py.join(output_dir, 'settings.yml'), args)

# others
use_gpu = torch.cuda.is_available()
device = torch.device("cuda" if use_gpu else "cpu")

# ==============================================================================
# =                                    data                                    =
# ==============================================================================

# setup dataset
if args.dataset in ['cifar10', 'fashion_mnist', 'mnist']:  # 32x32
    data_loader, shape = data.make_32x32_dataset(args.dataset,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 3

elif args.dataset == 'celeba':  # 64x64
    img_paths = py.glob('data/img_align_celeba', '*.jpg')
    data_loader, shape = data.make_celeba_dataset(img_paths,
                                                  args.batch_size,
                                                  pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 4

elif args.dataset == 'anime':  # 64x64
    img_paths = py.glob('data/faces', '*.jpg')
    data_loader, shape = data.make_anime_dataset(img_paths,
                                                 args.batch_size,
                                                 pin_memory=use_gpu)
    n_G_upsamplings = n_D_downsamplings = 4


Example #4
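
        # tail of the conditional generator's forward pass: the label features y_
        # are concatenated with the latent code, projected by a fully connected
        # layer, reshaped to 64 x 32 x 32, and passed through two transposed
        # convolutions with a final sigmoid to produce the output image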
        y_ = F.relu(y_)
        x = torch.cat([x, y_], 1)
        x = self.fc(x)
        x = x.view(batch_size, 64, 32, 32)
        x = self.bn1(x)
        x = F.relu(x)
        x = self.deconv1(x)
        x = self.bn2(x)
        x = F.relu(x)
        x = self.deconv2(x)
        x = torch.sigmoid(x)
        return x


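# 32x32 "logo" dataset; pin_memory=True (when a GPU is available) lets each batch be
# copied to the GPU from pinned host memory, which is faster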
train_loader, shape = data.make_32x32_dataset("logo",
                                              batch_size,
                                              pin_memory=use_gpu)

model_d = ModelD().to(device)
model_g = ModelG(nz).to(device)
criterion = nn.BCELoss()
input = torch.FloatTensor(batch_size, INPUT_SIZE).to(device)
noise = torch.FloatTensor(batch_size, nz).to(device)

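# fixed noise and one-hot labels (SAMPLE_SIZE // NUM_LABELS consecutive rows per
# class), typically reused to render the same grid of samples during training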
fixed_noise = torch.FloatTensor(SAMPLE_SIZE, nz).normal_(0, 1).to(device)
fixed_labels = torch.zeros(SAMPLE_SIZE, NUM_LABELS).to(device)
for i in range(NUM_LABELS):
    for j in range(SAMPLE_SIZE // NUM_LABELS):
        fixed_labels[i * (SAMPLE_SIZE // NUM_LABELS) + j, i] = 1.0

label = torch.FloatTensor(batch_size).to(device)
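
# Rough sketch (not part of the original example) of how these pieces typically fit
# together in one conditional-GAN training step. It assumes model_d(images, labels)
# and model_g(noise, labels) call signatures, a sigmoid output from model_d (as
# required by BCELoss), and that train_loader yields (images, integer class labels).
# In current PyTorch the pre-allocated input/noise/label buffers above are not
# required, so fresh tensors are created per batch instead.
optimizer_d = torch.optim.Adam(model_d.parameters(), lr=2e-4, betas=(0.5, 0.999))
optimizer_g = torch.optim.Adam(model_g.parameters(), lr=2e-4, betas=(0.5, 0.999))

for real_x, real_y in train_loader:
    bs = real_x.size(0)
    real_images = real_x.view(bs, -1).to(device)             # flatten to (bs, INPUT_SIZE)
    onehot = torch.zeros(bs, NUM_LABELS, device=device)
    onehot.scatter_(1, real_y.view(bs, 1).to(device), 1.0)   # one-hot class labels

    # discriminator step: push real images toward 1 and generated images toward 0
    optimizer_d.zero_grad()
    d_real = model_d(real_images, onehot)
    loss_real = criterion(d_real, torch.ones_like(d_real))
    z = torch.randn(bs, nz, device=device)
    fake_images = model_g(z, onehot)
    d_fake = model_d(fake_images.detach().view(bs, -1), onehot)
    loss_fake = criterion(d_fake, torch.zeros_like(d_fake))
    (loss_real + loss_fake).backward()
    optimizer_d.step()

    # generator step: try to make the discriminator output 1 for generated images
    optimizer_g.zero_grad()
    d_out = model_d(fake_images.view(bs, -1), onehot)
    loss_g = criterion(d_out, torch.ones_like(d_out))
    loss_g.backward()
    optimizer_g.step()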