Example no. 1
def main():
    opt = parse_option()
    set_seed(opt.seed)
    transform = get_color_mnist_transform()

    ds_train = get_predefined_dataset(dataset_name='color_mnist',
                                      root='./dataset/colour_mnist',
                                      weights=None,
                                      major_ratio=0.5,
                                      num_data=opt.num_data)

    dataloader = data.DataLoader(dataset=ds_train,
                                 batch_size=128,
                                 shuffle=False,
                                 num_workers=8,
                                 pin_memory=True)

    model = SimpleConvNet(num_labels=20).cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(
        optimizer, [opt.epochs * 3 // 7, opt.epochs * 6 // 7], gamma=0.1)
    print(f'train_biased_model - opt: {optimizer}, sched: {scheduler}')

    ckpt_path = Path(
        f'./exp_results/color-mnist-convnet-{opt.num_data}-seed{opt.seed}')
    ckpt_path.mkdir(exist_ok=True, parents=True)

    for n in range(1, opt.epochs + 1):
        train_acc = train(model, dataloader, optimizer)
        print(f'[{n} / {opt.epochs}] train_acc: {train_acc}')

        if n % 10 == 0:
            torch.save(model.state_dict(), ckpt_path / f'ckpt_{n}.pt')
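A note on the helpers: Example no. 1 calls a train() function that is not part of the excerpt. A minimal sketch of what it plausibly does, assuming the color-MNIST dataset yields (image, label, bias_label, index) tuples as Example no. 3 unpacks them below:

import torch
import torch.nn as nn

def train(model, dataloader, optimizer):
    # One epoch of standard cross-entropy training; returns train accuracy.
    model.train()
    criterion = nn.CrossEntropyLoss()
    correct, total = 0, 0
    for images, labels, _, _ in dataloader:
        images, labels = images.cuda(), labels.cuda()
        optimizer.zero_grad()
        logits = model(images)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        correct += (logits.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    return correct / total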
Example no. 2
def main():
    args = parse_option()
    set_seed(args.seed)
    print(args)

    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    # load model
    assert args.netG_ckpt_step
    print(f'load model from {save_path} step: {args.netG_ckpt_step}')
    netG, _, netD_drs, _, _, _ = get_gan_model(
        dataset_name=args.dataset,
        model=args.model,
        loss_type=args.loss_type,
        drs=True,
    )
    netG.to(device)
    netD_drs.to(device)
    if not args.netG_train_mode:
        netG.eval()
        netD_drs.eval()

    if args.dataset == 'celeba':
        dataset = 'celeba_64'
    else:
        raise ValueError("Dataset should be CelebA")

    evaluate_drs_with_attr(
        metric='partial_recall',
        attr=args.attr,
        log_dir=save_path,
        netG=netG,
        netD_drs=netD_drs,
        dataset=dataset,
        num_real_samples=10000,
        num_fake_samples=10000,
        evaluate_step=args.netG_ckpt_step,
        num_runs=1,
        device=device,
        use_original_netD=args.use_original_netD,
    )
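These scripts all call a shared set_seed() helper whose body is not shown. A common implementation (an assumption, not necessarily this repo's exact code) seeds Python, NumPy, and PyTorch together:

import random
import numpy as np
import torch

def set_seed(seed):
    # Seed every RNG the training scripts touch, for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)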
Example no. 3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", "-d", default="color_mnist", type=str)
    parser.add_argument("--root", "-r", default="./dataset/colour_mnist", type=str, help="dataset dir")
    parser.add_argument("--work_dir", default="./exp_results", type=str, help="output dir")
    parser.add_argument("--exp_name", default="colour_mnist", type=str, help="exp name")
    parser.add_argument("--baseline_exp_name", default="colour_mnist", type=str, help="exp name")
    parser.add_argument("--model", default="mnistgan", type=str, help="network model")
    parser.add_argument('--gpu', default='0', type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--num_pack', default=1, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--use_clipping', action='store_true')
    parser.add_argument('--num_steps', default=20000, type=int)
    parser.add_argument('--logit_save_steps', default=100, type=int)
    parser.add_argument('--decay', default='None', type=str)
    parser.add_argument('--n_dis', default=1, type=int)
    parser.add_argument('--p1_step', default=10000, type=int)
    parser.add_argument('--major_ratio', default=0.99, type=float)
    parser.add_argument('--num_data', default=10000, type=int)
    parser.add_argument('--resample_score', type=str)
    parser.add_argument("--loss_type", default="hinge", type=str, help="loss type")
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    baseline_output_dir = f'{args.work_dir}/{args.baseline_exp_name}'
    baseline_save_path = Path(baseline_output_dir)

    prefix = args.exp_name.split('/')[-1]

    set_seed(args.seed)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    netG, netD, optG, optD = get_gan_model(
        dataset_name=args.dataset,
        model=args.model,
        loss_type=args.loss_type,
        gold=True
    )

    netG_ckpt_path = baseline_save_path / f'checkpoints/netG/netG_{args.p1_step}_steps.pth'
    netD_ckpt_path = baseline_save_path / f'checkpoints/netD/netD_{args.p1_step}_steps.pth'

    print_num_params(netG, netD)

    ds_train = get_predefined_dataset(
        dataset_name=args.dataset,
        root=args.root,
        weights=None,
        major_ratio=args.major_ratio,
        num_data=args.num_data
    )
    dl_train = get_dataloader(
        ds_train,
        batch_size=args.batch_size,
        weights=None)

    data_iter = iter(dl_train)
    imgs, _, _, _ = next(data_iter)
    plot_data(imgs, num_per_side=8, save_path=save_path, file_name=f'{prefix}_gold_train_data_p2', vis=None)

    print(args, netG_ckpt_path, netD_ckpt_path)

    # Start training
    trainer = LogTrainer(
        output_path=save_path,
        logit_save_steps=args.logit_save_steps,
        netD=netD,
        netG=netG,
        optD=optD,
        optG=optG,
        netG_ckpt_file=netG_ckpt_path,
        netD_ckpt_file=netD_ckpt_path,
        n_dis=args.n_dis,
        num_steps=args.num_steps,
        save_steps=1000,
        vis_steps=100,
        lr_decay=args.decay,
        dataloader=dl_train,
        log_dir=output_dir,
        print_steps=10,
        device=device,
        save_logits=False,
        gold=True,
        gold_step=args.p1_step
    )
    trainer.train()

    plot_color_mnist_generator(netG, save_path=save_path, file_name=f'{prefix}-eval_p2')

    netG.restore_checkpoint(ckpt_file=netG_ckpt_path)
    netG.to(device)
    plot_color_mnist_generator(netG, save_path=save_path, file_name=f'{prefix}-eval_generated_p1')
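Examples no. 1 and no. 3 build a biased colour-MNIST dataset through get_predefined_dataset(), where major_ratio controls how strongly digit identity correlates with colour. The helper itself is not shown; the sketch below illustrates the usual construction (the palette and class layout are hypothetical):

import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import datasets

# Hypothetical 10-colour palette: one "aligned" colour per digit.
PALETTE = np.array([
    [255, 0, 0], [0, 255, 0], [0, 0, 255], [255, 255, 0], [255, 0, 255],
    [0, 255, 255], [255, 128, 0], [128, 0, 255], [0, 128, 128], [128, 128, 128],
], dtype=np.float32)

class BiasedColorMNIST(Dataset):
    def __init__(self, root, major_ratio=0.99, num_data=10000):
        base = datasets.MNIST(root, train=True, download=True)
        picks = np.random.choice(len(base), num_data, replace=False)
        self.samples = []
        for i in picks:
            img, label = base[int(i)]
            # With probability major_ratio, use the digit's aligned colour;
            # otherwise pick a different colour (a bias-conflicting sample).
            if np.random.rand() < major_ratio:
                color = label
            else:
                color = int(np.random.choice([c for c in range(10) if c != label]))
            gray = np.asarray(img, dtype=np.float32)[..., None] / 255.0
            rgb = torch.from_numpy(gray * PALETTE[color] / 255.0)
            self.samples.append((rgb.permute(2, 0, 1), label, color, int(i)))

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        # Matches the 4-tuple unpacked in Example no. 3: (image, label, bias, index).
        return self.samples[idx]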
Example no. 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", "-d", default="cifar10", type=str)
    parser.add_argument("--root",
                        "-r",
                        default="./dataset/cifar10",
                        type=str,
                        help="dataset dir")
    parser.add_argument("--work_dir",
                        default="./exp_results",
                        type=str,
                        help="output dir")
    parser.add_argument("--exp_name", type=str, help="exp name")
    parser.add_argument("--baseline_exp_name", type=str, help="exp name")
    parser.add_argument('--p1_step', default=40000, type=int)
    parser.add_argument("--model",
                        default="sngan",
                        type=str,
                        help="network model")
    parser.add_argument("--loss_type",
                        default="hinge",
                        type=str,
                        help="loss type")
    parser.add_argument('--gpu',
                        default='0',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--num_steps', default=80000, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--decay', default='linear', type=str)
    parser.add_argument('--n_dis', default=5, type=int)
    parser.add_argument('--resample_score', type=str)
    parser.add_argument('--gold', action='store_true')
    parser.add_argument('--topk', action='store_true')
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    baseline_output_dir = f'{args.work_dir}/{args.baseline_exp_name}'
    baseline_save_path = Path(baseline_output_dir)

    set_seed(args.seed)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    prefix = args.exp_name.split('/')[-1]

    # the same scoring window is used for every dataset
    window = 5000

    if not args.gold:
        logit_path = baseline_save_path / 'logits_netD_eval.pkl'
        print(f'Use logit from: {logit_path}')
        logits = pickle.load(open(logit_path, "rb"))
        score_start_step = (args.p1_step - window)
        score_end_step = args.p1_step
        score_dict = calculate_scores(logits,
                                      start_epoch=score_start_step,
                                      end_epoch=score_end_step)
        sample_weights = score_dict[args.resample_score]
        print(
            f'sample_weights mean: {sample_weights.mean()}, var: {sample_weights.var()}, max: {sample_weights.max()}, min: {sample_weights.min()}'
        )
    else:
        sample_weights = None

    netG_ckpt_path = baseline_save_path / f'checkpoints/netG/netG_{args.p1_step}_steps.pth'
    netD_ckpt_path = baseline_save_path / f'checkpoints/netD/netD_{args.p1_step}_steps.pth'

    # the DRS discriminator is warm-started from the same phase-1 netD checkpoint
    netD_drs_ckpt_path = baseline_save_path / f'checkpoints/netD/netD_{args.p1_step}_steps.pth'
    netG, netD, netD_drs, optG, optD, optD_drs = get_gan_model(
        dataset_name=args.dataset,
        model=args.model,
        loss_type=args.loss_type,
        drs=True,
        topk=args.topk,
        gold=args.gold,
    )

    print(f'model: {args.model} - netD_drs_ckpt_path: {netD_drs_ckpt_path}')

    print_num_params(netG, netD)

    ds_train = get_predefined_dataset(dataset_name=args.dataset,
                                      root=args.root,
                                      weights=None)
    dl_train = get_dataloader(ds_train,
                              batch_size=args.batch_size,
                              weights=sample_weights)

    ds_drs = get_predefined_dataset(dataset_name=args.dataset,
                                    root=args.root,
                                    weights=None)
    dl_drs = get_dataloader(ds_drs, batch_size=args.batch_size, weights=None)

    if not args.gold:
        show_sorted_score_samples(ds_train,
                                  score=sample_weights,
                                  save_path=save_path,
                                  score_name=args.resample_score,
                                  plot_name=prefix)

    print(args)

    # Start training
    trainer = LogTrainer(
        output_path=save_path,
        netD=netD,
        netG=netG,
        optD=optD,
        optG=optG,
        netG_ckpt_file=str(netG_ckpt_path),
        netD_ckpt_file=str(netD_ckpt_path),
        netD_drs_ckpt_file=str(netD_drs_ckpt_path),
        netD_drs=netD_drs,
        optD_drs=optD_drs,
        dataloader_drs=dl_drs,
        n_dis=args.n_dis,
        num_steps=args.num_steps,
        save_steps=1000,
        lr_decay=args.decay,
        dataloader=dl_train,
        log_dir=output_dir,
        print_steps=10,
        device=device,
        topk=args.topk,
        gold=args.gold,
        gold_step=args.p1_step,
        save_logits=False,
    )
    trainer.train()
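Example no. 4 resamples the training set by passing sample_weights into get_dataloader(). The helper is not reproduced in these excerpts; a plausible sketch (its signature is an assumption) is a thin wrapper around WeightedRandomSampler:

import torch
from torch.utils import data

def get_dataloader(dataset, batch_size, weights=None, num_workers=4):
    # With per-sample weights, draw batches with replacement in proportion
    # to those weights; otherwise shuffle uniformly.
    sampler = None
    if weights is not None:
        sampler = data.WeightedRandomSampler(
            torch.as_tensor(weights, dtype=torch.double),
            num_samples=len(dataset),
            replacement=True)
    return data.DataLoader(dataset,
                           batch_size=batch_size,
                           sampler=sampler,
                           shuffle=(sampler is None),
                           num_workers=num_workers,
                           pin_memory=True)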
Example no. 5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model",
                        default="vgg16",
                        type=str,
                        help="network model")
    parser.add_argument('--gpu',
                        default='0',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--num_epochs', default=10, type=int)
    parser.add_argument('--attr', default='Bald', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    set_seed(args.seed)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    # load dataset
    print('Load data')
    train_dataset = get_celeba_with_attr(attr=args.attr,
                                         split='train',
                                         size=64)
    valid_dataset = get_celeba_with_attr(attr=args.attr,
                                         split='valid',
                                         size=64)
    test_dataset = get_celeba_with_attr(attr=args.attr, split='test', size=64)

    train_loader = get_dataloader(train_dataset, batch_size=args.batch_size)
    valid_loader = get_dataloader(valid_dataset, batch_size=args.batch_size)
    test_loader = get_dataloader(test_dataset, batch_size=args.batch_size)

    # load model
    print('Load model')
    if args.model == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif args.model == 'resnet18':
        model = models.resnet18(pretrained=True)
    elif args.model == 'inception':
        # note: inception_v3 expects 299x299 inputs and, in train mode,
        # also returns auxiliary logits; extra handling would be needed here
        model = models.inception_v3(pretrained=True)
    else:
        raise ValueError('model should be vgg16 or resnet18 or inception')

    # change the number of classes (the classification head lives in a
    # different attribute depending on the backbone)
    if args.model == 'vgg16':
        in_features = model.classifier[6].in_features
        model.classifier[6] = nn.Linear(in_features, 2, bias=True)
        # freeze convolution weights; only the classifier head is trained
        for param in model.features.parameters():
            param.requires_grad = False
        trainable_params = model.classifier.parameters()
    else:
        # resnet18 and inception expose the classification head as model.fc
        in_features = model.fc.in_features
        model.fc = nn.Linear(in_features, 2, bias=True)
        for name, param in model.named_parameters():
            if not name.startswith('fc.'):
                param.requires_grad = False
        trainable_params = model.fc.parameters()

    # optimizer (only the unfrozen head parameters are updated)
    optimizer = optim.SGD(trainable_params, lr=0.001, momentum=0.9)
    # loss function
    criterion = nn.CrossEntropyLoss()

    model.to(device)

    save_path = './convnet_celeba'
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    csv_file = os.path.join(save_path, 'loss_acc.csv')
    if os.path.exists(csv_file):
        f = open(csv_file, 'a', newline='')
        wr = csv.writer(f)
    else:
        f = open(csv_file, 'w', newline='')
        wr = csv.writer(f)
        wr.writerow([
            '', 'Train Acc', 'Valid Acc', 'Test Acc', 'Train Loss',
            'Valid Loss', 'Test Loss'
        ])

    train_loss, train_accuracy = [], []
    val_loss, val_accuracy = [], []
    start = time.time()
    print('Start training')
    for epoch in range(args.num_epochs):
        print(f'Epoch: {epoch+1}')
        train_epoch_loss, train_epoch_accuracy = fit(model, optimizer,
                                                     train_loader, criterion,
                                                     device)
        print(
            f'Train Loss: {train_epoch_loss:.4f}, Train Acc: {train_epoch_accuracy:.2f}'
        )
        val_epoch_loss, val_epoch_accuracy = validate(model, valid_loader,
                                                      criterion, device)
        print(
            f'Valid Loss: {val_epoch_loss:.4f}, Valid Acc: {val_epoch_accuracy:.2f}'
        )
        train_loss.append(train_epoch_loss)
        train_accuracy.append(train_epoch_accuracy)
        val_loss.append(val_epoch_loss)
        val_accuracy.append(val_epoch_accuracy)
    end = time.time()
    print((end - start) / 60, 'minutes')

    test_loss, test_accuracy = validate(model, test_loader, criterion, device)
    print(f'Test Loss: {test_loss:.4f}, Test Acc: {test_accuracy:.2f}')

    # save loss and accuracy
    wr.writerow([
        args.attr, train_epoch_accuracy, val_epoch_accuracy, test_accuracy,
        train_epoch_loss, val_epoch_loss, test_loss
    ])
    f.close()

    # save model
    print('Save model')
    torch.save(model.state_dict(), os.path.join(save_path, f'{args.attr}.pth'))
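Example no. 5 relies on fit() and validate() helpers that are not shown. Minimal sketches, assuming the CelebA loaders yield (image, label) pairs and accuracies are reported as percentages:

import torch

def fit(model, optimizer, loader, criterion, device):
    # One training epoch; returns (mean loss, accuracy in percent).
    model.train()
    running_loss, correct, total = 0.0, 0, 0
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item() * labels.size(0)
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    return running_loss / total, 100.0 * correct / total

@torch.no_grad()
def validate(model, loader, criterion, device):
    # Evaluation pass with the same bookkeeping but no gradient updates.
    model.eval()
    running_loss, correct, total = 0.0, 0, 0
    for images, labels in loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        running_loss += criterion(outputs, labels).item() * labels.size(0)
        correct += (outputs.argmax(dim=1) == labels).sum().item()
        total += labels.size(0)
    return running_loss / total, 100.0 * correct / total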
Example no. 6
def main():
    parser = argparse.ArgumentParser()
    # ... (earlier argument definitions are truncated in this excerpt)
    parser.add_argument('--resample_score', type=str)
    parser.add_argument('--p1_step', default=200000, type=int)
    parser.add_argument('--logit_save_steps', default=100, type=int)
    parser.add_argument('--save_logit_after', default=1000000, type=int)
    # parser.add_argument('--stop_save_logit_after', default=45000, type=int)

    args = parser.parse_args()
    print(args)

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    set_seed(args.seed)

    n_gpu = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = n_gpu > 1

    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    args.latent = 512
    args.n_mlp = 8

    args.start_iter = 0
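The excerpt above initializes multi-GPU training and calls a synchronize() helper. A common implementation in distributed PyTorch training code (an assumption about this repo's version):

import torch

def synchronize():
    # Barrier across all processes; a no-op outside distributed runs.
    if not torch.distributed.is_available():
        return
    if not torch.distributed.is_initialized():
        return
    if torch.distributed.get_world_size() == 1:
        return
    torch.distributed.barrier()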
Example no. 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", "-d", default="cifar10", type=str)
    parser.add_argument("--work_dir", default="./exp_results", type=str, help="output dir")
    parser.add_argument("--exp_name", default="mimicry_pretrained-seed1", type=str, help="exp name")
    parser.add_argument("--baseline_exp_name", type=str, help="exp name")
    parser.add_argument('--p1_step', default=40000, type=int)
    parser.add_argument("--model", default="sngan", type=str, help="network model")
    parser.add_argument("--loss_type", default="hinge", type=str, help="loss type")
    parser.add_argument('--gpu', default='0', type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--batch_size', default=128, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument("--netG_ckpt_step", type=int)
    parser.add_argument("--netG_train_mode", action='store_true')
    parser.add_argument('--resample_score', type=str)
    parser.add_argument('--gold', action='store_true')
    parser.add_argument('--topk', action='store_true')
    parser.add_argument("--index_num", default=100, type=int, help="number of index to use for FID score")
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    baseline_output_dir = f'{args.work_dir}/{args.baseline_exp_name}'
    baseline_save_path = Path(baseline_output_dir)

    set_seed(args.seed)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    # load model
    assert args.netG_ckpt_step
    print(f'load model from {save_path} step: {args.netG_ckpt_step}')
    netG, _, _, _ = get_gan_model(
        dataset_name=args.dataset,
        model=args.model,
        loss_type=args.loss_type,
        topk=args.topk,
        gold=args.gold,
    )
    netG.to(device)
    if not args.netG_train_mode:
        netG.eval()

    window = 5000
    if args.dataset == 'celeba':
        dataset = 'celeba_64'
    else:
        dataset = args.dataset

    logit_path = baseline_save_path / 'logits_netD_eval.pkl'
    print(f'Use logit from: {logit_path}')
    logits = pickle.load(open(logit_path, "rb"))
    score_start_step = (args.p1_step - window)
    score_end_step = args.p1_step
    score_dict = calculate_scores(logits, start_epoch=score_start_step, end_epoch=score_end_step)
    sample_weights = score_dict[args.resample_score]
    print(
        f'sample_weights mean: {sample_weights.mean()}, var: {sample_weights.var()}, max: {sample_weights.max()}, min: {sample_weights.min()}')

    print(args)

    sort_index = np.argsort(sample_weights)
    high_index = sort_index[-args.index_num:]
    low_index = sort_index[:args.index_num]

    # Evaluate fid with index of high weight
    evaluate_with_index(
        metric='fid',
        index=high_index,
        log_dir=save_path,
        netG=netG,
        dataset=dataset,
        num_fake_samples=50000,
        evaluate_step=args.netG_ckpt_step,
        num_runs=1,
        device=device,
        stats_file=None,
        name=f'high_{args.resample_score}',
    )

    # Evaluate fid with index of low weight
    evaluate_with_index(
        metric='fid',
        index=low_index,
        log_dir=save_path,
        netG=netG,
        dataset=dataset,
        num_fake_samples=50000,
        evaluate_step=args.netG_ckpt_step,
        num_runs=1,
        device=device,
        stats_file=None,
        name=f'low_{args.resample_score}',
    )
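Examples no. 4 and no. 7 score each training sample from a saved history of discriminator logits (logits_netD_eval.pkl) over a window of steps ending at p1_step. calculate_scores() is not shown; the sketch below assumes the pickle maps a training step to a per-sample logit array, and the score names are hypothetical:

import numpy as np

def calculate_scores(logits, start_epoch, end_epoch):
    # Stack each sample's logit trajectory over the window, then reduce it
    # to per-sample statistics that can serve as resampling weights.
    steps = [s for s in sorted(logits) if start_epoch <= s <= end_epoch]
    traj = np.stack([np.asarray(logits[s]) for s in steps])  # shape (T, N)
    mean, var = traj.mean(axis=0), traj.var(axis=0)
    return {'mean': mean, 'var': var, 'mean_times_var': mean * var}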
Example no. 8
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", "-d", default="cifar10", type=str)
    parser.add_argument("--root",
                        "-r",
                        default="./dataset/cifar10",
                        type=str,
                        help="dataset dir")
    parser.add_argument("--work_dir",
                        default="./exp_results",
                        type=str,
                        help="output dir")
    parser.add_argument("--exp_name",
                        default="cifar10",
                        type=str,
                        help="exp name")
    parser.add_argument("--model",
                        default="sngan",
                        type=str,
                        help="network model")
    parser.add_argument("--loss_type",
                        default="hinge",
                        type=str,
                        help="loss type")
    parser.add_argument('--gpu',
                        default='0',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--num_pack', default=1, type=int)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument('--download_dataset', action='store_true')
    parser.add_argument('--topk', action='store_true')
    parser.add_argument('--num_steps', default=100000, type=int)
    parser.add_argument('--logit_save_steps', default=100, type=int)
    parser.add_argument('--decay', default='linear', type=str)
    parser.add_argument('--n_dis', default=5, type=int)
    parser.add_argument('--imb_factor', default=0.1, type=float)
    parser.add_argument('--celeba_class_attr', default='glass', type=str)
    parser.add_argument('--ckpt_step', type=int)
    parser.add_argument('--no_save_logits', action='store_true')
    parser.add_argument('--save_logit_after', default=30000, type=int)
    parser.add_argument('--stop_save_logit_after', default=60000, type=int)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    output_dir = f'{args.work_dir}/{args.exp_name}'
    save_path = Path(output_dir)
    save_path.mkdir(parents=True, exist_ok=True)

    set_seed(args.seed)

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    netG, netD, optG, optD = get_gan_model(
        dataset_name=args.dataset,
        model=args.model,
        loss_type=args.loss_type,
        topk=args.topk,
    )

    print_num_params(netG, netD)

    ds_train = get_predefined_dataset(
        dataset_name=args.dataset,
        root=args.root,
    )
    dl_train = get_dataloader(ds_train, batch_size=args.batch_size)

    if args.dataset == 'celeba':
        args.num_steps = 75000
        args.logit_save_steps = 100
        args.save_logit_after = 55000
        args.stop_save_logit_after = 60000

    if args.dataset == 'cifar10':
        args.num_steps = 50000
        args.logit_save_steps = 100
        args.save_logit_after = 35000
        args.stop_save_logit_after = 40000

    print(args)

    if args.ckpt_step:
        netG_ckpt_file = save_path / f'checkpoints/netG/netG_{args.ckpt_step}_steps.pth'
        netD_ckpt_file = save_path / f'checkpoints/netD/netD_{args.ckpt_step}_steps.pth'
    else:
        netG_ckpt_file = None
        netD_ckpt_file = None

    # Start training
    trainer = LogTrainer(
        output_path=save_path,
        logit_save_steps=args.logit_save_steps,
        netG_ckpt_file=netG_ckpt_file,
        netD_ckpt_file=netD_ckpt_file,
        netD=netD,
        netG=netG,
        optD=optD,
        optG=optG,
        n_dis=args.n_dis,
        num_steps=args.num_steps,
        save_steps=1000,
        lr_decay=args.decay,
        dataloader=dl_train,
        log_dir=output_dir,
        print_steps=10,
        device=device,
        topk=args.topk,
        save_logits=not args.no_save_logits,
        save_logit_after=args.save_logit_after,
        stop_save_logit_after=args.stop_save_logit_after,
    )
    trainer.train()
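Several examples call print_num_params() before training. Its body is not included; the obvious implementation (assumed) just counts trainable parameters:

def print_num_params(netG, netD):
    # Report trainable parameter counts for both networks.
    num_g = sum(p.numel() for p in netG.parameters() if p.requires_grad)
    num_d = sum(p.numel() for p in netD.parameters() if p.requires_grad)
    print(f'netG parameters: {num_g:,} | netD parameters: {num_d:,}')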
Example no. 9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--work_dir",
                        default="./exp_results",
                        type=str,
                        help="output dir")
    parser.add_argument("--exp_name",
                        default="mimicry_pretrained-seed1",
                        type=str,
                        help="exp name")
    parser.add_argument("--model",
                        default="sngan",
                        type=str,
                        help="network model")
    parser.add_argument("--loss_type",
                        default="hinge",
                        type=str,
                        help="loss type")
    parser.add_argument("--classifier",
                        default="vgg16",
                        type=str,
                        help="calssifier network model")
    parser.add_argument('--gpu',
                        default='0',
                        type=str,
                        help='id(s) for CUDA_VISIBLE_DEVICES')
    parser.add_argument('--batch_size', default=100, type=int)
    parser.add_argument('--seed', default=1, type=int)
    parser.add_argument("--netG_ckpt_step", type=int)
    parser.add_argument("--netG_train_mode", action='store_true')
    parser.add_argument("--use_original_netD", action='store_true')
    parser.add_argument('--attr', default='Bald', type=str)
    parser.add_argument('--drs', action='store_true')
    parser.add_argument('--num_samples', default=50000, type=int)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    set_seed(args.seed)
    print(args)

    save_path = f'{args.work_dir}/{args.exp_name}'

    if torch.cuda.is_available():
        device = "cuda"
        cudnn.benchmark = True
    else:
        device = "cpu"

    # load model
    assert args.netG_ckpt_step
    print(f'load model from {save_path} step: {args.netG_ckpt_step}')
    if args.drs:
        netG, _, netD_drs, _, _, _ = get_gan_model(dataset_name='celeba',
                                                   model=args.model,
                                                   loss_type=args.loss_type,
                                                   drs=True)
    else:
        netG, _, _, _ = get_gan_model(
            dataset_name='celeba',
            model=args.model,
            loss_type=args.loss_type,
        )
    netG.to(device)
    if args.drs:
        netD_drs.to(device)
    if not args.netG_train_mode:
        netG.eval()
        if args.drs:
            netD_drs.eval()

    gan_ckpt = f'{args.work_dir}/{args.exp_name}/checkpoints/netG/netG_{args.netG_ckpt_step}_steps.pth'
    if args.use_original_netD:
        netD_drs_ckpt = f'{args.work_dir}/{args.exp_name}/checkpoints/netD/netD_{args.netG_ckpt_step}_steps.pth'
    else:
        netD_drs_ckpt = f'{args.work_dir}/{args.exp_name}/checkpoints/netD_drs/netD_drs_{args.netG_ckpt_step}_steps.pth'
    print(gan_ckpt)
    netG.restore_checkpoint(ckpt_file=gan_ckpt)
    if args.drs:
        netD_drs.restore_checkpoint(ckpt_file=netD_drs_ckpt)
        netG = DRS(netG=netG, netD=netD_drs, device=device)

    # load classifier
    print('Load classifier')
    if args.classifier == 'vgg16':
        model = models.vgg16(pretrained=True)
    elif args.classifier == 'resnet18':
        model = models.resnet18(pretrained=True)
    elif args.classifier == 'inception':
        # note: inception_v3 expects 299x299 inputs, larger than the 64x64
        # samples generated below; vgg16/resnet18 are the safe choices here
        model = models.inception_v3(pretrained=True)
    else:
        raise ValueError('model should be vgg16 or resnet18 or inception')

    # change the number of classes (vgg16 keeps its head in classifier[6];
    # resnet18 and inception expose it as model.fc)
    if args.classifier == 'vgg16':
        model.classifier[6] = nn.Linear(model.classifier[6].in_features, 2, bias=True)
    else:
        model.fc = nn.Linear(model.fc.in_features, 2, bias=True)

    classifier_path = './convnet_celeba'
    model.load_state_dict(
        torch.load(os.path.join(classifier_path, f'{args.attr}.pth')))
    model.to(device)

    batch_size = min(args.batch_size, args.num_samples)
    num_batches = args.num_samples // batch_size

    attr_num = 0
    not_attr_num = 0
    for _ in range(num_batches):
        with torch.no_grad():
            img = netG.generate_images(batch_size, device=device)
            labels = model(img)
            answers = torch.argmax(labels, dim=1)
            attr = torch.count_nonzero(answers).item()
            not_attr = batch_size - attr
            attr_num += attr
            not_attr_num += not_attr

    print(f'attr: {attr_num}')
    print(f'not attr: {not_attr_num}')

    output_dir = os.path.join(save_path, 'evaluate',
                              f'step-{args.netG_ckpt_step}')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    output_file = os.path.join(output_dir, 'count_attribute.csv')
    if os.path.exists(output_file):
        with open(output_file, 'a', newline='') as f:
            wr = csv.writer(f)
            wr.writerow([args.attr, attr_num, not_attr_num])
    else:
        with open(output_file, 'w', newline='') as f:
            wr = csv.writer(f)
            wr.writerow(['', 'attr', 'not attr'])
            wr.writerow([args.attr, attr_num, not_attr_num])
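Examples no. 2 and no. 9 wrap the generator in a DRS object so that generated samples are filtered by the auxiliary discriminator. The repo's class is not shown; below is a deliberately simplified rejection-sampling sketch (the published DRS method of Azadi et al. additionally calibrates the acceptance probability using a burn-in estimate of the maximum discriminator logit):

import torch

class SimpleDRS:
    """Simplified discriminator rejection sampling (illustration only)."""

    def __init__(self, netG, netD, device):
        self.netG, self.netD, self.device = netG, netD, device

    @torch.no_grad()
    def generate_images(self, num_images, device=None):
        device = device or self.device
        accepted, num_accepted = [], 0
        while num_accepted < num_images:
            imgs = self.netG.generate_images(num_images, device=device)
            logits = self.netD(imgs).view(-1)
            # Keep each sample with probability sigmoid(logit), so images the
            # discriminator rates as more realistic survive more often.
            keep = torch.rand_like(logits) < torch.sigmoid(logits)
            accepted.append(imgs[keep])
            num_accepted += int(keep.sum().item())
        return torch.cat(accepted)[:num_images]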