    # Note: this example starts mid-script; `statistics`, `fid_model` and
    # `args` are assumed to be defined earlier in the original file.
    m, C = statistics['m'], statistics['C']

    if args.cp_path is None:
        raise ValueError(
            'There is no checkpoint/model path. Use arg --cp-path to indicate the path!'
        )

    if args.sub_key is None:
        raise ValueError(
            'There is no key to substitute. Use arg --sub-key to indicate the key!'
        )

    print(args.sub_key, args.cp_path)

    generator = Generator(100, [1024, 512, 256, 128], 3).eval()
    gen_state = torch.load(args.cp_path,
                           map_location=lambda storage, loc: storage)
    generator.load_state_dict(gen_state['model_state'])

    if args.cuda:
        generator = generator.cuda()

    fid = []

    for i in range(args.ntests):
        fid.append(
            compute_fid(
                generator,
                fid_model,
                args.batch_size,
                # The original snippet is truncated here; the remaining
                # arguments are assumptions based on the test-set statistics
                # loaded at the top of this example.
                m,
                C,
                args.cuda))
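
    # Not part of the original (the snippet is truncated above): an assumed,
    # minimal way to summarize the repeated FID measurements, using numpy as np.
    fid = np.asarray(fid)
    print('FID over {} runs: {:.4f} +/- {:.4f}'.format(
        args.ntests, fid.mean(), fid.std()))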
Example #2
# Note: this example also starts mid-script; fid_model, transform and args
# are assumed to be defined above in the original file.
mod_state = torch.load(args.fid_model_path,
                       map_location=lambda storage, loc: storage)
fid_model.load_state_dict(mod_state['model_state'])

if not os.path.isfile('../test_data_statistics.p'):
    testset = datasets.CIFAR10(root=args.data_path,
                               train=False,
                               download=True,
                               transform=transform)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=1000,
                                              shuffle=False,
                                              num_workers=args.workers)
    save_testdata_statistics(fid_model, test_loader, cuda_mode=args.cuda)
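
# Aside (assumption, mirroring Example #1): the pickle written by
# save_testdata_statistics is presumably read back as
#     statistics = pickle.load(open('../test_data_statistics.p', 'rb'))
#     m, C = statistics['m'], statistics['C']
# i.e. the test-set activation mean and covariance used for FID.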

generator = Generator(100, [1024, 512, 256, 128], 3).train()
disc_list = []
for i in range(args.ndiscriminators):
    disc = Discriminator_wgan(3, [128, 256, 512, 1024], 1, optim.Adam, args.lr,
                              (args.beta1, args.beta2)).train()
    disc_list.append(disc)

if args.cuda:
    generator = generator.cuda()
    for disc in disc_list:
        disc.cuda()  # nn.Module.cuda() moves parameters in place
    torch.backends.cudnn.benchmark = True

optimizer = optim.Adam(generator.parameters(),
                       lr=args.lr,
                       betas=(args.beta1, args.beta2))
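
# Generic usage sketch, not the repo's training loop (the original presumably
# hands generator, disc_list and optimizer to a trainer defined elsewhere):
# one generator forward pass. The (batch, 100) latent shape is an assumption.
z = torch.randn(args.batch_size, 100)
if args.cuda:
    z = z.cuda()
fake_samples = generator(z)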
Example #3
    parser.add_argument('--no-plots',  # flag name inferred from args.no_plots below
                        action='store_true',
                        default=False,
                        help='Disables plot of train/test losses')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='Disables GPU use')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cp_path is None:
        raise ValueError(
            'There is no checkpoint/model path. Use arg --cp-path to indicate the path!'
        )

    model = Generator(128, [1024, 512, 256, 128, 64, 32], 3)

    ckpt = torch.load(args.cp_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(ckpt['model_state'])

    if args.cuda:
        model = model.cuda()

    print('Cuda Mode is: {}'.format(args.cuda))

    history = ckpt['history']

    if not args.no_plots:
        plot_learningcurves(history, 'gen_loss')
        plot_learningcurves(history, 'disc_loss')
        plot_learningcurves(history, 'gen_loss_minibatch')
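
    # Minimal stand-in for plot_learningcurves (assumption: the repo's helper
    # plots history[key], a list of recorded loss values, against its index).
    def plot_learningcurves_sketch(history, key):
        import matplotlib.pyplot as plt
        plt.plot(history[key])
        plt.xlabel('Iteration')
        plt.ylabel(key)
        plt.show()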
Example #4
parser.add_argument('--alpha', type=float, default=0.8, metavar='alpha', help='Used in GMAN and loss_del modes (default: 0.8)')
parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use')
args = parser.parse_args()
args.cuda = True if not args.no_cuda and torch.cuda.is_available() else False

torch.manual_seed(args.seed)
if args.cuda:
	torch.cuda.manual_seed(args.seed)

# requires: from PIL import Image (for the BICUBIC flag)
transform = transforms.Compose([
	transforms.Resize((256, 256), interpolation=Image.BICUBIC),
	transforms.RandomHorizontalFlip(),
	transforms.ToTensor(),
	transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

cats_data = datasets.ImageFolder(args.data_path, transform=transform)

train_loader = torch.utils.data.DataLoader(cats_data, batch_size=args.batch_size, shuffle=True, num_workers=args.workers)

generator = Generator(128, [1024, 512, 256, 128, 64, 32], 3).train()

if args.disc_mode == 'RP':
	disc_list = []
	for i in range(args.ndiscriminators):
		disc = Discriminator(3, [32, 64, 128, 256, 512, 1024], 1, optim.Adam, args.lr, (args.beta1, args.beta2)).train()
		disc_list.append(disc)

elif args.disc_mode == 'MD':
	D1 = Discriminator_vanilla(ndf=64, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
	D2 = Discriminator_f6(ndf=64, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
	D3 = Discriminator_f8(ndf=32, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
	D4 = Discriminator_f4s3(ndf=64, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
	D5 = Discriminator_dense(ndf=64, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
	D6 = Discriminator_f16(ndf=16, nc=3, optimizer=optim.Adam, lr=args.lr, betas=(args.beta1, args.beta2)).train()
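
	# Sketch (assumption): the six discriminators above are presumably gathered
	# into a list here, mirroring disc_list in the 'RP' branch.
	disc_list = [D1, D2, D3, D4, D5, D6]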
Example #5
    parser.add_argument(
        '--n-inception',
        type=int,
        default=1024,
        metavar='N',
        help='number of samples to calculate inception score (default: 1024)')
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    if args.cp_path is None:
        raise ValueError(
            'There is no checkpoint/model path. Use arg --cp-path to indicate the path!'
        )

    model = Generator(100, [1024, 512, 256, 128], 3)

    ckpt = torch.load(args.cp_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(ckpt['model_state'])

    if args.cuda:
        model = model.cuda()

    print('Cuda Mode is: {}'.format(args.cuda))

    history = ckpt['history']

    print('Min FID:', np.min(history['FID-c']))
    print('Epoch with min FID:', np.argmin(history['FID-c']))  # 0-based index

    if not args.no_plots:
        # The source is truncated here; these calls are assumed, mirroring
        # Example #3 and the 'FID-c' history key used just above.
        plot_learningcurves(history, 'gen_loss')
        plot_learningcurves(history, 'disc_loss')
        plot_learningcurves(history, 'FID-c')