class Predictor(cog.Predictor):
    """Cog predictor that samples a single image from a pre-trained StyleGAN."""

    def setup(self):
        # Prefer GPU when available, otherwise run on CPU.
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.generator = StyledGenerator(512).to(self.device)
        print("Loading checkpoint")
        # The checkpoint stores several sub-models; 'g_running' is the
        # exponential-moving-average generator used for inference.
        self.generator.load_state_dict(
            torch.load(
                "stylegan-1024px-new.model",
                map_location=self.device,
            )["g_running"],
        )
        self.generator.eval()

    @cog.input("seed", type=int, default=-1, help="Random seed, -1 for random")
    def predict(self, seed):
        # A negative seed means "pick one at random" (2 random bytes -> 0..65535).
        if seed < 0:
            seed = int.from_bytes(os.urandom(2), "big")
        torch.manual_seed(seed)
        print(f"seed: {seed}")
        mean_style = get_mean_style(self.generator, self.device)
        # Progressive-growing step index: log2(SIZE) - 2 (base resolution is 4px).
        step = int(math.log(SIZE, 2)) - 2
        img = sample(self.generator, step, mean_style, 1, self.device)
        # Write the sample to a fresh temp dir and hand the path back to cog.
        output_path = Path(tempfile.mkdtemp()) / "output.png"
        utils.save_image(img, output_path, normalize=True)
        return output_path
def load_network(ckpt):
    """Build the generator/discriminator pair and restore their weights.

    ``ckpt`` is a path to a checkpoint holding 'g_running' (EMA generator)
    and 'discriminator' state dicts. Returns ``(generator, discriminator)``.
    """
    state = torch.load(ckpt)
    g_running = StyledGenerator(code_size).cuda()
    g_running.load_state_dict(state['g_running'])
    discriminator = Discriminator(from_rgb_activate=True).cuda()
    discriminator.load_state_dict(state['discriminator'])
    return g_running, discriminator
def setup(self):
    """Build the StyleGAN generator and load the EMA ('g_running') weights."""
    use_gpu = torch.cuda.is_available()
    self.device = torch.device("cuda" if use_gpu else "cpu")
    self.generator = StyledGenerator(512).to(self.device)
    print("Loading checkpoint")
    # map_location keeps the load working on CPU-only machines.
    checkpoint = torch.load(
        "stylegan-1024px-new.model",
        map_location=self.device,
    )
    self.generator.load_state_dict(checkpoint["g_running"])
    self.generator.eval()
def loadStyleGAN():
    """Load a frozen, eval-mode StyleGAN1 generator from the repo checkpoint.

    Appends the StyleGAN1 repo to ``sys.path`` so its modules can be imported,
    restores the EMA ('g_running') weights, and disables gradients on every
    parameter so the generator is inference-only.

    Returns:
        The loaded ``StyledGenerator`` on CUDA.
    """
    sys.path.append(StyleGAN1_root)
    ckpt_root = join(StyleGAN1_root, 'checkpoint')
    from model import StyledGenerator
    generator = StyledGenerator(512).to("cuda")
    # BUGFIX: use the (previously unused) ckpt_root with a portable join
    # instead of a hard-coded, non-raw Windows backslash path.
    generator.load_state_dict(
        torch.load(join(ckpt_root, 'stylegan-256px-new.model'))['g_running'])
    generator.eval()
    for param in generator.parameters():
        param.requires_grad_(False)
    return generator
# Log both to a file and to the console via the root logger.
filehandler = logging.FileHandler('stylegan.log')
streamhandler = logging.StreamHandler()
logger = logging.getLogger('')
logger.setLevel(logging.INFO)
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
logger.info(args)

# '-1' selects CPU; otherwise build one MXNet context per comma-separated GPU id.
if args.gpu_ids == '-1':
    context = [mx.cpu()]
else:
    context = [
        mx.gpu(int(i)) for i in args.gpu_ids.split(',') if i.strip()
    ]

generator = StyledGenerator(code_size)
generator.initialize(ctx=context)
generator.collect_params().reset_ctx(context)

# Adam with the StyleGAN betas (0.0, 0.99) on all generator parameters.
g_optimizer = gluon.Trainer(generator.collect_params(),
                            optimizer='adam',
                            optimizer_params={
                                'learning_rate': args.lr_default,
                                'beta1': 0.0,
                                'beta2': 0.99
                            },
                            kvstore='local')

# Set a different learning rate for style by setting the lr_mult of 0.01
for k in generator.collect_params().keys():
    if k.startswith('hybridsequential2'):
        # NOTE(review): snippet truncated here — the loop body is not visible.
import torch
from torchvision import utils

from model import StyledGenerator

device = 'cuda'

generator = StyledGenerator(512).to(device)
# NOTE(review): this checkpoint appears to be a raw state_dict (no 'g_running'
# sub-key) — confirm against how checkpoint/180000.model was saved.
generator.load_state_dict(torch.load('checkpoint/180000.model'))
generator.eval()

mean_style = None
step = 7  # progressive-growing step; output side length = 4 * 2**step
alpha = 1
shape = 4 * 2**step

with torch.no_grad():
    # Estimate the mean style vector by averaging over 10 batches of 1024 codes.
    for i in range(10):
        style = generator.mean_style(torch.randn(1024, 512).to(device))
        if mean_style is None:
            mean_style = style
        else:
            mean_style += style
    mean_style /= 10
    image = generator(
        # NOTE(review): snippet truncated — generator call arguments not visible.
# NOTE(review): snippet starts mid argparse call (the '--size' argument).
                    help='size of the image')
parser.add_argument('--n_row', type=int, default=3,
                    help='number of rows of sample matrix')
parser.add_argument('--n_col', type=int, default=5,
                    help='number of columns of sample matrix')
parser.add_argument('path', type=str, help='path to checkpoint file')

args = parser.parse_args()

device = 'cuda'

generator = StyledGenerator(512).to(device)
# 'g_running' holds the EMA copy of the generator weights.
generator.load_state_dict(torch.load(args.path)['g_running'])
generator.eval()

mean_style = get_mean_style(generator, device)
# Progressive-growing step for the requested resolution (4px base).
step = int(math.log(args.size, 2)) - 2

img = sample(generator, step, mean_style, args.n_row * args.n_col, device)
utils.save_image(img, 'sample.png', nrow=args.n_col, normalize=True,
                 range=(-1, 1))
for j in range(20):
    # NOTE(review): snippet truncated — loop body not visible.
batch_size = 16
n_critic = 1  # discriminator updates per generator update

parser = argparse.ArgumentParser(description='Progressive Growing of GANs')
parser.add_argument('path', type=str, help='path of specified dataset')
parser.add_argument('--lr', default=0.001, type=float, help='learning rate')
parser.add_argument('--init-size', default=8, type=int,
                    help='initial image size')
parser.add_argument('-d', '--data', default='celeba', type=str,
                    choices=['celeba', 'lsun'],
                    help=('Specify dataset. '
                          'Currently CelebA and LSUN is supported'))
args = parser.parse_args()

generator = StyledGenerator(code_size).cuda()
discriminator = Discriminator().cuda()
# g_running is the EMA copy of the generator; it is never trained directly.
g_running = StyledGenerator(code_size).cuda()
g_running.train(False)

class_loss = nn.CrossEntropyLoss()

# The style (mapping) network gets a 100x smaller learning rate.
g_optimizer = optim.Adam(generator.generator.parameters(), lr=args.lr,
                         betas=(0.0, 0.99))
g_optimizer.add_param_group({'params': generator.style.parameters(),
                             'lr': args.lr * 0.01})
d_optimizer = optim.Adam(
    discriminator.parameters(), lr=args.lr, betas=(0.0, 0.99))

# decay=0 makes g_running an exact copy of the fresh generator.
accumulate(g_running, generator, 0)
# NOTE(review): snippet starts mid argparse call (the '--size' argument).
                    help="size of the image")
parser.add_argument("--n_row", type=int, default=3,
                    help="number of rows of sample matrix")
parser.add_argument("--n_col", type=int, default=3,
                    help="number of columns of sample matrix")
parser.add_argument("path", type=str, help="path to checkpoint file")

args = parser.parse_args()

device = "cpu"

generator = StyledGenerator(512).to(device)
# Load the EMA generator weights, forcing all tensors onto CPU.
generator.load_state_dict(
    torch.load(args.path, map_location=torch.device("cpu"))["g_running"])
generator.eval()

mean_style = get_mean_style(generator, device)
# Progressive-growing step for the requested resolution (4px base).
step = int(math.log(args.size, 2)) - 2

# Pipeline to shrink each generated sample to 112px.
resize_img = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize(size=112),
    transforms.ToTensor(),
])
for j in range(500):
    img = sample(generator, step, mean_style, args.n_col * args.n_row,
        # NOTE(review): truncated — remaining sample(...) arguments not visible.
def main(args, myargs):
    """Build the StyleGAN training setup (models, optimizers, dataset) and train.

    Constructs a DataParallel generator/discriminator pair plus an EMA copy
    (g_running), optionally resumes from ``args.ckpt``, prepares the LMDB
    dataset and the per-resolution schedules, then hands off to ``train``.
    """
    code_size = 512
    batch_size = 16
    n_critic = 1
    generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
    discriminator = nn.DataParallel(
        Discriminator(from_rgb_activate=not args.no_from_rgb_activate)).cuda()
    # EMA copy of the generator; never trained directly.
    g_running = StyledGenerator(code_size).cuda()
    g_running.train(False)
    g_optimizer = optim.Adam(generator.module.generator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))
    # Style (mapping) network trains with a 100x smaller learning rate.
    g_optimizer.add_param_group({
        'params': generator.module.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01,
    })
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))
    # decay=0: initialize g_running as an exact copy of the generator.
    accumulate(g_running, generator.module, 0)
    # Resume every model and optimizer from the checkpoint, if given.
    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)
        generator.module.load_state_dict(ckpt['generator'])
        discriminator.module.load_state_dict(ckpt['discriminator'])
        g_running.load_state_dict(ckpt['g_running'])
        g_optimizer.load_state_dict(ckpt['g_optimizer'])
        d_optimizer.load_state_dict(ckpt['d_optimizer'])
    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
    ])
    dataset = MultiResolutionDataset(args.path, transform)
    # Per-resolution learning-rate and batch-size schedules (progressive growing).
    if args.sched:
        args.lr = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        args.batch = {
            4: 512,
            8: 256,
            16: 128,
            32: 64,
            64: 32,
            128: 32,
            256: 32
        }
    else:
        args.lr = {}
        args.batch = {}
    args.gen_sample = {512: (8, 4), 1024: (4, 2)}
    args.batch_default = 32
    train(args,
          dataset,
          generator,
          discriminator,
          g_optimizer=g_optimizer,
          d_optimizer=d_optimizer,
          g_running=g_running,
          code_size=code_size,
          n_critic=n_critic,
          myargs=myargs)
# NOTE(review): snippet starts mid expression — the transforms.Compose(...) call
# that this list belongs to begins before the visible text.
    [
        transforms.Resize(resize),
        transforms.CenterCrop(resize),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]
)

# Load every target image, preprocess, and stack into one batch tensor.
imgs = []
for imgfile in args.files:
    img = transform(Image.open(imgfile).convert("RGB"))
    imgs.append(img)
imgs = torch.stack(imgs, 0).to(device)

# strict=False tolerates missing/unexpected keys in the checkpoint.
g_ema = StyledGenerator(512)
g_ema.load_state_dict(torch.load(args.ckpt)["g_running"], strict=False)
g_ema.eval()
g_ema = g_ema.to(device)

step = int(math.log(args.size, 2)) - 2

# Estimate the mean and std of the W latent space from n_mean_latent samples.
with torch.no_grad():
    noise_sample = torch.randn(n_mean_latent, 512, device=device)
    latent_out = g_ema.style(noise_sample)
    latent_mean = latent_out.mean(0)
    latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5

# LPIPS perceptual distance used as the projection loss.
percept = lpips.PerceptualLoss(
    model="net-lin", net="vgg", use_gpu=device.startswith("cuda")
)
def optimize_latents():
    """Project reference images into StyleGAN latent space by optimization.

    For each of 83 pictures, optimizes 3 latent vectors (with increasing
    iteration budgets) to minimize a VGG-feature loss against the reference
    image, and saves all 83*3 optimized dlatents to ``args.dlatent_path``.
    """
    print("Optimizing Latents.")
    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(torch.load(args.path)['generator'])
    generator.eval()
    latent_optimizer = LatentOptimizer(generator, args.vgg_layer)
    mean_style = get_mean_style(generator, device)
    # One row per optimized latent: 83 pictures x 3 latents each.
    total = np.zeros((83 * 3, 512))
    # Optimize only the dlatents.
    for param in latent_optimizer.parameters():
        param.requires_grad_(False)
    if args.video or args.save_optimized_image:
        # Hook, saves an image during optimization to be used to create video.
        generated_image_hook = GeneratedImageHook(
            latent_optimizer.post_synthesis_processing, args.save_frequency)
    for i in range(3 * 83):  # 3 for each picture
        iid = i % 3                       # latent index within the picture
        path = int(i / 3)                 # picture index -> ./data/<path>.jpg
        iterations = int(200 * iid + 300) # 300 / 500 / 700 steps
        image_path = './data/' + str(path) + '.jpg'
        print(image_path)
        reference_image = load_images([image_path])
        reference_image = torch.from_numpy(reference_image).to(device)
        reference_image = latent_optimizer.vgg_processing(
            reference_image)  # normalize
        reference_features = latent_optimizer.vgg16(
            reference_image).detach()  # vgg
        reference_image = reference_image.detach()
        if args.use_latent_finder:
            # Warm-start from an encoder's predicted latent instead of zeros.
            image_to_latent = ImageToLatent().cuda()
            image_to_latent.load_state_dict(
                torch.load(args.image_to_latent_path))
            image_to_latent.eval()
            latents_to_be_optimized = image_to_latent(reference_image)
            latents_to_be_optimized = latents_to_be_optimized.detach().cuda(
            ).requires_grad_(True)
        else:
            latents_to_be_optimized = torch.zeros(
                (1, 512)).cuda().requires_grad_(True)
        criterion = LatentLoss()
        optimizer = torch.optim.SGD([latents_to_be_optimized],
                                    lr=args.learning_rate)
        progress_bar = tqdm(range(iterations))
        for step in progress_bar:
            optimizer.zero_grad()
            generated_image_features = latent_optimizer(
                latents_to_be_optimized, mean_style, i)
            #print(latents_to_be_optimized)
            loss = criterion(generated_image_features, reference_features)
            loss.backward()
            loss = loss.item()
            optimizer.step()
            progress_bar.set_description("Step: {}, Loss: {}".format(
                step, loss))
        optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy()
        total[i] = optimized_dlatents[0]
    np.save(args.dlatent_path, total)
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
])
dataset = MultiResolutionDataset(f'./dataset/{args.dataset}_lmdb',
                                 transform,
                                 resolution=args.image_size)

### load G and D ###
if args.supervised:
    # Supervised variant: generator additionally conditions on dataset indices.
    G_target = nn.DataParallel(
        StyledGenerator(code_size,
                        dataset_size=len(dataset),
                        embed_dim=code_size)).cuda()
    G_running_target = StyledGenerator(code_size,
                                       dataset_size=len(dataset),
                                       embed_dim=code_size).cuda()
    G_running_target.train(False)
    # decay=0: start the EMA copy as an exact clone.
    accumulate(G_running_target, G_target.module, 0)
else:
    G_target = nn.DataParallel(StyledGenerator(code_size)).cuda()
    # NOTE(review): collapsed formatting makes the nesting ambiguous — D_target
    # may belong outside this else-branch; confirm against the original file.
    D_target = nn.DataParallel(
        Discriminator(from_rgb_activate=True)).cuda()
    G_running_target = StyledGenerator(code_size).cuda()
    G_running_target.train(False)
    accumulate(G_running_target, G_target.module, 0)
G_source = nn.DataParallel(StyledGenerator(code_size)).cuda()
parser.add_argument(
    '--mixing', action='store_true', help='use mixing regularization'
)
parser.add_argument(
    '--loss',
    type=str,
    default='wgan-gp',
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)

args = parser.parse_args()

generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
## mostly it's making the potential operations concurrent
## The batch size should be larger than the number of GPUs used.
discriminator = nn.DataParallel(
    Discriminator(from_rgb_activate=not args.no_from_rgb_activate)
).cuda()
## ditto same
g_running = StyledGenerator(code_size).cuda()  # g_running is generator_running
g_running.train(False)

g_optimizer = optim.Adam(
    generator.module.generator.parameters(), lr=args.lr, betas=(0.0, 0.99)
)  # generator loss function / learning rate related stuff
g_optimizer.add_param_group(
    {
        # NOTE(review): snippet truncated — param-group dict body not visible.
if __name__ == '__main__':
    SIZE = [256, 512, 1024]
    # NOTE(review): the else-branch wraps torch.device around another
    # torch.device('cpu'); this works but the inner call is redundant.
    DEVICE = torch.device(
        'cuda:0' if torch.cuda.is_available() else torch.device('cpu'))
    CODE_SIZE = 512
    ######################################
    #          simple testing            #
    ######################################
    parser = argparse.ArgumentParser(description='Progressive Growing of GANs')
    parser.add_argument('--batch_size', type=int, default=1)
    opt = parser.parse_args()
    generator = StyledGenerator(CODE_SIZE).to(DEVICE)
    g_running = StyledGenerator(CODE_SIZE).to(DEVICE)
    cur_size = 512
    ## load models
    ckp_pth = os.path.join('checkpoint',
                           'stylegan-{}px-new.model'.format(cur_size))
    # NOTE(review): the checkpoint file is loaded twice (once per sub-key);
    # a single torch.load would suffice.
    ckp1, ckp2 = torch.load(ckp_pth,
                            map_location='cpu')['generator'], torch.load(
                                ckp_pth, map_location='cpu')['g_running']
    generator.load_state_dict(ckp1)
    g_running.load_state_dict(ckp2)
    print("Successfully loading the trained models!")
    ## get step
# NOTE(review): snippet starts with the closing paren of an earlier
# parser.add_argument call that is not visible here.
)
parser.add_argument(
    '--loss',
    type=str,
    default='wgan-gp',
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)
parser.add_argument('--ckpt_path', default='checkpoint', type=str,
                    help='path where the checkpoints will be stored')
parser.add_argument('--gen_path', default='', type=str,
                    help='path of the pretrained generator')
# NOTE(review): help text says "generator" but the flag is the discriminator
# path — likely a copy-paste slip in the original help string.
parser.add_argument('--discr_path', default='', type=str,
                    help='path of the pretrained generator')

args = parser.parse_args()

encoder = nn.DataParallel(
    PortraitEncoder(size=128, filters=64, filters_max=512,
                    num_layers=1)).cuda()
generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
discriminator = nn.DataParallel(
    Discriminator(from_rgb_activate=not args.no_from_rgb_activate)
).cuda()

e_optimizer = optim.Adam(
    encoder.module.parameters(), lr=args.lr, betas=(0.0, 0.99)
)
# e_optimizer.add_param_group(
#     {
#         'params': encoder.module.parameters(),
#         'lr': args.lr * 0.8,
#         'mult': 0.8,
#     }
# )
import sys import argparse from matplotlib import pyplot as plt #check cuda is availible can_cuda = torch.cuda.is_available() #get args parser = argparse.ArgumentParser(description='make sample and submit file') parser.add_argument('path', type=str, help='path to G checkpoint') args = parser.parse_args() ckpt = torch.load(args.path) #load generator(G) G = StyledGenerator(code_size).cuda() G.load_state_dict(ckpt['g_running']) if can_cuda: G.cuda() code_size = 512 # Generate 1000 images and make a grid to save them. n_output = 1000 with torch.no_grad(): z_sample = torch.randn(n_output, code_size) if can_cuda: z_sample.cuda() imgs_sample = G(z_sample, step=4, alpha=-1).data torchvision.utils.save_image( imgs_sample, './result.jpg', nrow=10, normalize=True,
# Overlay forward-iteration spectra at three step sizes on one figure.
fig = plot_spectra(data_FI["H_col"][4, 0][np.newaxis, :],
                   savename="spectrum_method_cmp.jpg",
                   label="ForwardIter 1E-3", fig=fig)
fig = plot_spectra(data_FI["H_col"][5, 0][np.newaxis, :],
                   savename="spectrum_method_cmp.jpg",
                   label="ForwardIter 3E-3", fig=fig)
fig = plot_spectra(data_FI["H_col"][6, 0][np.newaxis, :],
                   savename="spectrum_method_cmp.jpg",
                   label="ForwardIter 1E-2", fig=fig)
plt.show()
#%%
"""
This is the smaller explicit version of StyleGAN. Very easy to work with
"""
#%%
# Make the style-based-gan-pytorch repo importable (two machine-specific paths).
sys.path.append("E:\Github_Projects\style-based-gan-pytorch")
sys.path.append("D:\Github\style-based-gan-pytorch")
from model import StyledGenerator
from generate import get_mean_style
import math
#%%
generator = StyledGenerator(512).to("cuda")
# generator.load_state_dict(torch.load(r"E:\Github_Projects\style-based-gan-pytorch\checkpoint\stylegan-256px-new.model")['g_running'])
generator.load_state_dict(
    torch.load(r"D:\Github\style-based-gan-pytorch\checkpoint\stylegan-256px-new.model")[
        'g_running'])
generator.eval()
# Freeze the generator — inference only.
for param in generator.parameters():
    param.requires_grad_(False)

mean_style = get_mean_style(generator, "cuda")
step = int(math.log(256, 2)) - 2  # progressive-growing step for 256px
#%%
feat = torch.randn(1, 512, requires_grad=False).to("cuda")
image = generator(
    feat,
    step=step,
    alpha=1,
    mean_style=mean_style,
    # NOTE(review): snippet truncated — remaining generator arguments not visible.
# NOTE(review): snippet starts mid argparse call (the '--valid_size' argument).
                    type=float,
                    help="ratio of validation set size to training set size")
args = parser.parse_args()

# Fall back to CPU when CUDA was requested but is unavailable.
if args.cuda and not torch.cuda.is_available():
    args.cuda = False
    print(
        "Cuda is no found on device so defaulting to running code on CPU")
if args.valid_size < 0 or args.valid_size > 1:
    raise ValueError('valid_size must be between 0 and 1')

device = torch.device('cuda' if args.cuda else 'cpu')

generator = nn.DataParallel(StyledGenerator(args.code_size)).to(device)
encoder = nn.DataParallel(Encoder(args.code_size)).to(device)

# Style (mapping) network trains with a 100x smaller learning rate.
g_optimizer = optim.Adam(generator.module.generator.parameters(),
                         lr=args.lr,
                         betas=(0.0, 0.99))
g_optimizer.add_param_group({
    'params': generator.module.style.parameters(),
    'lr': args.lr * 0.01
})
e_optimizer = optim.Adam(encoder.module.parameters(),
                         lr=args.lr,
                         betas=(0.0, 0.99))

step = args.init_size // 4 - 1
# NOTE(review): np.random.seed() returns None, so random_seed is always None —
# this seeds numpy's global RNG from entropy but stores nothing useful.
random_seed = np.random.seed()
import torch
from torchvision import utils

from model import StyledGenerator

generator = StyledGenerator(512).cuda()
# NOTE(review): raw state_dict load (no 'g_running' key) and no .eval() /
# torch.no_grad() here — confirm that is intentional for this script.
generator.load_state_dict(torch.load('checkpoint/130000.model'))

mean_style = None
step = 6  # progressive-growing step; output side length = 4 * 2**step
shape = 4 * 2**step

# Estimate the mean style vector by averaging over 10 batches of 1024 codes.
for i in range(10):
    style = generator.mean_style(torch.randn(1024, 512).cuda())
    if mean_style is None:
        mean_style = style
    else:
        mean_style += style
mean_style /= 10

image = generator(
    torch.randn(50, 512).cuda(),
    step=step,
    alpha=1,
    mean_style=mean_style,
    style_weight=0.7,
    # NOTE(review): snippet truncated — remaining generator arguments not visible.
parser.add_argument('--out_dir',
                    type=str,
                    default='samples/',
                    help='output directory for samples')
parser.add_argument('--path',
                    type=str,
                    default='./stylegan-ffhq-1024px-new.params',
                    help='path to checkpoint file')
args = parser.parse_args()

# '-1' selects CPU; otherwise use the single requested GPU.
if args.gpu_id == '-1':
    device = mx.cpu()
else:
    device = mx.gpu(int(args.gpu_id.strip()))

generator = StyledGenerator(code_dim=512)
generator.initialize()
generator.collect_params().reset_ctx(device)
generator.load_parameters(args.path, ctx=device)

mean_style = get_mean_style(generator, device)
# Progressive-growing step for the requested resolution (4px base).
step = int(math.log(args.size, 2)) - 2

imgs = sample(generator, step, mean_style, args.n_sample, device)

if not os.path.isdir(args.out_dir):
    os.makedirs(args.out_dir)
for i in range(args.n_sample):
    save_image(imgs[i],
        # NOTE(review): truncated — remaining save_image arguments not visible.
parser.add_argument('--mixing',
                    action='store_true',
                    help='use mixing regularization',
                    default=True)
parser.add_argument(
    '--loss',
    type=str,
    default='wgan-gp',
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)
args = parser.parse_args()

# DataParallel variants kept for reference; this setup runs single-GPU.
#generator = nn.DataParallel(StyledGenerator(w_dim=code_size)).cuda()
generator = StyledGenerator(w_dim=code_size).cuda()
#discriminator = nn.DataParallel(Discriminator()).cuda()
discriminator = Discriminator().cuda()
#g_running = StyledGenerator(code_size).cuda()
#g_running.train(False)

# Synthesis network (progressive blocks + to_rgb) at the full learning rate.
g_optimizer = optim.Adam([{
    'params': generator.progressive.parameters()
}, {
    'params': generator.to_rgb.parameters()
}], lr=args.lr, betas=(0.0, 0.99))
# slower learning rate for mapping network
g_optimizer.add_param_group({
    # NOTE(review): snippet truncated — param-group dict body not visible.
# NOTE(review): snippet starts mid argparse call (the '--size' argument).
                    help='size of the image')
parser.add_argument('--n_row', type=int, default=3,
                    help='number of rows of sample matrix')
parser.add_argument('--n_col', type=int, default=5,
                    help='number of columns of sample matrix')
parser.add_argument('path', type=str, help='path to checkpoint file')
args = parser.parse_args()

device = 'cuda'

generator = StyledGenerator(512).to(device)
#generator.load_state_dict(torch.load(args.path)['g_running'])
# Checkpoint here is a raw generator state_dict, loaded onto CPU first and
# copied into the CUDA model by load_state_dict.
generator.load_state_dict(
    torch.load(args.path, map_location=torch.device('cpu')))
generator.eval()

mean_style = get_mean_style(generator, device)
step = int(math.log(args.size, 2)) - 2

img = sample(generator, step, mean_style, args.n_row * args.n_col, device)
utils.save_image(img,
                 './sample_matrix/sample.png',
                 nrow=args.n_col,
                 normalize=True,
                 range=(-1, 1))
# NOTE(review): snippet starts mid argparse call (the '--size' argument).
    type=int,
    default=128,
    help='size of the image')
parser.add_argument('--n_row', type=int, default=5,
                    help='number of rows of sample matrix')
parser.add_argument('--n_col', type=int, default=5,
                    help='number of columns of sample matrix')
parser.add_argument('path', type=str, help='path to checkpoint file')
args = parser.parse_args()

generator = StyledGenerator(512)
# Jittor checkpoint is loaded as a raw state_dict (no 'g_running' sub-key).
ckpt = jt.load(args.path)
generator.load_state_dict(ckpt)
generator.eval()

mean_style = get_mean_style(generator)
# Progressive-growing step for the requested resolution (4px base).
step = int(math.log(args.size, 2)) - 2

img = sample(generator, step, mean_style, args.n_row * args.n_col)
jt.save_image(img,
              'style_mixing/sample.png',
              nrow=args.n_col,
              normalize=True,
              range=(-1, 1))
# NOTE(review): snippet starts mid argparse call (the '--loss' argument).
    default='wgan-gp',
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)
parser.add_argument(
    '-d',
    '--data',
    default='folder',
    type=str,
    choices=['folder', 'lsun'],
    help=('Specify dataset. '
          'Currently Image Folder and LSUN is supported'),
)
# parse_known_args tolerates extra CLI flags meant for other components.
args, _ = parser.parse_known_args()

generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
discriminator = nn.DataParallel(Discriminator()).cuda()
# EMA copy of the generator; never trained directly.
g_running = StyledGenerator(code_size).cuda()
g_running.train(False)

class_loss = nn.CrossEntropyLoss()

g_optimizer = optim.Adam(
    generator.module.generator.parameters(), lr=args.lr, betas=(0.0, 0.99)
)
# Style (mapping) network trains with a 100x smaller learning rate.
g_optimizer.add_param_group(
    {
        'params': generator.module.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01,
    }
# NOTE(review): snippet truncated — the closing paren of add_param_group is not visible.
def get_model(model_name, config, iteration=None, restart=False,
              from_step=False, load_discriminator=True, alpha=1, step=6,
              resolution=256, used_samples=0):
    """Create (or resume) the full StyleGAN training state.

    Builds generator, EMA generator (g_running), discriminator and
    n-frames discriminator plus their optimizers; unless ``restart`` is
    set, restores everything from the checkpoint for ``iteration`` (or
    the latest one) and returns the whole bundle as an EasyDict.

    Arguments:
        model_name -- name to use for save and load the model.
        config -- dict of model parameters.
        iteration -- iteration to load; last if None
        restart -- if true, than creates new model even there is a saved
            model with `model_name`.
    """
    LOGGER.info(f'Getting model "{model_name}"')
    code_size = config.get('code_size', constants.DEFAULT_CODE_SIZE)
    init_size = config.get('init_size', constants.INIT_SIZE)
    n_frames_params = config.get('n_frames_params', dict())
    n_frames = n_frames_params.get('n', 1)
    from_rgb_activate = config['from_rgb_activate']
    two_noises = n_frames_params.get('two_noises', False)
    lr = config.get('lr', constants.LR)  # dict keyed by resolution
    dyn_style_coordinates = n_frames_params.get('dyn_style_coordinates', 0)
    generator = nn.DataParallel(StyledGenerator(
        code_size,
        two_noises=two_noises,
        dyn_style_coordinates=dyn_style_coordinates,
    )).cuda()
    # EMA copy of the generator; never trained directly.
    g_running = StyledGenerator(
        code_size,
        two_noises=two_noises,
        dyn_style_coordinates=dyn_style_coordinates,
    ).cuda()
    g_running.train(False)
    discriminator = nn.DataParallel(
        Discriminator(from_rgb_activate=from_rgb_activate)).cuda()
    n_frames_discriminator = nn.DataParallel(
        NFramesDiscriminator(from_rgb_activate=from_rgb_activate,
                             n_frames=n_frames)
    ).cuda()
    if not restart:
        # Resume: pick the latest checkpoint or the requested iteration.
        if iteration is None:
            model = get_last_model(model_name, from_step)
        else:
            iteration = str(iteration).zfill(6)
            checkpoint_path = os.path.join(constants.CHECKPOINT_DIR,
                                           model_name, f'{iteration}.model')
            LOGGER.info(f'Loading {checkpoint_path}')
            model = torch.load(checkpoint_path)
        generator.module.load_state_dict(model['generator'])
        g_running.load_state_dict(model['g_running'])
        if load_discriminator:
            discriminator.module.load_state_dict(model['discriminator'])
        # NOTE(review): collapsed formatting makes this nesting ambiguous —
        # this check may instead sit inside the load_discriminator branch.
        if 'n_frames_params' in config:
            n_frames_discriminator.module.load_state_dict(
                model['n_frames_params'] if False else model['n_frames_discriminator'])
        # Training-progress state overrides the function defaults.
        alpha = model['alpha']
        step = model['step']
        LOGGER.debug(f'Step: {step}')
        resolution = model['resolution']
        used_samples = model['used_samples']
        LOGGER.debug(f'Used samples: {used_samples}.')
        iteration = model['iteration']
    else:
        # Fresh start at the initial resolution.
        alpha = 0
        step = int(math.log2(init_size)) - 2
        resolution = 4 * 2 ** step
        used_samples = 0
        iteration = 0
    # decay=0: initialize g_running as an exact copy of the generator.
    accumulate(to_model=g_running, from_model=generator.module, decay=0)
    g_optimizer = optim.Adam(
        generator.module.generator.parameters(), lr=lr[resolution],
        betas=(0.0, 0.99)
    )
    # Style (mapping) network trains with a 100x smaller learning rate.
    style_module = generator.module
    style_params = list(style_module.style.parameters())
    g_optimizer.add_param_group(
        {
            'params': style_params,
            'lr': lr[resolution] * 0.01,
            'mult': 0.01,
        }
    )
    d_optimizer = optim.Adam(discriminator.parameters(), lr=lr[resolution],
                             betas=(0.0, 0.99))
    nfd_optimizer = optim.Adam(n_frames_discriminator.parameters(),
                               lr=lr[resolution], betas=(0.0, 0.99))
    if not restart:
        g_optimizer.load_state_dict(model['g_optimizer'])
        d_optimizer.load_state_dict(model['d_optimizer'])
        nfd_optimizer.load_state_dict(model['nfd_optimizer'])
    return EasyDict(
        generator=generator,
        discriminator=discriminator,
        n_frames_discriminator=n_frames_discriminator,
        g_running=g_running,
        g_optimizer=g_optimizer,
        d_optimizer=d_optimizer,
        nfd_optimizer=nfd_optimizer,
        alpha=alpha,
        step=step,
        resolution=resolution,
        used_samples=used_samples,
        iteration=iteration,
    )
# NOTE(review): snippet starts mid argparse call (the '--no_from_rgb_activate'
# argument, judging by the usage below).
    help='use activate in from_rgb (original implementation)',
)
parser.add_argument('--mixing', action='store_true',
                    help='use mixing regularization')
parser.add_argument(
    '--loss',
    type=str,
    default='wgan-gp',
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)
args = parser.parse_args()

generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
discriminator = nn.DataParallel(
    Discriminator(from_rgb_activate=not args.no_from_rgb_activate)).cuda()
# EMA copy of the generator; never trained directly.
g_running = StyledGenerator(code_size).cuda()
g_running.train(False)

g_optimizer = optim.Adam(generator.module.generator.parameters(),
                         lr=args.lr,
                         betas=(0.0, 0.99))
# Style (mapping) network trains with a 100x smaller learning rate.
g_optimizer.add_param_group({
    'params': generator.module.style.parameters(),
    'lr': args.lr * 0.01,
    'mult': 0.01,
})
d_optimizer = optim.Adam(discriminator.parameters(), lr=args.lr,
    # NOTE(review): snippet truncated — remaining optimizer arguments not visible.
from generate import get_mean_style

standard_normal_distribution = torch.distributions.normal.Normal(0, 1)

RESOLUTION = 256
STEP = int(math.log(RESOLUTION, 2)) - 2  # progressive-growing step for 256px
DURATION_IN_SECONDS = 60
SAMPLE_COUNT = 30  # Number of distinct objects to generate and interpolate between
TRANSITION_FRAMES = DURATION_IN_SECONDS * 30 // SAMPLE_COUNT  # assumes 30 fps
LATENT_CODE_SIZE = 512
TILES = (3, 3)

generator = StyledGenerator(LATENT_CODE_SIZE).to(device)
generator.load_state_dict(
    torch.load('checkpoint/train_step-7.model')['g_running'])
generator.eval()


@torch.no_grad()
def get_spline(use_styles=True):
    """Build a periodic cubic spline through SAMPLE_COUNT random latent codes."""
    codes = standard_normal_distribution.sample(
        (SAMPLE_COUNT + 1, LATENT_CODE_SIZE))
    if use_styles:
        # Interpolate in W (style) space rather than Z space.
        codes = generator.style(codes.to(device))
    codes[0, :] = codes[-1, :]  # Make animation periodic
    return CubicSpline(np.arange(SAMPLE_COUNT + 1),
                       codes.detach().cpu().numpy(),
                       axis=0,
                       bc_type='periodic')


def get_noise():
    noise = []
    # NOTE(review): snippet truncated — rest of get_noise is not visible.
# NOTE(review): snippet starts mid argparse call (the '--out_dir' argument).
    type=str,
    default='samples/',
    help='output directory for samples')
parser.add_argument('--path',
                    type=str,
                    default='./stylegan-ffhq-1024px-new.params',
                    help='path to checkpoint file')
args = parser.parse_args()

# '-1' selects CPU; otherwise use the single requested GPU.
if args.gpu_id == '-1':
    device = mx.cpu()
else:
    device = mx.gpu(int(args.gpu_id.strip()))

# blur=True — presumably enables the blur layers of this Gluon port;
# confirm against the model definition.
generator = StyledGenerator(512, blur=True)
generator.initialize()
generator.collect_params().reset_ctx(device)
generator.load_parameters(args.path, ctx=device)

mean_style = get_mean_style(generator, device)
# Progressive-growing step for the requested resolution (4px base).
step = int(math.log(args.size, 2)) - 2

imgs = sample(generator, step, mean_style, args.n_sample, device)

if not os.path.isdir(args.out_dir):
    os.makedirs(args.out_dir)
for i in range(args.n_sample):
    # NOTE(review): snippet truncated — loop body not visible.
# NOTE(review): snippet starts mid argparse call (the '--loss' argument).
    choices=['wgan-gp', 'r1'],
    help='class of gan loss',
)
args = parser.parse_args()

# Initialize Horovod
hvd.init()
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
# Pin GPU to be used to process local rank (one GPU per process)
torch.cuda.set_device(hvd.local_rank())

generator = StyledGenerator(code_size).cuda()
discriminator = Discriminator(
    from_rgb_activate=not args.no_from_rgb_activate).cuda()
# EMA copy of the generator; never trained directly.
g_running = StyledGenerator(code_size).cuda()
g_running.train(False)

g_optimizer = optim.Adam(generator.generator.parameters(),
                         lr=args.lr,
                         betas=(0.0, 0.99))
# Style (mapping) network trains with a 100x smaller learning rate.
g_optimizer.add_param_group({
    'params': generator.style.parameters(),
    'lr': args.lr * 0.01,
    'mult': 0.01,
})
d_optimizer = optim.Adam(discriminator.parameters(),
    # NOTE(review): snippet truncated — remaining optimizer arguments not visible.