class Predictor(cog.Predictor):
    def setup(self):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.generator = StyledGenerator(512).to(self.device)
        print("Loading checkpoint")
        self.generator.load_state_dict(
            torch.load(
                "stylegan-1024px-new.model",
                map_location=self.device,
            )["g_running"], )
        self.generator.eval()

    @cog.input("seed", type=int, default=-1, help="Random seed, -1 for random")
    def predict(self, seed):
        if seed < 0:
            seed = int.from_bytes(os.urandom(2), "big")
        torch.manual_seed(seed)
        print(f"seed: {seed}")

        mean_style = get_mean_style(self.generator, self.device)
        step = int(math.log(SIZE, 2)) - 2
        img = sample(self.generator, step, mean_style, 1, self.device)
        output_path = Path(tempfile.mkdtemp()) / "output.png"
        utils.save_image(img, output_path, normalize=True)
        return output_path
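The first example above calls get_mean_style and sample without defining them. A minimal sketch of both, inferred from the call sites in these examples (the batch sizes, the 10-batch average, and style_weight=0.7 are assumptions consistent with the snippets below):

@torch.no_grad()
def get_mean_style(generator, device):
    # Average the style vector over several large batches of latents.
    mean_style = None
    for _ in range(10):
        style = generator.mean_style(torch.randn(1024, 512).to(device))
        mean_style = style if mean_style is None else mean_style + style
    return mean_style / 10

@torch.no_grad()
def sample(generator, step, mean_style, n_sample, device):
    # Draw n_sample latents and render them, pulled toward the mean style.
    return generator(
        torch.randn(n_sample, 512).to(device),
        step=step,
        alpha=1,
        mean_style=mean_style,
        style_weight=0.7,
    )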
Example 2
def load_network(ckpt):
    g_running = StyledGenerator(code_size).cuda()
    discriminator = Discriminator(from_rgb_activate=True).cuda()

    ckpt = torch.load(ckpt)
    g_running.load_state_dict(ckpt['g_running'])
    discriminator.load_state_dict(ckpt['discriminator'])

    return g_running, discriminator
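Hypothetical usage of load_network (it assumes a module-level code_size, which its body reads, and a checkpoint that stores both networks):

code_size = 512  # must match the checkpoint
g_running, discriminator = load_network('checkpoint/140000.model')  # hypothetical path
g_running.eval()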
Example 3
def loadStyleGAN():
    sys.path.append(StyleGAN1_root)
    ckpt_root = join(StyleGAN1_root, 'checkpoint')
    from model import StyledGenerator
    from generate import get_mean_style
    import math
    generator = StyledGenerator(512).to("cuda")
    # generator.load_state_dict(torch.load(r"E:\Github_Projects\style-based-gan-pytorch\checkpoint\stylegan-256px-new.model")['g_running'])
    generator.load_state_dict(
        torch.load(join(ckpt_root,
                        "stylegan-256px-new.model"))['g_running'])
    generator.eval()
    for param in generator.parameters():
        param.requires_grad_(False)
    return generator
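Hypothetical usage of loadStyleGAN (assumes StyleGAN1_root points at a style-based-gan-pytorch checkout containing the 256px checkpoint):

G = loadStyleGAN()
mean_style = get_mean_style(G, "cuda")
img = G(torch.randn(4, 512).to("cuda"), step=6, alpha=1,
        mean_style=mean_style, style_weight=0.7)  # 256px -> step = log2(256) - 2 = 6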
Example 4
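# Hypothetical preamble: this snippet assumes `generator` (a StyledGenerator
# variant whose forward takes pose/input_indices) and a loaded state dict
# `mdl_state` already exist, e.g.
#   mdl_state = torch.load('some_checkpoint.model', map_location='cpu')['g_running']
import numpy as np
import torch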
tot_params = 0
for i in range(7):
    for name in mdl_state:
        if name.find(f'to_rgb.{i}') >= 0:
            mdl_state[name] = mdl_state[name] * 0 + torch.randn(
                mdl_state[name].shape)
            tot_params += np.prod(mdl_state[name].shape)
            print(
                f'{name} : {mdl_state[name].shape}; params this layer: {np.prod(mdl_state[name].shape)}'
            )
        # else:
        #     mdl_state[name] = mdl_state[name] * 0 + 6e-3

print(f'Total set params are: {tot_params} \n\n\n\n\n')

generator.load_state_dict(mdl_state)

input_indices = torch.zeros((1, ), dtype=torch.long)
flm_rndr = torch.zeros((1, 3, 4, 4))

torch.manual_seed(2)
forward_pass_gen = generator(flm_rndr,
                             pose=None,
                             step=6,
                             alpha=1,
                             input_indices=input_indices)
print(forward_pass_gen)
print(forward_pass_gen[0].shape)

# for param in generator.parameters():
#     print(param)
Example 5
    # slower learning rate for the mapping network
    g_optimizer.add_param_group({
        'params': generator.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01,
    })
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))

    #accumulate(g_running, generator.module, 0)

    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)

        generator.load_state_dict(ckpt['generator'])
        discriminator.load_state_dict(ckpt['discriminator'])
        #g_running.load_state_dict(ckpt['g_running'])
        g_optimizer.load_state_dict(ckpt['g_optimizer'])
        d_optimizer.load_state_dict(ckpt['d_optimizer'])

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    dataset = MultiResolutionDataset(args.path, transform)

    if args.sched:
        args.lr = {
Example 6
import torch
from torchvision import utils

from model import StyledGenerator

generator = StyledGenerator(512).cuda()
generator.load_state_dict(torch.load('checkpoint/130000.model'))

mean_style = None

step = 6

shape = 4 * 2**step

for i in range(10):
    style = generator.mean_style(torch.randn(1024, 512).cuda())

    if mean_style is None:
        mean_style = style

    else:
        mean_style += style

mean_style /= 10

image = generator(
    torch.randn(50, 512).cuda(),
    step=step,
    alpha=1,
    mean_style=mean_style,
    style_weight=0.7,
)
Example 7
    parser.add_argument("--n_row",
                        type=int,
                        default=3,
                        help="number of rows of sample matrix")
    parser.add_argument("--n_col",
                        type=int,
                        default=3,
                        help="number of columns of sample matrix")
    parser.add_argument("path", type=str, help="path to checkpoint file")

    args = parser.parse_args()

    device = "cpu"

    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(
        torch.load(args.path, map_location=torch.device("cpu"))["g_running"])
    generator.eval()

    mean_style = get_mean_style(generator, device)

    step = int(math.log(args.size, 2)) - 2

    resize_img = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(size=112),
        transforms.ToTensor(),
    ])
    for j in range(500):
        img = sample(generator, step, mean_style, args.n_col * args.n_row,
                     device)
        # img = [resize_img(im) for im in img]
Example 8
                        help='output directory')
    parser.add_argument('--output-name',
                        type=str,
                        default='sample',
                        help='name of output file (without extension)')
    parser.add_argument('path', type=str, help='path to checkpoint file')

    args = parser.parse_args()

    device = args.device

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(
        torch.load(args.path, map_location=torch.device(device))['g_running'])
    generator.eval()

    mean_style = get_mean_style(generator, device)

    step = int(math.log(args.size, 2)) - 2

    img = sample(generator, step, mean_style, args.n_row * args.n_col, device)
    utils.save_image(img,
                     os.path.join(args.output_dir,
                                  '{}.png'.format(args.output_name)),
                     nrow=args.n_col,
                     normalize=True,
                     range=(-1, 1))

    for j in range(args.mixing):
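The save_image call in this example passes range=(-1, 1), as do several examples below. Newer torchvision releases renamed that keyword to value_range (and later removed the old name), so a version-tolerant variant of the call above could look like this sketch (out_path stands in for the os.path.join(...) expression):

try:
    utils.save_image(img, out_path, nrow=args.n_col, normalize=True,
                     value_range=(-1, 1))
except TypeError:  # older torchvision only accepts range=
    utils.save_image(img, out_path, nrow=args.n_col, normalize=True,
                     range=(-1, 1))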
Example 9
def optimize_latents():
    print("Optimizing Latents.")
    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(torch.load(args.path)['generator'])
    generator.eval()
    latent_optimizer = LatentOptimizer(generator, args.vgg_layer)
    mean_style = get_mean_style(generator, device)
    total = np.zeros((83 * 3, 512))
    # Optimize only the dlatents.
    for param in latent_optimizer.parameters():
        param.requires_grad_(False)

    if args.video or args.save_optimized_image:
        # Hook, saves an image during optimization to be used to create video.
        generated_image_hook = GeneratedImageHook(
            latent_optimizer.post_synthesis_processing, args.save_frequency)

    for i in range(3 * 83):  # 3 runs for each picture
        iid = i % 3
        path = i // 3
        iterations = int(200 * iid + 300)
        image_path = './data/' + str(path) + '.jpg'
        print(image_path)
        reference_image = load_images([image_path])
        reference_image = torch.from_numpy(reference_image).to(device)
        reference_image = latent_optimizer.vgg_processing(
            reference_image)  # normalize
        reference_features = latent_optimizer.vgg16(
            reference_image).detach()  # VGG features
        reference_image = reference_image.detach()

        if args.use_latent_finder:
            image_to_latent = ImageToLatent().cuda()
            image_to_latent.load_state_dict(
                torch.load(args.image_to_latent_path))
            image_to_latent.eval()

            latents_to_be_optimized = image_to_latent(reference_image)
            latents_to_be_optimized = latents_to_be_optimized.detach().cuda(
            ).requires_grad_(True)
        else:
            latents_to_be_optimized = torch.zeros(
                (1, 512)).cuda().requires_grad_(True)

        criterion = LatentLoss()
        optimizer = torch.optim.SGD([latents_to_be_optimized],
                                    lr=args.learning_rate)

        progress_bar = tqdm(range(iterations))

        for step in progress_bar:
            optimizer.zero_grad()

            generated_image_features = latent_optimizer(
                latents_to_be_optimized, mean_style, i)
            #print(latents_to_be_optimized)
            loss = criterion(generated_image_features, reference_features)
            loss.backward()
            loss = loss.item()

            optimizer.step()
            progress_bar.set_description("Step: {}, Loss: {}".format(
                step, loss))

        optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy()
        total[i] = optimized_dlatents[0]

    np.save(args.dlatent_path, total)
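After the loop, total holds one 512-dimensional dlatent per (image, run) pair, in the order the loop produced them. A hypothetical way to reload and index the saved array:

import numpy as np

total = np.load(args.dlatent_path)        # shape (249, 512): 83 images x 3 runs each
dlatent = total[image_idx * 3 + run_idx]  # run_idx in {0, 1, 2}; both indices hypothetical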
Example 10
def main(args, myargs):
    code_size = 512
    batch_size = 16
    n_critic = 1

    generator = nn.DataParallel(StyledGenerator(code_size)).cuda()
    discriminator = nn.DataParallel(
        Discriminator(from_rgb_activate=not args.no_from_rgb_activate)).cuda()
    g_running = StyledGenerator(code_size).cuda()
    g_running.train(False)

    g_optimizer = optim.Adam(generator.module.generator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))
    g_optimizer.add_param_group({
        'params': generator.module.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01,
    })
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))

    accumulate(g_running, generator.module, 0)
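    # accumulate() (defined alongside these training scripts) keeps g_running as
    # an exponential moving average of the generator; decay=0 here copies the
    # weights outright to initialize the EMA. A sketch of the usual helper:
    #   def accumulate(model1, model2, decay=0.999):
    #       par1 = dict(model1.named_parameters())
    #       par2 = dict(model2.named_parameters())
    #       for k in par1:
    #           par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)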

    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)

        generator.module.load_state_dict(ckpt['generator'])
        discriminator.module.load_state_dict(ckpt['discriminator'])
        g_running.load_state_dict(ckpt['g_running'])
        g_optimizer.load_state_dict(ckpt['g_optimizer'])
        d_optimizer.load_state_dict(ckpt['d_optimizer'])

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
    ])

    dataset = MultiResolutionDataset(args.path, transform)

    if args.sched:
        args.lr = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        args.batch = {
            4: 512,
            8: 256,
            16: 128,
            32: 64,
            64: 32,
            128: 32,
            256: 32
        }

    else:
        args.lr = {}
        args.batch = {}

    args.gen_sample = {512: (8, 4), 1024: (4, 2)}

    args.batch_default = 32

    train(args,
          dataset,
          generator,
          discriminator,
          g_optimizer=g_optimizer,
          d_optimizer=d_optimizer,
          g_running=g_running,
          code_size=code_size,
          n_critic=n_critic,
          myargs=myargs)
Example 11
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))

    # Add Horovod Distributed Optimizer
    g_optimizer = hvd.DistributedOptimizer(
        g_optimizer, named_parameters=generator.generator.named_parameters())
    d_optimizer = hvd.DistributedOptimizer(
        d_optimizer, named_parameters=discriminator.named_parameters())

    accumulate(g_running, generator, 0)

    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)

        generator.load_state_dict(ckpt['generator'])
        discriminator.load_state_dict(ckpt['discriminator'])
        g_running.load_state_dict(ckpt['g_running'])
        g_optimizer.load_state_dict(ckpt['g_optimizer'])
        d_optimizer.load_state_dict(ckpt['d_optimizer'])

    # Broadcast parameters from rank 0 to all other processes.
    hvd.broadcast_parameters(generator.generator.state_dict(), root_rank=0)
    hvd.broadcast_parameters(discriminator.state_dict(), root_rank=0)

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
    ])
Example 12
generate_mixing = False

device = 'cpu'
generator = StyledGenerator(512).to(device)
# generator.load_state_dict(torch.load('checkpoint/style-gan-256-140k.model', map_location=device))
sd = torch.load('checkpoint/style-gan-256-140k.model', map_location=device)
new_sd = OrderedDict()
for k, v in sd.items():
    if 'weight_orig' in k:
        k = k.replace('weight_orig', 'weight')
        fan_in = v.size(1) * v[0][0].numel()
        v *= torch.sqrt(torch.tensor(2 / fan_in))
    new_sd[k] = v
del sd
generator.load_state_dict(new_sd)
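# What the loop above does: checkpoints trained with an EqualLR wrapper store
# raw `weight_orig` tensors and apply the scale sqrt(2 / fan_in) at forward
# time; baking that scale into each tensor and renaming the key lets the
# checkpoint load into a plain generator without the wrapper.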

mean_style = None

step = 6

shape = 4 * 2**step

mean_steps = 1
for i in range(mean_steps):
    style = generator.mean_style(torch.randn(10, 512).to(device))

    if mean_style is None:
        mean_style = style

    else:
Example 13
        "params": generator.module.style.parameters(),
        "lr": args.lr * 0.01,
        "mult": 0.01
    })
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))

    accumulate(g_running, generator.module, 0)

    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)

        generator.module.load_state_dict(ckpt["generator"])
        discriminator.module.load_state_dict(ckpt["discriminator"])
        g_running.load_state_dict(ckpt["g_running"])
        g_optimizer.load_state_dict(ckpt["g_optimizer"])
        d_optimizer.load_state_dict(ckpt["d_optimizer"])

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
    ])

    dataset = sunnerData.ImageDataset(
        root=[[args.path]],
        transform=transforms.Compose([
            sunnertransforms.Resize((128, 128)),
            sunnertransforms.ToTensor(),
            sunnertransforms.ToFloat(),
Example 14
        full_size = config["data_params"]['full_image_size']
        assert macro_size[0] % micro_size[0] == 0
        assert macro_size[1] % micro_size[1] == 0
        assert full_size[0] % micro_size[0] == 0
        assert full_size[1] % micro_size[1] == 0

    precompute_parameters(config)
    ratio_full_to_micro = config["data_params"]["ratio_full_to_micro"]
        
    ## generator and model
    model_pth = "checkpoint/070000.model"
    n_layers = int(math.log(micro_size[0], 2)) - 1
    step = n_layers - 1
    
    generator = StyledGenerator(CODE_SIZE, n_layers).cuda()
    generator.load_state_dict(torch.load(model_pth))
    
    print("Successfully loading the trained models!")
    
    ##
    coord_handler = CoordHandler(config)
    
    # get micros coordinates for full image
    ROWS = ratio_full_to_micro[0]
    COLS = ratio_full_to_micro[1]
    micro_coords = []
    for row in range(ROWS):
        for col in range(COLS):
            micro_coord = torch.Tensor([coord_handler.euclidean_coord_int_full_to_float_micro(row, ROWS),
                                        coord_handler.euclidean_coord_int_full_to_float_micro(col, COLS)])
            micro_coords.append(micro_coord.unsqueeze(0))
Example 15
                        type=int,
                        default=3,
                        help='number of rows of sample matrix')
    parser.add_argument('--n_col',
                        type=int,
                        default=5,
                        help='number of columns of sample matrix')
    parser.add_argument('path', type=str, help='path to checkpoint file')

    args = parser.parse_args()

    device = 'cuda'

    generator = StyledGenerator(512).to(device)
    #generator.load_state_dict(torch.load(args.path)['g_running'])
    generator.load_state_dict(
        torch.load(args.path, map_location=torch.device('cpu')))
    generator.eval()

    mean_style = get_mean_style(generator, device)

    step = int(math.log(args.size, 2)) - 2

    img = sample(generator, step, mean_style, args.n_row * args.n_col, device)
    utils.save_image(img,
                     './sample_matrix/sample.png',
                     nrow=args.n_col,
                     normalize=True,
                     range=(-1, 1))

    for j in range(20):
        img = style_mixing(generator, step, mean_style, args.n_col, args.n_row,
Example 16
def get_model(model_name, config, iteration=None, restart=False, from_step=False, load_discriminator=True,
              alpha=1, step=6, resolution=256, used_samples=0):
    """
    Function that creates a model.
    Arguments:
        model_name -- name to use for save and load the model.
        config -- dict of model parameters.
        iteration -- iteration to load; last if None
        restart -- if True, creates a new model even if there is a saved model with `model_name`.
    """
    LOGGER.info(f'Getting model "{model_name}"')
    code_size = config.get('code_size', constants.DEFAULT_CODE_SIZE)
    init_size = config.get('init_size', constants.INIT_SIZE)
    n_frames_params = config.get('n_frames_params', dict())
    n_frames = n_frames_params.get('n', 1)
    from_rgb_activate = config['from_rgb_activate']
    two_noises = n_frames_params.get('two_noises', False)
    lr = config.get('lr', constants.LR)
    dyn_style_coordinates = n_frames_params.get('dyn_style_coordinates', 0)

    generator = nn.DataParallel(StyledGenerator(code_size,
                                                two_noises=two_noises,
                                                dyn_style_coordinates=dyn_style_coordinates,
                                                )).cuda()
    g_running = StyledGenerator(code_size,
                                two_noises=two_noises,
                                dyn_style_coordinates=dyn_style_coordinates,
                                ).cuda()
    g_running.train(False)
    discriminator = nn.DataParallel(Discriminator(from_rgb_activate=from_rgb_activate)).cuda()
    n_frames_discriminator = nn.DataParallel(
        NFramesDiscriminator(from_rgb_activate=from_rgb_activate, n_frames=n_frames)
    ).cuda()

    if not restart:
        if iteration is None:
            model = get_last_model(model_name, from_step)
        else:
            iteration = str(iteration).zfill(6)
            checkpoint_path = os.path.join(constants.CHECKPOINT_DIR, model_name, f'{iteration}.model')
            LOGGER.info(f'Loading {checkpoint_path}')
            model = torch.load(checkpoint_path)
        generator.module.load_state_dict(model['generator'])
        g_running.load_state_dict(model['g_running'])
        if load_discriminator:
            discriminator.module.load_state_dict(model['discriminator'])
        if 'n_frames_params' in config:
            n_frames_discriminator.module.load_state_dict(model['n_frames_discriminator'])
        alpha = model['alpha']
        step = model['step']
        LOGGER.debug(f'Step: {step}')
        resolution = model['resolution']
        used_samples = model['used_samples']
        LOGGER.debug(f'Used samples: {used_samples}.')
        iteration = model['iteration']
    else:
        alpha = 0
        step = int(math.log2(init_size)) - 2
        resolution = 4 * 2 ** step
        used_samples = 0
        iteration = 0
        accumulate(to_model=g_running, from_model=generator.module, decay=0)

    g_optimizer = optim.Adam(
        generator.module.generator.parameters(),
        lr=lr[resolution], betas=(0.0, 0.99)
    )

    style_module = generator.module
    style_params = list(style_module.style.parameters())
    g_optimizer.add_param_group(
        {
            'params': style_params,
            'lr': lr[resolution] * 0.01,
            'mult': 0.01,
        }
    )

    d_optimizer = optim.Adam(discriminator.parameters(), lr=lr[resolution], betas=(0.0, 0.99))
    nfd_optimizer = optim.Adam(n_frames_discriminator.parameters(), lr=lr[resolution], betas=(0.0, 0.99))

    if not restart:
        g_optimizer.load_state_dict(model['g_optimizer'])
        d_optimizer.load_state_dict(model['d_optimizer'])
        nfd_optimizer.load_state_dict(model['nfd_optimizer'])

    return EasyDict(
        generator=generator,
        discriminator=discriminator,
        n_frames_discriminator=n_frames_discriminator,
        g_running=g_running,
        g_optimizer=g_optimizer,
        d_optimizer=d_optimizer,
        nfd_optimizer=nfd_optimizer,
        alpha=alpha,
        step=step,
        resolution=resolution,
        used_samples=used_samples,
        iteration=iteration,
    )
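A hypothetical call (assumes a CUDA machine; the config keys mirror the config.get(...) lookups above, and lr must map resolutions to learning rates, since the optimizers index it as lr[resolution]):

config = {
    'from_rgb_activate': True,            # the one key read without a default
    'code_size': 512,
    'init_size': 8,
    'lr': {4: 1e-3, 8: 1e-3, 16: 1e-3},   # keyed by current resolution
}
models = get_model('my_model', config, restart=True)  # 'my_model' is hypothetical
print(models.step, models.resolution)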
Example 17
                        help='size of the image')
    parser.add_argument('--n_row',
                        type=int,
                        default=5,
                        help='number of rows of sample matrix')
    parser.add_argument('--n_col',
                        type=int,
                        default=5,
                        help='number of columns of sample matrix')
    parser.add_argument('path', type=str, help='path to checkpoint file')

    args = parser.parse_args()

    generator = StyledGenerator(512)
    ckpt = jt.load(args.path)
    generator.load_state_dict(ckpt)
    generator.eval()

    mean_style = get_mean_style(generator)

    step = int(math.log(args.size, 2)) - 2

    img = sample(generator, step, mean_style, args.n_row * args.n_col)
    jt.save_image(img,
                  'style_mixing/sample.png',
                  nrow=args.n_col,
                  normalize=True,
                  range=(-1, 1))

    for j in range(20):
        img = style_mixing(generator, step, mean_style, args.n_col, args.n_row)
Example 18
standard_normal_distribution = torch.distributions.normal.Normal(0, 1)

RESOLUTION = 256
STEP = int(math.log(RESOLUTION, 2)) - 2

DURATION_IN_SECONDS = 60
SAMPLE_COUNT = 30 # Number of distinct objects to generate and interpolate between
TRANSITION_FRAMES = DURATION_IN_SECONDS * 30 // SAMPLE_COUNT

LATENT_CODE_SIZE = 512

TILES = (3, 3)

generator = StyledGenerator(LATENT_CODE_SIZE).to(device)
generator.load_state_dict(torch.load('checkpoint/train_step-7.model')['g_running'])
generator.eval()

@torch.no_grad()
def get_spline(use_styles=True):
    codes = standard_normal_distribution.sample((SAMPLE_COUNT + 1, LATENT_CODE_SIZE))
    if use_styles:
        codes = generator.style(codes.to(device))
    
    codes[0, :] = codes[-1, :] # Make animation periodic
    return CubicSpline(np.arange(SAMPLE_COUNT + 1), codes.detach().cpu().numpy(), axis=0, bc_type='periodic')
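# Hypothetical usage of get_spline(): evaluate it at fractional sample indices
# to get one interpolated style code per video frame, e.g.
#   spline = get_spline()
#   t = np.arange(SAMPLE_COUNT * TRANSITION_FRAMES) / TRANSITION_FRAMES
#   frame_codes = spline(t)  # shape (n_frames, LATENT_CODE_SIZE)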

def get_noise():
    noise = []

    for i in range(STEP + 1):
Example 19
import torch
from torchvision import utils

from model import StyledGenerator

device = 'cuda'

generator = StyledGenerator(512).to(device)
generator.load_state_dict(
    torch.load('checkpoint/stylegan-512px-running-180000.model'))
generator.eval()

mean_style = None

step = 7
alpha = 1

shape = 4 * 2**step

with torch.no_grad():
    for i in range(10):
        style = generator.mean_style(torch.randn(1024, 512).to(device))

        if mean_style is None:
            mean_style = style

        else:
            mean_style += style

    mean_style /= 10
Example 20
    parser.add_argument('--n_row',
                        type=int,
                        default=3,
                        help='number of rows of sample matrix')
    parser.add_argument('--n_col',
                        type=int,
                        default=5,
                        help='number of columns of sample matrix')
    parser.add_argument('path', type=str, help='path to checkpoint file')

    args = parser.parse_args()

    device = 'cuda'

    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(torch.load(args.path)['g_running'])
    generator.eval()

    mean_style = get_mean_style(generator, device)

    step = int(math.log(args.size, 2)) - 2

    img = sample(generator, step, mean_style, args.n_row * args.n_col, device)
    utils.save_image(img,
                     'sample.png',
                     nrow=args.n_col,
                     normalize=True,
                     range=(-1, 1))

    for j in range(20):
        img = style_mixing(generator, step, mean_style, args.n_col, args.n_row,
Example 21
            transforms.Resize(resize),
            transforms.CenterCrop(resize),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
    )

    imgs = []

    for imgfile in args.files:
        img = transform(Image.open(imgfile).convert("RGB"))
        imgs.append(img)

    imgs = torch.stack(imgs, 0).to(device)
    g_ema = StyledGenerator(512)
    g_ema.load_state_dict(torch.load(args.ckpt)["g_running"], strict=False)
    g_ema.eval()
    g_ema = g_ema.to(device)
    step = int(math.log(args.size, 2)) - 2
    with torch.no_grad():
        noise_sample = torch.randn(n_mean_latent, 512, device=device)
        latent_out = g_ema.style(noise_sample)

        latent_mean = latent_out.mean(0)
        latent_std = ((latent_out - latent_mean).pow(2).sum() / n_mean_latent) ** 0.5

    percept = lpips.PerceptualLoss(
        model="net-lin", net="vgg", use_gpu=device.startswith("cuda")
    )

    noises_single = make_noise(device, args.size)
Example 22
            'lr': args.lr * 0.01,
            'mult': 0.01,
        })

    if not args.supervised:
        D_optimizer = optim.Adam(D_target.parameters(),
                                 lr=args.lr,
                                 betas=(0.0, 0.99))

    ckpt = torch.load(args.ckpt)

    if not args.init_G:
        G_target.module.load_state_dict(ckpt['generator'], strict=False)
        G_running_target.load_state_dict(ckpt['g_running'], strict=False)

    if not args.supervised and not args.init_D:
        D_target.module.load_state_dict(ckpt['discriminator'])

    if not args.supervised:
        G_source.module.load_state_dict(ckpt['generator'])
        D_source.module.load_state_dict(ckpt['discriminator'])

    ### set configs ###

    if args.sched:
        args.lr = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        args.batch = {4: 128, 8: 64, 16: 32, 32: 16, 64: 8, 128: 8, 256: 8}
    else:
        args.lr = {}
Example 23
                        type=int,
                        default=1,
                        help='number of sample')
    parser.add_argument('--style_mixing',
                        type=int,
                        default=1,
                        help='number of style mixing sample')
    parser.add_argument('path', type=str, help='path to checkpoint file')

    args = parser.parse_args()

    device = 'cuda'

    generator = StyledGenerator(512).to(device)
    try:
        generator.load_state_dict(torch.load(args.path)["g_running"])
    except KeyError:  # checkpoint may be a bare state dict
        generator.load_state_dict(torch.load(args.path))
    generator.eval()

    mean_style = get_mean_style(generator, device)

    step = int(math.log(args.size, 2)) - 2

    for j in range(args.sample):
        img = sample(generator, step, mean_style, args.n_row * args.n_col,
                     device)
        utils.save_image(img,
                         f'images/{j:03}_sample.png',
                         nrow=args.n_col,
                         normalize=True,
Example 24
        'params': generator.module.style.parameters(),
        'lr': args.lr * 0.01,
        'mult': 0.01,
    })
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=args.lr,
                             betas=(0.0, 0.99))

    accumulate(g_running, generator.module, 0)

    if args.ckpt is not None:
        ckpt = torch.load(args.ckpt)

        generator.module.load_state_dict(ckpt['generator'])
        discriminator.module.load_state_dict(ckpt['discriminator'])
        g_running.load_state_dict(ckpt['g_running'])
        g_optimizer.load_state_dict(ckpt['g_optimizer'])
        d_optimizer.load_state_dict(ckpt['d_optimizer'])

    transform = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
    ])

    dataset = MultiResolutionDataset(args.path, transform)

    if args.sched:
        args.lr = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
        args.batch = {
            4: 512,
Example 25
fig = plot_spectra(data_FI["H_col"][6, 0][np.newaxis, :], savename="spectrum_method_cmp.jpg", label="ForwardIter 1E-2", fig=fig)
plt.show()
#%%
"""
This is the smaller explicit version of StyleGAN. Very easy to work with
"""
#%%
sys.path.append("E:\Github_Projects\style-based-gan-pytorch")
sys.path.append("D:\Github\style-based-gan-pytorch")
from model import StyledGenerator
from generate import get_mean_style
import math
#%%
generator = StyledGenerator(512).to("cuda")
# generator.load_state_dict(torch.load(r"E:\Github_Projects\style-based-gan-pytorch\checkpoint\stylegan-256px-new.model")['g_running'])
generator.load_state_dict(torch.load(r"D:\Github\style-based-gan-pytorch\checkpoint\stylegan-256px-new.model")[
                              'g_running'])
generator.eval()
for param in generator.parameters():
    param.requires_grad_(False)
mean_style = get_mean_style(generator, "cuda")
step = int(math.log(256, 2)) - 2
#%%
feat = torch.randn(1, 512, requires_grad=False).to("cuda")
image = generator(
        feat,
        step=step,
        alpha=1,
        mean_style=mean_style,
        style_weight=0.7,
    )
#%%
Example 26
    parser.add_argument('--batch_size', type=int, default=1)

    opt = parser.parse_args()

    generator = StyledGenerator(CODE_SIZE).to(DEVICE)
    g_running = StyledGenerator(CODE_SIZE).to(DEVICE)

    cur_size = 512
    ## load models
    ckp_pth = os.path.join('checkpoint',
                           'stylegan-{}px-new.model'.format(cur_size))
    ckpt = torch.load(ckp_pth, map_location='cpu')  # load once, reuse both entries
    ckp1, ckp2 = ckpt['generator'], ckpt['g_running']

    generator.load_state_dict(ckp1)
    g_running.load_state_dict(ckp2)
    print("Successfully loading the trained models!")

    ## get step
    step = int(math.log2(cur_size)) - 2

    ## get inputs(styles)
    input_style = torch.randn(opt.batch_size, CODE_SIZE).to(DEVICE)

    ## output
    output_imgs = generator(input_style, step=step)
    output_img = tensor2rgb(output_imgs[0, ...])

    plt.subplot(111)
    plt.imshow(output_img)
Example 27
def optimize_latents():
    print("Optimizing Latents.")
    generator = StyledGenerator(512).to(device)
    generator.load_state_dict(torch.load(args.path)['generator'])
    generator.eval()
    latent_optimizer = LatentOptimizer(generator, args.vgg_layer)
    mean_style = get_mean_style(generator, device)
    # Optimize only the dlatents.
    for param in latent_optimizer.parameters():
        param.requires_grad_(False)

    if args.video or args.save_optimized_image:
        # Hook, saves an image during optimization to be used to create video.
        generated_image_hook = GeneratedImageHook(
            latent_optimizer.post_synthesis_processing, args.save_frequency)

    reference_image = load_images([args.image_path])
    reference_image = torch.from_numpy(reference_image).to(device)
    reference_image = latent_optimizer.vgg_processing(
        reference_image)  # normalize
    utils.save_image(reference_image,
                     './reference.png',
                     nrow=1,
                     normalize=True,
                     range=(-1, 1))
    reference_features = latent_optimizer.vgg16(reference_image).detach()  # VGG features
    reference_image = reference_image.detach()

    if args.use_latent_finder:
        image_to_latent = ImageToLatent().cuda()
        image_to_latent.load_state_dict(torch.load(args.image_to_latent_path))
        image_to_latent.eval()

        latents_to_be_optimized = image_to_latent(reference_image)
        latents_to_be_optimized = latents_to_be_optimized.detach().cuda(
        ).requires_grad_(True)
    else:
        latents_to_be_optimized = torch.zeros(
            (1, 512)).cuda().requires_grad_(True)
    #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std())
    criterion = LatentLoss()
    optimizer = torch.optim.RMSprop([latents_to_be_optimized],
                                    lr=args.learning_rate,
                                    weight_decay=0.02)
    #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std())
    progress_bar = tqdm(range(args.iterations))
    for step in progress_bar:
        #print(latents_to_be_optimized)
        optimizer.zero_grad()
        generated_image_features = latent_optimizer(latents_to_be_optimized,
                                                    mean_style)
        loss = criterion(generated_image_features, reference_features)
        loss.backward()
        loss = loss.item()

        optimizer.step()
        # if step==args.iterations:
        #     break
        # with torch.no_grad():
        #    latents_to_be_optimized.add_(-latents_to_be_optimized.mean()+3e-2*torch.randn(1).to('cuda'))
        #    latents_to_be_optimized.div_(latents_to_be_optimized.std()+3e-2*torch.randn(1).to('cuda'))
        #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std())
        progress_bar.set_description("Step: {}, Loss: {}".format(step, loss))

    print(latents_to_be_optimized)
    #latents_to_be_optimized=latent_optimizer.normalize(latents_to_be_optimized)
    #print(latents_to_be_optimized.mean(),latents_to_be_optimized.std())
    optimized_dlatents = latents_to_be_optimized.detach().cpu().numpy()
    np.save(args.dlatent_path, optimized_dlatents)

    if args.video:
        images_to_video(generated_image_hook.get_images(), args.video_path)
    if args.save_optimized_image:
        save_image(generated_image_hook.last_image, args.optimized_image_path)