def generate():
    """Render an animated latent-space walk with a (PyTorch) StyleGAN2-ada generator.

    All settings come from the global argument namespace `a`. Writes numbered
    jpg/png frames into `a.out_dir`; optionally saves the mapped dlatents as an
    .npy file next to the output directory when `a.save_lat` is set.
    Requires CUDA.
    """
    os.makedirs(a.out_dir, exist_ok=True)
    np.random.seed(seed=696)  # fixed seed => reproducible random label choice
    device = torch.device('cuda')

    # setup generator
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.verbose = a.verbose
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type

    # mask/blend latents with external latmask or by splitting the frame
    if a.latmask is None:
        # split the frame into a grid of nHW[1] x nHW[0] tiles, one latent each
        nHW = [int(s) for s in a.nXY.split('-')][::-1]
        assert len(nHW) == 2, ' Wrong count nXY: %d (must be 2)' % len(nHW)
        n_mult = nHW[0] * nHW[1]
        if a.verbose and n_mult > 1:
            print(' Latent blending w/split frame %d x %d' % (nHW[1], nHW[0]))
        lmask = np.tile(np.asarray([[[[1]]]]), (1, n_mult, 1, 1))
        Gs_kwargs.countHW = nHW
        Gs_kwargs.splitfine = a.splitfine
    else:
        # blend exactly two latents through an external grayscale mask
        if a.verbose:
            print(' Latent blending with mask', a.latmask)
        n_mult = 2
        if os.path.isfile(a.latmask):  # single file
            lmask = np.asarray([[img_read(a.latmask)[:, :, 0] / 255.]])  # [h,w]
        elif os.path.isdir(a.latmask):  # directory with frame sequence
            lmask = np.asarray([[img_read(f)[:, :, 0] / 255. for f in img_list(a.latmask)]])  # [h,w]
        else:
            print(' !! Blending mask not found:', a.latmask)
            exit(1)
        lmask = np.concatenate((lmask, 1 - lmask), 1)  # [frm,2,h,w]
    lmask = torch.from_numpy(lmask).to(device)

    # load base or custom network
    pkl_name = osp.splitext(a.model)[0]
    if '.pkl' in a.model.lower():
        custom = False
        print(' .. Gs from pkl ..', basename(a.model))
    else:
        custom = True
        print(' .. Gs custom ..', basename(a.model))
    with dnnlib.util.open_url(pkl_name + '.pkl') as f:
        Gs = legacy.load_network_pkl(f, custom=custom, **Gs_kwargs)['G_ema'].to(device)  # type: ignore

    if a.verbose:
        print(' out shape', Gs.output_shape[1:])
        print(' making timeline..')

    # one smooth latent timeline per sub-latent, stacked along dim 1
    lats = []  # list of [frm,1,512]
    for i in range(n_mult):
        lat_tmp = latent_anima((1, Gs.z_dim), a.frames, a.fstep, cubic=a.cubic, gauss=a.gauss, verbose=False)  # [frm,1,512]
        lats.append(lat_tmp)
    latents = np.concatenate(lats, 1)  # [frm,X,512]
    print(' latents', latents.shape)
    latents = torch.from_numpy(latents).to(device)
    frame_count = latents.shape[0]

    # distort image by tweaking initial const layer
    if a.digress > 0:
        try:
            init_res = Gs.init_res
        except AttributeError:  # was a bare except; only attribute lookup can fail here
            init_res = (4, 4)  # default initial layer size
        dconst = []
        for i in range(n_mult):
            dc_tmp = a.digress * latent_anima([1, Gs.z_dim, *init_res], a.frames, a.fstep, cubic=True, verbose=False)
            dconst.append(dc_tmp)
        dconst = np.concatenate(dconst, 1)
    else:
        dconst = np.zeros([frame_count, 1, 1, 1, 1])
    dconst = torch.from_numpy(dconst).to(device)

    # labels / conditions (one-hot per sub-latent, constant over time)
    label_size = Gs.c_dim
    if label_size > 0:
        labels = torch.zeros((frame_count, n_mult, label_size), device=device)  # [frm,X,lbl]
        if a.labels is None:
            label_ids = []
            for i in range(n_mult):
                label_ids.append(random.randint(0, label_size - 1))
        else:
            label_ids = [int(x) for x in a.labels.split('-')]
            label_ids = label_ids[:n_mult]  # ensure we have enough labels
        for i, l in enumerate(label_ids):
            labels[:, i, l] = 1
    else:
        labels = [None]

    # generate images from latent timeline
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        latent = latents[i]  # [X,512]
        label = labels[i % len(labels)]
        latmask = lmask[i % len(lmask)] if lmask is not None else [None]  # [X,h,w]
        dc = dconst[i % len(dconst)]  # [X,512,4,4]

        # generate multi-latent result (custom nets also take mask + const distortion)
        if custom:
            output = Gs(latent, label, latmask, dc, truncation_psi=a.trunc, noise_mode='const')
        else:
            output = Gs(latent, label, truncation_psi=a.trunc, noise_mode='const')
        output = (output.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()

        # save image (png keeps the alpha channel of 4-channel models)
        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%06d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()

    # convert latents to dlatents, save them
    if a.save_lat:
        latents = latents.squeeze(1)  # [frm,512]
        # NOTE(review): uses the `label` left over from the last loop iteration — confirm intended
        dlatents = Gs.mapping(latents, label)  # [frm,18,512]
        if a.size is None:
            a.size = [''] * 2
        filename = '{}-{}-{}.npy'.format(basename(a.model), a.size[1], a.size[0])
        filename = osp.join(osp.dirname(a.out_dir), filename)
        dlatents = dlatents.cpu().numpy()
        np.save(filename, dlatents)
        print('saved dlatents', dlatents.shape, 'to', filename)
def main():
    """Render frames by interpolating saved dlatents (PyTorch backend).

    Loads key dlatents from `a.dlatents` (a single .npy file or a directory of
    them), optionally overrides the higher style layers with `a.style_dlat`,
    animates between the keys, and writes numbered frames to `a.out_dir`.
    Requires CUDA.
    """
    os.makedirs(a.out_dir, exist_ok=True)
    device = torch.device('cuda')

    # setup generator
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.verbose = a.verbose
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type

    # load base or custom network
    pkl_name = osp.splitext(a.model)[0]
    if '.pkl' in a.model.lower():
        custom = False
        print(' .. Gs from pkl ..', basename(a.model))
    else:
        custom = True
        print(' .. Gs custom ..', basename(a.model))
    with dnnlib.util.open_url(pkl_name + '.pkl') as f:
        Gs = legacy.load_network_pkl(f, custom=custom, **Gs_kwargs)['G_ema'].to(device)  # type: ignore

    dlat_shape = (1, Gs.num_ws, Gs.w_dim)  # [1,18,512]

    # read saved latents: single file or a directory of .npy files
    if a.dlatents is not None and osp.isfile(a.dlatents):
        key_dlatents = load_latents(a.dlatents)
        if len(key_dlatents.shape) == 2:
            key_dlatents = np.expand_dims(key_dlatents, 0)
    elif a.dlatents is not None and osp.isdir(a.dlatents):
        key_dlatents = []
        npy_list = file_list(a.dlatents, 'npy')
        for npy in npy_list:
            key_dlatent = load_latents(npy)
            if len(key_dlatent.shape) == 2:
                key_dlatent = np.expand_dims(key_dlatent, 0)
            key_dlatents.append(key_dlatent)
        key_dlatents = np.concatenate(key_dlatents)  # [frm,18,512]
    else:
        print(' No input dlatents found')
        exit(1)  # fix: signal failure with a non-zero exit code (was exit())
    key_dlatents = key_dlatents[:, np.newaxis]  # [frm,1,18,512]
    print(' key dlatents', key_dlatents.shape)

    # replace higher layers with single (style) latent
    if a.style_dlat is not None:
        print(' styling with dlatent', a.style_dlat)
        style_dlatent = load_latents(a.style_dlat)
        while len(style_dlatent.shape) < 4:
            style_dlatent = np.expand_dims(style_dlatent, 0)
        # try replacing 5 by other value, less than Gs.num_ws
        key_dlatents[:, :, range(5, Gs.num_ws), :] = style_dlatent[:, :, range(5, Gs.num_ws), :]

    frames = key_dlatents.shape[0] * a.fstep
    dlatents = latent_anima(dlat_shape, frames, a.fstep, key_latents=key_dlatents, cubic=a.cubic, verbose=True)  # [frm,1,512]
    print(' dlatents', dlatents.shape)
    frame_count = dlatents.shape[0]
    dlatents = torch.from_numpy(dlatents).to(device)

    # distort image by tweaking initial const layer
    if a.digress > 0:
        try:
            init_res = Gs.init_res
        except Exception:
            init_res = (4, 4)  # default initial layer size
        dconst = a.digress * latent_anima([1, Gs.z_dim, *init_res], frame_count, a.fstep, cubic=True, verbose=False)
    else:
        dconst = np.zeros([frame_count, 1, 1, 1, 1])
    dconst = torch.from_numpy(dconst).to(device)

    # generate images from latent timeline
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        # custom nets also take a mask slot (None) and the const-layer distortion
        if custom:
            output = Gs.synthesis(dlatents[i], None, dconst[i], noise_mode='const')
        else:
            output = Gs.synthesis(dlatents[i], noise_mode='const')
        output = (output.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()

        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%06d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()
def main():
    """Render a latent-space walk with a (TensorFlow) StyleGAN2 generator.

    Supports multi-latent frames, either by splitting the frame into a grid
    (`a.nXY`) or by blending two latents through an external mask (`a.latmask`).
    Writes numbered frames to `a.out_dir`; optionally saves mapped dlatents.
    """
    os.makedirs(a.out_dir, exist_ok=True)
    np.random.seed(seed=696)  # fixed seed => reproducible random label choice

    # setup generator
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.func_name = 'training.stylegan2_multi.G_main'
    Gs_kwargs.verbose = a.verbose
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type
    Gs_kwargs.impl = a.ops

    # mask/blend latents with external latmask or by splitting the frame
    if a.latmask is None:
        nHW = [int(s) for s in a.nXY.split('-')][::-1]
        assert len(nHW) == 2, ' Wrong count nXY: %d (must be 2)' % len(nHW)
        n_mult = nHW[0] * nHW[1]
        if a.verbose and n_mult > 1:
            print(' Latent blending w/split frame %d x %d' % (nHW[1], nHW[0]))
        # None placeholders: the custom net splits the frame itself via countHW
        lmask = np.tile(np.asarray([[[[None]]]]), (1, n_mult, 1, 1))
        Gs_kwargs.countHW = nHW
        Gs_kwargs.splitfine = a.splitfine
    else:
        if a.verbose:
            print(' Latent blending with mask', a.latmask)
        n_mult = 2
        if os.path.isfile(a.latmask):  # single file
            lmask = np.asarray([[img_read(a.latmask)[:, :, 0] / 255.]])  # [h,w]
        elif os.path.isdir(a.latmask):  # directory with frame sequence
            lmask = np.asarray([[img_read(f)[:, :, 0] / 255. for f in img_list(a.latmask)]])  # [h,w]
        else:
            print(' !! Blending mask not found:', a.latmask)
            exit(1)
        lmask = np.concatenate((lmask, 1 - lmask), 1)  # [frm,2,h,w]
        Gs_kwargs.latmask_res = lmask.shape[2:]

    # load model with arguments
    sess = tflib.init_tf({'allow_soft_placement': True})
    pkl_name = osp.splitext(a.model)[0]
    with open(pkl_name + '.pkl', 'rb') as file:
        network = pickle.load(file, encoding='latin1')
    try:
        _, _, network = network  # pkl may hold a (G, D, Gs) triple
    except (ValueError, TypeError):  # was a bare except; only unpacking can fail
        pass  # pkl holds the generator alone
    for k in list(network.static_kwargs.keys()):
        Gs_kwargs[k] = network.static_kwargs[k]

    # reload custom network, if needed
    if '.pkl' in a.model.lower():
        print(' .. Gs from pkl ..', basename(a.model))
        Gs = network
    else:  # reconstruct network
        print(' .. Gs custom ..', basename(a.model))
        Gs = tflib.Network('Gs', **Gs_kwargs)
        Gs.copy_vars_from(network)

    if a.verbose:
        print('kwargs:', ['%s: %s' % (kv[0], kv[1]) for kv in sorted(Gs.static_kwargs.items())])
        print(' out shape', Gs.output_shape[1:])
    if a.size is None:
        a.size = Gs.output_shape[2:]

    if a.verbose:
        print(' making timeline..')
    lats = []  # list of [frm,1,512]
    for i in range(n_mult):
        lat_tmp = latent_anima((1, Gs.input_shape[1]), a.frames, a.fstep, cubic=a.cubic, gauss=a.gauss, verbose=False)  # [frm,1,512]
        lats.append(lat_tmp)
    latents = np.concatenate(lats, 1)  # [frm,X,512]
    print(' latents', latents.shape)
    frame_count = latents.shape[0]

    # distort image by tweaking initial const layer
    if a.digress > 0:
        try:
            latent_size = Gs.static_kwargs['latent_size']
        except (AttributeError, KeyError):  # was a bare except
            latent_size = 512  # default latent size
        try:
            init_res = Gs.static_kwargs['init_res']
        except (AttributeError, KeyError):  # was a bare except
            init_res = (4, 4)  # default initial layer size
        dconst = []
        for i in range(n_mult):
            dc_tmp = a.digress * latent_anima([1, latent_size, *init_res], a.frames, a.fstep, cubic=True, verbose=False)
            dconst.append(dc_tmp)
        dconst = np.concatenate(dconst, 1)
    else:
        dconst = np.zeros([frame_count, 1, 1, 1, 1])

    # labels / conditions
    try:
        label_size = Gs_kwargs.label_size
    except (AttributeError, KeyError):  # was a bare except; kwarg may be absent
        label_size = 0
    if label_size > 0:
        labels = np.zeros((frame_count, n_mult, label_size))  # [frm,X,lbl]
        if a.labels is None:
            label_ids = []
            for i in range(n_mult):
                label_ids.append(random.randint(0, label_size - 1))
        else:
            label_ids = [int(x) for x in a.labels.split('-')]
            label_ids = label_ids[:n_mult]  # ensure we have enough labels
        for i, l in enumerate(label_ids):
            labels[:, i, l] = 1
    else:
        labels = [None]

    # generate images from latent timeline
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        latent = latents[i]  # [X,512]
        label = labels[i % len(labels)]
        latmask = lmask[i % len(lmask)] if lmask is not None else [None]  # [X,h,w]
        dc = dconst[i % len(dconst)]  # [X,512,4,4]

        # generate multi-latent result (2-input nets = plain G taking z + label)
        if Gs.num_inputs == 2:
            output = Gs.run(latent, label, truncation_psi=a.trunc, randomize_noise=False, output_transform=fmt)
        else:
            output = Gs.run(latent, label, latmask, dc, truncation_psi=a.trunc, randomize_noise=False, output_transform=fmt)

        # save image (png keeps the alpha channel of 4-channel models)
        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%06d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()

    # convert latents to dlatents, save them
    if a.save_lat:
        latents = latents.squeeze(1)  # [frm,512]
        # NOTE(review): uses the `label` left over from the last loop iteration — confirm intended
        dlatents = Gs.components.mapping.run(latents, label, dtype='float16')  # [frm,18,512]
        filename = '{}-{}-{}.npy'.format(basename(a.model), a.size[1], a.size[0])
        filename = osp.join(osp.dirname(a.out_dir), filename)
        np.save(filename, dlatents)
        print('saved dlatents', dlatents.shape, 'to', filename)
def main():
    """Render frames by interpolating saved dlatents (TensorFlow backend).

    Loads key dlatents from `a.dlatents`, optionally overrides higher style
    layers with `a.style_npy_file`, applies the truncation trick, and writes
    numbered frames to `a.out_dir`.
    """
    os.makedirs(a.out_dir, exist_ok=True)

    # setup generator
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.func_name = 'training.stylegan2_multi.G_main'
    Gs_kwargs.verbose = a.verbose
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type
    Gs_kwargs.impl = a.ops

    # load model with arguments
    sess = tflib.init_tf({'allow_soft_placement': True})
    pkl_name = osp.splitext(a.model)[0]
    with open(pkl_name + '.pkl', 'rb') as file:
        network = pickle.load(file, encoding='latin1')
    try:
        _, _, network = network  # pkl may hold a (G, D, Gs) triple
    except (ValueError, TypeError):  # was a bare except; only unpacking can fail
        pass  # pkl holds the generator alone
    for k in list(network.static_kwargs.keys()):
        Gs_kwargs[k] = network.static_kwargs[k]

    # reload custom network, if needed
    if '.pkl' in a.model.lower():
        print(' .. Gs from pkl ..', basename(a.model))
        Gs = network
    else:  # reconstruct network
        print(' .. Gs custom ..', basename(a.model))
        Gs = tflib.Network('Gs', **Gs_kwargs)
        Gs.copy_vars_from(network)

    z_dim = Gs.input_shape[1]
    dz_dim = 512  # dlatent_size
    try:
        # number of style layers: 2 per resolution step from 4px up
        dl_dim = 2 * (int(np.floor(np.log2(Gs_kwargs.resolution))) - 1)
    except (AttributeError, KeyError):  # was a bare except; kwarg may be absent
        print(' Resave model, no resolution kwarg found!')
        exit(1)
    dlat_shape = (1, dl_dim, dz_dim)  # [1,18,512]

    # read saved latents: single file or a directory of .npy files
    if a.dlatents is not None and osp.isfile(a.dlatents):
        key_dlatents = load_latents(a.dlatents)
        if len(key_dlatents.shape) == 2:
            key_dlatents = np.expand_dims(key_dlatents, 0)
    elif a.dlatents is not None and osp.isdir(a.dlatents):
        key_dlatents = []
        npy_list = file_list(a.dlatents, 'npy')
        for npy in npy_list:
            key_dlatent = load_latents(npy)
            if len(key_dlatent.shape) == 2:
                key_dlatent = np.expand_dims(key_dlatent, 0)
            key_dlatents.append(key_dlatent)
        key_dlatents = np.concatenate(key_dlatents)  # [frm,18,512]
    else:
        print(' No input dlatents found')
        exit(1)  # fix: signal failure with a non-zero exit code (was exit())
    key_dlatents = key_dlatents[:, np.newaxis]  # [frm,1,18,512]
    print(' key dlatents', key_dlatents.shape)

    # replace higher layers with single (style) latent
    if a.style_npy_file is not None:
        print(' styling with latent', a.style_npy_file)
        style_dlatent = load_latents(a.style_npy_file)
        while len(style_dlatent.shape) < 4:
            style_dlatent = np.expand_dims(style_dlatent, 0)
        # try replacing 5 by other value, less than dl_dim
        key_dlatents[:, :, range(5, dl_dim), :] = style_dlatent[:, :, range(5, dl_dim), :]

    frames = key_dlatents.shape[0] * a.fstep
    dlatents = latent_anima(dlat_shape, frames, a.fstep, key_latents=key_dlatents, cubic=a.cubic, verbose=True)  # [frm,1,512]
    print(' dlatents', dlatents.shape)
    frame_count = dlatents.shape[0]

    # truncation trick: pull the first 8 layers toward the average dlatent
    dlatent_avg = Gs.get_var('dlatent_avg')  # (512,)
    tr_range = range(0, 8)
    dlatents[:, :, tr_range, :] = dlatent_avg + (dlatents[:, :, tr_range, :] - dlatent_avg) * a.trunc

    # distort image by tweaking initial const layer
    if a.digress > 0:
        try:
            latent_size = Gs.static_kwargs['latent_size']
        except (AttributeError, KeyError):  # was a bare except
            latent_size = 512  # default latent size
        try:
            init_res = Gs.static_kwargs['init_res']
        except (AttributeError, KeyError):  # was a bare except
            init_res = (4, 4)  # default initial layer size
        dconst = a.digress * latent_anima([1, latent_size, *init_res], frames, a.fstep, cubic=True, verbose=False)
    else:
        dconst = np.zeros([frame_count, 1, 1, 1, 1])

    # generate images from latent timeline
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        # NOTE(review): `wvars`/`wts` are not defined in this scope, and
        # `a.digress` is normally a float (so `is True` never fires) — this
        # branch looks like dead leftover code; confirm and remove upstream.
        if a.digress is True:
            tf.get_default_session().run(tf.assign(wvars[0], wts[i]))

        # generate multi-latent result (2-input nets = plain G)
        if Gs.num_inputs == 2:
            output = Gs.components.synthesis.run(dlatents[i], randomize_noise=False, output_transform=fmt, minibatch_size=1)
        else:
            output = Gs.components.synthesis.run(dlatents[i], [None], dconst[i], randomize_noise=False, output_transform=fmt, minibatch_size=1)

        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%06d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()
def main():
    """Render a latent-space walk with a (TF) custom StyleGAN2 generator.

    Model parameters (resolution, config letter, optional initial resolution)
    are parsed from the model filename, e.g. `name-1024-f-....pkl`. Writes
    numbered frames to `a.out_dir` and saves the mapped dlatents as .npy.
    """
    os.makedirs(a.out_dir, exist_ok=True)
    np.random.seed(seed=696)  # fixed seed => reproducible timeline

    # parse filename to model parameters
    mparams = basename(a.model).split('-')
    res = int(mparams[1])
    cfg = mparams[2]

    # setup generator
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.func_name = 'training.stylegan2_custom.G_main'
    Gs_kwargs.verbose = False
    Gs_kwargs.resolution = res
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type
    Gs_kwargs.latent_size = a.latent_size
    Gs_kwargs.impl = a.ops

    # config f and e share the synthesis func; e uses a smaller fmap base
    if cfg.lower() == 'f':
        Gs_kwargs.synthesis_func = 'G_synthesis_stylegan2'
    elif cfg.lower() == 'e':
        Gs_kwargs.synthesis_func = 'G_synthesis_stylegan2'
        Gs_kwargs.fmap_base = 8 << 10
    else:
        print(' old modes [A-D] not implemented')
        exit(1)  # fix: signal failure with a non-zero exit code (was exit())

    # check initial model resolution
    if len(mparams) > 3:
        if 'x' in mparams[3].lower():
            init_res = [int(x) for x in mparams[3].lower().split('x')]
            Gs_kwargs.init_res = list(reversed(init_res))  # [H,W]

    # load model, check channels
    sess = tflib.init_tf({'allow_soft_placement': True})
    pkl_name = osp.splitext(a.model)[0]
    with open(pkl_name + '.pkl', 'rb') as file:
        network = pickle.load(file, encoding='latin1')
    try:
        _, _, Gs = network  # pkl may hold a (G, D, Gs) triple
    except (ValueError, TypeError):  # was a bare except; only unpacking can fail
        Gs = network
    Gs_kwargs.num_channels = Gs.output_shape[1]

    # reload custom network, if needed
    if '.pkl' in a.model.lower():
        print(' .. Gs from pkl ..')
    else:
        print(' .. Gs custom ..')
        Gs = tflib.Network('Gs', **Gs_kwargs)
        Gs.copy_vars_from(network)

    print(' out shape', Gs.output_shape[1:])
    if a.size is None:
        a.size = Gs.output_shape[2:]

    z_dim = Gs.input_shape[1]
    shape = (1, z_dim)

    print(' making timeline..')
    latents = latent_anima(shape, a.frames, a.fstep, cubic=a.cubic, gauss=a.gauss, verbose=True)  # [frm,1,512]
    print(' latents', latents.shape)

    # generate images from latent timeline
    frame_count = latents.shape[0]
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        output = Gs.run(latents[i], [None], truncation_psi=a.trunc, randomize_noise=False, output_transform=fmt)
        # png keeps the alpha channel of 4-channel models
        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%05d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()

    # convert latents to dlatents, save them
    latents = latents.squeeze(1)  # [frm,512]
    dlatents = Gs.components.mapping.run(latents, None, latent_size=z_dim, dtype='float16')  # [frm,18,512]
    filename = '{}-{}-{}.npy'.format(basename(a.model), a.size[1], a.size[0])
    filename = osp.join(osp.dirname(a.out_dir), filename)
    np.save(filename, dlatents)
    print('saved dlatents', dlatents.shape, 'to', filename)
def main():
    """Render frames from saved dlatents with a (TF) custom StyleGAN2 generator.

    Model parameters are parsed from the model filename. Key dlatents come from
    `a.dlatents` (file or directory), higher style layers may be overridden by
    `a.style_npy_file`, the truncation trick is applied, and numbered frames
    are written to `a.out_dir`.
    """
    os.makedirs(a.out_dir, exist_ok=True)

    # parse filename to model parameters
    mparams = basename(a.model).split('-')
    res = int(mparams[1])
    cfg = mparams[2]

    # setup generator
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.func_name = 'training.stylegan2_custom.G_main'
    Gs_kwargs.verbose = False
    Gs_kwargs.resolution = res
    Gs_kwargs.size = a.size
    Gs_kwargs.scale_type = a.scale_type
    Gs_kwargs.latent_size = a.latent_size

    # config f and e share the synthesis func; e uses a smaller fmap base
    if cfg.lower() == 'f':
        Gs_kwargs.synthesis_func = 'G_synthesis_stylegan2'
    elif cfg.lower() == 'e':
        Gs_kwargs.synthesis_func = 'G_synthesis_stylegan2'
        Gs_kwargs.fmap_base = 8 << 10
    else:
        print(' old modes [A-D] not implemented')
        exit(1)  # fix: signal failure with a non-zero exit code (was exit())

    # check initial model resolution
    if len(mparams) > 3:
        if 'x' in mparams[3].lower():
            init_res = [int(x) for x in mparams[3].lower().split('x')]
            Gs_kwargs.init_res = list(reversed(init_res))  # [H,W]

    # load model, check channels
    sess = tflib.init_tf({'allow_soft_placement': True})
    pkl_name = osp.splitext(a.model)[0]
    with open(pkl_name + '.pkl', 'rb') as file:
        network = pickle.load(file, encoding='latin1')
    try:
        _, _, Gs = network  # pkl may hold a (G, D, Gs) triple
    except (ValueError, TypeError):  # was a bare except; only unpacking can fail
        Gs = network
    Gs_kwargs.num_channels = Gs.output_shape[1]

    # reload custom network, if needed
    if '.pkl' in a.model.lower():
        print(' .. Gs from pkl ..')
    else:
        print(' .. Gs custom ..')
        Gs = tflib.Network('Gs', **Gs_kwargs)
        Gs.copy_vars_from(network)

    z_dim = Gs.input_shape[1]
    dz_dim = a.dlatent_size  # 512
    # number of style layers: 2 per resolution step from 4px up
    dl_dim = 2 * (int(np.floor(np.log2(res))) - 1)
    dlat_shape = (1, dl_dim, dz_dim)  # [1,18,512]

    # read saved latents: single file or a directory of .npy files
    if a.dlatents is not None and osp.isfile(a.dlatents):
        key_dlatents = load_latents(a.dlatents)
        if len(key_dlatents.shape) == 2:
            key_dlatents = np.expand_dims(key_dlatents, 0)
    elif a.dlatents is not None and osp.isdir(a.dlatents):
        key_dlatents = []
        npy_list = file_list(a.dlatents, 'npy')
        for npy in npy_list:
            key_dlatent = load_latents(npy)
            if len(key_dlatent.shape) == 2:
                key_dlatent = np.expand_dims(key_dlatent, 0)
            key_dlatents.append(key_dlatent)
        key_dlatents = np.concatenate(key_dlatents)  # [frm,18,512]
    else:
        print(' No input dlatents found')
        exit(1)  # fix: signal failure with a non-zero exit code (was exit())
    key_dlatents = key_dlatents[:, np.newaxis]  # [frm,1,18,512]
    print(' key dlatents', key_dlatents.shape)

    # replace higher layers with single (style) latent
    if a.style_npy_file is not None:
        print(' styling with latent', a.style_npy_file)
        style_dlatent = load_latents(a.style_npy_file)
        while len(style_dlatent.shape) < 4:
            style_dlatent = np.expand_dims(style_dlatent, 0)
        # try other values < dl_dim besides 5
        key_dlatents[:, :, range(5, dl_dim), :] = style_dlatent[:, :, range(5, dl_dim), :]

    frames = key_dlatents.shape[0] * a.fstep
    dlatents = latent_anima(dlat_shape, frames, a.fstep, key_latents=key_dlatents, cubic=a.cubic, verbose=True)  # [frm,1,512]
    print(' dlatents', dlatents.shape)

    # truncation trick: pull the first 8 layers toward the average dlatent
    dlatent_avg = Gs.get_var('dlatent_avg')  # (512,)
    tr_range = range(0, 8)
    dlatents[:, :, tr_range, :] = dlatent_avg + (dlatents[:, :, tr_range, :] - dlatent_avg) * a.trunc

    # loop for graph frame by frame
    frame_count = dlatents.shape[0]
    pbar = ProgressBar(frame_count)
    for i in range(frame_count):
        dlatent = dlatents[i]
        output = Gs.components.synthesis.run(dlatent, randomize_noise=False, output_transform=fmt, minibatch_size=1)
        # png keeps the alpha channel of 4-channel models
        ext = 'png' if output.shape[3] == 4 else 'jpg'
        filename = osp.join(a.out_dir, "%05d.%s" % (i, ext))
        imsave(filename, output[0])
        pbar.upd()