# NOTE(review): this chunk was whitespace-mangled; the indentation below is
# reconstructed.  The first four statements are the tail of a function whose
# `def` line lies outside this chunk (note the bare `return`) — `n_pic`,
# `w_list` and `w_path` are presumably its parameters/locals; confirm.

# Stack the saved W+ latents into one array: one [18, 512] latent per file.
w = np.zeros((n_pic, 18, 512))
for i, fn in enumerate(w_list):
    w[i] = np.load(w_path + fn)
return torch.from_numpy(w).float().cuda()


with torch.no_grad():
    ''' Set input_is_Wlatent | True for W-latent , False for Z-latent '''
    # Build the 1024px generator in W-latent mode, load the EMA weights, and
    # wrap it so intermediate conv activations can be retained.
    # `model`, `state_dict` and `device` are defined elsewhere in this file.
    models = model.Generator(size=1024, style_dim=512, n_mlp=8,
                             input_is_Wlatent=True).to(device)
    models.load_state_dict(state_dict['g_ema'], strict=False)
    models = InstrumentedModel(models)
    models.eval()
    models.cuda()
    # Retain all 16 synthesis convs of the 1024px generator.
    models.retain_layers([
        'convs.0', 'convs.1', 'convs.2', 'convs.3', 'convs.4', 'convs.5',
        'convs.6', 'convs.7', 'convs.8', 'convs.9', 'convs.10', 'convs.11',
        'convs.12', 'convs.13', 'convs.14', 'convs.15'
    ])
    ''' Load Latent [1,1,512] for Z [1,18,512] for W+ '''
    w_path = './dataset/WLatent200/'
    # Only one latent file enabled; the rest were left disabled in-line.
    w_list = ['33.npy']  #, '164.npy', '197.npy', '150.npy']
def regression(outdir, test_file, fname, model, raw_noise, given_seg=None,
               eva_im=None, eva_seg=None):
    """Train a per-pixel multi-class classifier on retained GAN features.

    Collects the retained conv activations of ``model`` for ``raw_noise``,
    flattens them to per-pixel feature vectors, and fits a small
    feed-forward net (``MultiClassFF``) against pixel labels derived from
    ``given_seg``.  Training loss is logged to TensorBoard; every epoch the
    classifier is checkpointed and a k-means visualisation of the current
    feature map is written as a preview image.

    Args:
        outdir:    directory for the TensorBoard log and the saved model.
        test_file: path prefix (directory) for per-epoch preview images.
        fname:     file name of the saved classifier state dict.
        model:     InstrumentedModel whose retained layers are collected.
        raw_noise: dataset of latent inputs fed to ``model``.
        given_seg: segmentation labels, reshapeable to [512*512, n_classes]
                   one-hot form — assumed; TODO confirm against caller.
        eva_im, eva_seg: unused here; kept for interface compatibility.
    """
    pt = os.path.join(outdir, fname)
    # Fix: create both directories unconditionally.  The original only
    # created `test_file` when `outdir` was missing, so an existing
    # `outdir` left `test_file` uncreated and the image save crashed.
    os.makedirs(outdir, exist_ok=True)
    os.makedirs(test_file, exist_ok=True)
    # os.makedirs('./results/img/'+args.model_n+'/train/')
    writer = SummaryWriter(outdir)

    # Fixed test latent used only for the per-epoch preview visualisation.
    w = z_sample(1, seed=3).unsqueeze(0)
    random_z = TensorDataset(w)
    test_img = torch.utils.data.DataLoader(random_z, batch_size=1)

    # Reference FFHQ generator whose retained stack feeds the preview.
    stylegan2_path = './stylegan2_pytorch/checkpoint/stylegan2-ffhq-config-f.pt'
    stylegan_state_dict = torch.load(stylegan2_path)
    models = stylegan_model.Generator(size=1024, style_dim=512, n_mlp=8,
                                      input_is_Wlatent=False)
    models.load_state_dict(stylegan_state_dict['g_ema'], strict=False)
    models = InstrumentedModel(models)
    models.eval()
    models.cuda()
    models.retain_layers([
        'convs.0', 'convs.1', 'convs.2', 'convs.3', 'convs.4', 'convs.5',
        'convs.6', 'convs.7', 'convs.8', 'convs.9', 'convs.10', 'convs.11',
        'convs.12', 'convs.13', 'convs.14', 'convs.15',
    ])
    stylegan_stack = collect_stack(512, models, test_img)

    # Feature collection needs no gradients.
    with torch.no_grad():
        noise_dataset = torch.utils.data.DataLoader(raw_noise, batch_size=1,
                                                    num_workers=0,
                                                    pin_memory=False)
        stack = collect_stack(512, model, noise_dataset)[0]  # [total_c, h, w]
        num_chan = stack.shape[0]
        stack = stack.reshape(num_chan, -1).T  # [H*W, chan]

    # Fix: the original left the reshape commented out and then read
    # `seg_flat` before assignment (NameError on every call).
    seg_flat = np.reshape(given_seg, (512 * 512, -1))  # [512*512, n_classes]
    seg_flat = torch.LongTensor(seg_flat)
    _, seg_flat = seg_flat.max(dim=1)  # one-hot -> class index, [512*512]

    batch_size = 512
    trainDataset = TensorDataset(torch.FloatTensor(stack),
                                 torch.LongTensor(seg_flat))
    trainLoader = torch.utils.data.DataLoader(dataset=trainDataset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=10,
                                              pin_memory=True)
    lr_rate = 0.001
    iterations = 100

    ## Model
    #reg_model = Feedforward(num_chan, 200).cuda()
    hidden_list = [2000]
    reg_model = MultiClassFF(num_chan, hidden_list, 4).cuda()

    ## Loss — NLLLoss: MultiClassFF presumably emits log-probabilities;
    ## verify its final layer is LogSoftmax.
    #criterion = FocalLoss().cuda()
    criterion = torch.nn.NLLLoss().cuda()
    optimizer = torch.optim.Adam(reg_model.parameters(), lr=lr_rate,
                                 weight_decay=0)
    decayRate = 0.96
    my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
        optimizer=optimizer, gamma=decayRate)

    for step in range(iterations):
        print('Epoch {} / {}'.format(step, iterations))
        total_loss = 0
        for (data, target) in trainLoader:
            optimizer.zero_grad()
            data = data.cuda()
            target = target.cuda()
            prediction = reg_model(data)
            loss = criterion(prediction, target)
            # Fix: accumulate a Python float.  Summing tensors kept every
            # batch's autograd graph alive for the whole epoch.
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
        # NOTE(review): normalises by batch_size, not the number of
        # batches — kept as in the original for comparable logs.
        print('Batch_Loss: ', total_loss / batch_size)

        # Decay the learning rate every 10 epochs (skip epoch 0).
        if step % 10 == 0 and step != 0:
            my_lr_scheduler.step()
            for param_group in optimizer.param_groups:
                print('Learning rate = ', param_group['lr'])

        writer.add_scalar('training loss', total_loss / batch_size, step)

        # Checkpoint, then dump a k-means preview of the current map.
        torch.save(reg_model.state_dict(), pt)
        combined = stacked_map(stylegan_stack, pt, hidden_list)
        k_im = kmean_viz(combined, 512)
        Image.fromarray((k_im).astype(np.uint8)).resize([1024, 1024]).save(
            test_file + "im_{:03d}.png".format(step), optimize=True,
            quality=80)
mod_num = 61 # writer = SummaryWriter("./results/models/logs/"+args.model_di_n) with torch.no_grad(): ''' Set input_is_Wlatent | True for W-latent , False for Z-latent ''' # models = model.Generator(size=1024, style_dim=512, n_mlp=8, input_is_Wlatent=True).to(device) # models.load_state_dict(state_dict['g_ema'], strict=False) # models = InstrumentedModel(models) # models.eval() # models.cuda() models = train_from_folder(models_dir=stylegan2_path, name=name, get_model=True) models = InstrumentedModel(models) models.eval() models.cuda() # models.retain_layers(['convs.0','convs.1','convs.2','convs.3','convs.4', # 'convs.5','convs.6','convs.7','convs.8','convs.9', # 'convs.10','convs.11','convs.12','convs.13','convs.14', # 'convs.15']) pf = 'blocks.' models.retain_layers([pf+'0.conv1', pf+'0.conv2', pf+'1.conv1', pf+'1.conv2', pf+'2.conv1', pf+'2.conv2', pf+'3.conv1', pf+'3.conv2', pf+'4.conv1', pf+'4.conv2', pf+'5.conv1', pf+'5.conv2']) # dict = models.state_dict() # for k,v in dict.items(): # print(k)
save_image( cluster_im, fn + 'cluster/' + str(i) + '_' + str(clus_n - del_clus) + '.png') # print(fn + 'cluster/'+str(i)+'_'+str(clus_n-del_clus)+'.png') with torch.no_grad(): ''' Set input_is_Wlatent | True for W-latent , False for Z-latent ''' models = model.Generator(size=1024, style_dim=512, n_mlp=8, input_is_Wlatent=True).to(device) models.load_state_dict(state_dict['g_ema'], strict=False) models = InstrumentedModel(models) models.eval() models #.cuda() # models.retain_layers(['convs.0','convs.1','convs.2','convs.3','convs.4', # 'convs.5','convs.6','convs.7','convs.8','convs.9', # 'convs.10','convs.11','convs.12','convs.13','convs.14', # 'convs.15']) #B # models.retain_layers(['convs.4', 'convs.5','convs.6','convs.7']) models.retain_layers(['convs.7']) # models.retain_layers(['convs.0','convs.1','convs.2','convs.3','convs.4', # 'convs.5'])