# ---- Load pretrained weights ----------------------------------------------
# map_location='cpu' keeps these loads working on CPU-only machines even when
# the checkpoints were saved from GPU; models are moved to GPU below when
# opt.cuda is set, so the end state is unchanged.
vgg.load_state_dict(torch.load(opt.vgg_dir, map_location='cpu'))
vgg5.load_state_dict(torch.load(opt.loss_network_dir, map_location='cpu'))
matrix.load_state_dict(torch.load(opt.matrixPath, map_location='cpu'))
# NOTE: no checkpoint is loaded into `dec` — the decoder is what is trained
# here (it is the only module handed to the optimizer below).

# Freeze everything except the decoder so gradients flow only into `dec`.
for frozen_net in (vgg, vgg5, matrix):
    for param in frozen_net.parameters():
        param.requires_grad = False

################# LOSS & OPTIMIZER #################
# Perceptual style/content loss plus the sp_weight term used by this script.
criterion = LossCriterion(opt.style_layers,
                          opt.content_layers,
                          opt.style_weight,
                          opt.content_weight,
                          opt.sp_weight)
optimizer = optim.Adam(dec.parameters(), opt.lr)

################# GPU #################
if opt.cuda:
    vgg.cuda()
    dec.cuda()
    vgg5.cuda()
    matrix.cuda()

################# TRAINING #################
def adjust_learning_rate(optimizer, iteration):
    """Apply inverse-time decay: lr = opt.lr / (1 + 1e-5 * iteration).

    The learning rate shrinks smoothly with the iteration count (halved by
    iteration 1e5). The previous docstring ("decayed by 10 every 30 epochs")
    described a step schedule this code never implemented.
    """
    for param_group in optimizer.param_groups:
        param_group['lr'] = opt.lr / (1 + iteration * 1e-5)
################# MODEL #################
# Train the feature-transform module (`matrix`) at the relu4_1 level; the
# encoder and decoder are fixed, pretrained networks.
matrix = MulLayer('r41')
vgg = encoder4()
dec = decoder4()
# map_location='cpu' keeps these loads working on CPU-only machines even when
# the checkpoints were saved from GPU; models are moved to GPU below when
# opt.cuda is set, so the end state is unchanged.
vgg.load_state_dict(torch.load(opt.vgg_dir, map_location='cpu'))
dec.load_state_dict(torch.load(opt.decoder_dir, map_location='cpu'))
vgg5.load_state_dict(torch.load(opt.loss_network_dir, map_location='cpu'))

# Freeze encoder, loss network, and decoder: only `matrix` is optimized here.
for frozen_net in (vgg, vgg5, dec):
    for param in frozen_net.parameters():
        param.requires_grad = False

################# LOSS & OPTIMIZER #################
criterion = LossCriterion(opt.style_layers,
                          opt.content_layers,
                          opt.style_weight,
                          opt.content_weight)
optimizer = optim.Adam(matrix.parameters(), opt.lr)

################# GLOBAL VARIABLE #################
# Pre-allocated (uninitialized) batch buffers for content and style images;
# presumably overwritten with loader data each iteration — confirm in the
# training loop below.
contentV = torch.Tensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)
styleV = torch.Tensor(opt.batchSize, 3, opt.fineSize, opt.fineSize)

################# GPU #################
if opt.cuda:
    vgg.cuda()
    dec.cuda()
    vgg5.cuda()
    matrix.cuda()
    contentV = contentV.cuda()
    styleV = styleV.cuda()