def swap_faces(image_a_path, image_b_path):
    """Swap faces between a 'trump' and a 'cage' image and save both results.

    Each face is pushed through the *other* identity's decoder ('B' for the
    trump image, 'A' for the cage image), producing two swapped outputs that
    are written as JPEGs into SWAPS_FOLDER.

    Returns:
        Tuple of the two output filenames (a-to-b swap, b-to-a swap).
    """
    source_paths = [os.path.join(IMAGES_FOLDER, 'trump', image_a_path),
                    os.path.join(IMAGES_FOLDER, 'cage', image_b_path)]
    image_a, image_b = load_images(source_paths) / 255.0

    # Shift each face toward the other identity's mean colour statistics so
    # the decoder receives inputs closer to its training distribution.
    image_a += images_B_mean - images_A_mean
    image_b += images_A_mean - images_B_mean

    def _prepare(img):
        # Resize to the network input resolution and move to the device.
        return toTensor(cv2.resize(img, (64, 64))).to(device).float()

    def _to_uint8_image(raw):
        # Squeeze the batch dim, CHW -> HWC, rescale [0, 1] -> [0, 255].
        hwc = np.moveaxis(np.squeeze(raw), 0, 2)
        return np.clip(hwc * 255, 0, 255).astype('uint8')

    # Forward with the opposite decoders to perform the swap.
    result_a = _to_uint8_image(var_to_np(model(_prepare(image_a), 'B')))
    result_b = _to_uint8_image(var_to_np(model(_prepare(image_b), 'A')))

    image_a_filename = os.path.splitext(image_a_path)[0]
    image_b_filename = os.path.splitext(image_b_path)[0]
    result_a_filename = f'{image_a_filename}-{image_b_filename}.jpg'
    result_b_filename = f'{image_b_filename}-{image_a_filename}.jpg'
    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_a_filename), result_a)
    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_b_filename), result_b)
    return result_a_filename, result_b_filename
def convert_face(croped_face):
    """Run one cropped face through the autoencoder's 'B' decoder.

    The face is resized to 256x256, normalised to [0, 1], passed through the
    same random-warp augmentation used in training, batched, and decoded.

    Args:
        croped_face: BGR face crop as a numpy array (any size).

    Returns:
        The raw model output tensor for the single-image batch.
    """
    resized_face = cv2.resize(croped_face, (256, 256))
    normalized_face = resized_face / 255.0
    warped_img, _ = random_warp(normalized_face)
    # Add a leading batch dimension and move onto the compute device.
    batch_warped_img = np.expand_dims(warped_img, axis=0)
    batch_warped_img = toTensor(batch_warped_img).to(device).float()

    # NOTE: the checkpoint is reloaded on every call; consider caching the
    # model at module level if this is invoked per-frame.
    # map_location keeps loading working on CPU-only hosts even when the
    # checkpoint was saved from a GPU run.
    model = Autoencoder().to(device)
    checkpoint = torch.load('./checkpoint/autoencoder.t7', map_location=device)
    model.load_state_dict(checkpoint['state'])
    model.eval()  # inference mode: freezes dropout/batch-norm behaviour

    with torch.no_grad():  # forward-only pass; skip autograd bookkeeping
        converted_face = model(batch_warped_img, 'B')
    return converted_face
# NOTE(review): this chunk starts mid-statement — it is the tail of an
# optimizer construction whose opening `optim.Adam([{'params': ...},` lies
# outside this view; the loop body is also truncated below.
{'params': model.decoder_B.parameters()}] , lr=5e-5, betas=(0.5, 0.999))
# print all the parameters in model
# s = sum([np.prod(list(p.size())) for p in model.parameters()])
# print('Number of params: %d' % s)

if __name__ == "__main__":
    print('Start training, press \'q\' to stop')
    # training Encoder, Decoder
    # get deep fake images figure
    for epoch in range(start_epoch, args.epochs):
        batch_size = args.batch_size
        # One augmented (warped) batch plus its target batch per identity.
        warped_A, target_A = get_training_data(images_A, batch_size)
        warped_B, target_B = get_training_data(images_B, batch_size)
        warped_A, target_A = toTensor(warped_A), toTensor(target_A)
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)
        if args.cuda:
            # Move all four batches onto the GPU.
            warped_A = warped_A.cuda()
            target_A = target_A.cuda()
            warped_B = warped_B.cuda()
            target_B = target_B.cuda()
        # Legacy Variable wrapper (pre-0.4 PyTorch API) plus float32 cast.
        warped_A, target_A, warped_B, target_B = Variable(warped_A.float()), Variable(target_A.float()), \
            Variable(warped_B.float()), Variable(target_B.float())
        # Clear accumulated gradients on both optimizers before backward.
        optimizer_1.zero_grad()
        optimizer_2.zero_grad()
# Optimizer for identity B: updates the shared encoder and decoder_B.
optimizer_2 = optim.Adam([{'params': model.encoder.parameters()},
                          {'params': model.decoder_B.parameters()}]
                         , lr=5e-5, betas=(0.5, 0.999))

if __name__ == "__main__":
    print('Start training, press \'q\' to stop')
    for epoch in range(start_epoch, args.epochs):
        batch_size = args.batch_size
        # `warped` is the augmented image; `target` is its matching ground
        # truth (the two must differ so the network cannot learn identity).
        warped_A, target_A = get_training_data(images_A, batch_size)
        warped_B, target_B = get_training_data(images_B, batch_size)
        # Convert the numpy batches to tensors.
        warped_A, target_A = toTensor(warped_A), toTensor(target_A)
        warped_B, target_B = toTensor(warped_B), toTensor(target_B)
        if args.cuda:
            # Copy each tensor to the selected device so all later ops run
            # on the GPU; .float() casts to float32.
            warped_A = warped_A.to(device).float()
            target_A = target_A.to(device).float()
            warped_B = warped_B.to(device).float()
            target_B = target_B.to(device).float()
        # Clear accumulated gradients before the next backward pass.
        optimizer_1.zero_grad()
        optimizer_2.zero_grad()
        warped_A = model(warped_A, 'A')  # reconstruct A with decoder A
        warped_B = model(warped_B, 'B')  # reconstruct B with decoder B
        loss1 = criterion(warped_A, target_A)  # mean absolute error (L1)
# NOTE(review): "cirterion" is a typo of "criterion" — left as-is because
# code outside this view may reference the misspelled name; verify before
# renaming.
cirterion = nn.L1Loss()
# Each optimizer trains the shared encoder plus one identity-specific
# decoder.
optimizer_1 = torch.optim.Adam([{'params': model.encoder.parameters()},
                                {'params': model.decoder_a.parameters()}],
                               lr=5e-5, betas=(0.5, 0.999))
optimizer_2 = torch.optim.Adam([{'params': model.encoder.parameters()},
                                {'params': model.decoder_b.parameters()}],
                               lr=5e-5, betas=(0.5, 0.999))

if __name__ == "__main__":
    files = open('log.txt', 'a+')  # training log, append mode
    batch_size = args.batch_size
    start = 0
    for epoch in range(start, args.epochs):
        # Augmented (warped) batch and its matching target per identity.
        wrap_a, target_a = get_training_data(images_a, batch_size)
        wrap_b, target_b = get_training_data(images_b, batch_size)
        wrap_a, target_a = toTensor(wrap_a), toTensor(target_a)
        wrap_b, target_b = toTensor(wrap_b), toTensor(target_b)
        if args.cuda:
            # Move all four batches onto the GPU.
            wrap_a = wrap_a.cuda()
            wrap_b = wrap_b.cuda()
            target_a = target_a.cuda()
            target_b = target_b.cuda()
        # Legacy Variable wrapper (pre-0.4 PyTorch API) plus float32 cast.
        wrap_a, target_a = Variable(wrap_a.float()), Variable(target_a.float())
        wrap_b, target_b = Variable(wrap_b.float()), Variable(target_b.float())
        # Clear accumulated gradients on both optimizers.
        optimizer_1.zero_grad()
        optimizer_2.zero_grad()
        wrap_a = model(wrap_a, 'A')  # reconstruct identity A with decoder A
# comment=f' batch_size={batch_size} lr={lr}' # tb = SummaryWriter(comment=comment) tb = SummaryWriter(log_dir=f'runs/{NAME}') if __name__ == "__main__": print('Start training, press \'q\' to stop') for epoch in range(start_epoch, epochs): # batch_size = args.batch_size warped_A, target_A = get_training_data(images_A, batch_size) warped_B, target_B = get_training_data(images_B, batch_size) #print("warped_A size is {}".format(warped_A.shape)) warped_A, target_A = toTensor(warped_A).float(), toTensor( target_A).float() warped_B, target_B = toTensor(warped_B).float(), toTensor( target_B).float() if args.cuda: warped_A = warped_A.to(device).float() target_A = target_A.to(device).float() warped_B = warped_B.to(device).float() target_B = target_B.to(device).float() optimizer_1.zero_grad() optimizer_2.zero_grad() warped_A = model(warped_A, 'A') warped_B = model(warped_B, 'B')