def swap_faces(image_a_path, image_b_path):
    # Load both faces and normalize pixel values to [0, 1]
    image_a, image_b = load_images([
        os.path.join(IMAGES_FOLDER, 'trump', image_a_path),
        os.path.join(IMAGES_FOLDER, 'cage', image_b_path)]) / 255.0

    # Shift each image toward the other identity's mean color
    image_a += images_B_mean - images_A_mean
    image_b += images_A_mean - images_B_mean

    # Preprocess loaded images
    image_a = cv2.resize(image_a, (64, 64))
    image_b = cv2.resize(image_b, (64, 64))
    image_a = toTensor(image_a).to(device).float()
    image_b = toTensor(image_b).to(device).float()

    # Forward through the shared encoder with the opposite decoder to swap identities
    result_a = var_to_np(model(image_a, 'B'))
    result_b = var_to_np(model(image_b, 'A'))
    result_a = np.moveaxis(np.squeeze(result_a), 0, 2)
    result_b = np.moveaxis(np.squeeze(result_b), 0, 2)
    result_a = np.clip(result_a * 255, 0, 255).astype('uint8')
    result_b = np.clip(result_b * 255, 0, 255).astype('uint8')

    # Name each output after its source and target frames
    image_a_filename = os.path.splitext(image_a_path)[0]
    image_b_filename = os.path.splitext(image_b_path)[0]
    result_a_filename = f'{image_a_filename}-{image_b_filename}.jpg'
    result_b_filename = f'{image_b_filename}-{image_a_filename}.jpg'
    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_a_filename), result_a)
    cv2.imwrite(os.path.join(SWAPS_FOLDER, result_b_filename), result_b)
    return result_a_filename, result_b_filename
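# swap_faces relies on small conversion helpers that are not shown in this
# snippet. A minimal sketch of what they could look like -- the names match
# the calls above, but these bodies are illustrative, not the repo's actual
# implementations:
import numpy as np
import torch

def toTensor(image):
    # HWC float image -> NCHW float tensor with a batch dimension
    return torch.from_numpy(np.moveaxis(image, 2, 0)).unsqueeze(0)

def var_to_np(tensor):
    # Detach from the autograd graph and copy to a NumPy array on the CPU
    return tensor.detach().cpu().numpy()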
def extract_faces(video_path):
    cap = cv2.VideoCapture(video_path)
    n = 0
    while cap.isOpened() and n < 1000:
        ret, frame = cap.read()
        if not ret:
            break

        # Detect and crop the face in the current frame
        position, croped_face = extract_face(frame)

        # Run the crop through the face-swap model and bring the result
        # back to an HWC uint8 image
        converted_face = convert_face(croped_face)
        converted_face = converted_face.squeeze(0)
        converted_face = var_to_np(converted_face)
        converted_face = converted_face.transpose(1, 2, 0)
        converted_face = np.clip(converted_face * 255, 0, 255).astype('uint8')
        cv2.imshow("converted_face", cv2.resize(converted_face, (256, 256)))
        cv2.waitKey(2000)

        # Resize the converted face back toward the original crop size
        back_size = cv2.resize(
            converted_face,
            (croped_face.shape[0] - 120, croped_face.shape[1] - 120))

        # Paste the converted face into the frame and write it to the output video
        merged = merge(position, back_size, frame)
        out.write(merged)
        n = n + 1
        print(n)
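# The loop above writes frames to an `out` VideoWriter that must exist before
# the loop runs. A plausible setup sketch -- the output path and XVID codec
# are illustrative choices, not fixed by the snippet:
import cv2

cap = cv2.VideoCapture(video_path)
fps = cap.get(cv2.CAP_PROP_FPS)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, fps, (width, height))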
optimizer_2.step()
print('epoch: {}, lossA:{}, lossB:{}'.format(epoch, loss1.item(), loss2.item()))

if epoch % 10 == 0:
    print('===> Saving models...')
    state = {
        'state': model.state_dict(),
        'epoch': epoch
    }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/autoencoder.t7')

    # Build a comparison grid: input, same-identity reconstruction, swap
    test_A_ = target_A[0:14]
    test_B_ = target_B[0:14]
    test_A = var_to_np(target_A[0:14])
    test_B = var_to_np(target_B[0:14])
    figure_A = np.stack([
        test_A,
        var_to_np(model(test_A_, 'A')),
        var_to_np(model(test_A_, 'B')),
    ], axis=1)
    figure_B = np.stack([
        test_B,
        var_to_np(model(test_B_, 'B')),
        var_to_np(model(test_B_, 'A')),
    ], axis=1)
    figure = np.concatenate([figure_A, figure_B], axis=0)
    figure = figure.transpose((0, 1, 3, 4, 2))
    figure = figure.reshape((4, 7) + figure.shape[1:])
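# After the reshape above, `figure` is a (4, 7) grid where each cell is a
# 3-panel strip (input, reconstruction, swap) of 64x64 RGB images. A
# hypothetical helper that flattens that grid into one displayable image;
# the original code presumably does this with a tiling utility of its own:
def tile_figure(figure):
    # figure: (rows, cols, panels, H, W, C) -> (rows*H, cols*panels*W, C)
    rows, cols, panels, h, w, c = figure.shape
    figure = figure.transpose(0, 3, 1, 2, 4, 5)  # rows, H, cols, panels, W, C
    return figure.reshape(rows * h, cols * panels * w, c)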
loss1 = criterion(warped_A, target_A)
loss2 = criterion(warped_B, target_B)
loss = loss1.item() + loss2.item()
loss1.backward()
loss2.backward()
optimizer_1.step()
optimizer_2.step()
print("epoch: {}, lossA:{}, lossB:{}".format(epoch, loss1.item(), loss2.item()))

if epoch % args.log_interval == 0:
    test_A_ = target_A[0:14]
    test_B_ = target_B[0:14]
    test_A = var_to_np(target_A[0:14])
    test_B = var_to_np(target_B[0:14])

    print("===> Saving models...")
    state = {"state": model.state_dict(), "epoch": epoch}
    if not os.path.isdir("checkpoint"):
        os.mkdir("checkpoint")
    torch.save(state, "./checkpoint/autoencoder.t7")

    figure_A = np.stack(
        [
            test_A,
            var_to_np(model(test_A_, "A")),
            var_to_np(model(test_A_, "B")),
        ],
        axis=1,
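# The checkpoint dict saved above keeps the weights under 'state' and the
# epoch under 'epoch', so resuming is straightforward. A minimal sketch,
# assuming the same model object and checkpoint path:
checkpoint = torch.load("./checkpoint/autoencoder.t7")
model.load_state_dict(checkpoint["state"])
start_epoch = checkpoint["epoch"] + 1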
loss = loss1.item() + loss2.item()
loss1.backward()
loss2.backward()
optimizer_1.step()
optimizer_2.step()

# Log both losses to TensorBoard
tb.add_scalar('LossA', loss1.item(), epoch)
tb.add_scalar('LossB', loss2.item(), epoch)
print('epoch: {}, lossA:{}, lossB:{}'.format(epoch, loss1.item(), loss2.item()))

if epoch % args.log_interval == 0:
    test_A_ = target_A[0:14]
    test_B_ = target_B[0:14]
    test_A = var_to_np(target_A[0:14])
    test_B = var_to_np(target_B[0:14])

    print('===> Saving models...')
    state = {'state': model.state_dict(), 'epoch': epoch}
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/' + model_name)
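# The `tb` logger used above is a TensorBoard SummaryWriter that has to be
# created before the training loop and closed after it. A minimal sketch,
# assuming the default ./runs log directory:
from torch.utils.tensorboard import SummaryWriter

tb = SummaryWriter()  # writes event files under ./runs/ by default
# ... training loop with the tb.add_scalar(...) calls shown above ...
tb.close()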
warped_B = model(warped_B, 'B')  # train B using decoder B
loss1 = criterion(warped_A, target_A)  # mean absolute error between prediction and target
loss2 = criterion(warped_B, target_B)
loss = loss1.item() + loss2.item()  # combined loss for reporting
loss1.backward()  # backpropagate: compute and store gradients for the network's intermediate values and parameters
loss2.backward()
optimizer_1.step()  # apply the gradient update
optimizer_2.step()
print('epoch: {}, lossA:{}, lossB:{}'.format(epoch, loss1.item(), loss2.item()))

if epoch % args.log_interval == 0:
    test_A_ = target_A[0:14]  # b = a[n:m] is slicing: copies a[n] through a[m-1] into a new object b
    test_B_ = target_B[0:14]
    test_A = var_to_np(target_A[0:14])  # convert the Variable to NumPy
    test_B = var_to_np(target_B[0:14])

    print('===> Saving models...')
    state = {
        'state': model.state_dict(),
        'epoch': epoch
    }
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save(state, './checkpoint/autoencoder.t7')

    # Test with training images
    figure_A = np.stack([
        test_A,
        var_to_np(model(test_A_, 'A')),  # feed through the model, then convert to NumPy
        var_to_np(model(test_A_, 'B')),  # face swap
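# The loop above assumes an L1 criterion (the "mean absolute error" its
# comments describe) and one optimizer per decoder path, each sharing the
# encoder. A minimal sketch, assuming the model exposes `encoder`,
# `decoder_A`, and `decoder_B` attributes (names and hyperparameters are
# illustrative, not confirmed by the snippet):
import torch.nn as nn
import torch.optim as optim

criterion = nn.L1Loss()
optimizer_1 = optim.Adam(
    list(model.encoder.parameters()) + list(model.decoder_A.parameters()),
    lr=5e-5, betas=(0.5, 0.999))
optimizer_2 = optim.Adam(
    list(model.encoder.parameters()) + list(model.decoder_B.parameters()),
    lr=5e-5, betas=(0.5, 0.999))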