def movie_swap(temp_movie, dst_movie):
    """Swap faces frame-by-frame between two movies and re-encode the result.

    temp_movie -- movie providing the faces to merge (exploded into A2/)
    dst_movie  -- movie providing the destination frames (exploded into B2/)

    Side effects: writes PNG frames under A2/, B2/ and out/, prints progress,
    then shells out to ffmpeg to assemble out/out.mp4.
    """
    import glob
    import os
    from time import time  # fix: time() was called but never imported here

    start = time()
    # Explode both movies into numbered PNG frames.
    gen_frames(temp_movie, 'A2')
    gen_frames(dst_movie, 'B2')
    if not os.path.exists('out'):
        os.mkdir('out')

    def frame_no(path):
        # Numeric sort on the trailing "-<n>" of the file name; a lexical
        # sort would order frame-10 before frame-2.
        return int(os.path.basename(path).split('.')[0].split('-')[-1])

    frames_a = sorted(glob.glob('A2/*.png'), key=frame_no)
    frames_b = sorted(glob.glob('B2/*.png'), key=frame_no)

    # zip() stops at the shorter movie, so unmatched trailing frames are skipped.
    count = 0
    for f_a, f_b in zip(frames_a, frames_b):
        core.face_merge(src_img=f_a,
                        dst_img=f_b,
                        out_img='out/frame-{0}.png'.format(count),
                        face_area=[50, 30, 500, 485],
                        alpha=0.95,
                        k_size=(15, 10),
                        mat_multiple=0.95)
        count += 1
        print("已处理", count, "张图片")
    stop = time()
    print("总共用时" + str(stop - start) + "秒")
    # fix: '%d' matches the unpadded frame numbers written above; the original
    # '%0d' (zero width) is not a valid ffmpeg image-sequence pattern.
    os.system('ffmpeg -i out/frame-%d.png -c:v libx264 -vf "fps=25,format=yuv420p" out/out.mp4')
def merge_one(dst_img, deltaData, gender, alpha, dst_matrix, dst_points): nowTime = time.time() # 参数 src_img, src_url, modelName, modelShortName, modelDescription = getModelImage( gender, deltaData) out_img = 'result/output' + str(int(time.time() * 1000)) + '.jpg' face_area = [100, 50, 500, 500] # src_img —— 模特图片 # dst_img —— 待融合的图片 # out_img —— 结果图片输出路径 # face_area —— 指定模板图中进行人脸融合的人脸框位置。四个正整数数组,依次代表人脸框左上角纵坐标(top),左上角横坐标(left),人脸框宽度(width),人脸框高度(height),通过设定改参数可以减少结果的大范围变形,把变形风险控制在人脸框区域 # alpha —— 融合比例,范围 [0,1]。数字越大融合结果包含越多融合图 (dst_img) 特征。 # blur_size—— 模糊核大小,用于模糊人脸融合边缘,减少融合后的违和感 # mat_multiple —— 缩放获取到的人脸心型区域 # 头像融合 core.face_merge(src_img=src_img, dst_img=dst_img, out_img=out_img, face_area=face_area, alpha=alpha, k_size=(10, 5), mat_multiple=0.95, dst_matrix=dst_matrix, dst_points=dst_points) final_img = uploadFileToOSS(out_img) print('Face Merge Success: ', final_img) endTime = time.time() print('Time Cost: ', (endTime - nowTime)) return final_img, src_url, modelName, modelShortName, modelDescription
def main(src_img_path, dst_img_path, out_img_path, alpha, mat_multiple):
    """Fuse dst_img_path's face into src_img_path, writing out_img_path."""
    merge_kwargs = {
        'src_img': src_img_path,
        'dst_img': dst_img_path,
        'out_img': out_img_path,
        # Template face box; fixed for this routine.
        'face_area': [50, 30, 500, 485],
        'alpha': alpha,
        'blur_detail_x': 15,
        'blur_detail_y': 10,
        'mat_multiple': mat_multiple,
    }
    core.face_merge(**merge_kwargs)
def main():
    """Run a single face merge and report the elapsed wall-clock time."""
    t0 = time()
    core.face_merge(
        src_img='./target/sen-00015.png',
        dst_img='./shiyuan/shiyuan-6.png',
        out_img='output/1.png',
        face_area=[50, 30, 500, 485],
        alpha=0.75,
        k_size=(15, 10),
        mat_multiple=0.95,
    )
    t1 = time()
    print(str(t1 - t0) + "秒")
def merge_one(src_img, dst_img, alpha):
    """Fuse dst_img's face into src_img and return the fixed output path."""
    result_path = 'images/out.jpg'
    # Fuse into the template's [top, left, width, height] face box.
    core.face_merge(
        src_img=src_img,
        dst_img=dst_img,
        out_img=result_path,
        face_area=[50, 50, 500, 485],
        alpha=alpha,
        k_size=(10, 5),
        mat_multiple=0.95,
    )
    return result_path
def main():
    """Batch-merge every source frame against one fixed destination image.

    CLI: -s/--source_dir input frames, -d/--dest_dir (accepted, unused),
    -o/--output_dir where output-<n>.jpg files are written.
    """
    # os.environ['CUDA_VISIBLE_DEVICES'] = 0
    ap = argparse.ArgumentParser(description='batch face swap routine')
    ap.add_argument('-s', '--source_dir', type=str, help='input source directory')
    ap.add_argument('-d', '--dest_dir', type=str, help='input destination directory')
    ap.add_argument('-o', '--output_dir', type=str, help='output directory')
    args = ap.parse_args()
    sd = args.source_dir
    od = args.output_dir
    if not os.path.exists(od):
        os.mkdir(od)
    source_path_list = []
    for pattern in ('*.jpg', '*.png'):
        source_path_list.extend(glob.glob(os.path.join(sd, pattern)))
    # fix: sort numerically on the trailing "-<n>" of the file NAME. The
    # original key split the whole path on '.', which only worked when the
    # path happened to start with './' and broke on plain or absolute paths.
    source_path_list.sort(
        key=lambda p: int(os.path.splitext(os.path.basename(p))[0].split('-')[-1]))
    d = './C/jin-02.png'  # fixed destination face used for every frame
    i = 0
    for s in source_path_list:
        core.face_merge(src_img=s,
                        dst_img=d,
                        out_img=os.path.join(od, 'output-{0}.jpg'.format(i)),
                        face_area=[50, 30, 500, 485],
                        alpha=0.75,
                        k_size=(15, 10),
                        mat_multiple=0.95)
        i = i + 1
        print("已处理", i, "张图片")
def merge_one(src_img, dst_img, alpha, dst_matrix, dst_points):
    """Fuse dst_img into src_img, upload the result to OSS, return its URL."""
    t_start = time.time()
    # Millisecond timestamp keeps output names unique.
    result_path = 'result/output' + str(int(time.time() * 1000)) + '.png'
    core.face_merge(
        src_img=src_img,
        dst_img=dst_img,
        out_img=result_path,
        face_area=[50, 50, 500, 500],
        alpha=alpha,
        k_size=(10, 5),
        mat_multiple=0.9,
        dst_matrix=dst_matrix,
        dst_points=dst_points,
    )
    final_img = uploadFileToOSS(result_path)
    print('Face Merge Success: ', final_img)
    t_end = time.time()
    print('Time Cost: ', (t_end - t_start))
    return final_img
def main():
    """Pair source and destination PNG frames by index and face-merge each pair.

    CLI: -s/--source_dir, -d/--dest_dir input frame directories,
    -o/--output_dir where output-<n>.jpg files are written.
    """
    ap = argparse.ArgumentParser(description='batch face swap routine')
    ap.add_argument('-s', '--source_dir', type=str, help='input source directory')
    ap.add_argument('-d', '--dest_dir', type=str, help='input destination directory')
    ap.add_argument('-o', '--output_dir', type=str, help='output directory')
    args = ap.parse_args()
    sd = args.source_dir
    dd = args.dest_dir
    od = args.output_dir
    if not os.path.exists(od):
        os.mkdir(od)

    def frame_no(path):
        # Numeric frame index from the trailing "-<n>" of the file name; the
        # original key split the full path on '.' and broke on paths that did
        # not start with './'.
        return int(os.path.splitext(os.path.basename(path))[0].split('-')[-1])

    source_path_list = sorted(glob.glob(os.path.join(sd, '*.png')), key=frame_no)
    target_path_list = sorted(glob.glob(os.path.join(dd, '*.png')), key=frame_no)
    # fix: iterate the sorted lists. The original sorted both lists and then
    # looped over fresh UNSORTED glob.iglob() results, pairing frames in
    # arbitrary filesystem order and discarding the sort entirely.
    i = 0
    for s, d in zip(source_path_list, target_path_list):
        core.face_merge(src_img=s,
                        dst_img=d,
                        out_img=os.path.join(od, 'output-{0}.jpg'.format(i)),
                        face_area=[50, 30, 500, 485],
                        alpha=0.75,
                        k_size=(15, 10),
                        mat_multiple=0.95)
        i = i + 1
# -*- coding: utf-8 -*-
# @Time    : 2017/9/2 13:40
# @Author  : Zheng Zibin
"""Demo: fuse the face from a snapshot into the model template image."""
import core

if __name__ == '__main__':
    merge_args = dict(
        src_img='images/model.jpg',
        dst_img='images/20171030175254.jpg',
        out_img='images/output.jpg',
        face_area=[50, 30, 500, 485],
        alpha=0.75,
        blur_detail_x=15,
        blur_detail_y=10,
        mat_multiple=0.95,
    )
    core.face_merge(**merge_args)
# -*- coding: utf-8 -*-
# @Time    : 2017/9/2 13:40
# @Author  : Zheng Zibin
"""Quick merge demo with a lower blend ratio (alpha=0.65)."""
import core

if __name__ == '__main__':
    template = '6.jpg'
    photo = '33.jpg'
    core.face_merge(
        src_img=template,
        dst_img=photo,
        out_img='output.jpg',
        face_area=[50, 30, 500, 485],
        alpha=0.65,
        blur_detail_x=15,
        blur_detail_y=10,
        mat_multiple=0.90,
    )
def reg():
    """Flask view: fuse an uploaded face photo into a gender-specific template
    and store the result, as a base64 data-URI, on the matching body row.

    Form fields:
      token           -- prefix naming the uploaded files under the work dir
      is_exp          -- 1 -> md_exp_body table, 0 -> md_account_body table
      account_body_id -- row whose sex is read and whose model_img is updated

    Returns a JSON response {"status", "result"}: 200/"successful" on
    success, 400/"failed" for an unknown is_exp or a missing account_body_id.
    """
    # Hand-measured landmarks of the female template (nvgai1.png).
    src_points_nv = [
        (701, 276), (701, 316), (707, 355), (710, 393), (714, 431), (720, 469),
        (726, 508), (736, 546), (749, 582), (768, 616), (794, 646), (826, 669),
        (862, 688), (900, 653), (941, 663), (983, 670), (1028, 674), (1072, 674),
        (1111, 668), (1147, 657), (1181, 691), (1214, 672), (1245, 646),
        (1270, 617), (1292, 583), (1306, 547), (1314, 509), (1320, 470),
        (1324, 431), (1326, 393), (1327, 354), (1325, 315), (1324, 276),
        (840, 308), (865, 284), (897, 277), (930, 286), (953, 311), (926, 316),
        (894, 319), (864, 313), (907, 297), (905, 295), (769, 155), (818, 147),
        (865, 171), (930, 177), (973, 191), (841, 248), (884, 247), (927, 251),
        (970, 260), (1000, 540), (961, 551), (937, 565), (975, 569), (1050, 541),
        (1087, 554), (1108, 569), (1072, 571), (1025, 547), (1025, 573),
        (922, 564), (1123, 568), (1073, 571), (1096, 590), (1064, 605),
        (976, 567), (951, 586), (983, 603), (1025, 571), (1024, 610),
        (1023, 292), (1024, 340), (1024, 388), (1024, 436), (983, 312),
        (961, 412), (944, 456), (969, 470), (997, 475), (1027, 485), (1066, 312),
        (1088, 412), (1107, 456), (1082, 470), (1055, 475), (1097, 319),
        (1121, 287), (1155, 278), (1188, 284), (1216, 301), (1189, 312),
        (1158, 318), (1126, 316), (1151, 297), (1149, 295), (1085, 185),
        (1127, 175), (1170, 173), (1213, 148), (1248, 154), (1087, 253),
        (1128, 247), (1169, 245), (1209, 248)
    ]
    # Hand-measured landmarks of the male template (nan.png).
    src_points_nan = [
        (735, 281), (731, 319), (736, 355), (738, 391), (741, 426), (745, 461),
        (751, 498), (758, 533), (768, 569), (782, 602), (803, 631), (832, 654),
        (867, 671), (904, 633), (944, 640), (985, 644), (1027, 645), (1068, 645),
        (1104, 641), (1137, 632), (1168, 669), (1199, 651), (1226, 626),
        (1246, 597), (1263, 565), (1274, 530), (1280, 495), (1284, 459),
        (1287, 424), (1288, 388), (1287, 353), (1283, 318), (1279, 283),
        (846, 314), (867, 289), (896, 282), (927, 289), (951, 309), (925, 315),
        (896, 318), (868, 315), (898, 300), (896, 298), (782, 158), (831, 152),
        (876, 176), (940, 180), (980, 192), (854, 253), (896, 251), (937, 254),
        (977, 261), (1005, 506), (970, 521), (951, 540), (983, 536), (1051, 506),
        (1086, 522), (1105, 541), (1072, 537), (1028, 512), (1028, 536),
        (938, 541), (1119, 542), (1072, 536), (1091, 555), (1061, 563),
        (985, 535), (964, 554), (993, 563), (1028, 534), (1027, 566),
        (1028, 294), (1028, 335), (1028, 376), (1027, 417), (988, 312),
        (968, 404), (953, 440), (977, 450), (1002, 454), (1028, 461),
        (1070, 312), (1088, 405), (1102, 441), (1078, 451), (1053, 455),
        (1106, 317), (1128, 289), (1157, 282), (1186, 289), (1208, 306),
        (1185, 315), (1159, 318), (1131, 314), (1159, 301), (1157, 299),
        (1075, 192), (1112, 181), (1155, 177), (1199, 152), (1238, 155),
        (1078, 260), (1116, 253), (1156, 250), (1196, 252)
    ]

    def json_response(status, result):
        # All responses serialize {"status": ..., "result": ...}.
        return make_response(json.dumps({"status": status, "result": result}))

    def fuse(sex, src_path, draw_path, out_path):
        # Pre-stretch the photo horizontally, detect its landmarks, then fuse
        # it into the gender-specific template (sex == 2 means female).
        img = cv2.imread(src_path)
        if sex == 2:
            bt = cv2.resize(img, None, fx=1.1, fy=1, interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(draw_path, bt)
            dst_points = core.face_points3(draw_path)
            core.face_merge1(
                src_img="/home/scy/face_fusion/yry-master/yry-master/images/nvgai1.png",
                src_points=src_points_nv,
                dst_img=draw_path,
                out_img=out_path,
                dst_points=dst_points,
                face_area=[705, 157, 629, 629],
                alpha=0.9,
                k_size=(1, 240),
                mat_multiple=1.3,
            )
        else:
            bt = cv2.resize(img, None, fx=1.3, fy=1, interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(draw_path, bt)
            dst_points = core.face_points2(draw_path)
            core.face_merge(
                src_img="/home/scy/face_fusion/yry-master/yry-master/images/nan.png",
                src_points=src_points_nan,
                dst_img=draw_path,
                out_img=out_path,
                dst_points=dst_points,
                face_area=[736, 559, 181, 559],
                alpha=0.9,
                k_size=(1, 260),
                mat_multiple=1.3,
            )

    token = request.form.get("token")
    is_exp = int(request.form.get("is_exp"))
    account_body_id = request.form.get("account_body_id")
    # Whitelist the table name: is_exp selects experience vs account bodies.
    if is_exp == 1:
        table = "md_exp_body"
    elif is_exp == 0:
        table = "md_account_body"
    else:
        return json_response("400", "failed")
    # fix: the original ran the SELECT and indexed fetchone() BEFORE checking
    # account_body_id, crashing with TypeError on a missing id; validate first.
    if not account_body_id:
        return json_response("400", "failed")
    a = ''.join(token)  # no-op for a str token; kept from the original
    source_image1_beauty = "/home/scy/face_fusion/pugongyin/" + a + "a.jpg"
    source_image1_draw = "/home/scy/face_fusion/pugongyin/" + a + "aceshi.jpg"
    face_image_output = "/home/scy/face_fusion/pugongyin/" + a + "bceshi.jpg"
    # NOTE(review): DB host and placeholder credentials are hard-coded — move
    # them to configuration.
    conn = pymysql.connect(host='192.168.1.228', port=3306, user='******',
                           password='******', db='pgy')
    try:  # fix: the original leaked the connection when an exception escaped
        cursor = conn.cursor()
        # `table` comes from the whitelist above, so interpolating it is safe;
        # the user-supplied id stays parameterized.
        cursor.execute("select sex from " + table + " where account_body_id=%s",
                       [account_body_id])
        sex = cursor.fetchone()[0]
        fuse(sex, source_image1_beauty, source_image1_draw, face_image_output)
        with open(face_image_output, "rb") as f:
            face_output = 'data:image/jpeg;base64,' + str(base64.b64encode(f.read()), 'utf-8')
        cursor.execute("update " + table + " set model_img=%s where account_body_id=%s",
                       [face_output, account_body_id])
        conn.commit()
        cursor.close()
    finally:
        conn.close()
    return json_response("200", "successful")
# -*- coding: utf-8 -*- import core # 引入这个包下的所有方法 if __name__ == '__main__': src = 'images/zhang.jpg' dst = 'core/huge_newsize.jpg' output = 'D:\\WWW\\bihuang\\Public\\upload\\face_ouput\\huge_output.jpg' src_points, _ = core.face_points(src) dst_points, _ = core.face_points(dst) core.face_merge(src_img='images/zhang.jpg', # 源图像.jpg src_points=src_points, # 源图像人脸关键点数组坐标 dst_img='core/huge_newsize.jpg', # 输入带融合的目标图像 out_img='D:\\WWW\\bihuang\\Public\\upload\\face_ouput\\huge_output.jpg', # 输出到指定的路径 dst_points=dst_points, # 目标图像人脸关键点数组坐标 face_area=[100, 100, 100, 100], # 模板图中人脸融合的位置左上角横坐标(left),左上角纵坐标(top),人脸框宽度(width),人脸框高度(height) alpha=0.65, # [0~1]融合比,比例越大目标图像的特征就越多 k_size=(300, 250), # 滤波窗口尺寸-图像均值平滑滤波模板 mat_multiple=1.2) # 缩放获取到的人脸心型区域-图像缩放因子
def reg():
    """Flask view: fuse an uploaded face photo into a gender-specific template
    and store the result, as a base64 data-URI, in fl_exp.faceTexture.

    Form fields: exp_id (names the work files and keys the DB row),
    face_data (read but unused here), sex (2 -> female template,
    1 -> male template, anything else -> 400 response).
    """
    exp_id = request.form.get("exp_id")
    exp_id = int(exp_id)
    face_data = request.form.get("face_data")  # read but not used below
    sex = request.form.get("sex")
    sex = int(sex)
    # Work files are keyed by exp_id inside the fixed working directory.
    source_image1_beauty = "/home/scy/face_fusion/pugongyin/" + str(
        exp_id) + "a.jpg"
    source_image1_draw = "/home/scy/face_fusion/pugongyin/" + str(
        exp_id) + "aceshi.jpg"
    model_image_nv = "/home/scy/face_fusion/yry-master/yry-master/images/nvgai1.png"
    face_image_output = "/home/scy/face_fusion/pugongyin/" + str(
        exp_id) + "bceshi.jpg"
    model_image_nan = "/home/scy/face_fusion/yry-master/yry-master/images/fenglei.jpg"
    if sex == 2:
        # Female: stretch the photo slightly in x, then fuse it into the
        # female template via face_points3/face_merge1.
        img_400x320 = cv2.imread(source_image1_beauty)
        bt = cv2.resize(img_400x320,
                        None,
                        fx=1.1,
                        fy=1,
                        interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(source_image1_draw, bt)
        # Hand-measured landmarks of the female template image.
        src_points = [
            (701, 276), (701, 316), (707, 355), (710, 393), (714, 431),
            (720, 469), (726, 508), (736, 546), (749, 582), (768, 616),
            (794, 646), (826, 669), (862, 688), (900, 653), (941, 663),
            (983, 670), (1028, 674), (1072, 674), (1111, 668), (1147, 657),
            (1181, 691), (1214, 672), (1245, 646), (1270, 617), (1292, 583),
            (1306, 547), (1314, 509), (1320, 470), (1324, 431), (1326, 393),
            (1327, 354), (1325, 315), (1324, 276), (840, 308), (865, 284),
            (897, 277), (930, 286), (953, 311), (926, 316), (894, 319),
            (864, 313), (907, 297), (905, 295), (769, 155), (818, 147),
            (865, 171), (930, 177), (973, 191), (841, 248), (884, 247),
            (927, 251), (970, 260), (1000, 540), (961, 551), (937, 565),
            (975, 569), (1050, 541), (1087, 554), (1108, 569), (1072, 571),
            (1025, 547), (1025, 573), (922, 564), (1123, 568), (1073, 571),
            (1096, 590), (1064, 605), (976, 567), (951, 586), (983, 603),
            (1025, 571), (1024, 610), (1023, 292), (1024, 340), (1024, 388),
            (1024, 436), (983, 312), (961, 412), (944, 456), (969, 470),
            (997, 475), (1027, 485), (1066, 312), (1088, 412), (1107, 456),
            (1082, 470), (1055, 475), (1097, 319), (1121, 287), (1155, 278),
            (1188, 284), (1216, 301), (1189, 312), (1158, 318), (1126, 316),
            (1151, 297), (1149, 295), (1085, 185), (1127, 175), (1170, 173),
            (1213, 148), (1248, 154), (1087, 253), (1128, 247), (1169, 245),
            (1209, 248)
        ]
        dst_points = core.face_points3(source_image1_draw)
        core.face_merge1(
            src_img=model_image_nv,
            src_points=src_points,
            dst_img=source_image1_draw,
            out_img=face_image_output,
            dst_points=dst_points,
            face_area=[705, 157, 629, 629],
            alpha=0.9,
            k_size=(1, 240),
            mat_multiple=1.3,
        )
        # Encode the fused image as a data URI and persist it on the exp row.
        with open(face_image_output, "rb") as f:
            base64_data1 = base64.b64encode(f.read())
        face_output = 'data:image/jpeg;base64,' + (str(base64_data1, 'utf-8'))
        # NOTE(review): DB host and placeholder credentials are hard-coded.
        conn = pymysql.connect(host='192.168.1.228',
                               port=3306,
                               user='******',
                               password='******',
                               db='fenglei')
        cursor = conn.cursor()
        cursor.execute("update fl_exp set faceTexture=%s where exp_id=%s",
                       [face_output, exp_id])
        conn.commit()
        cursor.close()
        conn.close()
        reslut = {}
        reslut["status"] = "200"
        reslut["result"] = "successful"
        json_str = json.dumps(reslut)
        rst = make_response(json_str)
        return rst
    elif sex == 1:
        # Male: same pipeline with the male template, different landmarks,
        # and face_points4/face_merge (note fx=1 keeps the photo unscaled).
        img_400x320 = cv2.imread(source_image1_beauty)
        bt = cv2.resize(img_400x320,
                        None,
                        fx=1,
                        fy=1,
                        interpolation=cv2.INTER_CUBIC)
        cv2.imwrite(source_image1_draw, bt)
        # Hand-measured landmarks of the male template image.
        src_points = [
            (762, 305), (762, 344), (767, 381), (769, 417), (772, 453),
            (775, 490), (780, 527), (786, 564), (796, 600), (811, 634),
            (831, 664), (857, 690), (885, 712), (916, 683), (951, 699),
            (988, 711), (1031, 715), (1077, 712), (1116, 702), (1153, 687),
            (1187, 717), (1222, 696), (1255, 670), (1281, 640), (1304, 606),
            (1318, 569), (1324, 529), (1328, 488), (1331, 448), (1332, 409),
            (1333, 369), (1330, 329), (1327, 287), (867, 335), (887, 312),
            (914, 306), (944, 314), (966, 335), (941, 340), (913, 344),
            (887, 339), (917, 322), (915, 320), (776, 194), (821, 176),
            (871, 194), (940, 202), (981, 222), (848, 279), (891, 272),
            (934, 279), (976, 293), (1003, 559), (972, 573), (959, 593),
            (987, 591), (1047, 558), (1079, 571), (1095, 590), (1066, 589),
            (1025, 564), (1026, 592), (947, 593), (1108, 589), (1066, 592),
            (1087, 611), (1061, 627), (987, 594), (967, 614), (992, 629),
            (1026, 595), (1027, 635), (1021, 316), (1020, 363), (1019, 410),
            (1019, 457), (987, 335), (973, 442), (959, 488), (978, 497),
            (999, 501), (1021, 505), (1056, 335), (1070, 441), (1086, 486),
            (1065, 496), (1044, 500), (1080, 343), (1105, 313), (1135, 306),
            (1164, 311), (1187, 325), (1165, 335), (1137, 341), (1108, 339),
            (1136, 322), (1134, 320), (1069, 227), (1113, 205), (1164, 196),
            (1210, 182), (1237, 204), (1073, 295), (1118, 279), (1161, 272),
            (1201, 283)
        ]
        dst_points = core.face_points4(source_image1_draw)
        core.face_merge(src_img=model_image_nan,
                        src_points=src_points,
                        dst_img=source_image1_draw,
                        out_img=face_image_output,
                        dst_points=dst_points,
                        face_area=[767, 217, 573, 573],
                        alpha=0.9,
                        k_size=(1, 260),
                        mat_multiple=1.3)
        # Encode the fused image as a data URI and persist it on the exp row.
        with open(face_image_output, "rb") as f:
            base64_data1 = base64.b64encode(f.read())
        face_output = 'data:image/jpeg;base64,' + (str(base64_data1, 'utf-8'))
        conn = pymysql.connect(host='192.168.1.228',
                               port=3306,
                               user='******',
                               password='******',
                               db='fenglei')
        cursor = conn.cursor()
        cursor.execute("update fl_exp set faceTexture=%s where exp_id=%s",
                       [face_output, exp_id])
        conn.commit()
        cursor.close()
        conn.close()
        reslut = {}
        reslut["status"] = "200"
        reslut["result"] = "successful"
        json_str = json.dumps(reslut)
        rst = make_response(json_str)
        return rst
    else:
        # Unknown sex value: report failure without touching the DB.
        reslut = {}
        reslut["status"] = "400"
        reslut["result"] = "failed"
        json_str = json.dumps(reslut)
        rst = make_response(json_str)
        return rst
# -*- coding: utf-8 -*-
# @Time    : 2017/9/2 13:40
# @Author  : Zheng Zibin
"""Demo: merge a snapshot's face into the model template (k_size variant)."""
import core

if __name__ == '__main__':
    template = 'images/model.jpg'
    snapshot = 'images/20171030175254.jpg'
    core.face_merge(src_img=template,
                    dst_img=snapshot,
                    out_img='images/output.jpg',
                    face_area=[50, 30, 500, 485],
                    alpha=0.75,
                    k_size=(15, 10),
                    mat_multiple=0.95)
# -*- coding: utf-8 -*-
# @Time    : 2017/9/2 13:40
# @Author  : Zheng Zibin
"""Compute landmarks for both images explicitly, then run the merge."""
import core

if __name__ == '__main__':
    src = 'images/model.jpg'
    dst = 'images/20171030175254.jpg'
    output = 'images/output.jpg'
    # Landmark detection returns (points, extra); only the points are used.
    src_points, _ = core.face_points(src)
    dst_points, _ = core.face_points(dst)
    core.face_merge(src_img=src,
                    src_points=src_points,
                    dst_img=dst,
                    out_img=output,
                    dst_points=dst_points,
                    face_area=[250, 150, 270, 250],
                    alpha=0.75,
                    k_size=(15, 10),
                    mat_multiple=0.95)
# -*- coding: utf-8 -*-
# @Time    : 2017/9/2 13:40
# @Author  : Zheng Zibin
#
# Parameter reference for core.face_merge:
#   src_img      -- model (template) picture
#   dst_img      -- picture to be fused
#   out_img      -- output path of the result image
#   face_area    -- face box inside the template as [top, left, width,
#                   height]; constraining it keeps large-scale warping
#                   inside the box
#   alpha        -- blend ratio in [0, 1]; larger keeps more dst_img features
#   k_size       -- blur kernel size used to soften the fusion edge
#   mat_multiple -- scale factor of the extracted heart-shaped face region
import core
from time import time

if __name__ == '__main__':
    t0 = time()
    core.face_merge(src_img='./images/b.jpg',
                    dst_img='./images/aa.jpg',
                    out_img='output/output_test_source.png',
                    face_area=[50, 30, 500, 485],
                    alpha=0.75,
                    k_size=(15, 10),
                    mat_multiple=0.95)
    t1 = time()
    print(str(t1 - t0) + "秒")
import core

# One-off merge: fuse the face from the item photo into the model photo,
# passing source, destination and output path positionally.
source_path = "images/temp_item.jpg"
target_path = "images/model_zbll.jpg"
output_path = "result/output_" + "test1.jpg"
core.face_merge(source_path, target_path, output_path)