Example #1
0
def face_merge(dst_img,
               src_img,
               out_img,
               face_area,
               alpha=0.75,
               skin_buff=0,
               skin_detail=0,
               skin_p=0,
               blur_detail_x=None,
               blur_detail_y=None,
               mat_multiple=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    :param dst_img: path of the target image the face is blended into
    :param src_img: path of the model/source image
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param skin_buff: accepted but never used in this variant — TODO confirm
    :param skin_detail: accepted but never used in this variant
    :param skin_p: accepted but never used in this variant
    :param blur_detail_x: x smoothing parameter forwarded to merge_img
    :param blur_detail_y: y smoothing parameter forwarded to merge_img
    :param mat_multiple: face-region scale factor forwarded to merge_img
    """
    # Landmarks are detected from the file paths before the pixels are loaded.
    src_mat, src_pts = core.face_points(src_img)
    dst_mat, dst_pts = core.face_points(dst_img)

    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    # Warp the target so its facial landmarks line up with the source's.
    dst_img = transformation_points(src_img, src_mat[core.FACE_POINTS],
                                    dst_img, dst_mat[core.FACE_POINTS])

    # The warp moved the landmarks; re-detect them on the warped image.
    _, dst_pts = core.face_points(dst_img)

    # Cross-fade the two faces.
    dst_img = morph_img(src_img, src_pts, dst_img, dst_pts, alpha)

    # morph_img may return floats; face_points needs an 8-bit image.
    dst_mat, dst_pts = core.face_points(dst_img.astype('uint8'))

    # Deform the model picture so the blended face drops into face_area.
    src_img = tran_src(src_img, src_pts, dst_pts, face_area)

    dst_img = merge_img(src_img, dst_img, dst_mat, dst_pts,
                        blur_detail_x, blur_detail_y, mat_multiple)

    cv2.imwrite(out_img, dst_img)
Example #2
0
def face_merge(src_img, dst_img, out_img,
               face_area, alpha=0.75,
               k_size=None, mat_multiple=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    :param src_img: source/model image path; this core.face_points() variant
        also returns the loaded image as a third element
    :param dst_img: target image path
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    :return: None
    """
    src_matrix, src_points, src_img = core.face_points(src_img)
    dst_matrix, dst_points, dst_img = core.face_points(dst_img)

    dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])

    # Landmark re-detection needs the intermediate images on disk.
    # FIX: both temp names used to be built from the millisecond clock alone,
    # so two snapshots within the same millisecond collided and the second
    # os.remove() raised FileNotFoundError; the files also leaked whenever any
    # step in between threw. Distinct suffixes plus try/finally fix both.
    stamp = str(int(time.time() * 1000))
    trans_file = 'images/' + stamp + '_trans.jpg'
    morph_file = 'images/' + stamp + '_morph.jpg'
    try:
        # Re-detect points on the warped target.
        cv2.imwrite(trans_file, dst_img)
        _, dst_points, _ = core.face_points(trans_file)

        dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)

        # Re-detect points on the blended face.
        cv2.imwrite(morph_file, dst_img)
        dst_matrix, dst_points, _ = core.face_points(morph_file)
    finally:
        # Remove the intermediates even when detection/morphing fails.
        for tmp in (trans_file, morph_file):
            if os.path.exists(tmp):
                os.remove(tmp)

    src_img = tran_src(src_img, src_points, dst_points, face_area)
    cv2.imwrite("images/1.jpg", src_img)  # debug snapshot — kept for parity
    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
    cv2.imwrite("images/2.jpg", dst_img)  # debug snapshot — kept for parity
    # Correcting colour last removes the dark blocky artefacts.
    dst_img = correct_color(src_img, dst_img, src_matrix[core.FACE_POINTS])

    cv2.imwrite(out_img, dst_img)

    return None
Example #3
0
def face_merge(src_img,
               dst_img,
               out_img,
               alpha=0.75,
               k_size=(10, 5),
               mat_multiple=0.5):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    Unlike the sibling variants, this one derives the face box from the
    detected source face instead of taking a face_area parameter.

    :param src_img: source/model image — a path or an already-loaded np.ndarray
    :param dst_img: target image — a path or an already-loaded np.ndarray
    :param out_img: output path for the fused image
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    :return: the err value from the last core.face_points() call
    """
    src_matrix, src_points, src_faces, err = core.face_points(src_img)
    # The target's face data could be passed in from the first detection to
    # save face-recognition API calls; here it is re-detected.
    dst_matrix, dst_points, dst_faces, err = core.face_points(dst_img)

    # Path inputs are loaded with OpenCV; ndarray inputs are used as-is.
    if not (isinstance(src_img, np.ndarray)
            and isinstance(dst_img, np.ndarray)):
        src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
        dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    dst_img = transformation_points(src_img=src_img,
                                    src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img,
                                    dst_points=dst_matrix[core.FACE_POINTS])

    # FIX: the warped image used to be dumped to 'images/trans.jpg' but the
    # file was never read back nor deleted, leaking a file on every call;
    # landmarks are re-detected directly from the in-memory image instead.
    _, dst_points, trans_faces, err = core.face_points(dst_img)

    dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)

    dst_matrix, dst_points, morph_faces, err = core.face_points(dst_img)

    # The face box comes from the detected source face: either a dict from the
    # recognition API or a sequence of rectangles (the last one wins).  The two
    # arms previously duplicated the whole tran_src() call.
    if isinstance(src_faces, dict):
        face_area = [
            int(src_faces['x']),
            int(src_faces['y']),
            int(src_faces['width']),
            int(src_faces['height'])
        ]
    else:
        face_area = [
            int(src_faces[-1][0]),
            int(src_faces[-1][1]),
            int(src_faces[-1][2]),
            int(src_faces[-1][3])
        ]
    src_img = tran_src(src_img, src_points, dst_points, face_area)

    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size,
                        mat_multiple)

    cv2.imwrite(out_img, dst_img)

    return err
Example #4
0
def face_merge(src_img,
               dst_img,
               out_img,
               face_area,
               alpha=0.75,
               k_size=None,
               mat_multiple=None,
               dst_matrix=None,
               dst_points=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    :param src_img: source/model image path
    :param dst_img: target image path
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    :param dst_matrix: optional pre-fetched landmark matrix for the target,
        so the face-recognition API is queried only once upstream
    :param dst_points: optional pre-fetched landmark list for the target
    :return: the err value from the last core.face_points() call

    FIX: the defaults used to be mutable lists (``dst_matrix=[]``), a classic
    Python pitfall, and an empty default crashed at the first index access;
    when no data is supplied it is now detected here instead.
    """
    src_matrix, src_points, src_faces, err = core.face_points(src_img)
    # Fall back to detecting the target's face data when it was not passed in.
    if dst_matrix is None or len(dst_matrix) == 0:
        dst_matrix, dst_points, _, err = core.face_points(dst_img)

    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    dst_img = transformation_points(src_img=src_img,
                                    src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img,
                                    dst_points=dst_matrix[core.FACE_POINTS])

    # FIX: both temp names used to be built from the millisecond clock alone,
    # so two snapshots within the same millisecond collided and the second
    # os.remove() raised; the files also leaked when any step threw.
    stamp = str(int(time.time() * 1000))
    trans_file = 'images/' + stamp + '_trans.jpg'
    morph_file = 'images/' + stamp + '_morph.jpg'
    try:
        # Re-detect points on the warped target.
        cv2.imwrite(trans_file, dst_img)
        _, dst_points, trans_faces, err = core.face_points(trans_file)

        dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)

        # Re-detect points on the blended face.
        cv2.imwrite(morph_file, dst_img)
        dst_matrix, dst_points, morph_faces, err = core.face_points(morph_file)
    finally:
        # Remove the intermediates even when detection/morphing fails.
        for tmp in (trans_file, morph_file):
            if os.path.exists(tmp):
                os.remove(tmp)

    # The old dict/str branch re-parsed src_faces with json.loads but then
    # called tran_src() identically in both arms, so the parse had no effect
    # and has been dropped.
    src_img = tran_src(src_img, src_points, dst_points, face_area)

    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size,
                        mat_multiple)
    # Correcting colour last removes the dark blocky artefacts.
    dst_img = correct_color(src_img, dst_img, src_matrix[core.FACE_POINTS])

    cv2.imwrite(out_img, dst_img)

    return err
Example #5
0
def face_merge(dst_img,
               dst_points,
               src_img,
               src_points,
               out_img,
               face_area,
               alpha=0.75,
               k_size=None,
               mat_multiple=None,
               is_peach=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    Landmarks for both images are supplied by the caller; only the
    intermediate stages are re-detected here.

    :param dst_img: target image path
    :param dst_points: pre-fetched landmark list for the target
    :param src_img: source/model image path
    :param src_points: pre-fetched landmark list for the source
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    :param is_peach: flag forwarded to merge_img — semantics defined there
    :return: the err value from the last core.face_points() call
    """
    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    # Align the target via a similarity transform on eyes + nose tip.
    dst_img = tran_similarity(src_img, [
        src_points[core.LEFT_EYE_CENTER], src_points[core.RIGHT_EYE_CENTER],
        src_points[core.NOSE_TOP]
    ], dst_img, [
        dst_points[core.LEFT_EYE_CENTER], dst_points[core.RIGHT_EYE_CENTER],
        dst_points[core.NOSE_TOP]
    ])

    trans_file = 'images/' + str(int(time.time() * 1000)) + '.jpg'
    cv2.imwrite(trans_file, dst_img)
    dst_points, err = core.face_points(trans_file)

    dst_img, dst_points = morph_img(src_img,
                                    src_points,
                                    dst_img,
                                    dst_points,
                                    alpha,
                                    show_bg=True)

    morph_file = 'images/' + str(int(time.time() * 1000)) + '.jpg'
    cv2.imwrite(morph_file, dst_img)
    dst_points, err = core.face_points(morph_file)

    # FIX: time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for interval timing.
    start = time.perf_counter()
    src_img = tran_src(src_img, src_points, dst_points, face_area)
    print('模特图变形耗时:', time.perf_counter() - start)

    start = time.perf_counter()
    dst_img = merge_img(src_img, dst_img, src_points, k_size, mat_multiple,
                        is_peach)
    print('人脸替换耗时:', time.perf_counter() - start)

    os.remove(trans_file)
    os.remove(morph_file)

    cv2.imwrite(out_img, dst_img)

    return err
Example #6
0
def getTriangleData(dst_img):
    """Return the landmark matrix and point list for the face in *dst_img*.

    NOTE(review): an early ``return`` had been inserted before the
    triangle-area computation this function is named after, making all of it
    unreachable; the dead block (two duplicated arms computing
    ``(x1^2 + x2^2 + x3^2 + y1^2 + y2^2 + y3^2) / 100`` from the pupils and a
    chin point of ``src_faces``) has been removed. Current behavior —
    returning only ``(src_matrix, src_points)`` — is preserved exactly.

    :param dst_img: image (path or array, whatever core.face_points accepts)
    :return: (landmark matrix, landmark point list)
    """
    src_matrix, src_points, src_faces, err = core.face_points(dst_img)
    return src_matrix, src_points
Example #7
0
    def get(self):
        """Handle a face-merge HTTP request.

        Query args: url (user image), cityIndex, locationIndex, gender.
        Downloads the user image, picks the matching model image, fuses the
        two faces at full strength and responds with the result's URL.
        """
        started = time.time()
        url = self.get_argument("url")
        city_idx = int(self.get_argument("cityIndex"))
        loc_idx = int(self.get_argument("locationIndex"))
        gender = int(self.get_argument("gender"))
        print('======Begin Face Merge ======: ',
              '==='.join([str(city_idx), url, str(loc_idx), str(gender)]))

        # The user-uploaded picture only needs to be downloaded once.
        dst_img = downloadImageFromUrl(url)
        dst_matrix, dst_points, dst_faces, err = core.face_points(dst_img)

        # Likewise the model picture is fetched a single time.
        src_img = getModelImage(city_idx, loc_idx, gender)

        # Produce the blended image and report its URL to the client.
        output_image = merge_one(src_img, dst_img, 1, dst_matrix, dst_points)
        self.write({"imageUrl": output_image})

        print('Total Cost: ', (time.time() - started))
Example #8
0
def face_merge(dst_img,
               src_img,
               out_img,
               face_area,
               alpha=0.75,
               skin_buff=0,
               skin_detail=0,
               skin_p=0,
               blur_detail_x=None,
               blur_detail_y=None,
               mat_multiple=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    :param dst_img: target image path
    :param src_img: source/model image path
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param skin_buff: strength of the optional skin-smoothing pre-pass
    :param skin_detail: detail level of the optional skin-smoothing pre-pass
    :param skin_p: extra smoothing parameter forwarded to core.skin_buffing
    :param blur_detail_x: x smoothing parameter forwarded to merge_img
    :param blur_detail_y: y smoothing parameter forwarded to merge_img
    :param mat_multiple: face-region scale factor forwarded to merge_img
    :return: the err value from the last core.face_points() call
    """
    src_matrix, src_points, err = core.face_points(src_img)
    dst_matrix, dst_points, err = core.face_points(dst_img)

    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    # Optional skin smoothing on the target before any warping.
    if skin_buff != 0 or skin_detail != 0:
        dst_img = core.skin_buffing(dst_img, skin_buff, skin_detail, skin_p)

    dst_img = transformation_points(src_img, src_matrix[core.FACE_POINTS],
                                    dst_img, dst_matrix[core.FACE_POINTS])

    # FIX: both temp names used to be built from the millisecond clock alone
    # (collision within one millisecond), they leaked when any step threw, and
    # the unconditional removal of the '.txt' sidecars raised when one was
    # absent. Distinct suffixes + try/finally + existence checks fix all three.
    stamp = str(int(time.time() * 1000))
    trans_file = 'images/' + stamp + '_trans.jpg'
    morph_file = 'images/' + stamp + '_morph.jpg'
    try:
        # Re-detect points on the warped target.
        cv2.imwrite(trans_file, dst_img)
        _, dst_points, err = core.face_points(trans_file)

        dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)

        # Re-detect points on the blended face.
        cv2.imwrite(morph_file, dst_img)
        dst_matrix, dst_points, err = core.face_points(morph_file)
    finally:
        # The '.txt' sidecars are presumably written by core.face_points —
        # TODO confirm; cleanup is best-effort either way.
        for tmp in (trans_file, morph_file):
            for path in (tmp, tmp + '.txt'):
                if os.path.exists(path):
                    os.remove(path)

    src_img = tran_src(src_img, src_points, dst_points, face_area)

    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points,
                        blur_detail_x, blur_detail_y, mat_multiple)

    cv2.imwrite(out_img, dst_img)

    return err
Example #9
0
def face_merge(dst_img,
               src_img,
               out_img,
               face_area,
               alpha=0.75,
               k_size=None,
               mat_multiple=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    :param dst_img: target image path
    :param src_img: source/model image path
    :param out_img: output path for the fused image
    :param face_area: [left, top, width, height] box the face is fitted into
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    :return: the err value from the last core.face_points() call
    """
    src_matrix, src_points, err = core.face_points(src_img)
    dst_matrix, dst_points, err = core.face_points(dst_img)

    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    # Warp the target so its landmarks line up with the source's.
    dst_img = transformation_points(src_img=src_img,
                                    src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img,
                                    dst_points=dst_matrix[core.FACE_POINTS])

    # Landmark re-detection works from files, so snapshot the warped target.
    trans_file = 'images/trans_' + str(int(time.time() * 1000)) + '.jpg'
    cv2.imwrite(trans_file, dst_img)
    _, dst_points, err = core.face_points(trans_file)

    # Blend the faces using the freshly re-detected landmarks.
    dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)

    # Snapshot the blended face and take landmarks once more.
    morph_file = 'images/morph_' + str(int(time.time() * 1000)) + '.jpg'
    cv2.imwrite(morph_file, dst_img)
    dst_matrix, dst_points, err = core.face_points(morph_file)

    # Reshape the model picture toward the blended landmarks.
    src_img = tran_src(src_img, src_points, dst_points, face_area)

    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size,
                        mat_multiple)

    # NOTE: the trans_/morph_ intermediates (and their sidecar files) are
    # intentionally left on disk — their removal was disabled upstream.
    cv2.imwrite(out_img, dst_img)

    return err
Example #10
0
# -*- coding: utf-8 -*-


import core  # 引入这个包下的所有方法

if __name__ == '__main__':
    # Input/output paths, defined once and reused below.
    # FIX: the same literals used to be duplicated inline in the face_merge()
    # call, so editing one copy silently desynchronised the other.
    src = 'images/zhang.jpg'
    dst = 'core/huge_newsize.jpg'
    output = 'D:\\WWW\\bihuang\\Public\\upload\\face_ouput\\huge_output.jpg'

    # Landmarks are fetched once up front so face_merge need not re-detect.
    src_points, _ = core.face_points(src)
    dst_points, _ = core.face_points(dst)

    core.face_merge(src_img=src,            # source image (.jpg)
                    src_points=src_points,  # source face landmark coordinates
                    dst_img=dst,            # target image to fuse into
                    out_img=output,         # destination path for the result
                    dst_points=dst_points,  # target face landmark coordinates
                    face_area=[100, 100, 100, 100],  # left, top, width, height of the face box in the template
                    alpha=0.65,             # [0..1] blend ratio; larger keeps more target features
                    k_size=(300, 250),      # mean-smoothing filter window size
                    mat_multiple=1.2)       # scale factor for the extracted face region
Example #11
0
def face_merge(dst_img, src_img, out_img,
               face_area, alpha=0.75,
               k_size=None, mat_multiple=None):
    """Blend the face of *src_img* into *dst_img* and write the result to *out_img*.

    NOTE(review): unlike the sibling implementations, this variant skips the
    tran_src() re-warp of the model image and the post-morph landmark
    re-detection (both were disabled upstream), and it never deletes the
    temp/ intermediates. Behavior is preserved; only dead commented-out code,
    an unused timing local and `== False` comparisons were cleaned up.

    :param dst_img: target image path
    :param src_img: source/model image path
    :param out_img: output path for the fused image
    :param face_area: accepted for interface parity; unused in this variant
    :param alpha: [0..1] blend ratio; larger keeps more of the target's features
    :param k_size: mean-filter kernel size used by merge_img
    :param mat_multiple: face-region scale factor used by merge_img
    """
    print(src_img)
    # Tag for the temp files; assumes a path shaped like './dir/name.jpg'
    # (the [1] segment only holds the stem when the path starts with '.') —
    # TODO confirm against callers.
    dst_name = dst_img.split('.')[1].split('/')[-1]

    start = time.time()
    # Landmarks are detected from the file paths before the pixels are loaded.
    src_matrix, src_points = core.face_points(src_img)
    dst_matrix, dst_points = core.face_points(dst_img)

    src_img = cv2.imread(src_img, cv2.IMREAD_COLOR)
    dst_img = cv2.imread(dst_img, cv2.IMREAD_COLOR)

    # Upscale both images by the module-wide SCALE_FACTOR.
    src_img = cv2.resize(src_img, (src_img.shape[1] * SCALE_FACTOR,
                                   src_img.shape[0] * SCALE_FACTOR))
    dst_img = cv2.resize(dst_img, (dst_img.shape[1] * SCALE_FACTOR,
                                   dst_img.shape[0] * SCALE_FACTOR))
    print("********************")

    # Warp the target so its landmarks line up with the source's.
    dst_img = transformation_points(src_img=src_img, src_points=src_matrix[core.FACE_POINTS],
                                    dst_img=dst_img, dst_points=dst_matrix[core.FACE_POINTS])

    trans_file = 'temp/' + dst_name + str(time.time()) + '_trans.jpg'
    if not os.path.isfile(trans_file):
        cv2.imwrite(trans_file, dst_img)

    # Re-detect landmarks on the warped image (read back from disk).
    dst_matrix, dst_points = core.face_points(trans_file)

    start_morph = time.time()
    dst_img = morph_img(src_img, src_points, dst_img, dst_points, alpha)
    stop_morph = time.time()
    print("morph用时" + str(stop_morph - start_morph) + "秒")

    # Snapshot of the morphed face; kept on disk for inspection.
    morph_file = 'temp/' + dst_name + str(time.time()) + '_morph.jpg'
    if not os.path.isfile(morph_file):
        cv2.imwrite(morph_file, dst_img)

    # Paste the blended face region onto the model image (Poisson blending).
    start_merge = time.time()
    dst_img = merge_img(src_img, dst_img, dst_matrix, dst_points, k_size, mat_multiple)
    stop_merge = time.time()
    print("merge用时" + str(stop_merge - start_merge) + "秒")

    cv2.imwrite(out_img, dst_img)
    stop = time.time()
    print("这张用时" + str(stop - start) + "秒")