Code Example #1
File: demo_api.py Project: iszhyang/face_recognition
def face_recognition(image_path, detection_save_path, threshold=THRESHOLD):
    """
    给Web端使用的api接口
    :param image_path:需要识别的人脸路径
    :param detection_save_path:人脸检测结果保存的路径
    :param threshold:判定是同一个人的阈值
    :return:预测结果(str), 明星其他照片的路径(list), 相似度(float)
    """
    # 加载图片 以RGB通道的形式加载
    image = Faces.load_image_file(image_path)

    # Crop the stdface out of the image and save the face-detection result to detection_save_path
    time_start = time.time()
    stdface = Faces.get_stdface(image, detection_save_path=detection_save_path)
    time_end = time.time()
    print("[ ]Time to get stdface: {}".format(time_end - time_start))

    # Return empty values when no face is detected
    if stdface is None:
        return None, None, 0.0

    # The face location is the boundary of the stdface
    top, right, bottom, left = 0, 255, 255, 0

    # Compute the face encoding
    time_start = time.time()
    test_encoding = Faces.face_encodings(stdface, [(top, right, bottom, left)],
                                         NUM_JITTERS)[0]
    time_end = time.time()
    print("[ ]Time to get encoding: {}".format(time_end - time_start))

    # Compare against the known face-encoding database to get the distance to every known face
    distances = Faces.face_distance(known_face_encodings, test_encoding)

    # Find the smallest distance
    min_distance_id = find_min_index(distances)
    min_distance = distances[min_distance_id]

    # Fit a similarity score from the distance
    similarity = Faces.get_similarity(min_distance)

    image_paths = []

    # If the smallest distance exceeds the threshold, treat it as no match in the face database
    if min_distance > threshold:
        return "unknown", image_paths, 0.0

    # Look up the predicted ID and name from the local data
    predict_result_id = id_list[min_distance_id]
    predict_result = name_list[predict_result_id]

    for image_path in image_file_list[predict_result_id]:
        # basename = posixpath.basename(image_path)
        # Uncomment to display the photos locally:
        # image = cv2.imread(image_path)
        # cv2.imshow(basename, image)
        image_paths.append(image_path)

    return predict_result, image_paths, similarity
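A minimal usage sketch (assuming the module-level THRESHOLD, known_face_encodings and name/id lists are already loaded; the file paths are hypothetical):

# Hypothetical paths; face_recognition() is the function shown above
result, photo_paths, similarity = face_recognition("uploads/test.jpg",
                                                   "uploads/test_de.jpg")
if result is None:
    print("No face detected")
else:
    print(result, similarity, photo_paths)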
Code Example #2
def test_image(image_path, known_face_encodings_, threshold=0.6):
    time_start = time.time()
    print("\n[f]当前检测的图片为:{}".format(image_path))
    image = Faces.load_image_file(image_path)
    stdface = Faces.get_stdface(image)

    # The face location is the boundary of the stdface
    top, right, bottom, left = 0, 255, 255, 0

    # Compute the face encoding
    test_encoding = Faces.face_encodings(stdface, [(top, right, bottom, left)],
                                         1)[0]

    distances = Faces.face_distance(known_face_encodings_, test_encoding)

    min_distance_id = find_min_index(distances)
    min_distance = distances[min_distance_id]
    similarity = Faces.get_similarity(min_distance)
    if min_distance > threshold:
        predict_result = "未知"
        flag = '[-]'
    else:
        predict_result = name_list[id_list[min_distance_id]]
        flag = '[+]'

    print(
        f"{flag}Prediction: {predict_result:10} min Euclidean distance {min_distance:.3f}    similarity {similarity:.2f}"
    )

    # Draw the result on the stdface
    pil_image = Image.fromarray(stdface)
    draw = ImageDraw.Draw(pil_image)

    draw.rectangle(((left, bottom - 20), (right, bottom)),
                   fill=(220, 133, 0),
                   outline=(220, 133, 0))

    font = ImageFont.truetype("simhei.ttf", 15, encoding="utf-8")
    draw.text((left + 6, bottom - 18),
              predict_result,
              fill=(255, 255, 255, 255),
              font=font)

    time_end = time.time()
    print("[t]检测本张图片,花费时间{}s".format((time_end - time_start)))

    del draw
    pil_image.show()
    pil_image.save(posixpath.join("result", image_path.split("/")[-1]))
Code Example #3
File: demo_api.py Project: iszhyang/face_recognition
def main():
    dir_path = r'static\images\known'
    for file in os.listdir(dir_path):
        time_start = time.time()

        file_path = os.path.join(dir_path, file)
        image = Faces.load_image_file(file_path)
        expression, expr_score_list = expression_recognition(image)
        print("[+]表情识别预测结果为\"{}\"".format(expression))
        for expr_name, expr_score in zip(EMOTIONS, expr_score_list):
            print('   {:>11} score: {:.3%}'.format(expr_name, expr_score))

        time_end = time.time()
        print("[t]本次识别花费{:.2f}s".format((time_end - time_start)))

        cv2.imshow('image', Faces.rgb2bgr(image))
        cv2.waitKey(0)
Code Example #4
File: demo_api.py Project: iszhyang/face_recognition
def stdface_recognition(stdface, threshold=THRESHOLD):
    """
    直接将stdface与已知人脸数据库做比对
    :param stdface:  256×256的stdface
    :param threshold: 阈值
    :return: 预测结果(str), 明星其他照片的路径(list), 相似度(float)
    """

    # The face location is the boundary of the stdface
    top, right, bottom, left = 0, 255, 255, 0

    # Compute the face encoding
    time_start = time.time()
    test_encoding = Faces.face_encodings(stdface, [(top, right, bottom, left)],
                                         NUM_JITTERS)[0]
    time_end = time.time()
    print("[ ]Time to get encoding: {}".format(time_end - time_start))

    # Compare against the known face-encoding database to get the distance to every known face
    distances = Faces.face_distance(known_face_encodings, test_encoding)

    # Find the smallest distance
    min_distance_id = find_min_index(distances)
    min_distance = distances[min_distance_id]

    # Fit a similarity score from the distance
    similarity = Faces.get_similarity(min_distance)

    image_paths = []

    # If the smallest distance exceeds the threshold, treat it as no match in the face database
    if min_distance > threshold:
        return "unknown", image_paths, 0.0

    # Look up the predicted ID and name from the local data
    predict_result_id = id_list[min_distance_id]
    predict_result = name_list[predict_result_id]

    for image_path in image_file_list[predict_result_id]:
        # basename = posixpath.basename(image_path)
        # Uncomment to display the photos locally:
        # image = cv2.imread(image_path)
        # cv2.imshow(basename, image)
        image_paths.append(image_path)

    return predict_result, image_paths, similarity
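A usage sketch for comparison with Example #1, assuming Faces.load_image_file and Faces.get_stdface behave as shown above; the path is hypothetical:

image = Faces.load_image_file("uploads/test.jpg")  # hypothetical path
stdface = Faces.get_stdface(image)
if stdface is not None:
    result, photo_paths, similarity = stdface_recognition(stdface)
    print(result, similarity)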
Code Example #5
def __init__(self):
    super(UI, self).__init__()
    self.initUI()
    self.graphic = Faces.Face()
    self.mainLayout = QHBoxLayout()
    self.setLayout(self.mainLayout)
    self.mainLayout.addWidget(self.graphic)
    self.graphic.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.graphic.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.mainLayout.setMargin(0)
    self.mainLayout.setSpacing(0)
Code Example #6
File: demo_api.py Project: iszhyang/face_recognition
def expression_recognition(image):
    """
    识别图像中人物的表情
    :param image: 传入的图像
    :return: 返回值1为表情id(int) 返回值2位表情指数数组(ndarray)
    """
    stdface = Faces.get_stdface(image)

    if stdface is None:
        return None, None

    # First format the stdface, then flatten it into a 1-D tensor
    tensor = image_to_tensor(format_image(stdface))
    # Feed the flattened image tensor into the neural network to get the output
    result = sess.run(probs, feed_dict={face_x: tensor})

    # The recognized expression is the one with the highest score
    expression_id = np.argmax(result[0])

    return expression_id, result[0]
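A short usage sketch (EMOTIONS is the label list used elsewhere in this project; the path is hypothetical):

image = Faces.load_image_file("uploads/test.jpg")  # hypothetical path
expression_id, scores = expression_recognition(image)
if expression_id is not None:
    print(EMOTIONS[expression_id], scores[expression_id])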
Code Example #7
                    default=256)
parser.add_argument("-T",
                    help="Prepare training and test data",
                    default="False",
                    action="store_true")
parser.add_argument("--split",
                    type=float,
                    help="training to test split (0 to 1)",
                    default=0.8)

args = parser.parse_args()
purpose = "Training" if args.T else "Development"
paths = []

faceCount = int(np.ceil(args.N / 2))
paths.append(Faces.Pull(purpose=purpose, count=faceCount))

objCount = int(np.floor(args.N / 2))
paths.append(Objects.Pull(purpose=purpose, count=objCount))

print("Merging data...")
batchNum = len(glob(join("Dataset", purpose, "batch*")))
#Initialize numpy arrays
firstPath = paths.pop(0)
currFile = np.load(firstPath + ".npz")
count = int(np.ceil(len(currFile["data"]) * args.split)) if args.T else args.N
data, isFace, labels = currFile["data"][:count], currFile[
    "isFace"][:count], currFile["labels"][:count]
testData, testIsFace, testLabels = currFile["data"][count:], currFile[
    "isFace"][count:], currFile["labels"][count:]
#Merge arrays
Code Example #8
def createIcoGrid(subdiv):

    ico = icosphere(scale=1, subdiv=subdiv)
    verts_cart = ico.verts
    faces = ico.faces

    rE = 6371000
    nf = len(faces)
    nv = len(verts_cart)

    #######################
    #### PRIMARY GRID #####
    #######################

    # POINTS
    F = Faces.Faces(nf)
    F.rE = rE
    F.nSubdiv = subdiv

    for fi in range(0, len(faces)):
        xmean = 0
        ymean = 0
        zmean = 0
        for vi in faces[fi]:
            xmean = xmean + verts_cart[vi][0]
            ymean = ymean + verts_cart[vi][1]
            zmean = zmean + verts_cart[vi][2]
        xmean = xmean / 3
        ymean = ymean / 3
        zmean = zmean / 3
        fact = 1 / np.sqrt(xmean**2 + ymean**2 + zmean**2)
        xmean = xmean * fact
        ymean = ymean * fact
        zmean = zmean * fact
        # points_polar
        lat = np.arcsin(zmean) * (180 / np.pi)
        lon = np.arctan2(ymean, xmean) * (180 / np.pi) + 180
        cart_mean = [xmean, ymean, zmean]
        polar = cart_to_polar(cart_mean)

        # Fill Point Class Object
        F.lons[fi] = polar[0]
        F.lats[fi] = polar[1]
        F.carts[fi, :] = [xmean, ymean, zmean]
        F.Vids[fi, :] = faces[fi]

    # VERTICES IN POLAR COORDINATES
    V = Vertices.Vertices(nv)
    for vi in range(0, nv):
        V.carts[vi, :] = verts_cart[vi]
        V.carts[vi, :] = V.carts[vi, :]
        V.lats[vi] = np.arcsin(V.carts[vi, 2]) * (180 / np.pi)
        V.lons[vi] = np.arctan2(V.carts[vi, 1],
                                V.carts[vi, 0]) * (180 / np.pi) + 180

    # CALCULATE AREAS OF SPHERICAL TRIANGLES
    for fcid in F.ids:
        F.A[fcid] = rE**2 * spherical_triangle_area(
            V.carts[F.Vids[fcid, 0], :], V.carts[F.Vids[fcid, 1], :],
            V.carts[F.Vids[fcid, 2], :])

    #plt,ax = plotIcosphere(F, V, np.zeros(nf), 'H', 0, 0, 0)
    #plt.show()
    #quit()

    # FIND NEIGHBOUR POINTS
    for fi in F.ids:
        if fi % 100 == 0:
            print(fi / F.nf * 100, '%')
        NFids = np.full(3, -1, int)
        c = 0
        for fii in F.ids[F.ids != fi]:
            if np.sum(np.in1d(F.Vids[fii], F.Vids[fi])) == 2:
                NFids[c] = fii
                c = c + 1
        F.NFids[fi, :] = NFids

    # CREATE FLUXES
    FL = Fluxes.Fluxes(F.nf)
    for fcid in F.ids:
        if fcid % 60 == 0:
            print(fcid / F.nf * 100, '%')
        #print('####',fid)
        nfcids = F.NFids[fcid]
        j = 0

        for nfcid in nfcids:
            flid = FL.exists([fcid, nfcid])
            fldir = 1  # points inside
            if flid is None:
                nflon = F.lons[nfcid]
                nflat = F.lats[nfcid]
                if nflon - F.lons[fcid] < -300:
                    nflon = nflon + 360
                elif nflon - F.lons[fcid] > 300:
                    nflon = nflon - 360
                vnlon = nflon - F.lons[fcid]
                vnlat = nflat - F.lats[fcid]
                vn = [vnlon, vnlat]
                vn = vn / np.linalg.norm(vn)
                rotMat = np.array([[0, -1], [1, 0]])
                vp = np.dot(vn, rotMat)

                # vn_cart
                vn_cart = F.carts[nfcid] - F.carts[fcid]
                vn_cart = vn_cart / np.linalg.norm(vn_cart)

                # edge length (distance between vertices)
                vertids = F.Vids[fcid, np.in1d(F.Vids[fcid], F.Vids[nfcid])]
                vert_carts = V.carts[vertids, :]
                d_edge = rE * great_circle_dist(vert_carts[0, :],
                                                vert_carts[1, :])

                # NEW PERPENDICULAR VECTOR
                dlon = V.lons[vertids[1]] - V.lons[vertids[0]]
                dlat = V.lats[vertids[1]] - V.lats[vertids[0]]
                if dlon < -250:
                    dlon = dlon + 360
                elif dlon > 250:
                    dlon = dlon - 360
                vpn = [dlon, dlat]
                vpn = vpn / np.linalg.norm(vpn)

                # rotate those oriented in the wrong direction
                if np.cross(vn, vpn) > 0:
                    vpn = vpn * -1

                vp = vpn
                # rotate by 90 degrees to get vn
                rotMat = np.array([[0, 1], [-1, 0]])
                vn = np.dot(vp, rotMat)

                # distance between points
                d_points = rE * great_circle_dist(F.carts[fcid, :],
                                                  F.carts[nfcid, :])

                # determine cartesian coordinates of flux
                flux_cart = find_midpoint(vert_carts[0, :], vert_carts[1, :])

                # determine polar coordinates of flux
                flux_polar = cart_to_polar(flux_cart)

                # normalized distances from the two face centres to the flux point
                dist2 = great_circle_dist(F.carts[nfcid, :], flux_cart)
                dist1 = great_circle_dist(F.carts[fcid, :], flux_cart)
                totdist = dist1 + dist2
                dist1 = dist1 / totdist
                dist2 = dist2 / totdist
                # weights of neighbor points
                wght1 = 1 - dist1
                wght2 = 1 - dist2

                # CREATE FLUX
                flid = FL.add([fcid, nfcid], [wght1, wght2], vertids,
                              flux_cart, flux_polar[0], flux_polar[1], vn, vp,
                              d_edge, d_points, vn_cart)

                fldir = -1  # points outside
            F.FLids[fcid, j] = flid
            F.FLdirs[fcid, j] = fldir
            j = j + 1

    ## FIND NEIGHBOR FLUXES IDS
    #for flid in FL.ids:
    #    Fids = FL.Fids[flid]
    #    NFLids = []
    #    NFLdirs = []
    #    for Fid in Fids:
    #        FLids,FLdirs = F.getFluxes(Fid)
    #        NFLids.extend(FLids[FLids != flid])
    #        NFLdirs.extend(FLdirs[FLids != flid])
    #    FL.NFLids[flid,0:len(NFLids)] = NFLids
    #    FL.NFLdirs[flid,0:len(NFLids)] = NFLdirs

    # STORE ARRAYS NECESSARY FOR RADIAL BASIS FUNCTION RECONSTRUCTION
    for fcid in F.ids:
        for j, flxid_j in enumerate(F.FLids[fcid]):
            F.rbf_phi0[fcid,
                       j, :] = calc_rbf_phi(F.carts[fcid], FL.carts[flxid_j],
                                            FL.vn[flxid_j])
            for k, flxid_k in enumerate(F.FLids[fcid]):
                F.rbf_phi[fcid, j,
                          k] = calc_rbf_phi(FL.carts[flxid_k],
                                            FL.carts[flxid_j], FL.vn[flxid_k],
                                            FL.vn[flxid_j])
        # invert matrix
        F.rbf_phi[fcid, :, :] = np.linalg.inv(F.rbf_phi[fcid, :, :])

    #######################
    ### SECONDARY GRID ####
    #######################

    # FIND FLUX IDS AND DIRECTION AS WELL AS FACE IDS
    for flxid in FL.ids:
        vrtids = FL.Vertids[flxid]
        fcids = FL.Fids[flxid]
        for vrtid in vrtids:
            V.FLids[vrtid][np.argwhere(np.isnan(V.FLids[vrtid]))[0]] = flxid
            for fcid in fcids:
                vrtfcids = V.Fids[vrtid]
                if fcid not in vrtfcids:
                    V.Fids[vrtid][np.argwhere(np.isnan(
                        V.Fids[vrtid]))[0]] = fcid

    for vrtid in V.ids:
        vrt_area = 0
        flxids = V.FLids[vrtid, :]
        flxids = flxids[~np.isnan(flxids)].astype(int)
        for flxid in flxids:
            # neighbouring vertex indices
            nvrtid = FL.Vertids[flxid, FL.Vertids[flxid, :] != vrtid]
            insertInd = np.argwhere(np.isnan(V.NVrtids[vrtid, :]))[0]
            # set the neighbouring vertex id
            V.NVrtids[vrtid, insertInd] = nvrtid

            vrtToNvrt = np.zeros(2)
            vrtToNvrt[:] = [
                V.lons[nvrtid] - V.lons[vrtid], V.lats[nvrtid] - V.lats[vrtid]
            ]
            vrtToNvrt = vrtToNvrt / np.linalg.norm(vrtToNvrt)

            # vector pointing to the neighbouring vertex
            toN = V.carts[nvrtid] - V.carts[vrtid]
            toN = toN / np.linalg.norm(toN)

            # rotate it such that n_vl x t_vl = k_l
            # t_vl is the tangential component
            k_l = FL.carts[flxid]
            t_vl = -np.cross(toN, k_l)

            # find direction (contribution) of flux to vorticity
            # this means +1 = counterclockwise, -1 = clockwise
            direction = np.round(np.dot(t_vl, FL.vn_cart[flxid, :]), 0)

            ## polar method of the same
            #rotMat = np.array([[0, 1],[-1, 0]])
            #tangential = np.dot(vrtToNvrt, rotMat)

            ##direction = np.round(np.dot(tangential,FL.vn[flxid,:]))
            #direction = np.dot(tangential,FL.vn[flxid,:])
            ## TODO: is there a mistake? not all values are close to 1 or -1
            #direction = np.sign(direction)

            # insert direction to Vertice
            insertInd = np.argwhere(np.isnan(V.FLdirs[vrtid, :]))[0]
            V.FLdirs[vrtid, insertInd] = direction

            fcids = FL.Fids[flxid, :]
            fcCentres = F.carts[fcids, :]
            triang_area = spherical_triangle_area(V.carts[vrtid],
                                                  fcCentres[0, :],
                                                  fcCentres[1, :])
            vrt_area = vrt_area + triang_area

        V.A[vrtid] = vrt_area * rE**2

    saveIcoGrid(F, V, FL)
    return (F, V, FL)
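A usage sketch, assuming the helper modules used in this snippet (icosphere, Faces, Vertices, Fluxes) are importable; the subdivision level is arbitrary:

# Build the icosahedral primary/secondary grid and the flux objects
F, V, FL = createIcoGrid(subdiv=4)
print("faces:", F.nf, "vertices:", len(V.ids), "fluxes:", len(FL.ids))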
Code Example #9
File: api.py Project: rllin/faces_rest
def post(self):
    face = Faces.detect_face(request.get_json(force=True)['image'])
    return {'face': face}
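A client-side sketch for this endpoint using requests. The URL, port and base64 encoding are assumptions; the snippet only shows that the JSON body must contain an 'image' field:

import base64
import requests

# Hypothetical image file and endpoint URL
with open("test.jpg", "rb") as fp:
    encoded_image = base64.b64encode(fp.read()).decode("ascii")

resp = requests.post("http://localhost:5000/face", json={"image": encoded_image})
print(resp.json()["face"])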
Code Example #10
def facercg():
    # The user submitted a POST form
    if request.method == 'POST':

        # Record the time cost of each stage
        timecosts = []

        # Start timing as soon as the POST request arrives
        time_post_start = time.time()
        print("[ ]Start handling the POST request")

        # If the POST form contains no file, ask the user to choose an image first
        if 'file' not in request.files:
            return render_template('facercg.html', unknown_show=True,
                                   similarity=0, result="unknown",
                                   message="Please choose an image first")

        # Read the file from the form
        file = request.files['file']

        # The file is valid
        if file and allowed_file(file.filename):
            # Use a safe file name to avoid Chinese and other special characters
            file_type = secure_filename(file.filename).split('.')[-1]
            file_name = get_current_time() + '.' + file_type
            print(file_name)

            # Path where the uploaded image is saved
            image_path = posixpath.join(SAVE_DIR, file_name)

            # File name of the face-detection result image
            detection_save_name = file_name.split('.')[0] + '_de.jpg'

            # Path where the face-detection result image is saved
            detection_save_path = posixpath.join(SAVE_DIR, detection_save_name)

            # If the file name already exists, append a counter (incremented so the loop terminates)
            cnt = 1
            basename, filetype = file_name.split('.')
            while os.path.exists(image_path):
                file_name = basename + '_' + str(cnt) + '.' + filetype
                image_path = posixpath.join(SAVE_DIR, file_name)
                cnt += 1
            file.save(image_path)
            print("[+]Saved successfully! Save path: {}".format(image_path))

            # Transfer finished; record the transfer time
            time_trans_end = time.time()
            trans_timecost = time_trans_end - time_post_start
            timecosts.append(trans_timecost)

            image = Faces.load_image_file(image_path)

            # Run face detection to get the angle-corrected 256*256 standard face
            stdface = Faces.get_stdface(image, detection_save_path)
            # Face detection finished; record how long it took
            time_detection_end = time.time()
            detection_timecost = time_detection_end - time_trans_end
            timecosts.append(detection_timecost)

            # No face could be detected
            if stdface is None:
                print(f"[-]No face detected. Receiving the file + detection took "
                      f"{(time_detection_end - time_post_start):.1f}s, "
                      f"of which face detection took {detection_timecost:.1f}s")
                return render_template('facercg.html', image_paths=None,
                                       left_photo_path=image_path,
                                       similarity=0.0, result=None,
                                       message="No face could be detected")

            # Run face recognition to get the result
            result, image_paths, similarity = demo_api.stdface_recognition(stdface)
            # Format the similarity value
            format_similarity = str(similarity)[:5] + "%"
            # Face recognition finished; record how long it took
            time_recognition_end = time.time()
            recognition_timecost = time_recognition_end - time_detection_end
            timecosts.append(recognition_timecost)

            # Call the expression-recognition API
            expression_id, expr_score_list = demo_api.stdface_expression_recognition(stdface)
            expression_result = demo_api.EMOTIONS[expression_id]
            expression_result_ch = demo_api.EMOTIONS_CH[expression_id]

            # Build the expression-recognition result data
            expression_data = []
            for expr_name, expr_score in zip(demo_api.EMOTIONS_CH, expr_score_list):
                temp_dict = {'name': expr_name, 'y': expr_score * 100.0}
                expression_data.append(temp_dict)

            # Path of the emoji image
            emoji_path = posixpath.join(EMOJI_FOLDER_PATH, expression_result + '.png')

            # Expression recognition finished; record how long it took
            time_expression_end = time.time()
            expression_timecost = time_expression_end - time_recognition_end
            timecosts.append(expression_timecost)

            # Save the time costs of this request locally
            utils.push_timecost(file_name, timecosts)

            # A face was detected but there is no match in the database
            if result == "unknown":
                print(f"[-]No match in the database. Receiving the file + recognition took "
                      f"{(time_recognition_end - time_post_start):.1f}s, "
                      f"of which face recognition took {(time_recognition_end-time_trans_end):.1f}s")
                return render_template('facercg.html', image_paths=image_paths, left_photo_path=image_path,
                                       similarity=format_similarity, result=result, message="No matching face in the database",
                                       expression_data=expression_data, emoji_path=emoji_path,
                                       expression_result=expression_result_ch)
            # Recognition succeeded
            elif result is not None:
                print(f"[+]Recognition succeeded. Receiving the file + recognition took "
                      f"{(time_recognition_end - time_post_start):.1f}s, "
                      f"of which face recognition took {(time_recognition_end-time_trans_end):.1f}s")
                return render_template('facercg.html', image_paths=image_paths, left_photo_path=detection_save_path,
                                       similarity=format_similarity, result=result, message="Recognition succeeded",
                                       expression_data=expression_data, emoji_path=emoji_path,
                                       expression_result=expression_result_ch)
        # The file is invalid
        else:
            print("[-]Image upload error")
            left_photo_path = url_for('static', filename='images/loading.gif')
            return render_template('facercg.html', left_photo_path=left_photo_path,
                                   similarity=0, result="unknown", message="Image upload error")

    # The user sent a GET request; just return the page
    else:
        return render_template('facercg.html', unknown_show=True, similarity=0, result="unknown", message=None)
Code Example #11
        if re.match(r'.*\.(jpg|jpeg|png)', f_, flags=re.I)
    ]


id_list = []
face_encodings = []

print("---开始读取文件---")
for directory in os.listdir(FACES_FOLDER):
    print("[ ]开始处理文件夹{}".format(directory))
    directory_path = posixpath.join(FACES_FOLDER, directory)

    for file in image_files_in_folder(directory_path):
        basename = os.path.splitext(os.path.basename(file))[0]
        print("[ ]正在处理图片{}".format(file.split('/')[-1]))
        image = Faces.load_image_file(file)
        top, right, bottom, left = 0, 255, 255, 0
        # top, right, bottom, left = Faces.get_only_face(image, min_face_area=1000, upsample=2)
        # print("[+]人脸位置", (top, right, bottom, left))

        encoding = Faces.face_encodings(image, [(top, right, bottom, left)],
                                        NUM_JITTERS)
        id_list.append(int(directory))
        face_encodings.append(encoding)

# Reshape face_encodings into an (n, 128) array
length = len(id_list)
np_face_encodings = np.asarray(face_encodings)
np_face_encodings = np_face_encodings.reshape(length, -1)

# Save the result locally as a pickle
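The snippet is cut off here; a minimal sketch of what the pickle step could look like (the file name and dictionary layout are assumptions):

import pickle

# Hypothetical output file; store the ids together with the (n, 128) encodings
with open("known_face_encodings.pkl", "wb") as fp:
    pickle.dump({"ids": id_list, "encodings": np_face_encodings}, fp)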