Exemplo n.º 1
0
def get_feature1():
    """Run a live webcam loop: show each frame, show the first detected
    face crop, and trigger feature extraction once the password check
    passes (module-level ``pass_state == 'pass'``).

    Side effects: updates the module-level ``faceitem`` with the most
    recently detected face; opens and destroys OpenCV windows.
    Press 'q' in the window to quit.
    """
    global faceitem
    capture = cv2.VideoCapture(0)  # default webcam
    cv2.namedWindow('frame', 2)
    cv2.namedWindow('face', 2)
    # Background thread gathering user information; restarted whenever it
    # finishes so input collection is always available.
    P1 = threading.Thread(target=input_imformation, args=())
    P1.start()
    try:
        while True:
            if not P1.is_alive():
                P1 = threading.Thread(target=input_imformation, args=())
                P1.start()
            ret, frame = capture.read()
            if not ret:
                # Robustness fix: camera unavailable / stream ended —
                # originally this fell through and crashed in imshow().
                break
            image = np.array(frame)
            cv2.imshow('frame', frame)
            faces = facebounding(image)
            # Fix: replace bare `except:` (which hid real errors) with an
            # explicit emptiness check on the detection result.
            if faces:
                faceitem = faces[0]
                cv2.imshow('face', faceitem.image)
            else:
                cv2.imshow('face', image)
            if pass_state == 'pass':
                # join() preserves the original sequential behaviour:
                # the UI loop blocks until feature extraction completes.
                P3 = threading.Thread(target=get_feature, args=())
                P3.start()
                P3.join()
            key = cv2.waitKey(1)
            if key == ord('q'):  # quit on 'q' key press
                cv2.destroyAllWindows()
                break
    finally:
        # Fix: the camera handle was never released in the original.
        capture.release()
Exemplo n.º 2
0
def face_rec(num):
    """Recognise each image under ``data\\dataset\\Yuchao`` against the
    precomputed embedding set ``data\\dataset\\test\\test<num>.csv``.

    Args:
        num: dataset index used to build the CSV / image-folder paths.

    Side effects: draws a box + predicted label on each probe image,
    shows probe and matched gallery image in OpenCV windows, prints the
    best match label and distance. Blocks on ``cv2.waitKey()`` per image.
    """
    dataset_path = 'data\\dataset\\test\\test' + str(num) + '.csv'
    label_ID = []
    # Load the precomputed 128-d embeddings.
    # Row format: "<label>,f1,f2,...,f128". Row 0 of `embedded` is
    # reserved for the probe image's embedding.
    with open(dataset_path, encoding='utf-8') as f:
        reader = f.readlines()
        embedded = np.zeros((len(reader) + 1, 128))
        for i, row in enumerate(reader, start=1):
            parts = row.split(',')
            label_ID.append(parts[0])
            embedded[i, :] = np.array(list(map(float, parts[1:])))
    # Fix: dropped redundant `f.close()` — the `with` block already
    # closed the file.
    path = 'data\\dataset\\Yuchao'
    imganchorpath = [path + '\\' + name for name in os.listdir(path)]
    for imganchor in imganchorpath:
        image1 = cv2.imread(imganchor)
        faces = facebounding(image1)
        if not faces:
            # Robustness fix: `max([])` raised ValueError on images with
            # no detected face — skip them instead.
            continue
        # Pick the largest detected face (by crop height).
        sizes = [face.image.shape[0] for face in faces]
        index = sizes.index(max(sizes))
        image1 = preprocess_image(faces[index].image)
        embedded[0] = nn4_small2_pretrained.predict(
            np.expand_dims(image1, axis=0))[0]

        # Nearest neighbour in embedding space.
        imgDistanceList = [distance(embedded[0], embedded[i + 1])
                           for i in range(len(embedded) - 1)]
        minIndex = imgDistanceList.index(min(imgDistanceList))
        cv2.rectangle(
            faces[index].container_image,
            (faces[index].bounding_box[0], faces[index].bounding_box[1]),
            (faces[index].bounding_box[2], faces[index].bounding_box[3]),
            (0, 155, 255), 5)
        text = label_ID[minIndex]
        cv2.putText(
            faces[index].container_image, text,
            (faces[index].bounding_box[0], faces[index].bounding_box[1] - 20),
            cv2.FONT_HERSHEY_SIMPLEX, 1, (225, 155, 0), 1)
        print(label_ID[minIndex])
        print(min(imgDistanceList))
        # Fix: show the annotated face (`faces[index]`) — the original
        # showed `faces[0]` while drawing on `faces[index]`.
        cv2.imshow(imganchor.split('\\')[-1], faces[index].container_image)
        cv2.imshow(
            'result_' + imganchor.split('\\')[-1],
            cv2.imread('data\\dataset\\test\\test' + str(num) + '\\' +
                       label_ID[minIndex]))
        cv2.waitKey()
Exemplo n.º 3
0
def Face_Recognition_Interface(image):
    """Identify the first face detected in ``image`` against the
    embedding set ``data\\dataset\\FaceDataset50_1.csv``.

    Args:
        image: BGR image array (as returned by ``cv2.imread``).

    Side effects: prints the matched label, phone number and distance;
    shows the matched gallery image and the input image in OpenCV
    windows for 10 ms.
    """
    dataset_path = 'data\\dataset\\FaceDataset50_1.csv'
    label_ID = []
    phone = []
    # Row format: "<label>,<phone>,f1,...,f128". Row 0 of `embedded` is
    # reserved for the probe face's embedding.
    with open(dataset_path, encoding='utf-8') as f:
        reader = f.readlines()
        embedded = np.zeros((len(reader) + 1, 128))
        for i, row in enumerate(reader, start=1):
            parts = row.split(',')
            label_ID.append(parts[0])
            phone.append(parts[1])
            embedded[i, :] = np.array(list(map(float, parts[2:])))
    # Fix: dropped redundant `f.close()` — `with` already closed the file.

    faces = facebounding(image)
    if not faces:
        # Robustness fix: originally `faces[0]` crashed when no face was
        # detected.
        print('no face detected')
        return

    faceimage = preprocess_image(faces[0].image)
    embedded[0] = nn4_small2_pretrained.predict(
        np.expand_dims(faceimage, axis=0))[0]

    # Nearest neighbour in embedding space.
    imgDistanceList = [distance(embedded[0], embedded[i + 1])
                       for i in range(len(embedded) - 1)]
    minIndex = imgDistanceList.index(min(imgDistanceList))
    print(label_ID[minIndex])
    print(phone[minIndex])
    print(min(imgDistanceList))

    # Locate the gallery image whose filename prefix matches the label.
    imageSet_path = dataset_path.split('.')[0]
    image_path = [imageSet_path + '\\' + name
                  for name in os.listdir(imageSet_path)]
    target = None
    for candidate in image_path:
        if candidate.split('\\')[-1].split('_')[0] == label_ID[minIndex]:
            target = candidate
            print(target)
    if target is None:
        # Fix: the original raised NameError (`targret` undefined) when
        # no gallery file matched the predicted label.
        return
    image_target = cv2.imread(target)
    cv2.imshow('1', image_target)
    cv2.imshow('2', image)
    cv2.waitKey(10)
Exemplo n.º 4
0
def main():
    """Match every test image under ``RecognitionDemo\\test1`` against
    the gallery ``data\\dataset\\FaceDataset50_2``.

    Side effects: prints the best-matching gallery file and distance for
    each test image and shows the pair in OpenCV windows (blocks on a
    key press per image).
    """
    testpath = folderpath + '\\RecognitionDemo\\test1'
    testimagepath = [testpath + '\\' + name for name in os.listdir(testpath)]
    datapath = folderpath + '\\data\\dataset\\FaceDataset50_2'
    metadata = load_metadata(datapath)

    # Perf fix: gallery embeddings are invariant across test images, so
    # compute them ONCE up front — the original re-ran model inference
    # on the whole gallery for every test image.
    gallery = np.zeros((metadata.shape[0], 128))
    for i in range(metadata.shape[0]):
        imageItem = metadata[i]
        img_bgr = cv2.imread(imageItem.file)
        imageItem.image = img_bgr  # cached for display below
        gallery[i] = nn4_small2_pretrained.predict(
            np.expand_dims(preprocess_image(img_bgr), axis=0))[0]

    for imagepath in testimagepath:
        imagetest = cv2.imread(imagepath)
        faces = face_detect.facebounding(imagetest)
        if not faces:
            # Robustness fix: originally `faces[0]` crashed when no face
            # was detected in a test image.
            continue
        testface = preprocess_image(faces[0].image)
        probe = nn4_small2_pretrained.predict(
            np.expand_dims(testface, axis=0))[0]

        # Nearest neighbour over the precomputed gallery embeddings.
        imgDistanceList = [distance(probe, gallery[i])
                           for i in range(metadata.shape[0])]
        print()
        minIndex = imgDistanceList.index(min(imgDistanceList))
        print(metadata[minIndex].file)
        print(min(imgDistanceList))
        cv2.imshow('1', metadata[minIndex].image)
        cv2.imshow('2', testface)
        cv2.waitKey()
Exemplo n.º 5
0
def video_test():
    """Run face recognition over ``data\\dataset\\yuchao.mp4`` frame by
    frame against the embedding set ``test100.csv``.

    For each frame with a detected face whose best-match distance is
    below 0.23, draws a labelled box on the frame and shows the matched
    gallery image. Press 'q' in the window to quit.
    """
    capture = cv2.VideoCapture('data\\dataset\\yuchao.mp4')
    cv2.namedWindow('frame', 2)
    cv2.namedWindow('face', 2)
    dataset_path = 'data\\dataset\\test\\test100.csv'
    label_ID = []
    # Row format: "<label>,f1,...,f128". Row 0 of `embedded` is reserved
    # for the current frame's face embedding.
    with open(dataset_path, encoding='utf-8') as f:
        reader = f.readlines()
        embedded = np.zeros((len(reader) + 1, 128))
        for i, row in enumerate(reader, start=1):
            parts = row.split(',')
            label_ID.append(parts[0])
            embedded[i, :] = np.array(list(map(float, parts[1:])))
    # Fix: dropped redundant `f.close()` — `with` already closed the file.
    try:
        while True:
            ret, frame = capture.read()
            if ret == False:  # end of video stream
                break
            image = np.array(frame)
            faces = facebounding(image)
            # Fix: replaced bare `except:` (which silently swallowed ALL
            # errors, including real bugs in predict/imread) with an
            # explicit check on the detection result.
            if faces:
                faceitem = faces[0]
                face_crop = faceitem.image
                cv2.imshow('face', face_crop)
                embedded[0] = nn4_small2_pretrained.predict(
                    np.expand_dims(preprocess_image(face_crop), axis=0))[0]
                imgDistanceList = [distance(embedded[0], embedded[k + 1])
                                   for k in range(len(embedded) - 1)]
                minIndex = imgDistanceList.index(min(imgDistanceList))
                # 0.23: empirical accept threshold on embedding distance.
                if float(min(imgDistanceList)) < 0.23:
                    print(label_ID[minIndex])
                    print(min(imgDistanceList))
                    box = faceitem.bounding_box
                    cv2.rectangle(frame, (box[0], box[1]),
                                  (box[2], box[3]), (0, 155, 255), 2)
                    cv2.putText(frame, label_ID[minIndex],
                                (box[0], box[1] - 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (0, 155, 255), 2)
                    cv2.imshow(
                        'person',
                        cv2.imread('data\\dataset\\test\\test100\\' +
                                   label_ID[minIndex]))
                    cv2.waitKey(5)
            else:
                cv2.imshow('face', image)

            # Single display of the (possibly annotated) frame — the
            # original redundantly showed it twice per iteration.
            cv2.imshow('frame', frame)
            key = cv2.waitKey(1)
            if key == ord('q'):  # quit on 'q' key press
                cv2.destroyAllWindows()
                break
    finally:
        # Fix: the video handle was never released in the original.
        capture.release()