Example 1
def get_image_from_camera():
    capture = cv2.VideoCapture(0)

    while True:
        ret, frame = capture.read()
        window_name = "face"
        #cv2.imshow(window_name, frame)
        '''
        bounding_boxes, landmarks = detect_faces(frame)
        image = show_bboxes(image, bounding_boxes, landmarks)
        '''

        if ret:
            cv_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(cv_img)
            image = image.resize((320, 240), Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            bounding_boxes, landmarks = detect_faces(image)

            image = show_bboxes(image, bounding_boxes, landmarks)

            img = cv2.cvtColor(numpy.asarray(image), cv2.COLOR_RGB2BGR)
            cv2.imshow("face detect", img)
        if cv2.waitKey(100) & 0xff == ord('q'):
            break

    capture.release()
    cv2.destroyAllWindows()
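
All of these snippets call the same detect_faces / show_bboxes pair without showing it. From the way the results are indexed, detect_faces appears to return an (N, 5) array of boxes [x1, y1, x2, y2, score] and an (N, 10) array of landmarks laid out as five x-coordinates followed by five y-coordinates (eyes, nose, mouth corners). The sketch below only illustrates that assumed layout; unpack_detection is a hypothetical helper, not part of any example on this page.

import numpy as np

def unpack_detection(bounding_boxes, landmarks, i=0):
    # box corners and confidence of the i-th face (assumed [x1, y1, x2, y2, score])
    x1, y1, x2, y2, score = bounding_boxes[i]
    # landmark layout assumed to be [x1..x5, y1..y5]
    xs = np.asarray(landmarks[i][:5])
    ys = np.asarray(landmarks[i][5:])
    # order assumed: left eye, right eye, nose, left mouth corner, right mouth corner
    points = list(zip(xs, ys))
    return (x1, y1, x2, y2), score, points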
Example 2
def crop_face(img):
  bounding_boxes, landmarks = detect_faces(img)
  image = show_bboxes(img, bounding_boxes)
  (x1, y1, x2, y2) = bounding_boxes[0][:4]  # corners of the first detected box, not width/height
  img = img.crop((x1, y1, x2, y2))
  img = img.resize((224, 224))
  return img
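
crop_face above indexes bounding_boxes[0] directly, so it raises an IndexError on images with no detected face. A minimal guarded variant, assuming the same detect_faces interface; crop_face_safe is a hypothetical name, not part of the original example.

def crop_face_safe(img, size=(224, 224)):
    # returns None instead of crashing when nothing is detected
    bounding_boxes, landmarks = detect_faces(img)
    if len(bounding_boxes) == 0:
        return None
    x1, y1, x2, y2 = [int(v) for v in bounding_boxes[0][:4]]
    return img.crop((x1, y1, x2, y2)).resize(size)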
Example 3
def mtcnn_crop(in_path, out_path, crop_size=(112, 96)):
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    imgs_folder = os.listdir(in_path)
    for img_folder in tqdm.tqdm(imgs_folder):
        if not os.path.exists(os.path.join(out_path, img_folder)):
            os.makedirs(os.path.join(out_path, img_folder))
        img_names = os.listdir(os.path.join(in_path, img_folder))
        for name in img_names:
            img = Image.open(os.path.join(in_path, img_folder, name))
            boundboxes, landmark = detect_faces(img)
            if len(boundboxes) == 0:
                continue  # no face detected in this image, skip it
            index = 0
            score = boundboxes[0][-1]
            for i, box in enumerate(boundboxes):
                if (box[-1] > score):
                    index = i
                    score = box[-1]
            box = boundboxes[index][:4].astype(np.int32)
            img_crop = img.crop(box).resize(crop_size, Image.BICUBIC)
            img_crop.save(os.path.join(out_path, img_folder, name))
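
The best-score selection loop in mtcnn_crop can be condensed with numpy's argmax over the score column. A small sketch under the same (N, 5) box assumption; best_box is a hypothetical helper.

import numpy as np

def best_box(boundboxes):
    # boundboxes: (N, 5) array of [x1, y1, x2, y2, score]; return the corners of the top-scoring box
    boxes = np.asarray(boundboxes)
    return boxes[int(np.argmax(boxes[:, -1])), :4].astype(np.int32)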
Example 4
def main():
    image = Image.open('images/test.jpg')
    opencv_image = cv2.imread("images/test.jpg")
    print("image size: ", image.size, ", opencv image size: ", opencv_image.shape)
    bounding_boxes, landmarks = detect_faces(image)
    print("bounding_boxes: ", bounding_boxes, ", landmarks: ", landmarks)
    # save face pic
    for i, bbox in enumerate(bounding_boxes):
        # clamp the padded crop so negative indices do not wrap around
        cropped = opencv_image[max(0, int(bbox[1]) - 20):int(bbox[3] + 20), max(0, int(bbox[0]) - 20):int(bbox[2]) + 20]
        print("cropped size: ", cropped.shape)
        cv2.imshow("Face extract", cropped)

        # get eyes center xy (use the landmarks of the current face, not always the first)
        lks = landmarks[i]
        center_x = (lks[0] + lks[1]) / 2  - bbox[0]
        center_y = (lks[5] + lks[6]) / 2  - bbox[1]

        # center_x = (lks[0] + lks[1]) / 2
        # center_y = (lks[5] + lks[6]) / 2
        eyesCenter = (center_x, center_y)

        # get eye angle in degrees (atan2 is exact and handles dx == 0)
        dy = lks[6] - lks[5]
        dx = lks[1] - lks[0]
        angle = math.degrees(math.atan2(dy, dx))

        # get rotation matrix
        retval = cv2.getRotationMatrix2D(eyesCenter, angle, 1.0)

        print("angle: ", angle, ", retval: ", retval, ", eyesCenter: ", eyesCenter, ", cropped size: ", cropped.shape)
        # get warp affine
        #dst = cv2.warpAffine(opencv_image, retval, opencv_image.size)
        dst = cv2.warpAffine(cropped, retval, (cropped.shape[1], cropped.shape[0]))
        print("dst: ", type(dst), "len dst: ", len(dst), "dst shape: ", dst.shape)
        cv2.imshow("Face alignment", dst)


    image = show_bboxes(image, bounding_boxes, landmarks)
    image.show()
    cv2.waitKey(0)
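
The alignment part of this example rotates the cropped face so the eye line becomes horizontal. A self-contained sketch of that step with the angle computed via atan2/degrees; align_by_eyes and its (x, y) eye arguments are assumptions, not part of the original code.

import math
import cv2

def align_by_eyes(face_bgr, left_eye, right_eye):
    # rotate a cropped BGR face around the eye midpoint so the eyes are level
    dx = right_eye[0] - left_eye[0]
    dy = right_eye[1] - left_eye[1]
    angle = math.degrees(math.atan2(dy, dx))  # exact degrees, robust to dx == 0
    center = ((left_eye[0] + right_eye[0]) / 2.0,
              (left_eye[1] + right_eye[1]) / 2.0)
    rot = cv2.getRotationMatrix2D(center, angle, 1.0)  # 2x3 affine rotation matrix
    h, w = face_bgr.shape[:2]
    return cv2.warpAffine(face_bgr, rot, (w, h))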
Example 5
def img_locate(path):
    img = Image.open(path)
    boundboxes, landmark = detect_faces(img.copy())
    print(boundboxes, landmark)
    img_draw = ImageDraw.Draw(img)
    print(img.size, type(landmark))
    for i, box in enumerate(boundboxes):
        box = np.array(box)
        lm = np.array(landmark[i], np.int32)
        fill = (0, 0, 255)
        for j in range(0, len(lm) // 2):
            print('j:{}'.format(j))
            img_draw.point((lm[j], lm[j + 5]), fill=fill)
        print('box:{}'.format(box))
        img_draw.rectangle(tuple(box[:4].astype(np.int32)),
                           outline=(255, 0, 0),
                           width=2)
        img_draw.text(tuple(box[:2].astype(np.int32)),
                      text="{}".format(box[-1]),
                      fill=fill)
    img.show()
    plt.show()
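
ImageDraw.point draws a single pixel per landmark, which is easy to miss on large photos. A hedged alternative that draws small filled circles instead; draw_landmarks is a hypothetical helper using the same [x1..x5, y1..y5] landmark layout.

from PIL import ImageDraw

def draw_landmarks(img, landmarks, radius=2, fill=(0, 0, 255)):
    # each landmark row is assumed to be [x1..x5, y1..y5]
    draw = ImageDraw.Draw(img)
    for lm in landmarks:
        for j in range(5):
            x, y = lm[j], lm[j + 5]
            draw.ellipse([(x - radius, y - radius), (x + radius, y + radius)], fill=fill)
    return img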
Example 6
def main():
    image = Image.open('images/test10.jpg')
    bounding_boxes, landmarks = detect_faces(image)
    image = show_bboxes(image, bounding_boxes, landmarks)
    image.show()
def crop_face(imgpath):
    # img = cv2.imread(imgpath)
    # print(img.size)
    # print(img.shape)
    # print(type(img))
    img = Image.open(imgpath)
    # PIL's .size always has length 2, so the original check never distinguished
    # grayscale from color; test the mode instead to guarantee a 3-channel image
    if img.mode != 'RGB':
        image = img.convert('RGB')
    else:
        image = img
    h, w = image.size  # image width and height (note: PIL .size is (width, height))
    print(image.size)
    print(type(image))
    # print(img.dtype)
    bounding_box, landmarks = detect_faces(image)
    print('landmarks')
    print(landmarks)
    print(bounding_box.shape)
    faces_count = landmarks.shape[0]
    if faces_count > 1:
        print('more than one face in the image')
        return
    # left eye center:    (elx, ely)
    # right eye center:   (erx, ery)
    # nose tip:           (nx, ny)
    # left mouth corner:  (mlx, mly)
    # right mouth corner: (mrx, mry)
    elx, erx, nx, mlx, mrx, ely, ery, ny, mly, mry = landmarks[0]

    # compute the rotation angle
    angle = calculate_angle(elx, ely, erx, ery)

    # rotate the image
    img_rotated = img.rotate(angle, expand=1)
    ww, hh = img_rotated.size  # width and height of the rotated image

    # landmark positions after alignment
    elx, ely = pos_transform(angle, elx, ely, w, h)
    erx, ery = pos_transform(angle, erx, ery, w, h)
    nx, ny = pos_transform(angle, nx, ny, w, h)
    mlx, mly = pos_transform(angle, mlx, mly, w, h)
    mrx, mry = pos_transform(angle, mrx, mry, w, h)

    # draw = ImageDraw.Draw(img_rotated)

    # # draw the eye, nose and mouth-corner positions on the image
    # r = 3
    # draw.ellipse([(elx - r, ely - r), (elx + r, ely + r)], fill='red')
    # draw.ellipse([(erx - r, ery - r), (erx + r, ery + r)], fill='red')
    # draw.ellipse([(nx - r, ny - r), (nx + r, ny + r)], fill='red')
    # draw.ellipse([(mlx - r, mly - r), (mlx + r, mly + r)], fill='red')
    # draw.ellipse([(mrx - r, mry - r), (mrx + r, mry + r)], fill='red')

    # draw.line([(elx, ely), (erx, ery)], fill='blue')
    # draw.line([(elx, ely), (mrx, mry)], fill='blue')
    # draw.line([(erx, ery), (mlx, mly)], fill='blue')
    # draw.line([(mlx, mly), (mrx, mry)], fill='blue')
    # # img_rotated.show()
    # print(img_rotated.shape)

    # basic parameters
    eye_width = erx - elx  # distance between the two eyes
    ecx, ecy = (elx + erx) / 2.0, (ely + ery) / 2.0  # center of the two eyes
    mouth_width = mrx - mlx  # mouth width
    mcx, mcy = (mlx + mrx) / 2.0, (mly + mry) / 2.0  # mouth center
    em_height = mcy - ecy  # vertical distance from eye center to mouth center
    fcx, fcy = (ecx + mcx) / 2.0, (ecy + mcy) / 2.0  # face center

    # face-only (global) crop
    if eye_width > em_height:
        alpha = eye_width
    else:
        alpha = em_height
    g_beta = 2.0
    g_left = fcx - alpha / 2.0 * g_beta
    g_upper = fcy - alpha / 2.0 * g_beta
    g_right = fcx + alpha / 2.0 * g_beta
    g_lower = fcy + alpha / 2.0 * g_beta
    g_face = img_rotated.crop((g_left, g_upper, g_right, g_lower))

    # split the face at the nose into an upper (R1) and a lower (R2) region
    # spatial normalization uses half of the inter-eye distance (alpha)
    alpha = (erx - elx) / 2.0  # spatial normalization distance
    crop_width = alpha * 3.8  # horizontal crop factor
    crop_R1_height = alpha * 1.5  # R1 region height, vertical factor 1.5
    crop_R2_height = alpha * 3.5  # R2 region height, vertical factor 3.5

    ecx, ecy = (elx + erx) / 2.0, ely  # eye-center point
    # crop corner positions: top-left, top-right, bottom-left, bottom-right
    TL_x, TL_y = ecx - crop_width / 2.0, ecy - crop_R1_height
    TR_x, TR_y = ecx + crop_width / 2.0, ecy - crop_R1_height
    BL_x, BL_y = ecx - crop_width / 2.0, ecy + crop_R2_height
    BR_x, BR_y = ecx + crop_width / 2.0, ecy + crop_R2_height

    left, upper, right, lower = get_crop_area(TL_x, TL_y, TR_x, BL_y, ww, hh)
    img_crop = img_rotated.crop((left, upper, right, lower))
    # img_crop.show()

    # # draw the crop boundary
    # draw.line([(TL_x, TL_y), (TR_x, TR_y)], fill='green')
    # draw.line([(TL_x, TL_y), (BL_x, BL_y)], fill='green')
    # draw.line([(TR_x, TR_y), (BR_x, BR_y)], fill='green')
    # draw.line([(BL_x, BL_y), (BR_x, BR_y)], fill='green')

    # R1 crop region, bottom-left and bottom-right corner coordinates
    alpha_h = 0.8
    alpha_v = 1.0
    R1_TL_x, R1_TL_y = elx - alpha * alpha_h, ely - alpha * alpha_v
    R1_TR_x, R1_TR_y = erx + alpha * alpha_h, ely - alpha * alpha_v
    R1_BL_x, R1_BL_y = elx - alpha * alpha_h, ely + alpha * alpha_v
    R1_BR_x, R1_BR_y = erx + alpha * alpha_h, ery + alpha * alpha_v
    if R1_TL_y > R1_TR_y:
        R1_TL_y = R1_TR_y
    if R1_BL_y < R1_BR_y:
        R1_BL_y = R1_BR_y
    left, upper, right, lower = get_crop_area(R1_TL_x, R1_TL_y, R1_TR_x,
                                              R1_BL_y, ww, hh)
    R1 = img_rotated.crop((left, upper, right, lower))
    # R1.show()

    # # draw the R1 crop region
    # draw.line([(R1_TL_x, R1_TL_y), (R1_TR_x, R1_TR_y)], fill='blue')
    # draw.line([(R1_TL_x, R1_TL_y), (R1_BL_x, R1_BL_y)], fill='blue')
    # draw.line([(R1_BL_x, R1_BL_y), (R1_BR_x, R1_BR_y)], fill='blue')
    # draw.line([(R1_TR_x, R1_TR_y), (R1_BR_x, R1_BR_y)], fill='blue')

    # R2 crop region, top-left and top-right corner coordinates
    # spatial normalization uses the mouth-corner distance
    beta = mrx - mlx
    beta_h = 0.3
    beta_v = 0.4
    R2_TL_x, R2_TL_y = mlx - beta * beta_h, mly - beta * beta_v
    R2_TR_x, R2_TR_y = mrx + beta * beta_h, mry - beta * beta_v
    R2_BL_x, R2_BL_y = mlx - beta * beta_h, mly + beta * beta_v
    R2_BR_x, R2_BR_y = mrx + beta * beta_h, mry + beta * beta_v

    if R2_TL_y > R2_TR_y:
        R2_TL_y = R2_TR_y
    if R2_BL_y < R2_BR_y:
        R2_BL_y = R2_BR_y
    left, upper, right, lower = get_crop_area(R2_TL_x, R2_TL_y, R2_TR_x,
                                              R2_BL_y, ww, hh)
    R2 = img_rotated.crop((left, upper, right, lower))
    # R2.show()

    # draw.line([(R2_TL_x, R2_TL_y), (R2_TR_x, R2_TR_y)], fill='blue')
    # draw.line([(R2_TL_x, R2_TL_y), (R2_BL_x, R2_BL_y)], fill='blue')
    # draw.line([(R2_TR_x, R2_TR_y), (R2_BR_x, R2_BR_y)], fill='blue')
    # draw.line([(R2_BL_x, R2_BL_y), (R2_BR_x, R2_BR_y)], fill='blue')
    # img_rotated.show()
    return img_rotated, g_face, R1, R2
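
crop_face depends on three helpers that the snippet does not include: calculate_angle, pos_transform and get_crop_area. The following is only a plausible reading of what they do, assuming PIL's counter-clockwise rotate(angle, expand=1) and an image y-axis pointing down; treat it as a sketch, not the original implementation.

import math

def calculate_angle(elx, ely, erx, ery):
    # angle (degrees) of the line joining the two eye centers
    return math.degrees(math.atan2(ery - ely, erx - elx))

def pos_transform(angle, x, y, w, h):
    # map a point of the original w x h image into the canvas produced by
    # img.rotate(angle, expand=1); the expanded size is recomputed from the angle
    theta = math.radians(angle)
    ww = abs(w * math.cos(theta)) + abs(h * math.sin(theta))
    hh = abs(w * math.sin(theta)) + abs(h * math.cos(theta))
    nx = (x - w / 2.0) * math.cos(theta) + (y - h / 2.0) * math.sin(theta) + ww / 2.0
    ny = -(x - w / 2.0) * math.sin(theta) + (y - h / 2.0) * math.cos(theta) + hh / 2.0
    return nx, ny

def get_crop_area(left, upper, right, lower, ww, hh):
    # clamp the crop rectangle to the rotated image bounds
    return (max(0, int(left)), max(0, int(upper)),
            min(int(ww), int(right)), min(int(hh), int(lower)))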
Example 8
def main():
    image = cv2.imread('images/test7.jpg')
    bounding_boxes, landmarks = detect_faces(image, gpu_id=0)
    image = show_bboxes(image, bounding_boxes, landmarks)
    cv2.imshow('image', image)
    cv2.waitKey()
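
Unlike the other examples, this one hands detect_faces an OpenCV BGR array and a gpu_id, so it is evidently a different detect_faces variant. If only the PIL-based detector used elsewhere is available, the frame has to be converted first; a minimal sketch, where detect_on_cv2_frame is a hypothetical wrapper and detect_faces is assumed to be in scope.

import cv2
from PIL import Image

def detect_on_cv2_frame(frame_bgr):
    # OpenCV images are BGR; the PIL-based detect_faces expects an RGB image
    pil_img = Image.fromarray(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB))
    return detect_faces(pil_img)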
Example 9
from PIL import Image
from src import detector, box_utils, visualization_utils
import torch
from torchvision import transforms

image = Image.open("../../images/results/mosaic-face/bridge-face=True-3.png")
bounding_boxes, landmarks = detector.detect_faces(image)
print(bounding_boxes)

image_transform = transforms.ToTensor()
image_tensor = image_transform(image).unsqueeze(0)
for i, b in enumerate(bounding_boxes[:, :-1].round().astype("int")):
    x = image_tensor[:, :, b[1]:b[3], b[0]:b[2]].clone()
    x = torch.nn.functional.interpolate(x,
                                        size=(96, 96),
                                        mode="bilinear",
                                        align_corners=True)
    image_array = x.numpy().squeeze(0) * 255
    image = Image.fromarray(image_array.transpose(1, 2, 0).astype("uint8"))
    image.save(
        f"../../images/faces/test-face-{i}-p={bounding_boxes[i, 4]}.png",
        subsampling=0,
        quality=100)

# img_boxes = box_utils.get_image_boxes(bounding_boxes, image, size=96)
# img_copy = visualization_utils.show_bboxes(image, bounding_boxes, landmarks)
# img_copy.save("tmp.png")
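
The tensor slice image_tensor[:, :, b[1]:b[3], b[0]:b[2]] silently returns an empty or wrapped view when a rounded box falls outside the image. A small sketch that clips the boxes first; clamp_boxes is an assumed helper, not part of the snippet.

import numpy as np

def clamp_boxes(boxes, width, height):
    # clip [x1, y1, x2, y2, score] rows to the image rectangle
    boxes = np.asarray(boxes, dtype=np.float64).copy()
    boxes[:, [0, 2]] = np.clip(boxes[:, [0, 2]], 0, width - 1)
    boxes[:, [1, 3]] = np.clip(boxes[:, [1, 3]], 0, height - 1)
    return boxes

# usage with the PIL image loaded above:
# bounding_boxes = clamp_boxes(bounding_boxes, *image.size)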
Example 10
    def Test(self):
        """
        Test network on test set
        """
        print("Testing........")
        test_loss = 0
        total = 0
        correct = 0
        test_data = []
        save_img = False
        i = 0
        labels = []
        imgs = []
        img_labels = []

        # set model val
        # torch.cuda.empty_cache()
        print(self.model)
        self.model.eval()

        # prepare test data
        # test_size = len(self.datasets['test'])
        # if test_size % self.params.test_batch != 0:
        #     total_batch = test_size // self.params.test_batch + 1
        # else:
        #     total_batch = test_size // self.params.test_batch

        # test for one epoch
        for img, idx in self.test_loader:
            # print(expression)
            # print(img)
            # if expression == 0:
            name = self.dataset.idx_to_class[idx]
            label = self.dataset.class_to_idx[name]
            # print('label')
            # print(label)
            # label_array = np.asarray(label)
            # print('label_array')
            # print(label_array)
            label = torch.from_numpy(np.asarray([label])).float()
            print('label_array')
            print(label)
            # label_loader = data.dataloader(label)
            label_cuda = label.cuda()
            print('labelcuda')
            print(label_cuda)
            # print(self.dataset.class_to_idx[name])
            print(name)
            print('img type')
            print(type(img))
            # image = transforms.ToPILImage()(img).convert('RGB')

            # img = Image.open(img_path)
            print(img.size)

            if len(img.size) == 2:
                image = img.convert('RGB')
            else:
                image = img

            i += 1

            h, w = image.size  # image width and height
            print(image.size)
            print(type(image))

            # print(img.dtype)
            bounding_box, landmarks = detect_faces(image)
            print('landmarks')
            print(landmarks)
            print(bounding_box.shape)
            # faces_count = landmarks.shape[0]
            # if faces_count > 1:
            #     print('more than one face in the image')
            #     return
            # elx, erx, nx, mlx, mrx, ely, ery, ny, mly, mry = landmarks[0]
            #
            # # compute the rotation angle
            # angle = calculate_angle(elx, ely, erx, ery)
            #
            # # rotate the image
            # img_rotated = img.rotate(angle, expand=1)
            # ww, hh = img_rotated.size  # width and height of the rotated image
            #
            # # landmark positions after alignment
            # elx, ely = pos_transform(angle, elx, ely, w, h)
            # erx, ery = pos_transform(angle, erx, ery, w, h)
            # nx, ny = pos_transform(angle, nx, ny, w, h)
            # mlx, mly = pos_transform(angle, mlx, mly, w, h)
            # mrx, mry = pos_transform(angle, mrx, mry, w, h)
            #
            # # basic parameters
            # eye_width = erx - elx  # distance between the two eyes
            # ecx, ecy = (elx + erx) / 2.0, (ely + ery) / 2.0  # center of the two eyes
            # mouth_width = mrx - mlx  # mouth width
            # mcx, mcy = (mlx + mrx) / 2.0, (mly + mry) / 2.0  # mouth center
            # em_height = mcy - ecy  # vertical distance from eye center to mouth center
            # fcx, fcy = (ecx + mcx) / 2.0, (ecy + mcy) / 2.0  # face center
            #
            # # face-only (global) crop
            # if eye_width > em_height:
            #     alpha = eye_width
            # else:
            #     alpha = em_height
            # g_beta = 2.0
            # g_left = fcx - alpha / 2.0 * g_beta
            # g_upper = fcy - alpha / 2.0 * g_beta
            # g_right = fcx + alpha / 2.0 * g_beta
            # g_lower = fcy + alpha / 2.0 * g_beta
            # g_face = img_rotated.crop((g_left, g_upper, g_right, g_lower))
            # print(type(g_face))
            # save_path = os.path.join('%s/%s' % (self.params.save_test, name))
            # if not os.path.exists(save_path):
            #     os.makedirs(save_path)
            #
            # g_face = np.float32(g_face)
            # dst = np.zeros(g_face.shape, dtype=np.float32)
            # cv2.normalize(g_face, dst=dst, alpha=0, beta=1.0, norm_type=cv2.NORM_MINMAX)
            # # norm_gface = np.uint8(dst * 255)
            # # print(norm_gface.size)
            #
            # resize_img = cv2.resize(dst, (128, 128), cv2.INTER_LINEAR)
            # print('after resize')
            # print(resize_img.shape)
            # g_face = Image.fromarray(np.uint8(resize_img))
            # g_face = g_face.convert('RGB')
            image = np.array(image)
            for face_position in bounding_box:
                face_position = face_position.astype(int)
                x1, y1, x2, y2 = face_position[0], face_position[
                    1], face_position[2], face_position[3]
                gray_face = image[y1:y2, x1:x2]
                print(gray_face.shape)
                plt.imshow(gray_face)
                plt.show()
                try:
                    gray_face = cv2.resize(gray_face, (224, 224))
                except:
                    continue

                gray_face = _preprocess(gray_face)

                # gray_face = Image.fromarray(np.uint8(gray_face))
                # gray_face = gray_face.convert('RGB')
                # gray_face = np.expand_dims(gray_face, 0)
                # gray_face = np.expand_dims(gray_face, -1)

                print(gray_face.shape)
                # g_face = np.expand_dims(g_face, -1)
                gray_face = torch.from_numpy(gray_face).float()
                gray_face_cuda = gray_face.cuda()
                print('gray_face_cuda')
                print(gray_face_cuda)

                out = self.model(gray_face_cuda)
                print(out)

                _, predicted = torch.max(out.data, 1)

                total += label_cuda.size(0)  # count samples; the original added the label value itself
                # print(label_cuda.long().data)
                correct += predicted.eq(label_cuda.long()).sum()
                # test_acc = float(correct) / float(total)

                # print(test_acc)

        test_acc = float(correct) / float(total)
        print('total')
        print(total)
        print('correct')
        print(correct)
        self.test_acc.append(test_acc)
        print(self.test_acc)
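
Test interleaves a lot of debug printing with the actual bookkeeping. Stripped down, the per-face accounting amounts to the following sketch; accuracy_update is a hypothetical helper, assuming out is the logits for a single face and label_cuda a one-element tensor holding the class index.

import torch

def accuracy_update(logits, label, total, correct):
    # count one prediction and whether it matches the ground-truth class
    _, predicted = torch.max(logits.data, 1)
    total += label.size(0)
    correct += int(predicted.eq(label.long()).sum().item())
    return total, correct

# inside the face loop:
#     total, correct = accuracy_update(out, label_cuda, total, correct)
# after the loop:
#     test_acc = correct / max(total, 1)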
def main():

    cap = cv2.VideoCapture(
        '/home/admin/face_detection/dataset/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.mp4'
    )

    mog = cv2.createBackgroundSubtractorMOG2(1, 55)

    i = 0  # frame counter; not initialized in the original snippet
    # NOTE: the output-folder suffix p used below is also not defined in this snippet

    while True:

        ret, frame = cap.read()

        if not ret:
            break

        if i % 5:
            i += 1
            continue

        j = str(i)

        cv2.imwrite('image/' + j + '.jpg', frame)

        #image = Image.open('images/test3.jpg')

        image1 = Image.open('image/' + j + '.jpg')

        path = 'image/' + j + '.jpg'

        bounding_boxes, landmarks = detect_faces(image1)

        #image = show_bboxes(image, bounding_boxes, landmarks)

        image2 = show_bboxes(image1, bounding_boxes, landmarks)

        # proceed only if at least one face was detected in this frame
        if len(bounding_boxes) > 0:

            fgmask = mog.apply(frame, 1)

            dilate = cv2.dilate(fgmask, (21, 21), iterations=1)

            (cnts, _) = cv2.findContours(dilate.copy(), cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_SIMPLE)

            ee = open('chayi/' + j + ' chayi' + '.txt', 'a')

            for c in cnts:

                c_area = cv2.contourArea(c)

                if (c_area > 3000):

                    cc = str(c_area)

                    ee.write(cc + '\n')

                    f = open('txt' + p + '/' + j + '.txt', 'a')
                    for b in bounding_boxes:

                        x = ((b[0] + b[2]) / 2) / (image1.size[0])

                        y = (0.5 * b[1] + 0.5 * b[3]) / (image1.size[1])

                        w = ((b[2] - b[0]) * 1.0) / (image1.size[0])

                        h = ((b[3] - b[1]) * 1.0) / (image1.size[1])

                        x = str(x)

                        y = str(y)

                        w = str(w)

                        h = str(h)

                        f.write('0 ' + x + ' ' + y + ' ' + w + ' ' + h + '\n')

                        z = int(j)

                        image1.save('dataset' + p + '/' + j + '.jpg')

                    break

        #image.show()

        #image1.show()

        i += 1
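
The inner loop of main writes one normalized 'class x_center y_center width height' line per box, i.e. YOLO-format labels. The same conversion as a small standalone helper; to_yolo_line is a hypothetical name, not part of the original code.

def to_yolo_line(box, img_w, img_h, cls=0):
    # box is [x1, y1, x2, y2, ...] in pixels; the output values are normalized to [0, 1]
    x = ((box[0] + box[2]) / 2.0) / img_w
    y = ((box[1] + box[3]) / 2.0) / img_h
    w = (box[2] - box[0]) / img_w
    h = (box[3] - box[1]) / img_h
    return '{} {} {} {} {}'.format(cls, x, y, w, h)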
    def __getitem__(self, index):
        if self.mode == 'train':
            # img, target = self.train_data[index], self.train_label[index]
            img = torch.from_numpy(self.img[index]).float()
            # print('img')
            # print(img)
            label = torch.from_numpy(np.asarray(self.label[index])).float()
            # print('label')
            # print(label)
            label_name = label_dict[self.label[index]]
            # print('label_name')
            # print(label_name)
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), axis=2)
            # print('np.concatenate')
            # print(img.shape)
            img = Image.fromarray(np.uint8(img))
            img = img.convert('RGB')
            # print(img.shape)
            plt.imshow(img)
            # plt.show()
            if self.transforms is not None:
                img = self.transforms(img)
                # print('after transform')
                # print(img.shape)
                # print(img)
            return img, label, label_name
        elif self.mode == 'val':
            img = torch.from_numpy(self.img[index]).float()
            # print('img')
            # print(img)
            label = torch.from_numpy(np.asarray(self.label[index])).float()
            # print('label')
            # print(label)
            label_name = label_dict[self.label[index]]
            # print('label_name')
            # print(label_name)
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), axis=2)
            # print('np.concatenate')
            # print(img.shape)
            img = Image.fromarray(np.uint8(img))
            img = img.convert('RGB')
            # print(img.shape)
            plt.imshow(img)
            # plt.show()
            if self.transforms is not None:
                img = self.transforms(img)
                # print('after transform')
                # print(img.shape)
                # print(img)
            return img, label, label_name
        else:
            img = torch.from_numpy(self.img[index]).float()
            # print('img')
            # print(img)
            # print('each label')
            # print(self.label[index])
            # label_array = np.asarray(self.label[index])
            # print('label after conversion to an array')
            # print(label_array)
            label = torch.from_numpy(np.asarray(self.label[index])).float()
            print('converted to tensor')
            print(label)
            label_name = label_dict[self.label[index]]
            # print('label_name')
            # print(label_name)
            img = img[:, :, np.newaxis]
            img = np.concatenate((img, img, img), axis=2)
            # print('np.concatenate')
            # print(img.shape)
            img = Image.fromarray(np.uint8(img))
            img = img.convert('RGB')
            if len(img.size) == 2:
                img = img.convert('RGB')
            else:
                img = img

            h, w = img.size
            bounding_box, landmarks = detect_faces(img)
            print('landmarks')
            print(landmarks)
            print(bounding_box.shape)
            faces_count = landmarks.shape[0]
            if faces_count > 1:
                print('more than one face in the image')
                return
            elx, erx, nx, mlx, mrx, ely, ery, ny, mly, mry = landmarks[0]

            # compute the rotation angle
            angle = calculate_angle(elx, ely, erx, ery)

            # rotate the image
            img_rotated = img.rotate(angle, expand=1)
            ww, hh = img_rotated.size  # width and height of the rotated image

            # landmark positions after alignment
            elx, ely = pos_transform(angle, elx, ely, w, h)
            erx, ery = pos_transform(angle, erx, ery, w, h)
            nx, ny = pos_transform(angle, nx, ny, w, h)
            mlx, mly = pos_transform(angle, mlx, mly, w, h)
            mrx, mry = pos_transform(angle, mrx, mry, w, h)

            # basic parameters
            eye_width = erx - elx  # distance between the two eyes
            ecx, ecy = (elx + erx) / 2.0, (ely + ery) / 2.0  # center of the two eyes
            mouth_width = mrx - mlx  # mouth width
            mcx, mcy = (mlx + mrx) / 2.0, (mly + mry) / 2.0  # mouth center
            em_height = mcy - ecy  # vertical distance from eye center to mouth center
            fcx, fcy = (ecx + mcx) / 2.0, (ecy + mcy) / 2.0  # face center

            # face-only (global) crop
            if eye_width > em_height:
                alpha = eye_width
            else:
                alpha = em_height
            g_beta = 2.0
            g_left = fcx - alpha / 2.0 * g_beta
            g_upper = fcy - alpha / 2.0 * g_beta
            g_right = fcx + alpha / 2.0 * g_beta
            g_lower = fcy + alpha / 2.0 * g_beta
            g_face = img_rotated.crop((g_left, g_upper, g_right, g_lower))

            # print('g_face')
            # print(type(g_face))
            # print(img.size)

            # g_face = Image.fromarray(np.uint8(g_face))
            # g_face = g_face.convert('RGB')
            plt.imshow(g_face)
            plt.show()
            if self.transforms is not None:
                g_face = self.transforms(g_face)
                # print('after transform')
                # print(img.shape)
                # print(img)
            return g_face, label, label_name
Example 13
def main():
    image = Image.open('images/test3.jpg')
    bounding_boxes, landmarks = detect_faces(image)
    image = show_bboxes(image, bounding_boxes, landmarks)
    image.save("result.bmp")