            x1 = cx - size // 2
            x2 = x1 + size
            y1 = cy - size // 2
            y2 = y1 + size
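            # center a size x size square on (cx, cy); dx/dy and edx/edy below
            # record how far the box spills past the left/top and right/bottom
            # image borders so the crop can be padded back to a full square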

            dx = max(0, -x1)
            dy = max(0, -y1)
            x1 = max(0, x1)
            y1 = max(0, y1)

            edx = max(0, x2 - width)
            edy = max(0, y2 - height)
            x2 = min(width, x2)
            y2 = min(height, y2)
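            # BBox is constructed from [left, right, top, bottom]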
            new_bbox = list(map(int, [x1, x2, y1, y2]))
            new_bbox = BBox(new_bbox)
            cropped = img[new_bbox.top:new_bbox.bottom,
                          new_bbox.left:new_bbox.right]
            if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
                cropped = cv2.copyMakeBorder(cropped,
                                             int(dy), int(edy), int(dx),
                                             int(edx), cv2.BORDER_CONSTANT, 0)
            # skip degenerate crops before resizing (resizing an empty array
            # would raise an error)
            if cropped.shape[0] <= 0 or cropped.shape[1] <= 0:
                continue
            cropped_face = cv2.resize(cropped, (out_size, out_size))
            #test_face = cv2.resize(cropped_face,(out_size,out_size))
            cropped_face = cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB)
            cropped_face = Image.fromarray(cropped_face)
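            # `resize` and `to_tensor` are assumed to be torchvision-style
            # transforms defined elsewhere in this example: scale to the
            # network input size and convert the PIL image to a CHW tensor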
            test_face = resize(cropped_face)
            test_face = to_tensor(test_face)
Example #2
def face_swap(img, swap_area):
    img = cv2.resize(img, dsize=(0, 0), fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
    height, width, channels = img.shape
    img_new_face = np.zeros((height, width, channels), np.uint8)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    out_size = 112
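    # load the pretrained PFLD landmark regressor (CPU-only inference)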
    model = PFLDInference()
    checkpoint = torch.load('checkpoint/pfld_model_best.pth.tar', map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])
    model = model.eval()

    for k in range(0, len(swap_area)):
        # analyze the face region to swap in the target image; swap_area boxes
        # are given at the original resolution, so scale them by 2 to match the
        # upscaled frame
        x1 = swap_area[k][0]*2
        y1 = swap_area[k][1]*2
        x2 = swap_area[k][2]*2
        y2 = swap_area[k][3]*2

        dx = max(0, -x1)
        dy = max(0, -y1)
        x1 = max(0, x1)
        y1 = max(0, y1)

        edx = max(0, x2 - width)
        edy = max(0, y2 - height)

        new_bbox = list(map(int, [x1, x2, y1, y2]))
        new_bbox = BBox(new_bbox)
        cropped = img[new_bbox.top:new_bbox.bottom, new_bbox.left:new_bbox.right]
        if (dx > 0 or dy > 0 or edx > 0 or edy > 0):
            cropped = cv2.copyMakeBorder(cropped, int(dy), int(edy), int(dx), int(edx), cv2.BORDER_CONSTANT, 0)
        # skip degenerate crops before resizing
        if cropped.shape[0] <= 0 or cropped.shape[1] <= 0:
            continue
        cropped_face = cv2.resize(cropped, (out_size, out_size))
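        # PFLD preprocessing: scale to [0, 1], HWC -> CHW, add a batch dimension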
        test_face = cropped_face.copy()
        test_face = test_face / 255.0
        test_face = test_face.transpose((2, 0, 1))
        test_face = test_face.reshape((1,) + test_face.shape)
        input = torch.from_numpy(test_face).float()
        input = torch.autograd.Variable(input)


        landmark = model(input).cpu().data.numpy()
        landmark = landmark.reshape(-1, 2)
        landmark = new_bbox.reprojectLandmark(landmark)
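        # `landmark` is now in full-image coordinates, reprojected from the
        # 112x112 crop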

        points = np.array(landmark, np.int32)
        convexhull = cv2.convexHull(points)
        landmarks_points = []

        for x, y in landmark:
            landmarks_points.append((int(x), int(y)))
        img2 = cv2.imread("samples/12--Group/newface2.jpg")
        height2, width2, _ = img2.shape
        img2 = cv2.resize(img2, dsize=(0, 0), fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
        img_gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
        mask = np.zeros_like(img_gray2)

        new_face = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
        new_face = Image.fromarray(new_face)
        face2 = list(map(int, detect_faces(new_face)[0]))
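        # first face detected in the replacement image; its box is read as
        # [x1, y1, x2, y2] and rearranged into BBox order below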

        new_bbox2 = list(map(int, [face2[0], face2[2], face2[1], face2[3]]))
        new_bbox2 = BBox(new_bbox2)
        cropped2 = img2[new_bbox2.top:new_bbox2.bottom, new_bbox2.left:new_bbox2.right]
        cropped2_face = cv2.resize(cropped2, (out_size, out_size))

        test_face2 = cropped2_face.copy()
        test_face2 = test_face2 / 255.0
        test_face2 = test_face2.transpose((2, 0, 1))
        test_face2 = test_face2.reshape((1,) + test_face2.shape)
        input2 = torch.from_numpy(test_face2).float()
        input2 = torch.autograd.Variable(input2)

        landmark2 = model(input2).cpu().data.numpy()
        landmark2 = landmark2.reshape(-1, 2)
        landmark2 = new_bbox2.reprojectLandmark(landmark2)
        points2 = np.array(landmark2, np.int32)

        convexhull2 = cv2.convexHull(points2)
        cv2.fillConvexPoly(mask, convexhull2, 255)

        rect2 = cv2.boundingRect(convexhull2)
        subdiv = cv2.Subdiv2D(rect2)

        landmarks_points2 = []
        for x, y in landmark2:
            landmarks_points2.append((int(x), int(y)))
        subdiv.insert(landmarks_points2)
        triangles = subdiv.getTriangleList()
        triangles = np.array(triangles, dtype=np.int32)
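        # each triangle is a flat (x1, y1, x2, y2, x3, y3) row; map every
        # vertex back to its landmark index so the same triangles can be cut
        # from the destination face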
        indexes_triangles = []
        for t in triangles:
            pt1 = (t[0], t[1])
            pt2 = (t[2], t[3])
            pt3 = (t[4], t[5])

            index_pt1 = np.where((points2 == pt1).all(axis=1))
            index_pt1 = extract_index_nparray(index_pt1)

            index_pt2 = np.where((points2 == pt2).all(axis=1))
            index_pt2 = extract_index_nparray(index_pt2)

            index_pt3 = np.where((points2 == pt3).all(axis=1))
            index_pt3 = extract_index_nparray(index_pt3)

            if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None:
                triangle = [index_pt1, index_pt2, index_pt3]
                indexes_triangles.append(triangle)

        img_face_mask = np.zeros_like(img_gray)
        img_head_mask = cv2.fillConvexPoly(img_face_mask, convexhull, 255)
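        # img_head_mask is the convex-hull mask of the destination face; it is
        # reused for the final blend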

        for triangle_index in indexes_triangles:
            # triangle from the replacement face (face2 / img2)
            tr2_pt1 = landmarks_points2[triangle_index[0]]
            tr2_pt2 = landmarks_points2[triangle_index[1]]
            tr2_pt3 = landmarks_points2[triangle_index[2]]
            triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32)

            rect2 = cv2.boundingRect(triangle2)
            (x, y, w, h) = rect2
            cropped_triangle = img2[y: y + h, x: x + w]

            cropped_tr2_mask = np.zeros((h, w), np.uint8)

            points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y],
                                [tr2_pt2[0] - x, tr2_pt2[1] - y],
                                [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32)

            cv2.fillConvexPoly(cropped_tr2_mask, points2, 255)

            # matching triangle on the destination face (img)
            tr1_pt1 = landmarks_points[triangle_index[0]]
            tr1_pt2 = landmarks_points[triangle_index[1]]
            tr1_pt3 = landmarks_points[triangle_index[2]]
            triangle = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32)

            rect1 = cv2.boundingRect(triangle)
            (x, y, w, h) = rect1

            cropped_tr1_mask = np.zeros((h, w), np.uint8)

            points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y],
                               [tr1_pt2[0] - x, tr1_pt2[1] - y],
                               [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32)

            cv2.fillConvexPoly(cropped_tr1_mask, points, 255)

            # Warp triangles
            points2 = np.float32(points2)
            points = np.float32(points)
            M = cv2.getAffineTransform(points2, points)
            warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h))
            warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr1_mask)

            # reconstructing the destination face
            img_new_face_rect_area = img_new_face[y: y + h, x: x + w]
            img_new_face_rect_area_gray = cv2.cvtColor(img_new_face_rect_area, cv2.COLOR_BGR2GRAY)
            _, mask_triangles_designed = cv2.threshold(img_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV)
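            # only draw into pixels that are still empty, so adjacent warped
            # triangles do not overlap and create bright seams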
            warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed)

            img_new_face_rect_area = cv2.add(img_new_face_rect_area, warped_triangle)
            img_new_face[y: y + h, x: x + w] = img_new_face_rect_area

        img_face_mask = cv2.bitwise_not(img_head_mask)
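        # black out the original face region, then drop in the reconstructed
        # face (the median blur softens triangle seams)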
        img_head_noface = cv2.bitwise_and(img, img, mask=img_face_mask)
        img_new_face = cv2.medianBlur(img_new_face, 3)
        result = cv2.add(img_head_noface, img_new_face)

        (x, y, w, h) = cv2.boundingRect(convexhull)
        center_face = (int((x + x + w) / 2), int((y + y + h) / 2))
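        # Poisson-blend (seamlessClone) the swapped face onto the original frame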
        img = cv2.seamlessClone(result, img, img_head_mask, center_face, cv2.MIXED_CLONE)
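    # undo the initial 2x upscale before returning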
    img = cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5, interpolation=cv2.INTER_LINEAR)
    # cv2.imwrite(os.path.join('results', "6.jpg"), img)
    # cv2.imshow("a", img)
    # cv2.waitKey(0)
    return img
Example #3
def generate_h5py(data, h5_path, txt_path, augment=False):
    '''
    Get images and turn them into h5py files.
    Input:
        - data: a list of (imgpath, bbox, landmark) tuples
        - h5_path: output h5py file name
        - txt_path: text file that records the h5 file
    '''
    F_imgs = []
    F_landmarks = []
    img_size = 40
    num_landmark = 68 * 2
    num_sample = 0
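    # collect cropped grayscale faces and their normalized landmarks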
    for (imgpath, bbox, landmarkGt) in data:
        img = cv2.imread(imgpath, cv2.IMREAD_GRAYSCALE)
        print("processing %s" % imgpath)
        if not check_bbox(img, bbox):
            print('BBox out of range.')
            continue
        face = img[bbox.top:bbox.bottom, bbox.left:bbox.right]

        if augment:
            # flip the face
            #face_flip, landmark_flip = flip(face, landmarkGt)
            #face_flip = cv2.resize(face_flip, (img_size,img_size)).reshape(1,img_size,img_size)

            #fit=0
            #for i in range(0,num_landmark/2):
            #    if landmark_flip[i,0]<0 or landmark_flip[i,0]>1 or landmark_flip[i,1]<0 or landmark_flip[i,1]>1:
            #        fit=1
            #        break
            #if fit==0:
            #print landmark_flipped_alpha
            #    F_imgs.append(face_flip)
            #    F_landmarks.append(landmark_flip.reshape(num_landmark))

            #print landmark_flip
            #angles=[5,10,15,20,25,30,-5,-10,-15,-20,-25,-30]
            #for alpha in angles:
            # rotate the face
            #face_rotated_by_alpha, landmark_rotated_alpha = rotate(img, bbox,bbox.reprojectLandmark(landmarkGt), alpha)
            #landmark_rotated_alpha = bbox.projectLandmark(landmark_rotated_alpha)
            #face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (img_size,img_size))

            #fit=0
            #for i in range(0,num_landmark/2):
            #    if landmark_rotated_alpha[i,0]<0 or landmark_rotated_alpha[i,0]>1 or landmark_rotated_alpha[i,1]<0 or landmark_rotated_alpha[i,1]>1:
            #        fit=1
            #        break
            #if fit==0:
            #print landmark_rotated_alpha
            #    F_imgs.append(face_rotated_by_alpha.reshape((1, img_size,img_size)))
            #    F_landmarks.append(landmark_rotated_alpha.reshape(num_landmark))

            #print landmark_rotated_5
            # flip with the rotation
            #face_flipped_alpha, landmark_flipped_alpha = flip(face_rotated_by_alpha, landmark_rotated_alpha)
            #face_flipped_alpha = cv2.resize(face_flipped_alpha, (img_size,img_size))
            #fit=0
            #for i in range(0,num_landmark/2):
            #    if landmark_flipped_alpha[i,0]<0 or landmark_flipped_alpha[i,0]>1 or landmark_flipped_alpha[i,1]<0 or landmark_flipped_alpha[i,1]>1:
            #        fit=1
            #        break
            #if fit==0:
            #print landmark_flipped_alpha
            #    F_imgs.append(face_flipped_alpha.reshape((1, img_size,img_size)))
            #    F_landmarks.append(landmark_flipped_alpha.reshape(num_landmark))

            # debug
            #center = ((bbox.left+bbox.right)/2, (bbox.top+bbox.bottom)/2)
            #rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)
            #img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, img.shape)
            #landmark_rotated_alpha = bbox.reprojectLandmark(landmark_rotated_alpha) # will affect the flip "landmark_rotated_alpha"
            #img_rotated_by_alpha = drawLandmark(img_rotated_by_alpha, bbox, landmark_rotated_alpha)
            #fp = 'debug_results/'+ str(num_sample)+'.jpg'
            #cv2.imwrite(fp, img_rotated_by_alpha)
            #num_sample=num_sample+1

            # debug

            #use bounding box perturbation
            y = bbox.top
            x = bbox.left
            h = bbox.bottom - bbox.top
            w = bbox.right - bbox.left
            # original landmark position
            landmarkGT_scale = bbox.reprojectLandmark(landmarkGt)
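            # perturb the ground-truth box over a grid of scales and x/y shifts
            # to synthesize extra training crops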
            for cur_scale in [0.83, 0.91, 1.0, 1.10, 1.21]:
                for cur_x in [-0.17, 0, 0.17]:
                    for cur_y in [-0.17, 0, 0.17]:
                        s_n = 1 / cur_scale
                        x_n = -cur_x / cur_scale
                        y_n = -cur_y / cur_scale

                        x_temp = int(x - (x_n * w / s_n))
                        y_temp = int(y - (y_n * h / s_n))
                        w_temp = int(w / s_n)
                        h_temp = int(h / s_n)
                        # generate new bounding box
                        bbox_left = x_temp
                        bbox_right = x_temp + w_temp
                        bbox_top = y_temp
                        bbox_bottom = y_temp + h_temp
                        new_bbox = list(map(int, [bbox_left, bbox_right, bbox_top, bbox_bottom]))
                        new_bbox = BBox(new_bbox)
                        if not check_bbox(img, new_bbox):
                            print('BBox out of range.')
                            continue

                        # project landmark onto the new bounding box
                        new_landmarkGT = new_bbox.projectLandmark(
                            landmarkGT_scale)
                        new_landmarkGT_org = new_landmarkGT.copy()

                        angles = [
                            5, 10, 15, 20, 25, 30, -5, -10, -15, -20, -25, -30
                        ]
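                        # rotation augmentation: keep a rotated sample only if
                        # every landmark stays within the unit square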
                        for alpha in angles:
                            # rotate the face
                            face_rotated_by_alpha, landmark_rotated_alpha = rotate(
                                img, new_bbox,
                                new_bbox.reprojectLandmark(new_landmarkGT),
                                alpha)
                            landmark_rotated_alpha = new_bbox.projectLandmark(
                                landmark_rotated_alpha)
                            face_rotated_by_alpha = cv2.resize(
                                face_rotated_by_alpha, (img_size, img_size))

                            fit = 0
                            for i in range(0, num_landmark // 2):
                                if (landmark_rotated_alpha[i, 0] < 0
                                        or landmark_rotated_alpha[i, 0] > 1
                                        or landmark_rotated_alpha[i, 1] < 0
                                        or landmark_rotated_alpha[i, 1] > 1):
                                    fit = 1
                                    break
                            if fit == 0:
                                #print landmark_rotated_alpha
                                F_imgs.append(face_rotated_by_alpha.reshape(
                                    (1, img_size, img_size)))
                                F_landmarks.append(
                                    landmark_rotated_alpha.reshape(num_landmark))

                                # debug
                                #center = ((new_bbox.left+new_bbox.right)/2, (new_bbox.top+new_bbox.bottom)/2)
                                #rot_mat = cv2.getRotationMatrix2D(center, alpha, 1)
                                #img_rotated_by_alpha = cv2.warpAffine(img, rot_mat, img.shape)
                                #landmark_rotated_alpha = new_bbox.reprojectLandmark(landmark_rotated_alpha) # will affect the flip "landmark_rotated_alpha"
                                #img_rotated_by_alpha = drawLandmark(img_rotated_by_alpha, new_bbox, landmark_rotated_alpha)
                                #fp = 'debug_results/'+ str(num_sample)+'.jpg'
                                #cv2.imwrite(fp, img_rotated_by_alpha)
                                #num_sample=num_sample+1
                                # debug

                        # project landmark onto the new bounding box
                        landmarkGT_project = new_landmarkGT_org.copy()  # error is fixed here
                        #print landmarkGT_project

                        fit = 0
                        for i in range(0, num_landmark // 2):
                            if (landmarkGT_project[i, 0] < 0
                                    or landmarkGT_project[i, 0] > 1
                                    or landmarkGT_project[i, 1] < 0
                                    or landmarkGT_project[i, 1] > 1):
                                fit = 1
                                break
                        if fit == 0:
                            #print landmarkGT_project
                            #if landmarkGT_project[i,0]<0 or landmarkGT_project[i,0]>1 or landmarkGT_project[i,1]<0 or landmarkGT_project[i,1]>1:
                            #    pdb.set_trace()
                            cropped_face = img[new_bbox.top:new_bbox.bottom,
                                               new_bbox.left:new_bbox.right]
                            cropped_face = cv2.resize(
                                cropped_face, (img_size, img_size)).reshape(
                                    1, img_size, img_size)
                            F_imgs.append(cropped_face)
                            F_landmarks.append(
                                landmarkGT_project.reshape(num_landmark))

        # debug on the bounding box perturbation
        #landmark_org = new_bbox.reprojectLandmark(landmarkGT_project) # will affect the flip "landmark_rotated_alpha"
        #img_debug = drawLandmark(img, new_bbox, landmark_org)
        #fp = 'debug_results/'+ str(num_sample)+'_bbox.jpg'
        #cv2.imwrite(fp, img_debug)
        #num_sample=num_sample+1

        face = cv2.resize(face,
                          (img_size, img_size)).reshape(1, img_size, img_size)

        fit = 0
        for i in range(0, num_landmark // 2):
            if (landmarkGt[i, 0] < 0 or landmarkGt[i, 0] > 1
                    or landmarkGt[i, 1] < 0 or landmarkGt[i, 1] > 1):
                fit = 1
                break
        if fit == 0:
            #print landmark_flipped_alpha
            F_imgs.append(face)
            F_landmarks.append(landmarkGt.reshape(num_landmark))

    F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
    F_imgs = processImage(F_imgs)
    F_imgs, F_landmarks = shuffle(F_imgs, F_landmarks, random_state=42)

    with h5py.File(h5_path, 'w') as f:
        f['data'] = F_imgs.astype(np.float32)
        f['landmarks'] = F_landmarks.astype(np.float32)
    with open(txt_path, 'w') as f:
        f.write(h5_path)
        f.write(str(len(F_landmarks)))

# --- separate snippet: dlib face box + gender/age demo (the enclosing loop
# and setup are not shown in this excerpt) ---
#y_min=face[1]
#x_max=face[0] + face[2]
#y_max=face[1]+face[3]
# used by dlib
x_min = face.left()
y_min = face.top()
x_max = face.right()
y_max = face.bottom()
w, h = x_max - x_min, y_max - y_min
w = h = min(w, h)
# expand the square dlib box by `ratio` on every side before cropping
x_new = x_min - w * ratio
y_new = y_min - h * ratio
w_new = w * (1 + 2 * ratio)
h_new = h * (1 + 2 * ratio)
new_bbox = list(map(int, [x_new, x_new + w_new, y_new, y_new + h_new]))
new_bbox = BBox(new_bbox)
#print bbox_left,bbox_top,bbox_right,bbox_bottom
if not check_bbox(gray.transpose(), new_bbox):
    print('BBox out of range.')
    continue
face = load_test_img(gray, new_bbox)
#landmark = net.forward(face, 'Dense2')
#print landmark
#landmark = new_bbox.reprojectLandmark(landmark)
gender = net.forward_gender(face, 'prob_gender')
age = net.forward_age(face, 'prob_age')
img = drawAttribute(img, new_bbox, gender, age)
#img = drawLandmark_multiple(img, new_bbox, landmark)

cv2.imwrite('demo_result.jpg', img)