Example #1
 def __init__(self, BasePath, ModelPath, NumFeatures):
     self.BasePath = BasePath
     self.ModelPath = ModelPath
     InputImageList = []
     for filename in sorted(glob.glob(self.BasePath + '/*.jpg')):
         ImageTemp = cv2.imread(filename)
         InputImageList.append(ImageTemp)
     self.NumFeatures = NumFeatures
     self.Images = np.array(InputImageList)
     self.NumImages = len(InputImageList)
     self.HelperFunctions = HelperFunctions()
     self.Model = DeepNetwork()
     self.ImageUtils = ImageUtils()
     self.ImageSize = InputImageList[0].shape
     self.ImgPH = tf.placeholder('float', shape=(1, 128, 128, 2))
Example #2
 def __init__(self, ModelPath):
     self.HelperFunctions = ImageUtils()
     self.FaceDetector = dlib.get_frontal_face_detector()
     self.ShapePredictor = dlib.shape_predictor(ModelPath)
Example #3
class FaceSwapUtils:
    def __init__(self, ModelPath):
        self.HelperFunctions = ImageUtils()
        self.FaceDetector = dlib.get_frontal_face_detector()
        self.ShapePredictor = dlib.shape_predictor(ModelPath)

    def getTriangulationCorrespondence(self, LandMarkPointsTarget,
                                       DelaunayTriangleList,
                                       LandMarkPointsSrc):
        DelaunayTriangleListSrc = []
        resultShape = DelaunayTriangleList.shape

        for triangle in DelaunayTriangleList:
            a = (triangle[0], triangle[1])
            idA = LandMarkPointsTarget.index(a)
            b = (triangle[2], triangle[3])
            idB = LandMarkPointsTarget.index(b)
            c = (triangle[4], triangle[5])
            idC = LandMarkPointsTarget.index(c)
            triangleIndices = (LandMarkPointsSrc[idA], LandMarkPointsSrc[idB],
                               LandMarkPointsSrc[idC])
            DelaunayTriangleListSrc.append(triangleIndices)
        DelaunayTriangleListSrc = np.array(DelaunayTriangleListSrc)
        DelaunayTriangleListSrc = np.reshape(DelaunayTriangleListSrc,
                                             resultShape)

        return DelaunayTriangleListSrc

    def getMatrixBary(self, DelaunayTriangleList):
        BMatrix = []
        BInvMatrix = []
        for triangle in DelaunayTriangleList:
            ax = triangle[0]
            ay = triangle[1]
            bx = triangle[2]
            by = triangle[3]
            cx = triangle[4]
            cy = triangle[5]
            B = np.array([[ax, bx, cx], [ay, by, cy], [1, 1, 1]])
            BMatrix.append(B)
            BInv = np.linalg.inv(B)
            BInvMatrix.append(BInv)
        BMatrix = np.array(BMatrix)
        BInvMatrix = np.array(BInvMatrix)

        return BMatrix, BInvMatrix

    def Blending(self, TargetImg, SrcImg, Mask):
        radius = 3
        kernel = np.ones((radius, radius), np.uint8)
        Mask = cv2.dilate(Mask, kernel, iterations=1)
        r = cv2.boundingRect(Mask)
        center = ((r[0] + int(r[2] / 2), r[1] + int(r[3] / 2)))
        return cv2.seamlessClone(TargetImg, SrcImg, Mask, center,
                                 cv2.NORMAL_CLONE)

    def DetectFacialLandmarks(self, InputImage):
        GrayImage = cv2.cvtColor(InputImage, cv2.COLOR_BGR2GRAY)
        dets = self.FaceDetector(GrayImage, 1)
        isFace = False
        for (_, det) in enumerate(dets):
            isFace = True
            shape = self.ShapePredictor(GrayImage, det)
            shape = self.HelperFunctions.dlibFODObjectToNumpy(shape)
            DelaunayBBox = (0, 0, InputImage.shape[1], InputImage.shape[0])
            LandMarkPoints = []

            for (x, y) in shape:
                LandMarkPoints.append((x, y))
                if (debug):
                    print("x: {} y: {}".format(x, y))
                    cv2.circle(InputImage, (x, y), 7, (0, 255, 0), -1)

            if (debug):
                print("Face Landmark Points: {}".format(LandMarkPoints))
        if (isFace):
            return isFace, DelaunayBBox, LandMarkPoints
        else:
            return isFace, [], []

    def DetectFacialLandmarks2(self, InputImage):
        GrayImage = cv2.cvtColor(InputImage, cv2.COLOR_BGR2GRAY)
        dets = self.FaceDetector(GrayImage, 1)
        isFace = False
        LandMarkPoints = []
        for (idX, det) in enumerate(dets):
            isFace = True
            shape = self.ShapePredictor(GrayImage, det)
            shape = self.HelperFunctions.dlibFODObjectToNumpy(shape)
            DelaunayBBox = (0, 0, InputImage.shape[1], InputImage.shape[0])
            LandMarkPoints1 = []

            for (x, y) in shape:
                LandMarkPoints1.append((x, y))
                if (debug):
                    print("x: {} y: {}".format(x, y))
                    cv2.circle(InputImage, (x, y), 7, (0, 255, 0), -1)
            LandMarkPoints.append(LandMarkPoints1)
            if (idX == 1):
                break
            if (debug):
                print("Face Landmark Points: {}".format(LandMarkPoints))
        if (isFace):
            return isFace, DelaunayBBox, LandMarkPoints
        else:
            return isFace, [], []

    def GetDelaunayTriangulation(self, DelaunayBBox, LandMarkPoints,
                                 InputImage):
        subdiv = cv2.Subdiv2D(DelaunayBBox)
        subdiv.insert(LandMarkPoints)
        DelaunayTriangleList = subdiv.getTriangleList()
        DelaunayTriangleList = DelaunayTriangleList[DelaunayTriangleList.min(
            axis=1) >= 0, :]
        DelaunayTriangleList = DelaunayTriangleList[DelaunayTriangleList.max(
            axis=1) <= max(InputImage.shape[0], InputImage.shape[1]), :]
        DelaunayTriangleListN = []
        for triangle in DelaunayTriangleList:
            a = (triangle[0], triangle[1])
            b = (triangle[2], triangle[3])
            c = (triangle[4], triangle[5])
            if (self.HelperFunctions.isGoodAngle(a, b, c, 140)):
                DelaunayTriangleListN.append(triangle)
        DelaunayTriangleListN = np.array(DelaunayTriangleListN)

        if (debug):
            # print("DelaunayTriangleList: {}".format(DelaunayTriangleList))
            # print(type(DelaunayTriangleList))
            for triangle in DelaunayTriangleList:
                a = (triangle[0], triangle[1])
                b = (triangle[2], triangle[3])
                c = (triangle[4], triangle[5])
                cv2.line(InputImage, a, b, (0, 0, 255), 1, cv2.LINE_AA, 0)
                cv2.line(InputImage, b, c, (0, 0, 255), 1, cv2.LINE_AA, 0)
                cv2.line(InputImage, c, a, (0, 0, 255), 1, cv2.LINE_AA, 0)
            self.HelperFunctions.ShowImage(InputImage, 'Facial Landmarks')
        return DelaunayTriangleList
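
A minimal numpy sketch of the barycentric test that getMatrixBary and the warping loops rely on; the triangle and pixel below are hypothetical, and the matrix convention is the one used above.

import numpy as np

# Hypothetical triangle, flattened as (ax, ay, bx, by, cx, cy) like getTriangleList() output.
triangle = [10.0, 10.0, 60.0, 12.0, 30.0, 50.0]
ax, ay, bx, by, cx, cy = triangle
B = np.array([[ax, bx, cx], [ay, by, cy], [1, 1, 1]])
BInv = np.linalg.inv(B)

# A candidate pixel: its barycentric coordinates are BInv * [x, y, 1]^T, and it lies
# inside the triangle exactly when all three coordinates fall in [0, 1].
x, y = 30, 25
bary = np.matmul(BInv, np.array([[x], [y], [1]]))
inside = bool(np.all(bary >= 0) and np.all(bary <= 1))
print(inside)  # True for this pixel
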
Example #4
 def __init__(self):
     self.ImageUtils = ImageUtils()
Example #5
class Test:
    def __init__(self):
        self.ImageUtils = ImageUtils()

    def SetupAll(self, BasePath):
        """
        Inputs: 
        BasePath - Path to images
        Outputs:
        ImageSize - Size of the Image
        DataPath - Paths of all images where testing will be run on
        """
        # Image Input Shape
        ImageSize = [32, 32, 3]
        DataPath = []
        NumImages = len(glob.glob(BasePath + '*.jpg'))
        SkipFactor = 1
        for count in range(1, NumImages + 1, SkipFactor):
            DataPath.append(BasePath + str(count) + '.jpg')

        return ImageSize, DataPath

    def ReadImages(self, ImageSize, DataPath):
        """
        Inputs: 
        ImageSize - Size of the Image
        DataPath - Paths of all images where testing will be run on
        Outputs:
        I1Combined - I1 image after any standardization and/or cropping/resizing to ImageSize
        I1 - Original I1 image for visualization purposes only
        """

        ImageName = DataPath

        I1 = cv2.imread(ImageName)

        if (I1 is None):
            # OpenCV returns empty list if image is not read!
            print('ERROR: Image I1 cannot be read')
            sys.exit()

        ##########################################################################
        # Add any standardization or cropping/resizing if used in Training here!
        ##########################################################################

        Im = self.ImageUtils.PreProcess(I1, 640, 480)
        I1S, H4PtTruth1 = self.ImageUtils.CreateTrainingData(Im, 256, 256, 64)
        I2S, H4PtTruth2 = self.ImageUtils.CreateTrainingData(Im, 256, 256, 64)

        I1Combined = np.expand_dims(I1S, axis=0)

        return I1Combined, I1

    def TestOperation(self, ImgPH, ImageSize, ModelPath, DataPath,
                      LabelsPathPred):
        """
        Inputs: 
        ImgPH is the Input Image placeholder
        ImageSize is the size of the image
        ModelPath - Path to load trained model from
        DataPath - Paths of all images where testing will be run on
        LabelsPathPred - Path to save predictions
        Outputs:
        Predictions written to ./TxtFiles/PredOut.txt
        """
        Length = ImageSize[0]
        # Predict output with forward pass, MiniBatchSize for Test is 1
        _, prSoftMaxS = HomographyModel(ImgPH, ImageSize, 1)

        # Setup Saver
        Saver = tf.train.Saver()

        with tf.Session() as sess:
            Saver.restore(sess, ModelPath)
            print('Number of parameters in this model are %d ' % np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ]))

            OutSaveT = open(LabelsPathPred, 'w')

            for count in tqdm(range(np.size(DataPath))):
                DataPathNow = DataPath[count]
                Img, ImgOrg = self.ReadImages(ImageSize, DataPathNow)
                FeedDict = {ImgPH: Img}
                PredT = np.argmax(sess.run(prSoftMaxS, FeedDict))

                OutSaveT.write(str(PredT) + '\n')

            OutSaveT.close()

    def ReadLabels(self, LabelsPathTest, LabelsPathPred):
        if (not (os.path.isfile(LabelsPathTest))):
            print('ERROR: Test Labels do not exist in ' + LabelsPathTest)
            sys.exit()
        else:
            LabelTest = open(LabelsPathTest, 'r')
            LabelTest = LabelTest.read()
            LabelTest = map(float, LabelTest.split())

        if (not (os.path.isfile(LabelsPathPred))):
            print('ERROR: Pred Labels do not exist in ' + LabelsPathPred)
            sys.exit()
        else:
            LabelPred = open(LabelsPathPred, 'r')
            LabelPred = LabelPred.read()
            LabelPred = map(float, LabelPred.split())

        return LabelTest, LabelPred
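
A minimal driver sketch for the Test class above, assuming TensorFlow 1.x and that the placeholder shape built from SetupAll matches what HomographyModel expects; the checkpoint, data, and label paths are placeholders, not the project's actual paths.

import tensorflow as tf

# Placeholder paths below; adjust to the actual data, checkpoint, and label locations.
tester = Test()
ImageSize, DataPath = tester.SetupAll('./Data/Test/')
ImgPH = tf.placeholder(tf.float32, shape=(1, ImageSize[0], ImageSize[1], ImageSize[2]))
tester.TestOperation(ImgPH, ImageSize, './Checkpoints/model.ckpt', DataPath,
                     './TxtFiles/PredOut.txt')
LabelsTrue, LabelsPred = tester.ReadLabels('./TxtFiles/LabelsTest.txt',
                                           './TxtFiles/PredOut.txt')
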
Example #6
 def __init__(self, ModelPath):
     self.ModelPath = ModelPath
     self.HelperFunctions = ImageUtils()
     self.FaceSwapFunctions = FaceSwapUtils(self.ModelPath)
Example #7
class FaceSwapVideoFaces:
    def __init__(self, ModelPath):
        self.ModelPath = ModelPath
        self.HelperFunctions = ImageUtils()
        self.FaceSwapFunctions = FaceSwapUtils(self.ModelPath)

    def FaceWarpByTriangulation(self, frame, weight):
        TargetImg = frame.copy()
        SrcImg = frame.copy()
        CloneSrcImg = TargetImg.copy()
        Mask = np.zeros(TargetImg.shape, TargetImg.dtype)

        isFace1, DelaunayBBoxTarget, LandMarkPoints = self.FaceSwapFunctions.DetectFacialLandmarks2(
            TargetImg)
        if (not isFace1 or len(LandMarkPoints) < 2):
            return TargetImg
        LandMarkPointsTarget = LandMarkPoints[0]
        LandMarkPointsSrc = LandMarkPoints[1]

        DelaunayTrianglesTarget = self.FaceSwapFunctions.GetDelaunayTriangulation(
            DelaunayBBoxTarget, LandMarkPointsTarget, TargetImg)
        DelaunayTrianglesTarget2 = self.FaceSwapFunctions.GetDelaunayTriangulation(
            DelaunayBBoxTarget, LandMarkPointsSrc, TargetImg)

        DelaunayTrianglesSrc = self.FaceSwapFunctions.getTriangulationCorrespondence(
            LandMarkPointsTarget, DelaunayTrianglesTarget, LandMarkPointsSrc)

        points = np.array(LandMarkPointsTarget)
        hull = cv2.convexHull(points)
        color = (255, 255, 255)
        cv2.fillPoly(Mask, [hull], color)
        Mask = cv2.cvtColor(Mask, cv2.COLOR_BGR2GRAY)
        # self.HelperFunctions.ShowImage(Mask, 'Mask')

        _, BInvMatrix = self.FaceSwapFunctions.getMatrixBary(
            DelaunayTrianglesTarget)
        AMatrix, _ = self.FaceSwapFunctions.getMatrixBary(DelaunayTrianglesSrc)

        triangle_number = 0
        for triangle in DelaunayTrianglesTarget:
            xMin, yMin, xMax, yMax = self.HelperFunctions.getTriangleBBox(
                triangle)
            BInv = BInvMatrix[triangle_number]
            for x in range(int(xMin), int(xMax + 1)):
                for y in range(int(yMin), int(yMax + 1)):
                    BarycentricCoordinatesTarget = np.matmul(
                        BInv, np.array([[x], [y], [1]]))
                    if ((BarycentricCoordinatesTarget[0] >= 0
                         and BarycentricCoordinatesTarget[0] <= 1)
                            and (BarycentricCoordinatesTarget[1] >= 0
                                 and BarycentricCoordinatesTarget[1] <= 1)
                            and (BarycentricCoordinatesTarget[2] >= 0
                                 and BarycentricCoordinatesTarget[2] <= 1)):
                        PixelLocSrc = np.matmul(AMatrix[triangle_number],
                                                BarycentricCoordinatesTarget)
                        PixelLocSrc = np.divide(PixelLocSrc, PixelLocSrc[2])
                        TargetImg[y][x] = weight * SrcImg[int(
                            PixelLocSrc[1])][int(PixelLocSrc[0])] + (
                                1 - weight) * TargetImg[y][x]
            triangle_number += 1
        TargetImg1 = self.FaceSwapFunctions.Blending(TargetImg, CloneSrcImg,
                                                     Mask)
        CloneSrcImg = TargetImg1
        # self.HelperFunctions.ShowImage(TargetImg1, 'TargetImg1')
        #################################

        Mask = np.zeros(TargetImg1.shape, TargetImg1.dtype)

        DelaunayTrianglesSrc = self.FaceSwapFunctions.getTriangulationCorrespondence(
            LandMarkPointsSrc, DelaunayTrianglesTarget2, LandMarkPointsTarget)

        points = np.array(LandMarkPointsSrc)
        hull = cv2.convexHull(points)
        color = (255, 255, 255)
        cv2.fillPoly(Mask, [hull], color)
        Mask = cv2.cvtColor(Mask, cv2.COLOR_BGR2GRAY)
        # self.HelperFunctions.ShowImage(Mask, 'Mask')

        _, BInvMatrix = self.FaceSwapFunctions.getMatrixBary(
            DelaunayTrianglesTarget2)
        AMatrix, _ = self.FaceSwapFunctions.getMatrixBary(DelaunayTrianglesSrc)

        triangle_number = 0
        for triangle in DelaunayTrianglesTarget2:
            xMin, yMin, xMax, yMax = self.HelperFunctions.getTriangleBBox(
                triangle)
            BInv = BInvMatrix[triangle_number]
            for x in range(int(xMin), int(xMax + 1)):
                for y in range(int(yMin), int(yMax + 1)):
                    BarycentricCoordinatesTarget = np.matmul(
                        BInv, np.array([[x], [y], [1]]))
                    if ((BarycentricCoordinatesTarget[0] >= 0
                         and BarycentricCoordinatesTarget[0] <= 1)
                            and (BarycentricCoordinatesTarget[1] >= 0
                                 and BarycentricCoordinatesTarget[1] <= 1)
                            and (BarycentricCoordinatesTarget[2] >= 0
                                 and BarycentricCoordinatesTarget[2] <= 1)):
                        PixelLocSrc = np.matmul(AMatrix[triangle_number],
                                                BarycentricCoordinatesTarget)
                        PixelLocSrc = np.divide(PixelLocSrc, PixelLocSrc[2])
                        TargetImg1[y][x] = weight * SrcImg[int(
                            PixelLocSrc[1])][int(PixelLocSrc[0])] + (
                                1 - weight) * TargetImg1[y][x]
            triangle_number += 1

        # self.HelperFunctions.ShowImage(TargetImg1, 'Target Image')
        FaceSwapOP = self.FaceSwapFunctions.Blending(TargetImg1, CloneSrcImg,
                                                     Mask)
        if debug:
            self.HelperFunctions.ShowImage(TargetImg, 'Target Image')
            self.HelperFunctions.ShowImage(CloneSrcImg, 'Source Image')
            FaceSwapOP = cv2.resize(FaceSwapOP, (720, 720))
            self.HelperFunctions.ShowImage(
                FaceSwapOP, 'FaceSwap using Delaunay Triangulation')
        return FaceSwapOP

    def FaceWarpByTPS(self, frame):
        TargetImg = frame.copy()
        SrcImg = frame.copy()
        CloneSrcImg = TargetImg.copy()

        isFace1, _, LandMarkPoints = self.FaceSwapFunctions.DetectFacialLandmarks2(
            TargetImg)
        if (not isFace1 or len(LandMarkPoints) < 2):
            return TargetImg
        LandMarkPointsTarget = LandMarkPoints[0]
        LandMarkPointsSrc = LandMarkPoints[1]
        p = len(LandMarkPointsTarget)
        K = np.zeros([p, p])

        # K matrix
        for idX in range(p):
            for idY in range(p):
                K[idX][idY] = self.HelperFunctions.FuncUofR(
                    np.linalg.norm(np.array(LandMarkPointsTarget[idX]) -
                                   np.array(LandMarkPointsTarget[idY]),
                                   ord=2))

        # P matrix
        P = np.zeros([p, 3])
        for i, (x, y) in enumerate(LandMarkPointsTarget):
            P[i] = (x, y, 1)

        # lambda I matrix
        I = np.identity(p + 3)
        lamb = 1e-4

        # V matrix
        vx = np.zeros([p + 3])
        vy = np.zeros([p + 3])

        for i, (x, y) in enumerate(LandMarkPointsSrc):
            vx[i] = x
            vy[i] = y
        vx = np.reshape(vx, (p + 3, 1))
        vy = np.reshape(vy, (p + 3, 1))

        t1 = np.hstack((K, P))
        t2 = np.hstack((np.transpose(P), np.zeros([3, 3])))
        M = np.vstack((t1, t2))

        resultX = np.dot(np.linalg.inv(M + lamb * I), vx)
        resultY = np.dot(np.linalg.inv(M + lamb * I), vy)

        wX = resultX[0:p]
        axX = resultX[p]
        axY = resultX[p + 1]
        ax1 = resultX[p + 2]

        wY = resultY[0:p]
        ayX = resultY[p]
        ayY = resultY[p + 1]
        ay1 = resultY[p + 2]

        points = np.array(LandMarkPointsTarget)
        hull = cv2.convexHull(points)
        Mask = np.zeros(TargetImg.shape, TargetImg.dtype)
        if debug:
            print("Points inside convex hull: {}".format(hull))
        Mask = np.zeros((TargetImg.shape[0], TargetImg.shape[1], 3), np.uint8)
        color = (255, 255, 255)
        cv2.fillPoly(Mask, [hull], color)
        Mask = Mask[:, :, 1]
        ptsY = np.where(Mask == 255)[0]
        ptsX = np.where(Mask == 255)[1]
        ptsY = np.transpose(ptsY)
        ptsX = np.transpose(ptsX)

        pts = np.vstack((ptsX, ptsY))
        pts = np.transpose(pts)

        if debug:
            print("K: {}".format(K))
            print("P: {}".format(P))
            print("M: {}".format(M))
            print("vx: {}".format(vx))
            print("vy: {}".format(vy))
            print("resultX: {}".format(resultX))
            print("resultY: {}".format(resultY))
            self.HelperFunctions.ShowImage(Mask, 'Convex hull for face')

        for Lpts in pts:
            U1 = (points - Lpts)
            U1 = np.linalg.norm(U1, ord=2, axis=1)
            U1 = self.HelperFunctions.FuncUofR(U1)
            wUX = np.matmul(np.transpose(wX), U1)
            wUY = np.matmul(np.transpose(wY), U1)
            fX = int(ax1 + axX * Lpts[0] + axY * Lpts[1] + wUX)
            fY = int(ay1 + ayX * Lpts[0] + ayY * Lpts[1] + wUY)
            if fX < 0 or fY < 0 or fX >= SrcImg.shape[1] or fY >= SrcImg.shape[0]:
                continue
            TargetImg[Lpts[1]][Lpts[0]] = SrcImg[fY][fX]

        TargetImg1 = self.FaceSwapFunctions.Blending(TargetImg, CloneSrcImg,
                                                     Mask)
        CloneSrcImg = TargetImg1

        ########################
        p = len(LandMarkPointsSrc)
        K = np.zeros([p, p])

        # K matrix
        for idX in range(p):
            for idY in range(p):
                K[idX][idY] = self.HelperFunctions.FuncUofR(
                    np.linalg.norm(np.array(LandMarkPointsSrc[idX]) -
                                   np.array(LandMarkPointsSrc[idY]),
                                   ord=2))

        # P matrix
        P = np.zeros([p, 3])
        for i, (x, y) in enumerate(LandMarkPointsSrc):
            P[i] = (x, y, 1)

        # lambda I matrix
        I = np.identity(p + 3)
        lamb = 1e-4

        # V matrix
        vx = np.zeros([p + 3])
        vy = np.zeros([p + 3])

        for i, (x, y) in enumerate(LandMarkPointsTarget):
            vx[i] = x
            vy[i] = y
        vx = np.reshape(vx, (p + 3, 1))
        vy = np.reshape(vy, (p + 3, 1))

        t1 = np.hstack((K, P))
        t2 = np.hstack((np.transpose(P), np.zeros([3, 3])))
        M = np.vstack((t1, t2))

        resultX = np.dot(np.linalg.inv(M + lamb * I), vx)
        resultY = np.dot(np.linalg.inv(M + lamb * I), vy)

        wX = resultX[0:p]
        axX = resultX[p]
        axY = resultX[p + 1]
        ax1 = resultX[p + 2]

        wY = resultY[0:p]
        ayX = resultY[p]
        ayY = resultY[p + 1]
        ay1 = resultY[p + 2]

        points = np.array(LandMarkPointsSrc)
        hull = cv2.convexHull(points)
        Mask = np.zeros(TargetImg.shape, TargetImg.dtype)
        if debug:
            print("Points inside convex hull: {}".format(hull))
        Mask = np.zeros((TargetImg1.shape[0], TargetImg1.shape[1], 3),
                        np.uint8)
        color = (255, 255, 255)
        cv2.fillPoly(Mask, [hull], color)
        Mask = Mask[:, :, 1]
        ptsY = np.where(Mask == 255)[0]
        ptsX = np.where(Mask == 255)[1]
        ptsY = np.transpose(ptsY)
        ptsX = np.transpose(ptsX)

        pts = np.vstack((ptsX, ptsY))
        pts = np.transpose(pts)

        if debug:
            print("K: {}".format(K))
            print("P: {}".format(P))
            print("M: {}".format(M))
            print("vx: {}".format(vx))
            print("vy: {}".format(vy))
            print("resultX: {}".format(resultX))
            print("resultY: {}".format(resultY))
            self.HelperFunctions.ShowImage(Mask, 'Convex hull for face')

        for Lpts in pts:
            U1 = (points - Lpts)
            U1 = np.linalg.norm(U1, ord=2, axis=1)
            U1 = self.HelperFunctions.FuncUofR(U1)
            wUX = np.matmul(np.transpose(wX), U1)
            wUY = np.matmul(np.transpose(wY), U1)
            fX = int(ax1 + axX * Lpts[0] + axY * Lpts[1] + wUX)
            fY = int(ay1 + ayX * Lpts[0] + ayY * Lpts[1] + wUY)
            if fX < 0 or fY < 0 or fX >= SrcImg.shape[1] or fY >= SrcImg.shape[0]:
                continue
            TargetImg1[Lpts[1]][Lpts[0]] = SrcImg[fY][fX]

        FaceSwapOP = self.FaceSwapFunctions.Blending(TargetImg1, CloneSrcImg,
                                                     Mask)

        # self.HelperFunctions.ShowImage(
        #     FaceSwapOP, 'FaceSwap using Thin Plate Spline')
        return FaceSwapOP
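
A minimal numpy sketch of the thin plate spline setup that FaceWarpByTPS builds above, assuming FuncUofR implements the standard TPS kernel U(r) = r^2 log(r^2); the landmark correspondences below are hypothetical.

import numpy as np

def FuncUofR(r):
    # Assumed standard TPS radial basis function U(r) = r^2 log(r^2), with U(0) = 0.
    r = np.asarray(r, dtype=np.float64)
    return np.where(r == 0, 0.0, r * r * np.log(r * r + 1e-12))

# Hypothetical corresponding landmarks: target points map onto source points.
target = np.array([[10, 10], [50, 12], [30, 55], [70, 60]], dtype=np.float64)
source = np.array([[12, 11], [52, 10], [31, 57], [72, 58]], dtype=np.float64)
p = len(target)

# Same block structure as in FaceWarpByTPS: M = [[K, P], [P^T, 0]] with a small
# lambda * I regularizer, solved separately for the x and y mappings.
K = FuncUofR(np.linalg.norm(target[:, None, :] - target[None, :, :], axis=2))
P = np.hstack((target, np.ones((p, 1))))
M = np.vstack((np.hstack((K, P)), np.hstack((P.T, np.zeros((3, 3))))))
lamb = 1e-4
vx = np.concatenate((source[:, 0], np.zeros(3))).reshape(-1, 1)
vy = np.concatenate((source[:, 1], np.zeros(3))).reshape(-1, 1)
paramsX = np.linalg.solve(M + lamb * np.identity(p + 3), vx)
paramsY = np.linalg.solve(M + lamb * np.identity(p + 3), vy)
# First p entries are the kernel weights, last three the affine terms,
# matching wX/axX/axY/ax1 and wY/ayX/ayY/ay1 above.
print(paramsX.ravel())
print(paramsY.ravel())
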
Example #8
 def __init__(self, Image_Path_1, Image_Path_2, ModelPath):
     self.ModelPath = ModelPath
     InputImageList = [cv2.imread(Image_Path_1), cv2.imread(Image_Path_2)]
     self.Images = np.array(InputImageList)
     self.HelperFunctions = ImageUtils()
     self.FaceSwapFunctions = FaceSwapUtils(self.ModelPath)
Example #9
 def __init__(self, Image_Path, ModelPath):
     self.ModelPath = ModelPath
     # ImageTemp = cv2.resize(ImageTemp, (1000, 800))
     self.Image = cv2.imread(Image_Path)
     self.HelperFunctions = ImageUtils()
     self.FaceSwapFunctions = FaceSwapUtils(self.ModelPath)
Example #10
class Stitcher:
    """
    Read a set of images for Panorama stitching
    """
    def __init__(self, BasePath, ModelPath, NumFeatures):
        self.BasePath = BasePath
        self.ModelPath = ModelPath
        InputImageList = []
        for filename in sorted(glob.glob(self.BasePath + '/*.jpg')):
            ImageTemp = cv2.imread(filename)
            InputImageList.append(ImageTemp)
        self.NumFeatures = NumFeatures
        self.Images = np.array(InputImageList)
        self.NumImages = len(InputImageList)
        self.HelperFunctions = HelperFunctions()
        self.Model = DeepNetwork()
        self.ImageUtils = ImageUtils()
        self.ImageSize = InputImageList[0].shape
        self.ImgPH = tf.placeholder('float', shape=(1, 128, 128, 2))

    """
	Obtain Homography using Deep Learning Model (Supervised and Unsupervised)
	"""

    def ExtractHomographyFromH4Pt(self, H4PtPred):

        pts1 = np.float32([[0, 0], [self.ImageSize[1], 0],
                           [self.ImageSize[1], self.ImageSize[0]],
                           [0, self.ImageSize[0]]])

        pts2 = np.float32(
            [[0 + H4PtPred[0][0], 0 + H4PtPred[0][4]],
             [self.ImageSize[1] + H4PtPred[0][1], 0 + H4PtPred[0][5]],
             [
                 self.ImageSize[1] + H4PtPred[0][2],
                 self.ImageSize[0] + H4PtPred[0][6]
             ], [0 + H4PtPred[0][3], self.ImageSize[0] + H4PtPred[0][7]]])
        HPred = cv2.getPerspectiveTransform(pts1, pts2)
        return HPred

    def EstimateHomographySupervised(self, Image1, Image2):
        # Setup Saver
        H4PtPred = self.Model.HomographyNet(self.ImgPH, False)
        Saver = tf.train.Saver()

        with tf.Session() as sess:
            Saver.restore(sess, self.ModelPath)
            print('Number of parameters in this model are %d ' % np.sum([
                np.prod(v.get_shape().as_list())
                for v in tf.trainable_variables()
            ]))

            Image1 = np.float32(self.ImageUtils.PreProcess(Image1, 128, 128))
            Image1 = self.ImageUtils.ImageStandardization(Image1)
            Image2 = np.float32(self.ImageUtils.PreProcess(Image2, 128, 128))
            Image2 = self.ImageUtils.ImageStandardization(Image2)

            Images = np.dstack((Image1, Image2))
            I1Batch = []
            I1Batch.append(Images)
            FeedDict = {self.ImgPH: I1Batch}
            H4Pt = sess.run(H4PtPred, FeedDict)
            print("H4pt: {}".format(H4Pt))
            H = self.ExtractHomographyFromH4Pt(H4Pt)
            Hinv = np.linalg.inv(H)

        return H, Hinv

    """
	Image Warping + Blending
	Save Panorama output as mypano.png
	"""

    def RemoveBlackBoundary(self, ImageIn):
        gray = cv2.cvtColor(ImageIn, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
        _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        cnt = contours[0]
        x, y, w, h = cv2.boundingRect(cnt)
        ImageOut = ImageIn[y:y + h, x:x + w]
        return ImageOut

    def Warping(self, Img, Homography, NextShape):
        nH, nW, _ = Img.shape
        Borders = np.array([[0, nW, nW, 0], [0, 0, nH, nH], [1, 1, 1, 1]])
        BordersNew = np.dot(Homography, Borders)
        Ymin = min(BordersNew[1] / BordersNew[2])
        Xmin = min(BordersNew[0] / BordersNew[2])
        Ymax = max(BordersNew[1] / BordersNew[2])
        Xmax = max(BordersNew[0] / BordersNew[2])
        if Ymin < 0:
            MatChange = np.array([[1, 0, -1 * Xmin], [0, 1, -1 * Ymin],
                                  [0, 0, 1]])
            Hnew = np.dot(MatChange, Homography)
            h = int(round(Ymax - Ymin)) + NextShape[0]
        else:
            MatChange = np.array([[1, 0, -1 * Xmin], [0, 1, Ymin], [0, 0, 1]])
            Hnew = np.dot(MatChange, Homography)
            h = int(round(Ymax + Ymin)) + NextShape[0]
        w = int(round(Xmax - Xmin)) + NextShape[1]
        sz = (w, h)
        PanoHolder = cv2.warpPerspective(Img, Hnew, dsize=sz)
        return PanoHolder, int(Xmin), int(Ymin)

    def Blender(self):
        Pano = self.Images[0]
        for NextImage in self.Images[1:2]:
            H, Hinv = self.EstimateHomographySupervised(Pano, NextImage)
            PanoHolder, oX, oY = self.Warping(Pano, H, NextImage.shape)
            self.HelperFunctions.ShowImage(PanoHolder, 'PanoHolder')
            oX = abs(oX)
            oY = abs(oY)
            for IdY in range(oY, NextImage.shape[0] + oY):
                for IdX in range(oX, NextImage.shape[1] + oX):
                    y = IdY - oY
                    x = IdX - oX
                    PanoHolder[IdY, IdX, :] = NextImage[y, x, :]
            # Pano = self.RemoveBlackBoundary(PanoHolder)
            Pano = PanoHolder
        PanoResize = cv2.resize(Pano, (1280, 1024))
        self.HelperFunctions.ShowImage(PanoResize, 'PanoResize')
        PanoResize = cv2.GaussianBlur(PanoResize, (5, 5), 1.2)
        return PanoResize
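
A minimal driver sketch for the Stitcher class above; it assumes the module-level imports used throughout (cv2, numpy, tensorflow, glob) plus HelperFunctions, DeepNetwork and ImageUtils, and the paths are placeholders.

# Placeholder base path containing the .jpg frames and a placeholder checkpoint path.
stitcher = Stitcher(BasePath='./Data/Pano',
                    ModelPath='./Checkpoints/homography_model.ckpt',
                    NumFeatures=1000)
Pano = stitcher.Blender()
cv2.imwrite('mypano.png', Pano)
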
Example #11
def main():
    Parser = argparse.ArgumentParser()
    Parser.add_argument(
        '--Mode',
        default='0',
        help=
        'FaceSwap Mode. 0: Swap faces in two images. 1: Swap the largest face in a video with a user-defined image. 2: Swap two faces in a single video, Default:0'
    )
    Parser.add_argument(
        '--FaceSwapMethod',
        default='1',
        help=
        'FaceSwap method. 0: Triangulation. 1: Thin Plate Spline, Default:1'
    )
    Parser.add_argument(
        '--Path1',
        default='/home/rohith/CMSC733/git/FaceSwap/Data/anakin2.jpg',
        help=
        'Video path for Mode 1 and 2. Target image for Mode 0, Default:/home/rohith/CMSC733/git/FaceSwap/Data/anakin2.jpg'
    )
    Parser.add_argument(
        '--Path2',
        default='/home/rohith/CMSC733/git/FaceSwap/Data/rohith.jpeg',
        help=
        'Image path to be swapped for Mode 0 and 1. Ignored for Mode 2, Default:/home/rohith/CMSC733/git/FaceSwap/Data/rohith.jpeg'
    )
    Parser.add_argument(
        '--ModelPath',
        default=
        '/home/rohith/CMSC733/git/FaceSwap/Models/shape_predictor_68_face_landmarks.dat',
        help=
        'Model path of dlib predictor, Default:/home/rohith/CMSC733/git/FaceSwap/Models/shape_predictor_68_face_landmarks.dat'
    )

    Args = Parser.parse_args()
    Mode = Args.Mode
    FaceSwapMethod = Args.FaceSwapMethod
    Path1 = Args.Path1
    Path2 = Args.Path2
    ModelPath = Args.ModelPath
    HelperFunctions = ImageUtils()

    if Mode == '0':
        if FaceSwapMethod == '0':
            print(
                "**********Triangulation method started for FaceSwap in two images**********"
            )
            swapFacesTri = FaceSwapImages(Path1, Path2, ModelPath)
            faceSwapOP = swapFacesTri.FaceWarpByTriangulation(1)
            HelperFunctions.ShowImage(faceSwapOP,
                                      'Triangulation method FaceSwap')
            print(
                "**********Triangulation method ended for FaceSwap in two images**********"
            )
        elif FaceSwapMethod == '1':
            print(
                "**********TPS method started for FaceSwap in two images**********"
            )
            swapFacesTPS = FaceSwapImages(Path1, Path2, ModelPath)
            faceSwapOP = swapFacesTPS.FaceWarpByTPS()
            HelperFunctions.ShowImage(faceSwapOP, 'TPS method FaceSwap')
            print(
                "**********TPS method ended for FaceSwap in two images**********"
            )

    elif Mode == '1':
        cap = cv2.VideoCapture(Path1)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        w = int(cap.get(3))
        h = int(cap.get(4))

        if FaceSwapMethod == '0':
            print(
                "**********Triangulation method started for swapping face in video with image**********"
            )
            swapFaces1 = FaceSwapVideoWithImage(Path2, ModelPath)
            out1 = cv2.VideoWriter('DataOutputTri.avi', fourcc, 20.0, (w, h))
            while (cap.isOpened()):
                ret, frame = cap.read()
                if ret == True:
                    frame = swapFaces1.FaceWarpByTriangulation(frame, 1)
                    out1.write(frame)

                    # cv2.imshow('frame', frame)
                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
                else:
                    break
            cap.release()
            out1.release()
            cv2.destroyAllWindows()
            print(
                "**********Triangulation method ended for swapping face in video with image**********"
            )
        elif FaceSwapMethod == '1':
            print(
                "**********TPS method started for swapping face in video with image**********"
            )
            # i = 0
            swapFaces2 = FaceSwapVideoWithImage(Path2, ModelPath)
            out2 = cv2.VideoWriter('DataOutputTPS.avi', fourcc, 20.0, (w, h))
            while (cap.isOpened()):
                ret, frame = cap.read()
                if ret == True:
                    frame = swapFaces2.FaceWarpByTPS(frame)
                    out2.write(frame)
                    # print(i)
                    # i += 1
                    # cv2.imshow('frame', frame)
                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
                else:
                    break
            cap.release()
            out2.release()
            cv2.destroyAllWindows()
            print(
                "**********TPS method ended for swapping face in video with image**********"
            )

    elif Mode == '2':
        cap = cv2.VideoCapture(Path1)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        w = int(cap.get(3))
        h = int(cap.get(4))
        if FaceSwapMethod == '0':
            print(
                "**********Triangulation method started for swapping face in video**********"
            )
            swapFaces1 = FaceSwapVideoFaces(ModelPath)
            out1 = cv2.VideoWriter('DataOutputTri.avi', fourcc, 20.0, (w, h))
            while (cap.isOpened()):
                ret, frame = cap.read()
                if ret == True:
                    frame = swapFaces1.FaceWarpByTriangulation(frame, 1)
                    out1.write(frame)

                    # cv2.imshow('frame', frame)
                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
                else:
                    break
            cap.release()
            out1.release()
            cv2.destroyAllWindows()
            print(
                "**********Triangulation method ended for swapping face in video**********"
            )
        elif FaceSwapMethod == '1':
            print(
                "**********TPS method started for swapping face in video**********"
            )
            i = 0
            swapFaces2 = FaceSwapVideoFaces(ModelPath)
            out2 = cv2.VideoWriter('DataOutputTPS.avi', fourcc, 20.0, (w, h))
            while (cap.isOpened()):
                ret, frame = cap.read()
                if ret == True:
                    frame = swapFaces2.FaceWarpByTPS(frame)
                    out2.write(frame)
                    # print(i)
                    # i += 1
                    # cv2.imshow('frame', frame)
                    # if cv2.waitKey(1) & 0xFF == ord('q'):
                    #     break
                else:
                    break
            cap.release()
            out2.release()
            cv2.destroyAllWindows()
            print(
                "**********TPS method ended for swapping face in video**********"
            )
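
The script presumably ends with the standard entry-point guard:

# Assumed entry point; the original module most likely calls main() this way.
if __name__ == '__main__':
    main()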