Example No. 1
def get_transform_mat(image_landmarks, output_size, face_type, scale=1.0):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)
    """
    if face_type == FaceType.AVATAR:
        centroid = np.mean (image_landmarks, axis=0)

        mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
        a, c = mat[0,0], mat[1,0]
        scale = math.sqrt((a * a) + (c * c))

        padding = (output_size / 64) * 32

        mat = np.eye ( 2,3 )
        mat[0,2] = -centroid[0]
        mat[1,2] = -centroid[1]
        mat = mat * scale * (output_size / 3)
        mat[:,2] += output_size / 2
    else:
    """
    remove_align = False
    if face_type == FaceType.FULL_NO_ALIGN:
        face_type = FaceType.FULL
        remove_align = True
    elif face_type == FaceType.HEAD_NO_ALIGN:
        face_type = FaceType.HEAD
        remove_align = True

    if face_type == FaceType.HALF:
        padding = 0
    elif face_type == FaceType.FULL:
        padding = (output_size / 64) * 12
    elif face_type == FaceType.HEAD:
        padding = (output_size / 64) * 21
    else:
        raise ValueError('wrong face_type: ', face_type)

    # similarity transform (umeyama) from the detected landmarks, excluding the
    # jawline points 0-16, to the normalized 2D landmark template
    mat = umeyama(image_landmarks[17:], landmarks_2D, True)[0:2]
    # scale into output coordinates, leaving `padding` pixels on each side
    mat = mat * (output_size - 2 * padding)
    mat[:, 2] += padding
    # apply the user scale around the center of the crop
    mat *= (1 / scale)
    mat[:, 2] += -output_size * (((1 / scale) - 1.0) / 2)

    if remove_align:
        bbox = transform_points([(0, 0), (0, output_size - 1),
                                 (output_size - 1, output_size - 1),
                                 (output_size - 1, 0)], mat, True)
        area = mathlib.polygon_area(bbox[:, 0], bbox[:, 1])
        side = math.sqrt(area) / 2
        center = transform_points([(output_size / 2, output_size / 2)], mat,
                                  True)

        pts1 = np.float32([
            center + [-side, -side], center + [side, -side],
            center + [-side, side]
        ])
        pts2 = np.float32([[0, 0], [output_size - 1, 0], [0, output_size - 1]])
        mat = cv2.getAffineTransform(pts1, pts2)

    return mat
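
A minimal usage sketch for the matrix returned above (an illustration, not part of the original code): `img` is a hypothetical BGR frame loaded elsewhere and `landmarks68` a hypothetical (68, 2) array of detected landmarks for it; the warpAffine call mirrors the one used in the extractor examples further down.

import cv2
import numpy as np

output_size = 256
# mat maps image coordinates to aligned-crop coordinates
mat = get_transform_mat(landmarks68, output_size, FaceType.FULL, scale=1.0)

# warp the source frame into an aligned square face crop
face_crop = cv2.warpAffine(img, mat, (output_size, output_size),
                           flags=cv2.INTER_LANCZOS4)

# the same matrix maps the landmarks into crop space
crop_landmarks = transform_points(landmarks68, mat)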
Example No. 2
def get_transform_mat (image_landmarks, output_size, face_type, scale=1.0, full_face_align_top=True):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array (image_landmarks)

    padding, remove_align = FaceType_to_padding_remove_align.get(face_type, 0.0)

    # umeyama similarity transform from the inner landmarks (brows, nose, eyes
    # and the two mouth corners) to the normalized template
    mat = umeyama( np.concatenate ( [ image_landmarks[17:49] , image_landmarks[54:55] ] ) , landmarks_2D_new, True)[0:2]
    # project the unit square's corners and center from template space back
    # into image space
    l_p = transform_points (  np.float32([(0,0),(1,0),(1,1),(0,1),(0.5,0.5)]) , mat, True)
    l_c = l_p[4]

    # unit vectors along the two diagonals of the projected square
    tb_diag_vec = (l_p[2]-l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1]-l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)
    
    mod = (1.0 / scale)* ( npla.norm(l_p[0]-l_p[2])*(padding*np.sqrt(2.0) + 0.5) )
    
    l_t = np.array( [ np.round( l_c - tb_diag_vec*mod ), 
                      np.round( l_c + bt_diag_vec*mod ), 
                      np.round( l_c + tb_diag_vec*mod ) ] )    

    pts2 = np.float32(( (0,0),(output_size,0),(output_size,output_size) ))
    mat = cv2.getAffineTransform(l_t,pts2)
    
    #if full_face_align_top and (face_type == FaceType.FULL or face_type == FaceType.FULL_NO_ALIGN):
    #    #lmrks2 = expand_eyebrows(image_landmarks)    
    #    #lmrks2_ = transform_points( [ lmrks2[19], lmrks2[24] ], mat, False )     
    #    #y_diff = np.float32( (0,np.min(lmrks2_[:,1])) ) 
    #    #y_diff = transform_points( [ np.float32( (0,0) ), y_diff], mat, True)
    #    #y_diff = y_diff[1]-y_diff[0]
    #    
    #    x_diff = np.float32((0,0))
    #    
    #    lmrks2_ = transform_points( [ image_landmarks[0], image_landmarks[16] ], mat, False )   
    #    if lmrks2_[0,0] < 0:
    #        x_diff = lmrks2_[0,0]        
    #        x_diff = transform_points( [ np.float32( (0,0) ), np.float32((x_diff,0)) ], mat, True)
    #        x_diff = x_diff[1]-x_diff[0]        
    #    elif lmrks2_[1,0] >= output_size:
    #        x_diff = lmrks2_[1,0]-(output_size-1)
    #        x_diff = transform_points( [ np.float32( (0,0) ), np.float32((x_diff,0)) ], mat, True)
    #        x_diff = x_diff[1]-x_diff[0]    
    #    
    #    mat = cv2.getAffineTransform( l_t+y_diff+x_diff ,pts2)
        
    if remove_align:
        bbox = transform_points ( [ (0,0), (0,output_size), (output_size, output_size), (output_size,0) ], mat, True)
        area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
        side = math.sqrt(area) / 2
        center = transform_points ( [(output_size/2,output_size/2)], mat, True)
        pts1 = np.float32(( center+[-side,-side], center+[side,-side], center+[-side,side] ))
        mat = cv2.getAffineTransform(pts1,pts2)

    return mat
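
The transform_points helper called throughout these examples is not shown in the listing. Below is a compatible sketch (an assumption, not necessarily the project's exact implementation): it applies a 2x3 affine matrix to a list of (x, y) points, optionally inverting the matrix first.

import cv2
import numpy as np

def transform_points(points, mat, invert=False):
    # cv2.transform expects an (N, 1, 2) float array
    points = np.float32(points).reshape(-1, 1, 2)
    if invert:
        # invert the 2x3 affine matrix before applying it
        mat = cv2.invertAffineTransform(mat)
    return cv2.transform(points, mat).reshape(-1, 2)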
Example No. 3
        def process_data(self, data):
            filename_path = Path(data.filename)

            filename_path_str = str(filename_path)
            if self.cached_image[0] == filename_path_str:
                image = self.cached_image[
                    1]  #cached image for manual extractor
            else:
                image = cv2_imread(filename_path_str)

                if image is None:
                    self.log_err(
                        'Failed to extract %s, reason: cv2_imread() fail.' %
                        (str(filename_path)))
                    return data

                image = imagelib.normalize_channels(image, 3)
                h, w, ch = image.shape

                wm, hm = w % 2, h % 2
                if wm + hm != 0:  #fix odd image
                    image = image[0:h - hm, 0:w - wm, :]
                self.cached_image = (filename_path_str, image)

            src_dflimg = None
            h, w, ch = image.shape
            if h == w:
                #extracting from already extracted jpg image?
                if filename_path.suffix == '.png':
                    src_dflimg = DFLPNG.load(str(filename_path))
                if filename_path.suffix == '.jpg':
                    src_dflimg = DFLJPG.load(str(filename_path))

            if 'rects' in self.type:
                if min(w, h) < 128:
                    self.log_err('Image is too small %s : [%d, %d]' %
                                 (str(filename_path), w, h))
                    data.rects = []
                else:
                    for rot in ([0, 90, 270, 180]):
                        data.rects_rotation = rot
                        if rot == 0:
                            rotated_image = image
                        elif rot == 90:
                            rotated_image = image.swapaxes(0, 1)[:, ::-1, :]
                        elif rot == 180:
                            rotated_image = image[::-1, ::-1, :]
                        elif rot == 270:
                            rotated_image = image.swapaxes(0, 1)[::-1, :, :]

                        rects = data.rects = self.e.extract(rotated_image,
                                                            is_bgr=True)
                        if len(rects) != 0:
                            break

                    if self.max_faces_from_image != 0 and len(data.rects) > 1:
                        data.rects = data.rects[0:self.max_faces_from_image]

                return data

            elif self.type == 'landmarks':

                if data.rects_rotation == 0:
                    rotated_image = image
                elif data.rects_rotation == 90:
                    rotated_image = image.swapaxes(0, 1)[:, ::-1, :]
                elif data.rects_rotation == 180:
                    rotated_image = image[::-1, ::-1, :]
                elif data.rects_rotation == 270:
                    rotated_image = image.swapaxes(0, 1)[::-1, :, :]

                data.landmarks = self.e.extract(
                    rotated_image,
                    data.rects,
                    self.second_pass_e if
                    (src_dflimg is None and data.landmarks_accurate) else None,
                    is_bgr=True)
                if data.rects_rotation != 0:
                    for i, (rect,
                            lmrks) in enumerate(zip(data.rects,
                                                    data.landmarks)):
                        new_rect, new_lmrks = rect, lmrks
                        (l, t, r, b) = rect
                        if data.rects_rotation == 90:
                            new_rect = (t, h - l, b, h - r)
                            if lmrks is not None:
                                new_lmrks = lmrks[:, ::-1].copy()
                                new_lmrks[:, 1] = h - new_lmrks[:, 1]
                        elif data.rects_rotation == 180:
                            if lmrks is not None:
                                new_rect = (w - l, h - t, w - r, h - b)
                                new_lmrks = lmrks.copy()
                                new_lmrks[:, 0] = w - new_lmrks[:, 0]
                                new_lmrks[:, 1] = h - new_lmrks[:, 1]
                        elif data.rects_rotation == 270:
                            new_rect = (w - b, l, w - t, r)
                            if lmrks is not None:
                                new_lmrks = lmrks[:, ::-1].copy()
                                new_lmrks[:, 0] = w - new_lmrks[:, 0]
                        data.rects[i], data.landmarks[i] = new_rect, new_lmrks

                return data

            elif self.type == 'final':
                data.final_output_files = []
                rects = data.rects
                landmarks = data.landmarks

                if self.debug_dir is not None:
                    debug_output_file = str(
                        Path(self.debug_dir) / (filename_path.stem + '.jpg'))
                    debug_image = image.copy()

                if src_dflimg is not None and len(rects) != 1:
                    # if re-extracting from dflimg and more than one face or no face was detected, don't process - just copy it
                    print("src_dflimg is not None and len(rects) != 1",
                          str(filename_path))
                    output_file = str(self.final_output_path /
                                      filename_path.name)
                    if str(filename_path) != str(output_file):
                        shutil.copy(str(filename_path), str(output_file))
                    data.final_output_files.append(output_file)
                else:
                    face_idx = 0
                    for rect, image_landmarks in zip(rects, landmarks):

                        if src_dflimg is not None and face_idx > 1:
                            #cannot extract more than 1 face from dflimg
                            break

                        if image_landmarks is None:
                            continue

                        rect = np.array(rect)

                        if self.face_type == FaceType.MARK_ONLY:
                            image_to_face_mat = None
                            face_image = image
                            face_image_landmarks = image_landmarks
                        else:
                            image_to_face_mat = LandmarksProcessor.get_transform_mat(
                                image_landmarks, self.image_size,
                                self.face_type)

                            face_image = cv2.warpAffine(
                                image, image_to_face_mat,
                                (self.image_size, self.image_size),
                                cv2.INTER_LANCZOS4)
                            face_image_landmarks = LandmarksProcessor.transform_points(
                                image_landmarks, image_to_face_mat)

                            landmarks_bbox = LandmarksProcessor.transform_points(
                                [(0, 0), (0, self.image_size - 1),
                                 (self.image_size - 1, self.image_size - 1),
                                 (self.image_size - 1, 0)], image_to_face_mat,
                                True)

                            rect_area = mathlib.polygon_area(
                                np.array(rect[[0, 2, 2, 0]]),
                                np.array(rect[[1, 1, 3, 3]]))
                            landmarks_area = mathlib.polygon_area(
                                landmarks_bbox[:, 0], landmarks_bbox[:, 1])

                            if landmarks_area > 4 * rect_area:  #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
                                continue

                            if self.debug_dir is not None:
                                LandmarksProcessor.draw_rect_landmarks(
                                    debug_image,
                                    rect,
                                    image_landmarks,
                                    self.image_size,
                                    self.face_type,
                                    transparent_mask=True)

                        if src_dflimg is not None and filename_path.suffix == '.jpg':
                            #if extracting from dflimg and jpg copy it in order not to lose quality
                            output_file = str(self.final_output_path /
                                              filename_path.name)
                            if str(filename_path) != str(output_file):
                                shutil.copy(str(filename_path),
                                            str(output_file))
                        else:
                            output_file = '{}_{}{}'.format(
                                str(self.final_output_path /
                                    filename_path.stem), str(face_idx), '.jpg')
                            cv2_imwrite(output_file, face_image,
                                        [int(cv2.IMWRITE_JPEG_QUALITY), 85])

                        DFLJPG.embed_data(
                            output_file,
                            face_type=FaceType.toString(self.face_type),
                            landmarks=face_image_landmarks.tolist(),
                            source_filename=filename_path.name,
                            source_rect=rect,
                            source_landmarks=image_landmarks.tolist(),
                            image_to_face_mat=image_to_face_mat,
                            pitch_yaw_roll=data.pitch_yaw_roll)

                        data.final_output_files.append(output_file)
                        face_idx += 1
                    data.faces_detected = face_idx

                if self.debug_dir is not None:
                    cv2_imwrite(debug_output_file, debug_image,
                                [int(cv2.IMWRITE_JPEG_QUALITY), 50])

                return data

            elif self.type == 'fanseg':
                if src_dflimg is not None:
                    fanseg_mask = self.e.extract(image / 255.0)
                    src_dflimg.embed_and_set(
                        filename_path_str,
                        fanseg_mask=fanseg_mask,
                    )
Example No. 4
        def process_data(self, data):
            filename_path = Path(data[0])

            filename_path_str = str(filename_path)
            if self.cached_image[0] == filename_path_str:
                image = self.cached_image[
                    1]  #cached image for manual extractor
            else:
                image = cv2_imread(filename_path_str)

                if image is None:
                    self.log_err(
                        'Failed to extract %s, reason: cv2_imread() fail.' %
                        (str(filename_path)))
                    return None

                image_shape = image.shape
                if len(image_shape) == 2:
                    h, w = image.shape
                    ch = 1
                else:
                    h, w, ch = image.shape

                if ch == 1:
                    image = np.repeat(image[:, :, np.newaxis], 3, -1)
                elif ch == 4:
                    image = image[:, :, 0:3]

                wm = w % 2
                hm = h % 2
                if wm + hm != 0:  #fix odd image
                    image = image[0:h - hm, 0:w - wm, :]
                self.cached_image = (filename_path_str, image)

            src_dflimg = None
            h, w, ch = image.shape
            if h == w:
                #extracting from already extracted jpg image?
                if filename_path.suffix == '.jpg':
                    src_dflimg = DFLJPG.load(str(filename_path))

            if self.type == 'rects':
                if min(w, h) < 128:
                    self.log_err('Image is too small %s : [%d, %d]' %
                                 (str(filename_path), w, h))
                    rects = []
                else:
                    rects = self.e.extract_from_bgr(image)

                return [str(filename_path), rects]

            elif self.type == 'landmarks':
                rects = data[1]
                if rects is None:
                    landmarks = None
                else:
                    landmarks = self.e.extract_from_bgr(
                        image, rects,
                        self.second_pass_e if src_dflimg is None else None)

                return [str(filename_path), landmarks]

            elif self.type == 'final':

                result = []
                faces = data[1]

                if self.debug_dir is not None:
                    debug_output_file = str(
                        Path(self.debug_dir) / (filename_path.stem + '.jpg'))
                    debug_image = image.copy()

                if src_dflimg is not None and len(faces) != 1:
                    # if re-extracting from dflimg and more than one face or no face was detected, don't process - just copy it
                    print("src_dflimg is not None and len(faces) != 1",
                          str(filename_path))
                    output_file = str(self.output_path / filename_path.name)
                    if str(filename_path) != str(output_file):
                        shutil.copy(str(filename_path), str(output_file))
                    result.append(output_file)
                else:
                    face_idx = 0
                    for face in faces:
                        rect = np.array(face[0])
                        image_landmarks = face[1]
                        if image_landmarks is None:
                            continue
                        image_landmarks = np.array(image_landmarks)

                        if self.face_type == FaceType.MARK_ONLY:
                            face_image = image
                            face_image_landmarks = image_landmarks
                        else:
                            image_to_face_mat = LandmarksProcessor.get_transform_mat(
                                image_landmarks, self.image_size,
                                self.face_type)
                            face_image = cv2.warpAffine(
                                image, image_to_face_mat,
                                (self.image_size, self.image_size),
                                cv2.INTER_LANCZOS4)
                            face_image_landmarks = LandmarksProcessor.transform_points(
                                image_landmarks, image_to_face_mat)

                            landmarks_bbox = LandmarksProcessor.transform_points(
                                [(0, 0), (0, self.image_size - 1),
                                 (self.image_size - 1, self.image_size - 1),
                                 (self.image_size - 1, 0)], image_to_face_mat,
                                True)

                            rect_area = mathlib.polygon_area(
                                np.array(rect[[0, 2, 2, 0]]),
                                np.array(rect[[1, 1, 3, 3]]))
                            landmarks_area = mathlib.polygon_area(
                                landmarks_bbox[:, 0], landmarks_bbox[:, 1])

                            if landmarks_area > 4 * rect_area:  #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
                                continue

                        if self.debug_dir is not None:
                            LandmarksProcessor.draw_rect_landmarks(
                                debug_image,
                                rect,
                                image_landmarks,
                                self.image_size,
                                self.face_type,
                                transparent_mask=True)

                        if src_dflimg is not None:
                            #if extracting from dflimg copy it in order not to lose quality
                            output_file = str(self.output_path /
                                              filename_path.name)
                            if str(filename_path) != str(output_file):
                                shutil.copy(str(filename_path),
                                            str(output_file))
                        else:
                            output_file = '{}_{}{}'.format(
                                str(self.output_path / filename_path.stem),
                                str(face_idx), '.jpg')
                            cv2_imwrite(output_file, face_image,
                                        [int(cv2.IMWRITE_JPEG_QUALITY), 85])

                        DFLJPG.embed_data(
                            output_file,
                            face_type=FaceType.toString(self.face_type),
                            landmarks=face_image_landmarks.tolist(),
                            source_filename=filename_path.name,
                            source_rect=rect,
                            source_landmarks=image_landmarks.tolist(),
                            image_to_face_mat=image_to_face_mat)

                        result.append(output_file)
                        face_idx += 1

                if self.debug_dir is not None:
                    cv2_imwrite(debug_output_file, debug_image,
                                [int(cv2.IMWRITE_JPEG_QUALITY), 50])

                return result
Example No. 5
def get_transform_mat(image_landmarks,
                      output_size,
                      face_type,
                      scale=1.0,
                      full_face_align_top=True):
    if not isinstance(image_landmarks, np.ndarray):
        image_landmarks = np.array(image_landmarks)

    padding, remove_align = FaceType_to_padding_remove_align.get(
        face_type, 0.0)

    mat = umeyama(
        np.concatenate([image_landmarks[17:49], image_landmarks[54:55]]),
        landmarks_2D_new, True)[0:2]
    l_p = transform_points(
        np.float32([(0, 0), (1, 0), (1, 1), (0, 1), (0.5, 0.5)]), mat, True)
    l_c = l_p[4]

    tb_diag_vec = (l_p[2] - l_p[0]).astype(np.float32)
    tb_diag_vec /= npla.norm(tb_diag_vec)
    bt_diag_vec = (l_p[1] - l_p[3]).astype(np.float32)
    bt_diag_vec /= npla.norm(bt_diag_vec)

    mod = (1.0 / scale) * (npla.norm(l_p[0] - l_p[2]) *
                           (padding * np.sqrt(2.0) + 0.5))

    if not remove_align:
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod)
        ])
    else:
        l_t = np.array([
            np.round(l_c - tb_diag_vec * mod),
            np.round(l_c + bt_diag_vec * mod),
            np.round(l_c + tb_diag_vec * mod),
            np.round(l_c - bt_diag_vec * mod),
        ])

        area = mathlib.polygon_area(l_t[:, 0], l_t[:, 1])
        side = np.float32(math.sqrt(area) / 2)
        l_t = np.array([
            np.round(l_c + [-side, -side]),
            np.round(l_c + [side, -side]),
            np.round(l_c + [side, side])
        ])

    pts2 = np.float32(((0, 0), (output_size, 0), (output_size, output_size)))
    mat = cv2.getAffineTransform(l_t, pts2)

    #if remove_align:
    #    bbox = transform_points ( [ (0,0), (0,output_size), (output_size, output_size), (output_size,0) ], mat, True)
    #    #import code
    #    #code.interact(local=dict(globals(), **locals()))
    #    area = mathlib.polygon_area(bbox[:,0], bbox[:,1] )
    #    side = math.sqrt(area) / 2
    #    center = transform_points ( [(output_size/2,output_size/2)], mat, True)
    #    pts1 = np.float32(( center+[-side,-side], center+[side,-side], center+[side,-side] ))
    #    pts2 = np.float32([[0,0],[output_size,0],[0,output_size]])
    #    mat = cv2.getAffineTransform(pts1,pts2)

    return mat
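
For reference, the cv2.getAffineTransform step used above can be checked in isolation: given three source points (the corners computed from the landmark frame) and the three output-crop corners, it returns the 2x3 matrix that maps one triangle onto the other. The point values below are made up purely for illustration.

import cv2
import numpy as np

output_size = 256
# made-up corners of the rotated square in image space
src_tri = np.float32([[120.0, 80.0], [310.0, 95.0], [325.0, 285.0]])
# corners of the output crop they should land on (same order as pts2 above)
dst_tri = np.float32([[0, 0], [output_size, 0], [output_size, output_size]])

mat = cv2.getAffineTransform(src_tri, dst_tri)
print(mat.shape)  # (2, 3)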
Example No. 6
        def process_data(self, data):
            filename_path = Path(data[0])

            filename_path_str = str(filename_path)
            if self.cached_image[0] == filename_path_str:
                image = self.cached_image[1]
            else:
                image = cv2_imread(filename_path_str)
                self.cached_image = (filename_path_str, image)

            if image is None:
                self.log_err(
                    'Failed to extract %s, reason: cv2_imread() fail.' %
                    (str(filename_path)))
            else:
                if self.type == 'rects':
                    rects = self.e.extract_from_bgr(image)
                    return [str(filename_path), rects]

                elif self.type == 'landmarks':
                    rects = data[1]
                    landmarks = self.e.extract_from_bgr(image, rects)
                    return [str(filename_path), landmarks]

                elif self.type == 'final':
                    src_dflimg = None
                    (h, w, c) = image.shape
                    if h == w:
                        #extracting from already extracted jpg image?
                        if filename_path.suffix == '.jpg':
                            src_dflimg = DFLJPG.load(str(filename_path))

                    result = []
                    faces = data[1]

                    if self.debug:
                        debug_output_file = '{}{}'.format(
                            str(
                                Path(str(self.output_path) + '_debug') /
                                filename_path.stem), '.jpg')
                        debug_image = image.copy()

                    face_idx = 0
                    for face in faces:
                        rect = np.array(face[0])
                        image_landmarks = np.array(face[1])

                        if self.face_type == FaceType.MARK_ONLY:
                            face_image = image
                            face_image_landmarks = image_landmarks
                        else:
                            image_to_face_mat = LandmarksProcessor.get_transform_mat(
                                image_landmarks, self.image_size,
                                self.face_type)
                            face_image = cv2.warpAffine(
                                image, image_to_face_mat,
                                (self.image_size, self.image_size),
                                cv2.INTER_LANCZOS4)
                            face_image_landmarks = LandmarksProcessor.transform_points(
                                image_landmarks, image_to_face_mat)

                            landmarks_bbox = LandmarksProcessor.transform_points(
                                [(0, 0), (0, self.image_size - 1),
                                 (self.image_size - 1, self.image_size - 1),
                                 (self.image_size - 1, 0)], image_to_face_mat,
                                True)

                            rect_area = mathlib.polygon_area(
                                np.array(rect[[0, 2, 2, 0]]),
                                np.array(rect[[1, 1, 3, 3]]))
                            landmarks_area = mathlib.polygon_area(
                                landmarks_bbox[:, 0], landmarks_bbox[:, 1])

                            if landmarks_area > 4 * rect_area:  #get rid of faces which umeyama-landmark-area > 4*detector-rect-area
                                continue

                        if self.debug:
                            LandmarksProcessor.draw_rect_landmarks(
                                debug_image, rect, image_landmarks,
                                self.image_size, self.face_type)

                        output_file = '{}_{}{}'.format(
                            str(self.output_path / filename_path.stem),
                            str(face_idx), '.jpg')
                        face_idx += 1

                        if src_dflimg is not None:
                            #if extracting from dflimg just copy it in order not to lose quality
                            shutil.copy(str(filename_path), str(output_file))
                        else:
                            cv2_imwrite(output_file, face_image,
                                        [int(cv2.IMWRITE_JPEG_QUALITY), 85])

                        DFLJPG.embed_data(
                            output_file,
                            face_type=FaceType.toString(self.face_type),
                            landmarks=face_image_landmarks.tolist(),
                            source_filename=filename_path.name,
                            source_rect=rect,
                            source_landmarks=image_landmarks.tolist())

                        result.append(output_file)

                    if self.debug:
                        cv2_imwrite(debug_output_file, debug_image,
                                    [int(cv2.IMWRITE_JPEG_QUALITY), 50])

                    return result
            return None