Code example #1
 def get_eyes_mouth_mask():
     eyes_mask = LandmarksProcessor.get_image_eye_mask(
         sample_bgr.shape, sample_landmarks)
     mouth_mask = LandmarksProcessor.get_image_mouth_mask(
         sample_bgr.shape, sample_landmarks)
     mask = eyes_mask + mouth_mask
     return np.clip(mask, 0, 1)
Code example #2
 def get_eyes_mask():
     eyes_mask = LandmarksProcessor.get_image_eye_mask(
         sample_bgr.shape, sample_landmarks)
     # set eye masks to 1-2
     clip = np.clip(eyes_mask, 0, 1)
     clip[clip > 0.1] += 1
     return clip
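
Both helpers build per-pixel maps from the landmark hulls: the first clips the combined eye and mouth hulls to [0, 1], while the second bumps eye pixels by +1 so they end up with a value near 2. A minimal usage sketch, assuming the eye map is added on top of a base face mask to over-weight the eyes in a reconstruction loss (the function and variable names below are illustrative, not taken from the original code):

    import numpy as np

    def weighted_l2(target, predicted, face_mask, eyes_mask):
        # target, predicted: float32 images in [0, 1], shape (H, W, C)
        # face_mask: base weight map in [0, 1]
        # eyes_mask: output of get_eyes_mask(), roughly 2 inside the eyes, 0 elsewhere
        weight = np.clip(face_mask + eyes_mask, 0, 2)  # eye pixels count roughly double
        return np.mean(weight * np.square(target - predicted))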
Code example #3
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPST = SampleProcessor.SampleType
        SPCT = SampleProcessor.ChannelType
        SPFMT = SampleProcessor.FaceMaskType

        sample_rnd_seed = np.random.randint(0x80000000)
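        # One seed per call: the per-output RandomState objects below are built from it,
        # so blur, channel-shuffle and color-shift decisions stay consistent across the
        # output types generated for a sample.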

        outputs = []
        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            h, w, c = sample_bgr.shape

            is_face_sample = sample.landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                                  (0, 1, 0))

            params = imagelib.gen_warp_params(
                sample_bgr,
                sample_process_options.random_flip,
                rotation_range=sample_process_options.rotation_range,
                scale_range=sample_process_options.scale_range,
                tx_range=sample_process_options.tx_range,
                ty_range=sample_process_options.ty_range)

            outputs_sample = []
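            # Each output_sample_types entry is an options dict describing one output
            # (face image, mask, landmarks array or pitch/yaw/roll) for this sample.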
            for opts in output_sample_types:
                sample_type = opts.get('sample_type', SPST.NONE)
                channel_type = opts.get('channel_type', SPCT.NONE)
                resolution = opts.get('resolution', 0)
                warp = opts.get('warp', False)
                transform = opts.get('transform', False)
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)
                normalize_tanh = opts.get('normalize_tanh', False)
                ct_mode = opts.get('ct_mode', None)
                data_format = opts.get('data_format', 'NHWC')

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
                    if not is_face_sample:
                        raise ValueError(
                            "face_samples should be provided for sample_type FACE_*"
                        )

                if is_face_sample:
                    face_type = opts.get('face_type', None)
                    face_mask_type = opts.get('face_mask_type', SPFMT.NONE)

                    if face_type is None:
                        raise ValueError(
                            "face_type must be defined for face samples")

                    if face_type > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.'
                            % (sample.filename, sample.face_type, face_type))

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:

                    if sample_type == SPST.FACE_MASK:
                        if face_mask_type == SPFMT.ALL_HULL or \
                           face_mask_type == SPFMT.EYES_HULL or \
                           face_mask_type == SPFMT.ALL_EYES_HULL:
                            if face_mask_type == SPFMT.ALL_HULL or \
                               face_mask_type == SPFMT.ALL_EYES_HULL:
                                if sample.eyebrows_expand_mod is not None:
                                    all_mask = LandmarksProcessor.get_image_hull_mask(
                                        sample_bgr.shape,
                                        sample.landmarks,
                                        eyebrows_expand_mod=sample.eyebrows_expand_mod)
                                else:
                                    all_mask = LandmarksProcessor.get_image_hull_mask(
                                        sample_bgr.shape, sample.landmarks)

                                all_mask = np.clip(all_mask, 0, 1)

                            if face_mask_type == SPFMT.EYES_HULL or \
                               face_mask_type == SPFMT.ALL_EYES_HULL:
                                eyes_mask = LandmarksProcessor.get_image_eye_mask(
                                    sample_bgr.shape, sample.landmarks)
                                eyes_mask = np.clip(eyes_mask, 0, 1)

                            if face_mask_type == SPFMT.ALL_HULL:
                                img = all_mask
                            elif face_mask_type == SPFMT.EYES_HULL:
                                img = eyes_mask
                            elif face_mask_type == SPFMT.ALL_EYES_HULL:
                                img = all_mask + eyes_mask
                        elif face_mask_type == SPFMT.STRUCT:
                            if sample.eyebrows_expand_mod is not None:
                                img = LandmarksProcessor.get_face_struct_mask(
                                    sample_bgr.shape,
                                    sample.landmarks,
                                    eyebrows_expand_mod=sample.eyebrows_expand_mod)
                            else:
                                img = LandmarksProcessor.get_face_struct_mask(
                                    sample_bgr.shape, sample.landmarks)

                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(img)

                        if sample.face_type == FaceType.MARK_ONLY:
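                            # MARK_ONLY samples: build the alignment at the sample's native
                            # size, apply the random warp, then resize down to resolution.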
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0], face_type)
                            img = cv2.warpAffine(
                                img,
                                mat, (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_LINEAR)
                            img = imagelib.warp_by_params(
                                params,
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=False,
                                cv2_inter=cv2.INTER_LINEAR)
                            img = cv2.resize(img, (resolution, resolution),
                                             cv2.INTER_LINEAR)[..., None]
                        else:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution, face_type)
                            img = imagelib.warp_by_params(
                                params,
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=False,
                                cv2_inter=cv2.INTER_LINEAR)
                            img = cv2.warpAffine(
                                img,
                                mat, (resolution, resolution),
                                borderMode=cv2.BORDER_CONSTANT,
                                flags=cv2.INTER_LINEAR)[..., None]

                        if channel_type == SPCT.G:
                            out_sample = img.astype(np.float32)
                        else:
                            raise ValueError(
                                "only channel_type.G supported for the mask")

                    elif sample_type == SPST.FACE_IMAGE:
                        img = sample_bgr
                        if motion_blur is not None:
                            chance, mb_max_size = motion_blur
                            chance = np.clip(chance, 0, 100)

                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            mblur_rnd_chance = l_rnd_state.randint(100)
                            mblur_rnd_kernel = l_rnd_state.randint(
                                mb_max_size) + 1
                            mblur_rnd_deg = l_rnd_state.randint(360)

                            if mblur_rnd_chance < chance:
                                img = imagelib.LinearMotionBlur(
                                    img, mblur_rnd_kernel, mblur_rnd_deg)

                        if gaussian_blur is not None:
                            chance, kernel_max_size = gaussian_blur
                            chance = np.clip(chance, 0, 100)

                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed + 1)
                            gblur_rnd_chance = l_rnd_state.randint(100)
                            gblur_rnd_kernel = l_rnd_state.randint(
                                kernel_max_size) * 2 + 1

                            if gblur_rnd_chance < chance:
                                img = cv2.GaussianBlur(
                                    img, (gblur_rnd_kernel, ) * 2, 0)

                        if sample.face_type == FaceType.MARK_ONLY:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0], face_type)
                            img = cv2.warpAffine(
                                img,
                                mat, (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_CUBIC)
                            img = imagelib.warp_by_params(
                                params,
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=True)
                            img = cv2.resize(img, (resolution, resolution),
                                             cv2.INTER_CUBIC)
                        else:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution, face_type)
                            img = imagelib.warp_by_params(
                                params,
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=True)
                            img = cv2.warpAffine(
                                img,
                                mat, (resolution, resolution),
                                borderMode=cv2.BORDER_REPLICATE,
                                flags=cv2.INTER_CUBIC)

                        img = np.clip(img.astype(np.float32), 0, 1)

                        # Apply random color transfer
                        if ct_mode is not None and ct_sample is not None:
                            if ct_sample_bgr is None:
                                ct_sample_bgr = ct_sample.load_bgr()
                            img = imagelib.color_transfer(
                                ct_mode, img,
                                cv2.resize(ct_sample_bgr,
                                           (resolution, resolution),
                                           cv2.INTER_LINEAR))

                        # Transform from BGR to desired channel_type
                        if channel_type == SPCT.BGR:
                            out_sample = img
                        elif channel_type == SPCT.BGR_SHUFFLE:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            out_sample = np.take(img,
                                                 l_rnd_state.permutation(
                                                     img.shape[-1]),
                                                 axis=-1)
                        elif channel_type == SPCT.BGR_RANDOM_HSV_SHIFT:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                            # renamed to avoid shadowing the image h/w unpacked above
                            hue, sat, val = cv2.split(hsv)
                            hue = (hue + l_rnd_state.randint(360)) % 360
                            sat = np.clip(sat + l_rnd_state.random() - 0.5, 0, 1)
                            val = np.clip(val + l_rnd_state.random() - 0.5, 0, 1)
                            hsv = cv2.merge([hue, sat, val])
                            out_sample = np.clip(
                                cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), 0, 1)
                        elif channel_type == SPCT.BGR_RANDOM_RGB_LEVELS:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            np_rnd = l_rnd_state.rand
                            inBlack = np.array(
                                [np_rnd() * 0.25, np_rnd() * 0.25, np_rnd() * 0.25],
                                dtype=np.float32)
                            inWhite = np.array(
                                [1.0 - np_rnd() * 0.25, 1.0 - np_rnd() * 0.25, 1.0 - np_rnd() * 0.25],
                                dtype=np.float32)
                            inGamma = np.array(
                                [0.5 + np_rnd(), 0.5 + np_rnd(), 0.5 + np_rnd()],
                                dtype=np.float32)
                            outBlack = np.array([0.0, 0.0, 0.0], dtype=np.float32)
                            outWhite = np.array([1.0, 1.0, 1.0], dtype=np.float32)
                            out_sample = np.clip(
                                (img - inBlack) / (inWhite - inBlack), 0, 1)
                            out_sample = (out_sample**(1 / inGamma)) * (
                                outWhite - outBlack) + outBlack
                            out_sample = np.clip(out_sample, 0, 1)
                        elif channel_type == SPCT.G:
                            out_sample = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[..., None]
                        elif channel_type == SPCT.GGG:
                            out_sample = np.repeat(
                                np.expand_dims(
                                    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), -1),
                                (3, ), -1)

                    # Final transformations
                    if not debug:
                        if normalize_tanh:
                            out_sample = np.clip(out_sample * 2.0 - 1.0, -1.0,
                                                 1.0)
                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))
                #else:
                #    img  = imagelib.warp_by_params (params, img,  warp, transform, can_flip=True, border_replicate=True)
                #    img  = cv2.resize( img,  (resolution,resolution), cv2.INTER_CUBIC )
                elif sample_type == SPST.LANDMARKS_ARRAY:
                    l = sample.landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    out_sample = l
                elif sample_type == SPST.PITCH_YAW_ROLL or sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()

                    if params['flip']:
                        yaw = -yaw

                    if sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                        pitch = np.clip((pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw = np.clip((yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll = np.clip((roll / math.pi) / 2.0 + 0.5, 0, 1)

                    out_sample = (pitch, yaw, roll)
                else:
                    raise ValueError('expected sample_type')

                outputs_sample.append(out_sample)
            outputs += [outputs_sample]

        return outputs
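
For reference, each opts entry consumed by this version of process() is a plain dict. A minimal sketch of a plausible output_sample_types configuration; every key and enum member below is read in the code above, but the particular combination and values are illustrative assumptions:

    SPST = SampleProcessor.SampleType
    SPCT = SampleProcessor.ChannelType
    SPFMT = SampleProcessor.FaceMaskType

    example_output_sample_types = [
        # warped, augmented source face image
        {'sample_type': SPST.FACE_IMAGE, 'channel_type': SPCT.BGR,
         'face_type': FaceType.FULL, 'resolution': 128,
         'warp': True, 'transform': True,
         'motion_blur': (25, 5),      # (chance in %, max kernel size)
         'gaussian_blur': (25, 5),    # (chance in %, max kernel size)
         'ct_mode': None, 'data_format': 'NHWC'},
        # matching single-channel hull mask
        {'sample_type': SPST.FACE_MASK, 'channel_type': SPCT.G,
         'face_mask_type': SPFMT.ALL_HULL,
         'face_type': FaceType.FULL, 'resolution': 128},
    ]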
Code example #4
    def process(sample, sample_process_options, output_sample_types, debug):
        SPTF = SampleProcessor.TypeFlags

        sample_bgr = sample.load_bgr()
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        close_sample = sample.close_target_list[np.random.randint(
            0, len(sample.close_target_list)
        )] if sample.close_target_list is not None else None
        close_sample_bgr = close_sample.load_bgr(
        ) if close_sample is not None else None

        if debug and close_sample_bgr is not None:
            LandmarksProcessor.draw_landmarks(close_sample_bgr,
                                              close_sample.landmarks,
                                              (0, 1, 0))

        params = imagelib.gen_warp_params(
            sample_bgr,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        images = [[None] * 3 for _ in range(30)]
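        # cache indexed by [img_type][face_mask_type]; 30 rows so the img_type offsets
        # +10 (RANDOM_CLOSE) and +20 (MORPH_TO_RANDOM_CLOSE) used below index valid slots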

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample_type in output_sample_types:
            f = sample_type[0]
            size = sample_type[1]
            random_sub_size = 0 if len(sample_type) < 3 else min(
                sample_type[2], size)

            if f & SPTF.SOURCE != 0:
                img_type = 0
            elif f & SPTF.WARPED != 0:
                img_type = 1
            elif f & SPTF.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif f & SPTF.TRANSFORMED != 0:
                img_type = 3
            elif f & SPTF.LANDMARKS_ARRAY != 0:
                img_type = 4
            else:
                raise ValueError('expected SampleTypeFlags type')

            if f & SPTF.RANDOM_CLOSE != 0:
                img_type += 10
            elif f & SPTF.MORPH_TO_RANDOM_CLOSE != 0:
                img_type += 20

            face_mask_type = 0
            if f & SPTF.FACE_MASK_FULL != 0:
                face_mask_type = 1
            elif f & SPTF.FACE_MASK_EYES != 0:
                face_mask_type = 2

            target_face_type = -1
            if f & SPTF.FACE_TYPE_HALF != 0:
                target_face_type = FaceType.HALF
            elif f & SPTF.FACE_TYPE_FULL != 0:
                target_face_type = FaceType.FULL
            elif f & SPTF.FACE_TYPE_HEAD != 0:
                target_face_type = FaceType.HEAD
            elif f & SPTF.FACE_TYPE_AVATAR != 0:
                target_face_type = FaceType.AVATAR

            apply_motion_blur = f & SPTF.OPT_APPLY_MOTION_BLUR != 0

            if img_type == 4:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            else:
                if images[img_type][face_mask_type] is None:
                    if img_type >= 10 and img_type <= 19:  #RANDOM_CLOSE
                        img_type -= 10
                        img = close_sample_bgr
                        cur_sample = close_sample

                    elif img_type >= 20 and img_type <= 29:  #MORPH_TO_RANDOM_CLOSE
                        img_type -= 20
                        res = sample.shape[0]

                        s_landmarks = sample.landmarks.copy()
                        d_landmarks = close_sample.landmarks.copy()
                        idxs = list(range(len(s_landmarks)))
                        # remove landmarks near the image boundaries
                        for i in idxs[:]:
                            s_l = s_landmarks[i]
                            d_l = d_landmarks[i]
                            if s_l[0] < 5 or s_l[1] < 5 or s_l[0] >= res-5 or s_l[1] >= res-5 or \
                               d_l[0] < 5 or d_l[1] < 5 or d_l[0] >= res-5 or d_l[1] >= res-5:
                                idxs.remove(i)
                        # remove landmarks that are within 5 px of each other
                        for landmarks in [s_landmarks, d_landmarks]:
                            for i in idxs[:]:
                                s_l = landmarks[i]
                                for j in idxs[:]:
                                    if i == j:
                                        continue
                                    s_l_2 = landmarks[j]
                                    diff_l = np.abs(s_l - s_l_2)
                                    if np.sqrt(diff_l.dot(diff_l)) < 5:
                                        idxs.remove(i)
                                        break
                        s_landmarks = s_landmarks[idxs]
                        d_landmarks = d_landmarks[idxs]
                        s_landmarks = np.concatenate([
                            s_landmarks,
                            [[0, 0], [res // 2, 0], [res - 1, 0],
                             [0, res // 2], [res - 1, res // 2], [0, res - 1],
                             [res // 2, res - 1], [res - 1, res - 1]]
                        ])
                        d_landmarks = np.concatenate([
                            d_landmarks,
                            [[0, 0], [res // 2, 0], [res - 1, 0],
                             [0, res // 2], [res - 1, res // 2], [0, res - 1],
                             [res // 2, res - 1], [res - 1, res - 1]]
                        ])
                        img = imagelib.morph_by_points(sample_bgr, s_landmarks,
                                                       d_landmarks)
                        cur_sample = close_sample
                    else:
                        img = sample_bgr
                        cur_sample = sample

                    if is_face_sample:
                        if apply_motion_blur and sample_process_options.motion_blur is not None:
                            chance, mb_range = sample_process_options.motion_blur
                            if np.random.randint(100) < chance:
                                dim = mb_range[np.random.randint(
                                    len(mb_range))]
                                img = imagelib.LinearMotionBlur(
                                    img, dim, np.random.randint(180))

                        if face_mask_type == 1:
                            mask = cur_sample.load_fanseg_mask()  # use the fanseg mask if it exists

                            if mask is None:
                                mask = LandmarksProcessor.get_image_hull_mask(
                                    img.shape, cur_sample.landmarks)

                            if cur_sample.ie_polys is not None:
                                cur_sample.ie_polys.overlay_mask(mask)

                            img = np.concatenate((img, mask), -1)
                        elif face_mask_type == 2:
                            mask = LandmarksProcessor.get_image_eye_mask(
                                img.shape, cur_sample.landmarks)
                            mask = np.expand_dims(
                                cv2.blur(mask, (w // 32, w // 32)), -1)
                            mask[mask > 0.0] = 1.0
                            img = np.concatenate((img, mask), -1)

                    images[img_type][face_mask_type] = imagelib.warp_by_params(
                        params, img, (img_type == 1 or img_type == 2),
                        (img_type == 2 or img_type == 3), img_type != 0,
                        face_mask_type == 0)

                img = images[img_type][face_mask_type]

                if is_face_sample and target_face_type != -1:
                    if target_face_type > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.'
                            % (sample.filename, sample.face_type,
                               target_face_type))
                    img = cv2.warpAffine(img,
                                         LandmarksProcessor.get_transform_mat(
                                             sample.landmarks, size,
                                             target_face_type), (size, size),
                                         flags=cv2.INTER_CUBIC)
                else:
                    img = cv2.resize(img, (size, size), cv2.INTER_CUBIC)

                if random_sub_size != 0:
                    sub_size = size - random_sub_size
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_size)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

                if f & SPTF.MODE_BGR != 0:
                    img = img_bgr
                elif f & SPTF.MODE_BGR_SHUFFLE != 0:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img_bgr = np.take(img_bgr,
                                      rnd_state.permutation(img_bgr.shape[-1]),
                                      axis=-1)
                    img = np.concatenate((img_bgr, img_mask), -1)
                elif f & SPTF.MODE_G != 0:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif f & SPTF.MODE_GGG != 0:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif is_face_sample and f & SPTF.MODE_M != 0:
                    if face_mask_type == 0:
                        raise ValueError('no face_mask_type defined')
                    img = img_mask
                else:
                    raise ValueError('expected SampleTypeFlags mode')

                if not debug:
                    if sample_process_options.normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
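
In this older variant each output_sample_types entry is a tuple of (bit flags, size[, random_sub_size]) rather than a dict. A minimal sketch of a plausible configuration; the flags are the ones tested above, combined here purely for illustration:

    SPTF = SampleProcessor.TypeFlags

    example_output_sample_types = [
        # warped + transformed, full-face aligned, plain BGR at 128 px
        (SPTF.WARPED_TRANSFORMED | SPTF.FACE_TYPE_FULL | SPTF.MODE_BGR, 128),
        # transformed target with its full-face hull mask as a single channel
        (SPTF.TRANSFORMED | SPTF.FACE_TYPE_FULL | SPTF.FACE_MASK_FULL | SPTF.MODE_M, 128),
        # same target, randomly cropped to a 120x120 sub-window (random_sub_size=8)
        (SPTF.TRANSFORMED | SPTF.FACE_TYPE_FULL | SPTF.MODE_BGR, 128, 8),
    ]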
Code example #5
File: SampleProcessor.py  Project: wzk319/DeepFaceLab
    def process(sample, sample_process_options, output_sample_types, debug):
        SPTF = SampleProcessor.Types

        sample_bgr = sample.load_bgr()
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        params = imagelib.gen_warp_params(
            sample_bgr,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        cached_images = collections.defaultdict(dict)
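        # cache keyed by [img_type][face_mask_type] so output types that share the
        # same warp settings reuse one augmented image per call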

        sample_rnd_seed = np.random.randint(0x80000000)

        SPTF_FACETYPE_TO_FACETYPE = {
            SPTF.FACE_TYPE_HALF: FaceType.HALF,
            SPTF.FACE_TYPE_FULL: FaceType.FULL,
            SPTF.FACE_TYPE_HEAD: FaceType.HEAD,
            SPTF.FACE_TYPE_AVATAR: FaceType.AVATAR
        }

        outputs = []
        for opts in output_sample_types:

            resolution = opts.get('resolution', 0)
            types = opts.get('types', [])

            random_sub_res = opts.get('random_sub_res', 0)
            normalize_std_dev = opts.get('normalize_std_dev', False)
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)

            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
            face_mask_type = SPTF.NONE
            mode_type = SPTF.NONE
            for t in types:
                if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                    img_type = t
                elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                    target_face_type = t
                elif t >= SPTF.FACE_MASK_BEGIN and t < SPTF.FACE_MASK_END:
                    face_mask_type = t
                elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                    mode_type = t

            if img_type == SPTF.NONE:
                raise ValueError('expected IMG_ type')

            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                pitch_yaw_roll = sample.pitch_yaw_roll
                if pitch_yaw_roll is not None:
                    pitch, yaw, roll = pitch_yaw_roll
                else:
                    pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                        sample.landmarks)
                if params['flip']:
                    yaw = -yaw

                if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch = (pitch + 1.0) / 2.0
                    yaw = (yaw + 1.0) / 2.0
                    roll = (roll + 1.0) / 2.0

                img = (pitch, yaw, roll)
            else:
                if mode_type == SPTF.NONE:
                    raise ValueError('expected MODE_ type')

                img = cached_images.get(img_type, {}).get(face_mask_type, None)
                if img is None:

                    img = sample_bgr
                    cur_sample = sample

                    if is_face_sample:
                        if motion_blur is not None:
                            chance, mb_range = motion_blur
                            chance = np.clip(chance, 0, 100)

                            if np.random.randint(100) < chance:
                                mb_range = [3, 5, 7,
                                            9][:np.clip(mb_range, 0, 3) + 1]
                                dim = mb_range[np.random.randint(
                                    len(mb_range))]
                                img = imagelib.LinearMotionBlur(
                                    img, dim, np.random.randint(180))

                        if face_mask_type == SPTF.FACE_MASK_FULL:
                            mask = cur_sample.load_fanseg_mask()  # use the fanseg mask if it exists

                            if mask is None:
                                mask = LandmarksProcessor.get_image_hull_mask(
                                    img.shape, cur_sample.landmarks)

                            if cur_sample.ie_polys is not None:
                                cur_sample.ie_polys.overlay_mask(mask)

                            img = np.concatenate((img, mask), -1)
                        elif face_mask_type == SPTF.FACE_MASK_EYES:
                            mask = LandmarksProcessor.get_image_eye_mask(
                                img.shape, cur_sample.landmarks)
                            mask = np.expand_dims(
                                cv2.blur(mask, (w // 32, w // 32)), -1)
                            mask[mask > 0.0] = 1.0
                            img = np.concatenate((img, mask), -1)

                    warp = (img_type == SPTF.IMG_WARPED
                            or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                    transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                 or img_type == SPTF.IMG_TRANSFORMED)
                    flip = img_type != SPTF.IMG_WARPED
                    is_border_replicate = face_mask_type == SPTF.NONE

                    img = cached_images[img_type][
                        face_mask_type] = imagelib.warp_by_params(
                            params, img, warp, transform, flip,
                            is_border_replicate)

                if is_face_sample and target_face_type != SPTF.NONE:
                    ft = SPTF_FACETYPE_TO_FACETYPE[target_face_type]
                    if ft > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.'
                            % (sample.filename, sample.face_type, ft))
                    img = cv2.warpAffine(img,
                                         LandmarksProcessor.get_transform_mat(
                                             sample.landmarks, resolution, ft),
                                         (resolution, resolution),
                                         flags=cv2.INTER_CUBIC)
                else:
                    img = cv2.resize(img, (resolution, resolution),
                                     cv2.INTER_CUBIC)

                if random_sub_res != 0:
                    sub_size = resolution - random_sub_res
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_res)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

                if normalize_std_dev:
                    img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std(
                        (0, 1))
                elif normalize_vgg:
                    img_bgr = np.clip(img_bgr * 255, 0, 255)
                    img_bgr[:, :, 0] -= 103.939
                    img_bgr[:, :, 1] -= 116.779
                    img_bgr[:, :, 2] -= 123.68

                if mode_type == SPTF.MODE_BGR:
                    img = img_bgr
                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img_bgr = np.take(img_bgr,
                                      rnd_state.permutation(img_bgr.shape[-1]),
                                      axis=-1)
                    img = np.concatenate((img_bgr, img_mask), -1)
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    if face_mask_type == SPTF.NONE:
                        raise ValueError('no face_mask_type defined')
                    img = img_mask

                if not debug:
                    if sample_process_options.normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
Code example #6
    def process (samples, sample_process_options, output_sample_types, debug, ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            h,w,c = sample_bgr.shape

            is_face_sample = sample.landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))

            params = imagelib.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range, rnd_seed=sample_rnd_seed )

            outputs_sample = []
            for opts in output_sample_types:

                resolution = opts.get('resolution', 0)
                types = opts.get('types', [] )

                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)

                ct_mode = opts.get('ct_mode', None)
                normalize_tanh = opts.get('normalize_tanh', False)
                data_format = opts.get('data_format', 'NHWC')


                img_type = SPTF.NONE
                target_face_type = SPTF.NONE
                mode_type = SPTF.NONE
                for t in types:
                    if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                        img_type = t
                    elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                        target_face_type = t
                    elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                        mode_type = t


                if is_face_sample:
                    if target_face_type == SPTF.NONE:
                        raise ValueError("target face type must be defined for face samples")
                else:
                    if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
                        raise ValueError("MODE_FACE_MASK_ALL_HULL applicable only for face samples")
                    if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
                        raise ValueError("MODE_FACE_MASK_EYES_HULL applicable only for face samples")
                    if mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
                        raise ValueError("MODE_FACE_MASK_ALL_EYES_HULL applicable only for face samples")
                    if mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        raise ValueError("MODE_FACE_MASK_STRUCT applicable only for face samples")

                can_warp      = (img_type==SPTF.IMG_WARPED or img_type==SPTF.IMG_WARPED_TRANSFORMED)
                can_transform = (img_type==SPTF.IMG_WARPED_TRANSFORMED or img_type==SPTF.IMG_TRANSFORMED)

                if img_type == SPTF.NONE:
                    raise ValueError ('expected IMG_ type')

                if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                    l = sample.landmarks
                    l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
                    l = np.clip(l, 0.0, 1.0)
                    out_sample = l
                elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()

                    if params['flip']:
                        yaw = -yaw

                    if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                        pitch = np.clip( (pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw   = np.clip( (yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll  = np.clip( (roll / math.pi) / 2.0 + 0.5, 0, 1)

                    out_sample = (pitch, yaw, roll)
                else:
                    if mode_type == SPTF.NONE:
                        raise ValueError ('expected MODE_ type')

                    if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:

                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
                            if sample.eyebrows_expand_mod is not None:
                                all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
                            else:
                                all_mask = LandmarksProcessor.get_image_hull_mask (sample_bgr.shape, sample.landmarks)
                            
                            all_mask = np.clip(all_mask, 0, 1)
                            
                        if mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                           mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
                            eyes_mask = LandmarksProcessor.get_image_eye_mask (sample_bgr.shape, sample.landmarks)
                            eyes_mask = np.clip(eyes_mask, 0, 1)
                            
                        if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL:
                            img = all_mask
                        elif mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
                            img = eyes_mask
                        elif mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL:
                            img = all_mask + eyes_mask
                            
                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(img)

                    elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        if sample.eyebrows_expand_mod is not None:
                            img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks, eyebrows_expand_mod=sample.eyebrows_expand_mod )
                        else:
                            img = LandmarksProcessor.get_face_struct_mask (sample_bgr.shape, sample.landmarks)
                    else:
                        img = sample_bgr
                        if motion_blur is not None:
                            chance, mb_max_size = motion_blur
                            chance = np.clip(chance, 0, 100)

                            rnd_state = np.random.RandomState (sample_rnd_seed)
                            mblur_rnd_chance = rnd_state.randint(100)
                            mblur_rnd_kernel = rnd_state.randint(mb_max_size)+1
                            mblur_rnd_deg    = rnd_state.randint(360)

                            if mblur_rnd_chance < chance:
                                img = imagelib.LinearMotionBlur (img, mblur_rnd_kernel, mblur_rnd_deg )

                        if gaussian_blur is not None:
                            chance, kernel_max_size = gaussian_blur
                            chance = np.clip(chance, 0, 100)
                            
                            rnd_state = np.random.RandomState (sample_rnd_seed+1)
                            gblur_rnd_chance = rnd_state.randint(100)
                            gblur_rnd_kernel = rnd_state.randint(kernel_max_size)*2+1

                            if gblur_rnd_chance < chance:
                                img = cv2.GaussianBlur(img, (gblur_rnd_kernel,) *2 , 0)

                    if is_face_sample:
                        target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[target_face_type]
                        if target_ft > sample.face_type:
                            raise Exception ('sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.' % (sample.filename, sample.face_type, target_ft) )

                        if sample.face_type == FaceType.MARK_ONLY:
                            mat  = LandmarksProcessor.get_transform_mat (sample.landmarks, sample.shape[0], target_ft)

                            if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                                img = cv2.warpAffine( img, mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_LINEAR )
                                img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
                                img = cv2.resize( img, (resolution,resolution), cv2.INTER_LINEAR )[...,None]
                            else:
                                img  = cv2.warpAffine( img,  mat, (sample.shape[0],sample.shape[0]), flags=cv2.INTER_CUBIC )
                                img  = imagelib.warp_by_params (params, img,  can_warp, can_transform, can_flip=True, border_replicate=True)
                                img  = cv2.resize( img,  (resolution,resolution), cv2.INTER_CUBIC )

                        else:
                            mat = LandmarksProcessor.get_transform_mat (sample.landmarks, resolution, target_ft)

                            if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_STRUCT:                                
                                img = imagelib.warp_by_params (params, img, can_warp, can_transform, can_flip=True, border_replicate=False, cv2_inter=cv2.INTER_LINEAR)
                                img = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_CONSTANT, flags=cv2.INTER_LINEAR )[...,None]                                
                            else:
                                img  = imagelib.warp_by_params (params, img,  can_warp, can_transform, can_flip=True, border_replicate=True)
                                img  = cv2.warpAffine( img, mat, (resolution,resolution), borderMode=cv2.BORDER_REPLICATE, flags=cv2.INTER_CUBIC )
                    else:
                        img  = imagelib.warp_by_params (params, img,  can_warp, can_transform, can_flip=True, border_replicate=True)
                        img  = cv2.resize( img,  (resolution,resolution), cv2.INTER_CUBIC )


                    if mode_type == SPTF.MODE_FACE_MASK_ALL_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_ALL_EYES_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        out_sample = img.astype(np.float32)
                    else:
                        img = np.clip(img.astype(np.float32), 0, 1)

                        if ct_mode is not None and ct_sample is not None:
                            if ct_sample_bgr is None:
                                ct_sample_bgr = ct_sample.load_bgr()
                            img = imagelib.color_transfer (ct_mode,
                                                           img,
                                                           cv2.resize( ct_sample_bgr, (resolution,resolution), cv2.INTER_LINEAR ) )

                        if mode_type == SPTF.MODE_BGR:
                            out_sample = img
                        elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                            rnd_state = np.random.RandomState (sample_rnd_seed)
                            out_sample = np.take (img, rnd_state.permutation(img.shape[-1]), axis=-1)

                        elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
                            rnd_state = np.random.RandomState (sample_rnd_seed)
                            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                            # renamed to avoid shadowing the image h/w unpacked above
                            hue, sat, val = cv2.split(hsv)
                            hue = (hue + rnd_state.randint(360)) % 360
                            sat = np.clip(sat + rnd_state.random() - 0.5, 0, 1)
                            val = np.clip(val + rnd_state.random() - 0.5, 0, 1)
                            hsv = cv2.merge([hue, sat, val])
                            out_sample = np.clip( cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR) , 0, 1 )
                            
                        elif mode_type == SPTF.MODE_BGR_RANDOM_RGB_LEVELS:
                            rnd_state = np.random.RandomState (sample_rnd_seed)
                            np_rnd = rnd_state.rand                            
                            
                            inBlack  = np.array([np_rnd()*0.25    , np_rnd()*0.25    , np_rnd()*0.25], dtype=np.float32)
                            inWhite  = np.array([1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25, 1.0-np_rnd()*0.25], dtype=np.float32)
                            inGamma  = np.array([0.5+np_rnd(), 0.5+np_rnd(), 0.5+np_rnd()], dtype=np.float32)
                            outBlack = np.array([0.0, 0.0, 0.0], dtype=np.float32)
                            outWhite = np.array([1.0, 1.0, 1.0], dtype=np.float32)
                            
                            out_sample = np.clip( (img - inBlack) / (inWhite - inBlack), 0, 1 )                            
                            out_sample = ( out_sample ** (1/inGamma) ) *  (outWhite - outBlack) + outBlack
                            out_sample = np.clip(out_sample, 0, 1)
                        elif mode_type == SPTF.MODE_G:
                            out_sample = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[...,None]
                        elif mode_type == SPTF.MODE_GGG:
                            out_sample = np.repeat ( np.expand_dims(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY),-1), (3,), -1)

                    if not debug:
                        if normalize_tanh:
                            out_sample = np.clip (out_sample * 2.0 - 1.0, -1.0, 1.0)

                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2,0,1) )

                outputs_sample.append ( out_sample )
            outputs += [outputs_sample]

        return outputs
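
This version mixes the Types-based flags with per-output options. A minimal sketch of a plausible configuration; the keys and enum members are the ones handled above, while the specific combination is an illustrative assumption:

    SPTF = SampleProcessor.Types

    example_output_sample_types = [
        # warped + transformed source face, color-augmented BGR at 128 px
        {'types': (SPTF.IMG_WARPED_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_BGR),
         'resolution': 128,
         'motion_blur': (25, 5),     # (chance in %, max kernel size)
         'gaussian_blur': (25, 5),   # (chance in %, max kernel size)
         'ct_mode': None, 'normalize_tanh': False, 'data_format': 'NHWC'},
        # matching hull mask rendered as a single channel
        {'types': (SPTF.IMG_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_FACE_MASK_ALL_HULL),
         'resolution': 128},
    ]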
Code example #7
    def process(sample, sample_process_options, output_sample_types, debug):
        source = sample.load_bgr()
        h, w, c = source.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(source, sample.landmarks,
                                              (0, 1, 0))

        params = image_utils.gen_warp_params(
            source,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        images = [[None] * 3 for _ in range(4)]

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample_type in output_sample_types:
            f = sample_type[0]
            size = sample_type[1]
            random_sub_size = 0 if len(sample_type) < 3 else min(
                sample_type[2], size)

            if f & SampleProcessor.TypeFlags.SOURCE != 0:
                img_type = 0
            elif f & SampleProcessor.TypeFlags.WARPED != 0:
                img_type = 1
            elif f & SampleProcessor.TypeFlags.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif f & SampleProcessor.TypeFlags.TRANSFORMED != 0:
                img_type = 3
            else:
                raise ValueError('expected SampleTypeFlags type')

            face_mask_type = 0
            if f & SampleProcessor.TypeFlags.FACE_MASK_FULL != 0:
                face_mask_type = 1
            elif f & SampleProcessor.TypeFlags.FACE_MASK_EYES != 0:
                face_mask_type = 2

            target_face_type = -1
            if f & SampleProcessor.TypeFlags.FACE_ALIGN_HALF != 0:
                target_face_type = FaceType.HALF
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_FULL != 0:
                target_face_type = FaceType.FULL
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_HEAD != 0:
                target_face_type = FaceType.HEAD
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_AVATAR != 0:
                target_face_type = FaceType.AVATAR

            if images[img_type][face_mask_type] is None:
                img = source
                if is_face_sample:
                    if face_mask_type == 1:
                        img = np.concatenate(
                            (img,
                             LandmarksProcessor.get_image_hull_mask(
                                 source, sample.landmarks)), -1)
                    elif face_mask_type == 2:
                        mask = LandmarksProcessor.get_image_eye_mask(
                            source, sample.landmarks)
                        mask = np.expand_dims(
                            cv2.blur(mask, (w // 32, w // 32)), -1)
                        mask[mask > 0.0] = 1.0
                        img = np.concatenate((img, mask), -1)

                images[img_type][face_mask_type] = image_utils.warp_by_params(
                    params, img, (img_type == 1 or img_type == 2),
                    (img_type == 2 or img_type == 3), img_type != 0,
                    face_mask_type == 0)

            img = images[img_type][face_mask_type]

            if is_face_sample and target_face_type != -1:
                if target_face_type > sample.face_type:
                    raise Exception(
                        'sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.'
                        %
                        (sample.filename, sample.face_type, target_face_type))

                img = cv2.warpAffine(img,
                                     LandmarksProcessor.get_transform_mat(
                                         sample.landmarks, size,
                                         target_face_type), (size, size),
                                     flags=cv2.INTER_LANCZOS4)
            else:
                img = cv2.resize(img, (size, size), cv2.INTER_LANCZOS4)

            if random_sub_size != 0:
                sub_size = size - random_sub_size
                rnd_state = np.random.RandomState(sample_rnd_seed +
                                                  random_sub_size)
                start_x = rnd_state.randint(sub_size + 1)
                start_y = rnd_state.randint(sub_size + 1)
                img = img[start_y:start_y + sub_size,
                          start_x:start_x + sub_size, :]

            img_bgr = img[..., 0:3]
            img_mask = img[..., 3:4]

            if f & SampleProcessor.TypeFlags.MODE_BGR != 0:
                img = img  # already BGR (+ optional mask channel); pass through unchanged
            elif f & SampleProcessor.TypeFlags.MODE_BGR_SHUFFLE != 0:
                img_bgr = np.take(img_bgr,
                                  np.random.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                img = np.concatenate((img_bgr, img_mask), -1)
            elif f & SampleProcessor.TypeFlags.MODE_G != 0:
                img = np.concatenate((np.expand_dims(
                    cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), img_mask),
                                     -1)
            elif f & SampleProcessor.TypeFlags.MODE_GGG != 0:
                img = np.concatenate((np.repeat(
                    np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                                   -1), (3, ), -1), img_mask), -1)
            elif is_face_sample and f & SampleProcessor.TypeFlags.MODE_M != 0:
                if face_mask_type == 0:
                    raise ValueError('no face_mask_type defined')
                img = img_mask
            else:
                raise ValueError('expected SampleTypeFlags mode')

            if not debug and sample_process_options.normalize_tanh:
                img = img * 2.0 - 1.0

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                # 4-channel outputs are previewed as BGR multiplied by their mask
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
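Taken together, the branches above mean each entry of output_sample_types is a tuple (flags, size) or (flags, size, random_sub_size): the flags OR together exactly one image type (SOURCE / WARPED / WARPED_TRANSFORMED / TRANSFORMED), optionally one mask (FACE_MASK_FULL / FACE_MASK_EYES), optionally one alignment (FACE_ALIGN_HALF / FACE_ALIGN_FULL / FACE_ALIGN_HEAD / FACE_ALIGN_AVATAR), and exactly one channel mode (MODE_BGR / MODE_BGR_SHUFFLE / MODE_G / MODE_GGG / MODE_M). Below is a minimal configuration sketch under that reading; the particular combinations and sizes are assumptions for illustration only, not taken from the original project.

# Hypothetical output_sample_types configuration (assumed values):
TF = SampleProcessor.TypeFlags
output_sample_types = [
    # warped + transformed view, aligned to the full face, plain BGR at 128 px
    (TF.WARPED_TRANSFORMED | TF.FACE_ALIGN_FULL | TF.MODE_BGR, 128),
    # same view returned as its full-face hull mask (MODE_M requires a FACE_MASK_* flag)
    (TF.WARPED_TRANSFORMED | TF.FACE_ALIGN_FULL | TF.FACE_MASK_FULL | TF.MODE_M, 128),
    # unwarped source in grayscale with a random sub-crop of up to 16 px
    (TF.SOURCE | TF.FACE_ALIGN_FULL | TF.MODE_G, 128, 16),
]

Because the sub-crop offset is drawn from a RandomState seeded with sample_rnd_seed + random_sub_size, outputs of the same sample that request the same size and random_sub_size are cropped at the same position.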
Code example #8
0
    def onProcessSample(self, sample, debug):
        source = sample.load_bgr()
        h, w, c = source.shape

        is_face_sample = self.trainingdatatype >= TrainingDataType.FACE_BEGIN and self.trainingdatatype <= TrainingDataType.FACE_END

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(source, sample.landmarks,
                                              (0, 1, 0))

        params = image_utils.gen_warp_params(
            source,
            self.random_flip,
            rotation_range=self.rotation_range,
            scale_range=self.scale_range,
            tx_range=self.tx_range,
            ty_range=self.ty_range)

        images = [[None] * 3 for _ in range(4)]  # cache indexed by [img_type][mask_type]

        outputs = []
        for t, size in self.output_sample_types:
            if t & self.SampleTypeFlags.SOURCE != 0:
                img_type = 0
            elif t & self.SampleTypeFlags.WARPED != 0:
                img_type = 1
            elif t & self.SampleTypeFlags.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif t & self.SampleTypeFlags.TRANSFORMED != 0:
                img_type = 3
            else:
                raise ValueError('expected SampleTypeFlags type')

            mask_type = 0
            if t & self.SampleTypeFlags.MASK_FULL != 0:
                mask_type = 1
            elif t & self.SampleTypeFlags.MASK_EYES != 0:
                mask_type = 2

            if images[img_type][mask_type] is None:
                img = source
                if is_face_sample:
                    if mask_type == 1:
                        img = np.concatenate(
                            (img,
                             LandmarksProcessor.get_image_hull_mask(
                                 source, sample.landmarks)), -1)
                    elif mask_type == 2:
                        mask = LandmarksProcessor.get_image_eye_mask(
                            source, sample.landmarks)
                        mask = np.expand_dims(
                            cv2.blur(mask, (w // 32, w // 32)), -1)
                        mask[mask > 0.0] = 1.0
                        img = np.concatenate((img, mask), -1)

                images[img_type][mask_type] = image_utils.warp_by_params(
                    params, img, (img_type == 1 or img_type == 2),
                    (img_type == 2 or img_type == 3), img_type != 0)

            img = images[img_type][mask_type]

            target_face_type = -1
            if t & self.SampleTypeFlags.HALF_FACE != 0:
                target_face_type = FaceType.HALF
            elif t & self.SampleTypeFlags.FULL_FACE != 0:
                target_face_type = FaceType.FULL
            elif t & self.SampleTypeFlags.HEAD_FACE != 0:
                target_face_type = FaceType.HEAD
            elif t & self.SampleTypeFlags.AVATAR_FACE != 0:
                target_face_type = FaceType.AVATAR
            elif t & self.SampleTypeFlags.MARK_ONLY_FACE != 0:
                target_face_type = FaceType.MARK_ONLY

            if is_face_sample and target_face_type != -1 and target_face_type != FaceType.MARK_ONLY:
                if target_face_type > sample.face_type:
                    raise Exception(
                        'sample %s type %s does not match model requirement %s. Consider extracting the necessary type of faces.'
                        % (sample.filename, sample.face_type, target_face_type))

                img = cv2.warpAffine(img,
                                     LandmarksProcessor.get_transform_mat(
                                         sample.landmarks, size,
                                         target_face_type), (size, size),
                                     flags=cv2.INTER_LANCZOS4)
            else:
                img = cv2.resize(img, (size, size),
                                 interpolation=cv2.INTER_LANCZOS4)

            img_bgr = img[..., 0:3]
            img_mask = img[..., 3:4]

            if t & self.SampleTypeFlags.MODE_BGR != 0:
                img = img
            elif t & self.SampleTypeFlags.MODE_BGR_SHUFFLE != 0:
                img_bgr = np.take(img_bgr,
                                  np.random.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                img = np.concatenate((img_bgr, img_mask), -1)
            elif t & self.SampleTypeFlags.MODE_G != 0:
                img = np.concatenate((np.expand_dims(
                    cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), img_mask),
                                     -1)
            elif t & self.SampleTypeFlags.MODE_GGG != 0:
                img = np.concatenate((np.repeat(
                    np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                                   -1), (3, ), -1), img_mask), -1)
            elif is_face_sample and t & self.SampleTypeFlags.MODE_M != 0:
                if mask_type == 0:
                    raise ValueError('no mask mode defined')
                img = img_mask
            else:
                raise ValueError('expected SampleTypeFlags mode')

            if not debug and self.normalize_tanh:
                img = img * 2.0 - 1.0

            outputs.append(img)

        if debug:
            result = ()

            for output in outputs:
                if output.shape[2] < 4:
                    result += (output, )
                elif output.shape[2] == 4:
                    result += (output[..., 0:3] * output[..., 3:4], )

            return result
        else:
            return outputs
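This older onProcessSample variant follows the same flag scheme but consumes plain (flags, size) pairs and names its flags differently: HALF_FACE / FULL_FACE / HEAD_FACE / AVATAR_FACE / MARK_ONLY_FACE for alignment and MASK_FULL / MASK_EYES for masks. MARK_ONLY_FACE maps to FaceType.MARK_ONLY, which skips the landmark alignment warp and only resizes the frame. A hypothetical configuration sketch for this interface, assumed to live wherever the generator assigns self.output_sample_types; the combinations and sizes are illustrative only.

# Hypothetical sketch (assumed values, not taken from the original project):
f = self.SampleTypeFlags
self.output_sample_types = [
    # full-face aligned, warped + transformed BGR input
    (f.WARPED_TRANSFORMED | f.FULL_FACE | f.MODE_BGR, 64),
    # matching full-face hull mask for the transformed target (MODE_M needs a MASK_* flag)
    (f.TRANSFORMED | f.FULL_FACE | f.MASK_FULL | f.MODE_M, 64),
    # whole frame without the alignment warp (MARK_ONLY), tripled grayscale channels
    (f.SOURCE | f.MARK_ONLY_FACE | f.MODE_GGG, 256),
]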