Example #1
    def process(sample,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_bgr = sample.load_bgr()
        ct_sample_bgr = None
        ct_sample_mask = None
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        params = imagelib.gen_warp_params(
            sample_bgr,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        cached_images = collections.defaultdict(dict)

        sample_rnd_seed = np.random.randint(0x80000000)
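        # One seed is drawn per call; the RandomState objects created below
        # all derive from it, so augmentations that must agree across output
        # types (channel shuffle, sub-res crop) share the same randomness.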

        outputs = []
        for opts in output_sample_types:

            resolution = opts.get('resolution', 0)
            types = opts.get('types', [])

            border_replicate = opts.get('border_replicate', True)
            random_sub_res = opts.get('random_sub_res', 0)
            normalize_std_dev = opts.get('normalize_std_dev', False)
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)
            apply_ct = opts.get('apply_ct', ColorTransferMode.NONE)
            normalize_tanh = opts.get('normalize_tanh', False)

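            # 'types' mixes values from several SPTF enum ranges; the loop
            # below sorts them into an image type, a target face type and a
            # channel mode.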
            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
            face_mask_type = SPTF.NONE
            mode_type = SPTF.NONE
            for t in types:
                if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                    img_type = t
                elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                    target_face_type = t
                elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                    mode_type = t

            if img_type == SPTF.NONE:
                raise ValueError('expected IMG_ type')

            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                pitch_yaw_roll = sample.pitch_yaw_roll
                if pitch_yaw_roll is not None:
                    pitch, yaw, roll = pitch_yaw_roll
                else:
                    pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                        sample.landmarks)
                if params['flip']:
                    yaw = -yaw

                if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch = (pitch + 1.0) / 2.0
                    yaw = (yaw + 1.0) / 2.0
                    roll = (roll + 1.0) / 2.0

                img = (pitch, yaw, roll)
            else:
                if mode_type == SPTF.NONE:
                    raise ValueError('expected MODE_ type')

                def do_transform(img, mask):
                    warp = (img_type == SPTF.IMG_WARPED
                            or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                    transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                 or img_type == SPTF.IMG_TRANSFORMED)
                    flip = img_type != SPTF.IMG_WARPED

                    img = imagelib.warp_by_params(params, img, warp, transform,
                                                  flip, border_replicate)
                    if mask is not None:
                        mask = imagelib.warp_by_params(params, mask, warp,
                                                       transform, flip, False)
                        if len(mask.shape) == 2:
                            mask = mask[..., np.newaxis]

                        img = np.concatenate((img, mask), -1)
                    return img

                img = sample_bgr

                ### Prepare a mask
                mask = None
                if is_face_sample:
                    mask = sample.load_fanseg_mask()  # use the fanseg mask if it exists

                    if mask is None:
                        if sample.eyebrows_expand_mod is not None:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape,
                                sample.landmarks,
                                eyebrows_expand_mod=sample.eyebrows_expand_mod)
                        else:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape, sample.landmarks)

                    if sample.ie_polys is not None:
                        sample.ie_polys.overlay_mask(mask)
                ##################

                if motion_blur is not None:
                    chance, mb_max_size = motion_blur
                    chance = np.clip(chance, 0, 100)

                    if np.random.randint(100) < chance:
                        img = imagelib.LinearMotionBlur(
                            img,
                            np.random.randint(mb_max_size) + 1,
                            np.random.randint(360))

                if is_face_sample and target_face_type != SPTF.NONE:
                    target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[
                        target_face_type]
                    if target_ft > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting faces of the necessary type.'
                            % (sample.filename, sample.face_type, target_ft))

                    if sample.face_type == FaceType.MARK_ONLY:
                        # first warp to the target face type
                        img = cv2.warpAffine(
                            img,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0],
                                target_ft), (sample.shape[0], sample.shape[0]),
                            flags=cv2.INTER_CUBIC)
                        mask = cv2.warpAffine(
                            mask,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0],
                                target_ft), (sample.shape[0], sample.shape[0]),
                            flags=cv2.INTER_CUBIC)
                        # then apply the transforms
                        img = do_transform(img, mask)
                        img = cv2.resize(img, (resolution, resolution),
                                         interpolation=cv2.INTER_CUBIC)
                    else:
                        img = do_transform(img, mask)
                        img = cv2.warpAffine(
                            img,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution,
                                target_ft), (resolution, resolution),
                            borderMode=(cv2.BORDER_REPLICATE
                                        if border_replicate else
                                        cv2.BORDER_CONSTANT),
                            flags=cv2.INTER_CUBIC)

                else:
                    img = do_transform(img, mask)
                    img = cv2.resize(img, (resolution, resolution),
                                     interpolation=cv2.INTER_CUBIC)

                if random_sub_res != 0:
                    sub_size = resolution - random_sub_res
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_res)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img = np.clip(img, 0, 1)
                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

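                # Color transfer: LCT is applied directly; every RCT variant
                # is decoded into (use_masks, use_paper, use_clip) flags via
                # the lookup table below.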
                if apply_ct and ct_sample is not None:
                    if ct_sample_bgr is None:
                        ct_sample_bgr = ct_sample.load_bgr()

                    if apply_ct == ColorTransferMode.LCT:
                        img_bgr = imagelib.linear_color_transfer(
                            img_bgr, ct_sample_bgr)

                    elif ColorTransferMode.RCT <= apply_ct <= ColorTransferMode.MASKED_RCT_PAPER_CLIP:
                        ct_options = {
                            ColorTransferMode.RCT: (False, False, False),
                            ColorTransferMode.RCT_CLIP: (False, False, True),
                            ColorTransferMode.RCT_PAPER: (False, True, False),
                            ColorTransferMode.RCT_PAPER_CLIP:
                            (False, True, True),
                            ColorTransferMode.MASKED_RCT: (True, False, False),
                            ColorTransferMode.MASKED_RCT_CLIP:
                            (True, False, True),
                            ColorTransferMode.MASKED_RCT_PAPER:
                            (True, True, False),
                            ColorTransferMode.MASKED_RCT_PAPER_CLIP:
                            (True, True, True),
                        }

                        use_masks, use_paper, use_clip = ct_options[apply_ct]
                        if not use_masks:
                            img_bgr = imagelib.reinhard_color_transfer(
                                img_bgr,
                                ct_sample_bgr,
                                clip=use_clip,
                                preserve_paper=use_paper)
                        else:
                            if ct_sample_mask is None:
                                ct_sample_mask = ct_sample.load_mask()
                            img_bgr = imagelib.reinhard_color_transfer(
                                img_bgr,
                                ct_sample_bgr,
                                clip=use_clip,
                                preserve_paper=use_paper,
                                source_mask=img_mask,
                                target_mask=ct_sample_mask)

                if normalize_std_dev:
                    img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std(
                        (0, 1))
                elif normalize_vgg:
                    img_bgr = np.clip(img_bgr * 255, 0, 255)
                    img_bgr[:, :, 0] -= 103.939
                    img_bgr[:, :, 1] -= 116.779
                    img_bgr[:, :, 2] -= 123.68

                if mode_type == SPTF.MODE_BGR:
                    img = img_bgr
                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = np.take(img_bgr,
                                  rnd_state.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                elif mode_type == SPTF.MODE_LAB_RAND_TRANSFORM:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = random_color_transform(img_bgr, rnd_state)
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    img = img_mask

                if not debug:
                    if normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
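
A minimal sketch of how this first variant might be driven (hypothetical: the option keys mirror the opts.get(...) calls above, and process is assumed to be exposed as a static method of SampleProcessor):

    # Hypothetical usage sketch for the Types-based variant above.
    SPTF = SampleProcessor.Types
    output_sample_types = [
        {'types': (SPTF.IMG_WARPED_TRANSFORMED, SPTF.FACE_TYPE_FULL,
                   SPTF.MODE_BGR), 'resolution': 128},
        {'types': (SPTF.IMG_TRANSFORMED, SPTF.FACE_TYPE_FULL,
                   SPTF.MODE_M), 'resolution': 128},
    ]
    warped_bgr, target_mask = SampleProcessor.process(
        sample, SampleProcessor.Options(random_flip=True),
        output_sample_types, debug=False)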
Example #2
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPST = SampleProcessor.SampleType
        SPCT = SampleProcessor.ChannelType
        SPFMT = SampleProcessor.FaceMaskType

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_face_type = sample.face_type
            sample_bgr = sample.load_bgr()
            sample_landmarks = sample.landmarks
            ct_sample_bgr = None
            h, w, c = sample_bgr.shape

            def get_full_face_mask():
                if sample.xseg_mask is not None:
                    full_face_mask = sample.xseg_mask
                    if full_face_mask.shape[0] != h or full_face_mask.shape[
                            1] != w:
                        full_face_mask = cv2.resize(
                            full_face_mask, (w, h),
                            interpolation=cv2.INTER_CUBIC)
                        full_face_mask = imagelib.normalize_channels(
                            full_face_mask, 1)
                else:
                    full_face_mask = LandmarksProcessor.get_image_hull_mask(
                        sample_bgr.shape,
                        sample_landmarks,
                        eyebrows_expand_mod=sample.eyebrows_expand_mod)
                return np.clip(full_face_mask, 0, 1)

            def get_eyes_mask():
                eyes_mask = LandmarksProcessor.get_image_eye_mask(
                    sample_bgr.shape, sample_landmarks)
                return np.clip(eyes_mask, 0, 1)

            is_face_sample = sample_landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample_landmarks,
                                                  (0, 1, 0))

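            # Warp parameters are generated once per distinct output
            # resolution from a single RandomState, so every output type that
            # shares a resolution also shares the same flip/rotation/scale
            # decision.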
            params_per_resolution = {}
            warp_rnd_state = np.random.RandomState(sample_rnd_seed - 1)
            for opts in output_sample_types:
                resolution = opts.get('resolution', None)
                if resolution is None:
                    continue
                params_per_resolution[resolution] = imagelib.gen_warp_params(
                    resolution,
                    sample_process_options.random_flip,
                    rotation_range=sample_process_options.rotation_range,
                    scale_range=sample_process_options.scale_range,
                    tx_range=sample_process_options.tx_range,
                    ty_range=sample_process_options.ty_range,
                    rnd_state=warp_rnd_state)

            outputs_sample = []
            for opts in output_sample_types:
                sample_type = opts.get('sample_type', SPST.NONE)
                channel_type = opts.get('channel_type', SPCT.NONE)
                resolution = opts.get('resolution', 0)
                warp = opts.get('warp', False)
                transform = opts.get('transform', False)
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)
                random_bilinear_resize = opts.get('random_bilinear_resize',
                                                  None)
                random_rgb_levels = opts.get('random_rgb_levels', False)
                random_hsv_shift = opts.get('random_hsv_shift', False)
                random_circle_mask = opts.get('random_circle_mask', False)
                normalize_tanh = opts.get('normalize_tanh', False)
                ct_mode = opts.get('ct_mode', None)
                data_format = opts.get('data_format', 'NHWC')

                if sample_type == SPST.FACE_MASK or sample_type == SPST.IMAGE:
                    border_replicate = False
                elif sample_type == SPST.FACE_IMAGE:
                    border_replicate = True
                else:
                    # avoid an unbound name for non-image sample types
                    border_replicate = False

                border_replicate = opts.get('border_replicate',
                                            border_replicate)
                borderMode = cv2.BORDER_REPLICATE if border_replicate else cv2.BORDER_CONSTANT

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
                    if not is_face_sample:
                        raise ValueError(
                            "face_samples should be provided for sample_type FACE_*"
                        )

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
                    face_type = opts.get('face_type', None)
                    face_mask_type = opts.get('face_mask_type', SPFMT.NONE)

                    if face_type is None:
                        raise ValueError(
                            "face_type must be defined for face samples")

                    if face_type > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting faces of the necessary type.'
                            % (sample.filename, sample.face_type, face_type))

                    if sample_type == SPST.FACE_MASK:

                        if face_mask_type == SPFMT.FULL_FACE:
                            img = get_full_face_mask()
                        elif face_mask_type == SPFMT.EYES:
                            img = get_eyes_mask()
                        elif face_mask_type == SPFMT.FULL_FACE_EYES:
                            img = get_full_face_mask()
                            img += get_eyes_mask() * img
                        else:
                            img = np.zeros(sample_bgr.shape[0:2] + (1, ),
                                           dtype=np.float32)

                        if sample_face_type == FaceType.MARK_ONLY:
                            # warp_resolution is otherwise undefined in this
                            # snippet; warping at the output resolution is an
                            # assumption that keeps the code runnable
                            warp_resolution = resolution
                            mat = LandmarksProcessor.get_transform_mat(
                                sample_landmarks, warp_resolution, face_type)
                            img = cv2.warpAffine(
                                img,
                                mat, (warp_resolution, warp_resolution),
                                flags=cv2.INTER_LINEAR)

                            img = imagelib.warp_by_params(
                                params_per_resolution[resolution],
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=border_replicate,
                                cv2_inter=cv2.INTER_LINEAR)
                            img = cv2.resize(img, (resolution, resolution),
                                             interpolation=cv2.INTER_LINEAR)
                        else:
                            if face_type != sample_face_type:
                                mat = LandmarksProcessor.get_transform_mat(
                                    sample_landmarks, resolution, face_type)
                                img = cv2.warpAffine(img,
                                                     mat,
                                                     (resolution, resolution),
                                                     borderMode=borderMode,
                                                     flags=cv2.INTER_LINEAR)
                            else:
                                if w != resolution:
                                    img = cv2.resize(
                                        img, (resolution, resolution),
                                        interpolation=cv2.INTER_CUBIC)

                            img = imagelib.warp_by_params(
                                params_per_resolution[resolution],
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=border_replicate,
                                cv2_inter=cv2.INTER_LINEAR)

                        if len(img.shape) == 2:
                            img = img[..., None]

                        if channel_type == SPCT.G:
                            out_sample = img.astype(np.float32)
                        else:
                            raise ValueError(
                                "only channel_type.G supported for the mask")

                    elif sample_type == SPST.FACE_IMAGE:
                        img = sample_bgr

                        if random_rgb_levels:
                            random_mask = sd.random_circle_faded(
                                [w, w],
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed)
                            ) if random_circle_mask else None
                            img = imagelib.apply_random_rgb_levels(
                                img,
                                mask=random_mask,
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed))

                        if random_hsv_shift:
                            random_mask = sd.random_circle_faded(
                                [w, w],
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed +
                                    1)) if random_circle_mask else None
                            img = imagelib.apply_random_hsv_shift(
                                img,
                                mask=random_mask,
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed + 1))

                        if face_type != sample_face_type:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample_landmarks, resolution, face_type)
                            img = cv2.warpAffine(img,
                                                 mat, (resolution, resolution),
                                                 borderMode=borderMode,
                                                 flags=cv2.INTER_CUBIC)
                        else:
                            if w != resolution:
                                img = cv2.resize(img, (resolution, resolution),
                                                 interpolation=cv2.INTER_CUBIC)

                        # Apply random color transfer
                        if ct_mode is not None and ct_sample is not None:
                            if ct_sample_bgr is None:
                                ct_sample_bgr = ct_sample.load_bgr()
                            img = imagelib.color_transfer(
                                ct_mode, img,
                                cv2.resize(ct_sample_bgr,
                                           (resolution, resolution),
                                           interpolation=cv2.INTER_LINEAR))

                        img = imagelib.warp_by_params(
                            params_per_resolution[resolution],
                            img,
                            warp,
                            transform,
                            can_flip=True,
                            border_replicate=border_replicate)

                        img = np.clip(img.astype(np.float32), 0, 1)

                        if motion_blur is not None:
                            random_mask = sd.random_circle_faded(
                                [resolution, resolution],
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed +
                                    2)) if random_circle_mask else None
                            img = imagelib.apply_random_motion_blur(
                                img,
                                *motion_blur,
                                mask=random_mask,
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed + 2))

                        if gaussian_blur is not None:
                            random_mask = sd.random_circle_faded(
                                [resolution, resolution],
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed +
                                    3)) if random_circle_mask else None
                            img = imagelib.apply_random_gaussian_blur(
                                img,
                                *gaussian_blur,
                                mask=random_mask,
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed + 3))

                        if random_bilinear_resize is not None:
                            random_mask = sd.random_circle_faded(
                                [resolution, resolution],
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed +
                                    4)) if random_circle_mask else None
                            img = imagelib.apply_random_bilinear_resize(
                                img,
                                *random_bilinear_resize,
                                mask=random_mask,
                                rnd_state=np.random.RandomState(
                                    sample_rnd_seed + 4))

                        # Transform from BGR to desired channel_type
                        if channel_type == SPCT.BGR:
                            out_sample = img
                        elif channel_type == SPCT.G:
                            out_sample = cv2.cvtColor(img,
                                                      cv2.COLOR_BGR2GRAY)[...,
                                                                          None]
                        elif channel_type == SPCT.GGG:
                            out_sample = np.repeat(
                                np.expand_dims(
                                    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), -1),
                                (3, ), -1)

                    # Final transformations
                    if not debug:
                        if normalize_tanh:
                            out_sample = np.clip(out_sample * 2.0 - 1.0, -1.0,
                                                 1.0)
                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))
                elif sample_type == SPST.IMAGE:
                    img = sample_bgr
                    img = imagelib.warp_by_params(
                        params_per_resolution[resolution],
                        img,
                        warp,
                        transform,
                        can_flip=True,
                        border_replicate=True)
                    img = cv2.resize(img, (resolution, resolution),
                                     interpolation=cv2.INTER_CUBIC)
                    out_sample = img

                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))

                elif sample_type == SPST.LANDMARKS_ARRAY:
                    l = sample_landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    out_sample = l
                elif sample_type == SPST.PITCH_YAW_ROLL or sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()
                    if params_per_resolution[resolution]['flip']:
                        yaw = -yaw

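                    # Map angles in radians from [-pi, pi] to [0, 1]:
                    # x -> x / (2*pi) + 0.5.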
                    if sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                        pitch = np.clip((pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw = np.clip((yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll = np.clip((roll / math.pi) / 2.0 + 0.5, 0, 1)

                    out_sample = (pitch, yaw, roll)
                else:
                    raise ValueError('expected sample_type')

                outputs_sample.append(out_sample)
            outputs += [outputs_sample]

        return outputs
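
For the newer dict-driven variant, a call might look like this (hypothetical values; the keys are exactly those read via opts.get(...) above):

    # Hypothetical usage sketch for the SampleType/ChannelType variant.
    SPST = SampleProcessor.SampleType
    SPCT = SampleProcessor.ChannelType
    SPFMT = SampleProcessor.FaceMaskType
    output_sample_types = [
        {'sample_type': SPST.FACE_IMAGE, 'channel_type': SPCT.BGR,
         'resolution': 256, 'warp': True, 'transform': True,
         'face_type': FaceType.FULL},
        {'sample_type': SPST.FACE_MASK, 'channel_type': SPCT.G,
         'resolution': 256, 'face_type': FaceType.FULL,
         'face_mask_type': SPFMT.FULL_FACE},
    ]
    (face_bgr, face_mask), = SampleProcessor.process(
        [sample], SampleProcessor.Options(random_flip=True),
        output_sample_types, debug=False)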
Example #3
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            ct_sample_mask = None
            h, w, c = sample_bgr.shape

            is_face_sample = sample.landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                                  (0, 1, 0))

            params = imagelib.gen_warp_params(
                sample_bgr,
                sample_process_options.random_flip,
                rotation_range=sample_process_options.rotation_range,
                scale_range=sample_process_options.scale_range,
                tx_range=sample_process_options.tx_range,
                ty_range=sample_process_options.ty_range,
                rnd_seed=sample_rnd_seed)

            outputs_sample = []
            for opts in output_sample_types:

                resolution = opts.get('resolution', 0)
                types = opts.get('types', [])

                border_replicate = opts.get('border_replicate', True)
                random_sub_res = opts.get('random_sub_res', 0)
                normalize_std_dev = opts.get('normalize_std_dev', False)
                normalize_vgg = opts.get('normalize_vgg', False)
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)

                ct_mode = opts.get('ct_mode', None)
                normalize_tanh = opts.get('normalize_tanh', False)
                data_format = opts.get('data_format', 'NHWC')

                img_type = SPTF.NONE
                target_face_type = SPTF.NONE
                face_mask_type = SPTF.NONE
                mode_type = SPTF.NONE
                for t in types:
                    if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                        img_type = t
                    elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                        target_face_type = t
                    elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                        mode_type = t

                if img_type == SPTF.NONE:
                    raise ValueError('expected IMG_ type')

                if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                    l = sample.landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    img = l
                elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()

                    if params['flip']:
                        yaw = -yaw

                    if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                        # center the [-pi, pi] range at 0.5, as in the sigmoid
                        # variant of the newer API
                        pitch = np.clip((pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw = np.clip((yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll = np.clip((roll / math.pi) / 2.0 + 0.5, 0, 1)

                    img = (pitch, yaw, roll)
                else:
                    if mode_type == SPTF.NONE:
                        raise ValueError('expected MODE_ type')

                    def do_transform(img, mask):
                        warp = (img_type == SPTF.IMG_WARPED
                                or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                        transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                     or img_type == SPTF.IMG_TRANSFORMED)
                        flip = img_type != SPTF.IMG_WARPED

                        img = imagelib.warp_by_params(params, img, warp,
                                                      transform, flip,
                                                      border_replicate)
                        if mask is not None:
                            mask = imagelib.warp_by_params(
                                params, mask, warp, transform, flip, False)
                            if len(mask.shape) == 2:
                                mask = mask[..., np.newaxis]

                        return img, mask

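                    # Note: unlike the first example, do_transform here
                    # returns the mask separately; it is concatenated onto the
                    # image later, after any face-type warp.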
                    img = sample_bgr

                    ### Prepare a mask
                    mask = None
                    if is_face_sample:
                        if sample.eyebrows_expand_mod is not None:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape,
                                sample.landmarks,
                                eyebrows_expand_mod=sample.eyebrows_expand_mod)
                        else:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape, sample.landmarks)

                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(mask)
                    ##################

                    if motion_blur is not None:
                        chance, mb_max_size = motion_blur
                        chance = np.clip(chance, 0, 100)

                        if np.random.randint(100) < chance:
                            img = imagelib.LinearMotionBlur(
                                img,
                                np.random.randint(mb_max_size) + 1,
                                np.random.randint(360))

                    if gaussian_blur is not None:
                        chance, kernel_max_size = gaussian_blur
                        chance = np.clip(chance, 0, 100)

                        if np.random.randint(100) < chance:
                            img = cv2.GaussianBlur(
                                img,
                                (np.random.randint(kernel_max_size) * 2 + 1, )
                                * 2, 0)

                    if is_face_sample and target_face_type != SPTF.NONE:
                        target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[
                            target_face_type]
                        if target_ft > sample.face_type:
                            raise Exception(
                                'sample %s type %s does not match model requirement %s. Consider extracting faces of the necessary type.'
                                %
                                (sample.filename, sample.face_type, target_ft))

                        if sample.face_type == FaceType.MARK_ONLY:
                            # first warp to the target face type
                            img = cv2.warpAffine(
                                img,
                                LandmarksProcessor.get_transform_mat(
                                    sample.landmarks, sample.shape[0],
                                    target_ft),
                                (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_CUBIC)
                            mask = cv2.warpAffine(
                                mask,
                                LandmarksProcessor.get_transform_mat(
                                    sample.landmarks, sample.shape[0],
                                    target_ft),
                                (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_CUBIC)
                            # then apply the transforms
                            img, mask = do_transform(img, mask)
                            img = np.concatenate((img, mask), -1)
                            img = cv2.resize(img, (resolution, resolution),
                                             interpolation=cv2.INTER_CUBIC)
                        else:
                            img, mask = do_transform(img, mask)

                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution, target_ft)
                            img = cv2.warpAffine(
                                img,
                                mat, (resolution, resolution),
                                borderMode=(cv2.BORDER_REPLICATE
                                            if border_replicate else
                                            cv2.BORDER_CONSTANT),
                                flags=cv2.INTER_CUBIC)
                            mask = cv2.warpAffine(
                                mask,
                                mat, (resolution, resolution),
                                borderMode=cv2.BORDER_CONSTANT,
                                flags=cv2.INTER_CUBIC)
                            img = np.concatenate((img, mask[..., None]), -1)

                    else:
                        img, mask = do_transform(img, mask)
                        img = np.concatenate((img, mask), -1)
                        img = cv2.resize(img, (resolution, resolution),
                                         interpolation=cv2.INTER_CUBIC)

                    if random_sub_res != 0:
                        sub_size = resolution - random_sub_res
                        rnd_state = np.random.RandomState(sample_rnd_seed +
                                                          random_sub_res)
                        start_x = rnd_state.randint(sub_size + 1)
                        start_y = rnd_state.randint(sub_size + 1)
                        img = img[start_y:start_y + sub_size,
                                  start_x:start_x + sub_size, :]

                    img = np.clip(img, 0, 1).astype(np.float32)
                    img_bgr = img[..., 0:3]
                    img_mask = img[..., 3:4]

                    if ct_mode is not None and ct_sample is not None:
                        if ct_sample_bgr is None:
                            ct_sample_bgr = ct_sample.load_bgr()

                        ct_sample_bgr_resized = cv2.resize(
                            ct_sample_bgr, (resolution, resolution),
                            interpolation=cv2.INTER_LINEAR)

                        if ct_mode == 'lct':
                            img_bgr = imagelib.linear_color_transfer(
                                img_bgr, ct_sample_bgr_resized)
                            img_bgr = np.clip(img_bgr, 0.0, 1.0)
                        elif ct_mode == 'rct':
                            img_bgr = imagelib.reinhard_color_transfer(
                                # clip before casting to avoid uint8 wraparound
                                np.clip(img_bgr * 255, 0,
                                        255).astype(np.uint8),
                                np.clip(ct_sample_bgr_resized * 255, 0,
                                        255).astype(np.uint8))
                            img_bgr = np.clip(
                                img_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                        elif ct_mode == 'mkl':
                            img_bgr = imagelib.color_transfer_mkl(
                                img_bgr, ct_sample_bgr_resized)
                        elif ct_mode == 'idt':
                            img_bgr = imagelib.color_transfer_idt(
                                img_bgr, ct_sample_bgr_resized)
                        elif ct_mode == 'sot':
                            img_bgr = imagelib.color_transfer_sot(
                                img_bgr, ct_sample_bgr_resized)
                            img_bgr = np.clip(img_bgr, 0.0, 1.0)

                    if normalize_std_dev:
                        img_bgr = (img_bgr - img_bgr.mean(
                            (0, 1))) / img_bgr.std((0, 1))
                    elif normalize_vgg:
                        img_bgr = np.clip(img_bgr * 255, 0, 255)
                        img_bgr[:, :, 0] -= 103.939
                        img_bgr[:, :, 1] -= 116.779
                        img_bgr[:, :, 2] -= 123.68

                    if mode_type == SPTF.MODE_BGR:
                        img = img_bgr
                    elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                        rnd_state = np.random.RandomState(sample_rnd_seed)
                        img = np.take(img_bgr,
                                      rnd_state.permutation(img_bgr.shape[-1]),
                                      axis=-1)

                    elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
                        rnd_state = np.random.RandomState(sample_rnd_seed)
                        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
                        h, s, v = cv2.split(hsv)
                        h = (h + rnd_state.randint(360)) % 360
                        s = np.clip(s + rnd_state.random() - 0.5, 0, 1)
                        v = np.clip(v + rnd_state.random() - 0.5, 0, 1)
                        hsv = cv2.merge([h, s, v])
                        img = np.clip(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), 0,
                                      1)
                    elif mode_type == SPTF.MODE_G:
                        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)[...,
                                                                        None]
                    elif mode_type == SPTF.MODE_GGG:
                        img = np.repeat(
                            np.expand_dims(
                                cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                            (3, ), -1)
                    elif mode_type == SPTF.MODE_M and is_face_sample:
                        img = img_mask

                    if not debug:
                        if normalize_tanh:
                            img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                        else:
                            img = np.clip(img, 0.0, 1.0)

                    if data_format == "NCHW":
                        img = np.transpose(img, (2, 0, 1))

                outputs_sample.append(img)
            outputs += [outputs_sample]

        return outputs
Example #4
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPST = SampleProcessor.SampleType
        SPCT = SampleProcessor.ChannelType
        SPFMT = SampleProcessor.FaceMaskType

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_face_type = sample.face_type
            sample_bgr = sample.load_bgr()
            sample_landmarks = sample.landmarks
            ct_sample_bgr = None
            h, w, c = sample_bgr.shape

            def get_full_face_mask():
                if sample.eyebrows_expand_mod is not None:
                    full_face_mask = LandmarksProcessor.get_image_hull_mask(
                        sample_bgr.shape,
                        sample_landmarks,
                        eyebrows_expand_mod=sample.eyebrows_expand_mod)
                else:
                    full_face_mask = LandmarksProcessor.get_image_hull_mask(
                        sample_bgr.shape, sample_landmarks)
                return np.clip(full_face_mask, 0, 1)

            def get_eyes_mask():
                eyes_mask = LandmarksProcessor.get_image_eye_mask(
                    sample_bgr.shape, sample_landmarks)
                return np.clip(eyes_mask, 0, 1)

            is_face_sample = sample_landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample_landmarks,
                                                  (0, 1, 0))

            params_per_resolution = {}
            warp_rnd_state = np.random.RandomState(sample_rnd_seed - 1)
            for opts in output_sample_types:
                resolution = opts.get('resolution', None)
                if resolution is None:
                    continue
                params_per_resolution[resolution] = imagelib.gen_warp_params(
                    resolution,
                    sample_process_options.random_flip,
                    rotation_range=sample_process_options.rotation_range,
                    scale_range=sample_process_options.scale_range,
                    tx_range=sample_process_options.tx_range,
                    ty_range=sample_process_options.ty_range,
                    rnd_state=warp_rnd_state)

            outputs_sample = []
            for opts in output_sample_types:
                sample_type = opts.get('sample_type', SPST.NONE)
                channel_type = opts.get('channel_type', SPCT.NONE)
                resolution = opts.get('resolution', 0)
                warp = opts.get('warp', False)
                transform = opts.get('transform', False)
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)
                random_bilinear_resize = opts.get('random_bilinear_resize',
                                                  None)
                normalize_tanh = opts.get('normalize_tanh', False)
                ct_mode = opts.get('ct_mode', None)
                data_format = opts.get('data_format', 'NHWC')

                if sample_type == SPST.FACE_MASK or sample_type == SPST.IMAGE:
                    border_replicate = False
                elif sample_type == SPST.FACE_IMAGE:
                    border_replicate = True
                else:
                    # avoid an unbound name for non-image sample types
                    border_replicate = False

                border_replicate = opts.get('border_replicate',
                                            border_replicate)
                borderMode = cv2.BORDER_REPLICATE if border_replicate else cv2.BORDER_CONSTANT

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
                    if not is_face_sample:
                        raise ValueError(
                            "face_samples should be provided for sample_type FACE_*"
                        )

                if sample_type == SPST.FACE_IMAGE or sample_type == SPST.FACE_MASK:
                    face_type = opts.get('face_type', None)
                    face_mask_type = opts.get('face_mask_type', SPFMT.NONE)

                    if face_type is None:
                        raise ValueError(
                            "face_type must be defined for face samples")

                    if face_type > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting faces of the necessary type.'
                            % (sample.filename, sample.face_type, face_type))

                    if sample_type == SPST.FACE_MASK:

                        if face_mask_type == SPFMT.FULL_FACE:
                            img = get_full_face_mask()
                        elif face_mask_type == SPFMT.EYES:
                            img = get_eyes_mask()
                        elif face_mask_type == SPFMT.FULL_FACE_EYES:
                            img = get_full_face_mask() + get_eyes_mask()
                        else:
                            img = np.zeros(sample_bgr.shape[0:2] + (1, ),
                                           dtype=np.float32)

                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(img)

                        if sample_face_type == FaceType.MARK_ONLY:
                            # warp_resolution is otherwise undefined in this
                            # snippet; warping at the output resolution is an
                            # assumption that keeps the code runnable
                            warp_resolution = resolution
                            mat = LandmarksProcessor.get_transform_mat(
                                sample_landmarks, warp_resolution, face_type)
                            img = cv2.warpAffine(
                                img,
                                mat, (warp_resolution, warp_resolution),
                                flags=cv2.INTER_LINEAR)

                            img = imagelib.warp_by_params(
                                params_per_resolution[resolution],
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=border_replicate,
                                cv2_inter=cv2.INTER_LINEAR)
                            img = cv2.resize(img, (resolution, resolution),
                                             interpolation=cv2.INTER_LINEAR)
                        else:
                            if face_type != sample_face_type:
                                mat = LandmarksProcessor.get_transform_mat(
                                    sample_landmarks, resolution, face_type)
                                img = cv2.warpAffine(img,
                                                     mat,
                                                     (resolution, resolution),
                                                     borderMode=borderMode,
                                                     flags=cv2.INTER_LINEAR)
                            else:
                                if w != resolution:
                                    img = cv2.resize(
                                        img, (resolution, resolution),
                                        interpolation=cv2.INTER_CUBIC)

                            img = imagelib.warp_by_params(
                                params_per_resolution[resolution],
                                img,
                                warp,
                                transform,
                                can_flip=True,
                                border_replicate=border_replicate,
                                cv2_inter=cv2.INTER_LINEAR)

                        if len(img.shape) == 2:
                            img = img[..., None]

                        if channel_type == SPCT.G:
                            out_sample = img.astype(np.float32)
                        else:
                            raise ValueError(
                                "only channel_type.G supported for the mask")

                    elif sample_type == SPST.FACE_IMAGE:
                        img = sample_bgr

                        if face_type != sample_face_type:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample_landmarks, resolution, face_type)
                            img = cv2.warpAffine(img,
                                                 mat, (resolution, resolution),
                                                 borderMode=borderMode,
                                                 flags=cv2.INTER_CUBIC)
                        else:
                            if w != resolution:
                                img = cv2.resize(img, (resolution, resolution),
                                                 interpolation=cv2.INTER_CUBIC)

                        img = imagelib.warp_by_params(
                            params_per_resolution[resolution],
                            img,
                            warp,
                            transform,
                            can_flip=True,
                            border_replicate=border_replicate)

                        img = np.clip(img.astype(np.float32), 0, 1)

                        # Apply random color transfer
                        if ct_mode is not None and ct_sample is not None:
                            if ct_sample_bgr is None:
                                ct_sample_bgr = ct_sample.load_bgr()
                            img = imagelib.color_transfer(
                                ct_mode, img,
                                cv2.resize(ct_sample_bgr,
                                           (resolution, resolution),
                                           interpolation=cv2.INTER_LINEAR))

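                        # The blur/resize augmentations below draw from
                        # RandomStates seeded off the per-call sample seed, so
                        # every output type sees the same decision for this
                        # sample within one process() call.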
                        if motion_blur is not None:
                            chance, mb_max_size = motion_blur
                            chance = np.clip(chance, 0, 100)

                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            mblur_rnd_chance = l_rnd_state.randint(100)
                            mblur_rnd_kernel = l_rnd_state.randint(
                                mb_max_size) + 1
                            mblur_rnd_deg = l_rnd_state.randint(360)

                            if mblur_rnd_chance < chance:
                                img = imagelib.LinearMotionBlur(
                                    img, mblur_rnd_kernel, mblur_rnd_deg)

                        if gaussian_blur is not None:
                            chance, kernel_max_size = gaussian_blur
                            chance = np.clip(chance, 0, 100)

                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed + 1)
                            gblur_rnd_chance = l_rnd_state.randint(100)
                            gblur_rnd_kernel = l_rnd_state.randint(
                                kernel_max_size) * 2 + 1

                            if gblur_rnd_chance < chance:
                                img = cv2.GaussianBlur(
                                    img, (gblur_rnd_kernel, ) * 2, 0)

                        if random_bilinear_resize is not None:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed + 2)

                            chance, max_size_per = random_bilinear_resize
                            chance = np.clip(chance, 0, 100)
                            # pick_chance was drawn but never checked; gate the
                            # resize on it, matching the blur branches above
                            pick_chance = l_rnd_state.randint(100)
                            if pick_chance < chance:
                                resize_to = resolution - int(
                                    l_rnd_state.rand() *
                                    int(resolution * (max_size_per / 100.0)))
                                img = cv2.resize(
                                    img, (resize_to, resize_to),
                                    interpolation=cv2.INTER_LINEAR)
                                img = cv2.resize(
                                    img, (resolution, resolution),
                                    interpolation=cv2.INTER_LINEAR)

                        # Transform from BGR to desired channel_type
                        if channel_type == SPCT.BGR:
                            out_sample = img
                        elif channel_type == SPCT.BGR_SHUFFLE:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            out_sample = np.take(img,
                                                 l_rnd_state.permutation(
                                                     img.shape[-1]),
                                                 axis=-1)
                        elif channel_type == SPCT.BGR_RANDOM_HSV_SHIFT:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                            h, s, v = cv2.split(hsv)
                            h = (h + l_rnd_state.randint(360)) % 360
                            s = np.clip(s + l_rnd_state.random() - 0.5, 0, 1)
                            v = np.clip(v + l_rnd_state.random() / 2 - 0.25, 0,
                                        1)
                            hsv = cv2.merge([h, s, v])
                            out_sample = np.clip(
                                cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), 0, 1)
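                        # Random "levels": draw per-channel input black/white
                        # points and gamma, then apply
                        #   out = ((in - inBlack) / (inWhite - inBlack)) ** (1 / gamma)
                        # rescaled to [outBlack, outWhite].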
                        elif channel_type == SPCT.BGR_RANDOM_RGB_LEVELS:
                            l_rnd_state = np.random.RandomState(
                                sample_rnd_seed)
                            np_rnd = l_rnd_state.rand
                            inBlack = np.array([
                                np_rnd() * 0.25,
                                np_rnd() * 0.25,
                                np_rnd() * 0.25
                            ],
                                               dtype=np.float32)
                            inWhite = np.array([
                                1.0 - np_rnd() * 0.25, 1.0 - np_rnd() * 0.25,
                                1.0 - np_rnd() * 0.25
                            ],
                                               dtype=np.float32)
                            inGamma = np.array([
                                0.5 + np_rnd(), 0.5 + np_rnd(), 0.5 + np_rnd()
                            ],
                                               dtype=np.float32)
                            outBlack = np.array([0.0, 0.0, 0.0],
                                                dtype=np.float32)
                            outWhite = np.array([1.0, 1.0, 1.0],
                                                dtype=np.float32)
                            out_sample = np.clip(
                                (img - inBlack) / (inWhite - inBlack), 0, 1)
                            out_sample = (out_sample**(1 / inGamma)) * (
                                outWhite - outBlack) + outBlack
                            out_sample = np.clip(out_sample, 0, 1)
                        elif channel_type == SPCT.G:
                            out_sample = cv2.cvtColor(img,
                                                      cv2.COLOR_BGR2GRAY)[...,
                                                                          None]
                        elif channel_type == SPCT.GGG:
                            out_sample = np.repeat(
                                np.expand_dims(
                                    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), -1),
                                (3, ), -1)

                    # Final transformations
                    if not debug:
                        if normalize_tanh:
                            out_sample = np.clip(out_sample * 2.0 - 1.0, -1.0,
                                                 1.0)
                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))
                elif sample_type == SPST.IMAGE:
                    img = sample_bgr
                    img = imagelib.warp_by_params(
                        params_per_resolution[resolution],
                        img,
                        warp,
                        transform,
                        can_flip=True,
                        border_replicate=True)
                    img = cv2.resize(img, (resolution, resolution),
                                     interpolation=cv2.INTER_CUBIC)
                    out_sample = img

                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))

                elif sample_type == SPST.LANDMARKS_ARRAY:
                    l = sample_landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    out_sample = l
                elif sample_type == SPST.PITCH_YAW_ROLL or sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()
                    if params_per_resolution[resolution]['flip']:
                        yaw = -yaw

                    if sample_type == SPST.PITCH_YAW_ROLL_SIGMOID:
                        pitch = np.clip((pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw = np.clip((yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll = np.clip((roll / math.pi) / 2.0 + 0.5, 0, 1)

                    out_sample = (pitch, yaw, roll)
                else:
                    raise ValueError('expected sample_type')

                outputs_sample.append(out_sample)
            outputs += [outputs_sample]

        return outputs
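
A minimal usage sketch for the variant above, assuming the SPST/SPCT enum names
and that the option keys match the variables read from opts (the concrete
values are illustrative, not canonical):

    output_sample_types = [
        # SPST.FACE_IMAGE is an assumed name for the face branch whose
        # augmentation/channel_type code appears above
        {'sample_type': SPST.FACE_IMAGE, 'resolution': 128,
         'channel_type': SPCT.BGR,
         'motion_blur': (25, 5),              # 25% chance, kernel up to 5 px
         'gaussian_blur': (25, 3),            # 25% chance, odd kernel up to 5 px
         'random_bilinear_resize': (25, 75),  # 25% chance, shrink up to 75%
         'data_format': 'NHWC'},
        {'sample_type': SPST.PITCH_YAW_ROLL_SIGMOID, 'resolution': 128},
    ]
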
    def process(sample,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_bgr = sample.load_bgr()
        ct_sample_bgr = None
        ct_sample_mask = None
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        cached_images = collections.defaultdict(dict)

        sample_rnd_seed = np.random.randint(0x80000000)

        SPTF_FACETYPE_TO_FACETYPE = {
            SPTF.FACE_TYPE_HALF: FaceType.HALF,
            SPTF.FACE_TYPE_FULL: FaceType.FULL,
            SPTF.FACE_TYPE_HEAD: FaceType.HEAD,
            SPTF.FACE_TYPE_AVATAR: FaceType.AVATAR,
            SPTF.FACE_TYPE_FULL_NO_ROTATION: FaceType.FULL_NO_ROTATION
        }

        outputs = []
        for opts in output_sample_types:

            resolution = opts.get('resolution', 0)
            types = opts.get('types', [])

            random_sub_res = opts.get('random_sub_res', 0)
            normalize_std_dev = opts.get('normalize_std_dev', False)
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)
            apply_ct = opts.get('apply_ct', False)
            normalize_tanh = opts.get('normalize_tanh', False)

            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
            face_mask_type = SPTF.NONE
            mode_type = SPTF.NONE
            for t in types:
                if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                    img_type = t
                elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                    target_face_type = t
                elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                    mode_type = t

            if img_type == SPTF.NONE:
                raise ValueError('expected IMG_ type')

            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                pitch_yaw_roll = sample.pitch_yaw_roll
                if pitch_yaw_roll is not None:
                    pitch, yaw, roll = pitch_yaw_roll
                else:
                    pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                        sample.landmarks)
                if sample_process_options.random_flip:
                    yaw = -yaw

                if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch = (pitch + 1.0) / 2.0
                    yaw = (yaw + 1.0) / 2.0
                    roll = (roll + 1.0) / 2.0

                img = (pitch, yaw, roll)
            else:
                if mode_type == SPTF.NONE:
                    raise ValueError('expected MODE_ type')

                img = sample_bgr
                mask = None
                if is_face_sample:
                    if motion_blur is not None:
                        chance, mb_range = motion_blur
                        chance = np.clip(chance, 0, 100)

                        if np.random.randint(100) < chance:
                            mb_range = [3, 5, 7,
                                        9][:np.clip(mb_range, 0, 3) + 1]
                            dim = mb_range[np.random.randint(len(mb_range))]
                            img = imagelib.LinearMotionBlur(
                                img, dim, np.random.randint(180))

                    # use the fanseg mask if one exists
                    mask = sample.load_fanseg_mask()

                    if mask is None:
                        mask = LandmarksProcessor.get_image_hull_mask(
                            img.shape, sample.landmarks)

                    if sample.ie_polys is not None:
                        sample.ie_polys.overlay_mask(mask)

                if mask is not None:
                    if len(mask.shape) == 2:
                        mask = mask[..., np.newaxis]
                    img = np.concatenate((img, mask), -1)

                if is_face_sample and target_face_type != SPTF.NONE:
                    ft = SPTF_FACETYPE_TO_FACETYPE[target_face_type]
                    if ft > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.'
                            % (sample.filename, sample.face_type, ft))
                    img = cv2.warpAffine(img,
                                         LandmarksProcessor.get_transform_mat(
                                             sample.landmarks, resolution, ft),
                                         (resolution, resolution),
                                         flags=cv2.INTER_CUBIC)
                else:
                    img = cv2.resize(img, (resolution, resolution),
                                     interpolation=cv2.INTER_CUBIC)

                if mask is not None:
                    mask = img[..., 3:4]
                    img = img[..., 0:3]

                warp = (img_type == SPTF.IMG_WARPED
                        or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                             or img_type == SPTF.IMG_TRANSFORMED)
                flip = img_type != SPTF.IMG_WARPED

                params = imagelib.gen_warp_params(
                    img,
                    sample_process_options.random_flip,
                    rotation_range=sample_process_options.rotation_range,
                    scale_range=sample_process_options.scale_range,
                    tx_range=sample_process_options.tx_range,
                    ty_range=sample_process_options.ty_range)

                img = imagelib.warp_by_params(params, img, warp, transform,
                                              flip, True)
                if mask is not None:
                    mask = imagelib.warp_by_params(params, mask, warp,
                                                   transform, flip, False)
                    if len(mask.shape) == 2:
                        mask = mask[..., np.newaxis]
                    img = np.concatenate((img, mask), -1)

                if random_sub_res != 0:
                    sub_size = resolution - random_sub_res
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_res)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img = np.clip(img, 0, 1)
                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

                if apply_ct and ct_sample is not None:
                    if ct_sample_bgr is None:
                        ct_sample_bgr = ct_sample.load_bgr()

                    ct_sample_bgr_resized = cv2.resize(
                        ct_sample_bgr, (resolution, resolution),
                        interpolation=cv2.INTER_LINEAR)

                    img_bgr = imagelib.linear_color_transfer(
                        img_bgr, ct_sample_bgr_resized)
                    img_bgr = np.clip(img_bgr, 0.0, 1.0)

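                # normalize_std_dev: per-channel z-score; normalize_vgg:
                # Caffe-style VGG mean subtraction on a 0-255 BGR image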
                if normalize_std_dev:
                    img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std(
                        (0, 1))
                elif normalize_vgg:
                    img_bgr = np.clip(img_bgr * 255, 0, 255)
                    img_bgr[:, :, 0] -= 103.939
                    img_bgr[:, :, 1] -= 116.779
                    img_bgr[:, :, 2] -= 123.68

                if mode_type == SPTF.MODE_BGR:
                    img = img_bgr
                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = np.take(img_bgr,
                                  rnd_state.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    img = img_mask

                if not debug:
                    if normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
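
A usage sketch for the SPTF-based variant above (names taken from the excerpt,
values illustrative): each opts dict carries a 'types' tuple mixing one IMG_*
value, one FACE_TYPE_* value and one MODE_* value, plus the per-output options
read via opts.get(...):

    output_sample_types = [
        {'types': (SPTF.IMG_WARPED_TRANSFORMED, SPTF.FACE_TYPE_FULL,
                   SPTF.MODE_BGR),
         'resolution': 128, 'apply_ct': True},
        {'types': (SPTF.IMG_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_M),
         'resolution': 128},
    ]
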
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            h, w, c = sample_bgr.shape

            is_face_sample = sample.landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                                  (0, 1, 0))

            params = imagelib.gen_warp_params(
                sample_bgr,
                sample_process_options.random_flip,
                rotation_range=sample_process_options.rotation_range,
                scale_range=sample_process_options.scale_range,
                tx_range=sample_process_options.tx_range,
                ty_range=sample_process_options.ty_range,
                rnd_seed=sample_rnd_seed)

            outputs_sample = []
            for opts in output_sample_types:

                resolution = opts.get('resolution', 0)
                types = opts.get('types', [])

                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)

                ct_mode = opts.get('ct_mode', None)
                normalize_tanh = opts.get('normalize_tanh', False)
                data_format = opts.get('data_format', 'NHWC')

                img_type = SPTF.NONE
                target_face_type = SPTF.NONE
                mode_type = SPTF.NONE
                for t in types:
                    if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                        img_type = t
                    elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                        target_face_type = t
                    elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                        mode_type = t

                if is_face_sample:
                    if target_face_type == SPTF.NONE:
                        raise ValueError(
                            "target face type must be defined for face samples"
                        )
                else:
                    if mode_type == SPTF.MODE_FACE_MASK_HULL:
                        raise ValueError(
                            "MODE_FACE_MASK_HULL is applicable only to face samples"
                        )
                    elif mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
                        raise ValueError(
                            "MODE_FACE_MASK_EYES_HULL is applicable only to face samples"
                        )
                    elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        raise ValueError(
                            "MODE_FACE_MASK_STRUCT is applicable only to face samples"
                        )

                can_warp = (img_type == SPTF.IMG_WARPED
                            or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                can_transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                 or img_type == SPTF.IMG_TRANSFORMED)

                if img_type == SPTF.NONE:
                    raise ValueError('expected IMG_ type')

                if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                    l = sample.landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    out_sample = l
                elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch, yaw, roll = sample.get_pitch_yaw_roll()

                    if params['flip']:
                        yaw = -yaw

                    if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                        pitch = np.clip((pitch / math.pi) / 2.0 + 0.5, 0, 1)
                        yaw = np.clip((yaw / math.pi) / 2.0 + 0.5, 0, 1)
                        roll = np.clip((roll / math.pi) / 2.0 + 0.5, 0, 1)

                    out_sample = (pitch, yaw, roll)
                else:
                    if mode_type == SPTF.NONE:
                        raise ValueError('expected MODE_ type')

                    if mode_type == SPTF.MODE_FACE_MASK_HULL:
                        if sample.eyebrows_expand_mod is not None:
                            img = LandmarksProcessor.get_image_hull_mask(
                                sample_bgr.shape,
                                sample.landmarks,
                                eyebrows_expand_mod=sample.eyebrows_expand_mod)
                        else:
                            img = LandmarksProcessor.get_image_hull_mask(
                                sample_bgr.shape, sample.landmarks)

                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(img)
                    elif mode_type == SPTF.MODE_FACE_MASK_EYES_HULL:
                        img = LandmarksProcessor.get_image_eye_mask(
                            sample_bgr.shape, sample.landmarks)

                    elif mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        if sample.eyebrows_expand_mod is not None:
                            img = LandmarksProcessor.get_face_struct_mask(
                                sample_bgr.shape,
                                sample.landmarks,
                                eyebrows_expand_mod=sample.eyebrows_expand_mod)
                        else:
                            img = LandmarksProcessor.get_face_struct_mask(
                                sample_bgr.shape, sample.landmarks)
                    else:
                        img = sample_bgr
                        if motion_blur is not None:
                            chance, mb_max_size = motion_blur
                            chance = np.clip(chance, 0, 100)

                            rnd_state = np.random.RandomState(sample_rnd_seed)
                            mblur_rnd_chance = rnd_state.randint(100)
                            mblur_rnd_kernel = rnd_state.randint(
                                mb_max_size) + 1
                            mblur_rnd_deg = rnd_state.randint(360)

                            if mblur_rnd_chance < chance:
                                img = imagelib.LinearMotionBlur(
                                    img, mblur_rnd_kernel, mblur_rnd_deg)

                        if gaussian_blur is not None:
                            chance, kernel_max_size = gaussian_blur
                            chance = np.clip(chance, 0, 100)

                            if np.random.randint(100) < chance:
                                img = cv2.GaussianBlur(
                                    img,
                                    (np.random.randint(kernel_max_size) * 2 +
                                     1, ) * 2, 0)

                    if is_face_sample:
                        target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[
                            target_face_type]
                        if target_ft > sample.face_type:
                            raise Exception(
                                'sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.'
                                % (sample.filename, sample.face_type,
                                   target_ft))

                        if sample.face_type == FaceType.MARK_ONLY:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0], target_ft)

                            if mode_type == SPTF.MODE_FACE_MASK_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                                img = cv2.warpAffine(
                                    img,
                                    mat, (sample.shape[0], sample.shape[0]),
                                    flags=cv2.INTER_CUBIC)
                                img = imagelib.warp_by_params(
                                    params,
                                    img,
                                    can_warp,
                                    can_transform,
                                    can_flip=True,
                                    border_replicate=False)
                                img = cv2.resize(
                                    img, (resolution, resolution),
                                    interpolation=cv2.INTER_CUBIC)[..., None]
                            else:
                                img = cv2.warpAffine(
                                    img,
                                    mat, (sample.shape[0], sample.shape[0]),
                                    flags=cv2.INTER_CUBIC)
                                img = imagelib.warp_by_params(
                                    params,
                                    img,
                                    can_warp,
                                    can_transform,
                                    can_flip=True,
                                    border_replicate=True)
                                img = cv2.resize(
                                    img, (resolution, resolution),
                                    interpolation=cv2.INTER_CUBIC)

                        else:
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution, target_ft)

                            if mode_type == SPTF.MODE_FACE_MASK_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                               mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                                img = imagelib.warp_by_params(
                                    params,
                                    img,
                                    can_warp,
                                    can_transform,
                                    can_flip=True,
                                    border_replicate=False)
                                img = cv2.warpAffine(
                                    img,
                                    mat, (resolution, resolution),
                                    borderMode=cv2.BORDER_CONSTANT,
                                    flags=cv2.INTER_CUBIC)[..., None]
                            else:
                                img = imagelib.warp_by_params(
                                    params,
                                    img,
                                    can_warp,
                                    can_transform,
                                    can_flip=True,
                                    border_replicate=True)
                                img = cv2.warpAffine(
                                    img,
                                    mat, (resolution, resolution),
                                    borderMode=cv2.BORDER_REPLICATE,
                                    flags=cv2.INTER_CUBIC)
                    else:
                        img = imagelib.warp_by_params(params,
                                                      img,
                                                      can_warp,
                                                      can_transform,
                                                      can_flip=True,
                                                      border_replicate=True)
                        img = cv2.resize(img, (resolution, resolution),
                                         interpolation=cv2.INTER_CUBIC)

                    if mode_type == SPTF.MODE_FACE_MASK_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_EYES_HULL or \
                       mode_type == SPTF.MODE_FACE_MASK_STRUCT:
                        out_sample = np.clip(img.astype(np.float32), 0, 1)
                    else:
                        img = np.clip(img.astype(np.float32), 0, 1)

                        if ct_mode is not None and ct_sample is not None:
                            if ct_sample_bgr is None:
                                ct_sample_bgr = ct_sample.load_bgr()
                            img = imagelib.color_transfer(
                                ct_mode, img,
                                cv2.resize(ct_sample_bgr,
                                           (resolution, resolution),
                                           interpolation=cv2.INTER_LINEAR))

                        if mode_type == SPTF.MODE_BGR:
                            out_sample = img
                        elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                            rnd_state = np.random.RandomState(sample_rnd_seed)
                            out_sample = np.take(img,
                                                 rnd_state.permutation(
                                                     img.shape[-1]),
                                                 axis=-1)

                        elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
                            rnd_state = np.random.RandomState(sample_rnd_seed)
                            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
                            # h_ch/s_ch/v_ch avoid shadowing the image
                            # height/width names h, w used elsewhere
                            h_ch, s_ch, v_ch = cv2.split(hsv)
                            h_ch = (h_ch + rnd_state.randint(360)) % 360
                            s_ch = np.clip(s_ch + rnd_state.random() - 0.5, 0,
                                           1)
                            v_ch = np.clip(v_ch + rnd_state.random() - 0.5, 0,
                                           1)
                            hsv = cv2.merge([h_ch, s_ch, v_ch])
                            out_sample = np.clip(
                                cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR), 0, 1)
                        elif mode_type == SPTF.MODE_G:
                            out_sample = cv2.cvtColor(img,
                                                      cv2.COLOR_BGR2GRAY)[...,
                                                                          None]
                        elif mode_type == SPTF.MODE_GGG:
                            out_sample = np.repeat(
                                np.expand_dims(
                                    cv2.cvtColor(img, cv2.COLOR_BGR2GRAY), -1),
                                (3, ), -1)

                    if not debug:
                        if normalize_tanh:
                            out_sample = np.clip(out_sample * 2.0 - 1.0, -1.0,
                                                 1.0)

                    if data_format == "NCHW":
                        out_sample = np.transpose(out_sample, (2, 0, 1))

                outputs_sample.append(out_sample)
            outputs += [outputs_sample]

        return outputs
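
The variants above keep augmentations consistent across the several outputs of
one sample by re-seeding np.random.RandomState with the same per-sample seed
before each draw. A self-contained sketch of the pattern (illustrative names):

    import numpy as np

    sample_rnd_seed = np.random.randint(0x80000000)

    def draw_motion_blur_params(mb_max_size):
        # Re-seeding with the per-sample seed means every output of this sample
        # sees the same chance roll, kernel size and angle, so e.g. the BGR
        # image and its mask receive identical motion-blur decisions.
        rnd = np.random.RandomState(sample_rnd_seed)
        return rnd.randint(100), rnd.randint(mb_max_size) + 1, rnd.randint(360)

    assert draw_motion_blur_params(5) == draw_motion_blur_params(5)
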
    def process (sample, sample_process_options, output_sample_types, debug):
        sample_bgr = sample.load_bgr()
        h,w,c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None 
        
        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks (sample_bgr, sample.landmarks, (0, 1, 0))
        
        close_sample = sample.close_target_list[ np.random.randint(0, len(sample.close_target_list)) ] if sample.close_target_list is not None else None
        close_sample_bgr = close_sample.load_bgr() if close_sample is not None else None
        
        if debug and close_sample_bgr is not None:
            LandmarksProcessor.draw_landmarks (close_sample_bgr, close_sample.landmarks, (0, 1, 0))        
        
        params = image_utils.gen_warp_params(sample_bgr, sample_process_options.random_flip, rotation_range=sample_process_options.rotation_range, scale_range=sample_process_options.scale_range, tx_range=sample_process_options.tx_range, ty_range=sample_process_options.ty_range )

        images = [[None]*3 for _ in range(30)]
        
        sample_rnd_seed = np.random.randint(0x80000000)
            
        outputs = []       
        for sample_type in output_sample_types:
            f = sample_type[0]
            size = sample_type[1]
            random_sub_size = 0 if len (sample_type) < 3 else min( sample_type[2] , size)
            
            if f & SampleProcessor.TypeFlags.SOURCE != 0:
                img_type = 0
            elif f & SampleProcessor.TypeFlags.WARPED != 0:
                img_type = 1
            elif f & SampleProcessor.TypeFlags.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif f & SampleProcessor.TypeFlags.TRANSFORMED != 0:
                img_type = 3
            elif f & SampleProcessor.TypeFlags.LANDMARKS_ARRAY != 0:
                img_type = 4                
            else:
                raise ValueError ('expected SampleTypeFlags type')
                
            if f & SampleProcessor.TypeFlags.RANDOM_CLOSE != 0:
                img_type += 10
            elif f & SampleProcessor.TypeFlags.MORPH_TO_RANDOM_CLOSE != 0:
                img_type += 20
                
            face_mask_type = 0
            if f & SampleProcessor.TypeFlags.FACE_MASK_FULL != 0:
                face_mask_type = 1               
            elif f & SampleProcessor.TypeFlags.FACE_MASK_EYES != 0:
                face_mask_type = 2
                  
            target_face_type = -1
            if f & SampleProcessor.TypeFlags.FACE_ALIGN_HALF != 0:
                target_face_type = FaceType.HALF            
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_FULL != 0:
                target_face_type = FaceType.FULL
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_HEAD != 0:
                target_face_type = FaceType.HEAD
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_AVATAR != 0:
                target_face_type = FaceType.AVATAR
            
            if img_type == 4:
                l = sample.landmarks 
                l = np.concatenate ( [ np.expand_dims(l[:,0] / w,-1), np.expand_dims(l[:,1] / h,-1) ], -1 )
                l = np.clip(l, 0.0, 1.0)
                img = l
            else:                
                if images[img_type][face_mask_type] is None:
                    if img_type >= 10 and img_type <= 19: #RANDOM_CLOSE
                        img_type -= 10
                        img = close_sample_bgr
                        cur_sample = close_sample
                        
                    elif img_type >= 20 and img_type <= 29: #MORPH_TO_RANDOM_CLOSE
                        img_type -= 20
                        res = sample.shape[0]
                    
                        s_landmarks = sample.landmarks.copy()                    
                        d_landmarks = close_sample.landmarks.copy()                        
                        idxs = list(range(len(s_landmarks)))                        
                        #remove landmarks near boundaries
                        for i in idxs[:]:
                            s_l = s_landmarks[i]   
                            d_l = d_landmarks[i]
                            if s_l[0] < 5 or s_l[1] < 5 or s_l[0] >= res-5 or s_l[1] >= res-5 or \
                               d_l[0] < 5 or d_l[1] < 5 or d_l[0] >= res-5 or d_l[1] >= res-5:
                               idxs.remove(i)
                        #remove landmarks that are closer than 5 px to each other
                        for landmarks in [s_landmarks, d_landmarks]:
                            for i in idxs[:]:
                                s_l = landmarks[i]
                                for j in idxs[:]:
                                    if i == j:
                                        continue
                                    s_l_2 = landmarks[j]
                                    diff_l = np.abs(s_l - s_l_2)
                                    if np.sqrt(diff_l.dot(diff_l)) < 5:
                                        idxs.remove(i)
                                        break                                    
                        s_landmarks = s_landmarks[idxs]
                        d_landmarks = d_landmarks[idxs]
                        s_landmarks = np.concatenate ( [s_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] ) 
                        d_landmarks = np.concatenate ( [d_landmarks, [ [0,0], [ res // 2, 0], [ res-1, 0], [0, res//2], [res-1, res//2] ,[0,res-1] ,[res//2, res-1] ,[res-1,res-1] ] ] )
                        img = image_utils.morph_by_points (sample_bgr, s_landmarks, d_landmarks)
                        cur_sample = close_sample
                    else:
                        img = sample_bgr
                        cur_sample = sample
                    
                    if is_face_sample:
                        if face_mask_type == 1:
                            img = np.concatenate( (img, LandmarksProcessor.get_image_hull_mask (img.shape, cur_sample.landmarks) ), -1 )                    
                        elif face_mask_type == 2:
                            mask = LandmarksProcessor.get_image_eye_mask (img.shape, cur_sample.landmarks)
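                            #box-blur then threshold > 0: cheap dilation of the eye mask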
                            mask = np.expand_dims (cv2.blur (mask, ( w // 32, w // 32 ) ), -1)
                            mask[mask > 0.0] = 1.0
                            img = np.concatenate( (img, mask ), -1 )               

                    images[img_type][face_mask_type] = image_utils.warp_by_params (params, img, (img_type==1 or img_type==2), (img_type==2 or img_type==3), img_type != 0, face_mask_type == 0)
                    
                img = images[img_type][face_mask_type]
                
                if is_face_sample and target_face_type != -1:
                    if target_face_type > sample.face_type:
                        raise Exception ('sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.' % (sample.filename, sample.face_type, target_face_type) )
                    img = cv2.warpAffine( img, LandmarksProcessor.get_transform_mat (sample.landmarks, size, target_face_type), (size,size), flags=cv2.INTER_CUBIC )
                else:
                    img = cv2.resize( img, (size,size), interpolation=cv2.INTER_CUBIC )
                    
                if random_sub_size != 0:
                    sub_size = size - random_sub_size                
                    rnd_state = np.random.RandomState (sample_rnd_seed+random_sub_size)
                    start_x = rnd_state.randint(sub_size+1)
                    start_y = rnd_state.randint(sub_size+1)
                    img = img[start_y:start_y+sub_size,start_x:start_x+sub_size,:]

                img_bgr  = img[...,0:3]
                img_mask = img[...,3:4]

                if f & SampleProcessor.TypeFlags.MODE_BGR != 0:
                    img = img
                elif f & SampleProcessor.TypeFlags.MODE_BGR_SHUFFLE != 0:
                    img_bgr = np.take (img_bgr, np.random.permutation(img_bgr.shape[-1]), axis=-1)
                    img = np.concatenate ( (img_bgr,img_mask) , -1 )
                elif f & SampleProcessor.TypeFlags.MODE_G != 0:
                    img = np.concatenate ( (np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1),img_mask) , -1 )
                elif f & SampleProcessor.TypeFlags.MODE_GGG != 0:
                    img = np.concatenate ( ( np.repeat ( np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),-1), (3,), -1), img_mask), -1)
                elif is_face_sample and f & SampleProcessor.TypeFlags.MODE_M != 0:
                    if face_mask_type== 0:
                        raise ValueError ('no face_mask_type defined')
                    img = img_mask
                else:
                    raise ValueError ('expected SampleTypeFlags mode')
         
                if not debug and sample_process_options.normalize_tanh:
                    img = img * 2.0 - 1.0
                
            outputs.append ( img )

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [output,]
                elif output.shape[2] == 4:
                    result += [output[...,0:3]*output[...,3:4],]

            return result            
        else:
            return outputs
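
In the TypeFlags-based variants above and below, one output request is a tuple
(flags, size[, random_sub_size]) whose flags OR together an image type, an
optional face-align or face-mask type, and a channel mode. A hypothetical
request list (flag names as used in the excerpt):

    TF = SampleProcessor.TypeFlags
    output_sample_types = [
        (TF.WARPED_TRANSFORMED | TF.FACE_ALIGN_FULL | TF.MODE_BGR, 64),
        (TF.TRANSFORMED | TF.FACE_ALIGN_FULL | TF.FACE_MASK_FULL | TF.MODE_M, 64),
    ]
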
    def process(sample, sample_process_options, output_sample_types, debug):
        source = sample.load_bgr()
        h, w, c = source.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(source, sample.landmarks,
                                              (0, 1, 0))

        params = image_utils.gen_warp_params(
            source,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        images = [[None] * 3 for _ in range(4)]

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample_type in output_sample_types:
            f = sample_type[0]
            size = sample_type[1]
            random_sub_size = 0 if len(sample_type) < 3 else min(
                sample_type[2], size)

            if f & SampleProcessor.TypeFlags.SOURCE != 0:
                img_type = 0
            elif f & SampleProcessor.TypeFlags.WARPED != 0:
                img_type = 1
            elif f & SampleProcessor.TypeFlags.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif f & SampleProcessor.TypeFlags.TRANSFORMED != 0:
                img_type = 3
            else:
                raise ValueError('expected SampleTypeFlags type')

            face_mask_type = 0
            if f & SampleProcessor.TypeFlags.FACE_MASK_FULL != 0:
                face_mask_type = 1
            elif f & SampleProcessor.TypeFlags.FACE_MASK_EYES != 0:
                face_mask_type = 2

            target_face_type = -1
            if f & SampleProcessor.TypeFlags.FACE_ALIGN_HALF != 0:
                target_face_type = FaceType.HALF
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_FULL != 0:
                target_face_type = FaceType.FULL
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_HEAD != 0:
                target_face_type = FaceType.HEAD
            elif f & SampleProcessor.TypeFlags.FACE_ALIGN_AVATAR != 0:
                target_face_type = FaceType.AVATAR

            if images[img_type][face_mask_type] is None:
                img = source
                if is_face_sample:
                    if face_mask_type == 1:
                        img = np.concatenate(
                            (img,
                             LandmarksProcessor.get_image_hull_mask(
                                 source, sample.landmarks)), -1)
                    elif face_mask_type == 2:
                        mask = LandmarksProcessor.get_image_eye_mask(
                            source, sample.landmarks)
                        mask = np.expand_dims(
                            cv2.blur(mask, (w // 32, w // 32)), -1)
                        mask[mask > 0.0] = 1.0
                        img = np.concatenate((img, mask), -1)

                images[img_type][face_mask_type] = image_utils.warp_by_params(
                    params, img, (img_type == 1 or img_type == 2),
                    (img_type == 2 or img_type == 3), img_type != 0)

            img = images[img_type][face_mask_type]

            if is_face_sample and target_face_type != -1:
                if target_face_type > sample.face_type:
                    raise Exception(
                        'sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.'
                        %
                        (sample.filename, sample.face_type, target_face_type))

                img = cv2.warpAffine(img,
                                     LandmarksProcessor.get_transform_mat(
                                         sample.landmarks, size,
                                         target_face_type), (size, size),
                                     flags=cv2.INTER_LANCZOS4)
            else:
                img = cv2.resize(img, (size, size),
                                 interpolation=cv2.INTER_LANCZOS4)

            if random_sub_size != 0:
                sub_size = size - random_sub_size
                rnd_state = np.random.RandomState(sample_rnd_seed +
                                                  random_sub_size)
                start_x = rnd_state.randint(sub_size + 1)
                start_y = rnd_state.randint(sub_size + 1)
                img = img[start_y:start_y + sub_size,
                          start_x:start_x + sub_size, :]

            img_bgr = img[..., 0:3]
            img_mask = img[..., 3:4]

            if f & SampleProcessor.TypeFlags.MODE_BGR != 0:
                img = img
            elif f & SampleProcessor.TypeFlags.MODE_BGR_SHUFFLE != 0:
                img_bgr = np.take(img_bgr,
                                  np.random.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                img = np.concatenate((img_bgr, img_mask), -1)
            elif f & SampleProcessor.TypeFlags.MODE_G != 0:
                img = np.concatenate((np.expand_dims(
                    cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), img_mask),
                                     -1)
            elif f & SampleProcessor.TypeFlags.MODE_GGG != 0:
                img = np.concatenate((np.repeat(
                    np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                                   -1), (3, ), -1), img_mask), -1)
            elif is_face_sample and f & SampleProcessor.TypeFlags.MODE_M != 0:
                if face_mask_type == 0:
                    raise ValueError('no face_mask_type defined')
                img = img_mask
            else:
                raise ValueError('expected SampleTypeFlags mode')

            if not debug and sample_process_options.normalize_tanh:
                img = img * 2.0 - 1.0

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
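
All of the flag-based variants share the same debug path: four-channel outputs
are premultiplied by their mask channel so that only the masked face region
stays visible. A standalone sketch of that step:

    def debug_view(output):
        # BGR lives in channels 0..2 and the hull/eye mask in channel 3;
        # multiplying blacks out every pixel outside the mask.
        if output.shape[2] == 4:
            return output[..., 0:3] * output[..., 3:4]
        return output
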
Example #9
0
    def onProcessSample(self, sample, debug):
        source = sample.load_bgr()
        h, w, c = source.shape

        is_face_sample = self.trainingdatatype >= TrainingDataType.FACE_BEGIN and self.trainingdatatype <= TrainingDataType.FACE_END

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(source, sample.landmarks,
                                              (0, 1, 0))

        params = image_utils.gen_warp_params(
            source,
            self.random_flip,
            rotation_range=self.rotation_range,
            scale_range=self.scale_range,
            tx_range=self.tx_range,
            ty_range=self.ty_range)

        images = [[None] * 3 for _ in range(4)]

        outputs = []
        for t, size in self.output_sample_types:
            if t & self.SampleTypeFlags.SOURCE != 0:
                img_type = 0
            elif t & self.SampleTypeFlags.WARPED != 0:
                img_type = 1
            elif t & self.SampleTypeFlags.WARPED_TRANSFORMED != 0:
                img_type = 2
            elif t & self.SampleTypeFlags.TRANSFORMED != 0:
                img_type = 3
            else:
                raise ValueError('expected SampleTypeFlags type')

            mask_type = 0
            if t & self.SampleTypeFlags.MASK_FULL != 0:
                mask_type = 1
            elif t & self.SampleTypeFlags.MASK_EYES != 0:
                mask_type = 2

            if images[img_type][mask_type] is None:
                img = source
                if is_face_sample:
                    if mask_type == 1:
                        img = np.concatenate(
                            (img,
                             LandmarksProcessor.get_image_hull_mask(
                                 source, sample.landmarks)), -1)
                    elif mask_type == 2:
                        mask = LandmarksProcessor.get_image_eye_mask(
                            source, sample.landmarks)
                        mask = np.expand_dims(
                            cv2.blur(mask, (w // 32, w // 32)), -1)
                        mask[mask > 0.0] = 1.0
                        img = np.concatenate((img, mask), -1)

                images[img_type][mask_type] = image_utils.warp_by_params(
                    params, img, (img_type == 1 or img_type == 2),
                    (img_type == 2 or img_type == 3), img_type != 0)

            img = images[img_type][mask_type]

            target_face_type = -1
            if t & self.SampleTypeFlags.HALF_FACE != 0:
                target_face_type = FaceType.HALF
            elif t & self.SampleTypeFlags.FULL_FACE != 0:
                target_face_type = FaceType.FULL
            elif t & self.SampleTypeFlags.HEAD_FACE != 0:
                target_face_type = FaceType.HEAD
            elif t & self.SampleTypeFlags.AVATAR_FACE != 0:
                target_face_type = FaceType.AVATAR
            elif t & self.SampleTypeFlags.MARK_ONLY_FACE != 0:
                target_face_type = FaceType.MARK_ONLY

            if is_face_sample and target_face_type != -1 and target_face_type != FaceType.MARK_ONLY:
                if target_face_type > sample.face_type:
                    raise Exception(
                        'sample %s type %s does not match model requirement %s. Consider extracting the necessary face type.'
                        %
                        (sample.filename, sample.face_type, target_face_type))

                img = cv2.warpAffine(img,
                                     LandmarksProcessor.get_transform_mat(
                                         sample.landmarks, size,
                                         target_face_type), (size, size),
                                     flags=cv2.INTER_LANCZOS4)
            else:
                img = cv2.resize(img, (size, size),
                                 interpolation=cv2.INTER_LANCZOS4)

            img_bgr = img[..., 0:3]
            img_mask = img[..., 3:4]

            if t & self.SampleTypeFlags.MODE_BGR != 0:
                img = img
            elif t & self.SampleTypeFlags.MODE_BGR_SHUFFLE != 0:
                img_bgr = np.take(img_bgr,
                                  np.random.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                img = np.concatenate((img_bgr, img_mask), -1)
            elif t & self.SampleTypeFlags.MODE_G != 0:
                img = np.concatenate((np.expand_dims(
                    cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1), img_mask),
                                     -1)
            elif t & self.SampleTypeFlags.MODE_GGG != 0:
                img = np.concatenate((np.repeat(
                    np.expand_dims(cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                                   -1), (3, ), -1), img_mask), -1)
            elif is_face_sample and t & self.SampleTypeFlags.MODE_M != 0:
                if mask_type == 0:
                    raise ValueError('no mask mode defined')
                img = img_mask
            else:
                raise ValueError('expected SampleTypeFlags mode')

            if not debug and self.normalize_tanh:
                img = img * 2.0 - 1.0

            outputs.append(img)

        if debug:
            result = ()

            for output in outputs:
                if output.shape[2] < 4:
                    result += (output, )
                elif output.shape[2] == 4:
                    result += (output[..., 0:3] * output[..., 3:4], )

            return result
        else:
            return outputs
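
Several of the variants above memoize warped results in an
images[img_type][mask_type] grid so that output requests sharing the same
warp/mask combination reuse a single computation. A minimal sketch of the
pattern (illustrative helper):

    cache = [[None] * 3 for _ in range(4)]  # 4 image types x 3 mask types

    def get_warped(img_type, mask_type, compute):
        # compute() runs only on the first request for this combination;
        # later outputs reuse the cached array.
        if cache[img_type][mask_type] is None:
            cache[img_type][mask_type] = compute()
        return cache[img_type][mask_type]
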