Example #1
def ConvertMaskedFace(cfg, frame_info, img_bgr_uint8, img_bgr,
                      img_face_landmarks):

    #if debug:
    #    debugs = [img_bgr.copy()]

    img_size = img_bgr.shape[1], img_bgr.shape[0]

    img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
        img_bgr.shape, img_face_landmarks)

    if cfg.mode == 'original':
        if cfg.export_mask_alpha:
            img_bgr = np.concatenate([img_bgr, img_face_mask_a], -1)
        return img_bgr, img_face_mask_a

    out_img = img_bgr.copy()
    out_merging_mask = None

    output_size = cfg.predictor_input_shape[0]
    if cfg.super_resolution_mode != 0:
        output_size *= 2

    face_mat = LandmarksProcessor.get_transform_mat(img_face_landmarks,
                                                    output_size,
                                                    face_type=cfg.face_type)
    face_output_mat = LandmarksProcessor.get_transform_mat(
        img_face_landmarks,
        output_size,
        face_type=cfg.face_type,
        scale=1.0 + 0.01 * cfg.output_face_scale)

    dst_face_bgr = cv2.warpAffine(img_bgr,
                                  face_mat, (output_size, output_size),
                                  flags=cv2.INTER_CUBIC)
    dst_face_mask_a_0 = cv2.warpAffine(img_face_mask_a,
                                       face_mat, (output_size, output_size),
                                       flags=cv2.INTER_CUBIC)

    predictor_input_bgr = cv2.resize(dst_face_bgr,
                                     cfg.predictor_input_shape[0:2])

    if cfg.predictor_masked:
        prd_face_bgr, prd_face_mask_a_0 = cfg.predictor_func(
            predictor_input_bgr)

        prd_face_bgr = np.clip(prd_face_bgr, 0, 1.0)
        prd_face_mask_a_0 = np.clip(prd_face_mask_a_0, 0.0, 1.0)
    else:
        predicted = cfg.predictor_func(predictor_input_bgr)
        prd_face_bgr = np.clip(predicted, 0, 1.0)
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       cfg.predictor_input_shape[0:2])

    if cfg.super_resolution_mode:
        #if debug:
        #    tmp = cv2.resize (prd_face_bgr,  (output_size,output_size), cv2.INTER_CUBIC)
        #    debugs += [ np.clip( cv2.warpAffine( tmp, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

        prd_face_bgr = cfg.superres_func(cfg.super_resolution_mode,
                                         prd_face_bgr)
        #if debug:
        #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, img_bgr.copy(), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

        if cfg.predictor_masked:
            prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                           (output_size, output_size),
                                           cv2.INTER_CUBIC)
        else:
            prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                           (output_size, output_size),
                                           cv2.INTER_CUBIC)

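    #mask_mode selects the blending mask (inferred from the branches below):
    #  2 = dst hull; 3 = FAN-prd; 4 = FAN-dst; 5 = FAN-prd*FAN-dst;
    #  6 = learned*FAN-prd*FAN-dst; 7 = learned*FAN-dst; any other value keeps
    #  the mask chosen above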
    if cfg.mask_mode == 2:  #dst
        prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                       (output_size, output_size),
                                       cv2.INTER_CUBIC)
    elif cfg.mask_mode >= 3 and cfg.mask_mode <= 7:

        if cfg.mask_mode == 3 or cfg.mask_mode == 5 or cfg.mask_mode == 6:
            prd_face_fanseg_bgr = cv2.resize(prd_face_bgr,
                                             (cfg.fanseg_input_size, ) * 2)
            prd_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, prd_face_fanseg_bgr)
            FAN_prd_face_mask_a_0 = cv2.resize(prd_face_fanseg_mask,
                                               (output_size, output_size),
                                               cv2.INTER_CUBIC)

        if cfg.mask_mode >= 4 and cfg.mask_mode <= 7:

            full_face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                img_face_landmarks,
                cfg.fanseg_input_size,
                face_type=FaceType.FULL)
            dst_face_fanseg_bgr = cv2.warpAffine(img_bgr,
                                                 full_face_fanseg_mat,
                                                 (cfg.fanseg_input_size, ) * 2,
                                                 flags=cv2.INTER_CUBIC)
            dst_face_fanseg_mask = cfg.fanseg_extract_func(
                FaceType.FULL, dst_face_fanseg_bgr)

            if cfg.face_type == FaceType.FULL:
                FAN_dst_face_mask_a_0 = cv2.resize(dst_face_fanseg_mask,
                                                   (output_size, output_size),
                                                   cv2.INTER_CUBIC)
            elif cfg.face_type == FaceType.HALF:
                half_face_fanseg_mat = LandmarksProcessor.get_transform_mat(
                    img_face_landmarks,
                    cfg.fanseg_input_size,
                    face_type=FaceType.HALF)

                fanseg_rect_corner_pts = np.array(
                    [[0, 0], [cfg.fanseg_input_size - 1, 0],
                     [0, cfg.fanseg_input_size - 1]],
                    dtype=np.float32)
                a = LandmarksProcessor.transform_points(fanseg_rect_corner_pts,
                                                        half_face_fanseg_mat,
                                                        invert=True)
                b = LandmarksProcessor.transform_points(
                    a, full_face_fanseg_mat)
                m = cv2.getAffineTransform(b, fanseg_rect_corner_pts)
                FAN_dst_face_mask_a_0 = cv2.warpAffine(
                    dst_face_fanseg_mask,
                    m, (cfg.fanseg_input_size, ) * 2,
                    flags=cv2.INTER_CUBIC)
                FAN_dst_face_mask_a_0 = cv2.resize(FAN_dst_face_mask_a_0,
                                                   (output_size, output_size),
                                                   cv2.INTER_CUBIC)
            else:
                raise ValueError("cfg.face_type unsupported")

        if cfg.mask_mode == 3:  #FAN-prd
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0
        elif cfg.mask_mode == 4:  #FAN-dst
            prd_face_mask_a_0 = FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 5:
            prd_face_mask_a_0 = FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 6:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
        elif cfg.mask_mode == 7:
            prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_dst_face_mask_a_0

    prd_face_mask_a_0[prd_face_mask_a_0 < 0.001] = 0.0

    prd_face_mask_a = prd_face_mask_a_0[..., np.newaxis]
    prd_face_mask_aaa = np.repeat(prd_face_mask_a, (3, ), axis=-1)
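    #naming: _a is an HxWx1 mask in [0,1]; _aaa repeats it to 3 channels so it
    #can gate BGR images elementwise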

    img_face_mask_aaa = cv2.warpAffine(prd_face_mask_aaa,
                                       face_output_mat,
                                       img_size,
                                       np.zeros(img_bgr.shape,
                                                dtype=np.float32),
                                       flags=cv2.WARP_INVERSE_MAP
                                       | cv2.INTER_CUBIC)
    img_face_mask_aaa = np.clip(img_face_mask_aaa, 0.0, 1.0)
    img_face_mask_aaa[img_face_mask_aaa <= 0.1] = 0.0  #get rid of noise

    #if debug:
    #    debugs += [img_face_mask_aaa.copy()]

    if 'raw' in cfg.mode:
        face_corner_pts = np.array(
            [[0, 0], [output_size - 1, 0], [output_size - 1, output_size - 1],
             [0, output_size - 1]],
            dtype=np.float32)
        square_mask = np.zeros(img_bgr.shape, dtype=np.float32)
        cv2.fillConvexPoly(square_mask, \
                           LandmarksProcessor.transform_points (face_corner_pts, face_output_mat, invert=True ).astype(int), \
                           (1,1,1) )

        if cfg.mode == 'raw-rgb':
            out_merging_mask = square_mask

        if cfg.mode == 'raw-rgb' or cfg.mode == 'raw-rgb-mask':
            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     out_img,
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)

        if cfg.mode == 'raw-rgb-mask':
            out_img = np.concatenate(
                [out_img,
                 np.expand_dims(img_face_mask_aaa[:, :, 0], -1)], -1)
            out_merging_mask = square_mask

        elif cfg.mode == 'raw-mask-only':
            out_img = img_face_mask_aaa
            out_merging_mask = img_face_mask_aaa
        elif cfg.mode == 'raw-predicted-only':
            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     np.zeros(img_bgr.shape, dtype=np.float32),
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_merging_mask = square_mask

        out_img = np.clip(out_img, 0.0, 1.0)
    else:
        #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
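        #threshold the mask at 0.1..0.9, take each level's bounding box, and
        #average the box sizes; the resulting lowest_len is a face-size estimate
        #used to scale the erode and blur kernel sizes below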
        ar = []
        for i in range(1, 10):
            maxregion = np.argwhere(img_face_mask_aaa > i / 10.0)
            if maxregion.size != 0:
                miny, minx = maxregion.min(axis=0)[:2]
                maxy, maxx = maxregion.max(axis=0)[:2]
                lenx = maxx - minx
                leny = maxy - miny
                if min(lenx, leny) >= 4:
                    ar += [[lenx, leny]]

        if len(ar) > 0:
            lenx, leny = np.mean(ar, axis=0)
            lowest_len = min(lenx, leny)
            #if debug:
            #    io.log_info ("lenx/leny:(%d/%d) " % (lenx, leny  ) )
            #    io.log_info ("lowest_len = %f" % (lowest_len) )

            if cfg.erode_mask_modifier != 0:
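                #erode/dilate size scales with the face-size estimate, with a
                #small quadratic damping term for large faces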
                ero = int(lowest_len * (0.126 - lowest_len * 0.00004551365) *
                          0.01 * cfg.erode_mask_modifier)
                #if debug:
                #    io.log_info ("erode_size = %d" % (ero) )
                if ero > 0:
                    img_face_mask_aaa = cv2.erode(img_face_mask_aaa,
                                                  cv2.getStructuringElement(
                                                      cv2.MORPH_ELLIPSE,
                                                      (ero, ero)),
                                                  iterations=1)
                elif ero < 0:
                    img_face_mask_aaa = cv2.dilate(img_face_mask_aaa,
                                                   cv2.getStructuringElement(
                                                       cv2.MORPH_ELLIPSE,
                                                       (-ero, -ero)),
                                                   iterations=1)

            if cfg.clip_hborder_mask_per > 0:  #clip hborder before blur
                prd_hborder_rect_mask_a = np.ones(prd_face_mask_a.shape,
                                                  dtype=np.float32)
                prd_border_size = int(prd_hborder_rect_mask_a.shape[1] *
                                      cfg.clip_hborder_mask_per)
                prd_hborder_rect_mask_a[:, 0:prd_border_size, :] = 0
                prd_hborder_rect_mask_a[:, -prd_border_size:, :] = 0
                prd_hborder_rect_mask_a[-prd_border_size:, :, :] = 0
                prd_hborder_rect_mask_a = np.expand_dims(
                    cv2.blur(prd_hborder_rect_mask_a,
                             (prd_border_size, prd_border_size)), -1)

                img_prd_hborder_rect_mask_a = cv2.warpAffine(
                    prd_hborder_rect_mask_a, face_output_mat, img_size,
                    np.zeros(img_bgr.shape, dtype=np.float32),
                    cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC)
                img_prd_hborder_rect_mask_a = np.expand_dims(
                    img_prd_hborder_rect_mask_a, -1)
                img_face_mask_aaa *= img_prd_hborder_rect_mask_a
                img_face_mask_aaa = np.clip(img_face_mask_aaa, 0, 1.0)

                #if debug:
                #    debugs += [img_face_mask_aaa.copy()]

            if cfg.blur_mask_modifier > 0:
                blur = int(lowest_len * 0.10 * 0.01 * cfg.blur_mask_modifier)
                #if debug:
                #    io.log_info ("blur_size = %d" % (blur) )
                if blur > 0:
                    img_face_mask_aaa = cv2.blur(img_face_mask_aaa,
                                                 (blur, blur))

            img_face_mask_aaa = np.clip(img_face_mask_aaa, 0, 1.0)

            #if debug:
            #    debugs += [img_face_mask_aaa.copy()]

            if 'seamless' not in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:
                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                    prd_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip((prd_face_bgr * 255).astype(np.uint8), 0, 255),
                        np.clip((dst_face_bgr * 255).astype(np.uint8), 0, 255),
                        source_mask=prd_face_mask_a,
                        target_mask=prd_face_mask_a)
                    prd_face_bgr = np.clip(
                        prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                elif cfg.color_transfer_mode == 2:
                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                    prd_face_bgr = imagelib.linear_color_transfer(
                        prd_face_bgr, dst_face_bgr)
                    prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)

                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = cv2.cvtColor(prd_face_bgr, cv2.COLOR_BGR2GRAY)
                prd_face_bgr = np.repeat(np.expand_dims(prd_face_bgr, -1),
                                         (3, ), -1)

            if cfg.mode == 'hist-match' or cfg.mode == 'hist-match-bw':
                #if debug:
                #    debugs += [ cv2.warpAffine( prd_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ) ]

                hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                      dtype=np.float32)

                if cfg.masked_hist_match:
                    hist_mask_a *= prd_face_mask_a

                white = (1.0 - hist_mask_a) * np.ones(
                    prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)

                hist_match_1 = prd_face_bgr * hist_mask_a + white
                hist_match_1[hist_match_1 > 1.0] = 1.0

                hist_match_2 = dst_face_bgr * hist_mask_a + white
                hist_match_2[hist_match_2 > 1.0] = 1.0

                prd_face_bgr = imagelib.color_hist_match(
                    hist_match_1, hist_match_2, cfg.hist_match_threshold)

                #if cfg.masked_hist_match:
                #    prd_face_bgr -= white

            if cfg.mode == 'hist-match-bw':
                prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

            out_img = cv2.warpAffine(prd_face_bgr, face_output_mat, img_size,
                                     out_img,
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_img = np.clip(out_img, 0.0, 1.0)

            #if debug:
            #    debugs += [out_img.copy()]

            if cfg.mode == 'overlay':
                pass

            if 'seamless' in cfg.mode:
                #mask used for cv2.seamlessClone
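                #binarize at the lowest threshold level that still has
                #foreground pixels, since seamlessClone expects a hard 0/1 mask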
                img_face_seamless_mask_a = None
                img_face_mask_a = img_face_mask_aaa[..., 0:1]
                for i in range(1, 10):
                    a = img_face_mask_a > i / 10.0
                    if len(np.argwhere(a)) == 0:
                        continue
                    img_face_seamless_mask_a = img_face_mask_aaa[...,
                                                                 0:1].copy()
                    img_face_seamless_mask_a[a] = 1.0
                    img_face_seamless_mask_a[img_face_seamless_mask_a <= i /
                                             10.0] = 0.0
                    break

                try:
                    #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering (not flickering)
                    l, t, w, h = cv2.boundingRect(
                        (img_face_seamless_mask_a * 255).astype(np.uint8))
                    s_maskx, s_masky = int(l + w / 2), int(t + h / 2)

                    out_img = cv2.seamlessClone(
                        (out_img * 255).astype(np.uint8), img_bgr_uint8,
                        (img_face_seamless_mask_a * 255).astype(np.uint8),
                        (s_maskx, s_masky), cv2.NORMAL_CLONE)
                    out_img = out_img.astype(dtype=np.float32) / 255.0
                except Exception as e:
                    #seamlessClone may fail in some cases
                    e_str = traceback.format_exc()

                    if 'MemoryError' in e_str:
                        raise Exception(
                            "Seamless fail: " + e_str
                        )  #reraise MemoryError so another process can reprocess this data
                    else:
                        print("Seamless fail: " + e_str)

                #if debug:
                #    debugs += [out_img.copy()]

            out_img = img_bgr * (1 - img_face_mask_aaa) + (out_img *
                                                           img_face_mask_aaa)

            out_face_bgr = cv2.warpAffine(out_img, face_mat,
                                          (output_size, output_size))

            if 'seamless' in cfg.mode and cfg.color_transfer_mode != 0:
                if cfg.color_transfer_mode == 1:
                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]
                    face_mask_aaa = cv2.warpAffine(img_face_mask_aaa, face_mat,
                                                   (output_size, output_size))

                    out_face_bgr = imagelib.reinhard_color_transfer(
                        np.clip((out_face_bgr * 255).astype(np.uint8), 0, 255),
                        np.clip((dst_face_bgr * 255).astype(np.uint8), 0, 255),
                        source_mask=face_mask_aaa,
                        target_mask=face_mask_aaa)
                    out_face_bgr = np.clip(
                        out_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                elif cfg.color_transfer_mode == 2:
                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

                    out_face_bgr = imagelib.linear_color_transfer(
                        out_face_bgr, dst_face_bgr)
                    out_face_bgr = np.clip(out_face_bgr, 0.0, 1.0)

                    #if debug:
                    #    debugs += [ np.clip( cv2.warpAffine( out_face_bgr, face_output_mat, img_size, np.zeros(img_bgr.shape, dtype=np.float32), cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC, cv2.BORDER_TRANSPARENT ), 0, 1.0) ]

            if cfg.mode == 'seamless-hist-match':
                out_face_bgr = imagelib.color_hist_match(
                    out_face_bgr, dst_face_bgr, cfg.hist_match_threshold)

            cfg_mp = cfg.motion_blur_power / 100.0
            if cfg_mp != 0:
                k_size = int(frame_info.motion_power * cfg_mp)
                if k_size >= 1:
                    k_size = np.clip(k_size + 1, 2, 50)
                    if cfg.super_resolution_mode:
                        k_size *= 2
                    out_face_bgr = imagelib.LinearMotionBlur(
                        out_face_bgr, k_size, frame_info.motion_deg)

            if cfg.sharpen_mode != 0 and cfg.sharpen_amount != 0:
                out_face_bgr = cfg.sharpen_func(out_face_bgr, cfg.sharpen_mode,
                                                3, cfg.sharpen_amount)

            new_out = cv2.warpAffine(out_face_bgr, face_mat, img_size,
                                     img_bgr.copy(),
                                     cv2.WARP_INVERSE_MAP | cv2.INTER_CUBIC,
                                     cv2.BORDER_TRANSPARENT)
            out_img = np.clip(
                img_bgr * (1 - img_face_mask_aaa) +
                (new_out * img_face_mask_aaa), 0, 1.0)

            if cfg.color_degrade_power != 0:
                #if debug:
                #    debugs += [out_img.copy()]
                out_img_reduced = imagelib.reduce_colors(out_img, 256)
                if cfg.color_degrade_power == 100:
                    out_img = out_img_reduced
                else:
                    alpha = cfg.color_degrade_power / 100.0
                    out_img = (out_img * (1.0 - alpha) +
                               out_img_reduced * alpha)

            if cfg.export_mask_alpha:
                out_img = np.concatenate(
                    [out_img, img_face_mask_aaa[:, :, 0:1]], -1)
        out_merging_mask = img_face_mask_aaa

    #if debug:
    #    debugs += [out_img.copy()]

    return out_img, out_merging_mask
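
A minimal calling sketch for ConvertMaskedFace above. The names are assumptions for
illustration: cfg stands for any config object carrying the attributes the function
reads (mode, mask_mode, predictor_func, ...), frame_info for per-frame metadata with
motion_power/motion_deg, and landmarks for the frame's face landmarks.

    # hypothetical driver, not part of the original source
    import cv2
    import numpy as np

    img_bgr_uint8 = cv2.imread('frame.png')               # HxWx3 uint8
    img_bgr = img_bgr_uint8.astype(np.float32) / 255.0    # the function works on [0,1] floats
    out_img, out_merging_mask = ConvertMaskedFace(
        cfg, frame_info, img_bgr_uint8, img_bgr, landmarks)
    cv2.imwrite('merged.png',
                (np.clip(out_img[..., :3], 0, 1) * 255).astype(np.uint8))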
Example #2
    def process(sample,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_bgr = sample.load_bgr()
        ct_sample_bgr = None
        ct_sample_mask = None
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        params = imagelib.gen_warp_params(
            sample_bgr,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        cached_images = collections.defaultdict(dict)

        sample_rnd_seed = np.random.randint(0x80000000)

        SPTF_FACETYPE_TO_FACETYPE = {
            SPTF.FACE_TYPE_HALF: FaceType.HALF,
            SPTF.FACE_TYPE_FULL: FaceType.FULL,
            SPTF.FACE_TYPE_HEAD: FaceType.HEAD,
            SPTF.FACE_TYPE_FULL_NO_ALIGN: FaceType.FULL_NO_ALIGN
        }

        outputs = []
        for opts in output_sample_types:

            resolution = opts.get('resolution', 0)
            types = opts.get('types', [])

            random_sub_res = opts.get('random_sub_res', 0)
            normalize_std_dev = opts.get('normalize_std_dev', False)
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)
            apply_ct = opts.get('apply_ct', False)
            normalize_tanh = opts.get('normalize_tanh', False)

            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
            face_mask_type = SPTF.NONE
            mode_type = SPTF.NONE
            for t in types:
                if t >= SPTF.IMG_TYPE_BEGIN and t < SPTF.IMG_TYPE_END:
                    img_type = t
                elif t >= SPTF.FACE_TYPE_BEGIN and t < SPTF.FACE_TYPE_END:
                    target_face_type = t
                elif t >= SPTF.MODE_BEGIN and t < SPTF.MODE_END:
                    mode_type = t

            if img_type == SPTF.NONE:
                raise ValueError('expected IMG_ type')

            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                pitch_yaw_roll = sample.pitch_yaw_roll
                if pitch_yaw_roll is not None:
                    pitch, yaw, roll = pitch_yaw_roll
                else:
                    pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                        sample.landmarks)
                if params['flip']:
                    yaw = -yaw

                if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch = (pitch + 1.0) / 2.0
                    yaw = (yaw + 1.0) / 2.0
                    roll = (roll + 1.0) / 2.0

                img = (pitch, yaw, roll)
            else:
                if mode_type == SPTF.NONE:
                    raise ValueError('expected MODE_ type')

                def do_transform(img, mask):
                    warp = (img_type == SPTF.IMG_WARPED
                            or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                    transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                 or img_type == SPTF.IMG_TRANSFORMED)
                    flip = img_type != SPTF.IMG_WARPED

                    img = imagelib.warp_by_params(params, img, warp, transform,
                                                  flip, True)
                    if mask is not None:
                        mask = imagelib.warp_by_params(params, mask, warp,
                                                       transform, flip, False)
                        if len(mask.shape) == 2:
                            mask = mask[..., np.newaxis]

                        img = np.concatenate((img, mask), -1)
                    return img
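                #do_transform: IMG_WARPED applies the random warp only; IMG_TRANSFORMED
                #applies the affine transform and flip only; IMG_WARPED_TRANSFORMED applies all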

                img = cached_images.get(img_type, None)
                if img is None:

                    img = sample_bgr
                    mask = None
                    cur_sample = sample

                    if is_face_sample:
                        if motion_blur is not None:
                            chance, mb_range = motion_blur
                            chance = np.clip(chance, 0, 100)

                            if np.random.randint(100) < chance:
                                mb_range = [3, 5, 7,
                                            9][:np.clip(mb_range, 0, 3) + 1]
                                dim = mb_range[np.random.randint(
                                    len(mb_range))]
                                img = imagelib.LinearMotionBlur(
                                    img, dim, np.random.randint(180))

                        mask = cur_sample.load_fanseg_mask()  #use the fanseg mask if it exists

                        if mask is None:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape, cur_sample.landmarks)

                        if cur_sample.ie_polys is not None:
                            cur_sample.ie_polys.overlay_mask(mask)

                    if sample.face_type == FaceType.MARK_ONLY:
                        if mask is not None:
                            img = np.concatenate((img, mask), -1)
                    else:
                        img = do_transform(img, mask)

                    cached_images[img_type] = img

                if is_face_sample and target_face_type != SPTF.NONE:
                    ft = SPTF_FACETYPE_TO_FACETYPE[target_face_type]
                    if ft > sample.face_type:
                        raise Exception(
                            'sample %s type %s does not match the model requirement %s. Consider extracting the necessary face type.'
                            % (sample.filename, sample.face_type, ft))

                    if sample.face_type == FaceType.MARK_ONLY:
                        img = cv2.warpAffine(
                            img,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0],
                                ft), (sample.shape[0], sample.shape[0]),
                            flags=cv2.INTER_CUBIC)

                        mask = img[..., 3:4] if img.shape[2] > 3 else None
                        img = img[..., 0:3]
                        img = do_transform(img, mask)
                        img = cv2.resize(img, (resolution, resolution),
                                         cv2.INTER_CUBIC)
                    else:
                        img = cv2.warpAffine(
                            img,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution,
                                ft), (resolution, resolution),
                            flags=cv2.INTER_CUBIC)

                else:
                    img = cv2.resize(img, (resolution, resolution),
                                     cv2.INTER_CUBIC)

                if random_sub_res != 0:
                    sub_size = resolution - random_sub_res
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_res)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img = np.clip(img, 0, 1)
                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

                if apply_ct and ct_sample is not None:
                    if ct_sample_bgr is None:
                        ct_sample_bgr = ct_sample.load_bgr()

                    ct_sample_bgr_resized = cv2.resize(
                        ct_sample_bgr, (resolution, resolution),
                        cv2.INTER_LINEAR)

                    img_bgr = imagelib.linear_color_transfer(
                        img_bgr, ct_sample_bgr_resized)
                    img_bgr = np.clip(img_bgr, 0.0, 1.0)

                if normalize_std_dev:
                    img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std(
                        (0, 1))
                elif normalize_vgg:
                    img_bgr = np.clip(img_bgr * 255, 0, 255)
                    img_bgr[:, :, 0] -= 103.939
                    img_bgr[:, :, 1] -= 116.779
                    img_bgr[:, :, 2] -= 123.68

                if mode_type == SPTF.MODE_BGR:
                    img = img_bgr
                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = np.take(img_bgr,
                                  rnd_state.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    img = img_mask

                if not debug:
                    if normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
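
The opts entries consumed above are plain dicts. A hedged sketch of an
output_sample_types configuration, using only keys the loop actually reads and
assuming the SPTF enum members referenced in the function:

    # hypothetical configuration, not from the original source
    output_sample_types = [
        {'types': [SPTF.IMG_WARPED_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_BGR],
         'resolution': 128,
         'motion_blur': (25, 1),   # 25% chance; range index 1 allows kernels [3, 5]
         'normalize_tanh': True},
        {'types': [SPTF.IMG_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_M],
         'resolution': 128},
    ]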
Example #3
    def cli_convert_face(self, img_bgr, img_face_landmarks, debug):
        if debug:
            debugs = [img_bgr.copy()]

        img_size = img_bgr.shape[1], img_bgr.shape[0]

        img_face_mask_a = LandmarksProcessor.get_image_hull_mask(
            img_bgr.shape, img_face_landmarks)

        output_size = self.predictor_input_size
        if self.super_resolution:
            output_size *= 2

        face_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks, output_size, face_type=self.face_type)
        face_output_mat = LandmarksProcessor.get_transform_mat(
            img_face_landmarks,
            output_size,
            face_type=self.face_type,
            scale=self.output_face_scale)

        dst_face_bgr = cv2.warpAffine(img_bgr,
                                      face_mat, (output_size, output_size),
                                      flags=cv2.INTER_LANCZOS4)
        dst_face_mask_a_0 = cv2.warpAffine(img_face_mask_a,
                                           face_mat,
                                           (output_size, output_size),
                                           flags=cv2.INTER_LANCZOS4)

        predictor_input_bgr = cv2.resize(
            dst_face_bgr,
            (self.predictor_input_size, self.predictor_input_size))

        if self.predictor_masked:
            prd_face_bgr, prd_face_mask_a_0 = self.predictor_func(
                predictor_input_bgr)

            prd_face_bgr = np.clip(prd_face_bgr, 0, 1.0)
            prd_face_mask_a_0 = np.clip(prd_face_mask_a_0, 0.0, 1.0)
        else:
            predicted = self.predictor_func(predictor_input_bgr)
            prd_face_bgr = np.clip(predicted, 0, 1.0)
            prd_face_mask_a_0 = cv2.resize(
                dst_face_mask_a_0,
                (self.predictor_input_size, self.predictor_input_size))

        if self.super_resolution:
            if debug:
                tmp = cv2.resize(prd_face_bgr, (output_size, output_size),
                                 cv2.INTER_CUBIC)
                debugs += [
                    np.clip(
                        cv2.warpAffine(
                            tmp, face_output_mat, img_size, img_bgr.copy(),
                            cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                ]

            prd_face_bgr = self.dc_upscale(prd_face_bgr)
            if debug:
                debugs += [
                    np.clip(
                        cv2.warpAffine(
                            prd_face_bgr, face_output_mat, img_size,
                            img_bgr.copy(),
                            cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                            cv2.BORDER_TRANSPARENT), 0, 1.0)
                ]

            if self.predictor_masked:
                prd_face_mask_a_0 = cv2.resize(prd_face_mask_a_0,
                                               (output_size, output_size),
                                               cv2.INTER_CUBIC)
            else:
                prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                               (output_size, output_size),
                                               cv2.INTER_CUBIC)

        if self.mask_mode == 2:  #dst
            prd_face_mask_a_0 = cv2.resize(dst_face_mask_a_0,
                                           (output_size, output_size),
                                           cv2.INTER_CUBIC)
        elif self.mask_mode >= 3 and self.mask_mode <= 6:

            if self.mask_mode == 3 or self.mask_mode == 5 or self.mask_mode == 6:
                prd_face_bgr_256 = cv2.resize(prd_face_bgr, (256, 256))
                prd_face_bgr_256_mask = self.fan_seg.extract_from_bgr(
                    prd_face_bgr_256[np.newaxis, ...])[0]
                FAN_prd_face_mask_a_0 = cv2.resize(prd_face_bgr_256_mask,
                                                   (output_size, output_size),
                                                   cv2.INTER_CUBIC)

            if self.mask_mode == 4 or self.mask_mode == 5 or self.mask_mode == 6:
                face_256_mat = LandmarksProcessor.get_transform_mat(
                    img_face_landmarks, 256, face_type=FaceType.FULL)
                dst_face_256_bgr = cv2.warpAffine(img_bgr,
                                                  face_256_mat, (256, 256),
                                                  flags=cv2.INTER_LANCZOS4)
                dst_face_256_mask = self.fan_seg.extract_from_bgr(
                    dst_face_256_bgr[np.newaxis, ...])[0]
                FAN_dst_face_mask_a_0 = cv2.resize(dst_face_256_mask,
                                                   (output_size, output_size),
                                                   cv2.INTER_CUBIC)

            if self.mask_mode == 3:  #FAN-prd
                prd_face_mask_a_0 = FAN_prd_face_mask_a_0
            elif self.mask_mode == 4:  #FAN-dst
                prd_face_mask_a_0 = FAN_dst_face_mask_a_0
            elif self.mask_mode == 5:
                prd_face_mask_a_0 = FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0
            elif self.mask_mode == 6:
                prd_face_mask_a_0 = prd_face_mask_a_0 * FAN_prd_face_mask_a_0 * FAN_dst_face_mask_a_0

        prd_face_mask_a_0[prd_face_mask_a_0 < 0.001] = 0.0

        prd_face_mask_a = prd_face_mask_a_0[..., np.newaxis]
        prd_face_mask_aaa = np.repeat(prd_face_mask_a, (3, ), axis=-1)

        img_face_mask_aaa = cv2.warpAffine(prd_face_mask_aaa,
                                           face_output_mat,
                                           img_size,
                                           np.zeros(img_bgr.shape,
                                                    dtype=np.float32),
                                           flags=cv2.WARP_INVERSE_MAP
                                           | cv2.INTER_LANCZOS4)
        img_face_mask_aaa = np.clip(img_face_mask_aaa, 0.0, 1.0)
        img_face_mask_aaa[img_face_mask_aaa <= 0.1] = 0.0  #get rid of noise

        if debug:
            debugs += [img_face_mask_aaa.copy()]

        out_img = img_bgr.copy()

        if self.mode == 'raw':
            if self.raw_mode == 'rgb' or self.raw_mode == 'rgb-mask':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

            if self.raw_mode == 'rgb-mask':
                out_img = np.concatenate(
                    [out_img,
                     np.expand_dims(img_face_mask_aaa[:, :, 0], -1)], -1)

            if self.raw_mode == 'mask-only':
                out_img = img_face_mask_aaa

            if self.raw_mode == 'predicted-only':
                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size,
                    np.zeros(out_img.shape, dtype=np.float32),
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)

        else:
            #averaging [lenx, leny, maskx, masky] by grayscale gradients of upscaled mask
            ar = []
            for i in range(1, 10):
                maxregion = np.argwhere(img_face_mask_aaa > i / 10.0)
                if maxregion.size != 0:
                    miny, minx = maxregion.min(axis=0)[:2]
                    maxy, maxx = maxregion.max(axis=0)[:2]
                    lenx = maxx - minx
                    leny = maxy - miny
                    if min(lenx, leny) >= 4:
                        ar += [[lenx, leny]]

            if len(ar) > 0:
                lenx, leny = np.mean(ar, axis=0)
                lowest_len = min(lenx, leny)
                if debug:
                    io.log_info("lenx/leny:(%d/%d) " % (lenx, leny))
                    io.log_info("lowest_len = %f" % (lowest_len))

                if self.erode_mask_modifier != 0:
                    ero = int(lowest_len *
                              (0.126 - lowest_len * 0.00004551365) * 0.01 *
                              self.erode_mask_modifier)
                    if debug:
                        io.log_info("erode_size = %d" % (ero))
                    if ero > 0:
                        img_face_mask_aaa = cv2.erode(
                            img_face_mask_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (ero, ero)),
                            iterations=1)
                    elif ero < 0:
                        img_face_mask_aaa = cv2.dilate(
                            img_face_mask_aaa,
                            cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (-ero, -ero)),
                            iterations=1)

                img_mask_blurry_aaa = img_face_mask_aaa

                if self.clip_hborder_mask_per > 0:  #clip hborder before blur
                    prd_hborder_rect_mask_a = np.ones(prd_face_mask_a.shape,
                                                      dtype=np.float32)
                    prd_border_size = int(prd_hborder_rect_mask_a.shape[1] *
                                          self.clip_hborder_mask_per)
                    prd_hborder_rect_mask_a[:, 0:prd_border_size, :] = 0
                    prd_hborder_rect_mask_a[:, -prd_border_size:, :] = 0
                    prd_hborder_rect_mask_a = np.expand_dims(
                        cv2.blur(prd_hborder_rect_mask_a,
                                 (prd_border_size, prd_border_size)), -1)

                    img_prd_hborder_rect_mask_a = cv2.warpAffine(
                        prd_hborder_rect_mask_a, face_output_mat, img_size,
                        np.zeros(img_bgr.shape, dtype=np.float32),
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4)
                    img_prd_hborder_rect_mask_a = np.expand_dims(
                        img_prd_hborder_rect_mask_a, -1)
                    img_mask_blurry_aaa *= img_prd_hborder_rect_mask_a
                    img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0, 1.0)

                    if debug:
                        debugs += [img_mask_blurry_aaa.copy()]

                if self.blur_mask_modifier > 0:
                    blur = int(lowest_len * 0.10 * 0.01 *
                               self.blur_mask_modifier)
                    if debug:
                        io.log_info("blur_size = %d" % (blur))
                    if blur > 0:
                        img_mask_blurry_aaa = cv2.blur(img_mask_blurry_aaa,
                                                       (blur, blur))

                img_mask_blurry_aaa = np.clip(img_mask_blurry_aaa, 0, 1.0)
                face_mask_blurry_aaa = cv2.warpAffine(
                    img_mask_blurry_aaa, face_mat, (output_size, output_size))

                if debug:
                    debugs += [img_mask_blurry_aaa.copy()]

                if 'seamless' not in self.mode and self.color_transfer_mode is not None:
                    if self.color_transfer_mode == 'rct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        prd_face_bgr = imagelib.reinhard_color_transfer(
                            np.clip((prd_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            np.clip((dst_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            source_mask=prd_face_mask_a,
                            target_mask=prd_face_mask_a)
                        prd_face_bgr = np.clip(
                            prd_face_bgr.astype(np.float32) / 255.0, 0.0, 1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                    elif self.color_transfer_mode == 'lct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        prd_face_bgr = imagelib.linear_color_transfer(
                            prd_face_bgr, dst_face_bgr)
                        prd_face_bgr = np.clip(prd_face_bgr, 0.0, 1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        prd_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                if self.mode == 'hist-match-bw':
                    prd_face_bgr = cv2.cvtColor(prd_face_bgr,
                                                cv2.COLOR_BGR2GRAY)
                    prd_face_bgr = np.repeat(np.expand_dims(prd_face_bgr, -1),
                                             (3, ), -1)

                if self.mode == 'hist-match' or self.mode == 'hist-match-bw':
                    if debug:
                        debugs += [
                            cv2.warpAffine(
                                prd_face_bgr, face_output_mat, img_size,
                                np.zeros(img_bgr.shape, dtype=np.float32),
                                cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                                cv2.BORDER_TRANSPARENT)
                        ]

                    hist_mask_a = np.ones(prd_face_bgr.shape[:2] + (1, ),
                                          dtype=np.float32)

                    if self.masked_hist_match:
                        hist_mask_a *= prd_face_mask_a

                    white = (1.0 - hist_mask_a) * np.ones(
                        prd_face_bgr.shape[:2] + (1, ), dtype=np.float32)

                    hist_match_1 = prd_face_bgr * hist_mask_a + white
                    hist_match_1[hist_match_1 > 1.0] = 1.0

                    hist_match_2 = dst_face_bgr * hist_mask_a + white
                    hist_match_2[hist_match_2 > 1.0] = 1.0

                    prd_face_bgr = imagelib.color_hist_match(
                        hist_match_1, hist_match_2, self.hist_match_threshold)

                    #if self.masked_hist_match:
                    #    prd_face_bgr -= white

                if self.mode == 'hist-match-bw':
                    prd_face_bgr = prd_face_bgr.astype(dtype=np.float32)

                out_img = cv2.warpAffine(
                    prd_face_bgr, face_output_mat, img_size, out_img,
                    cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                    cv2.BORDER_TRANSPARENT)
                out_img = np.clip(out_img, 0.0, 1.0)

                if debug:
                    debugs += [out_img.copy()]

                if self.mode == 'overlay':
                    pass

                if 'seamless' in self.mode:
                    #mask used for cv2.seamlessClone
                    img_face_seamless_mask_a = None
                    img_face_mask_a = img_mask_blurry_aaa[..., 0:1]
                    for i in range(1, 10):
                        a = img_face_mask_a > i / 10.0
                        if len(np.argwhere(a)) == 0:
                            continue
                        img_face_seamless_mask_a = img_mask_blurry_aaa[
                            ..., 0:1].copy()
                        img_face_seamless_mask_a[a] = 1.0
                        img_face_seamless_mask_a[
                            img_face_seamless_mask_a <= i / 10.0] = 0.0
                        break

                    try:
                        #calc same bounding rect and center point as in cv2.seamlessClone to prevent jittering
                        l, t, w, h = cv2.boundingRect(
                            (img_face_seamless_mask_a * 255).astype(np.uint8))
                        s_maskx, s_masky = int(l + w / 2), int(t + h / 2)

                        out_img = cv2.seamlessClone(
                            (out_img * 255).astype(np.uint8),
                            (img_bgr * 255).astype(np.uint8),
                            (img_face_seamless_mask_a * 255).astype(np.uint8),
                            (s_maskx, s_masky), cv2.NORMAL_CLONE)
                        out_img = out_img.astype(dtype=np.float32) / 255.0
                    except Exception as e:
                        #seamlessClone may fail in some cases
                        e_str = traceback.format_exc()

                        if 'MemoryError' in e_str:
                            raise Exception(
                                "Seamless fail: " + e_str
                            )  #reraise MemoryError so another process can reprocess this data
                        else:
                            print("Seamless fail: " + e_str)

                    if debug:
                        debugs += [out_img.copy()]

                out_img = np.clip(
                    img_bgr * (1 - img_mask_blurry_aaa) +
                    (out_img * img_mask_blurry_aaa), 0, 1.0)

                if 'seamless' in self.mode and self.color_transfer_mode is not None:
                    out_face_bgr = cv2.warpAffine(out_img, face_mat,
                                                  (output_size, output_size))

                    if self.color_transfer_mode == 'rct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        out_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        new_out_face_bgr = imagelib.reinhard_color_transfer(
                            np.clip((out_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            np.clip((dst_face_bgr * 255).astype(np.uint8), 0,
                                    255),
                            source_mask=face_mask_blurry_aaa,
                            target_mask=face_mask_blurry_aaa)
                        new_out_face_bgr = np.clip(
                            new_out_face_bgr.astype(np.float32) / 255.0, 0.0,
                            1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        new_out_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                    elif self.color_transfer_mode == 'lct':
                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        out_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                        new_out_face_bgr = imagelib.linear_color_transfer(
                            out_face_bgr, dst_face_bgr)
                        new_out_face_bgr = np.clip(new_out_face_bgr, 0.0, 1.0)

                        if debug:
                            debugs += [
                                np.clip(
                                    cv2.warpAffine(
                                        new_out_face_bgr, face_output_mat,
                                        img_size,
                                        np.zeros(img_bgr.shape,
                                                 dtype=np.float32),
                                        cv2.WARP_INVERSE_MAP
                                        | cv2.INTER_LANCZOS4,
                                        cv2.BORDER_TRANSPARENT), 0, 1.0)
                            ]

                    new_out = cv2.warpAffine(
                        new_out_face_bgr, face_mat, img_size, img_bgr.copy(),
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                        cv2.BORDER_TRANSPARENT)
                    out_img = np.clip(
                        img_bgr * (1 - img_mask_blurry_aaa) +
                        (new_out * img_mask_blurry_aaa), 0, 1.0)

                if self.mode == 'seamless-hist-match':
                    out_face_bgr = cv2.warpAffine(out_img, face_mat,
                                                  (output_size, output_size))
                    new_out_face_bgr = imagelib.color_hist_match(
                        out_face_bgr, dst_face_bgr, self.hist_match_threshold)
                    new_out = cv2.warpAffine(
                        new_out_face_bgr, face_mat, img_size, img_bgr.copy(),
                        cv2.WARP_INVERSE_MAP | cv2.INTER_LANCZOS4,
                        cv2.BORDER_TRANSPARENT)
                    out_img = np.clip(
                        img_bgr * (1 - img_mask_blurry_aaa) +
                        (new_out * img_mask_blurry_aaa), 0, 1.0)

                if self.final_image_color_degrade_power != 0:
                    if debug:
                        debugs += [out_img.copy()]
                    out_img_reduced = imagelib.reduce_colors(out_img, 256)
                    if self.final_image_color_degrade_power == 100:
                        out_img = out_img_reduced
                    else:
                        alpha = self.final_image_color_degrade_power / 100.0
                        out_img = (out_img * (1.0 - alpha) +
                                   out_img_reduced * alpha)

                if self.alpha:
                    out_img = np.concatenate([
                        out_img,
                        np.expand_dims(img_mask_blurry_aaa[:, :, 0], -1)
                    ], -1)

        out_img = np.clip(out_img, 0.0, 1.0)

        if debug:
            debugs += [out_img.copy()]

        return debugs if debug else out_img
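
A minimal sketch of how a per-face convert routine like the one above is typically driven per frame. Every name here (converter.convert_face, detected_landmarks, the file paths) is an assumption for illustration, not part of the example; the only conventions taken from the code are the float32 BGR frame in [0, 1] and the single blended frame returned when debug is off:

import cv2
import numpy as np

frame = cv2.imread('frame_0001.png').astype(np.float32) / 255.0  # BGR in [0, 1]

# one call per detected face; each call blends one converted face into the frame
for landmarks in detected_landmarks:  # assumed: list of 68x2 landmark arrays
    frame = converter.convert_face(frame, landmarks, debug=False)  # hypothetical wrapper

cv2.imwrite('out_0001.png', np.clip(frame * 255, 0, 255).astype(np.uint8))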
Example #4
    def process(samples,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for sample in samples:
            sample_bgr = sample.load_bgr()
            ct_sample_bgr = None
            ct_sample_mask = None
            h, w, c = sample_bgr.shape

            is_face_sample = sample.landmarks is not None

            if debug and is_face_sample:
                LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                                  (0, 1, 0))

            params = imagelib.gen_warp_params(
                sample_bgr,
                sample_process_options.random_flip,
                rotation_range=sample_process_options.rotation_range,
                scale_range=sample_process_options.scale_range,
                tx_range=sample_process_options.tx_range,
                ty_range=sample_process_options.ty_range,
                rnd_seed=sample_rnd_seed)

            outputs_sample = []
            for opts in output_sample_types:

                resolution = opts.get('resolution', 0)
                types = opts.get('types', [])

                border_replicate = opts.get('border_replicate', True)
                random_sub_res = opts.get('random_sub_res', 0)
                normalize_std_dev = opts.get('normalize_std_dev', False)
                normalize_vgg = opts.get('normalize_vgg', False)
                motion_blur = opts.get('motion_blur', None)
                gaussian_blur = opts.get('gaussian_blur', None)

                ct_mode = opts.get('ct_mode', None)  # None disables color transfer
                normalize_tanh = opts.get('normalize_tanh', False)

                img_type = SPTF.NONE
                target_face_type = SPTF.NONE
                face_mask_type = SPTF.NONE
                mode_type = SPTF.NONE
                for t in types:
                    if SPTF.IMG_TYPE_BEGIN <= t < SPTF.IMG_TYPE_END:
                        img_type = t
                    elif SPTF.FACE_TYPE_BEGIN <= t < SPTF.FACE_TYPE_END:
                        target_face_type = t
                    elif SPTF.MODE_BEGIN <= t < SPTF.MODE_END:
                        mode_type = t

                if img_type == SPTF.NONE:
                    raise ValueError('expected IMG_ type')

                if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                    l = sample.landmarks
                    l = np.concatenate([
                        np.expand_dims(l[:, 0] / w, -1),
                        np.expand_dims(l[:, 1] / h, -1)
                    ], -1)
                    l = np.clip(l, 0.0, 1.0)
                    img = l
                elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch_yaw_roll = sample.pitch_yaw_roll
                    if pitch_yaw_roll is not None:
                        pitch, yaw, roll = pitch_yaw_roll
                    else:
                        pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                            sample.landmarks)
                    if params['flip']:
                        yaw = -yaw

                    if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
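                        # remap angles from [-1, 1] to [0, 1] to match a
                        # sigmoid-activated output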
                        pitch = (pitch + 1.0) / 2.0
                        yaw = (yaw + 1.0) / 2.0
                        roll = (roll + 1.0) / 2.0

                    img = (pitch, yaw, roll)
                else:
                    if mode_type == SPTF.NONE:
                        raise ValueError('expected MODE_ type')

                    def do_transform(img, mask):
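                        # IMG_WARPED: random warp only (no flip);
                        # IMG_WARPED_TRANSFORMED: warp + affine transform + flip;
                        # IMG_TRANSFORMED: affine transform + flip only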
                        warp = (img_type == SPTF.IMG_WARPED
                                or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                        transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                     or img_type == SPTF.IMG_TRANSFORMED)
                        flip = img_type != SPTF.IMG_WARPED

                        img = imagelib.warp_by_params(params, img, warp,
                                                      transform, flip,
                                                      border_replicate)
                        if mask is not None:
                            mask = imagelib.warp_by_params(
                                params, mask, warp, transform, flip, False)
                            if len(mask.shape) == 2:
                                mask = mask[..., np.newaxis]

                        return img, mask

                    img = sample_bgr

                    ### Prepare a mask
                    mask = None
                    if is_face_sample:
                        mask = sample.load_fanseg_mask()  # use the precomputed fanseg mask if one exists

                        if mask is None:
                            if sample.eyebrows_expand_mod is not None:
                                mask = LandmarksProcessor.get_image_hull_mask(
                                    img.shape,
                                    sample.landmarks,
                                    eyebrows_expand_mod=sample.
                                    eyebrows_expand_mod)
                            else:
                                mask = LandmarksProcessor.get_image_hull_mask(
                                    img.shape, sample.landmarks)

                        if sample.ie_polys is not None:
                            sample.ie_polys.overlay_mask(mask)
                    ##################

                    if motion_blur is not None:
                        chance, mb_max_size = motion_blur
                        chance = np.clip(chance, 0, 100)

                        if np.random.randint(100) < chance:
                            img = imagelib.LinearMotionBlur(
                                img,
                                np.random.randint(mb_max_size) + 1,
                                np.random.randint(360))

                    if gaussian_blur is not None:
                        chance, kernel_max_size = gaussian_blur
                        chance = np.clip(chance, 0, 100)

                        if np.random.randint(100) < chance:
                            img = cv2.GaussianBlur(
                                img,
                                (np.random.randint(kernel_max_size) * 2 + 1, )
                                * 2, 0)

                    if is_face_sample and target_face_type != SPTF.NONE:
                        target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[
                            target_face_type]
                        if target_ft > sample.face_type:
                            raise Exception(
                                'Sample %s has face type %s, which does not match the model requirement %s. Consider extracting the required face type.'
                                % (sample.filename, sample.face_type, target_ft))

                        if sample.face_type == FaceType.MARK_ONLY:
                            # first warp to the target face type
                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, sample.shape[0], target_ft)
                            img = cv2.warpAffine(
                                img,
                                mat, (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_CUBIC)
                            mask = cv2.warpAffine(
                                mask,
                                mat, (sample.shape[0], sample.shape[0]),
                                flags=cv2.INTER_CUBIC)
                            # then apply the random warp/transform/flip
                            img, mask = do_transform(img, mask)
                            img = np.concatenate((img, mask), -1)
                            img = cv2.resize(img, (resolution, resolution),
                                             interpolation=cv2.INTER_CUBIC)
                        else:
                            img, mask = do_transform(img, mask)

                            mat = LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution, target_ft)
                            img = cv2.warpAffine(
                                img,
                                mat, (resolution, resolution),
                                borderMode=(cv2.BORDER_REPLICATE
                                            if border_replicate else
                                            cv2.BORDER_CONSTANT),
                                flags=cv2.INTER_CUBIC)
                            mask = cv2.warpAffine(
                                mask,
                                mat, (resolution, resolution),
                                borderMode=cv2.BORDER_CONSTANT,
                                flags=cv2.INTER_CUBIC)
                            img = np.concatenate((img, mask[..., None]), -1)

                    else:
                        img, mask = do_transform(img, mask)
                        img = np.concatenate((img, mask), -1)
                        img = cv2.resize(img, (resolution, resolution),
                                         interpolation=cv2.INTER_CUBIC)

                    if random_sub_res != 0:
                        sub_size = resolution - random_sub_res
                        rnd_state = np.random.RandomState(sample_rnd_seed +
                                                          random_sub_res)
                        start_x = rnd_state.randint(sub_size + 1)
                        start_y = rnd_state.randint(sub_size + 1)
                        img = img[start_y:start_y + sub_size,
                                  start_x:start_x + sub_size, :]

                    img = np.clip(img, 0, 1).astype(np.float32)
                    img_bgr = img[..., 0:3]
                    img_mask = img[..., 3:4]

                    if ct_mode is not None and ct_sample is not None:
                        if ct_sample_bgr is None:
                            ct_sample_bgr = ct_sample.load_bgr()

                        ct_sample_bgr_resized = cv2.resize(
                            ct_sample_bgr, (resolution, resolution),
                            interpolation=cv2.INTER_LINEAR)

                        if ct_mode == 'lct':
                            img_bgr = imagelib.linear_color_transfer(
                                img_bgr, ct_sample_bgr_resized)
                            img_bgr = np.clip(img_bgr, 0.0, 1.0)
                        elif ct_mode == 'rct':
                            img_bgr = imagelib.reinhard_color_transfer(
                                np.clip(img_bgr * 255, 0,
                                        255).astype(np.uint8),
                                np.clip(ct_sample_bgr_resized * 255, 0,
                                        255).astype(np.uint8))
                            img_bgr = np.clip(
                                img_bgr.astype(np.float32) / 255.0, 0.0, 1.0)
                        elif ct_mode == 'mkl':
                            img_bgr = imagelib.color_transfer_mkl(
                                img_bgr, ct_sample_bgr_resized)
                        elif ct_mode == 'idt':
                            img_bgr = imagelib.color_transfer_idt(
                                img_bgr, ct_sample_bgr_resized)
                        elif ct_mode == 'sot':
                            img_bgr = imagelib.color_transfer_sot(
                                img_bgr, ct_sample_bgr_resized)
                            img_bgr = np.clip(img_bgr, 0.0, 1.0)

                    if normalize_std_dev:
                        img_bgr = (img_bgr - img_bgr.mean(
                            (0, 1))) / img_bgr.std((0, 1))
                    elif normalize_vgg:
                        img_bgr = np.clip(img_bgr * 255, 0, 255)
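                        # subtract the standard Caffe VGG per-channel BGR means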
                        img_bgr[:, :, 0] -= 103.939
                        img_bgr[:, :, 1] -= 116.779
                        img_bgr[:, :, 2] -= 123.68

                    if mode_type == SPTF.MODE_BGR:
                        img = img_bgr
                    elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                        rnd_state = np.random.RandomState(sample_rnd_seed)
                        img = np.take(img_bgr,
                                      rnd_state.permutation(img_bgr.shape[-1]),
                                      axis=-1)

                    elif mode_type == SPTF.MODE_BGR_RANDOM_HSV_SHIFT:
                        rnd_state = np.random.RandomState(sample_rnd_seed)
                        hsv = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2HSV)
                        # use hue/sat/val names to avoid shadowing the image
                        # height h defined above
                        hue, sat, val = cv2.split(hsv)
                        hue = (hue + rnd_state.randint(360)) % 360
                        sat = np.clip(sat + rnd_state.random() - 0.5, 0, 1)
                        val = np.clip(val + rnd_state.random() - 0.5, 0, 1)
                        hsv = cv2.merge([hue, sat, val])
                        img = np.clip(cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR),
                                      0, 1)
                    elif mode_type == SPTF.MODE_G:
                        img = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)[...,
                                                                        None]
                    elif mode_type == SPTF.MODE_GGG:
                        img = np.repeat(
                            np.expand_dims(
                                cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                            (3, ), -1)
                    elif mode_type == SPTF.MODE_M and is_face_sample:
                        img = img_mask

                    if not debug:
                        if normalize_tanh:
                            img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                        else:
                            img = np.clip(img, 0.0, 1.0)

                outputs_sample.append(img)
            outputs += [outputs_sample]

        return outputs
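
A minimal sketch of how a sampler like this is typically configured. The SPTF.IMG_WARPED_TRANSFORMED, SPTF.IMG_TRANSFORMED, SPTF.MODE_BGR and SPTF.MODE_M constants follow the naming used in the code above; SPTF.FACE_TYPE_FULL and the samples/sample_process_options objects are assumptions for illustration:

SPTF = SampleProcessor.Types

output_sample_types = [
    # randomly warped source face, BGR, 128x128, with occasional motion blur
    {'types': (SPTF.IMG_WARPED_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_BGR),
     'resolution': 128,
     'motion_blur': (25, 5)},  # 25% chance, kernel size up to 5
    # matching unwarped target mask
    {'types': (SPTF.IMG_TRANSFORMED, SPTF.FACE_TYPE_FULL, SPTF.MODE_M),
     'resolution': 128},
]

batch = SampleProcessor.process(samples, sample_process_options,
                                output_sample_types, debug=False)
# batch[i] holds [warped_bgr, target_mask] for samples[i]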
Example #5
    def process(sample,
                sample_process_options,
                output_sample_types,
                debug,
                ct_sample=None):
        SPTF = SampleProcessor.Types

        sample_bgr = sample.load_bgr()
        ct_sample_bgr = None
        ct_sample_mask = None
        h, w, c = sample_bgr.shape

        is_face_sample = sample.landmarks is not None

        if debug and is_face_sample:
            LandmarksProcessor.draw_landmarks(sample_bgr, sample.landmarks,
                                              (0, 1, 0))

        params = imagelib.gen_warp_params(
            sample_bgr,
            sample_process_options.random_flip,
            rotation_range=sample_process_options.rotation_range,
            scale_range=sample_process_options.scale_range,
            tx_range=sample_process_options.tx_range,
            ty_range=sample_process_options.ty_range)

        sample_rnd_seed = np.random.randint(0x80000000)

        outputs = []
        for opts in output_sample_types:

            resolution = opts.get('resolution', 0)
            types = opts.get('types', [])

            border_replicate = opts.get('border_replicate', True)
            random_sub_res = opts.get('random_sub_res', 0)
            normalize_std_dev = opts.get('normalize_std_dev', False)
            normalize_vgg = opts.get('normalize_vgg', False)
            motion_blur = opts.get('motion_blur', None)
            apply_ct = opts.get('apply_ct', ColorTransferMode.NONE)
            normalize_tanh = opts.get('normalize_tanh', False)

            img_type = SPTF.NONE
            target_face_type = SPTF.NONE
            face_mask_type = SPTF.NONE
            mode_type = SPTF.NONE
            for t in types:
                if SPTF.IMG_TYPE_BEGIN <= t < SPTF.IMG_TYPE_END:
                    img_type = t
                elif SPTF.FACE_TYPE_BEGIN <= t < SPTF.FACE_TYPE_END:
                    target_face_type = t
                elif SPTF.MODE_BEGIN <= t < SPTF.MODE_END:
                    mode_type = t

            if img_type == SPTF.NONE:
                raise ValueError('expected IMG_ type')

            if img_type == SPTF.IMG_LANDMARKS_ARRAY:
                l = sample.landmarks
                l = np.concatenate([
                    np.expand_dims(l[:, 0] / w, -1),
                    np.expand_dims(l[:, 1] / h, -1)
                ], -1)
                l = np.clip(l, 0.0, 1.0)
                img = l
            elif img_type == SPTF.IMG_PITCH_YAW_ROLL or img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                pitch_yaw_roll = sample.pitch_yaw_roll
                if pitch_yaw_roll is not None:
                    pitch, yaw, roll = pitch_yaw_roll
                else:
                    pitch, yaw, roll = LandmarksProcessor.estimate_pitch_yaw_roll(
                        sample.landmarks)
                if params['flip']:
                    yaw = -yaw

                if img_type == SPTF.IMG_PITCH_YAW_ROLL_SIGMOID:
                    pitch = (pitch + 1.0) / 2.0
                    yaw = (yaw + 1.0) / 2.0
                    roll = (roll + 1.0) / 2.0

                img = (pitch, yaw, roll)
            else:
                if mode_type == SPTF.NONE:
                    raise ValueError('expected MODE_ type')

                def do_transform(img, mask):
                    warp = (img_type == SPTF.IMG_WARPED
                            or img_type == SPTF.IMG_WARPED_TRANSFORMED)
                    transform = (img_type == SPTF.IMG_WARPED_TRANSFORMED
                                 or img_type == SPTF.IMG_TRANSFORMED)
                    flip = img_type != SPTF.IMG_WARPED

                    img = imagelib.warp_by_params(params, img, warp, transform,
                                                  flip, border_replicate)
                    if mask is not None:
                        mask = imagelib.warp_by_params(params, mask, warp,
                                                       transform, flip, False)
                        if len(mask.shape) == 2:
                            mask = mask[..., np.newaxis]

                        img = np.concatenate((img, mask), -1)
                    return img

                img = sample_bgr

                ### Prepare a mask
                mask = None
                if is_face_sample:
                    mask = sample.load_fanseg_mask()  # use the precomputed fanseg mask if one exists

                    if mask is None:
                        if sample.eyebrows_expand_mod is not None:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape,
                                sample.landmarks,
                                eyebrows_expand_mod=sample.eyebrows_expand_mod)
                        else:
                            mask = LandmarksProcessor.get_image_hull_mask(
                                img.shape, sample.landmarks)

                    if sample.ie_polys is not None:
                        sample.ie_polys.overlay_mask(mask)
                ##################

                if motion_blur is not None:
                    chance, mb_max_size = motion_blur
                    chance = np.clip(chance, 0, 100)

                    if np.random.randint(100) < chance:
                        img = imagelib.LinearMotionBlur(
                            img,
                            np.random.randint(mb_max_size) + 1,
                            np.random.randint(360))

                if is_face_sample and target_face_type != SPTF.NONE:
                    target_ft = SampleProcessor.SPTF_FACETYPE_TO_FACETYPE[
                        target_face_type]
                    if target_ft > sample.face_type:
                        raise Exception(
                            'Sample %s has face type %s, which does not match the model requirement %s. Consider extracting the required face type.'
                            % (sample.filename, sample.face_type, target_ft))

                    if sample.face_type == FaceType.MARK_ONLY:
                        # first warp to the target face type
                        mat = LandmarksProcessor.get_transform_mat(
                            sample.landmarks, sample.shape[0], target_ft)
                        img = cv2.warpAffine(
                            img,
                            mat, (sample.shape[0], sample.shape[0]),
                            flags=cv2.INTER_CUBIC)
                        mask = cv2.warpAffine(
                            mask,
                            mat, (sample.shape[0], sample.shape[0]),
                            flags=cv2.INTER_CUBIC)
                        # then apply the random warp/transform/flip
                        img = do_transform(img, mask)
                        img = cv2.resize(img, (resolution, resolution),
                                         interpolation=cv2.INTER_CUBIC)
                    else:
                        img = do_transform(img, mask)
                        img = cv2.warpAffine(
                            img,
                            LandmarksProcessor.get_transform_mat(
                                sample.landmarks, resolution,
                                target_ft), (resolution, resolution),
                            borderMode=(cv2.BORDER_REPLICATE
                                        if border_replicate else
                                        cv2.BORDER_CONSTANT),
                            flags=cv2.INTER_CUBIC)

                else:
                    img = do_transform(img, mask)
                    img = cv2.resize(img, (resolution, resolution),
                                     interpolation=cv2.INTER_CUBIC)

                if random_sub_res != 0:
                    sub_size = resolution - random_sub_res
                    rnd_state = np.random.RandomState(sample_rnd_seed +
                                                      random_sub_res)
                    start_x = rnd_state.randint(sub_size + 1)
                    start_y = rnd_state.randint(sub_size + 1)
                    img = img[start_y:start_y + sub_size,
                              start_x:start_x + sub_size, :]

                img = np.clip(img, 0, 1)
                img_bgr = img[..., 0:3]
                img_mask = img[..., 3:4]

                if apply_ct and ct_sample is not None:
                    if ct_sample_bgr is None:
                        ct_sample_bgr = ct_sample.load_bgr()

                    if apply_ct == ColorTransferMode.LCT:
                        img_bgr = imagelib.linear_color_transfer(
                            img_bgr, ct_sample_bgr)

                    elif ColorTransferMode.RCT <= apply_ct <= ColorTransferMode.MASKED_RCT_PAPER_CLIP:
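                        # each mode maps to (use_masks, use_paper, use_clip)
                        # flags for the Reinhard transfer below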
                        ct_options = {
                            ColorTransferMode.RCT: (False, False, False),
                            ColorTransferMode.RCT_CLIP: (False, False, True),
                            ColorTransferMode.RCT_PAPER: (False, True, False),
                            ColorTransferMode.RCT_PAPER_CLIP:
                            (False, True, True),
                            ColorTransferMode.MASKED_RCT: (True, False, False),
                            ColorTransferMode.MASKED_RCT_CLIP:
                            (True, False, True),
                            ColorTransferMode.MASKED_RCT_PAPER:
                            (True, True, False),
                            ColorTransferMode.MASKED_RCT_PAPER_CLIP:
                            (True, True, True),
                        }

                        use_masks, use_paper, use_clip = ct_options[apply_ct]
                        if not use_masks:
                            img_bgr = imagelib.reinhard_color_transfer(
                                img_bgr,
                                ct_sample_bgr,
                                clip=use_clip,
                                preserve_paper=use_paper)
                        else:
                            if ct_sample_mask is None:
                                ct_sample_mask = ct_sample.load_mask()
                            img_bgr = imagelib.reinhard_color_transfer(
                                img_bgr,
                                ct_sample_bgr,
                                clip=use_clip,
                                preserve_paper=use_paper,
                                source_mask=img_mask,
                                target_mask=ct_sample_mask)

                if normalize_std_dev:
                    img_bgr = (img_bgr - img_bgr.mean((0, 1))) / img_bgr.std(
                        (0, 1))
                elif normalize_vgg:
                    img_bgr = np.clip(img_bgr * 255, 0, 255)
                    img_bgr[:, :, 0] -= 103.939
                    img_bgr[:, :, 1] -= 116.779
                    img_bgr[:, :, 2] -= 123.68

                if mode_type == SPTF.MODE_BGR:
                    img = img_bgr
                elif mode_type == SPTF.MODE_BGR_SHUFFLE:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = np.take(img_bgr,
                                  rnd_state.permutation(img_bgr.shape[-1]),
                                  axis=-1)
                elif mode_type == SPTF.MODE_LAB_RAND_TRANSFORM:
                    rnd_state = np.random.RandomState(sample_rnd_seed)
                    img = random_color_transform(img_bgr, rnd_state)
                elif mode_type == SPTF.MODE_G:
                    img = np.concatenate((np.expand_dims(
                        cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY),
                        -1), img_mask), -1)
                elif mode_type == SPTF.MODE_GGG:
                    img = np.concatenate((np.repeat(
                        np.expand_dims(
                            cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY), -1),
                        (3, ), -1), img_mask), -1)
                elif mode_type == SPTF.MODE_M and is_face_sample:
                    img = img_mask

                if not debug:
                    if normalize_tanh:
                        img = np.clip(img * 2.0 - 1.0, -1.0, 1.0)
                    else:
                        img = np.clip(img, 0.0, 1.0)

            outputs.append(img)

        if debug:
            result = []

            for output in outputs:
                if output.shape[2] < 4:
                    result += [
                        output,
                    ]
                elif output.shape[2] == 4:
                    result += [
                        output[..., 0:3] * output[..., 3:4],
                    ]

            return result
        else:
            return outputs
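
For reference, imagelib.reinhard_color_transfer as used in these examples follows the Reinhard et al. (2001) statistics-matching idea: shift and scale each LAB channel of the image being adjusted so its mean and standard deviation match the reference. A minimal unmasked sketch under that assumption (the function name and the float32 BGR-in-[0, 1] convention are illustrative, not the library's actual API):

import cv2
import numpy as np

def reinhard_transfer_sketch(img_bgr, ref_bgr):
    # convert both images to LAB; OpenCV expects float32 BGR in [0, 1]
    img_lab = cv2.cvtColor(img_bgr.astype(np.float32), cv2.COLOR_BGR2LAB)
    ref_lab = cv2.cvtColor(ref_bgr.astype(np.float32), cv2.COLOR_BGR2LAB)

    img_mean, img_std = img_lab.mean((0, 1)), img_lab.std((0, 1)) + 1e-6
    ref_mean, ref_std = ref_lab.mean((0, 1)), ref_lab.std((0, 1))

    # match per-channel mean and standard deviation, then convert back
    out_lab = (img_lab - img_mean) / img_std * ref_std + ref_mean
    return np.clip(cv2.cvtColor(out_lab, cv2.COLOR_LAB2BGR), 0.0, 1.0)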