# Example #1
def face_to_cartoon(img):
    """Cartoonize a single face image with WarpGAN.

    Detects and aligns the face, runs the (module-level) ``network`` generator
    with one zero style vector, and warps the result back into the original
    image frame together with a feathered elliptical alpha mask for blending.

    Args:
        img: RGB image as a numpy array (H, W, 3), uint8 range [0, 255].

    Returns:
        (face, alpha_mask): the cartoonized face and a float blending mask,
        both warped back to the original image's coordinate frame.
    """
    num_styles = 1
    scale = 1.0
    aligned = False

    # cv2.warpAffine takes dsize as (width, height).
    original_shape = (img.shape[1], img.shape[0])

    if not aligned:
        # detect_align returns the aligned face crop and the affine transform
        # that maps the original image into the aligned crop.
        img, tfm = detect_align(img)

    # Normalize pixels to roughly [-1, 1], the range the network expects.
    img = (img - 127.5) / 128.0

    images = np.tile(img[None], [num_styles, 1, 1, 1])
    scales = scale * np.ones((num_styles,))
    styles = np.zeros((num_styles, network.input_style.shape[1]))

    output = network.generate_BA(images, scales, 16, styles=styles)
    output = 0.5 * output + 0.5  # map from [-1, 1] back to [0, 1]

    # getStructuringElement expects ksize as (width, height); output[0].shape
    # is (height, width, channels). (Equivalent for square outputs.)
    mask_size = (int(output[0].shape[1]), int(output[0].shape[0]))
    alpha_mask = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, mask_size)
    # Feather the hard ellipse edge so the blend into the original is smooth.
    alpha_mask = cv2.blur(alpha_mask.astype(np.float64), (25, 25), borderType=cv2.BORDER_CONSTANT)

    # Warp the cartoonized crop and its mask back into the original frame.
    face = cv2.warpAffine(output[0], tfm, original_shape, flags=cv2.WARP_INVERSE_MAP)
    alpha_mask = cv2.warpAffine(alpha_mask, tfm, original_shape, flags=cv2.WARP_INVERSE_MAP)

    return face, alpha_mask
# Example #2
def process(network, img, aligned=False, num_styles=5, styles=None, scale=1.):
    """Generate ``num_styles`` cartoon renderings of a face image.

    Args:
        network: WarpGAN-like model exposing ``generate_BA``.
        img: RGB face image, uint8 range [0, 255].
        aligned: skip face detection/alignment when True.
        num_styles: number of style variations to produce.
        styles: optional style matrix; sampled via ``get_styles`` when None.
        scale: exaggeration scale passed to the generator.

    Returns:
        Generated images in [0, 1], or None when no face is found.
    """
    if not aligned:
        img = detect_align(img)
        if img is None:
            return None

    # Shift/scale pixels to roughly [-1, 1] for the network.
    normalized = (img - 127.5) / 128.0

    # Replicate the single face across the batch, one copy per style.
    batch = np.tile(normalized[None], [num_styles, 1, 1, 1])
    batch_scales = scale * np.ones((num_styles))

    if styles is None:
        styles = get_styles(network, num_styles)

    raw = network.generate_BA(batch, batch_scales, 16, styles=styles)
    # Map generator output from [-1, 1] back to [0, 1].
    return 0.5 * raw + 0.5
# Example #3
def process(inputs, ctx, **kwargs):
    """Serving entry point: cartoonize the face in the incoming frame.

    Reads a frame from ``inputs``, aligns the face, runs the WarpGAN model
    stored in ``ctx.global_ctx``, and returns the result (JPEG-encoded when
    not streaming). Emits a black 256x256 placeholder when no face is found.

    Raises:
        RuntimeError: if the input frame cannot be decoded.
    """
    LOG.info("process incoming")

    frame, is_streaming = helpers.load_image(inputs, 'input')
    if frame is None:
        raise RuntimeError("Unable to read frame")

    LOG.info("input frame size: {}".format(frame.shape))

    network = ctx.global_ctx[0]
    styles = ctx.global_ctx[1]
    scale = 1.0

    frame = detect_align(frame)

    # NOTE: check for None BEFORE touching frame.shape — detect_align returns
    # None when no face is found, and logging frame.shape first would raise.
    if frame is None:
        # No face: return a black placeholder instead of failing the request.
        output = np.zeros((256, 256, 1), dtype="uint8")
        LOG.info("no aligned image")

    else:
        LOG.info("aligned frame size: {}".format(frame.shape))
        # Normalize pixels to roughly [-1, 1] for the network.
        frame = (frame - 127.5) / 128.0
        LOG.info("aligned image exists")

        images = np.tile(frame[None], [1, 1, 1, 1])
        scales = scale * np.ones(1)

        output = network.generate_BA(images, scales, 16, styles=styles)
        output = 0.5 * output + 0.5  # back to [0, 1]

        # Scale by 255 and clip: multiplying by 256 would wrap 1.0 -> 0 in uint8.
        output = np.clip(output[0] * 255.0, 0, 255).astype('uint8')

    LOG.info("output frame size: {}".format(output.shape))
    if not is_streaming:
        # RGB -> BGR for OpenCV's JPEG encoder; tobytes() replaces the
        # long-deprecated tostring().
        _, buf = cv2.imencode('.jpg', output[:, :, ::-1])
        output = buf.tobytes()

    return {'output': output}
# Example #4
    def generate_cartoon(self, img):
        """Produce ``self.num_styles`` caricatures of a face image.

        Aligns the face first unless ``self.isAligned`` is set, samples random
        style vectors, and runs ``self.warpGAN``. Returns None (implicitly)
        when no face is detected; otherwise the generated images in [0, 1].
        """
        if not self.isAligned:
            t_detect_start = time.time()
            img = detect_align(img)
            t_detect_end = time.time()
            print("detect time cost ", t_detect_end - t_detect_start, "   s")
            if img is None:
                print("no face in img ******")
                return
        # Normalize pixels to roughly [-1, 1] for the generator.
        img = (img - 127.5) / 128.0

        # One copy of the face per requested style.
        batch = np.tile(img[None], [self.num_styles, 1, 1, 1])
        batch_scales = 1.0 * np.ones((self.num_styles))
        # Random Gaussian style codes sized to the model's style input.
        random_styles = np.random.normal(
            0., 1., (self.num_styles, self.warpGAN.input_style.shape[1].value))

        t_gen_start = time.time()
        result = self.warpGAN.generate_BA(batch, batch_scales, 16, styles=random_styles)
        result = 0.5 * result + 0.5  # map [-1, 1] back to [0, 1]
        t_gen_end = time.time()
        print("generate caricatue time cost: ", t_gen_end - t_gen_start, "   s.")
        return result
# Example #5
                    type=float,
                    default=1.0)
parser.add_argument("--aligned",
                    help="Set true if the input face is already normalized",
                    action='store_true')
args = parser.parse_args()

if __name__ == '__main__':

    # Load the trained WarpGAN generator from the given model directory.
    network = WarpGAN()
    network.load_model(args.model_dir)

    img = misc.imread(args.input, mode='RGB')

    if not args.aligned:
        from align.detect_align import detect_align
        img = detect_align(img)
        # detect_align returns None when no face is found; fail loudly here
        # instead of crashing later on (None - 127.5).
        if img is None:
            raise RuntimeError("No face detected in input image: {}".format(args.input))

    # Normalize pixels to roughly [-1, 1], the network's expected input range.
    img = (img - 127.5) / 128.0

    # One copy of the face per requested style, each with its own random style code.
    images = np.tile(img[None], [args.num_styles, 1, 1, 1])
    scales = args.scale * np.ones((args.num_styles,))
    styles = np.random.normal(
        0., 1., (args.num_styles, network.input_style.shape[1].value))

    output = network.generate_BA(images, scales, 16, styles=styles)
    output = 0.5 * output + 0.5  # map generator output from [-1, 1] to [0, 1]

    for i in range(args.num_styles):
        misc.imsave(args.output + '_{}.jpg'.format(i), output[i])