Example #1
def engine_process_image(engine_model, image, style_name):
    content = engine_model['content']
    style = engine_model['style']
    output_image = engine_model['output_image']
    sess = engine_model['sess']

    content_img = [image_pil_to_ndarray(image)]
    content_img = np.stack(content_img, axis=0)

    # Look up the style image path from style_name
    _styles_dict = get_all_styles()
    if style_name not in _styles_dict:
        raise Exception('no such style: {}'.format(style_name))
    style_img = get_images(_styles_dict[style_name])

    logger.info('processing image with style %s' % style_name)
    result = sess.run(output_image,
                      feed_dict={
                          content: content_img,
                          style: style_img
                      })

    _img_out = result[0].astype('uint8')
    #print('--> type of _img_out:', type(_img_out))
    image_return = image_ndarray_to_pil(_img_out)

    return image_return
Example #2
def init_model(gpu_id=-1):
    # model init
    net = BiSeNet(n_classes=19)
    if gpu_id >= 0:
        #print('--> gpu_id:', gpu_id)
        with torch.cuda.device(gpu_id):
            net = net.cuda()
    ckp = os.path.join(app_config.MODEL_DIR, 'face_makeup_pt/cp/79999_iter.pth')
    # map_location='cpu' keeps the load working when gpu_id < 0 on a CPU-only host
    net.load_state_dict(torch.load(ckp, map_location='cpu'))
    net.eval()

    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])

    model = {}
    model['net'] = net
    model['to_tensor'] = to_tensor
    model['model_size'] = 512  # default 512; whether other sizes work is untested
    model['gpu_id'] = gpu_id
    #model['filterBlur'] = MyGaussianBlur(radius=9, bounds=(0,0,_w,_w))
    model['filterBlur'] = MyGaussianBlur(radius=4)

    #if ENABLE_ESRGAN:
    #    model['esrgan'] = esrgan_adapter.init_model(
    #                              gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info('face_makeup_pt init_model done! gpu_id: {}'.format(gpu_id))
    return model
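A minimal inference sketch for the dict returned above, assuming face-parsing.PyTorch's BiSeNet (whose forward pass returns a tuple of logit maps, the first at full resolution); input_pil is a hypothetical PIL face image:

model = init_model(gpu_id=-1)
img = input_pil.resize((512, 512))                  # model['model_size']
t = model['to_tensor'](img).unsqueeze(0)            # 1 x 3 x 512 x 512
with torch.no_grad():
    out = model['net'](t)[0]                        # full-resolution logits
parsing = out.squeeze(0).argmax(0).cpu().numpy()    # H x W label map, 19 classes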
Example #3
def init_model(gpu_id=-1, style='manga-face'):

    # Should computation be done on the GPU if available?
    use_cuda = gpu_id >= 0 and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Load the style transfer model
    model_path = os.path.join(
        app_config.MODEL_DIR,
        'face_preserving_style_transfer_pt/models/{}.pth'.format(style))
    with torch.no_grad():
        img_transform = utils.load_model(model_path,
                                         ImageTransformNet()).to(device)
    logger.info(
        "Loading the style transfer model ({}) ... Done.".format(style))

    engine_model = {}
    engine_model['device'] = device
    engine_model['img_transform'] = img_transform
    engine_model['model_size'] = 500  # the model has no input-size requirement, so use the standard size

    #if ENABLE_ESRGAN:
    #    engine_model['esrgan'] = esrgan_adapter.init_model(
    #                                      gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info(
        'face_preserving_style_transfer_pt init_model done! gpu_id: {}'.format(
            gpu_id))

    return engine_model
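A minimal application sketch for the returned dict; the 0-1 input scaling is an assumption (the exact value convention comes from the face_preserving_style_transfer repo), and input_pil is hypothetical:

engine = init_model(gpu_id=0, style='manga-face')
x = transforms.ToTensor()(input_pil).unsqueeze(0).to(engine['device'])
with torch.no_grad():
    y = engine['img_transform'](x)
# convert y back to an image per the upstream repo's value convention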
Example #4
def init_model(gpu_id=0):
    # _model= unet.UNet(11)
    # _model = r18unet.ResNetUNet(11)
    _model = mobileunet.MobileUNet(11)

    save_path = os.path.join(app_config.MODEL_DIR, 'face_parsing/model_80.pth')
    state_dict = torch.load(save_path, map_location='cpu')  # CPU-safe load; moved to GPU below if needed
    from collections import OrderedDict
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        namekey = k[7:]  # strip the 'module.' prefix added by DataParallel
        new_state_dict[namekey] = v
    _model.load_state_dict(new_state_dict)

    if gpu_id >= 0:
        with torch.cuda.device(gpu_id):
            _model = _model.cuda()
    _model.eval()

    model = {}
    model['model'] = _model
    model['face_areas'] = ('face', 'l_brow', 'r_brow', 'l_eye', 'r_eye',
                           'nose', 'u_lip', 'in_mouth', 'l_lip', 'hair')
    _w = app_config.FACE_MIN_SIZE
    model['filterBlur'] = MyGaussianBlur(radius=9, bounds=(0, 0, _w, _w))

    logger.info('face_parsing init_model done! gpu_id: {}'.format(gpu_id))
    return model
Example #5
def init_model(gpu_id=-1):
    # model init
    _model = BiSeNet(n_classes=19)
    if gpu_id >= 0:
        #print('--> gpu_id:', gpu_id)
        with torch.cuda.device(gpu_id):
            _model = _model.cuda()
    model_path = os.path.join(app_config.MODEL_DIR,
                              'face_makeup_pt/cp/79999_iter.pth')
    # map_location='cpu' keeps the load working when gpu_id < 0 on a CPU-only host
    _model.load_state_dict(torch.load(model_path, map_location='cpu'))
    _model.eval()

    model = {}
    model['model'] = _model
    model['gpu_id'] = gpu_id

    to_tensor = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    model['to_tensor'] = to_tensor

    model['face_areas'] = ('face', 'l_brow', 'r_brow', 'l_eye', 'r_eye',
                           'eye_g', 'l_ear', 'r_ear', 'ear_r', 'nose', 'mouth',
                           'u_lip', 'l_lip', 'neck', 'neck_l', 'cloth', 'hair',
                           'hat')
    _w = app_config.FACE_MIN_SIZE
    #model['filterBlur'] = MyGaussianBlur(radius=9, bounds=(0,0,_w,_w))
    model['filterBlur'] = MyGaussianBlur(radius=6)

    logger.info('face_parsing init_model DONE! gpu_id: {}'.format(gpu_id))
    return model
Example #6
def init_model(gpu_id=-1, effect=None):

    parser = test_parse.ArgumentParser()
    args = parser.parse_args()
    args.local_model = True
    args.pretrained = not args.local_model
    if gpu_id >= 0:
        args.gpu_id = gpu_id

    if effect not in ('facehair', 'feminization', 'masculinization', 'older',
                      'younger'):
        raise Exception('unsupported effect: {}'.format(effect))

    # Should the different effects be loaded in one subprocess or in separate ones?
    # Conclusion: separate processes are likely better; they parallelize well and
    # exploit multiple processes and multiple GPUs.
    facelet = Facelet(args)
    if args.local_model:
        #pretrain_path = 'checkpoints'
        pretrain_path = 'facelet_bank'
        pretrain_path = os.path.join(app_config.MODEL_DIR, pretrain_path)
        facelet.load(effect, pretrain_path)

    vgg = VGG()

    logger.info('torch version: {}'.format(torch.version.__version__))
    decoder = vgg_decoder()

    if args.gpu_id >= 0:
        with torch.cuda.device(args.gpu_id):
            #vgg = vgg.cuda()
            vgg = torch.nn.DataParallel(vgg).cuda()
            facelet = facelet.cuda()
            decoder = decoder.cuda()

    model = {}
    model['args'] = args
    model['vgg'] = vgg
    model['facelet'] = facelet
    model['decoder'] = decoder
    model['model_size'] = 384

    #if ENABLE_ESRGAN:
    #    model['esrgan'] = esrgan_adapter.init_model(gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info('facelet_pt init_model done! gpu_id: {}, effect: {}'.format(
        gpu_id, effect))
    return model
Example #7
def init_model(gpu_id=-1, style='Hayao'):

    tf.reset_default_graph()  # start from a clean default graph
    sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))

    # The snippet below only inspects which device TF runs on; it has no other purpose
    #a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
    #b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
    #c = tf.matmul(a, b)
    # print(sess.run(c))

    # build the dataflow graph
    content = tf.placeholder(tf.float32,
                             shape=(1, None, None, 3),
                             name='content')
    style = tf.placeholder(tf.float32, shape=(1, None, None, 3), name='style')

    encoder_path = os.path.join(app_config.MODEL_DIR, 'vgg19_normalised.npz')
    stn = StyleTransferNet(encoder_path)

    output_image = stn.transform(content, style)

    sess.run(tf.global_variables_initializer())

    # restore the trained model and run the style transferring
    saver = tf.train.Saver()
    model_path = os.path.join(
        app_config.MODEL_DIR,
        'arbitrary_style_transfer/models/style_weight_2e0.ckpt')
    saver.restore(sess, model_path)

    engine_model = {}
    engine_model['content'] = content
    engine_model['style'] = style
    engine_model['output_image'] = output_image
    engine_model['sess'] = sess
    engine_model['model_size'] = 500  # no size imposed by the model; use the standard 500 (larger looks better but is slower)

    #if ENABLE_ESRGAN:
    #    engine_model['esrgan'] = esrgan_adapter.init_model(
    #        gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info(
        'arbitrary_style_transfer init_model done! gpu_id: {}, style: {}'.
        format(gpu_id, style))
    return engine_model
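This init pairs with Example #1's engine_process_image; a minimal wiring sketch, assuming 'Hayao' appears in get_all_styles() and input_pil is a PIL image:

engine = init_model(gpu_id=0, style='Hayao')
out_pil = engine_process_image(engine, input_pil, 'Hayao')  # see Example #1
out_pil.save('styled.jpg')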
Example #8
def process_image(image, engine_model, output_w, output_h):
    # No super-resolution needed if the input is already at least as large as the output
    in_w, in_h = image.size
    if in_h >= output_h:
        return image

    t1 = time.time()
    image = np.array(image).astype(np.uint8)  # pil to numpy.array

    image = image * 1.0 / 255
    image = torch.from_numpy(np.transpose(image[:, :, [2, 1, 0]],
                                          (2, 0, 1))).float()
    img_LR = image.unsqueeze(0)

    model = engine_model['model']

    if torch.__version__ >= '0.4':
        img_LR = img_LR.to(torch.device('cuda'))
        with torch.no_grad():
            output = model(img_LR).data.squeeze().float().cpu().clamp_(
                0, 1).numpy()
    else:
        from torch.autograd import Variable
        img_LR = Variable(img_LR, volatile=True)
        img_LR = img_LR.cuda()
        output = model(img_LR).data.squeeze().float().cpu().clamp_(0,
                                                                   1).numpy()

    output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
    output = (output * 255.0).round().astype(np.uint8)

    output = transforms.ToPILImage()(output)  # numpy.array to pil
    output = image_resize_default(output, output_h)

    t2 = time.time()
    print('--> esrgan: {}s used'.format(t2 - t1))
    #print('--> type(output): {}'.format(type(output)))
    logger.info(
        'esrgan process image, input size: {} w, {} h, output size: {} w, {} h'
        .format(in_w, in_h, output.size[0], output.size[1]))

    return output
Example #9
def init_model(gpu_id=-1):
    # RRDB_ESRGAN_x4.pth OR RRDB_PSNR_x4.pth
    #print('--> app_config.MODEL_DIR:', app_config.MODEL_DIR)
    model_path = osp.join(app_config.MODEL_DIR,
                          'ESRGAN_models/RRDB_ESRGAN_x4.pth')

    _model = arch.RRDBNet(3, 3, 64, 23, gc=32)
    # load on CPU first; the model is moved to the compute device below
    _model.load_state_dict(torch.load(model_path, map_location='cpu'), strict=True)
    _model.eval()

    if torch.__version__ >= '0.4':
        _model = _model.to(torch.device('cuda'))  # or 'cpu'
    else:
        _model = _model.cuda()

    model = {}
    model['model'] = _model

    logger.info('esrgan init_model DONE! gpu_id: {}'.format(gpu_id))
    return model
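A minimal sketch chaining Examples #8 and #9; a CUDA device is assumed (the code above requires one) and low_res_pil is hypothetical:

esrgan = init_model()                                    # Example #9
hi_res = process_image(low_res_pil, esrgan, 1024, 1024)  # Example #8
hi_res.save('upscaled.jpg')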
Example #10
def init_model(gpu_id=-1, style='Hayao'):

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_dir', default='test_img')
    parser.add_argument('--load_size',
                        default=360)  # upstream default size is 450 (larger needs more GPU memory, ~3.6 GB at that default)
    parser.add_argument('--model_path', default='./pretrained_model')
    parser.add_argument('--style', default='Hayao')
    parser.add_argument('--output_dir', default='test_output')
    parser.add_argument('--gpu', type=int, default=0)
    opt = parser.parse_args([])  # parse defaults only; don't consume the host process's argv
    opt.gpu = gpu_id
    opt.style = style

    # load pretrained model
    model = Transformer()
    model.load_state_dict(
        torch.load(
            os.path.join(script_dir, opt.model_path,
                         opt.style + '_net_G_float.pth')))
    model.eval()

    if opt.gpu > -1:
        print('GPU mode')
        model.cuda()
    else:
        print('CPU mode')
        model.float()

    engine_model = {}
    engine_model['opt'] = opt
    engine_model['model'] = model
    engine_model['model_size'] = opt.load_size

    #if ENABLE_ESRGAN:
    #    engine_model['esrgan'] = esrgan_adapter.init_model(
    #        gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info('cartoongan_pt init_model done! gpu_id: {}, style: {}'.format(
        gpu_id, style))
    return engine_model
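A minimal inference sketch; the BGR channel order and [-1, 1] scaling follow the upstream CartoonGAN-PyTorch test script, which is an assumption here, and input_pil is hypothetical:

engine = init_model(gpu_id=0, style='Hayao')
x = transforms.ToTensor()(input_pil)[[2, 1, 0], ...] * 2 - 1  # RGB -> BGR, [-1, 1]
with torch.no_grad():
    y = engine['model'](x.unsqueeze(0).cuda())[0]
out_pil = transforms.ToPILImage()(((y[[2, 1, 0], ...] + 1) / 2).clamp(0, 1).cpu())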
Example #11
def init_model(gpu_id=-1, model_size=256):  # default model_size: 256

    tf.reset_default_graph()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    saver = tf.train.import_meta_graph(
        os.path.join(script_dir, 'model', 'model.meta'))
    saver.restore(sess, tf.train.latest_checkpoint(
        os.path.join(script_dir, 'model')))

    graph = tf.get_default_graph()
    X = graph.get_tensor_by_name('X:0')
    Y = graph.get_tensor_by_name('Y:0')
    Xs = graph.get_tensor_by_name('generator/xs:0')

    model = {}
    model['sess'] = sess
    model['X'] = X
    model['Y'] = Y
    model['Xs'] = Xs

    model['model_size'] = model_size
    print('set model_size: {0}'.format(model_size))

    makeup_files = glob.glob(os.path.join(script_dir, 'imgs', 'makeup', '*.*'))
    #print('--> makeup_files: {0}'.format(makeup_files))
    model['makeups'] = {
        os.path.splitext(os.path.basename(f))[0]:
            cv2.resize(imread(f), (model_size, model_size))
        for f in makeup_files
    }
    for k in model['makeups']:
        model['makeups'][k] = cv2.cvtColor(model['makeups'][k], cv2.COLOR_RGBA2RGB)
    
    model['all_styles'] = get_all_styles()
    
    #if ENABLE_ESRGAN:
    #    model['esrgan'] = esrgan_adapter.init_model(gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info('beautygan init_model done! gpu_id: {}, loaded makeups: {}'
                .format(gpu_id, model['makeups'].keys()))
    return model
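A minimal transfer sketch for the graph tensors stored above, assuming the standard BeautyGAN convention of RGB images scaled to [-1, 1] (the scaling is an assumption; face_rgb is a hypothetical H x W x 3 uint8 face crop):

m = init_model(model_size=256)
src = cv2.resize(face_rgb, (256, 256)) / 127.5 - 1.0  # no-makeup face
style = next(iter(m['makeups']))                      # any loaded makeup style
ref = m['makeups'][style] / 127.5 - 1.0
out = m['sess'].run(m['Xs'], feed_dict={m['X']: src[None], m['Y']: ref[None]})
result = ((out[0] + 1.0) * 127.5).astype(np.uint8)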
Example #12
def check_if_save_demo_icon(img_pil, icon_dir, icon_name):
    """将 icon demo 图片的人脸处理结果保存为图标"""
    
    if app_config.DEMO_ICON_ENABLE:
        os.makedirs(icon_dir, exist_ok=True)
        icon_file = os.path.join(icon_dir, '{}.jpg'.format(icon_name))
        
        image_out = img_pil.copy()

        # Non-face-detection images are padded to a square, so center-crop the result; otherwise the border makes an ugly icon
        img_w, img_h = img_pil.size
        #print('--> img_w: %s, img_h: %s' % (img_w, img_h))
        if img_w >= 380:
            _sz = 380
            x0 = int((img_w - _sz) / 2)
            y0 = int((img_h - _sz) / 2)
            x1 = x0 + _sz
            y1 = y0 + _sz
            image_out = image_out.crop((x0, y0, x1, y1))
        
        icon_img_pil = image_resize_default(image_out, 200)  # resize to the icon size the frontend needs
        icon_img_pil.save(icon_file)
        logger.info('icon demo image saved as icon {:s}'.format(icon_file))
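A minimal usage sketch; the directory, icon name and input image are placeholders, and nothing is written unless app_config.DEMO_ICON_ENABLE is set:

check_if_save_demo_icon(result_pil, '/tmp/icons', 'older')  # writes /tmp/icons/older.jpg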
Example #13
def init_model(gpu_id=-1):

    args = parse_args()

    # open session
    #tf.reset_default_graph()
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    gan = UGATIT(sess, args)

    # build graph
    gan.build_model()

    # show network architecture
    show_all_variables()

    # load checkpoint
    tf.global_variables_initializer().run(session=sess)
    gan.saver = tf.train.Saver()
    checkpoint_dir = os.path.join(app_config.MODEL_DIR, 'UGATIT/checkpoint-tf')
    could_load, checkpoint_counter = gan.load(checkpoint_dir)
    if could_load:
        logger.info(" [*] Load checkpoint SUCCESS")
    else:
        logger.error(
            " [!] Load checkpoint failed... {}".format(checkpoint_dir))

    model = {}
    model['args'] = args
    model['gan'] = gan
    model['model_size'] = 256  # default 256

    logger.info('ugatit init_model done! gpu_id: {}'.format(gpu_id))

    #if ENABLE_ESRGAN:
    #    model['esrgan'] = esrgan_adapter.init_model(gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    return model
Example #14
def init_model(experiment_name='384_shortcut1_inject0_none_hq', gpu_id=-1):
    """Function that loads Deep Learning model.
    Args: 
        experiment_name: 384_shortcut1_inject0_none_hq, 384_shortcut1_inject1_none_hq 或其他
    Returns:
        model: Loaded Deep Learning model.
    """

    with open(join(script_dir, 'output', experiment_name, 'setting.txt'),
              'r') as f:
        args = json.load(f, object_hook=lambda d: argparse.Namespace(**d))
    logger.info('torch version: {}'.format(torch.version.__version__))

    args.gpu_id = gpu_id
    args.load_epoch = 'latest'
    args.num_test = None
    args.my_data = './data/my'
    args.my_attr = './data/list_attr_my.txt'
    logger.info('args: {}'.format(args))

    os.makedirs(join(script_dir, args.my_data), exist_ok=True)

    attgan = AttGAN(args)
    attgan.load(
        find_model(
            join(script_dir, 'output', args.experiment_name, 'checkpoint'),
            args.load_epoch))
    attgan.eval()

    model = {}
    model['attgan'] = attgan
    model['args'] = args
    model['model_size'] = 384  # attgan_pt uses a model size of 384

    #if ENABLE_ESRGAN:
    #    model['esrgan'] = esrgan_adapter.init_model(gpu_id=app_config.ENGINE_GPU_ID['esrgan'])

    logger.info('attgan_pt init_model done! gpu_id: {}'.format(gpu_id))
    return model
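A hypothetical wiring sketch using the helpers that appear in Example #17 below (MyTestData and attgan_multi_process are this project's; the face array, attributes and intensity are placeholders):

model = init_model('384_shortcut1_inject0_none_hq', gpu_id=0)
dataset = MyTestData(face_np, face_attrs, model['model_size'])  # face crop + attributes
out_pil = attgan_multi_process(model, dataset, ['Young'], [0.8])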
Example #15
def init_model(gpu_id=-1):

    # configure by command-line arguments
    parser = argparse.ArgumentParser(
        description='Generate high resolution face transformations.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--backend',
                        type=str,
                        default='torch',
                        choices=['torch', 'caffe+scipy'],
                        help='reconstruction implementation')
    parser.add_argument('--device_id',
                        type=int,
                        default=-1,
                        help='zero-indexed CUDA device')
    parser.add_argument(
        '--K',
        type=int,
        default=15,  # default 100
        help='number of nearest neighbors')
    parser.add_argument('--scaling',
                        type=str,
                        default='none',
                        choices=['none', 'beta'],
                        help='type of step scaling')
    parser.add_argument(
        '--iter',
        type=int,
        default=50,  # default 500
        help='number of reconstruction iterations')
    parser.add_argument(
        '--postprocess',
        type=str,
        default='mask',
        help='comma-separated list of postprocessing operations')
    parser.add_argument(
        '--delta',
        type=str,
        default='2.5',  #0.0-5.0
        help='comma-separated list of interpolation steps')
    parser.add_argument('--output_format',
                        type=str,
                        default='png',
                        choices=['png', 'jpg'],
                        help='output image format')
    parser.add_argument('--comment',
                        type=str,
                        default='',
                        help='the comment is appended to the output filename')
    parser.add_argument('--extradata',
                        action='store_true',
                        default=False,
                        help='extra data is saved')
    parser.add_argument('--output',
                        type=str,
                        default='',
                        help='output is written to this pathname')
    parser.add_argument(
        '--include_original',
        action='store_true',
        default=False,
        help='the first column of the output is the original image')

    config = parser.parse_args([])  # parse defaults only; don't consume the host process's argv
    if gpu_id >= 0:
        config.device_id = gpu_id
    logger.info('deep_feat_interp config: {}'.format(
        json.dumps(config.__dict__)))

    # load models
    if config.backend == 'torch':
        from . import deepmodels_torch
        _model = deepmodels_torch.vgg19g_torch(device_id=config.device_id)
    elif config.backend == 'caffe+scipy':
        _model = deepmodels.vgg19g(device_id=config.device_id)
    else:
        raise ValueError('Unknown backend')
    classifier = deepmodels.facemodel_attributes()

    face_d, face_p = alignface.load_face_detector(predictor_path=os.path.join(
        app_config.MODEL_DIR, 'shape_predictor_68_face_landmarks.dat'))

    model = {}
    model['config'] = config
    model['model'] = _model
    model['classifier'] = classifier
    model['face_d'] = face_d
    model['face_p'] = face_p

    logger.info('deep_feat_interp init_model done! gpu_id: {}'.format(gpu_id))
    return model
Example #16
def process_image(image,
                  face_data,
                  engine_model,
                  func_name,
                  func_params=None,
                  worker=None):
    """ 对上传图片进行处理
    Returns:
        image_output: 处理后的图片数据
    """

    logger.info('call process_image. func_name: "{}", func_params: "{}", '
                'face_data["attr_dict"]: {}'.format(func_name, func_params,
                                                    face_data["attr_dict"]))

    model = engine_model['model']
    classifier = engine_model['classifier']
    fields = classifier.fields()
    face_d = engine_model['face_d']
    face_p = engine_model['face_p']

    effect = func_name
    effect_options_a = ['older', 'younger', 'facehair', 'senior']
    effect_options_b = []
    #effect_options_b = ['Arched_Eyebrows', 'Big_Nose', 'Bushy_Eyebrows', 'Eyeglasses', 'Pale_Skin', 'Pointy_Nose', 'Smiling', 'asian', 'white', 'Wavy_Hair', 'child', 'shiny_skin', 'sunglasses', 'strong_nose_mouth_lines']
    effect_options_b.extend(fields)
    #print('--> effect_options_b:', effect_options_b)
    if effect not in effect_options_a + effect_options_b:
        raise Exception('unsupported effect: {}'.format(effect))

    image_output = image

    minimum_resolution = 200
    config = engine_model['config']
    postprocess = set(config.postprocess.split(','))
    # Set the free parameters
    K = config.K
    config.delta = str(func_params[0])  # use the parameter passed in from the frontend
    delta_params = [float(x.strip()) for x in config.delta.split(',')]
    multi_num = 4  # sample-set multiplier (default: 4)

    t0 = time.time()

    xX = image
    print("processing test image {}...".format(xX))
    template, original = alignface.detect_landmarks(None,
                                                    face_d,
                                                    face_p,
                                                    image=xX)
    image_dims = original.shape[:2]
    if min(image_dims) < minimum_resolution:
        s = float(minimum_resolution) / min(image_dims)
        image_dims = (int(round(image_dims[0] * s)),
                      int(round(image_dims[1] * s)))
        original = imageutils.resize(original, image_dims)
    t1 = time.time()
    print('--> {} seconds to detect landmark'.format(int(t1 - t0)))

    XF = model.mean_F([original])
    XA = classifier.score([xX])[0]
    print(xX, ', '.join(k for i, k in enumerate(fields) if XA[i] >= 0))
    t2 = time.time()
    print('--> {} seconds to get image score'.format(int(t2 - t1)))

    # select positive and negative sets based on gender and mouth
    print("select positive and negative sets based on gender and mouth")
    # effect options:
    # '5_o_Clock_Shadow', 'Arched_Eyebrows', 'Bags_Under_Eyes', 'Bald', 'Bangs', 'Big_Lips', 'Big_Nose', 'Black_Hair', 'Blond_Hair', 'Brown_Hair', 'Bushy_Eyebrows', 'Eyeglasses', 'Goatee', 'Gray_Hair', 'Heavy_Makeup', 'Male', 'Mouth_Slightly_Open', 'Mustache', 'Narrow_Eyes', 'No_Beard', 'Pale_Skin', 'Pointy_Nose', 'Receding_Hairline', 'Sideburns','Smiling', 'Straight_Hair', 'Wavy_Hair', 'Wearing_Lipstick', 'Young', 'asian', 'baby', 'black', 'brown_eyes', 'child', 'eyes_open', 'frowning', 'fully_visible_forehead', 'indian', 'middle_aged', 'mouth_closed', 'mouth_wide_open', 'no_eyewear', 'obstructed_forehead', 'partially_visible_forehead', 'senior', 'shiny_skin', 'strong_nose_mouth_lines', 'sunglasses', 'teeth_not_visible', 'white'
    _idx = fields.index
    _attr_dict = face_data['attr_dict']
    def lambda_filter(xa, idx, _true):
        # keep samples whose attribute score sign matches the requested value
        return xa[idx] >= 0 if _true else xa[idx] < 0

    # eg: lambda_filter(XA, _idx('Male'), _attr_dict['Male'])
    select_done = False
    # First filter by the standard attributes
    for option in effect_options_b:
        if effect == option:
            cP = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx(option), False)]  # negative set
            for x in ('Bald', 'Bangs'):  # also filter on bangs/baldness to improve quality
                if effect != x:
                    cP.append((_idx(x), _attr_dict[x]))
            cQ = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx(option), True)]  # positive set
            for x in ('Bald', 'Bangs'):  # also filter on bangs/baldness to improve quality
                if effect != x:
                    cQ.append((_idx(x), _attr_dict[x]))
            select_done = True
            break
    # Then filter by the custom effects
    if not select_done:
        if effect == 'older':
            cP = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('Young'), True)]
            cQ = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('Young'), False)]
        elif effect == 'younger':
            cP = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('Young'), False)]
            cQ = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('Young'), True)]
        elif effect == 'facehair':
            cP = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('No_Beard'), True), (_idx('Mustache'), False)]
            cQ = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('No_Beard'), False), (_idx('Mustache'), True)]
        elif effect == 'senior':
            cP = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('senior'), False)]
            cQ = [(_idx('Male'),
                   lambda_filter(XA, _idx('Male'), _attr_dict['Male'])),
                  (_idx('senior'), True)]
        else:
            raise ValueError('Unknown effect: {}'.format(effect))

    P = classifier.select(cP, XA)
    Q = classifier.select(cQ, XA)
    if len(P) < K * multi_num or len(Q) < K * multi_num:
        msg = '{}: Not enough images in database (|P|={}, |Q|={}).'.format(
            xX, len(P), len(Q))
        logger.warning(msg)
        raise Exception(msg)

    t3 = time.time()
    print('--> {} seconds to select images set'.format(int(t3 - t2)))

    print("fit the best {} database images to input image".format(K *
                                                                  multi_num))
    Plm = classifier.lookup_landmarks(P[:K * multi_num])
    Qlm = classifier.lookup_landmarks(Q[:K * multi_num])
    idxP, lossP, MP = fit_submanifold_landmarks_to_image(
        template, original, Plm, face_d, face_p)  # ~5 s
    idxQ, lossQ, MQ = fit_submanifold_landmarks_to_image(
        template, original, Qlm, face_d, face_p)  # ~5 s
    t4 = time.time()
    print('--> {} seconds to fit {} images'.format(int(t4 - t3),
                                                   K * multi_num))

    print("use the {} best fitted images".format(K))
    xP = [P[i] for i in idxP[:K]]
    xQ = [Q[i] for i in idxQ[:K]]
    PF = model.mean_F(utils.warped_image_feed(xP, MP[idxP[:K]],
                                              image_dims))  # ~3 s
    QF = model.mean_F(utils.warped_image_feed(xQ, MQ[idxQ[:K]],
                                              image_dims))  # ~3 s
    if config.scaling == 'beta':
        WF = (QF - PF) / ((QF - PF)**2).mean()
    elif config.scaling == 'none':
        WF = (QF - PF)
    t5 = time.time()
    print('--> {} seconds to mean_F {} best images'.format(int(t5 - t4), K))

    print("for each interpolation step ...")
    max_iter = config.iter
    init = original
    result = []
    for delta in delta_params:
        print(xX, image_dims, delta, len(xP), len(xQ))
        t7 = time.time()
        Y = model.F_inverse(XF + WF * delta,
                            max_iter=max_iter,
                            initial_image=init)  # ~15 s
        t8 = time.time()
        print('--> {} seconds to reconstruct at delta {}'.format(
            int(t8 - t7), delta))
        result.append(Y)
        max_iter = config.iter // 2
        init = Y
    result = np.asarray([result])
    original = np.asarray([original])
    X_mask = '-mask.png'  # FIXME: use the real mask path, if one exists
    if 'mask' in postprocess and os.path.exists(X_mask):
        mask = imageutils.resize(imageutils.read(X_mask), image_dims)
        result *= mask
        result += original * (1 - mask)
    if 'color' in postprocess:
        result = utils.color_match(np.asarray([original]), result)
    if 'mask' in postprocess and os.path.exists(X_mask):
        result *= mask
        result += original * (1 - mask)
    if config.include_original:
        m = imageutils.montage(
            np.concatenate([np.expand_dims(original, 1), result], axis=1))
    else:
        m = imageutils.montage(result)

    image_output = Image.fromarray((m * 255).astype(np.uint8))

    t10 = time.time()
    print('{} seconds ({} seconds per image).'.format(
        int(t10 - t0),
        int(t10 - t0) / len(delta_params)))

    return image_output
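A minimal call sketch for Examples #15/#16; the face_data attribute flags are inferred from how _attr_dict is read above, and pil_img is hypothetical:

engine = init_model(gpu_id=0)  # Example #15
face_data = {'attr_dict': {'Male': True, 'Bald': False, 'Bangs': False}}
older = process_image(pil_img, face_data, engine, 'older',
                      func_params=[2.5], worker=None)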
Example #17
def process_image(image,
                  face_data,
                  engine_model,
                  func_name,
                  func_params=None,
                  worker=None):
    """ 对上传图片进行处理
    Returns:
        image_output: 处理后的图片数据
    """

    image_output = image  # fall back to the input if no face was detected

    # Process and replace the face region of the image
    if 'face_image' in face_data:
        _img_np = face_data['face_image']
        x = face_data['x']
        y = face_data['y']
        w = face_data['w']
        h = face_data['h']

        face_image_orig = image_ndarray_to_pil(_img_np)

        # If face_image is not at the model's standard size, rescale before processing
        _img_w, _img_h = w, h
        _h = engine_model['model_size']
        if _img_h != _h:
            _img = image_resize_default(image_ndarray_to_pil(_img_np), _h)
            _img_np = image_pil_to_ndarray(_img)

        # Build a test dataset from face_image: content, attributes and size
        logger.info('face_image attr: {}, h {}. model size: {}.'.format(
            face_data['attr'], h, _h))
        _test_dataset = MyTestData(_img_np, face_data['attr'], _h)

        # Process the image.
        # test_atts: list of target attributes; test_ints: per-attribute intensity in 0-1
        test_atts, test_ints = func_name, func_params[0]
        logger.info('func_params: {}, test_atts: {}, test_ints: {}'.format(
            func_params, test_atts, test_ints))
        _img_out_pil = attgan_multi_process(engine_model, _test_dataset,
                                            test_atts, test_ints)

        # In debug mode, save the generated face for analysis; not needed in production
        #if app_config.DEBUG:
        #    out_file = os.path.join(script_dir, 'new_face.jpg')
        #    _img_out_pil.save(out_file)
        #    print('new image saved to {:s} done!'.format(out_file))

        # Rescale the returned image to the original size
        if _img_h != _h:
            if _img_h > _h and ENABLE_ESRGAN:
                #_img_out_pil = esrgan_adapter.process_image(
                #    _img_out_pil, engine_model['esrgan'], w, h)
                # process via the manager process's shared-object interface
                ret = exec_esrgan(_img_out_pil, _img_w, _img_h, worker)
                if ret:
                    _img_out_pil = ret
                else:
                    _img_out_pil = image_resize_default(_img_out_pil, _img_h)
            else:
                # downscale (or ESRGAN disabled): plain resize back to the original size
                _img_out_pil = image_resize_default(_img_out_pil, _img_h)

        # Blend the new and old faces and paste back onto the original image
        x = x + int((image.size[0] - app_config.OUTPUT_SIZE_EDITOR[0]) / 2)
        y = y + int((image.size[1] - app_config.OUTPUT_SIZE_EDITOR[1]) / 2)
        edit_area = func_params[1]
        image_output = face_image_blend(image, _img_out_pil, face_image_orig,
                                        x, y, w, h, edit_area, worker)

    return image_output
Example #18
 def load(self, label, pretrain_path='checkpoints'):
     logger.info('loading facelet model {}'.format(label))
     self.load_network(self.model, pretrain_path, label)
     logger.info('facelet model loaded successfully')
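A minimal call sketch for this method, wired the way Example #6 does it (args as built there; the paths follow this project's layout):

facelet = Facelet(args)
facelet.load('older', os.path.join(app_config.MODEL_DIR, 'facelet_bank'))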
Example #19
def process_image(image,
                  face_data,
                  engine_model,
                  func_name,
                  func_params=None,
                  worker=None):
    """ 对上传图片进行处理
    Returns:
        image_output: 处理后的图片数据
    注意:对于 facelet_pt 来说,由于引擎是根据 effect 分别在子进程中加载的,所以这里 func_name 没用到
    """

    logger.info(
        'call process_image. func_name: "{}", func_params: "{}"'.format(
            func_name, func_params))

    image_output = image  # fall back to the input if no face was detected

    # Process and replace the face region of the image
    if 'face_image' in face_data:
        _img_np = face_data['face_image']
        x = face_data['x']
        y = face_data['y']
        w = face_data['w']
        h = face_data['h']

        face_image_orig = image_ndarray_to_pil(_img_np)

        # Real processing is too slow on CPU and would time out on a laptop,
        # so temporarily simulate it with a sleep.
        run_fake = False  # set True on a laptop; False on dev and production servers
        if run_fake:
            time.sleep(3)
            _img_out_pil = image_ndarray_to_pil(_img_np)
        else:
            # If face_image is not at the model's standard size, rescale before processing
            _img_w, _img_h = w, h
            _h = engine_model['model_size']
            if _img_h != _h:
                _img = image_resize_default(image_ndarray_to_pil(_img_np), _h)
                _img_np = image_pil_to_ndarray(_img)

            _img_out = engine_process_image(engine_model, _img_np, func_name,
                                            func_params)

            # Convert the ndarray/Tensor to a PIL.Image
            _img_out_pil = image_ndarray_to_pil(_img_out)

            # In debug mode, save the generated face for analysis; not needed in production
            if app_config.DEBUG:
                out_file = os.path.join(script_dir, 'new_face.jpg')
                _img_out_pil.save(out_file)
                print('new image saved to {:s} done!'.format(out_file))

            # Rescale the returned image to the original size
            if _img_h != _h:
                if _img_h > _h and ENABLE_ESRGAN:
                    #_img_out_pil = esrgan_adapter.process_image(
                    #          _img_out_pil, engine_model['esrgan'], w, h)
                    # process via the manager process's shared-object interface
                    ret = exec_esrgan(_img_out_pil, _img_w, _img_h, worker)
                    if ret:
                        _img_out_pil = ret
                    else:
                        _img_out_pil = image_resize_default(
                            _img_out_pil, _img_h)
                else:
                    _img_out_pil = image_resize_default(_img_out_pil, _img_h)

        # Blend the new and old faces and paste back onto the original image
        x = x + int((image.size[0] - app_config.OUTPUT_SIZE_EDITOR[0]) / 2)
        y = y + int((image.size[1] - app_config.OUTPUT_SIZE_EDITOR[1]) / 2)
        edit_area = []
        image_output = face_image_blend(image, _img_out_pil, face_image_orig,
                                        x, y, w, h, edit_area, worker)

    return image_output
Example #20
def process_image(image,
                  face_data,
                  engine_model,
                  func_name,
                  func_params=None,
                  worker=None):
    """Process the uploaded image.
    Returns:
        image_output: the processed image data
    """

    if 'img_hash' not in face_data:  # this engine only handles face images
        return None

    # NOTE: process the original PIL image directly, not the face crop in face_data

    # If the image is taller than the model's standard size, downscale before processing
    _img_w, _img_h = image.size[0:2]
    _h = engine_model['model_size']
    if _img_h > _h:
        _img_pil = image_resize_default(image, _h)
    else:
        _img_pil = image
    _img_cv2 = image_pil_to_cv2(_img_pil)

    # Start applying makeup to the face

    # Derive the face areas to edit, and how, from the input parameters
    func_arg = func_params[0]
    edit_area = []
    for i in func_params[1]:
        if i == 'skin':  # skin tone
            edit_area.extend(['face', 'l_ear', 'r_ear', 'nose', 'l_brow', 'r_brow',
                    'u_lip', 'l_lip', 'neck'])
        elif i == 'lips':  # lips
            edit_area.extend(['u_lip', 'l_lip'])
        else:
            edit_area.append(i)
    logger.info('func_name: {}, func_arg: {}, func_params: {}, edit_area: {}'
                .format(func_name, func_arg, func_params, edit_area))
    
    # Face parser - built-in face-parsing.PyTorch: 18 face parts in total (currently used)
    # Works well on frontal faces; worse on profiles and small faces
    # 1 'face', 2 'l_brow', 3 'r_brow', 4 'l_eye', 5 'r_eye',
    # 6 'eye_g', 7 'l_ear', 8 'r_ear', 9 'ear_r', 10 'nose',
    # 11 'mouth', 12 'u_lip', 13 'l_lip', 14 'neck', 15 'neck_l',
    # 16 'cloth', 17 'hair', 18 'hat'
    face_area_map = {  # values start at 1 and increase; 0 is the background
        'face': 1,
        'l_brow': 2,
        'r_brow': 3,
        'l_eye': 4,
        'r_eye': 5,
        'eye_g': 6,
        'l_ear': 7,
        'r_ear': 8,
        'ear_r': 9,
        'nose': 10,
        'teeth': 11,  # called 'mouth' in the parser's own naming
        'u_lip': 12,
        'l_lip': 13,
        'neck': 14,
        'neck_l': 15,
        'cloth': 16,
        'hair': 17,
        'hat': 18
    }
    # First obtain the face parsing map
    face_areas = list(face_area_map.keys())
    parsing, mask_pil = evaluate(_img_pil, engine_model, face_areas, edit_area)
    parsing = cv2.resize(  # cv2.resize takes (w, h); this relies on the image being square
        parsing, _img_cv2.shape[0:2], interpolation=cv2.INTER_NEAREST)
    if ENABLE_MASK and mask_pil:
        mask_pil = image_resize_default(mask_pil, _img_cv2.shape[0])
    
    _img_out = _img_cv2
    
    if func_name == 'chg_color':  # color-change processing

        parts = []   # face parts to be processed
        colors = []  # color for each face part
        # colors must contain exactly as many entries as parts.

        # Set up the coloring parameters
        def __set_color(part, color):
            if isinstance(part, list):
                for i in part:
                    parts.append(face_area_map[i])
                    colors.append(color)
            else:
                parts.append(face_area_map[part])
                colors.append(color)
        
        # Example colors (b, g, r):
        # [230, 50, 20]  # blue
        # [10, 250, 10]  # green
        # [10, 50, 250]  # red
        # [20, 70, 180]  # lip red
        #print('--> func_params:', func_params)
        for p in edit_area:
            __set_color(p, func_params[2])
        if func_arg in ('avatar', 'hawk', 'red'):  # skin-tone series 1
            __set_color(['u_lip', 'l_lip'], [20, 70, 180])
            c1, c2 = 0, 1  # modify only the blue channel
        elif func_arg in ('jade', 'golden'):  # skin-tone series 2
            __set_color(['u_lip', 'l_lip'], [20, 70, 180])
            c1, c2 = 0, 2  # modify the blue and green channels
        else:  # partial coloring, e.g. hair color
            c1, c2 = 0, 1  # by default modify only the blue channel
        
        # Swap in the target color for each specified region
        for part, color in zip(parts, colors):
            print('--> change color to {} for {}'.format(color, part))
            _img_out = change_color(_img_out, parsing, part, color, c1, c2)

    # Convert the output image from cv2 to PIL.Image
    _img_out_pil = image_cv2_to_pil(_img_out)

    if ENABLE_MASK:
        # Refine using the edge-blurred mask
        _img_out_pil.paste(_img_out_pil, (0, 0), mask_pil)

    # Rescale the returned image to the original size
    if _img_h > _h:
        if ENABLE_ESRGAN:
            #_img_out_pil = esrgan_adapter.process_image(
            #    _img_out_pil, engine_model['esrgan'], _img_w, _img_h)
            # process via the manager process's shared-object interface
            ret = exec_esrgan(_img_out_pil, _img_w, _img_h, worker)
            if ret:
                _img_out_pil = ret
            else:
                _img_out_pil = image_resize_default(_img_out_pil, _img_h)
        else:
            _img_out_pil = image_resize_default(_img_out_pil, _img_h)

    # If this is the main demo image, save the processed face as a UI icon
    if 'img_hash' in face_data:
        img_hash = face_data['img_hash']
        if img_hash in app_config.IMG_HASH_ICON_DEMO:  # designated image for generating UI icons
            check_if_save_demo_icon(_img_out_pil, SAVE_ICON_DIR, func_arg)

    return _img_out_pil
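A minimal call sketch, assuming the parsing engine dict from Example #5 and the func_params layout read above (effect name, area list, then a b-g-r color); pil_img and the hash are hypothetical:

engine = init_model(gpu_id=0)  # Example #5
out_pil = process_image(pil_img, {'img_hash': 'abc123'}, engine, 'chg_color',
                        ['red', ['skin', 'lips'], [10, 50, 250]], worker=None)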
Example #21
#import time
#import numpy as np
#from skimage.io import imread
#from skimage import transform, util
#from keras.models import load_model

BASE_DIR = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "..")
sys.path.append(BASE_DIR)

from app.engine_v1 import app_config, logger
from app.engine_v1.m_server import ManagerServer

if __name__ == '__main__':

    logger.info('')
    logger.info('-' * 40)
    logger.info('- Start Manager Server')
    logger.info('-' * 40)

    # To make multiprocessing work with CUDA, the start method must be set to "spawn"
    #import multiprocessing as mp
    #mp.set_start_method('spawn')
    # or
    #import torch
    #torch.cuda.current_device()
    #torch.cuda._initialized = True
    # or
    #import torch
    #torch.multiprocessing.set_start_method('spawn')
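A runnable sketch of the first option above: the start method must be set before any process or CUDA context is created (the ManagerServer wiring is omitted, since its constructor is not shown here):

import multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')  # must run before any Process/Pool is created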