Example #1
File: app.py Project: ceynri/FIC
def compress():
    '''Compress images in batch and return the compression results'''

    if model.quality_level != 'medium':
        model.switch_quality_level('medium')
    # Get the uploaded file objects
    files = request.files.getlist('files')
    ret = []
    for rawfile in files:
        file = File(rawfile)
        # Convert the binary data to a tensor
        input = file.load_tensor().cuda()

        data = model.encode(input)

        # Save the compressed data
        fic_name = f'{file.name}.fic'
        fic_path = get_path(fic_name)
        File.save_binary(
            {
                'feat': data['feat'],
                'tex': data['tex'],
                'intervals': data['intervals'],
                'ext': file.ext,
            }, fic_path)
        fic_size = path.getsize(fic_path)

        # Get the size of the original image
        input_path = get_path(file.name_suffix('input', ext='.bmp'))
        save_image(input, input_path)
        input_size = path.getsize(input_path)
        fic_compression_ratio = fic_size / input_size

        # Result data to be returned
        result = {
            'name': fic_name,
            'data': get_url(fic_name),
            'size': fic_size,
            'compression_ratio': fic_compression_ratio,
        }
        ret.append(result)

    # Respond to the request
    response = jsonify(ret)
    return response
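For reference, a client could exercise this endpoint roughly as follows. This is a minimal sketch: the route path /compress, the host, and the port are illustrative assumptions, not shown in the source.

# Minimal client sketch (assumes compress() is registered as a Flask route at /compress)
import requests

with open('photo1.png', 'rb') as f1, open('photo2.png', 'rb') as f2:
    resp = requests.post(
        'http://localhost:5000/compress',
        files=[('files', f1), ('files', f2)],
    )
for item in resp.json():
    print(item['name'], item['size'], item['compression_ratio'])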
Example #2
File: app.py Project: ceynri/FIC
def demo_process():
    '''Provide the demo showcase functionality'''

    # Get the uploaded file object
    file = request.files['file']
    file = File(file)

    feature_model = request.form['feature_model']
    quality_level = request.form['quality_level']
    if model.quality_level != quality_level:
        model.switch_quality_level(quality_level)
    # Convert the binary data to a tensor
    input = file.load_tensor().cuda()

    # Run the model and collect the encode/decode results
    e_data = model.encode(input)
    d_data = model.decode(feat=e_data['feat'],
                          tex=e_data['tex'],
                          intervals=e_data['intervals'],
                          recon=e_data['recon'])
    data = {**e_data, **d_data}

    # Save the compressed data
    fic_path = get_path(f'{file.name}.fic')
    File.save_binary(
        {
            'feat': data['feat'],
            'tex': data['tex'],
            'intervals': data['intervals'],
            'ext': file.ext,
        }, fic_path)
    # fic-related parameters
    fic_size = path.getsize(fic_path)
    fic_bpp = get_bpp(fic_size)

    # Save the feature separately to compute the sizes of feature and texture
    feat_path = get_path(f'{file.name}_feat.fic')
    File.save_binary({
        'feat': data['feat'],
    }, feat_path)
    # Feature-related parameters
    feat_size = path.getsize(feat_path)
    feat_bpp = get_bpp(feat_size)
    # Texture-related parameters
    tex_size = fic_size - feat_size
    tex_bpp = get_bpp(tex_size)

    # Images to be saved
    imgs = {
        'input': data['input'],
        'recon': data['recon'],
        'resi': data['resi'],
        'resi_decoded': data['resi_decoded'],
        'resi_norm': data['resi_norm'],
        'resi_decoded_norm': data['resi_decoded_norm'],
        'output': data['output'],
    }

    # Save imgs and get the corresponding URLs
    img_urls = {}
    for key, value in imgs.items():
        # Save the image
        file_name = file.name_suffix(key, ext='.bmp')
        file_path = get_path(file_name)
        save_image(value, file_path)
        # Record the image URL to return
        img_urls[key] = get_url(file_name)

    # Compute the compression ratio
    input_name = file.name_suffix('input', ext='.bmp')
    input_path = get_path(input_name)
    input_size = path.getsize(input_path)
    fic_compression_ratio = fic_size / input_size

    # JPEG baseline for comparison
    jpeg_name = file.name_suffix('jpeg', ext='.jpg')
    jpeg_path = get_path(jpeg_name)
    dichotomy_compress(input_path, jpeg_path, target_size=tex_size)
    img_urls['jpeg'] = get_url(jpeg_name)

    # JPEG-related parameter computation
    jpeg_size = path.getsize(jpeg_path)
    jpeg_compression_ratio = jpeg_size / input_size
    jpeg_bpp = get_bpp(jpeg_size)

    # Other data
    input_arr = tensor_to_array(data['input'])
    output_arr = tensor_to_array(data['output'])
    jpeg_arr = load_image_array(jpeg_path)

    # Object to be returned
    ret = {
        'image': img_urls,
        'data': get_url(f'{file.name}.fic'),
        'eval': {
            'fic_bpp': fic_bpp,
            'feat_bpp': feat_bpp,
            'tex_bpp': tex_bpp,
            'jpeg_bpp': jpeg_bpp,
            'fic_compression_ratio': fic_compression_ratio,
            'jpeg_compression_ratio': jpeg_compression_ratio,
            'fic_psnr': psnr(input_arr, output_arr),
            'fic_ssim': ssim(input_arr, output_arr),
            'jpeg_psnr': psnr(input_arr, jpeg_arr),
            'jpeg_ssim': ssim(input_arr, jpeg_arr),
        },
        'size': {
            'fic': fic_size,
            'input': input_size,
            # 'output': fic_size,
            'output': tex_size,
            'feat': feat_size,
            'tex': tex_size,
            'jpeg': jpeg_size,
        }
    }
    # Respond to the request
    response = jsonify(ret)
    return response
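The helpers get_bpp and dichotomy_compress are referenced above but not defined in these snippets. Below is a minimal sketch of plausible implementations, assuming a fixed input resolution for the bits-per-pixel calculation and a binary search over JPEG quality for the size-matched baseline; it is illustrative only, not the project's actual code.

from os import path
from PIL import Image

def get_bpp(size_in_bytes, width=256, height=256):
    # Bits per pixel = total bits / pixel count.
    # The fixed 256x256 resolution is an assumption for illustration.
    return size_in_bytes * 8 / (width * height)

def dichotomy_compress(src_path, dst_path, target_size, max_iter=10):
    # Binary-search the JPEG quality setting so that the encoded file
    # size approaches target_size (assumed behavior of the helper).
    img = Image.open(src_path).convert('RGB')
    lo, hi = 1, 95
    for _ in range(max_iter):
        quality = (lo + hi) // 2
        img.save(dst_path, format='JPEG', quality=quality)
        if path.getsize(dst_path) > target_size:
            hi = quality - 1
        else:
            lo = quality + 1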
Example #3
File: test.py Project: ceynri/FIC
    # Base layer: deconvolution-based feature reconstruction network (inference mode)
    b_layer = DeconvRecon().eval()
    b_layer = b_layer.cuda()
    b_layer = nn.DataParallel(b_layer).cuda()
    b_param = torch.load('../params/deconv_recon/30w/baseLayer_7.pth',
                         map_location='cuda:0')
    b_layer.load_state_dict(b_param)

    # Enhancement layer: GDN-based texture model (inference mode)
    e_layer = GdnModel().eval().cuda()
    e_layer = CustomDataParallel(e_layer).cuda()
    c_param = torch.load('../params/gdn_model/5120/enhanceLayer_7.pth',
                         map_location='cuda:0')
    # c_param = torch.load('../params/gdn_model/1024/gdnmodel_10.pth', map_location='cuda:0')
    e_layer.load_state_dict(c_param)

    # Load the input image given on the command line and convert it to a tensor
    file_path = sys.argv[1]
    file = File(file_path)
    file.load_tensor()

    with torch.no_grad():
        input = file.tensor
        input = input.cuda()
        # Extract the deep feature with the ResNet model (loaded elsewhere in test.py)
        feat = resnet(input)

        # process feat shape to [N, 512, 1, 1]
        feat = torch.squeeze(feat, 1)
        feat = torch.unsqueeze(feat, 2)
        feat = torch.unsqueeze(feat, 3)
        feat = feat.cuda()

        # reconstruct feature image
        recon = b_layer(feat)
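The squeeze/unsqueeze sequence above reshapes the ResNet feature to [N, 512, 1, 1] before feeding the base layer. Assuming the feature holds 512 values per sample (e.g. shape [N, 512] or [N, 1, 512]), an equivalent one-line reshape would be:

# Equivalent reshape (sketch; assumes 512 feature values per sample)
feat = feat.reshape(-1, 512, 1, 1)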