# Standard-library imports needed by ver1to2; the project-specific helpers
# (Layer, load_models, upscale_image, cleanImg) are assumed to be imported
# elsewhere in this module.
import argparse
from pathlib import Path


def ver1to2(layer: Layer) -> Layer:
    """Convert a 1.8 skin to 1.8_bedrock.

    Args:
        layer (Layer): texture layer to upscale

    Returns:
        Layer: upscaled layer
    """
    image = layer.image.convert("RGBA")
    # Configure waifu2x for a 2x CPU upscale (gpu=-1) using the bundled VGG7 model.
    args = argparse.Namespace(
        gpu=-1,
        method="scale",
        noise_level=1,
        color="rgb",
        model_dir=f"{Path(__file__).resolve().parent}/models/vgg7/",
        arch="VGG7",
        scale_ratio=2,
        tta_level=8,
        tta=False,
        block_size=128,
        batch_size=16,
    )
    model = load_models(args)
    image = cleanImg(upscale_image(args, image, model["scale"]))
    # Double the layer offsets so the upscaled texture stays aligned.
    return Layer(
        layer.name,
        image,
        image.size,
        (layer.offsets[0] * 2, layer.offsets[1] * 2),
        layer.opacity,
        layer.visible,
        layer.blendmode,
    )
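# A minimal usage sketch, not taken from the original project: it assumes the
# Layer constructor takes (name, image, dimensions, offsets, opacity, visible,
# blendmode), matching the Layer(...) call in ver1to2 above, and that the
# opacity/blendmode values below ("1.0", "normal") are reasonable defaults.
from PIL import Image


def upscale_skin_sketch(path: str) -> Layer:
    """Wrap a skin texture in a Layer and upscale it to bedrock resolution."""
    src = Image.open(path)
    base = Layer("skin", src, src.size, (0, 0), 1.0, True, "normal")
    return ver1to2(base)  # new Layer at 2x resolution with doubled offsets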
def test_foreground_png():
    """Upscale foreground.png and compare it against the expected UpResNet10 output."""
    src = Image.open(f"{THISDIR}/data/foreground.png")
    dst = src.copy()
    dst = upscale_image(args, dst, models["scale"])
    # Uncomment to regenerate the expected output:
    # dst.convert(src.mode).save(f"{THISDIR}/data/foreground_upresnet10_expected.png")
    assert imgcompare.is_equal(
        dst,
        f"{THISDIR}/data/foreground_upresnet10_expected.png",
        tolerance=0.2,
    )
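# Hedged setup sketch, not shown in the original test module: the test above
# relies on module-level THISDIR, args and models. One plausible construction,
# mirroring the Namespace used by ver1to2 but pointing at an UpResNet10 model
# directory, is sketched below; the model_dir path and the assumption that
# load_models/upscale_image are importable from the waifu2x module of
# waifu2x-chainer are not confirmed by the original code.
import argparse
from pathlib import Path

import imgcompare
from PIL import Image
from waifu2x import load_models, upscale_image

THISDIR = str(Path(__file__).resolve().parent)
args = argparse.Namespace(
    gpu=-1,
    method="scale",
    noise_level=1,
    color="rgb",
    model_dir=f"{THISDIR}/models/upresnet10/",
    arch="UpResNet10",
    scale_ratio=2,
    tta_level=8,
    tta=False,
    block_size=128,
    batch_size=16,
)
models = load_models(args)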
def png_to_waifu2x(data, method, arch, color):
    """Upscale image data using waifu2x-chainer.

    Args:
        data: raw image data to upscale
        method: upscale method, e.g. "scale"
        arch: model architecture: "VGG7", "UpConv7", "ResNet10" or "UpResNet10"
        color: colour mode, "rgb" or "y"
    """
    import os
    import sys
    import types

    # Make the bundled waifu2x_chainer checkout importable.
    if "waifu2x_chainer" not in sys.path:
        sys.path.append("waifu2x_chainer")
    import waifu2x

    # Default to CPU (-1) unless a GPU id has already been set on the function.
    if not hasattr(png_to_waifu2x, "gpu"):
        png_to_waifu2x.gpu = -1

    cfg = types.SimpleNamespace()
    cfg.scale_ratio = 2.0
    cfg.tta = True
    cfg.tta_level = 8
    cfg.block_size = 128
    cfg.batch_size = 16
    cfg.method = method  # scale, noise, scale_noise
    cfg.arch = arch  # VGG7, UpConv7, ResNet10, UpResNet10
    cfg.color = color  # rgb, y
    cfg.gpu = png_to_waifu2x.gpu
    cfg.model_dir = os.path.join("waifu2x_chainer", "models", cfg.arch.lower())

    models = waifu2x.load_models(cfg)
    src = data_to_image(data)

    # Can get out-of-memory errors if CUDA is multiprocessed.
    # TODO: Maybe revisit this? Seems to get away with it for a bit before
    # crashing and burning.
    try:
        with HiddenPrints():
            dst = waifu2x.upscale_image(cfg, src, models["scale"])
    except Exception as error:
        print(error)
        raise Exception("Could not waifu2x upscale, caught error.") from error

    return image_to_data(dst)
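# A hedged usage sketch, not part of the original module: it assumes that
# data_to_image/image_to_data round-trip raw PNG bytes, and that the
# waifu2x_chainer checkout with its pretrained models sits in the working
# directory (as png_to_waifu2x expects).
def upscale_png_file_sketch(in_path: str, out_path: str) -> None:
    """Read a PNG, 2x upscale it with the UpResNet10 model, and write it back out."""
    with open(in_path, "rb") as handle:
        data = handle.read()
    upscaled = png_to_waifu2x(data, "scale", "UpResNet10", "rgb")
    with open(out_path, "wb") as handle:
        handle.write(upscaled)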