if out['channel']: fPreview[4] = BGR2RGB else: fPreview[4] = identity ns.append( appendFuncs(BGR2RGB, Node(dict(op='Channel')), fs1, False)) out['channel'] = 1 ns.append( appendFuncs(toBuffer(bitDepthOut), Node(dict(op='toBuffer', bits=bitDepthOut), load), fs1, False)) return fs, ns, out procs = dict(file=(lambda _, _0, nodes: procInput( 'file', 8, [context.getFile, readFile(nodes, context)], dict(bitDepth=8, channel=0, source=0))), buffer=(lambda opt, *_: procInput( 'buffer', opt['bitDepth'], [toNumPy(opt['bitDepth'])], dict(bitDepth=opt['bitDepth'], channel=1, source=1))), DN=procDN, SR=procSR, output=procOutput, slomo=procSlomo, dehaze=procDehaze, resize=procResize, VSR=procVSR, demob=procDemob) stepOpts = dict(SR={ 'toInt': ['scale', 'ensemble'],
return [res] else: o = lambda im: [reduce(applyNonNull, fs1, im)] fs = [o] if out['channel']: fPreview[4] = BGR2RGB else: fPreview[4] = identity ns.append(appendFuncs(BGR2RGB, Node(dict(op='Channel')), fs1, False)) out['channel'] = 1 ns.append(appendFuncs(toBuffer(bitDepthOut), Node(dict(op='toBuffer', bits=bitDepthOut), load), fs1, False)) return fs, ns, out procs = dict( file=(lambda _, _0, nodes: procInput('file', 8, [context.getFile, readFile(nodes, context)], dict(bitDepth=8, channel=0, source=0))), buffer=(lambda opt, *_: procInput('buffer', opt['bitDepth'], [toNumPy(opt['bitDepth'])], dict(bitDepth=opt['bitDepth'], channel=1, source=1))), DN=procDN, SR=procSR, output=procOutput, slomo=procSlomo, dehaze=procDehaze, resize=procResize ) def genProcess(steps, root=True, outType=None): funcs=[] nodes=[] last = identity rf = lambda im: reduce(apply, funcs, im) if root: for i, opt in enumerate(steps): opt['name'] = i + (0 if steps[0]['op'] == 'file' else 2) for opt in filter((lambda opt: opt['op'] == 'SR'), steps): toInt(opt, ['scale', 'ensemble'])
# Benchmark/driver for the demoire model: run the model over every image in
# `inputFolder`, print per-image output mean and difference vs. a reference,
# accumulate inference time, and write results under download/.
# NOTE(review): relies on names defined earlier in the file (test, modelName,
# inputFolder, Option, initModel, Net, config, torch, np, os, perf_counter,
# readFile, ensemble, toOutput, toFloat, writeFile, splitext, split).
refFile = 0  # e.g. 'test/1566005911.7879605_ci.png'; falsy disables reference compare
def context(): pass  # minimal stand-in context object for imageProcess helpers

opt = Option(('test/{}.pth' if test else 'model/demoire/{}.pth').format(modelName))
opt.padding = 31
opt.ramCoef = 1 / 8000.
opt.align = 128
opt.modelCached = initModel(opt, weights=opt.model, f=lambda _: Net())
# HWC image -> CHW float tensor; /256 scales 8-bit samples into [0, 1)
toTorch = lambda x: torch.from_numpy(np.array(x)).permute(2, 0, 1).to(
    dtype=config.dtype(), device=config.device()) / 256
time = 0.0  # accumulated inference seconds (shadows the stdlib `time` module name)
for pic in os.listdir(inputFolder):
    original = toTorch(readFile(context=context)(inputFolder + '/' + pic))
    # without a reference folder, compare the output against the input itself
    ref = toTorch(readFile(context=context)(refFile + '/' + pic)) if refFile else original
    start = perf_counter()
    y = ensemble(opt)(original)
    time += perf_counter() - start
    print(pic, float(y.mean(dtype=torch.float)),
          float((y - ref).abs().mean(dtype=torch.float)))
    out = toOutput(8)(toFloat(y))
    writeFile(out,
              'download/{}.{}.png'.format(splitext(split(pic)[1])[0], modelName),
              context)
print(time)
# Path setup and interactive helpers for inspecting models/weights.
# NOTE(review): `sys`, `os` and `torch` are used but imported outside this
# chunk — confirm they are in scope.
sys.path.append(os.path.abspath('../python'))
sys.path.append('../site-packages/nvidia-ml-py')

from PIL import Image
import numpy as np

from imageProcess import readFile, initModel, toFloat, toOutput, ensemble, writeFile, Option
from config import config

# CHW tensor -> PIL image for quick visual inspection
show = lambda im: Image.fromarray(toOutput(8)(im).transpose(1, 2, 0))
# HWC image -> CHW float tensor; /256 scales 8-bit samples into [0, 1)
toTorch = lambda x: torch.from_numpy(np.array(x)).permute(2, 0, 1).to(
    dtype=config.dtype(), device=config.device()) / 256
def context(): pass  # minimal stand-in context object
readFile = readFile(context=context)  # bind the context once; rebinds the imported name
readPic = lambda path: toTorch(readFile(path))

import re
from functools import reduce

# Helpers for renaming keys in a state-dict-like mapping `w`.
# All keys of w that start with prefix r.
getRoot = lambda w, r: tuple(filter(lambda s: s.startswith(r), w.keys()))
# (old, new) pairs replacing p with s, over keys rooted at r + p.
replaces = lambda w, r, p, s: list(
    (t, t.replace(p, s)) for t in getRoot(w, r + p))
# (old, new) pairs inserting s between r and p, over keys rooted at r + p.
inserts = lambda w, r, p, s: list(
    (t, t.replace(r + p, r + s + p)) for t in getRoot(w, r + p))
# First capture group when the pattern defines one, else the whole match.
getMatch = lambda o: o.group(1) if len(o.groups()) else o.group(0)

def find(names, r):
    """Return the set of matches (first group, or whole match) of regex r over names."""
    r = re.compile(r)
    return set(map(getMatch, filter(None, map(r.match, names))))