import os
import urllib.parse
from subprocess import check_output

# NOTE: io, web, google, fb2, fza, xhtml, SynGenXHTML, progress_bar,
# expand_pj and REQ_GLETR are project-local names; in particular 'io'
# is the project's io helper module, not the stdlib io.

def translated(gt, fl):
    srctxt = io.expand_pj('@/' + fl.name + '.src.txt')
    prorsp = io.expand_pj('@/' + fl.name + '.pro.rsp')
    # Count source lines so the caller can show overall progress.
    if os.path.exists(srctxt):
        with open(srctxt, 'r') as f:
            fl.N = sum(1 for L in f)
    # WARNING: manually deleting lines in 'prorsp' is allowed only from the end
    proN = 0
    if os.path.exists(prorsp):
        # Replay already-translated lines from the append-only cache.
        with open(prorsp, 'r') as f:
            lst = f.readlines()
        proN = len(lst)
        if not getattr(fl, 'N', None):
            fl.N = proN
        for line in lst:
            # Strip the stored newline so replayed lines match the
            # freshly translated ones yielded below.
            yield line.rstrip('\n')
    # ATTENTION: will die if srctxt is non-existent/corrupted
    with open(srctxt, 'r') as f:
        lst = f.readlines()
    # Translate only the part that is not cached yet.
    for line in lst[proN:]:
        line = gt.translate(line)
        with open(prorsp, 'a') as rsp:
            rsp.write(line + '\n')
        yield line
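
# Aside (illustrative sketch, not part of the codebase): the resume
# contract above in miniature. The first pass translates everything and
# fills the cache; a rerun replays the cache and only translates the
# tail of the source, which is why trimming 'prorsp' is safe only from
# the end.
def _resume_model(src_lines, cache, translate):
    done = len(cache)
    yield from cache                  # replay what is already cached
    for line in src_lines[done:]:     # translate only the uncached tail
        out = translate(line)
        cache.append(out)
        yield out

_cache = []
assert list(_resume_model(['a', 'b'], _cache, str.upper)) == ['A', 'B']
# Rerun with one new source line: 'a'/'b' come from the cache, only 'c'
# goes through translate().
assert list(_resume_model(['a', 'b', 'c'], _cache, str.upper)) == ['A', 'B', 'C']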
def res(dom):
    print('> color-syntax.css')
    io.export_cache('fza/color-syntax.css', SynGenXHTML(dom).colors())
    print('> *.css')
    fnm = io.expand_pj([':', 'forestanza', 'ftype', 'xhtml.css'])
    with open(fnm) as f:
        io.export_cache('fza/color-theme-wood.css', f.read())
    print('> *.js')
    fnm = io.expand_pj([':', 'forestanza', 'ftype', 'xhtml.js'])
    with open(fnm) as f:
        io.export_cache('fza/scrollPos.js', f.read())
def main(dom, fl):
    src = fl.name + '.src.html'
    io.export_cache(src, lambda: web.load(fl.url), keep=True)
    io.export_cache(fl.name + '.src.txt',
                    lambda: web.refine(io.expand_pj('@/' + src)), keep=True)
    fts = [fb2, fza, xhtml]
    exs = [t.Exporter(dom, author='Xz', title=fl.name) for t in fts]
    # Write main body
    lst = translated(google.Translator(), fl)
    for i, line in enumerate(lst):
        sec = google.ResponseParser(line)
        for e in exs:
            e.p_section(i + 1, sec)
        progress_bar(i, fl.N)
    for t, e in zip(fts, exs):
        # Bind e as a default argument: a bare 'lambda: e.dump()' would
        # close over the loop variable, so every callable would dump the
        # last exporter if export_cache ever defers the call.
        io.export_cache(fl.name + t.EXT, lambda e=e: e.dump())
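
# Aside (illustrative, not part of the codebase): the late-binding
# pitfall that the e=e default above guards against.
def _late_binding_demo():
    fns = [lambda: i for i in range(3)]
    assert [f() for f in fns] == [2, 2, 2]    # all closures see the last i
    fns = [lambda i=i: i for i in range(3)]
    assert [f() for f in fns] == [0, 1, 2]    # defaults bind at definition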
def _make_req(self, text, sl='ja', tl='en', hl='en'):
    # The 'tk' request token is computed by an external helper script.
    cmd = (expand_pj(':/scripts/tk_hack.pl'), text)
    tk = check_output(cmd).decode('utf-8').rstrip()
    return REQ_GLETR + urllib.parse.urlencode(
        [('sl', sl), ('tl', tl), ('hl', hl), ('tk', tk), ('q', text)])
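
# Aside (illustrative): urlencode with a list of pairs keeps the given
# order and percent-escapes the UTF-8 payload, which is what the request
# builder above relies on for the 'q' parameter.
def _urlencode_demo():
    qs = urllib.parse.urlencode([('sl', 'ja'), ('tl', 'en'), ('q', 'こんにちは')])
    assert qs == 'sl=ja&tl=en&q=%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%81%AF'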
def __init__(self, args, chapter):
    self.url = args.url.format(chapter)
    self.name = args.name.format(chapter)
    self.basepath = io.expand_pj('@/' + self.name)
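
# Aside (hypothetical template values; the real ones come from the CLI
# args object): url and name are str.format templates with a single
# placeholder for the chapter number.
def _chapter_template_demo():
    url = 'https://example.com/read/{:03d}'.format(7)
    name = 'novel-ch{}'.format(7)
    assert (url, name) == ('https://example.com/read/007', 'novel-ch7')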