def generate_output_pc(setup, title, pc_cutoff, pc_height, img_height, skin, cache, imgs=None):
    """Recursively emit HTML for a PC-mode setup tree.

    Walks sub-setups depth-first, accumulating fumen image urls; at a leaf it
    solves for the best PC continuation and renders the accumulated images plus
    success-rate stats.

    Args:
        setup: setup node exposing .continuations and .solution (project type).
        title: heading text for this (sub-)setup.
        pc_cutoff: PC success-rate threshold shown in the caption.
        pc_height: field height passed to the PC path solver.
        img_height: pixel height for rendered fumen images.
        skin: block skin used for image rendering.
        cache: setup cache handed to SFinder.
        imgs: image urls accumulated from ancestor setups (internal use).
    """
    # Fix: the old default was a shared mutable list (`imgs=[]`); use None
    # and create a fresh list per top-level call instead.
    if imgs is None:
        imgs = []
    #not the penultimate bag, need to go deeper
    if len(setup.continuations[0].continuations) > 0:
        #store images in list to print at the end
        new_imgs = imgs.copy()  #don't think this needs to be a deepcopy
        new_imgs.append(fumen_to_image(setup.solution.fumen, img_height, skin))
        for i, s in enumerate(tqdm(setup.continuations, unit="setup", leave=False)):
            generate_output_pc(s, title + (" - Sub-Setup %d" % i), pc_cutoff,
                               pc_height, img_height, skin, cache, new_imgs)
    else:
        sf = SFinder(setup_cache=cache)
        h2(title)
        with div():
            best_continuation = setup.continuations[0].solution
            best_pc = sf.path(
                fumen=best_continuation.to_fumen(),
                pieces=best_continuation.get_remaining_pieces(),
                height=pc_height)[0]  #todo: hack! change this when i fix cache
            for url in imgs:
                img(src=url)
            img(src=fumen_to_image(setup.solution.fumen, img_height, skin))
            img(src=fumen_to_image(best_continuation.fumen, img_height, skin))
            img(src=fumen_to_image(best_pc.fumen, img_height, skin))
            with p():
                text("Best continuation: ")
                b("%.2f%%" % setup.continuations[0].PC_rate)
                text(" PC success rate – ")
                b(a("%d continuations" % len(setup.continuations),
                    href=fumen_url + setup.to_fumen()))
                text("with >%.2f%% PC success rate" % pc_cutoff)
def generate_output(setup, title, img_height, conts_to_display, skin, imgs=None):
    """Recursively generate output for setup+continuations.

    Args:
        setup: setup node exposing .continuations and .solution (project type).
        title: heading text for this (sub-)setup.
        img_height: pixel height for rendered fumen images.
        conts_to_display: how many continuations to show at each level.
        skin: block skin used for image rendering.
        imgs: image urls accumulated from ancestor setups (internal use).
    """
    # Fix: the old default was a shared mutable list (`imgs=[]`); use None
    # and create a fresh list per top-level call instead.
    if imgs is None:
        imgs = []
    #not the penultimate bag, need to go deeper
    if len(setup.continuations[0].continuations) > 0:
        #store images in list to print at the end
        new_imgs = imgs.copy()  #don't think this needs to be a deepcopy
        new_imgs.append(fumen_to_image(setup.solution.fumen, img_height, skin))
        # this naming scheme could get messy, anything better? maybe Setup 1-A-A?
        # but I'm not sure what to do if more continuatons than 26, maybe just AA, then AAA
        new_ctd = conts_to_display - 1 if conts_to_display > 1 else 1
        for i, s in enumerate(tqdm(setup.continuations, unit="setup", leave=False)):
            generate_output(s, title + (" - Sub-Setup %d" % i), img_height,
                            new_ctd, skin, new_imgs)
    else:
        h2(title)
        with div():
            for url in imgs:
                img(src=url)
            #final setup with conts, still need to display it's image
            img(src=fumen_to_image(setup.solution.fumen, img_height, skin))
            conts_to_display -= 1
            for cont in setup.continuations[:conts_to_display]:
                img(src=fumen_to_image(cont.solution.fumen, img_height, skin))
            with p():
                total_conts = len(setup.continuations)
                text("Showing ")
                b("%d" % min(conts_to_display, total_conts))
                text(" of ")
                b(a("%d continuations" % total_conts,
                    href=fumen_url + setup.to_fumen()))
def write_index(storages, output_dir: Path):
    """Write the top-level index.html and an OPML feed list for all storages.

    Args:
        storages: storage objects exposing a .name attribute.
        output_dir: directory receiving index.html; the OPML is written to
            output_dir/rendered/atom/feeds.opml.
    """
    now = datetime.now()
    doc = dominate.document(
        title=f'axol index for {[s.name for s in storages]}, rendered at {fdate(now)}'
    )
    # TODO don't need this anymore?
    rss = True
    if rss:
        outlines = []
        for storage in storages:
            name = storage.name
            htmlUrl = 'https://whatever'
            url = f'https://unstable.beepb00p.xyz/atom/{name}.xml'
            outlines.append(
                f'<outline title="{name}" text="{name}" xmlUrl="{url}" htmlUrl="{htmlUrl}"></outline>'
            )
        outliness = "\n".join(outlines)
        # Fix: the XML declaration must be the very first bytes of the file;
        # the previous template emitted leading whitespace before '<?xml',
        # which is not well-formed XML and breaks strict OPML parsers.
        XML = f"""<?xml version="1.0" encoding="UTF-8"?>
<opml version="2.0">
<body>
<outline text="All">
{outliness}
</outline>
</body>
</opml>
"""
        (output_dir / 'rendered' / 'atom' / 'feeds.opml').write_text(XML)
    with doc.head:
        T.style(STYLE)
    with doc.body:
        with T.table():
            for storage in storages:
                with T.tr():
                    T.td(storage.name)
                    T.td(T.a('summary', href=f'summary/{storage.name}.html'))
                    T.td(T.a('history', href=f'rendered/{storage.name}.html'))
        T.div(T.b(T.a('pinboard users summary', href=f'pinboard_users.html')))
        T.div(T.b(T.a('reddit users summary', href=f'reddit_users.html')))
        T.div(T.b(T.a('github users summary', href=f'github_users.html')))
        T.div(T.b(T.a('twitter users summary', href=f'twitter_users.html')))
    # TODO 'last updated'?
    (output_dir / 'index.html').write_text(str(doc))
def format(trait, objs, *args, **kwargs) -> Htmlish:
    """Render a group of github results: linked title, then per-item
    description, star badge and permalink."""
    container = T.div(cls='github')
    container.add(T.div(T.a(trait.title(objs), href=trait.link(objs))))
    # TODO total stars?
    with adhoc_html('github', cb=lambda children: container.add(*children)):
        for _, entry in objs:
            if not isempty(entry.description):
                T.div(entry.description)
            with T.div():
                if entry.stars > 0:
                    # a single star shows just the glyph, no count
                    star_count = '' if entry.stars == 1 else str(entry.stars)
                    T.b(star_count + '★')
                T.a(f'{entry.when.strftime("%Y-%m-%d %H:%M")} by {entry.user}',
                    href=entry.link,
                    cls='permalink')
    return container
def format(trait, objs) -> Htmlish:
    """Render a group of twitter results: tweet text, engagement badge,
    permalink and a blacklist control."""
    container = T.div(cls='twitter')
    # res.add(T.div(T.a(trait.title(objs), href=tw(trait.link(objs)))))
    with adhoc_html('twitter', cb=lambda children: container.add(*children)):
        for _, tweet in objs:
            T.div(tweet.text)
            with T.div():
                # only show the badge when there is any engagement at all
                if tweet.likes + tweet.retweets + tweet.replies > 0:
                    T.b(f'★{tweet.likes} ♺{tweet.retweets} 🗬{tweet.replies}')
                T.a(
                    f'{tweet.when.strftime("%Y-%m-%d %H:%M")} by {tweet.user}',
                    href=tw(tweet.link),
                    cls='permalink',
                )
                T.a('X', user=tweet.user, cls='blacklist')
    return container
def sources_summary(cls, items):
    """Build a div listing occurrence counts per tag and per user."""
    summary = T.div()
    summary.add(T.div(T.b('Tag summary:')))
    for tag, count in cls.sources_stats(items, key=lambda i: i.ntags):  # TODO ntags?
        row = T.div()
        row.add(cls.FTrait.tag_link(tag=tag))
        row.add(f': {count}')
        summary.add(row)
    # TODO dunno, it takes quite a bit of space... but cutting off those with 1 would be too annoying?
    summary.add(T.div(T.b('User summary:')))
    for user, count in cls.sources_stats(items, key=lambda i: i.user):
        row = T.div()
        row.add(cls.FTrait.user_link(user=user))
        row.add(f': {count}')
        summary.add(row)
    return summary
def user_summary_for(rtype, storages, output_path: Path):
    """Render a sortable HTML table of per-user item counts across storages.

    Digests are computed in parallel (one worker per storage), then counts are
    aggregated as ustats[user][storage_name] and emitted as a dominate table.

    :param rtype: result type used to pick the FormatTrait.
    :param storages: storage objects with .name and .path attributes.
    :param output_path: file the rendered HTML document is written to.
    """
    # ustats[user][query] -> item count for that user in that storage
    ustats = {}
    def reg(user, query, stats):
        if user not in ustats:
            ustats[user] = {}
        ustats[user][query] = stats
    # digest extraction is CPU-heavy, so fan out across processes
    with ProcessPoolExecutor() as pp:
        digests = pp.map(get_digest, [s.path for s in storages])
    for s, digest in zip(storages, digests):
        everything = flatten([ch for ch in digest.changes.values()])
        for user, items in group_by_key(everything, key=lambda x: x.user).items():
            reg(user, s.name, len(items))
    now = datetime.now()
    doc = dominate.document(
        title=f'axol tags summary for {[s.name for s in storages]}, rendered at {fdate(now)}'
    )
    with doc.head:
        T.style(STYLE)
        raw_script(JS)  # TODO necessary?
        # TODO FIXME can't inline due to some utf shit
        sortable_js = Path(__file__).absolute().parent / 'js' / 'sorttable.js'
        T.script(src=str(sortable_js))
    ft = FormatTrait.for_(rtype)
    with doc.body:
        with T.table(cls='sortable'):
            # header row is emitted once, using the first user's storage names
            # as column titles (assumes all users share the same storages — TODO confirm)
            emitted_head = False
            for user, stats in sorted(ustats.items(), key=lambda x: (-len(x[1]), x)):
                if not emitted_head:
                    with T.thead():
                        T.td('user')
                        for q, _ in stats.items():
                            T.td(q)
                    emitted_head = True
                with T.tr():
                    T.td(ft.user_link(user))
                    for q, st in stats.items():
                        # sorttable_customkey makes the JS sorter use the raw count
                        with T.td(sorttable_customkey=str(st)):
                            # TODO I guess unclear which tag to choose though.
                            T.a(
                                q, href=f'summary/{q}.html'
                            )  # TODO link to source in index? or on pinboard maybe
                            # TODO also project onto user's tags straight away
                            T.sup(
                                str(st) if st < 5 else T.b(
                                    T.font(str(st), color='red')))  # TODO css
    output_path.write_text(str(doc))
    logger.info('Dumped user summary to %s', output_path)
def test_font_styles(self):
    """Converter output for font_styles.man must equal a hand-built reference."""
    conv = Converter('Man', file='font_styles.man')
    conv.translate()
    rendered = conv.change_special_symbols(conv.html.render())
    expected = tags.html(lang='en')
    expected = add_head(expected)
    body = expected.add(tags.body())
    pieces = [
        tags.b('one'), '\ntwo', tags.b('three'), tags.b('four'),
        tags.i('five'), tags.b('six'), '\nseven eight', tags.b('nine'),
        '\nten eleven twelve', tags.i('13'), tags.b('14'), tags.i('15'),
        tags.i('file'), '\n.', tags.small('bbbbb'),
        tags.small('aaaaaa dfghjhg'), tags.b('--posix'),
        tags.i('file1 file2'),
    ]
    with body:
        para = tags.p()
        for piece in pieces:
            para += piece
    self.assertEqual(expected.render(), rendered)
def get_details_row(title: str, text) -> div:
    """Returns 'div' element with row class, containing 'b' tag as title
    and 'p' tag as the text.

    :param title: Text that will display in 'b' tag on the left side.
    :param text: Text that will display in 'p' tag on the right side.
    """
    label = b(f'{title.title()}:', cls='mx-1')
    value = p(text, cls='mb-0')
    return div(label, value, cls='row')
def external_resources(container, records, show_cw):
    """Render records grouped by subject into resource_block divs on container.

    Records are expected ordered by subject then resource; a new block opens
    whenever the subject changes, and every finished block is closed with a
    clearing div before being attached.

    :param container: dominate element that receives the resource blocks.
    :param records: ordered records with subject/resource/optional/note fields.
    :param show_cw: not used in this function body — presumably consumed by a
        callback elsewhere; TODO confirm.
    """
    def flush_block(block):
        # Consolidates the previously duplicated 3-line flush (see old TODO):
        # force resource_block container to be tall enough for all content,
        # then attach the finished block to the outer container.
        if block:
            block += t.div(cls='clear')
            container += block

    if records:
        resource_block = None
        subject_name = None
        content = None
        resource_name = None
        resource_detail = None
        for record in records:
            if record['subject_name'] != subject_name:
                subject_name = record['subject_name']
                flush_block(resource_block)  # finish the previous subject's block
                resource_block = t.div(cls='resource_block')
                resource_block += t.div(t.b(record['subject_name']), cls='subject_title')
                content = t.div(cls='subject_content')
                resource_block += content  # will be filled in below
            with content:
                if record['resource_name'] != resource_name:
                    resource_name = record['resource_name']
                    resource_title = t.div(cls='resource_name')
                    if record['optional']:
                        resource_title += '[optional] '
                    resource_title += t.b(resource_name)
                    if record['note']:
                        resource_title += ' (%s)' % record['note']
                    resource_detail = t.div(cls='resource_details')
                    _add_shopping_links(resource_detail, record)
        flush_block(resource_block)  # flush the final subject's block
def get_html(self, ):
    """Assemble a bootstrap-style panel: bold heading plus each element's html."""
    heading = div(b(self.type.name.title()), cls="panel-heading text-center")
    panel = div(cls="panel panel-primary")
    panel_body = div(cls="panel-body")
    for child in self.elements:
        panel_body.add(child.get_html())
    panel.add(heading, panel_body)
    return panel
def add_summary(self, dict_):
    """ Add a summary with key value pairs from the dictionary """
    # visual separator before the summary section
    self.doc += br()
    self.doc += hr()
    self.doc += br()
    self.add_header("Test Summary")
    self.doc += br()
    for key, value in dict_.items():
        self.doc += p(b(key), ' = ', str(value))
    self.doc += br()
def to_html(self):
    """
    This method renders html to a string which is then passed as the
    popup label.
    Inputs: - the popup object
    Outputs: - rendered_html (str): html, cast to a string, for the popup label
    """
    doc = self.rendered_html
    doc.add(b(self.name))            # name in bold
    doc.add(br(), self.addr, br())   # address on its own line
    # bold each prefix and append the attribute as plain text, skipping empties
    for attr, prefix in self.to_label:
        if attr:
            doc.add(b(prefix), str(attr), br())
    return str(doc)
def format(trait, objs, *args, **kwargs) -> Htmlish:
    """Render a group of reddit results: linked title, descriptions, vote
    counts and permalinks."""
    container = T.div(cls='reddit')
    heading = trait.title(objs)
    permalink = reddit(trait.link(objs))
    container.add(T.div(T.a(heading, href=permalink)))
    with adhoc_html('reddit', cb=lambda children: container.add(*children)):
        for _, post in objs:
            if not isempty(post.description):
                T.div(post.description)
            T.div(trait.subreddit_link(post.subreddit))
            with T.div():
                T.b(f'{post.ups}⇅{post.downs}')  # TODO sum all ups and downs??
                T.a(f'{post.when.strftime("%Y-%m-%d %H:%M")}',
                    href=permalink,
                    cls='permalink')
                text(' by ')
                trait.user_link(user=post.user)
    return container
def format(trait, objs) -> Htmlish:
    """Render a group of hackernews results: title link, story text,
    points/comments badge and permalink."""
    container = T.div(cls='hackernews')
    with adhoc_html('hackernews', cb=lambda children: container.add(*children)):
        for _, story in objs:
            if story.url is not None:
                T.div(T.a(story.title, href=story.url))
            T.div(raw(story.text), cls='text')  # eh, it's html
            with T.div():
                badges = []
                if story.points > 0:
                    badges.append(f'🠅{story.points}')
                if story.comments > 0:
                    badges.append(f'🗬{story.comments}')
                T.b(' '.join(badges))
                # TODO FIXME not sure if should use 'timestamp' class??
                T.a(
                    story.when.strftime('%Y-%m-%d %H:%M'),
                    href=story.link,
                    cls='permalink',
                )
                text(' by ')
                trait.user_link(user=story.user)
    return container
def motionvote_preselecthtml():
    """Render the vote-page header for the motion vote identified by the
    url argument, returning the html as a string."""
    motionvotekey = request.args.get(MOTIONVOTE_KEY_URLARG)
    motionvote = MotionVote.query.filter_by(motionvotekey=motionvotekey).one()
    meeting = motionvote.meeting
    motion = motionvote.motion
    user = motionvote.user
    page = div()
    with page:
        h1('{} {}: {}\'s Vote'.format(meeting.date, meeting.purpose, user.name))
        p(b('Motion'))
        with div(style='margin-left: 1em;'):
            # motion text (and optional comments) are stored as raw html
            raw(motion.motion)
            if motion.comments:
                raw(motion.comments)
    return page.render()
def test_file_structure(self):
    """Converter output for structured_file.man must match a reference document
    built by hand with dominate tags (rows, headings, content divs, dl lists)."""
    c = Converter('Man', file='structured_file.man')
    c.translate()
    text = c.html.render()
    text = c.change_special_symbols(text)
    # build the expected document
    doc = tags.html(lang='en')
    doc = add_head(doc)
    doc_body = tags.body()
    # opening header row: left/center/right cells
    row = tags.div(cls='row')
    row = add_row(row, 'BASH(1)', '', 'BASH(1)')
    doc_body.add(row)
    with doc_body:
        tags.h2('NAME')
        content = tags.div(cls='content')
        paragraph = tags.p()
        paragraph += '\ngrep, egrep, fgrep, rgrep'
        content.add(paragraph)
        content.add(tags.h4('Simple Commands'))
        # nested content block with its own paragraph and definition lists
        content2 = tags.div(cls='content')
        content2.add(tags.br())
        paragraph = tags.p()
        paragraph += '\nA \\fIsimple command\\fP'
        content2.add(paragraph)
        def_list = tags.dl()
        def_termin = tags.dt()
        def_termin.add('\nInterpret')
        def_list.add(def_termin)
        def_list.add(tags.dd(cls='indent'))
        content2.add(def_list)
        def_list = tags.dl()
        def_termin = tags.dt(cls='short')
        def_termin.add((tags.b('%%')))
        def_list.add(def_termin)
        def_def = tags.dd(cls='indent')
        def_def.add('\nA literal')
        def_list.add(def_def)
        content2.add(def_list)
        content.add(content2)
    # closing footer row, mirroring the opening one
    row = tags.div(cls='row')
    row = add_row(row, 'GNU Bash 4.4', '2016-08-26', 'BASH(1)')
    doc_body.add(row)
    doc.add(doc_body)
    doc = c.change_special_symbols(doc.render())
    self.assertEqual(doc, text)
def create_html_news(path, News):
    """Write News.html into `path`, rendering each item with title, date,
    optional cached picture and body text.

    :param path: existing directory to write News.html into.
    :param News: iterable of items with .title/.date/.news/.links attributes.
    :raises RssException: if `path` is not a directory or the file cannot
        be created.
    """
    if not os.path.isdir(path):  # idiom fix: was `is False`
        raise RssException("Error. It isn't a folder")
    path = os.path.join(path, "News.html")
    news_html = html()
    news_html.add(head(meta(charset='utf-8')))
    news_body = news_html.add(body())
    with news_body:
        for item_news in News:
            news_body = news_body.add(div())
            news_body += h1(item_news.title)
            news_body += p(b("Date: "), a(item_news.date))
            # strip the leading '[...]' link block from the text; this was
            # previously duplicated in both branches below
            text = item_news.news
            start = text.find(']')
            text = text[start + 1:]
            if len(item_news.links) > 0:
                # item has links: embed the locally cached picture
                this_dir = os.path.abspath(os.path.dirname(__file__))
                sys.path.append(this_dir)  # NOTE(review): looks unnecessary for building a file url — confirm before removing
                news_body += img(src=f"file:///{this_dir}/images/{correct_title(item_news.title)}.jpg")
            # dropped the no-op .encode("utf-8").decode("utf-8") round-trip
            news_body += p(text, br(), br())
    try:
        with open(path, 'w', encoding='utf-8') as rss_html:
            rss_html.write(str(news_html))
    except FileNotFoundError as err:
        raise RssException('Error. No such folder\n') from err
    print("file News.html created")
def _resources(container, records, show_cw, subject_title, subject_directory, add_record, audio_widgets):
    """Render one subject's resource records into a resource_block on container.

    For each record a resource_record div is built, optionally with an audio
    buttonstrip (player/download/extra buttons), then delegated to the
    add_record callback for record-specific content.

    :param container: dominate element receiving the resource_block.
    :param records: records with 'cycle'/'week' fields, ordered by week.
    :param show_cw: passed through implicitly via _add_cw behaviour — TODO confirm.
    :param subject_title: bolded title shown at the top of the block.
    :param subject_directory: path prefix for per-week media files.
    :param add_record: callback(record, div) that fills the record's content.
    :param audio_widgets: when truthy, add audio player + buttons per record.
    """
    with container:
        with t.div(cls='resource_block'):
            t.div(t.b(subject_title), cls='subject_title')
            cycle_week = None
            for record in records:
                if cycle_week != (record['cycle'], record['week']):
                    # For each new week encountered, add the cycle and week numbers on rhs...
                    cycle_week = (record['cycle'], record['week'])
                resource_div = t.div(cls='resource_record')
                buttonstrip = t.div(cls='buttonstrip')
                if audio_widgets:
                    # media files are keyed by cycle+week, e.g. .../c1w12.mp3
                    filename_base = subject_directory + '/c%sw%s' % (
                        record['cycle'], record['week'])
                    with buttonstrip:
                        t.audio(t.source(src=_aurl(filename_base + '.mp3'),
                                         type='audio/mpeg'),
                                id=filename_base)  # invisible
                        # NOTE(review): the trailing commas below make these
                        # expressions 1-tuples; harmless, but probably accidental
                        t.button('>', onclick='getElementById("%s").play();' % filename_base),
                        t.button('$', onclick='window.open("%s","_blank");' % _aurl(filename_base + '.pdf')),
                        t.button('@', onclick='')
                _add_cw(record, buttonstrip)
                resource_div += buttonstrip
                add_record(record, resource_div)
            t.div(
                cls='clear'
            )  # force resource_block container to be tall enough for all content
def add_record(record, div):  # callback function, see _resources()
    """Append a bold prompt div followed by its answer div."""
    prompt_part = t.div(t.b(record['prompt']))
    answer_part = t.div(record['answer'])
    div += prompt_part
    div += answer_part
def resources(
        url, filters, qargs
):  # TODO: this is basically identical to select_user (and presumably other search-driven pages whose content comes via websocket); consolidate!
    """Build the Resources search page: filter dropdowns, search box,
    week-range selectors, and an (initially empty) websocket-filled result div.

    :param url: base url passed to the client-side filter script.
    :param filters: filter descriptors rendered as dropdowns — project type.
    :param qargs: query args dict; 'context' preselects the context filter.
    :returns: rendered HTML string.
    """
    d = _doc('Resources')
    with d:
        with t.div(
                cls='resource_block'
        ):  # TODO: make a 'header_block' or something; different border color, perhaps
            t.div(t.b('Search'),
                  cls='subject_title')  # TODO: replace with a magnifying-glass gif!
            with t.table():
                with t.tr():
                    for filt in filters:
                        _dropdown2(t.td(cls='dropdown', colspan=2), filt, qargs, False)
                    #TODO:DEPRECATE: _dropdown(t.td(cls = 'dropdown', colspan = 2), 'choose_context', filters['context'], False, qargs.get('context'))
                    # hardcoded subject list — 'bogus' values presumably placeholders; TODO confirm
                    _dropdown(t.td(cls='dropdown', colspan=2), 'choose_subject',
                              (('All Subjects', 'bogus'), ('Timeline', 'bogus'),
                               ('History', 'bogus'), ('Geography', 'bogus'),
                               ('Math', 'bogus'), ('Science', 'bogus'),
                               ('Latin', 'bogus'), ('English', 'bogus'),
                               ('All', 'bogus')), False)
                with t.tr():
                    # live search input; oninput drives websocket-backed filtering
                    t.td(_text_input('search', None, ('autofocus', ), {
                        'autocomplete': 'off',
                        'oninput': 'search(this.value)'
                    }, 'Search', type_='search'),
                         style='width: 87%', colspan=6)
                    _dropdown(t.td(style='width:10%', cls='dropdown'),
                              'cycle_dropdown',
                              (('Any Cycle', 'bogus'), ('Cycle 1', 'bogus'),
                               ('Cycle 2', 'bogus'), ('Cycle 3', 'bogus')), False)
                    with t.td(style='width:20%'):
                        # week-range numeric selectors (1..28)
                        t.input(type='number', placeholder='first wk',
                                id='first_week_selector', min='1', max='28',
                                oninput='filter_first_week(this.value)')
                        t.br()
                        t.input(type='number', placeholder='last wk',
                                id='last_week_selector', min='1', max='28',
                                oninput='filter_last_week(this.value)')
        t.div(
            id='search_result'
        )  # filtered results themselves are added here, in this `result` div, via websocket, as search text is typed (see javascript)
        # JS (intentionally at bottom of file; see https://faqs.skillcrush.com/article/176-where-should-js-script-tags-be-linked-in-html-documents and many stackexchange answers):
        t.script(
            _js_filter_list(url, (('choose_context', qargs.get('context')), )))
        t.script(_js_dropdown())
        t.script(_js_filter_weeks())
        t.script(_js_calendar_widget())
    return d.render()
def create_page_1(phone, data):
    """Generate www/<phone>/index.html showing aligned clips for one phone.

    For every sample: copies its gif stills/animations and wav audio into the
    www tree, then emits a 'sample' div with image, audio player, ids, and the
    sentence with the uttered word highlighted.

    :param phone: phone label (also used as the output directory name).
    :param data: mapping of clip key -> sample (key, speaker, sentence,
        audio_alignment) — project type.
    """
    doc = dominate.document(title="Phone alignments – " + phone)
    lexicon = load_lexicon()
    with doc.head:
        T.meta(**{'content': 'text/html;charset=utf-8', 'http-equiv': 'Content-Type'})
        T.meta(**{'content': 'utf-8', 'http-equiv': 'encoding'})
        T.link(rel='stylesheet', href='../style.css')
        T.script(
            src="https://code.jquery.com/jquery-3.4.1.min.js",
            integrity="sha256-CSXorXvZcTkaix6Yvo6HppcZGetbYMGWSFlBw8HfCJo=",
            crossorigin="anonymous",
        )
        T.script(type='text/javascript', src='../script.js')
    with doc:
        T.h3("Phone: ", phone)
        with T.div(cls="hint"):
            T.p("Notes:")
            with T.ul():
                T.li("hover over the images to play the gif and the audio")
                T.li("below each clip we indicate the utterance id, speaker id and text")
                T.li("the word in bold and brown indicates the \"location\" of the uttered phone")
        for k, sample in data.items():
            # Copy gif
            for t in {"still", "anima"}:
                src = os.path.join(DATA_DIR, "crops", k + f"_{t}.gif")
                dst = os.path.join("www", DATA_DIR, k + f"_{t}.gif")
                shutil.copyfile(src, dst)
            # Copy audio
            src = os.path.join(DATA_DIR, "crops", k + f".wav")
            dst = os.path.join("www", DATA_DIR, k + f".wav")
            shutil.copyfile(src, dst)
            # Create item
            with T.div(cls="sample"):
                src = os.path.join("..", DATA_DIR, k + "_still.gif")
                T.img(src=src, width=100, height=80)
                with T.audio(controls=True):
                    src = os.path.join("..", DATA_DIR, k + ".wav")
                    T.source(src=src, type="audio/wav")
                with T.div(cls="info"):
                    T.span(sample.key + " | " + sample.speaker)
                with T.div(cls="text"):
                    # drop silence tokens, then locate the word containing the phone
                    text = [word for word in sample.sentence.split() if word != "sil"]
                    text = " ".join(text)
                    # clip key encodes the phone index as its second '_' field
                    phone_id = k.split("_")[1]
                    phone_id = int(phone_id)
                    # NOTE(review): this comprehension shadows the `phone` parameter
                    phones = [phone for phone, _, _ in sample.audio_alignment]
                    i = find_word_id(lexicon, text, phones, phone_id)
                    words = text.split()
                    # words before / the highlighted word / words after
                    T.span(" ".join(words[:i]))
                    T.span(T.b(words[i]), style="color:brown")
                    T.span(" ".join(words[i + 1:]))
    directory = os.path.join("www", phone)
    path = os.path.join(directory, "index.html")
    os.makedirs(directory, exist_ok=True)
    with open(path, "w") as f:
        f.write(doc.render())
    print(path)
def func_wrapper(*args, **kwargs):
    """Wrap the field value produced by `func` in a labelled 'field' span.

    args[0] is expected to expose a .name attribute used for the label.
    """
    # creation order preserved: span, label, value (dominate may auto-add
    # elements to an active context in creation order)
    wrapper = span(cls="field")
    label = div(b(args[0].name), cls="text-center")
    value = func(args[0])
    wrapper.add(label, value)
    return wrapper
def add_record(record, div):  # callback function, see _resources()
    """Append a vocabulary pair: bold word + ' = ' in a 'pair' div, then the translation."""
    pair = t.div(t.b(record['word']), ' = ', cls='pair')
    translation = t.div(record['translation'])
    div += pair
    div += translation
def product(file_in):
    """Render one FITS product as a dominate div and classify it.

    Emits the product's metadata bullet list, its PRIMARY header table, and a
    data table per known extension (missing extensions are skipped).

    :param file_in: (local file path, remote download url) pair.
    :returns: (level, type, rendered div) tuple for the product.
    """
    file, remote = file_in
    prod = Product(file)
    with div() as di:
        h4(f"{type(prod).__name__}")
        with ul():
            with li():
                b("Description: ")
                span(mydescription(prod))
            with li():
                b("Descriptor: ")
                span(mydescriptor(prod))
            with li():
                b("Free field: ")
                span(myfreefield(prod))
            with li():
                b("Level: ")
                span(prod.level)
            with li():
                b("File cadence: ")
                span(myfilecadence(prod))
            with li():
                b("Download example: ")
                a(remote, href=remote)
        h5("PRIMARY Header")
        # NOTE(review): hdul is never closed — consider a context manager
        hdul = fits.open(file)
        header2table(hdul["PRIMARY"].header)
        for extname in ["DATA", "CONTROL", "ENERGIES", "IDB_VERSIONS"]:
            try:
                # KeyError here means the extension is absent from this file
                data = read_qtable(file, hdu=extname, hdul=hdul)
                h5(f"Extension: '{extname}'")
                # header2table(hdu[extname].header)
                data2table(data, prod.level)
            except KeyError:
                pass
    return ((prod.level, prod.type, di))
def add_record(record, div):  # callback function, see _resources()
    """Append a '<name> - tell me more' heading plus the primary sentence."""
    heading = '%s - tell me more' % record['name']
    with div:
        t.div(t.b(heading))
        t.div(record['primary_sentence'])
def add_bold(self, line):
    """Start a paragraph and append `line` (operator stripped) in bold."""
    self.add_paragraph()
    cleaned = self.remove_operator(line)
    self.paragraph += tags.b(cleaned)
def render_latest(repo: Path, digest, rendered: Path):
    """Render a repo's digest as an HTML page (and an Atom feed) under `rendered`.

    Items are grouped by link so repeat occurrences display together, then
    bucketed by the earliest date in each group. The Atom feed skips the very
    first bucket to avoid flooding readers with the initial crawl.

    :param repo: repository path; its stem names the output files.
    :param digest: object with .changes mapping date -> items — project type.
    :param rendered: output directory; writes rendered/<name>.html and
        rendered/atom/<name>.xml.
    :returns: path of the written HTML file.
    """
    logger.info('processing %s', repo)
    rtype = get_result_type(repo)
    Format = FormatTrait.for_(rtype)
    Ignore = IgnoreTrait.for_(rtype)
    import pytz
    NOW = datetime.now(tz=pytz.utc)
    name = repo.stem
    doc = dominate.document(
        title=f'axol results for {name}, rendered at {fdate(NOW)}')
    with doc.head:
        T.style(STYLE)
        raw_script(JS)
        T.link(
            rel='stylesheet',
            href="https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.48.2/codemirror.min.css"
        )
        T.script(
            src='https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.48.2/codemirror.js'
        )  # TODO use min?
    # flatten changes into (date, item) pairs
    citems: Iterator[Tuple[datetime, Item]] = chain.from_iterable(
        ((d, x) for x in zz) for d, zz in digest.changes.items())
    # group according to link, so we can display already occuring items along with newer occurences
    items2: List[Sequence[Tuple[datetime, Item]]] = [
        grp for _, grp in group_by_key(citems, key=lambda p: f'{p[1].link}').items()
    ]
    # TODO sort within each group?

    def min_dt(group: Sequence[Tuple[datetime, Item]]) -> datetime:
        # a group's bucket date is its earliest occurrence
        return min(g[0] for g in group)

    # TODO ok, this is def too many types here...
    items3: Mapping[datetime, List[Sequence[Tuple[datetime, Item]]]] = group_by_key(items2, key=min_dt)
    rss = True
    if rss:
        # pip3 install feedgen
        from feedgen.feed import FeedGenerator  # type: ignore
        fg = FeedGenerator()
        # TODO memorize items?
        fg.title(name)
        fg.id('axol/' + name)
        first = True
        for d, items in sorted(items3.items()):
            litems = list(items)
            logger.info('%s %s: atom, dumping %d items', name, d, len(litems))
            if first:
                logger.info("SKIPPING first batch to prevent RSS bloat")
                first = False
                continue
            for zz in litems:
                fe = fg.add_entry()
                # TODO not sure about css?
                # TODO not sure which date should use? I gues crawling date makes more sense..
                _d, z = zz[0]
                # TODO meh!
                id_ = z.uid  # TODO FIXME!!
                fe.id(id_)
                title = Format.title(zz) or '<no title>'  # meh
                fe.title(title)
                fe.link(href=Format.link(zz))
                # TODO not sure if it's a reasonable date to use...
                fe.published(published=d)
                fe.author(author={'name': z.user})  # TODO maybe, concat users?
                ignored = Ignore.ignore_group(zz)
                if ignored is not None:
                    # TODO not sure if it highlights with read or something?
                    content = ignored
                else:
                    content = Format.format(zz)
                # eh, XML was complaining at some non-utf characters
                content = str(content)
                # https://stackoverflow.com/a/25920392/706389 make lxml happy...
                content = re.sub(
                    u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+',
                    '', content)
                fe.content(content=content, type='CDATA')
                # fe.updated(updated=NOW)
                # TODO assemble a summary similar to HTML?
                # fe.summary()
        atomfeed = fg.atom_str(pretty=True)
        # eh, my feed reader (miniflux) can't handle it if it's 'cdata'
        # not sure which one is right
        # ugh, that didn't work because escaping desicion is based on CDATA attribute...
        atomfeed = atomfeed.replace(b'type="CDATA"', b'type="html"')
        # fe._FeedEntry__atom_content['type'] = 'html'
        atomdir = rendered / 'atom'
        atomdir.mkdir(parents=True, exist_ok=True)
        (atomdir / (name + '.xml')).write_bytes(atomfeed)
    with doc:
        with T.div(id='sidebar'):
            T.label('Blacklisted:', for_='blacklisted')
            T.div(id='blacklisted')
            T.textarea(id='blacklist-edit', rows=10)
            T.button('apply', id='blacklist-apply')
        odd = True
        # newest buckets first; odd/even alternates row shading
        for d, items in sorted(items3.items(), reverse=True):
            litems = list(items)
            odd = not odd
            logger.info('%s %s: dumping %d items', name, d, len(litems))
            with T.div(cls='day-changes'):
                with T.div():
                    T.b(fdate(d))
                    T.span(f'{len(litems)} items')
                with T.div(
                        cls=f'day-changes-inner {"odd" if odd else "even"}'):
                    for i in items:
                        # TODO FIXME use getattr to specialise trait?
                        # TODO FIXME ignore should be at changes collecting stage?
                        ignored = Ignore.ignore_group(i)
                        if ignored is not None:
                            # TODO maybe let format result handle that... not sure
                            T.div(ignored, cls='item ignored')
                            # TODO log maybe?
                            # TODO eh. need to handle in cumulatives...
                        else:
                            fi = Format.format(i)
                            T.div(fi, cls='item')
    # f*****g hell.. didn't manage to render content inside iframe no matter how I tried..
    # with T.iframe(id='blacklist', src=''):
    #     pass
    # TODO perhaps needs to be iterative...
    rf = rendered / (name + '.html')
    with rf.open('w') as fo:
        fo.write(str(doc))
    return rf
def add_image_with_text(self, visuals, aspect_ratio=1.0, width=256):
    """ Add images along with textual metadata.

    Saves the predicted image to the page's image dir, then appends a table
    containing the ImageID, three captioned figures (low-light input,
    prediction, ground truth) and the per-image metric values.

    :param visuals: dict with 'image' (name list), 'Predicted' tensor,
        'low_path'/'target_path' source dirs, and 'metrics' — TODO confirm schema.
    :param aspect_ratio: forwarded to util.save_image.
    :param width: pixel width of each displayed figure.
    """
    image_dir = self.get_image_dir()
    # NOTE(review): mutates the caller's dict — 'image' list collapses to its first entry
    visuals['image'] = visuals['image'][0]
    self.add_header(visuals['image'])
    pred = util.tensor2im(visuals['Predicted'])
    image_name = visuals['image']
    save_path = os.path.join(image_dir, 'pred_' + image_name)
    util.save_image(pred, save_path, aspect_ratio=aspect_ratio)
    t = table(
        border=1,
        style="table-layout: fixed; width: 1200px;")  # Insert a table
    self.doc.add(t)
    with t:
        with tr():
            with td(style="word-wrap: break-word;", halign="center", valign="top"):
                with p():
                    hr()
                    p(b('ImageID: '), visuals['image'])
                    br()
                    #### Add image and feature maps ####
                    with figure(style="display: inline-block;"):
                        img(style=
                            "border:0px;margin:0px;float:left;width:%dpx;" % width,
                            src=os.path.relpath(
                                os.path.join(visuals['low_path'],
                                             visuals['image']),
                                self.web_dir))
                        figcaption('Low-Light', style="text-align: center;")
                    with figure(style="display: inline-block;"):
                        img(style=
                            "border:0px;margin:0px;float:left;width:%dpx;" % width,
                            src=os.path.join('images',
                                             'pred_' + visuals['image']))
                        figcaption('Prediction', style="text-align: center;")
                    with figure(style="display: inline-block;"):
                        img(style=
                            "border:0px;margin:0px;float:left;width:%dpx;" % width,
                            src=os.path.relpath(
                                os.path.join(visuals['target_path'],
                                             visuals['image']),
                                self.web_dir))
                        figcaption('Ground Truth', style="text-align: center;")
                    #### Add image and feature maps ####
                    br()
                    # Add a table for class probabilities
                    for k, v in visuals['metrics'].items():
                        p("{} = {}".format(k, float(v)))
                    br()
def futurize_code(args=None): """Main program. Args: fixer_pkg: the name of a package where the fixers are located. args: optional; a list of command line arguments. If omitted, sys.argv[1:] is used. Returns a suggested exit status (0, 1, 2). """ # Set up option parser parser = optparse.OptionParser(usage="futurize [options] file|dir ...") parser.add_option("-V", "--version", action="store_true", help="Report the version number of futurize") parser.add_option("-a", "--all-imports", action="store_true", help="Add all __future__ and future imports to each module") parser.add_option("-1", "--stage1", action="store_true", help="Modernize Python 2 code only; no compatibility with Python 3 (or dependency on ``future``)") parser.add_option("-2", "--stage2", action="store_true", help="Take modernized (stage1) code and add a dependency on ``future`` to provide Py3 compatibility.") parser.add_option("-0", "--both-stages", action="store_true", help="Apply both stages 1 and 2") parser.add_option("-u", "--unicode-literals", action="store_true", help="Add ``from __future__ import unicode_literals`` to implicitly convert all unadorned string literals '' into unicode strings") parser.add_option("-f", "--fix", action="append", default=[], help="Each FIX specifies a transformation; default: all.\nEither use '-f division -f metaclass' etc. 
or use the fully-qualified module name: '-f lib2to3.fixes.fix_types -f libfuturize.fixes.fix_unicode_keep_u'") parser.add_option("-j", "--processes", action="store", default=1, type="int", help="Run 2to3 concurrently") parser.add_option("-x", "--nofix", action="append", default=[], help="Prevent a fixer from being run.") parser.add_option("-l", "--list-fixes", action="store_true", help="List available transformations") parser.add_option("-p", "--print-function", action="store_true", help="Modify the grammar so that print() is a function") parser.add_option("-v", "--verbose", action="store_true", help="More verbose logging") parser.add_option("--no-diffs", action="store_true", help="Don't show diffs of the refactoring") parser.add_option("-w", "--write", action="store_true", help="Write back modified files") parser.add_option("-n", "--nobackups", action="store_true", default=False, help="Don't write backups for modified files.") parser.add_option("-o", "--output-dir", action="store", type="str", default="", help="Put output files in this directory " "instead of overwriting the input files. Requires -n. " "For Python >= 2.7 only.") parser.add_option("-W", "--write-unchanged-files", action="store_true", help="Also write files even if no changes were required" " (useful with --output-dir); implies -w.") parser.add_option("--add-suffix", action="store", type="str", default="", help="Append this string to all output filenames." " Requires -n if non-empty. For Python >= 2.7 only." "ex: --add-suffix='3' will generate .py3 files.") # Parse command line arguments flags = {} refactor_stdin = False options, args = parser.parse_args(args) if options.write_unchanged_files: flags["write_unchanged_files"] = True if not options.write: warn("--write-unchanged-files/-W implies -w.") options.write = True # If we allowed these, the original files would be renamed to backup names # but not replaced. 
if options.output_dir and not options.nobackups: parser.error("Can't use --output-dir/-o without -n.") if options.add_suffix and not options.nobackups: parser.error("Can't use --add-suffix without -n.") if not options.write and options.no_diffs: warn("not writing files and not printing diffs; that's not very useful") if not options.write and options.nobackups: parser.error("Can't use -n without -w") if "-" in args: refactor_stdin = True if options.write: print("Can't write to stdin.", file=sys.stderr) return 2 # Is this ever necessary? if options.print_function: flags["print_function"] = True # Set up logging handler level = logging.DEBUG if options.verbose else logging.INFO logging.basicConfig(format='%(name)s: %(message)s', level=level) logger = logging.getLogger('libfuturize.main') if options.stage1 or options.stage2: assert options.both_stages is None options.both_stages = False else: options.both_stages = True avail_fixes = set() if options.stage1 or options.both_stages: avail_fixes.update(lib2to3_fix_names_stage1) avail_fixes.update(libfuturize_fix_names_stage1) if options.stage2 or options.both_stages: avail_fixes.update(lib2to3_fix_names_stage2) avail_fixes.update(libfuturize_fix_names_stage2) if options.unicode_literals: avail_fixes.add('libfuturize.fixes.fix_unicode_literals_import') if options.version: print(__version__) return 0 if options.list_fixes: print("Available transformations for the -f/--fix option:") # for fixname in sorted(refactor.get_all_fix_names(fixer_pkg)): for fixname in sorted(avail_fixes): print(fixname) if not args: return 0 if not args: print("At least one file or directory argument required.", file=sys.stderr) print("Use --help to show usage.", file=sys.stderr) return 2 unwanted_fixes = set() for fix in options.nofix: if ".fix_" in fix: unwanted_fixes.add(fix) else: # Infer the full module name for the fixer. # First ensure that no names clash (e.g. 
# lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): found = [f for f in avail_fixes if f.endswith('fix_{0}'.format(fix))] if len(found) > 1: print("Ambiguous fixer name. Choose a fully qualified " "module name instead from these:\n" + "\n".join(" " + myf for myf in found), file=sys.stderr) return 2 elif len(found) == 0: print("Unknown fixer. Use --list-fixes or -l for a list.", file=sys.stderr) return 2 unwanted_fixes.add(found[0]) extra_fixes = set() if options.all_imports: if options.stage1: prefix = 'libfuturize.fixes.' extra_fixes.add(prefix + 'fix_add__future__imports_except_unicode_literals') else: # In case the user hasn't run stage1 for some reason: prefix = 'libpasteurize.fixes.' extra_fixes.add(prefix + 'fix_add_all__future__imports') extra_fixes.add(prefix + 'fix_add_future_standard_library_import') extra_fixes.add(prefix + 'fix_add_all_future_builtins') explicit = set() if options.fix: all_present = False for fix in options.fix: if fix == 'all': all_present = True else: if ".fix_" in fix: explicit.add(fix) else: # Infer the full module name for the fixer. # First ensure that no names clash (e.g. # lib2to3.fixes.fix_blah and libfuturize.fixes.fix_blah): found = [f for f in avail_fixes if f.endswith('fix_{0}'.format(fix))] if len(found) > 1: print("Ambiguous fixer name. Choose a fully qualified " "module name instead from these:\n" + "\n".join(" " + myf for myf in found), file=sys.stderr) return 2 elif len(found) == 0: print("Unknown fixer. 
Use --list-fixes or -l for a list.", file=sys.stderr) return 2 explicit.add(found[0]) if len(explicit & unwanted_fixes) > 0: print("Conflicting usage: the following fixers have been " "simultaneously requested and disallowed:\n" + "\n".join(" " + myf for myf in (explicit & unwanted_fixes)), file=sys.stderr) return 2 requested = avail_fixes.union(explicit) if all_present else explicit else: requested = avail_fixes.union(explicit) fixer_names = (requested | extra_fixes) - unwanted_fixes input_base_dir = os.path.commonprefix(args) if (input_base_dir and not input_base_dir.endswith(os.sep) and not os.path.isdir(input_base_dir)): # One or more similar names were passed, their directory is the base. # os.path.commonprefix() is ignorant of path elements, this corrects # for that weird API. input_base_dir = os.path.dirname(input_base_dir) # if options.output_dir: # input_base_dir = input_base_dir.rstrip(os.sep) # logger.info('Output in %r will mirror the input directory %r layout.', # options.output_dir, input_base_dir) # Initialize the refactoring tool if future.utils.PY26: extra_kwargs = {} else: extra_kwargs = { 'append_suffix': options.add_suffix, 'output_dir': options.output_dir, 'input_base_dir': input_base_dir, } # Remove results directory. if os.path.isdir(RESULTS_DIR): shutil.rmtree(RESULTS_DIR) os.mkdir(RESULTS_DIR) if os.path.isdir(DIFF_DIR): shutil.rmtree(DIFF_DIR) os.mkdir(DIFF_DIR) # We override their RefactoringTool with `FileRefactoringTool` rt = FileRefactoringTool( sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs, **extra_kwargs) # Refactor all files and directories passed as arguments if not rt.errors: if refactor_stdin: rt.refactor_stdin() else: try: rt.refactor(args, options.write, None, options.processes) except refactor.MultiprocessingUnsupported: assert options.processes > 1 print("Sorry, -j isn't supported on this platform.", file=sys.stderr) return 1 rt.summarize() # This is our own custom html reporting. 
table_body = tbody() remove_line_count_total = 0 with table_body: for file_name, file_summary in DiffSummary.list_all(): with tr(): td(a(file_name, href=file_summary.href)) td(file_summary.add_line_count, style="text-align:right") td(file_summary.remove_line_count, style="text-align:right") td(file_summary.percent_coverage, style="text-align:right") remove_line_count_total += file_summary.remove_line_count with document(title='2/3 Summary') as doc: h1('2/3 Summary', style='padding: 0 40px;') p('Total lines that need to be removed:', style='padding: 0 40px;').add(b(remove_line_count_total)) summary_table = table(width='100%', style="padding: 20px 40px; margin: 0 auto;") with summary_table.add(thead()): with tr(): th('File Name', style="text-align:left") th('Add Lines', style="text-align:right") th('Remove Lines', style="text-align:right") th('Coverage %', style="text-align:right") summary_table.add(table_body) with open('{results_dir}/summary.html'.format(results_dir=RESULTS_DIR), 'w+') as summary_file: summary_file.write(doc.render()) # Write a machine readable report that can be parsed later. json_report = { 'summary': { 'remove_line_count_total': remove_line_count_total }, 'files': [ { 'file_name': file_name, 'add_line_count': file_summary.add_line_count, 'remove_line_count': file_summary.remove_line_count, 'percent_coverage': file_summary.percent_coverage, } for file_name, file_summary in DiffSummary.list_all() ] } json.dump(json_report, open('{results_dir}/report.json'.format(results_dir=RESULTS_DIR), 'w+')) # Return error status (0 if rt.errors is zero) return int(bool(rt.errors))