def processFooter(self):
    # Build the footer fragment (separator rule + "name (enrollment id)")
    # for the generated report page and return it as an HTML string.
    # NOTE(review): nesting reconstructed conventionally (head/body under html).
    doc, tag, text = Doc().tagtext()
    with tag('html'):
        with tag('head'):
            with tag('title'):
                text('footer html generates by ARG101')
            with tag('style'):
                text('span{text-align: left;}')
        with tag('body'):
            doc.stag('hr')
            with tag('span'):
                # Assumes self.studentName / self.enrollmentId were set by the caller.
                text("{} ({})".format(self.studentName, self.enrollmentId))
    return doc.getvalue()
def show_xml():
    """Render cached page data as an indented XML document with a <sac_uo> root.

    Every subdirectory of CACHEDIR is treated as one page; its latest cached
    record is fetched via get_data_from_page() and serialized as one <item>.
    """
    pages = [f for f in os.listdir(CACHEDIR)
             if os.path.isdir(os.path.join(CACHEDIR, f))]
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="utf-8" ?>')
    # xml tags
    with tag('sac_uo'):
        for p in pages:
            item = get_data_from_page('%s/latest' % p)
            if item:
                with tag('item'):
                    with tag('id'):
                        text(item['id'])
                    with tag('nom'):
                        text(item['nom'])
                    with tag('resp'):
                        text(item['resp'])
                    # mini hack
                    with tag('iddep'):
                        text(item['iddep'])
                    with tag('dep'):
                        text(item['dep'])
                    with tag('centers'):
                        text(item['centers'])
    # CRLF + 4-space indent for the consumer of this feed.
    return indent(
        doc.getvalue(),
        indentation=' ' * 4,
        newline='\r\n'
    )
def media_player(request, media_src=''):
    # Django view. media_src is expected as "<type>/<server>/<path...>" where
    # type is "video" or "audio". Resolves the path against the configured
    # MediaServer, picks a cover image (.jpg/.png) from the same directory,
    # computes the next playable item, and renders the player template.
    doc, tag, text = Doc().tagtext()
    types = {'video': 'V', 'audio': 'A'}
    requested_type = media_src.split('/')[0]
    # Raises KeyError for an unknown type prefix — presumably guarded upstream
    # by URL routing; TODO confirm.
    media_servers = MediaServer.objects.filter(media_type=types[requested_type])
    image_src = ''
    next_path = ''
    if len(media_src.split('/')) >= 2:
        requested_server = media_src.split('/')[1]
    else:
        requested_server = ''
    for server in media_servers:
        if server.name == requested_server:
            # Rewrite media_src from URL form to the server's URL namespace.
            media_src = server.server_url + media_src[len(requested_type + '/' + requested_server):]
            parent_dir = server.directory_path + os.path.split(media_src)[0][len(server.server_url):]
            plist = sorted(os.listdir(parent_dir))
            media_path = server.directory_path + media_src[len(server.server_url):]
            media_base = os.path.basename(media_path)
            # Last matching image wins as the cover art.
            for item in os.listdir(parent_dir):
                if os.path.splitext(item)[1].lower() in ['.jpg', '.png']:
                    image_src = os.path.join(os.path.split(media_src)[0], item)
            # Next item in alphabetical order, if we are not at the end.
            if len(plist) - 1 > plist.index(media_base):
                next_path = urlpath(server, os.path.join(parent_dir, plist[plist.index(media_base) + 1]))
            display_directory(server.directory_path, server, doc)
    ml = media_src.split('/')
    context = {
        'medialisting': doc.getvalue(),
        'mediasource': media_src,
        'mediatype': requested_type,
        'imagesource': image_src,
        'heading': os.path.splitext(ml[len(ml) - 1])[0],
        'nextmedia': next_path,
    }
    return render(request, 'media_player/media_player.html', context)
def print_subsection(section, subsection, html=False, current=None):
    """Method for printing subsection data.

    Parameters
    ----------
    section: list
      section data
    subsection: list
      subsection data
    html: bool, optional
      activate html tags
    current: list, optional
      list containing current section, subsection and slide number
      used for highlighting current slide into the TOC

    Returns
    -------
    str
      string containing the pretty printed subsection data
    """
    # Label text (e.g. " 1.2 Title") was previously built three times with
    # identical concatenations; build it once.
    label = ' ' + str(section[0]) + '.' + str(subsection[3]) + ' ' + subsection[1]
    if html:
        doc, tag, text = Doc().tagtext()
        with tag('a', href='#slide-' + str(subsection[4])):
            # Highlight the entry that corresponds to the current slide.
            if current and current[0] == section[0] and current[1] == subsection[0]:
                klass = 'toc-subsection emph'
            else:
                klass = 'toc-subsection'
            with tag('span', klass=klass):
                text(label)
        return '\n' + doc.getvalue()
    return '\n' + label
def environment_begin_end(env, label=None, number=None, optional=None):
    # Return a (begin, end) pair of HTML snippets for a LaTeX-like
    # environment. The "document" environment maps directly to the <html>
    # wrapper; lists map to <ul>/<ol> via list_envs; everything else becomes
    # a <div class="env_..."> with optional name/number/optional-text spans.
    if env == "document":
        return '<html><meta charset="UTF-8">' + mathjax_snipped, "</html>\n"
    doc, tag, text = Doc().tagtext()
    # Unique marker: the rendered markup is split at this sign to separate
    # the "begin" part from the "end" part.
    stop_sign = "!!!chahpieXeiDu3zeer3Ki"
    if env == "itemize" or env == "enumerate":
        with tag(list_envs[env]):
            text(stop_sign)
    else:
        with tag("div", klass="env_" + mk_safe_css_ident(env)):
            if label:
                doc.attr(id="label_" + mk_safe_css_ident(label))
            if env in named_envs:
                with tag("span", klass="env__name"):
                    text(named_envs[env])
                # Separator after the name span (reconstructed as outside the
                # span — TODO confirm against rendered output).
                text(" ")
            if number:
                with tag("span", klass="env__number"):
                    text(number)
                text(" ")
            if optional:
                with tag("span", klass="env__opt_text"):
                    text(optional)
            text(stop_sign)
    ret = doc.getvalue()
    index = ret.index(stop_sign)
    begin = ret[:index] + "\n"
    end = ret[index + len(stop_sign):] + "\n"
    return (begin, end)
def handle_list(self, tag: QqTag, type):
    # Render a QqTag list as an HTML list element of the given `type`
    # ("ul"/"ol"); each child "item" tag becomes a formatted <li>.
    # NOTE: the parameter name `type` shadows the builtin but is part of the
    # public interface, so it is kept.
    doc, html, text = Doc().tagtext()
    with html(type):
        for item in tag("item"):
            with html("li"):
                doc.asis(self.format(item))
    return doc.getvalue()
def now_playing(request, data):
    """Last.fm-compatible track.updateNowPlaying handler.

    Validates the session key, logs the submission and returns the
    <nowplaying> XML acknowledgement (or "NOPE" for an invalid session).
    """
    sk = data['sk']
    session = Session.load(sk)
    if not session:
        # Python-2-only print statements converted to the parenthesized form,
        # which behaves identically under Python 2 and 3 for single arguments.
        print("Invalid session")
        return "NOPE"
    track = data['track']
    artist = data['artist']
    album = data['album']
    albumArtist = data['albumArtist']
    print("NOW PLAYING- User: %s, Artist: %s, Track: %s, Album: %s"
          % (session.user.name, artist, track, album))
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status="ok"):
        with tag('nowplaying'):
            with tag('track', corrected="0"):
                text(track)
            with tag('artist', corrected="0"):
                text(artist)
            with tag('album', corrected="0"):
                text(album)
            with tag('albumArtist', corrected="0"):
                text(albumArtist)
            with tag('ignoredMessage', code="0"):
                text('')
    return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def handle_proof(self, tag: QqTag) -> str:
    """
    Uses tags: proof, label, outline, of

    Examples:

        \proof
        Here is the proof

        \proof \of theorem \ref{thm:1}
        Now we pass to proof of theorem \ref{thm:1}

    :param tag:
    :return: HTML of proof
    """
    doc, html, text = Doc().tagtext()
    with html("div", klass="env env__proof"):
        if tag.find("label"):
            doc.attr(id=self.label2id(tag._label.value))
        with html("span", klass="env-title env-title__proof"):
            # "Proof outline" when an \outline tag is present, else "Proof".
            if tag.exists("outline"):
                proofline = 'Proof outline'
            else:
                proofline = 'Proof'
            # Title line: localized "Proof[ outline]" plus the optional
            # "of <theorem>" part, terminated with a period.
            doc.asis(join_nonempty(self.localize(proofline),
                                   self.format(tag.find("of"), blanks_to_pars=False)).rstrip() + ".")
        doc.asis(rstrip_p(" " + self.format(tag, blanks_to_pars=True)))
        # Tombstone marking the end of the proof.
        doc.asis("<span class='end-of-proof'>∎</span>")
    return doc.getvalue() + "\n<p>"
def handle_h(self, tag: QqTag) -> str:
    """
    Uses tags: h1, h2, h3, h4, label, number

    Example:

        \h1 This is first header

        \h2 This is the second header \label{sec:second}

    :param tag:
    :return: HTML of the header
    """
    doc, html, text = Doc().tagtext()
    with html(tag.name):
        doc.attr(id=self.tag_id(tag))
        if tag.find("number"):
            with html("span", klass="section__number"):
                # The number is a self-link to the header's own anchor.
                with html("a", href="#" + self.tag_id(tag), klass="section__number"):
                    text(tag._number.value)
        text(self.format(tag, blanks_to_pars=False))
    ret = doc.getvalue()
    if tag.next() and isinstance(tag.next(), str):
        ret += "<p>"
    # BUG FIX: the original returned doc.getvalue() here, silently discarding
    # the "<p>" appended to `ret` for a following text node.
    return ret
def test_list_jobs(self):
    # Build the XML payload Rundeck would return for a single job, register
    # it as a mocked HTTP response, and check that list_jobs() parses it
    # back into the expected structures.
    project_name = 'test_project'
    jobs = [{'name': 'job 1', 'id': '1', 'project': project_name,
             'description': 'job 1 description'}]
    doc, tag, text = Doc().tagtext()
    with tag('jobs', count=len(jobs)):
        for job in jobs:
            with tag('job'):
                with tag('name'):
                    text(job['name'])
                with tag('project'):
                    text(job['project'])
                with tag('description'):
                    text(job['description'])
    responses.add(responses.GET,
                  'http://rundeck.host/api/11/jobs?project=test_project',
                  match_querystring=True,
                  body=self.rundeck_success(doc.getvalue()),
                  content_type='text/xml',
                  status=200)
    project_jobs = self.rundeck_api.list_jobs(project_name)
    self.assertEquals(len(project_jobs), len(jobs))
    self.assertEquals(project_jobs[0]['name'], jobs[0]['name'])
def process_dump_file(self, p):
    # Parse tcpdump-style output read from subprocess `p` line by line,
    # grouping related lines into Event objects, appending them to the
    # module-level `events` list, and rendering each into the HTML report.
    # Returns the indented HTML built so far.
    add_event = 0
    main_doc, main_tag, main_text = Doc().tagtext()
    for line in iter(p.stdout.readline, b''):
        # Compares first with timestamp regex. Timestamp regex indicates a new packet
        m = reg_timestamp.match(line.decode())
        if m:
            # add_event indicates if there is a processed event already
            if add_event == 1:
                events.append(event)
                self.add_event_to_html(event, main_doc, main_tag, main_text)
            add_event = 1
            event = Event(m.group('timestamp'), m.group('protocol'))
        m = reg_ip_1.search(line.decode())
        if m:
            event.id = m.group('id')
            event.t_protocol = m.group('transport_protocol')
            event.length = m.group('length')
        else:
            m = reg_ip_2.search(line.decode())
            if m:
                event.src = m.group('src')
                event.src_port = m.group('src_port')
                event.dst = m.group('dst')
                event.dst_port = m.group('dst_port')
        m = reg_length.search(line.decode())
        # If TCP data is not 0, the packet gets further processing
        # NOTE(review): m.group('length') is a str, so the != 0 comparison is
        # always true when the regex matches — confirm intended behavior.
        if m and m.group('length') != 0:
            length = int(m.group('length'))
            self.process_host(event, length)
        # If there is a port unreachable error, event gets discarded
        m = reg_port_error.search(line.decode())
        if m:
            add_event = 0
    # NOTE(review): the last pending event is never appended/rendered once
    # the stream ends — confirm whether dropping it is intended.
    return indent(main_doc.getvalue())
def saveRelease(self, release):
    """
    Create an XML file of release metadata that Dalet will be happy with

    :param release: Processed release metadata from MusicBrainz
    """
    output_dir = self.release_meta_dir
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('Titles'):
        with tag('GlossaryValue'):
            with tag('GlossaryType'):
                text('Release')
            # Key1 and ItemCode both carry the MusicBrainz release ID.
            with tag('Key1'):
                text(release.mbID)
            with tag('ItemCode'):
                text(release.mbID)
            with tag('KEXPReviewRich'):
                text(release.review)
    formatted_data = indent(doc.getvalue())
    # File name: "r<mbID>.xml" inside the configured metadata directory.
    output_file = path.join(output_dir, 'r' + release.mbID + ".xml")
    # Binary write so the bytes match the declared UTF-8 encoding exactly.
    with open(output_file, "wb") as f:
        f.write(formatted_data.encode("UTF-8"))
def as_xml(self, indentation=False):
    # Serialize this Oozie shell action to XML; pretty-printed when
    # `indentation` is true.
    doc, tag, text = Doc().tagtext()
    with tag('shell', xmlns="uri:oozie:shell-action:0.2"):
        # do we actually need these even if we dont use them?
        with tag('job-tracker'):
            text(os.environ["JOBTRACKER"])
        with tag('name-node'):
            text(os.environ["NAMENODE"])
        with tag('exec'):
            text(self.command)
        for argument in self.arguments:
            with tag('argument'):
                text(argument)
        for env in self.environment_vars:
            with tag('env-var'):
                text(env)
        for archive in self.archives:
            with tag('archive'):
                text(archive)
        for f in self.files:
            with tag('file'):
                text(f)
    xml = doc.getvalue()
    if indentation:
        return indent(xml)
    else:
        return xml
def getSession(request, data):
    """Last.fm-compatible auth.getSession handler.

    Consumes a validated token and returns a freshly created session key as
    XML; returns "NOPE" for a missing or unvalidated token.
    """
    token = Token.load(data['token'])
    if not token:
        # Python-2-only print statements converted to the parenthesized form,
        # which behaves identically under Python 2 and 3 for single arguments.
        print("Invalid token")
        return "NOPE"
    if not token.user:
        print("Token not validated")
        return "NOPE"
    print("GRANTING SESSION for token %s" % token.token)
    token.consume()
    session = Session.create(token.user)
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status="ok"):
        with tag('session'):
            with tag('name'):
                text(session.user.name)
            with tag('key'):
                text(session.id)
            with tag('subscriber'):
                text('0')
    return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def __init__(self, projectname=None):
    """Create an HTML page document.

    With no project name the page is a plain log page; otherwise the
    instance registers itself in the Htmlpage.htmlpages class registry.
    """
    if projectname is None:
        # Used for just an empty html page with the same style and template
        self.this_is_a_log_html_page = True
    else:
        Htmlpage.htmlpages[projectname] = self
    # Both branches need a fresh yattag document; previously this identical
    # assignment was duplicated in each branch.
    self.doc, self.tag, self.text = Doc().tagtext()
def _repr_html_(self):
    # Jupyter display hook: render this leaf node as a labelled <div>
    # containing the node text.
    doc, tag, txt = Doc().tagtext()
    with tag("div", klass="tree-node tree-leaf"):
        self._label_html(doc)
        with tag("span", klass="tree-text"):
            txt(self.text)
    return doc.getvalue()
def submit(self, bund, files=[]):
    """Upload bundle files to HDFS, then submit the bundle job to Oozie.

    :param bund: bundle object providing `path` and `name`
    :param files: open file objects to upload next to the bundle
    NOTE(review): the mutable default `files=[]` is kept for interface
    compatibility; it is only iterated, never mutated, so it is safe here.
    """
    hdfs = PyWebHdfsClient(host=os.environ["WEBHDFS_HOST"], port='14000',
                           user_name='oozie')
    for f in files:
        hdfs.create_file("{}/{}".format(bund.path, f.name), f.read())
    # Minimal Oozie job configuration XML.
    doc, tag, text = Doc().tagtext()
    with tag("configuration"):
        with tag("property"):
            with tag("name"):
                text("user.name")
            with tag("value"):
                text("oozie")
        with tag("property"):
            with tag("name"):
                text("oozie.bundle.application.path")
            with tag("value"):
                text("/" + bund.path + "/" + bund.name)
    configuration = doc.getvalue()
    response = post("{0}/oozie/v1/jobs".format(self.url),
                    data=configuration,
                    headers={'Content-Type': 'application/xml'})
    if response.status_code > 399:
        # Python-2-only print statements converted to the parenthesized form,
        # which behaves identically under Python 2 and 3 for single arguments.
        print(response.headers["oozie-error-message"])
        print(response.status_code)
        print(response.content)
def handle_paragraph(self, tag: QqTag):
    """
    Render a paragraph tag as an inline run-in heading span.

    :param tag: paragraph tag to render
    :return: HTML string beginning a new paragraph
    """
    doc, html, text = Doc().tagtext()
    with html("span", klass="paragraph"):
        # Heading text is formatted inline and terminated with a period.
        doc.asis(self.format(tag, blanks_to_pars=False).strip() + ".")
    return "<p>" + doc.getvalue() + " "
def test_input_no_slash(self):
    # With stag_end='>' void elements must be emitted HTML-style, i.e.
    # without the XML self-closing "/". Attribute order is unspecified,
    # so both orderings are accepted.
    doc = Doc(stag_end='>')
    doc.input('passw', type="password")
    self.assertTrue(
        doc.getvalue() in (
            '<input name="passw" type="password">',
            '<input type="password" name="passw">'
        )
    )
def as_xml(self):
    # Serialize this Oozie bundle and its coordinators to indented XML.
    doc, tag, text = Doc().tagtext()
    doc.asis("<?xml version='1.0' encoding='UTF-8'?>")
    with tag('bundle-app', name=self.name, xmlns="uri:oozie:bundle:0.1"):
        for coordinator in self.coordinators:
            with tag("coordinator", name=coordinator.name):
                with tag("app-path"):
                    text("/" + coordinator.path + "/" + coordinator.name)
    return indent(doc.getvalue())
def getToken(request, data):
    """Last.fm-compatible auth.getToken handler: issue a fresh token and
    return it wrapped in the <lfm> XML envelope."""
    token = Token.generate()
    # Python-2-only print statement converted to the parenthesized form,
    # which behaves identically under Python 2 and 3 for single arguments.
    print("ISSUING TOKEN %s" % token.token)
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status="ok"):
        with tag('token'):
            text(token.token)
    return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def record_listens(request, data):
    """
    Submit the listen in the lastfm format to be inserted in db.
    Accepts listens for both track.updateNowPlaying and track.scrobble methods.
    """
    output_format = data.get('format', 'xml')
    try:
        sk, api_key = data['sk'], data['api_key']
    except KeyError:
        raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format)  # Invalid parameters

    session = Session.load(sk)
    if not session:
        if not Token.is_valid_api_key(api_key):
            raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format)  # Invalid API_KEY
        raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY, output_format=output_format)  # Invalid Session KEY

    # Group "key[N]" form fields by N; bare keys go into group 0.
    lookup = defaultdict(dict)
    for key, value in data.items():
        if key in ["sk", "token", "api_key", "method", "api_sig"]:
            continue
        # Raw string: '(.*)\[(\d+)\]' contains invalid escape sequences that
        # trigger warnings on modern Python.
        matches = re.match(r'(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value

    if request.form['method'].lower() == 'track.updatenowplaying':
        for i, listen in lookup.items():
            if 'timestamp' not in listen:
                listen['timestamp'] = calendar.timegm(datetime.now().utctimetuple())

    # Convert to native payload then submit 'em after validation.
    listen_type, native_payload = _to_native_api(lookup, data['method'], output_format)
    for listen in native_payload:
        validate_listen(listen, listen_type)
    user = db_user.get(session.user_id)
    augmented_listens = insert_payload(native_payload, user, listen_type=listen_type)

    # With corrections than the original submitted listen.
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        if listen_type == 'playing_now':
            doc.asis(create_response_for_single_listen(list(lookup.values())[0],
                                                       augmented_listens[0],
                                                       listen_type))
        else:
            # len(lookup) equals len(lookup.values()) without building a view.
            accepted_listens = len(lookup)
            # Currently LB accepts all the listens and ignores none
            with tag('scrobbles', accepted=accepted_listens, ignored='0'):
                for original_listen, augmented_listen in zip(list(lookup.values()), augmented_listens):
                    doc.asis(create_response_for_single_listen(original_listen,
                                                               augmented_listen,
                                                               listen_type))
    return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()),
                           output_format)
def to_html(self, match, toc_depth=None, max_time=None, current=None):
    """Convert logo metadata to html stream.

    Parameters
    ----------
    match: re.match object
    toc_depth: optional
      TOC depth (converted with int() for the toc renderer)
    max_time: str
      max time for presentation
    current: optional
      current position, forwarded to the toc renderer

    Returns
    -------
    str: html stream
    """
    # Dispatch on the metadata name; special names delegate to dedicated
    # renderers, everything else is wrapped into a <span class="metadata">.
    if 'toc' in self.name:
        return self.toc_to_html(match=match, current=current, depth=int(toc_depth))
    if 'logo' in self.name:
        return self.logo_to_html(match)
    elif 'timer' in self.name:
        return self.timer_to_html(match=match, max_time=max_time)
    elif 'custom' in self.name:
        return self.custom_to_html(match)
    else:
        doc = Doc()
        with doc.tag('span', klass='metadata'):
            # Optional inline style captured by the regex match.
            style = None
            if match.group('style'):
                style = str(match.group('style'))
            if style:
                doc.attr(style=style)
            if isinstance(self.value, list):
                doc.asis(', '.join(self.value))
            else:
                doc.asis(str(self.value))
    return doc.getvalue()
def section_tag(level, title, label=None, number=None):
    # Render a section heading <hN> with an optional label anchor id and an
    # optional "<number>." prefix span.
    doc, tag, text = Doc().tagtext()
    with tag("h" + str(level), klass=mk_safe_css_ident("section")):
        if label:
            doc.attr(id="label_" + mk_safe_css_ident(label))
        if number:
            with tag("span", klass="section__number"):
                text(number + ".")
            text(" ")
        text(title)
    return doc.getvalue() + "\n"
def resolve_ref_match(m):
    """Turn a reference regex match into an <a class="ref"> link.

    The link text is the counter registered for the label; a missing or
    unknown label renders as the placeholder "(??)".
    """
    ref_label = m.group(1)
    if not ref_label:
        return "(??)"
    counter_text = label_to_counter.get(ref_label, "(??)")
    anchor = "label_" + mk_safe_css_ident(ref_label)
    doc, tag, text = Doc().tagtext()
    with tag("a", href="#" + anchor, klass="ref"):
        text(counter_text)
    return doc.getvalue()
def _repr_html_(self):
    """Jupyter display hook: render the node tree inside a <pre> block.

    TODO: document why we call out to a separate method (for the index
    argument).
    """
    doc, tag, txt = Doc().tagtext()
    with tag("pre", style="padding: 2px;"):
        # Recursive rendering starts at depth/index 0.
        self._to_html(doc, 0)
    return doc.getvalue()
def as_xml(self, indentation=False):
    # Serialize this Oozie sub-workflow action; pretty-printed when
    # `indentation` is true.
    doc, tag, text = Doc().tagtext()
    with tag('sub-workflow'):
        with tag('app-path'):
            text("/" + self.sub_wf_path + "/" + self.name)
        # Forward the parent workflow configuration to the sub-workflow.
        doc.stag("propagate-configuration")
    xml = doc.getvalue()
    if indentation:
        return indent(xml)
    else:
        return xml
def to_html(self):
    """Method for inserting box to the html doc."""
    doc = Doc()
    with doc.tag('div', klass='box'):
        # Optional style for the outer box.
        if self.style:
            doc.attr(style=self.style)
        with doc.tag('div', klass='box content'):
            # Optional style for the content container.
            if self.ctn_options:
                doc.attr(style=self.ctn_options)
            doc.asis(self.ctn)
        self.put_caption(doc=doc)
    return doc.getvalue()
def scrobble(request, data):
    """Last.fm-compatible track.scrobble handler.

    Records every submitted listen for the session user and returns the
    <scrobbles> XML response; returns "NOPE" for an invalid session.
    """
    sk = data['sk']
    session = Session.load(sk)
    if not session:
        # Python-2-only print statements converted to the parenthesized form,
        # which behaves identically under Python 2 and 3 for single arguments.
        print("Invalid session")
        return "NOPE"
    # FUUUUUUU PHP ARRAYS
    lookup = defaultdict(dict)
    for key, value in data.items():
        # Raw string: the pattern contains backslash escapes.
        matches = re.match(r'(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status="ok"):
        with tag('scrobbles'):
            for _, dataset in lookup.items():
                if 'track' not in dataset:
                    continue
                artist = dataset['artist']
                track = dataset['track']
                album = dataset['album']
                albumArtist = dataset['albumArtist']
                timestamp = dataset['timestamp']
                print("SCROBBLE- User: %s, Artist: %s, Track: %s, Album: %s"
                      % (session.user.name, artist, track, album))
                session.user.scrobble(timestamp, artist, track, album, albumArtist)
                with tag('scrobble'):
                    with tag('track', corrected="0"):
                        text(track)
                    with tag('artist', corrected="0"):
                        text(artist)
                    with tag('album', corrected="0"):
                        text(album)
                    with tag('albumArtist', corrected="0"):
                        text(albumArtist)
                    with tag('timestamp', corrected="0"):
                        text(timestamp)
                    with tag('ignoredMessage', code="0"):
                        text('')
    return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def create_response_for_single_listen(original_listen, augmented_listen, listen_type):
    """Create XML response for a single listen.

    Args:
        original_listen (dict): Original submitted listen.
        augmented_listen (dict): Augmented(corrected) listen.
        listen_type (string): Type of listen ('playing_now' or 'listens').

    Returns:
        XML response for a single listen.
        If listen is of type 'playing_now' response is as described in following link
        https://www.last.fm/api/show/track.updateNowPlaying
        Otherwise response is as described in following link
        https://www.last.fm/api/show/track.scrobble .
    """
    # Each "corrected" flag defaults to '0' and is flipped to '1' whenever
    # the augmented value differs from the client submission.
    corrected = defaultdict(lambda: '0')
    track = augmented_listen['track_metadata']['track_name']
    if original_listen['track'] != augmented_listen['track_metadata']['track_name']:
        corrected['track'] = '1'
    artist = augmented_listen['track_metadata']['artist_name']
    if original_listen['artist'] != augmented_listen['track_metadata']['artist_name']:
        corrected['artist'] = '1'
    ts = augmented_listen['listened_at']
    # Album artist falls back to the track artist when not supplied.
    albumArtist = artist
    if original_listen.get('albumArtist', original_listen['artist']) != artist:
        corrected['albumArtist'] = '1'
    album = augmented_listen['track_metadata'].get('release_name', '')
    if original_listen.get('album', '') != album:
        corrected['album'] = '1'
    doc, tag, text = Doc().tagtext()
    with tag('nowplaying' if listen_type == 'playing_now' else 'scrobble'):
        with tag('track', corrected=corrected['track']):
            text(track)
        with tag('artist', corrected=corrected['artist']):
            text(artist)
        with tag('album', corrected=corrected['album']):
            text(album)
        with tag('albumArtist', corrected=corrected['albumArtist']):
            text(albumArtist)
        with tag('timestamp'):
            text(ts)
        with tag('ignoredMessage', code="0"):
            text('')
    return doc.getvalue()
def simulate(self, policy, iterations, output, figure_template, html, gif, anim_delay):
    # Run a stochastic rollout of `policy` for `iterations` steps, logging
    # states/rewards, plotting each visited state, assembling an animated
    # GIF and writing an HTML report. (Python 2 code: print statements.)
    raw_policy = deepcopy(policy)
    # Policy dictionary
    policy = {
        tuple([pol['state'][agent][1] for agent in self.agents]): pol['action']
        for pol in policy
    }
    print ''
    print 'Goal State Set:'
    print self.name_goal
    print ''
    print 'Initial State:'
    # Random initial location for every agent.
    agent_locs = OrderedDict([(agent, None) for agent in self.agents])
    for agent in self.agents:
        agent_locs[agent] = np.random.choice(self.locs)
    print agent_locs.values()
    rwd = 0
    print ''
    print 'Steps'
    history = {}
    for i in range(iterations):
        # Types of agent on each location
        agent_classes_on_loc = OrderedDict([(loc, []) for loc in self.locs])
        for agent in self.agents:
            agent_classes_on_loc[agent_locs[agent]].append(
                self.agents_types[agent])
        # State idx
        state_idx = self.s.index(
            tuple([self.locs[s] for s in agent_locs.values()]))
        # Computing rewards
        rwd = 0
        for loc, types in self.name_goal.items():
            # Reward 1 per goal location whose required types are all present.
            if not (set(types) - set(agent_classes_on_loc[loc])):
                rwd += 1
        # Printing history
        history[i] = {
            'state_idx': state_idx,
            'agent_locs': copy.deepcopy(agent_locs),
            'rwd': rwd
        }
        # Updating agent location, given policy
        for agent in self.agents:
            # Locations adjacent to the agent location
            adj_locs = self.name_loc_roads[agent_locs[agent]]
            # Baseline probability of going to any adjacent location (error)
            pdf = np.zeros(len(self.locs), dtype=np.float32)
            for adj_loc in adj_locs:
                pdf[self.locs[adj_loc]] = self.error / (len(adj_locs) - 1)
            # Agent intended next state
            sn = self.locs[policy[tuple(agent_locs.values())][agent][-1]]
            # Success probability
            pdf[sn] = 1.0 - self.error
            # Transition
            agent_locs[agent] = np.random.choice(self.locs, p=pdf)
    # Printing history
    self.plot(raw_policy, output + figure_template,
              states=[hist['state_idx'] for hist in history.values()])
    # Printing history
    for h, hist in history.items():
        print h, hist
    # Creating animated GIF
    imgs = ImageList()
    for h, hist in history.items():
        imgs.append(Image(output + figure_template % hist['state_idx']))
    imgs.animationDelayImages(anim_delay)
    imgs.writeImages(output + gif)
    # Generating HTML report
    doc, tag, text, line = Doc().ttl()
    with tag('html'):
        with tag('body'):
            with tag('p', id='main'):
                with tag('h1'):
                    text('Simulation Results')
                line('h2', 'Animation')
                with tag('div', id='frame'):
                    doc.stag('img', src=gif)
            # One paragraph per simulated iteration.
            for h, hist in history.items():
                with tag('p', id='%d' % h):
                    line('h2', 'Iteration %d' % h)
                    doc.stag('br')
                    text('State ID: %d' % hist['state_idx'])
                    doc.stag('br')
                    text('Reward: %d' % hist['rwd'])
                    doc.stag('br')
                    for agent, loc in hist['agent_locs'].items():
                        text('Agent %s at location %s.' % (agent, loc))
                        doc.stag('br')
                    with tag('div', id='frame'):
                        doc.stag('img', src=figure_template % hist['state_idx'])
    # Storing HTML file
    result = indent(doc.getvalue())
    with open(output + html, 'w') as f:
        f.write(result)
# Build a static HTML event page: a headline followed by one Bootstrap card
# per event day from the iCal feed.
# NOTE(review): the alias below shadows the builtin `set` — confirm nothing
# later in this script needs the builtin.
from settings import Settings as set
from ical import getEventList
from htmlUtils import getUnicodeClock
from yattag import Doc, indent

eventDaysList = getEventList()
count_days = len(eventDaysList)  # NOTE(review): appears unused in this excerpt

doc, tag, text = Doc().tagtext()
doc.asis('<!DOCTYPE html>')
# HTML Head
with tag('html'):
    with tag('head'):
        doc.asis('<meta name="description" content="' + set.text_title + '" charset="utf-8">')
        doc.asis('<link rel="stylesheet" href="bootstrap.css">')
    with tag('body', klass=set.body_style):
        with tag('div', klass='container'):
            # Headline
            with tag('div', klass='row justify-content-center mb-5'):
                with tag('div', klass='col-auto'):
                    with tag('h1', klass=set.heading_style):
                        text(set.text_title)
            # Events
            with tag('div', klass='row justify-content-around'):
                for day in eventDaysList:
                    with tag('div', klass='col-4'):
                        with tag('div', klass='card', style='display: block; width: 100%'):
                        # NOTE(review): source is truncated here — the card
                        # body continues beyond this excerpt.
def build_tmx(langpair_set, xml_source_lang, xml_target_lang):
    """Build a TMX 1.4 document from (source, target) segment pairs.

    :param langpair_set: iterable of (source_text, target_text) pairs
    :param xml_source_lang: source language code for srclang/xml:lang
    :param xml_target_lang: target language code for xml:lang
    :return: indented TMX string (CRLF line endings, 2-space indent)
    """
    # convert to tmx
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('tmx', version="1.4"):
        with tag('header',
                 creationtool="cApps",
                 creationtoolversion="2020.10",
                 segtype="paragraph",
                 adminlang="en",
                 datatype="HTML",
                 srclang=xml_source_lang):
            # Hyphenated attribute must be passed as a (name, value) pair,
            # since "o-tmf" is not a valid Python keyword.
            doc.attr(('o-tmf', "omt")
                     # o_tmf="omt",
                     )
            text('')
        with tag('body'):
            for tu in langpair_set:
                src_txt = str(tu[0]).strip()
                tgt_txt = str(tu[1]).strip()
                with tag('tu'):
                    with tag('tuv'):
                        doc.attr(('xml:lang', xml_source_lang))
                        with tag('seg'):
                            text(src_txt)
                    with tag('tuv'):
                        doc.attr(('xml:lang', xml_target_lang))
                        with tag('seg'):
                            text(tgt_txt)
    tmx_output = indent(doc.getvalue(), indentation=' ' * 2, newline='\r\n')
    return tmx_output  # .replace("o_tmf=", "o-tmf=")
class Report:
    """Generate the IDontSpeakSSL HTML report from a scan directory.

    The yattag document helpers (tag/line/stag/text) are bound as instance
    attributes so the builder methods can use them directly.
    """

    def __init__(self, scanDir, targetlist):
        self.reportDir = scanDir
        self.targetlist = targetlist
        self.doc = Doc()
        self.tag = self.doc.tag
        self.line = self.doc.line
        self.stag = self.doc.stag
        self.text = self.doc.text

    def createReport(self):
        # Top-level driver: copy static assets, build the document, write it.
        print()
        cprint("[-] Starting the generation of the report", 'blue')
        self.copyJSCSS()
        self.doc.asis('<!DOCTYPE html>')
        with self.tag('html'):
            self.createHead()
            self.createBody()
        self.writeReport()
        cprint("[+] Report generated", 'green')
        cprint(
            "[+] All results could be found in {}/report.html".format(
                self.reportDir), 'green')

    def copyJSCSS(self):
        # Copy bundled JS/CSS resources next to the report (only once).
        if (os.path.isdir("{}/html".format(self.reportDir)) == False):
            shutil.copytree('resources', "{}/html".format(self.reportDir))

    def createHead(self):
        # <head>: title plus Bootstrap/jQuery assets copied by copyJSCSS().
        with self.tag('head'):
            self.line('title', 'IDontSpeakSSL Report')
            self.stag('link', ("rel", "stylesheet"),
                      ("href", "./html/css/bootstrap.min.css".format(self.reportDir)))
            self.line('script', '', src="./html/js/jquery.min.js".format(self.reportDir))
            self.line('script', '', src="./html/js/bootstrap.min.js".format(self.reportDir))

    def addScope(self):
        # Bullet list of scanned host:port targets.
        with self.tag('ul'):
            for target in self.targetlist:
                self.line('li', ":".join(target))

    def createBody(self):
        # <body>: heading, scope list, then one section per finding family.
        with self.tag('body'):
            with self.tag('div'):
                self.doc.attr(klass='container')
                self.line('h1', 'IDontSpeakSSL Report')
                with self.tag('p'):
                    self.text(
                        'Report of IDontSpeakSSL script, all findings are splitted into sections.'
                    )
                    self.stag('br')
                    self.text('The scope was:')
                    self.addScope()
                self.addSection("Certificate Findings", "Certificates",
                                "Certificates",
                                "Insecure SSL/TLS Certificate Configuration")
                self.addSection("Weak Cipher Suites", "CipherSuites",
                                "Ciphers", "Weak Crypto suites")
                self.addSection("Weak Protocols", "Protocols", "Protocols",
                                "Insecure Network Transmission")
                self.addSection("Bad Configurations", "Configurations",
                                "Configurations", "Missing Security Headers")
                self.addSection(
                    "Known Vulnerabilities", "Flaws", "Flaws",
                    "Insecure Network Transmission or Weak Cryptography")

    def listAssets(self, folder, assetfile):
        # List each affected host from the finding's asset file, if present.
        if (os.path.exists("{}/{}/{}".format(self.reportDir, folder, assetfile))):
            with self.tag('ul'):
                with open("{}/{}/{}".format(self.reportDir, folder, assetfile),
                          'r') as assets:
                    for asset in assets:
                        self.line('li', asset)
        else:
            self.text("No affected location for this finding.")

    def addSection(self, SectionName, folder, findingType, findingDBRef):
        # One collapsible Bootstrap panel per configured finding that has
        # results on disk; Certificates get two extra fixed panels.
        if (len(listdir("{}/{}".format(self.reportDir, folder))) < 1):
            return
        global findingConfig
        self.line('h2', SectionName)
        self.line('p', findingDBRef)
        with self.tag('div'):
            self.doc.attr(klass='panel-group')
            findingid = 0
            for finding in (findingConfig[findingType]).keys():
                if (os.path.exists("{}/{}/{}".format(
                        self.reportDir, folder,
                        (findingConfig[findingType])[finding][0]))):
                    with self.tag('div', klass='panel panel-default'):
                        with self.tag('div', klass='panel-heading'):
                            with self.tag('div', klass='panel-title'):
                                with self.tag('h4', klass='panel-title'):
                                    with self.tag(
                                            'a', ("data-toggle", "collapse"),
                                            ("href", "#{}{}".format(
                                                findingType, findingid))):
                                        self.text("{}".format(
                                            (findingConfig[findingType]
                                             )[finding][2]))
                        with self.tag('div'):
                            self.doc.attr(klass='panel-collapse collapse',
                                          id='{}{}'.format(
                                              findingType, findingid))
                            with self.tag('div', klass='panel-body'):
                                self.text("{}".format(
                                    (findingConfig[findingType])[finding][3]))
                                self.listAssets(
                                    folder,
                                    (findingConfig[findingType])[finding][0])
                    findingid += 1
        if (findingType == "Certificates"):
            if (os.path.exists("{}/{}/{}".format(
                    self.reportDir, folder, "TooLongCetificateValidity.txt"))):
                self.addCertificateValidity(findingType, findingid, folder)
                findingid += 1
            if (os.path.exists("{}/{}/{}".format(self.reportDir, folder,
                                                 "Issuers.txt"))):
                self.addCertificateIssuers(findingType, findingid, folder)

    def addCertificateValidity(self, findingType, findingid, folder):
        # Fixed panel for certificates whose validity period is too long.
        with self.tag('div', klass='panel panel-default'):
            with self.tag('div', klass='panel-heading'):
                with self.tag('div', klass='panel-title'):
                    with self.tag('h4', klass='panel-title'):
                        with self.tag(
                                'a', ("data-toggle", "collapse"),
                                ("href", "#{}{}".format(findingType, findingid))):
                            self.text("{}".format(
                                "Certificate With Too Long Validity Period"))
        with self.tag('div'):
            self.doc.attr(klass='panel-collapse collapse',
                          id='{}{}'.format(findingType, findingid))
            with self.tag('div', klass='panel-body'):
                self.text("{}".format(
                    "Certificate validity period must be limited to 39 months for certificates issued before March 1st, 2018, or 825 days for certificates issued after March 1st, 2018.<br>(https://www.globalsign.com/en/blog/ssl-certificate-validity-capped-at-maximum-two-years/)<br>(https://www.symantec.com/connect/blogs/new-39-month-ssl-certificate-maximum-validity)"
                ))
                self.listAssets(folder, "TooLongCetificateValidity.txt")

    def addCertificateIssuers(self, findingType, findingid, folder):
        # Fixed panel listing every certificate issuer seen during the scan.
        with self.tag('div', klass='panel panel-default'):
            with self.tag('div', klass='panel-heading'):
                with self.tag('div', klass='panel-title'):
                    with self.tag('h4', klass='panel-title'):
                        with self.tag(
                                'a', ("data-toggle", "collapse"),
                                ("href", "#{}{}".format(findingType, findingid))):
                            self.text("{}".format("Certificate Issuers"))
        with self.tag('div'):
            self.doc.attr(klass='panel-collapse collapse',
                          id='{}{}'.format(findingType, findingid))
            with self.tag('div', klass='panel-body'):
                self.text("{}".format(
                    "Certificate Issuers must be check from your end, all certificates must be issued by a trusted Certificate Authority. The CA could be a publicly known certificate authorithy such as Symantec, Verisign, etc. or an internal CA."
                ))
                self.listAssets(folder, "Issuers.txt")

    def writeReport(self):
        # Pretty-print the accumulated document to <reportDir>/report.html.
        with open("{}/report.html".format(self.reportDir), 'w') as report:
            report.write(indent(self.doc.getvalue()))
def get_qualitative_examples_html(orig_sents, sys_sents, refs_sents):
    """Build an HTML report of qualitative simplification examples.

    For each criterion (random, best/worst SARI, compression, paraphrasing,
    sentence splits) renders a collapsible section containing up to 50
    source/prediction/reference triples sorted by that criterion.

    Parameters: orig_sents and sys_sents are parallel lists of sentences;
    refs_sents is a list of reference lists (one list per reference set).
    Returns the assembled HTML string.
    """
    # (title, sort key over (complex, simple, refs), value formatter)
    title_key_print = [
        ('Randomly sampled simplifications',
         lambda c, s, refs: 0,
         lambda value: ''),
        ('Best simplifications according to SARI',
         lambda c, s, refs: -corpus_sari([c], [s], [refs]),
         lambda value: f'SARI={-value:.2f}'),
        ('Worst simplifications according to SARI',
         lambda c, s, refs: corpus_sari([c], [s], [refs]),
         lambda value: f'SARI={value:.2f}'),
        ('Simplifications with the most compression',
         lambda c, s, refs: get_compression_ratio(c, s),
         lambda value: f'compression_ratio={value:.2f}'),
        ('Simplifications with a high amount of paraphrasing',
         lambda c, s, refs: get_levenshtein_similarity(c, s) / get_compression_ratio(c, s),
         lambda value: f'levenshtein_similarity={value:.2f}'),
        ('Simplifications with the most sentence splits (if any)',
         lambda c, s, refs: -(count_sentences(s) - count_sentences(c)),
         lambda value: f'#sentence_splits={-value:.2f}'),
    ]

    def get_one_sample_html(orig_sent, sys_sent, ref_sents, sort_key, print_func):
        # Escape everything once up front; all later rendering uses escaped text.
        orig_sent, sys_sent, *ref_sents = [
            html.escape(sent) for sent in [orig_sent, sys_sent, *ref_sents]
        ]
        doc = Doc()
        with doc.tag('div', klass='mb-2 p-1'):
            # Sort key
            with doc.tag('div', klass='text-muted small'):
                doc.asis(print_func(sort_key(orig_sent, sys_sent, ref_sents)))
            with doc.tag('div', klass='ml-2'):
                orig_sent_bold, sys_sent_bold = make_differing_words_bold(
                    orig_sent, sys_sent, make_text_bold_html)
                # Source
                with doc.tag('div'):
                    doc.asis(orig_sent_bold)
                # Prediction
                with doc.tag('div'):
                    doc.asis(sys_sent_bold)
                # References
                collapse_id = get_random_html_id()
                with doc.tag('div', klass='position-relative'):
                    with doc.tag('a', ('data-toggle', 'collapse'),
                                 ('href', f'#{collapse_id}'),
                                 klass='stretched-link small'):
                        doc.text('References')
                with doc.tag('div', klass='collapse', id=collapse_id):
                    # BUG FIX: iterate the locally escaped `ref_sents` parameter.
                    # The original iterated `refs`, the *enclosing* loop variable,
                    # which held the unescaped references — the escaping above was
                    # silently bypassed and only worked at all via closure leakage.
                    for ref_sent in ref_sents:
                        _, ref_sent_bold = make_differing_words_bold(
                            orig_sent, ref_sent, make_text_bold_html)
                        with doc.tag('div', klass='text-muted'):
                            doc.asis(ref_sent_bold)
        return doc.getvalue()

    doc = Doc()
    for title, sort_key, print_func in title_key_print:
        with doc.tag('div', klass='container-fluid mt-4 p-2 border'):
            collapse_id = get_random_html_id()
            with doc.tag('a', ('data-toggle', 'collapse'), ('href', f'#{collapse_id}')):
                doc.line('h3', klass='m-2', text_content=title)
            # Now lets print the examples
            sample_generator = sorted(
                zip(orig_sents, sys_sents, zip(*refs_sents)),
                key=lambda args: sort_key(*args),
            )
            # Samples displayed by default
            with doc.tag('div', klass='collapse', id=collapse_id):
                n_samples = 50
                for i, (orig_sent, sys_sent, refs) in enumerate(sample_generator):
                    if i >= n_samples:
                        break
                    doc.asis(get_one_sample_html(orig_sent, sys_sent, refs,
                                                 sort_key, print_func))
    return doc.getvalue()
from __future__ import division import requests, json from shutil import copyfile from yattag import Doc, indent from os import listdir from os.path import isfile, join doc, tag, text = Doc().tagtext() with tag('html'): with tag('body'): files = [f for f in listdir("/root/poss-ci.github.io/archives") if isfile(join("/root/poss-ci.github.io/archives", f)) and f != 'helper.js'] revfiles = files[::-1] for f in revfiles: with tag('p'): text() with tag('a',href="https://poss-ci.github.io/archives/"+f): text(f) result = doc.getvalue() print "Writing result to a file at /root/poss-ci.github.io/archives.html" with open('/root/poss-ci.github.io/archives.html','w') as afile : afile.write(result.encode('utf-8'))
# NOTE(review): whitespace-mangled class; kept byte-identical. HTML reporter built
# on yattag: run() assembles <!DOCTYPE html> + head + body and writes report.html;
# create_body() renders a findings summary (one Bootstrap collapsible panel per
# non-empty subsection, with a nested <ul> per vulnerability) followed by a
# per-host section keyed by "host:port" with dots mapped to underscores for the
# collapse anchor ids; create_header() pulls Bootstrap/jQuery from html/css and
# html/js; write_report() dumps the indented doc to <report_folder>/report.html.
# NOTE(review): the class name "IDontSpeaksSSLReporterHTML" (extra 's') and the
# stray `pass` before the first method are reproduced as-is — both look
# accidental but renaming/removing them could break external callers; confirm.
class IDontSpeaksSSLReporterHTML(IDontSpeaksSSLReporter): pass def run(self): self.doc = Doc() copy_js_css(self.report_folder) cprint("[-] Generating the HTML report...", 'blue') self.doc.asis('<!DOCTYPE html>') self.create_header() self.create_body() self.write_report() cprint("[+] Report generated.", 'green') cprint( "[+] All results can be found in {}/report.html.".format( self.report_folder), 'green') def create_body(self): with self.doc.tag('body'): with self.doc.tag('div'): self.doc.attr(klass='container') self.doc.line('h1', 'IDontSpeakSSL Report') self.add_summary_section( "Findings Summary", self.results['analyzer results']["summary"]) self.add_hosts_section( "Findings per Host", self.results['analyzer results']["hosts"]) def add_summary_section(self, section, subsections): self.doc.line('h2', section) with self.doc.tag('div'): self.doc.attr(klass='panel-group') for subsection, instances in subsections.items(): if instances: with self.doc.tag('div', klass='panel panel-default'): with self.doc.tag('div', klass='panel-heading'): with self.doc.tag('div', klass='panel-title'): with self.doc.tag('h4', klass='panel-title'): with self.doc.tag( 'a', ("data-toggle", "collapse"), ("href", "#{}".format( capitalize_sentence(subsection)))): self.doc.text("{}".format( capitalize_sentence(subsection))) with self.doc.tag('div'): self.doc.attr(klass='panel-collapse collapse', id='{}'.format( capitalize_sentence(subsection))) with self.doc.tag('div', klass='panel-body'): if (subsection != "vulnerabilities"): with self.doc.tag('ul'): for instance in instances: self.doc.line( 'li', "Host: {}, Port: {}".format( instance['host'], instance['port'])) else: with self.doc.tag('ul'): for vulnerability_name, vulnerability_instance in instances.items( ): self.doc.line( 'li', "{}".format( vulnerability_name)) with self.doc.tag('ul'): for location in vulnerability_instance: self.doc.line( 'li', "Host: {}, Port: {}". 
format( location['host'], location['port'])) def add_hosts_section(self, section, hosts): self.doc.line('h2', section) with self.doc.tag('div'): self.doc.attr(klass='panel-group') for host, findings in hosts.items(): if findings: with self.doc.tag('div', klass='panel panel-default'): with self.doc.tag('div', klass='panel-heading'): with self.doc.tag('div', klass='panel-title'): with self.doc.tag('h4', klass='panel-title'): with self.doc.tag( 'a', ("data-toggle", "collapse"), ("href", "#{}".format( host.replace(".", "_")))): instance_host, instance_port = extract_host_and_port( host) self.doc.text( "Host: {}, Port: {}".format( instance_host, instance_port)) with self.doc.tag('div'): self.doc.attr(klass='panel-collapse collapse', id='{}'.format(host.replace(".", "_"))) with self.doc.tag('div', klass='panel-body'): for finding_type, finding_instance in findings.items( ): self.doc.text("{}".format(finding_type)) if (isinstance(finding_instance, list)): with self.doc.tag('ul'): for instance in finding_instance: self.doc.line( 'li', "{}".format(instance)) elif (isinstance(finding_instance, bool)): if (finding_instance): self.doc.text(": Vulnerable") def create_header(self): with self.doc.tag('head'): self.doc.line('title', 'IDontSpeakSSL Report') self.doc.stag('link', ("rel", "stylesheet"), ("href", "html/css/bootstrap.min.css")) self.doc.line('script', '', src="html/js/jquery.min.js") self.doc.line('script', '', src="html/js/bootstrap.min.js") def write_report(self): with open("{}/report.html".format(self.report_folder), 'w') as report: report.write(indent(self.doc.getvalue()))
def gen_html_str(self, img_dir, title=None): assert len(img_dir) > 0, "image directory {img_dir} must be non-empty." if img_dir[-1] == os.sep: img_dir = img_dir[:-1] mm = self.motif_manager kc = self.kmer_counter img_files = [ self.logo_forward_file, self.logo_revcom_file, self.kmer_hamdis_file, self.motif_posdis_file, self.motif_cooccur_dis_file ] doc, tag, text = Doc().tagtext() with tag('h2'): if title: text(title) else: text(f'K={kc.k}') if mm.is_palindrome: tmpstr = 'palindrome' else: tmpstr = 'non-palindrome' with tag('p'): text(f'Consensus (forward) [{tmpstr}]: {mm.consensus_seq}') with tag('p'): text( f'Consensus (revcom) [{tmpstr}]: {kc.revcom(mm.consensus_seq)}' ) with tag('p'): text(f'Number of maximum allowed mutations: {mm.n_max_mutation}') with tag('p'): text(f'Total number of input sequences: {mm.n_seq}') with tag('p'): tmp_prec = round(mm.n_tfbs_forward_seq / mm.n_seq * 100, 2) text( f'Number of Forward motif Sequences: {mm.n_tfbs_forward_seq} ({tmp_prec}%) ' ) with tag('p'): tmp_prec = round(mm.n_tfbs_revcom_seq / mm.n_seq * 100, 2) text( f'Number of Revcom motif Sequences: {mm.n_tfbs_revcom_seq} ({tmp_prec}%)' ) with tag('p'): tmp_prec = round(mm.n_tfbs_seq / mm.n_seq * 100, 2) text( f'Number of motif (forward/revcom) Sequences: {mm.n_tfbs_seq} ({tmp_prec}%)' ) with tag('p'): text( f'Forward-Forward motif co-occurence index: {mm.ff_co_occur_index}' ) with tag('p'): text( f'Forward-RevCom motif co-occurence index: {mm.fr_co_occur_index}' ) with tag('div'): for imgf in img_files: if imgf == self.kmer_hamdis_file: doc.stag( 'img', klass="hamdis", src=img_dir + '/' + imgf, alt=imgf, onclick=f"window.open('{img_dir}/{imgf}', '_blank');") else: doc.stag( 'img', src=img_dir + '/' + imgf, alt=imgf, onclick=f"window.open('{img_dir}/{imgf}', '_blank');") html_str = indent( doc.getvalue(), indent_text=True ) # will also indent the text directly contained between <tag> and </tag> return html_str
def gen_html_k(self, html_div_list, kmer_len, trend_fig_dir, trend_fig_name): # generate html file style_str = FileProcessor.get_style_str() doc, tag, text = Doc().tagtext() doc.asis('<!DOCTYPE html>') with tag('html', lang="en"): with tag('head'): with tag('title'): text('SELEX-Seq Result') doc.stag('meta', charset="utf-8") doc.stag('meta', name="viewport", content="width=device-width, initial-scale=1") with tag('style'): text(style_str) # specify style with tag('body'): with tag('h1'): text(f'IniMotif: SELEX-seq Results. kmer_len={kmer_len}') with tag('h2'): text(f'identifier={self.identifier}') doc.stag('br') text(f'minimum round number: {self.min_selex_round}') doc.stag('br') text(f'maximum round number: {self.max_selex_round}') doc.stag('br') for tmpstr in html_div_list: doc.stag('hr') doc.asis(tmpstr) # add trend figure doc.stag('hr') with tag('h2'): text('SELEX kmer trend figure') with tag('div'): doc.stag( 'img', klass="hamdis", src=trend_fig_dir + '/' + trend_fig_name, alt=trend_fig_name, onclick= f"window.open('{trend_fig_dir}/{trend_fig_name}', '_blank');" ) # output html string html_str = indent( doc.getvalue(), indent_text=True ) # will also indent the text directly contained between <tag> and </tag> return html_str
def to_html(self, position, theme): """Method for converting slide content into html format. Parameters ---------- position : SlidePosition object current slide position doc : yattag.Doc object the main html doc theme : Theme object the base theme """ doc = Doc() with doc.tag('div'): self.put_attributes(doc=doc) # get slide positioning data actual_theme = None if self.overtheme: actual_theme = self.overtheme.slide else: actual_theme = theme position.set_position(theme=actual_theme) if self.title != '$overview': doc.attr(('class', 'step slide')) doc.attr(('data-x', str(position.position[0]))) doc.attr(('data-y', str(position.position[1]))) doc.attr(('data-z', str(position.position[2]))) doc.attr(('data-scale', str(position.scale))) doc.attr(('data-rotate-x', str(position.rotation[0]))) doc.attr(('data-rotate-y', str(position.rotation[1]))) doc.attr(('data-rotate-z', str(position.rotation[2]))) # inserting elements for header in actual_theme.loop_over_headers(): header.to_html(doc=doc, metadata=self.data) for sidebar in actual_theme.loop_over_sidebars(): if sidebar.position == 'L': sidebar.to_html(doc=doc, metadata=self.data) actual_theme.content.to_html(doc=doc, content='\n' + self.raw_body_parse()) for sidebar in actual_theme.loop_over_sidebars(): if sidebar.position == 'R': sidebar.to_html(doc=doc, metadata=self.data) for footer in actual_theme.loop_over_footers(): footer.to_html(doc=doc, metadata=self.data) else: doc.attr(('class', 'step overview')) doc.attr(('style', '')) doc.attr(('data-x', str(position.position[0]))) doc.attr(('data-y', str(position.position[1]))) doc.attr(('data-z', str(position.position[2]))) #doc.attr(('data-scale',str(position.scale))) doc.attr(('data-scale', '100')) return doc.getvalue()
def wrapper(func): _doc, _tag, _text, _line = Doc().ttl() func(_doc, _tag, _text, _line) value = _doc.getvalue() klass.var[func.__name__] = value return value
def raw_atom_link(title: str, path: str): doc = Doc() doc.stag("link", rel="alternate", type="application/atom+xml", title=title, href=path) return doc.getvalue()
def raw_link(path: str): """A link tag""" doc = Doc() doc.stag("link", rel="stylesheet", type="text/css", href=path) return doc.getvalue()
def raw_script(path: str): """A script tag""" doc = Doc() with doc.tag("script", src=path, type="text/javascript"): pass return doc.getvalue()
# NOTE(review): whitespace-mangled; kept byte-identical because the exact <tr>
# nesting around the Impacts/Access header rows cannot be recovered unambiguously.
# Purpose: build hostCveMap (CVE id -> list of affected IPs inside the network
# given as argv[1], default 0.0.0.0/0) from the `db` collections, enrich each CVE
# from the cve-search database (`cvedb`), and write an indented HTML report to
# the module-level `outputFile`.
# NOTE(review): `id` shadows the builtin, and htmlOut.close() inside the `with`
# is redundant — both harmless; `db`, `cvedb` and `outputFile` are presumably
# module-level handles defined outside this view — confirm before refactoring.
def main(): # check if there's a network. If not, use 0.0.0.0 if len(sys.argv) > 1: network = sys.argv[1] else: network = '0.0.0.0/0' networkObj = ipaddress.ip_network(network) # initialize the host to CVE map object hostCveMap = {} # get list of assets with OIDs associated hostList = db.hosts.find({'oids': {'$exists': 'true'}}) # for each asset... for host in hostList: # make string of vulns found, resolving them into CVE IDs (if they exist, otherwise NOCVE) ip = host['ip'] # check if it's in our range if ipaddress.ip_address(ip) not in networkObj: continue for oidItem in host['oids']: cveList = db.vulnerabilities.find_one({'oid': oidItem['oid']})['cve'] # because this is a list of one or more items: for cve in cveList: # skip the NOCVE type, since it's not really useful to us if cve == "NOCVE": continue # if there are already IPs mapped to this vulnerability if cve in hostCveMap.keys(): # ignore duplicates if ip not in hostCveMap[cve]: hostCveMap[cve].append(ip) # if there aren't any IPs yet, create a new list with this as the first item else: hostCveMap[cve] = [ip] # start the output doc doc, tag, text, line = Doc().ttl() with tag('html'): with tag('head'): line('title', 'Vulnerability report for ' + network) with tag('body'): line('h1', 'Vulnerability report for ' + network) # now, for each CVE (sorted) that we've found... for cve in sorted(hostCveMap.keys()): # look up CVE details in cve-search database cvedetails = cvedb.cves.find_one({'id': cve}) # get affected host information into a list affectedHosts = len(hostCveMap[cve]) listOfHosts = hostCveMap[cve] # assemble into HTML line('h2', cve) line('b', 'Affected hosts: ') text(affectedHosts) doc.stag('br') if (cvedetails): # if it's not empty! 
with tag('table'): with tag('tr'): line('td', 'Summary') line('td', cvedetails['summary']) with tag('tr'): line('td', 'CWE') with tag('td'): id = 'Unknown' if cvedetails['cwe'] != 'Unknown': id = cvedetails['cwe'].split('-')[1] with tag( 'a', href= "https://cwe.mitre.org/data/definitions/" + id): text(cvedetails['cwe']) cweDetails = cvedb.cwe.find_one({'id': id}) if cweDetails: text(" (" + cweDetails['name'] + ")") else: text(" (no title)") with tag('tr'): line('td', 'Published') line('td', cvedetails['Published'].strftime("%Y-%m-%d")) with tag('tr'): line('td', 'Modified') line('td', cvedetails['Modified'].strftime("%Y-%m-%d")) with tag('tr'): line('td', 'CVSS') line('td', cvedetails['cvss'] or 'Unknown') with tag('tr'): with tag('td'): line('b', 'Impacts') if 'impact' in cvedetails: with tag('tr'): line('td', "Confidentiality") line('td', cvedetails['impact']['confidentiality']) with tag('tr'): line('td', "Integrity") line('td', cvedetails['impact']['integrity']) with tag('tr'): line('td', "Availability") line('td', cvedetails['impact']['availability']) with tag('tr'): with tag('td'): line('b', 'Access') if 'access' in cvedetails: with tag('tr'): line('td', "Vector") line('td', cvedetails['access']['vector']) with tag('tr'): line('td', "Complexity") line('td', cvedetails['access']['complexity']) with tag('tr'): line('td', "Authentication") line('td', cvedetails['access']['authentication']) with tag('tr'): with tag('td'): line('b', "References") for reference in cvedetails['references']: with tag('tr'): with tag('td'): with tag('a', href=reference): text(reference) else: # if it's empty line('i', "Details unknown -- update your CVE database") doc.stag('br') line('b', "Affected hosts:") doc.stag('br') for host in sorted(listOfHosts): text(host) doc.stag('br') # output final HTML with open(outputFile, 'w') as htmlOut: htmlOut.write(indent(doc.getvalue())) htmlOut.close()
def generate_individual_html(name, dates, calibrations, measurements, sums, max_measurements, earthquake_measurement): doc, tag, text, line = Doc().ttl() with tag('a', 'href=index.html'): line('h3', '<-----GO BACK') line('h2', 'GENERATED AT: ' + datetime.datetime.now().isoformat()) line('h2', 'EVENT NUMBER: '+str(int(name))+ " at "+str(dates[earthquake_measurement])) line('h2', 'X AXIS MAXIMUM ACCELERATION: ' + str(max_measurements[0])[:7] + " g") line('h2', 'Y AXIS MAXIMUM ACCELERATION: ' + str(max_measurements[1])[:7] + " g") line('h2', 'Z AXIS MAXIMUM ACCELERATION: ' + str(max_measurements[2])[:7] + " g") line('h2', 'MAXIMUM ACCELERATION VECTOR MAGNITUDE: ' + str(max_measurements[3])[:7] + " g") with open(graph_location+name+"_x.png", "rb") as f2b64: base64img = str(base64.b64encode(f2b64.read())) doc.stag('img', src='data:image/png;base64,'+base64img[2:-1], width="100%") # ~ line('h3', 'LAST 86400 MEASUREMENTS') with open(graph_location+name+"_y.png", "rb") as f2b64: base64img = str(base64.b64encode(f2b64.read())) doc.stag('img', src='data:image/png;base64,'+base64img[2:-1], width="100%") # ~ line('h3', 'LAST 3600 MEASUREMENTS') with open(graph_location+name+"_z.png", "rb") as f2b64: base64img = str(base64.b64encode(f2b64.read())) doc.stag('img', src='data:image/png;base64,'+base64img[2:-1], width="100%") # ~ line('h3', 'LAST 1800 MEASUREMENTS') with open(graph_location+name+"_mag.png", "rb") as f2b64: base64img = str(base64.b64encode(f2b64.read())) doc.stag('img', src='data:image/png;base64,'+base64img[2:-1], width="100%") # ~ line('h3', 'LAST 600 MEASUREMENTS') with tag('a', 'href=index.html'): line('h3', '<-----GO BACK') tw = open(graph_location+name+".html","w") tw.write(doc.getvalue()) tw.close()
def record_listens(request, data):
    """
    Submit the listen in the lastfm format to be inserted in db.
    Accepts listens for both track.updateNowPlaying and track.scrobble methods.
    """
    output_format = data.get('format', 'xml')
    try:
        sk, api_key = data['sk'], data['api_key']
    except KeyError:
        # Invalid parameters
        raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS,
                              output_format=output_format)

    session = Session.load(sk)
    if not session:
        if not Token.is_valid_api_key(api_key):
            # Invalid API_KEY
            raise InvalidAPIUsage(CompatError.INVALID_API_KEY,
                                  output_format=output_format)
        # Invalid Session KEY
        raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY,
                              output_format=output_format)

    # Group the flat form keys (e.g. "track[0]") into one dict per listen,
    # keyed by the bracketed index; un-indexed keys land in group 0.
    lookup = defaultdict(dict)
    for key, value in data.items():
        if key in ["sk", "token", "api_key", "method", "api_sig"]:
            continue
        # FIX: raw string — '(.*)\[(\d+)\]' contained invalid escape
        # sequences (a DeprecationWarning today, a SyntaxError eventually).
        matches = re.match(r'(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value

    if request.form['method'].lower() == 'track.updatenowplaying':
        # now-playing listens may omit the timestamp; default to "now" (UTC)
        for i, listen in lookup.items():
            if 'timestamp' not in listen:
                listen['timestamp'] = calendar.timegm(
                    datetime.now().utctimetuple())

    # Convert to native payload then submit 'em after validation.
    listen_type, native_payload = _to_native_api(lookup, data['method'],
                                                 output_format)
    for listen in native_payload:
        validate_listen(listen, listen_type)
    user = db_user.get(session.user_id)
    # With corrections than the original submitted listen.
    augmented_listens = insert_payload(native_payload, user,
                                       listen_type=listen_type)

    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        if listen_type == 'playing_now':
            doc.asis(create_response_for_single_listen(
                list(lookup.values())[0], augmented_listens[0], listen_type))
        else:
            # Currently LB accepts all the listens and ignores none
            accepted_listens = len(lookup.values())
            with tag('scrobbles', accepted=accepted_listens, ignored='0'):
                for original_listen, augmented_listen in zip(
                        list(lookup.values()), augmented_listens):
                    doc.asis(create_response_for_single_listen(
                        original_listen, augmented_listen, listen_type))

    return format_response(
        '<?xml version="1.0" encoding="utf-8"?>\n' +
        yattag.indent(doc.getvalue()),
        output_format)
def generate_html(data, css): doc, tag, text = Doc().tagtext() doc.asis('<!DOCTYPE html>') with tag('html'): with tag('head'): doc.asis( '<link rel="stylesheet" href="http://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css">' ) doc.asis('<link rel="stylesheet" href="test.css">') with tag( 'script', src= "https://ajax.googleapis.com/ajax/libs/jquery/1.12.0/jquery.min.js" ): pass with tag( 'script', src= "http://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/js/bootstrap.min.js" ): pass # with tag('style'): # doc.asis(css) with tag('body'): for d in data: doc.asis(d) result = indent(doc.getvalue()) print(result) with open('test.html', 'w') as HTMLCode: HTMLCode.write(result)
from yattag import Doc import os import webbrowser as wb doc, tag, text = Doc().tagtext() with tag('body', style='background-color:black'): for colour in ['red', 'white', 'green']: with tag('text', style='white-space:PRE'): with tag('span', style='color:{}'.format(colour)): text(' *') with tag('text', style='white-space:PRE'): with tag('span', style='color:white'): text('HELLO') # color = [(255, 255, 0), (255, 0, 255), (255, 123, 123)] # for i in range(3): # colour = color[i] # print(colour) # with tag('text', style='white-space:PRE'): # with tag('span', style='color:rgb{}'.format(colour)): # text(' *') # doc.stag('br') # # doc._append('<br/>') # for i in range(3): # colour = color[i] # print(colour)
# NOTE(review): whitespace-mangled; kept byte-identical — the four-deep
# chapter/section/subsection/slide loop with its per-level `with` blocks is too
# nesting-sensitive to reformat safely. Flow: emit <!DOCTYPE html>, head, then a
# <body onload="resetCountdown(<max_time>);"> containing the impress.js root div;
# `current` tracks [chapter, section, subsection, slide] numbering, metadata
# entries are refreshed at each level, and every slide div gets the accumulated
# html attributes plus header / L-sidebar / content / R-sidebar / footer
# decorators before the whole doc is indented and returned.
def to_html(self, config): """Generate a html stream of the whole presentation. Parameters ---------- config : MatisseConfig MaTiSSe configuration """ doc, tag, text = Doc().tagtext() doc.asis('<!DOCTYPE html>') with tag('html'): # doc.attr(title=self.metadata['title'].value) self.__put_html_tag_head(doc=doc, tag=tag, text=text, config=config) with tag('body', onload="resetCountdown(" + str(self.metadata['max_time'].value) + ");"): doc.attr(klass='impress-not-supported') with tag('div', id='impress'): # numbering: [local_chap, local_sec, local_subsec, local_slide] current = [0, 0, 0, 0] for chapter in self.chapters: current[0] += 1 current[1] = 0 current[2] = 0 current[3] = 0 self.metadata['chaptertitle'].update_value( value=chapter.title) self.metadata['chapternumber'].update_value( value=chapter.number) for section in chapter.sections: current[1] += 1 current[2] = 0 current[3] = 0 self.metadata['sectiontitle'].update_value( value=section.title) self.metadata['sectionnumber'].update_value( value=section.number) for subsection in section.subsections: current[2] += 1 current[3] = 0 self.metadata['subsectiontitle'].update_value( value=subsection.title) self.metadata['subsectionnumber'].update_value( value=subsection.number) for slide in subsection.slides: current[3] += 1 self.metadata['slidetitle'].update_value( value=slide.title) self.metadata['slidenumber'].update_value( value=slide.number) with doc.tag('div'): chapter.put_html_attributes(doc=doc) section.put_html_attributes(doc=doc) subsection.put_html_attributes(doc=doc) slide.put_html_attributes(doc=doc) self.__put_html_slide_decorators( tag=tag, doc=doc, decorator='header', current=current, overtheme=slide.overtheme) self.__put_html_slide_decorators( tag=tag, doc=doc, decorator='sidebar', position='L', current=current, overtheme=slide.overtheme) slide.to_html(doc=doc, parser=self.parser, metadata=self.metadata, theme=self.theme, current=current) self.__put_html_slide_decorators( tag=tag, doc=doc, 
decorator='sidebar', position='R', current=current, overtheme=slide.overtheme) self.__put_html_slide_decorators( tag=tag, doc=doc, decorator='footer', current=current, overtheme=slide.overtheme) self.__put_html_tags_scripts(doc=doc, tag=tag, config=config) # source = re.sub(r"<li>(?P<item>.*)</li>", r"<li><span>\g<item></span></li>", source) html = indent(doc.getvalue()) return html
def generate_report(self): logging.info("report generation") report_table = pd.read_csv(self.path_to_report_table) engine = create_engine("sqlite://", echo=False) report_table.to_sql("report_table", con=engine, index=False) doc, tag, text = Doc().tagtext() doc.asis("<!DOCTYPE html>") # header with tag("html"): with tag("head"): doc.stag("link", href="style1.css", rel="stylesheet") with tag("h1", klass="main-header"): text("REcomp report") result = indent(doc.getvalue()) with open(self.path_to_output_html, "w") as output: output.write(result) # run parameters with tag("h2", klass="run-parameters"): text("Run parameters and thresholds:") with tag("h4", klass="thresholds"): text(f"Query cover calculated threshold: {self.ok_qcovs[0]}%") doc.stag("br") text(f"Percent identity calculated threshold: " f"{self.ok_perc_identity[0]}%") doc.stag("br") text(f"Include ribosomal clusters: " f"{self.args[0].include_ribosomal}") doc.stag("br") text(f"Include 'Other' clusters: {self.args[0].include_other}") doc.stag("br") result = indent(doc.getvalue()) with open(self.path_to_output_html, "w") as output: output.write(result) # Identified superclusters with tag("h2", klass="identified-header"): text("Identified superclusters") all_identified = engine.execute( "SELECT SuperclusterName FROM report_table " "WHERE SuperclusterType=='identified'").fetchall() all_identified = list(itertools.chain(*all_identified)) all_identified = natsorted(list(np.unique(all_identified))) for scl in all_identified: scl_name = re.search(r"[a-z]+", scl).group(0).capitalize() scl_num = re.search(r"\d+", scl).group(0) scl_name = f"{scl_name} {scl_num}" with tag("h3", klass="supercluster-name"): text(scl_name) # get path to fasta path_to_fasta = engine.execute(f"SELECT Path_to_fasta \ FROM report_table \ WHERE SuperclusterType=='identified' \ AND SuperclusterName=='{scl}'").fetchone()[0] with tag("a", download="Supercluster fasta", href=path_to_fasta): text("Supercluster fasta") with tag("table", 
klass="identified-table"): # table header with tag("tr"): with tag("th"): text("Cluster") with tag("th"): text("ClusterID") with tag("th"): text("Proportion, %") with tag("th"): text("Number of reads") with tag("th"): text("Sequence length, bp") with tag("th"): text("Sequence") with tag("th"): text("Graph layout") with tag("th"): text("TAREAN annotation") with tag("th"): text("Feature") # table for consensuses consensuses_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordSource=='REconsensus'").fetchall() consensuses_id = list(itertools.chain(*consensuses_id)) for consensus in consensuses_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordID=='{consensus}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # other contigs other_contigs_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordSource=='REother_contig'").fetchall() other_contigs_id = list(itertools.chain(*other_contigs_id)) for other in other_contigs_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordID=='{other}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with 
tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # references references_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordSource=='reference'").fetchall() references_id = list(itertools.chain(*references_id)) for reference in references_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='identified' \ AND RecordID=='{reference}'").fetchall()[0] with tag("tr"): with tag("td"): text("") with tag("td"): text(scl_info[6]) with tag("td"): text("") with tag("td"): text("") with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): text("") with tag("td"): text("") with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # Not identified superclusters with tag("h2", klass="not-identified-header"): text("Not identified superclusters") not_identified = engine.execute( "SELECT SuperclusterName FROM report_table WHERE SuperclusterType=" "='not_identified'").fetchall() not_identified = list(itertools.chain(*not_identified)) not_identified = natsorted(list(np.unique(not_identified))) for scl in not_identified: scl_name = re.search(r"[a-z]+", scl).group(0).capitalize() scl_num = re.search(r"\d+", scl).group(0) scl_name = f"{scl_name} {scl_num}" with tag("h3", klass="supercluster-name"): text(scl_name) # get path to fasta path_to_fasta = engine.execute(f"SELECT Path_to_fasta \ FROM 
report_table \ WHERE SuperclusterType=='not_identified' \ AND SuperclusterName=='{scl}'").fetchone()[0] with tag("a", download="Supercluster fasta", href=path_to_fasta): text("Supercluster fasta") with tag("table", klass="not-identified-table"): # table header with tag("tr"): with tag("th"): text("Cluster") with tag("th"): text("ClusterID") with tag("th"): text("Proportion, %") with tag("th"): text("Number of reads") with tag("th"): text("Sequence length, bp") with tag("th"): text("Sequence") with tag("th"): text("Graph layout") with tag("th"): text("TAREAN annotation") with tag("th"): text("Feature") # table for consensuses consensuses_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='not_identified' \ AND RecordSource=='REconsensus'").fetchall() consensuses_id = list(itertools.chain(*consensuses_id)) for consensus in consensuses_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='not_identified' \ AND RecordID=='{consensus}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # other contigs other_contigs_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='not_identified' \ AND RecordSource=='REother_contig'").fetchall() other_contigs_id = list(itertools.chain(*other_contigs_id)) for other in other_contigs_id: scl_info = engine.execute(f"SELECT 
* FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='not_identified' \ AND RecordID=='{other}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # Probable unique superclusters with tag("h2", klass="probable-unique-header"): text("Probable unique superclusters") probable_unique = engine.execute( "SELECT SuperclusterName FROM report_table WHERE SuperclusterType=" "='probable_unique' AND Features!='Truly unique'").fetchall() probable_unique = list(itertools.chain(*probable_unique)) probable_unique = natsorted(list(np.unique(probable_unique))) for scl in probable_unique: scl_name = re.search(r"[a-z]+", scl).group(0).capitalize() scl_num = re.search(r"\d+", scl).group(0) scl_name = f"{scl_name} {scl_num}" with tag("h3", klass="supercluster-name"): text(scl_name) # get path to fasta path_to_fasta = engine.execute(f"SELECT Path_to_fasta \ FROM report_table \ WHERE SuperclusterType=='probable_unique' \ AND SuperclusterName=='{scl}'").fetchone()[0] with tag("a", download="Supercluster fasta", href=path_to_fasta): text("Supercluster fasta") with tag("table", klass="probable-unique-table"): # table header with tag("tr"): with tag("th"): text("Cluster") with tag("th"): text("ClusterID") with tag("th"): text("Proportion, %") with tag("th"): text("Number of reads") with tag("th"): text("Sequence length, bp") with tag("th"): text("Sequence") with tag("th"): text("Graph layout") with tag("th"): text("TAREAN annotation") with 
tag("th"): text("Feature") # table for consensuses consensuses_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordSource=='REconsensus'").fetchall() consensuses_id = list(itertools.chain(*consensuses_id)) for consensus in consensuses_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordID=='{consensus}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # other contigs other_contigs_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordSource=='REother_contig'").fetchall() other_contigs_id = list(itertools.chain(*other_contigs_id)) for other in other_contigs_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordID=='{other}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), 
width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: text(scl_info[11]) # truly unique with tag("h2", klass="truly-unique-header"): text("Truly unique superclusters") truly_unique = engine.execute( "SELECT SuperclusterName FROM report_table WHERE SuperclusterType=" "='probable_unique' AND Features=='Truly unique'").fetchall() truly_unique = list(itertools.chain(*truly_unique)) truly_unique = natsorted(list(np.unique(truly_unique))) with tag("table", klass="probable-unique-table"): # table header with tag("tr"): with tag("th"): text("Cluster") with tag("th"): text("ClusterID") with tag("th"): text("Proportion, %") with tag("th"): text("Number of reads") with tag("th"): text("Sequence length, bp") with tag("th"): text("Sequence") with tag("th"): text("Graph layout") with tag("th"): text("TAREAN annotation") with tag("th"): text("Feature") with tag("th"): text("Fasta") # table for consensuses for scl in truly_unique: consensuses_id = engine.execute( f"SELECT RecordID FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordSource=='REconsensus'").fetchall() consensuses_id = list(itertools.chain(*consensuses_id)) for consensus in consensuses_id: scl_info = engine.execute(f"SELECT * FROM report_table \ WHERE SuperclusterName=='{scl}' \ AND SuperclusterType=='probable_unique' \ AND RecordID=='{consensus}'").fetchall()[0] with tag("tr"): with tag("td"): text(int(scl_info[1])) with tag("td"): text(scl_info[6]) with tag("td"): text(round(scl_info[2], 3)) with tag("td"): text(int(scl_info[3])) with tag("td"): text(len(scl_info[7])) sequence = re.sub("(.{80})", "\\1\n", scl_info[7], 0) with tag("td"): with tag("pre"): text(sequence) with tag("td"): with tag("a", href=scl_info[4]): doc.stag("img", src=(self.__picture_to_string( scl_info[4]).decode("utf-8")), width="120", border=0) with tag("td"): text(scl_info[5]) with tag("td"): if scl_info[11] is None: text("") else: 
text(scl_info[11]) with tag("td"): with tag("a", download="Fasta", href=scl_info[12]): text("FASTA") result = indent(doc.getvalue()) with open(self.path_to_output_html, "w") as output: output.write(result)
def genSubpage(data):
    """Generate the per-hike HTML detail page and write it to disk.

    Expected layout of ``data`` (as used below — produced by analyzeGpx;
    confirm against that function): data[0] = hike directory name,
    data[1] = hike title, data[2] = total-time string, data[3] = start
    datetime, data[4] = end datetime, data[5] = distance (miles),
    data[6] = elevation gain (feet), data[7] = elevation loss (feet).

    Side effect: writes hikes/<data[0]>/index.html. Returns None.
    """
    doc, tag, text = Doc().tagtext()
    # Collect picture filenames from the hike directory (walk() recurses,
    # so filenames from any subdirectory are included as well).
    dirName = 'hikes/%s/' % data[0]
    f = []
    pics = []
    for (dirpath, dirnames, filenames) in walk(dirName):
        f.extend(filenames)
    for fileN in f:
        fn, fext = splitext(fileN)
        # BUG FIX: the original condition compared fext against ".jpg"
        # twice; the duplicated literal was evidently meant to be ".jpeg".
        if fext.lower() in (".jpg", ".jpeg"):
            pics.append(fileN)
    with tag('html'):
        with tag('head'):
            with tag('title'):
                text("%s " % data[1])
            doc.stag('link', rel="stylesheet", type="text/css",
                     href="../../style.css", title="style")
            # jQuery is needed by the "more.html" loader below.
            with tag("script",
                     src="https://code.jquery.com/jquery-3.3.1.js",
                     integrity="sha256-2Kok7MbOyxpgUVvAk/HJ2jigOSYS2auK4Pfzbm7uH60=",
                     crossorigin="anonymous"):
                text("")
            with tag("script", type="text/javascript"):
                text(
                    "function loadFrame (elm){var frame1 = document.getElementById('frame1');frame1.src = elm.dataset.src}"
                )
            with tag("script"):
                text('$(function(){$("#more").load("more.html"); });')
        with tag('body'):
            with tag('div', id='canvas_proj', style="width: 850px;"):
                with tag('div', id='nav'):
                    with tag('a', href='../../'):
                        with tag('button', type='submit'):
                            text("← Back")
                with tag("h1"):
                    text(data[1])
                doc.stag("br")
                # Interactive map (mapTemplate.html is copied in by genProject).
                with tag("div", style="text-align:center;"):
                    with tag("iframe", src="map.html", width="850",
                             height="800"):
                        text("")
                doc.stag("br")
                doc.stag("br")
                with tag('p'):
                    with tag("h2"):
                        text("Statistics")
                    with tag("b"):
                        text("Start time:")
                    # NOTE(review): the fixed -7h offset assumes the GPX
                    # timestamps are UTC and the hike is in Pacific *daylight*
                    # time — confirm; a proper tz conversion would be safer.
                    datestart = data[3] - timedelta(hours=7)
                    date_time = datestart.strftime(
                        "%m/%d/%Y, %I:%M %p Pacific Time")
                    text(date_time)
                    doc.stag("br")
                    with tag("b"):
                        text("End time:")
                    dateend = data[4] - timedelta(hours=7)
                    date_time = dateend.strftime(
                        "%m/%d/%Y, %I:%M %p Pacific Time")
                    text(date_time)
                    doc.stag("br")
                    with tag("b"):
                        text("Total time:")
                    text(data[2])
                    doc.stag("br")
                    doc.stag("br")
                    with tag("b"):
                        text("Total distance:")
                    text("%s miles" % data[5])
                    doc.stag("br")
                    with tag("b"):
                        text("Elevation change:")
                    text("+%s feet, -%s feet" % (data[6], data[7]))
                    doc.stag("br")
                    doc.stag("br")
                # Optional free-form details section, pulled in client-side
                # by the jQuery loader registered in <head>.
                moreInfo = 'hikes/%s/more.html' % data[0]
                if (exists(moreInfo)):
                    with tag("h2"):
                        text("Details")
                    with tag("div", id="more"):
                        text("")
                    doc.stag("br")
                    doc.stag("br")
                if (len(pics) != 0):
                    with tag("h2"):
                        text("Photos")
                    text("Click on the picture to view it full size.")
                    doc.stag("br")
                    for pic in pics:
                        with tag("a", href=pic):
                            doc.stag("img", src=pic, width="280px")
                    doc.stag("br")
                    doc.stag("br")
                # Heavy figures load on demand into named iframes.
                with tag("h2"):
                    text("Elevation profile")
                with tag("a", href="elev.html", target="eleviframe"):
                    with tag('button', type='submit'):
                        text("Click to load figure below.")
                with tag("iframe", name="eleviframe", src="about:blank",
                         width="850", height="400"):
                    text("")
                doc.stag("br")
                doc.stag("br")
                with tag("h2"):
                    text("3D trip view")
                with tag("a", href="3d.html", target="myiFrame"):
                    with tag('button', type='submit'):
                        text("Click to load figure below.")
                with tag("iframe", name="myiFrame", src="about:blank",
                         width="850", height="850"):
                    text("")
                doc.stag("br")
                doc.stag("br")
    with open('hikes/%s/index.html' % data[0], 'w') as f:
        f.write(indent(doc.getvalue(), indent_text=True))
def HTML_Template(log_document):
    """Render the SIEM alert e-mail body as an HTML string.

    Reads the event catalogue via load_events() and fills in the fields of
    ``log_document`` (keys used: 'event', 'type', 'offense_close_time',
    'device'). Returns the HTML string, or None if rendering raised —
    the failure is logged to stdout rather than propagated so a broken
    record cannot kill the alerting pipeline.
    """
    events = load_events()
    doc, tag, text = Doc().tagtext()
    try:
        with tag('div', dir='ltr'):
            with tag('div', align='center'):
                with tag('h1'):
                    text('! Important Alert from Shoham SIEM System !')
                doc.stag('br')
            with tag('div', align='left'):
                with tag('h3'):
                    text('Report details:')
                doc.stag('br')
                # Bold label followed by its plain-text value, one per line.
                details = (
                    ('Event name: ', events[log_document['event']]['name']),
                    ('Description: ',
                     events[log_document['event']]['description']),
                    ('Type: ', log_document['type']),
                    ('Alerted time: ',
                     str(log_document['offense_close_time'])),
                )
                for label, value in details:
                    with tag('b'):
                        text(label)
                    text(value)
                    doc.stag('br')
                with tag('b'):
                    text('Devices included: ')
                doc.stag('br')
                with tag('ul'):
                    for device in log_document['device']:
                        with tag('li'):
                            text(device)
        return doc.getvalue()
    except Exception as e:
        print("Exception in HTML: " + str(e))
def genProject(): doc, tag, text = Doc().tagtext() with tag('table', ("class", "tg"), style="margin-right:auto; margin-left:auto"): with tag("tr"): with tag("th", ("class", "tg-yw4l")): text("Hike name") with tag("th", ("class", "tg-yw4l")): text("Start date") with tag("th", ("class", "tg-yw4l")): text("Hike time") with tag("th", ("class", "tg-yw4l")): text("Distance (miles)") with tag("th", ("class", "tg-yw4l")): text("Elevation gain (feet)") with tag("th", ("class", "tg-yw4l")): text("Elevation loss (feet)") totTime = 0 totDist = 0 totGain = 0 totLoss = 0 for hike in listdir('./hikes/'): if (hike == "other"): continue print("Getting data for hike %s" % hike) data = analyzeGpx(hike) print("Generating HTML pages for hike %s" % hike) genSubpage(data) with tag("tr"): with tag("td", ("class", "tg-yw4l")): with tag("a", href="hikes/%s" % (data[0])): text(data[1]) with tag("td", ("class", "tg-yw4l")): datestart = data[3] - timedelta(hours=7) date_time = datestart.strftime( "%m/%d/%Y, %I:%M %p Pacific Time") text(date_time) with tag("td", ("class", "tg-yw4l")): text(data[2]) with tag("td", ("class", "tg-yw4l")): text(data[5]) with tag("td", ("class", "tg-yw4l")): text(data[6]) with tag("td", ("class", "tg-yw4l")): text(data[7]) totTime = totTime + data[8] totDist = totDist + float(data[5]) totGain = totGain + float(data[6]) totLoss = totLoss + float(data[7]) copyfile("./mapTemplate.html", "hikes/%s/map.html" % hike) with tag("div", style="text-align:center;"): with tag("b"): text("Total distance: ") text("%.2f miles" % totDist) doc.stag('br') with tag("b"): timeTot = "%i days, %i hours, %i minutes" % (floor( totTime / 60 / 60 / 24), floor( totTime / 60 / 60), floor(totTime) / 60 % 60) text("Total time: ") text(timeTot) doc.stag('br') with tag("b"): text("Total elevation gain: ") text("%.0f feet" % totGain) doc.stag('br') with tag("b"): text("Total elevation loss: ") text("%.0f feet" % totLoss) doc.stag('br') with open('hikes.html', 'w') as f: 
f.write(indent(doc.getvalue(), indent_text=True))
output_str.append(' {\n') for key in data: output_str.append("\t" + key + ": " + data[key] + ";\n") output_str.append("}\n") return ''.join(output_str) content = [] css_list = [] xmax = data["xmax"] ymax = data["ymax"] #TODO: Sort the regions in a top down fashion for element in data['regions']: doc, tag, text = Doc().tagtext() if element["type"] == "Label": with tag('label'): text(element["content"]) content.append(doc.getvalue()) content.append("<br/>") elif element["type"] == "TextBox": doc.stag('input', type='text', klass='col-sm-6', placeholder=element['content'], id=element['id']) content.append(doc.getvalue()) content.append("<br/>") elif element["type"] == "CheckBox": with tag('input',
def generateXML(self, name: str = 'NetInformation') -> str:
    """Serialize this net-information record to a WQX XML fragment.

    :param name: element name used for the enclosing tag.
    :raises WQXException: if the mandatory netTypeName attribute is unset.
    :return: the generated XML as a string.
    """
    doc, tag, text, line = Doc().ttl()
    with tag(name):
        # netTypeName is the only mandatory field.
        if self.__netTypeName is None:
            raise WQXException("Attribute 'netTypeName' is required.")
        line('NetTypeName', self.__netTypeName)
        # Optional measures: each serializes itself; emit only those set,
        # in the schema-mandated order.
        optional_measures = (
            (self.__netSurfaceAreaMeasure, 'NetSurfaceAreaMeasure'),
            (self.__netMeshSizeMeasure, 'NetMeshSizeMeasure'),
            (self.__boatSpeedMeasure, 'BoatSpeedMeasure'),
            (self.__currentSpeedMeasure, 'CurrentSpeedMeasure'),
        )
        for measure, element_name in optional_measures:
            if measure is not None:
                doc.asis(measure.generateXML(element_name))
    return doc.getvalue()
def gen_html_round(self, html_div_list, i_round):
    """Assemble the complete HTML report page for one SELEX-Seq round.

    :param html_div_list: pre-rendered HTML fragments, one per section,
        inserted verbatim and separated by <hr> rules.
    :param i_round: round number shown in the title and heading.
    :return: the indented HTML document as a string.
    """
    css = FileProcessor.get_style_str()
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html', lang="en"):
        with tag('head'):
            with tag('title'):
                text(f'SELEX-Seq Result, Round {i_round}')
            doc.stag('meta', charset="utf-8")
            doc.stag('meta', name="viewport",
                     content="width=device-width, initial-scale=1")
            with tag('style'):
                text(css)
        with tag('body'):
            with tag('h1'):
                text(f'IniMotif: SELEX-Seq Result, Round {i_round}')
            with tag('h2'):
                text(f'identifier={self.identifier}')
            doc.stag('br')
            # Run parameters, one per line.
            for summary in (f'minimum kmer length: {self.min_kmer_len}',
                            f'maximum kmer length: {self.max_kmer_len}'):
                text(summary)
                doc.stag('br')
            # Pre-rendered sections, separated by horizontal rules.
            for fragment in html_div_list:
                doc.stag('hr')
                doc.asis(fragment)
    # indent_text=True also indents text directly contained in a tag.
    return indent(doc.getvalue(), indent_text=True)
# Demo script: render an HTML <form> with a multi-select whose default
# selections are supplied through Doc(defaults=...).
# BUG FIX: the original imported Doc twice; the duplicate import is removed
# and the two imports are merged into one statement.
from yattag import Doc, indent

doc, tag, text, line = Doc(defaults={
    'ingredient': ['chocolate', 'coffee']
}).ttl()
with tag('form', action=""):
    line('label', 'Select one or more ingredients')
    # doc.select/doc.option pre-select the options matching the defaults.
    with doc.select(name='ingredient', multiple="multiple"):
        for value, description in (("chocolate", "Dark Chocolate"),
                                   ("almonds", "Roasted almonds"),
                                   ("honey", "Acacia honey"),
                                   ("coffee", "Ethiopian coffee")):
            with doc.option(value=value):
                text(description)
    doc.stag('input', type="submit", value="Validate")

result = indent(doc.getvalue(),
                indentation=' ',
                newline='\r\n',
                indent_text=True)
print(result)
from yattag import Doc import csv import os import pickle doc, tag, text = Doc().tagtext() FILE_PATH = 'device_function_urls.csv' LIVESTREAM_URL = '' # HTML markup variables TITLE = "GIX IoT Hub" IMAGES = { 'porg': "https://images-na.ssl-images-amazon.com/images/I/917jytDQwJL" "._AC_SL1500_.jpg", 'screen': "https://images-na.ssl-images-amazon.com/images/I/51b9oSYE-rL.jpg", 'servo': "https://images-na.ssl-images-amazon.com/images/I/515cu4qXTrL" "._AC_SL1100_.jpg", 'blink': "https://images-na.ssl-images-amazon.com/images/I/71JLez8iW5L" "._AC_SL1002_.jpg", 'generic': "https://images-na.ssl-images-amazon.com/images/I/61W7jyyhFxL" "._AC_SL1200_.jpg" } # Get devices try: DEVICES = pickle.load(open("devices.pickle", "rb"))
print "<!DOCTYPE html>" url ='http://IP:5984/accounts/_design/top_balance/_view/top_balance' response =requests.get(url, auth=('admin','uptime_challenge')) data = response.json() balancedict={} for key,value in data.iteritems() : if isinstance(value, list) : for element in value : balancedict.update({element['key']: element['value']}) sortedlist= sorted(balancedict.items(), key=lambda x:x[1], reverse=True) resultdict={} doc,tag, text=Doc().tagtext() with tag('html'): with tag('head') : #with tag('link',('rel','stylesheet'),('type','text/css'),('href','mystyle.css')) : print '<link rel="stylesheet" type="text/css" href="mystyle.css">' with tag('body'): with tag('h1'): text('Uptime Challenge Top User Balance') with tag('table',('border', '1'),('style','width:50%')): with tag('tr') : with tag('td', ('style', 'font-weight:bold')) : text('Group') with tag('td',('style','font-weight:bold')) : text('Balance') for i,(a,b) in enumerate(sortedlist) : with tag('tr') :