def build(self):
    """Render the accumulated content (paragraphs and tables) as a
    Bootstrap-styled HTML page and write it to ``self._file_path``."""
    # ttl() = (doc, tag, text, line) — `line` emits a one-line element.
    doc, tag, text, line = Doc().ttl()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        with tag('head'):
            doc.stag('meta', charset="utf-8")
            # jQuery slim (Bootstrap dependency).
            with tag('script'):
                doc.attr(src="https://code.jquery.com/jquery-3.3.1.slim.min.js")
                doc.attr(integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo")
                doc.attr(crossorigin="anonymous")
            # Popper.js (Bootstrap dependency).
            with tag('script'):
                doc.attr(src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js")
                doc.attr(integrity="sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49")
                doc.attr(crossorigin="anonymous")
            # Bootstrap 4.1.3 CSS + JS.
            doc.stag('link', rel="stylesheet", href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css", integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO", crossorigin="anonymous")
            with tag('script'):
                doc.attr(src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js")
                doc.attr(integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy")
                doc.attr(crossorigin="anonymous")
            line('title', self.title)
        with tag('body'):
            for element in self._content:
                if element['type'] == self.LINE:
                    # Raw insertion: element text is trusted HTML.
                    doc.asis('<p>' + element['element'] + '</p>')
                elif element['type'] == self.TABLE:
                    # First row is the header; remaining rows are data.
                    table = element['element']
                    with tag('table', klass='table table-bordered table-striped'):
                        with tag('thead'):
                            with tag('tr'):
                                for cell in table[0]:
                                    line('th', cell)
                        with tag('tbody'):
                            for row in range(1, len(table)):
                                with tag('tr'):
                                    for cell in table[row]:
                                        c = str(cell)
                                        # NOTE(review): this replace is a no-op
                                        # ('=>' -> '=>'); presumably the target
                                        # was once an HTML entity (e.g. &rArr;)
                                        # lost in transcoding — confirm intent.
                                        c = c.replace('=>', '=>')
                                        doc.asis('<td>' + c + '</td>')
    result = indent(doc.getvalue())
    with open(self._file_path, 'w') as out_file:
        out_file.write(result)
def show_help(message = None):
    """Print a BatchProfiler help/error page (CGI, Python 2).

    message -- optional error string to embed in the page body.
    Emits the Content-Type header and the indented HTML to stdout.
    """
    import yattag
    doc, tag, text = yattag.Doc().tagtext()
    assert isinstance(doc, yattag.Doc)
    with tag("html"):
        with tag("head"):
            with tag("title"):
                text("BatchProfiler: view text file")
        with tag("body"):
            with tag("div"):
                text("This page is designed to be used by ")
                with tag("a", href="ViewBatch.py"):
                    text("View Batch")
                # Only include the error sentence when a message was given.
                if message is not None:
                    text("The page reports the following error: \"%s\"" % message)
                text(". Most likely you have reached this page in error. ")
                text("If you feel that's not the case, you can contact your ")
                text("sysadmin and if you are the sysadmin, you can post to ")
                text("the ")
                with tag("a", href="http://cellprofiler.org/forum"):
                    text("CellProfiler forum")
                text(".")
    # CGI response: header, blank line, then the document.
    print "Content-Type: text/html"
    print
    print yattag.indent(doc.getvalue())
def as_xml(self):
    """Render this workflow as an Oozie workflow-app XML document.

    Actions are chained in order: each action's ok-transition points to
    the next action, the last one to "end"; every error goes through a
    "notify" email action and then to "kill".
    """
    doc, tag, text = Doc().tagtext()
    doc.asis("<?xml version='1.0' encoding='UTF-8'?>")
    with tag('workflow-app', ('xmlns:sla', 'uri:oozie:sla:0.2'), name=self.name, xmlns="uri:oozie:workflow:0.5"):
        # Workflow entry point is the first action.
        doc.stag('start', to=self.actions[0].name)
        for index, action in enumerate(self.actions):
            with tag("action", name=action.name):
                # NOTE(review): `indent` (a function) is passed as the
                # argument to action.as_xml — presumably its `indentation`
                # flag or formatter; confirm against the action API.
                doc.asis(action.as_xml(indent))
                if index + 1 < len(self.actions):
                    next_action = self.actions[index+1]
                    doc.stag("ok", to=next_action.name)
                else:
                    doc.stag("ok", to="end")
                doc.stag("error", to="notify")
        # Failure-notification email action.
        with tag("action", name="notify"):
            with tag("email", xmlns="uri:oozie:email-action:0.1"):
                with tag("to"):
                    text(self.email)
                with tag("subject"):
                    text("WF ${wf:name()} failed at ${wf:lastErrorNode()}")
                with tag("body"):
                    text("http://hue.data.shopify.com/oozie/list_oozie_workflow/${wf:id()}")
            doc.stag("ok", to="kill")
            doc.stag("error", to="kill")
        with tag("kill", name="kill"):
            with tag("message"):
                text("${wf:lastErrorNode()} - ${wf:id()}")
        doc.stag('end', name="end")
    return indent(doc.getvalue())
def process_dump_file(self, p):
    """Parse tcpdump output lines from subprocess *p* into Event objects.

    Finished events are appended to the module-level ``events`` list and
    mirrored into an HTML document via ``self.add_event_to_html``.
    Returns the indented HTML report.
    """
    add_event = 0
    event = None
    main_doc, main_tag, main_text = Doc().tagtext()
    for raw in iter(p.stdout.readline, b''):
        line = raw.decode()  # decode once per line, not per regex
        # A timestamp line marks the beginning of a new packet.
        m = reg_timestamp.match(line)
        if m:
            # Flush the previous packet unless it was discarded.
            if add_event == 1:
                events.append(event)
                self.add_event_to_html(event, main_doc, main_tag, main_text)
            add_event = 1
            event = Event(m.group('timestamp'), m.group('protocol'))
        if event is None:
            # BUG FIX: stray lines before the first timestamp used to hit
            # `event` unbound and raise NameError; skip them instead.
            continue
        m = reg_ip_1.search(line)
        if m:
            event.id = m.group('id')
            event.t_protocol = m.group('transport_protocol')
            event.length = m.group('length')
        else:
            m = reg_ip_2.search(line)
            if m:
                event.src = m.group('src')
                event.src_port = m.group('src_port')
                event.dst = m.group('dst')
                event.dst_port = m.group('dst_port')
        m = reg_length.search(line)
        if m:
            # BUG FIX: the old code compared the matched *string* against
            # the integer 0 (always unequal), so zero-length packets were
            # processed too. Compare numerically as the comment intended.
            length = int(m.group('length'))
            if length != 0:
                self.process_host(event, length)
        # A port-unreachable error discards the current packet.
        m = reg_port_error.search(line)
        if m:
            add_event = 0
    # BUG FIX: flush the final packet — the loop only flushed an event
    # when the *next* timestamp arrived, silently dropping the last one.
    if add_event == 1 and event is not None:
        events.append(event)
        self.add_event_to_html(event, main_doc, main_tag, main_text)
    return indent(main_doc.getvalue())
def html_from_tree(tree):
    """Generate indented HTML from VOTL Object tree.

    Params:
        tree (Object): A VOTL Object containing the tree, from which HTML
            will be generated.

    Returns: String containing a pretty-formatted HTML document.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis("<!DOCTYPE html>")
    # ROBUSTNESS FIX: an empty tree used to raise IndexError on
    # tree.children[0]; fall back to "Untitled" instead.
    title = "Untitled"
    if tree.children:
        first = tree.children[0]
        if first.object_type == "header":
            title = first.first_line
    with tag("html"):
        with tag("head"):
            with tag("title"):
                text(title)
            doc.stag("meta", charset="utf-8")
        with tag("body"):
            # Body content is produced by the recursive renderer.
            _recurse_tags(tree, doc, tag, text)
    return indent(doc.getvalue())
def as_xml(self, indentation=False):
    """Render this shell action as Oozie shell-action XML.

    indentation -- when True, return pretty-printed XML; otherwise the
    compact yattag output.
    """
    doc, tag, text = Doc().tagtext()
    with tag('shell', xmlns="uri:oozie:shell-action:0.2"):
        # do we actually need these even if we dont use them?
        with tag('job-tracker'):
            text(os.environ["JOBTRACKER"])
        with tag('name-node'):
            text(os.environ["NAMENODE"])
        with tag('exec'):
            text(self.command)
        # Repeated simple elements, emitted in Oozie's required order.
        repeated = (
            ('argument', self.arguments),
            ('env-var', self.environment_vars),
            ('archive', self.archives),
            ('file', self.files),
        )
        for element_name, values in repeated:
            for value in values:
                with tag(element_name):
                    text(value)
    rendered = doc.getvalue()
    return indent(rendered) if indentation else rendered
def write_junit_xml(report, tests_file, dataset_dir, dataset_name=None):
    """Write *report* as a JUnit XML file named after the test run.

    The file name combines the tests file stem, the dataset directory,
    an optional dataset name, and a timestamp. ``dataset_name=None``
    marks a multi-dataset report and omits that component.
    """
    # Can't use `.isoformat()` for the timestamp: its `:` separators make
    # the first part of the file name look like an (invalid) protocol
    # specifier in relative links.
    stamp = datetime.now().strftime('%Y-%m-%d-%H.%M.%S')
    parts = [stem(tests_file), basename(dataset_dir), stamp]
    if dataset_name:
        parts.insert(2, dataset_name)
    # `junit2html` has a bug: it lowercases target file names in links but
    # creates the files with their given case, breaking the links. Work
    # around it by lowercasing our JUnit XML file names up front.
    junit_file = ('-'.join(parts) + '.xml').lower()
    logging.debug("Writing XML test report to file: `%s`", junit_file)
    with open(junit_file, 'w') as output:
        output.write(indent(report.as_junit_xml()))
def get_session(request, data):
    """Create a new session after validating the API key and token.

    Raises InvalidAPIUsage (formatted per the caller's requested output
    format) for missing parameters or an invalid/expired/unauthorized
    token. Returns a Last.fm-compatible <lfm><session> response.
    """
    # Bind the output format first so every error path can use it.
    output_format = data.get('format', 'xml')
    try:
        api_key = data['api_key']
        token = Token.load(data['token'], api_key)
    except KeyError:
        raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS,
                              output_format=output_format)  # Missing Required Params
    if not token:
        if not Token.is_valid_api_key(api_key):
            raise InvalidAPIUsage(CompatError.INVALID_API_KEY,
                                  output_format=output_format)  # Invalid API_key
        raise InvalidAPIUsage(CompatError.INVALID_TOKEN,
                              output_format=output_format)  # Invalid token
    if token.has_expired():
        raise InvalidAPIUsage(CompatError.TOKEN_EXPIRED,
                              output_format=output_format)  # Token expired
    if not token.user:
        raise InvalidAPIUsage(CompatError.UNAUTHORIZED_TOKEN,
                              output_format=output_format)  # Unauthorized token
    session = Session.create(token)
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        with tag('session'):
            with tag('name'):
                text(session.user.name)
            with tag('key'):
                text(session.sid)
            with tag('subscriber'):
                text('0')
    # CONSISTENCY FIX: reuse output_format instead of re-reading
    # data.get('format', "xml") a second time.
    return format_response('<?xml version="1.0" encoding="utf-8"?>\n'
                           + yattag.indent(doc.getvalue()),
                           output_format)
def send_batch_submission_email(self, batch, vb_url):
    """Email an HTML summary for a submitted batch.

    batch  -- the submitted batch (uses batch.batch_id).
    vb_url -- link target for the batch number in the heading.
    Recipient and data directory come from BATCHPROFILER_DEFAULTS.
    """
    doc, tag, text = yattag.Doc().tagtext()
    with tag("html"):
        with tag("head"):
            with tag("title"):
                text("Batch # %d" % batch.batch_id)
            # Inline table styling for mail clients.
            with tag("style", type="text/css"):
                doc.asis("""
                table {
                    border-spacing: 0px;
                    border-collapse: collapse;
                }
                td {
                    text-align: left;
                    vertical-align: baseline;
                    padding: 0.1em 0.5em;
                    border: 1px solid #666666;
                }
                """)
        with tag("body"):
            with tag("h1"):
                text("Results for batch # ")
                with tag("a", href = vb_url):
                    text(str(batch.batch_id))
            with tag("div"):
                text("Data Directory: %s" % BATCHPROFILER_DEFAULTS[DATA_DIR])
    email_text = yattag.indent(doc.getvalue())
    send_html_mail(recipient=BATCHPROFILER_DEFAULTS[EMAIL],
                   subject = "Batch %d submitted"%(batch.batch_id),
                   html=email_text)
def getSession(request, data): token = Token.load(data['token']) if not token: print "Invalid token" return "NOPE" if not token.user: print "Token not validated" return "NOPE" print "GRANTING SESSION for token %s" % token.token token.consume() session = Session.create(token.user) doc, tag, text = Doc().tagtext() with tag('lfm', status="ok"): with tag('session'): with tag('name'): text(session.user.name) with tag('key'): text(session.id) with tag('subscriber'): text('0') return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def saveRelease(self, release):
    """
    Create an XML file of release metadata that Dalet will be happy with

    :param release: Processed release metadata from MusicBrainz
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('Titles'):
        with tag('GlossaryValue'):
            # Both Key1 and ItemCode carry the MusicBrainz release ID.
            with tag('GlossaryType'):
                text('Release')
            with tag('Key1'):
                text(release.mbID)
            with tag('ItemCode'):
                text(release.mbID)
            with tag('KEXPReviewRich'):
                text(release.review)
    xml_text = indent(doc.getvalue())
    target = path.join(self.release_meta_dir, 'r' + release.mbID + ".xml")
    with open(target, "wb") as f:
        f.write(xml_text.encode("UTF-8"))
def show_xml():
    """Build an XML index of every cached page's latest data.

    Scans CACHEDIR for page subdirectories, reads each page's
    '<page>/latest' snapshot, and emits one <item> per page.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="utf-8" ?>')
    # xml tags
    with tag('sac_uo'):
        for entry in os.listdir(CACHEDIR):
            if not os.path.isdir(os.path.join(CACHEDIR, entry)):
                continue
            item = get_data_from_page('%s/latest' % entry)
            if not item:
                continue
            with tag('item'):
                # 'iddep' before 'dep' preserves the original "mini hack"
                # field ordering.
                for field in ('id', 'nom', 'resp', 'iddep', 'dep', 'centers'):
                    with tag(field):
                        text(item[field])
    return indent(doc.getvalue(), indentation=' ' * 4, newline='\r\n')
def now_playing(request, data): sk = data['sk'] session = Session.load(sk) if not session: print "Invalid session" return "NOPE" track = data['track'] artist = data['artist'] album = data['album'] albumArtist = data['albumArtist'] print "NOW PLAYING- User: %s, Artist: %s, Track: %s, Album: %s" \ % (session.user.name, artist, track, album) doc, tag, text = Doc().tagtext() with tag('lfm', status="ok"): with tag('nowplaying'): with tag('track', corrected="0"): text(track) with tag('artist', corrected="0"): text(artist) with tag('album', corrected="0"): text(album) with tag('albumArtist', corrected="0"): text(albumArtist) with tag('ignoredMessage', code="0"): text('') return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def __to_html(self, pmid, title, abstract, output_dir):
    """Generate HTML file for Anndoc

    Write a HTML file required for Anndoc, formatted according to
    TagTog's standards that can be viewed at the link below.
    https://github.com/jmcejuela/tagtog-doc/wiki

    By default, the MEDLINE identifier will be used as the title,
    unless something else is specified.

    Args:
        pmid (str): MEDLINE identifier; used for the data-origid
            attribute, the hashId and the output file name.
        title (str): Title of the paper
        abstract (str): Abstract contents of the paper
        output_dir (str): Directory the <pmid>.html file is written to.

    Raises:
        IOError: re-raised after printing, if the file cannot be written.
    """
    from yattag import Doc
    from yattag import indent
    from os.path import join
    doc, tag, text = Doc().tagtext()
    # Compute hashId (TODO find out what hashing is used, currently random)
    hashId = self.__random_hashId(pmid)
    # Use Yattag to generate HTML syntax
    doc.asis('<!DOCTYPE html>')
    with tag('html', ('data-origid', pmid), ('data-anndoc-version', "2.0"), ('lang', ""), ('xml:lang', ""), ('xmlns', "http://www.w3.org/1999/xhtml"), klass='anndoc', id=hashId):
        with tag('head'):
            doc.stag('meta', charset='UTF-8')
            doc.stag('meta', name='generator', content='org.rostlab.relna')
            with tag('title'):
                text(hashId)
        with tag('body'):
            with tag('article'):
                # TagTog sections: title first, then the abstract.
                with tag('section', ('data-type', 'title')):
                    with tag('h2', id='s1h1'):
                        text(title)
                with tag('section', ('data-type', 'abstract')):
                    with tag('h3', id='s2h1'):
                        text("Abstract")
                    with tag('div', klass='content'):
                        with tag('p', id='s2p1'):
                            text(abstract)
    # Write to file
    result = indent(doc.getvalue())
    try:
        with open(join(output_dir, pmid+'.html'), 'w') as fw:
            fw.write(result)
    except IOError as e:
        print('I/O Error({0}): {1}'.format(e.errno, e.strerror))
        raise
def session_info(request, data):
    """Return session details for a valid session key (Last.fm compat).

    Validates that the session exists and belongs to *username*, then
    returns an <lfm><application><session> XML response.
    """
    # BUG FIX: output_format must be bound BEFORE the try block — the
    # except-clause references it, so a missing 'sk' used to raise
    # NameError instead of the intended InvalidAPIUsage.
    output_format = data.get('format', 'xml')
    try:
        sk = data['sk']
        api_key = data['api_key']
        username = data['username']
    except KeyError:
        raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS,
                              output_format=output_format)  # Missing Required Params
    session = Session.load(sk)
    if (not session) or User.load_by_name(username).id != session.user.id:
        raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY,
                              output_format=output_format)  # Invalid Session KEY
    print("SESSION INFO for session %s, user %s" % (session.id, session.user.name))
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        with tag('application'):
            with tag('session'):
                with tag('name'):
                    text(session.user.name)
                with tag('key'):
                    text(session.id)
                with tag('subscriber'):
                    text('0')
            with tag('country'):
                text('US')
    return format_response('<?xml version="1.0" encoding="utf-8"?>\n'
                           + yattag.indent(doc.getvalue()),
                           output_format)
def to_html(self, config):
    """Generate a html stream of the whole presentation.

    Walks chapters -> sections -> subsections -> slides, keeping a
    running position counter and updating the presentation metadata at
    each level, and renders one impress.js <div> per slide.

    Parameters
    ----------
    config : MatisseConfig
      MaTiSSe configuration

    Returns
    -------
    str
      The indented HTML document.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        # doc.attr(title=self.metadata['title'].value)
        self.__put_html_tag_head(doc=doc, tag=tag, text=text, config=config)
        # body onload starts the presentation countdown timer.
        with tag('body', onload="resetCountdown(" + str(self.metadata['max_time'].value) + ");"):
            doc.attr(klass='impress-not-supported')
            with tag('div', id='impress'):
                # numbering: [local_chap, local_sec, local_subsec, local_slide]
                current = [0, 0, 0, 0]
                for chapter in self.chapters:
                    current[0] += 1
                    current[1] = 0
                    current[2] = 0
                    current[3] = 0
                    self.metadata['chaptertitle'].update_value(value=chapter.title)
                    self.metadata['chapternumber'].update_value(value=chapter.number)
                    for section in chapter.sections:
                        current[1] += 1
                        current[2] = 0
                        current[3] = 0
                        self.metadata['sectiontitle'].update_value(value=section.title)
                        self.metadata['sectionnumber'].update_value(value=section.number)
                        for subsection in section.subsections:
                            current[2] += 1
                            current[3] = 0
                            self.metadata['subsectiontitle'].update_value(value=subsection.title)
                            self.metadata['subsectionnumber'].update_value(value=subsection.number)
                            for slide in subsection.slides:
                                current[3] += 1
                                self.metadata['slidetitle'].update_value(value=slide.title)
                                self.metadata['slidenumber'].update_value(value=slide.number)
                                # One div per slide; positioning attributes
                                # come from every enclosing level.
                                with doc.tag('div'):
                                    chapter.put_html_attributes(doc=doc)
                                    section.put_html_attributes(doc=doc)
                                    subsection.put_html_attributes(doc=doc)
                                    slide.put_html_attributes(doc=doc)
                                    # Decorator order: header, left sidebar,
                                    # content, right sidebar, footer.
                                    self.__put_html_slide_decorators(tag=tag, doc=doc, decorator='header', current=current, overtheme=slide.overtheme)
                                    # with doc.tag('div'):
                                    #   doc.attr(style='clear: both;')
                                    self.__put_html_slide_decorators(tag=tag, doc=doc, decorator='sidebar', position='L', current=current, overtheme=slide.overtheme)
                                    slide.to_html(doc=doc, parser=self.parser, metadata=self.metadata, theme=self.theme, current=current)
                                    self.__put_html_slide_decorators(tag=tag, doc=doc, decorator='sidebar', position='R', current=current, overtheme=slide.overtheme)
                                    # with doc.tag('div'):
                                    #   doc.attr(style='clear: both;')
                                    self.__put_html_slide_decorators(tag=tag, doc=doc, decorator='footer', current=current, overtheme=slide.overtheme)
    self.__put_html_tags_scripts(doc=doc, tag=tag, config=config)
    # source = re.sub(r"<li>(?P<item>.*)</li>", r"<li><span>\g<item></span></li>", source)
    html = indent(doc.getvalue())
    return html
def as_xml(self):
    """Render this Oozie bundle (all coordinators) as indented XML."""
    doc, tag, text = Doc().tagtext()
    doc.asis("<?xml version='1.0' encoding='UTF-8'?>")
    with tag('bundle-app', name=self.name, xmlns="uri:oozie:bundle:0.1"):
        # One <coordinator> entry per coordinator, pointing at its HDFS path.
        for coord in self.coordinators:
            with tag("coordinator", name=coord.name):
                with tag("app-path"):
                    app_path = "/" + coord.path + "/" + coord.name
                    text(app_path)
    return indent(doc.getvalue())
def getToken(request, data): token = Token.generate() print "ISSUING TOKEN %s" % token.token doc, tag, text = Doc().tagtext() with tag('lfm', status="ok"): with tag('token'): text(token.token) return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def record_listens(request, data):
    """ Submit the listen in the lastfm format to be inserted in db.
        Accepts listens for both track.updateNowPlaying and
        track.scrobble methods.
    """
    output_format = data.get('format', 'xml')
    try:
        sk, api_key = data['sk'], data['api_key']
    except KeyError:
        raise InvalidAPIUsage(CompatError.INVALID_PARAMETERS, output_format=output_format)   # Invalid parameters
    session = Session.load(sk)
    if not session:
        if not Token.is_valid_api_key(api_key):
            raise InvalidAPIUsage(CompatError.INVALID_API_KEY, output_format=output_format)   # Invalid API_KEY
        raise InvalidAPIUsage(CompatError.INVALID_SESSION_KEY, output_format=output_format)   # Invalid Session KEY
    # Group PHP-style array params (e.g. "track[0]") into one dict per
    # listen, keyed by the bracketed index; bare keys map to index 0.
    lookup = defaultdict(dict)
    for key, value in data.items():
        if key in ["sk", "token", "api_key", "method", "api_sig"]:
            continue
        matches = re.match('(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value
    # now-playing submissions may omit the timestamp; default to "now".
    if request.form['method'].lower() == 'track.updatenowplaying':
        for i, listen in lookup.items():
            if 'timestamp' not in listen:
                listen['timestamp'] = calendar.timegm(datetime.now().utctimetuple())
    # Convert to native payload then submit 'em after validation.
    listen_type, native_payload = _to_native_api(lookup, data['method'], output_format)
    for listen in native_payload:
        validate_listen(listen, listen_type)
    user = db_user.get(session.user_id)
    augmented_listens = insert_payload(native_payload, user, listen_type=listen_type)
    # With corrections than the original submitted listen.
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        if listen_type == 'playing_now':
            doc.asis(create_response_for_single_listen(list(lookup.values())[0], augmented_listens[0], listen_type))
        else:
            accepted_listens = len(lookup.values())
            # Currently LB accepts all the listens and ignores none
            with tag('scrobbles', accepted=accepted_listens, ignored='0'):
                for original_listen, augmented_listen in zip(list(lookup.values()), augmented_listens):
                    doc.asis(create_response_for_single_listen(original_listen, augmented_listen, listen_type))
    return format_response('<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue()), output_format)
def as_xml(self, indentation=False):
    """Render this sub-workflow action as Oozie XML.

    indentation -- when True, return pretty-printed XML.
    """
    doc, tag, text = Doc().tagtext()
    with tag('sub-workflow'):
        with tag('app-path'):
            app_path = "/" + self.sub_wf_path + "/" + self.name
            text(app_path)
        # Pass the parent workflow's configuration down to the child.
        doc.stag("propagate-configuration")
    rendered = doc.getvalue()
    return indent(rendered) if indentation else rendered
def scrobble(request, data):
    """Handle a Last.fm track.scrobble request (Python 2).

    Validates the session, records every submitted listen via
    session.user.scrobble, and echoes them back as <lfm><scrobbles> XML.
    """
    sk = data['sk']
    session = Session.load(sk)
    if not session:
        print "Invalid session"
        return "NOPE"
    # FUUUUUUU PHP ARRAYS
    # Group PHP-style array params ("track[0]", "artist[0]", ...) into
    # one dict per listen; keys without an index go to slot 0.
    lookup = defaultdict(dict)
    for key, value in data.items():
        matches = re.match('(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status="ok"):
        with tag('scrobbles'):
            for _, dataset in lookup.items():
                # Skip groups that never carried a track (stray params).
                if 'track' not in dataset:
                    continue
                artist = dataset['artist']
                track = dataset['track']
                album = dataset['album']
                albumArtist = dataset['albumArtist']
                timestamp = dataset['timestamp']
                print "SCROBBLE- User: %s, Artist: %s, Track: %s, Album: %s" \
                    % (session.user.name, artist, track, album)
                session.user.scrobble(timestamp, artist, track, album, albumArtist)
                with tag('scrobble'):
                    with tag('track', corrected="0"):
                        text(track)
                    with tag('artist', corrected="0"):
                        text(artist)
                    with tag('album', corrected="0"):
                        text(album)
                    with tag('albumArtist', corrected="0"):
                        text(albumArtist)
                    with tag('timestamp', corrected="0"):
                        text(timestamp)
                    with tag('ignoredMessage', code="0"):
                        text('')
    return '<?xml version="1.0" encoding="utf-8"?>\n' + yattag.indent(doc.getvalue())
def handle_get():
    '''Display a form for fixing the permissions

    CGI GET handler (Python 2): renders a page with a Batch-ID input and
    a button wired to an AJAX fix_permissions() call, then prints the
    XHTML response to stdout.
    '''
    batch_id_id = "input_%s" % BATCH_ID
    button_id = "button_%s" % BATCH_ID
    fix_permissions_action = "on_click_%s()" % button_id
    doc, tag, text = yattag.Doc().tagtext()
    assert isinstance(doc, yattag.Doc)
    with tag("html"):
        with tag("head"):
            with tag("title"):
                text(TITLE)
            with tag("script", language="JavaScript"):
                doc.asis(FIX_PERMISSIONS_AJAX_JAVASCRIPT)
                # NOTE(review): both %s slots receive button_id, so the
                # click handler passes the *button* element to
                # fix_permissions(); presumably the input (batch_id_id)
                # was intended — confirm against fix_permissions().
                doc.asis("""
function on_click_%s() {
    fix_permissions(document.getElementById('%s'));
}""" % (button_id, button_id))
        with tag("body"):
            with tag("h1"):
                text(TITLE)
            with tag("div", style="width: 6in; margin-bottom: 24pt"):
                text("""
                This webpage fixes permission problems for the files
                and folders in your batch that were created outside
                BatchProfiler's control. It will grant read permission
                for the job script folder, the text output folder,
                the text and error output files and the measurements
                file.""")
            with tag("div"):
                with tag("label", **{ "for":batch_id_id }):
                    text("Batch ID")
                doc.input(name=BATCH_ID, id=batch_id_id, type="text")
                with tag("button", id=button_id,
                         onclick=fix_permissions_action):
                    text("Fix permissions")
    # CGI response: header, blank line, doctype, document.
    print "Content-Type: text/html"
    print
    print '<!DOCTYPE html PUBLIC ' \
        '"-//W3C//DTD XHTML 1.0 Transitional//EN"' \
        '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
    print yattag.indent(doc.getvalue())
def write_job_application(job_applications, path):
    """Append each job application as a <jobApplication> XML element to
    the file at *path*.

    NOTE(review): `doc`, `tag`, `text` and `indent` are not defined in
    this function — presumably module-level globals sharing one yattag
    Doc (as other scripts in this family do); confirm, otherwise this
    raises NameError. Also note the file is opened in append mode while
    doc.getvalue() returns the *whole* accumulated document — verify
    that repeated calls don't duplicate earlier content.
    """
    print ">> writing to file `job_applications.xml`";
    with open(path, "a") as f:
        for job_application in job_applications:
            with tag('jobApplication'):
                with tag('userId'):
                    text(job_application["userId"])
                with tag('itemId'):
                    text(job_application["itemId"])
                with tag('applyDate'):
                    text(job_application["applyDate"])
        result = indent(doc.getvalue())
        f.write(result)
        # Redundant: the `with` block already closes the file.
        f.close()
def main():
    """Compile 'source.txt' tokens into index.html.

    Relies on module-level globals: doc/tag/text (shared yattag Doc),
    token_stack, section_stack, string_collection, load_tokens and
    instruction_checker. Splits the token stream into sections at each
    'iziSection' token, then renders each section whose id has a known
    CSS class in string_collection.
    """
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        doc.asis('<style>')
        text('table, th, td {border: 1px solid black;}')
        doc.asis('</style>')
        with tag ('body'):
            load_tokens('source.txt')
            section_id = []
            # Pass 1: collect the tokens following each 'iziSection'
            # marker (up to the next marker) into section_stack.
            for i in range(0, len(token_stack)):
                token = token_stack[i]
                if token[0] == 'iziSection':
                    section = [ ]
                    id = token[1]
                    print(id)
                    for j in range(i+1, (len(token_stack))):
                        temp_token = token_stack[j]
                        if(temp_token[0] != 'iziSection'):
                            section.append(temp_token)
                        else:
                            break
                    section_stack.append(section)
                    section_id.append(id)
            print(section_id)
            # Pass 2: emit a <section> per collected section when its id
            # maps to a CSS class; unknown ids are only logged.
            counter = 0
            for j in range(0, len(section_stack)):
                section_class = token_stack[j]
                if (section_id[counter] not in string_collection) :
                    print("stop bullshitting!")
                else :
                    print(string_collection.get(section_id[counter]))
                    with tag ('section', klass = string_collection.get(section_id[counter])):
                        text('\n')
                        for element in section_stack[j]:
                            instruction_checker(element)
                counter = counter + 1
    result = indent(doc.getvalue(), indentation = '', newline = '\r\n')
    outpath = "index.html"
    with open (outpath, "wt") as outfile:
        outfile.write(result)
        # Redundant: the `with` block already closes the file.
        outfile.close()
def set_qti_metadata(max_attempts):
    """Build the <qtimetadata> XML fragment for an assessment.

    max_attempts -- value stored in the 'cc_maxattempts' field.
    Returns the indented XML preceded by a "<!-- Metadata -->" comment.
    """
    meta, tag, text = Doc().tagtext()
    meta.asis("<!-- Metadata -->")
    # BUG FIX: copy the defaults. The old code aliased the module-level
    # DEFAULT_QTI_META dict, so setting cc_maxattempts permanently
    # mutated the shared defaults for every later caller.
    metadata = dict(DEFAULT_QTI_META)
    metadata['cc_maxattempts'] = max_attempts
    with tag('qtimetadata'):
        for key, value in metadata.iteritems():
            with tag('qtimetadatafield'):
                with tag('fieldlabel'):
                    text(key)
                with tag('fieldentry'):
                    text(value)
    return indent(meta.getvalue())
def profileEdit(profile_id): cri = {} # Global Criteria groupPolicyID = '' profile = MpOsConfigProfiles.query.filter(MpOsConfigProfiles.profileID == profile_id).first() if profile is not None: if profile.isglobal == 1: groupPolicy = MpOsProfilesGroupAssigned.query.filter(MpOsProfilesGroupAssigned.groupID == 0, MpOsProfilesGroupAssigned.profileID == profile_id).first() if groupPolicy is not None: groupPolicyID = groupPolicy.gPolicyID criteriaQuery = MpOsProfilesCriteria.query.filter(MpOsProfilesCriteria.gPolicyID == groupPolicyID).order_by( MpOsProfilesCriteria.type_order.asc()).all() profileCritLst = [] for crit in criteriaQuery: patchCritDict = crit.__dict__ del patchCritDict['_sa_instance_state'] del patchCritDict['rid'] if patchCritDict['type'] == "Script": patchCritDict['type_data'] = escapeStringForACEEditor(patchCritDict['type_data']) profileCritLst.append(patchCritDict) cri = {} for c in criteriaQuery: cri[c.type] = c.type_data # Parse BLOB Data pData = str(profile.profileData) pData = unicode(pData, errors='replace') # Using RE get the data between <?xml ... </plist> stringlist = re.findall('<\?.+</plist>', pData, re.DOTALL) pData2 = "" pretty_string = "" if len(stringlist) >= 1: pData2 = stringlist[0] pData2 = pData2.replace('\\n', '') # Remove \n from string purely for formatting pData2 = pData2.replace('\\t', '') pretty_string = indent(pData2,indentation=' ') return render_template('os_managment/os_profile_manager.html', profileData=profile, profileDataAlt=pData, profileCriteriaAlt=cri, profileDataRE=pretty_string, profileID=profile_id, groupPolicyID=groupPolicyID)
def main(args):
    """Build a sitemap.xml document for the module-level `urls`.

    args.root -- site root prepended to every URL.
    Returns the tab-indented XML sitemap as a string.
    """
    doc, tag, text = yattag.SimpleDoc().tagtext()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('urlset', xmlns='http://www.sitemaps.org/schemas/sitemap/0.9'):
        for url in urls:
            with tag('url'):
                with tag('loc'):
                    # BUG FIX: str has no `ltrim`; lstrip('/') is the
                    # intended call to avoid a double slash.
                    text('{}/{}'.format(args.root, url.lstrip('/')))
                with tag('lastmod'):
                    pass
                with tag('changefreq'):
                    pass  # default to weekly
                with tag('priority'):
                    pass  # default to 0.5
    # BUG FIX: yattag's accessor is getvalue(), not getValue().
    return yattag.indent(doc.getvalue(), indentation='\t')
def get_project_xml(self):
    """ Get the xml for use in the xproj file

    Builds the MSBuild <Project> element (tools version, VS tool paths,
    the Globals property group with GUID/namespace/output paths, and the
    DotNet props/targets imports) and returns it as an indented string.
    """
    xml_projroot = et.Element('Project')
    xml_projroot.set('ToolsVersion', '14.0')
    xml_projroot.set('DefaultTargets', 'Build')
    xml_projroot.set('xmlns', 'http://schemas.microsoft.com/developer/msbuild/2003')
    # VS version / tools path defaults (only applied when unset).
    propgroup1 = et.SubElement(xml_projroot, 'PropertyGroup')
    studiover = et.SubElement(propgroup1, 'VisualStudioVersion')
    studiover.set('Condition', "'$(VisualStudioVersion)' == ''")
    studiover.text = '14.0'
    vstoolspath = et.SubElement(propgroup1, 'VSToolsPath')
    vstoolspath.set('Condition', "'$(VSToolsPath)' == ''")
    vstoolspath.text = r"$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)"
    import1 = et.SubElement(xml_projroot, 'Import')
    import1.set('Project', '$(VSToolsPath)\DotNet\Microsoft.DotNet.Props')
    import1.set('Condition', "'$(VSToolsPath)' != ''")
    # Globals: project identity and output locations.
    propgroup2 = et.SubElement(xml_projroot, 'PropertyGroup')
    propgroup2.set('Label', 'Globals')
    projguid = et.SubElement(propgroup2, 'ProjectGuid')
    # NOTE(review): `self.lower(...)` requires the class to define a
    # lower() method; if not, `self.GUID.lower()` was probably meant —
    # confirm against the class definition.
    projguid.text = self.lower(self.GUID)
    rootnamespace = et.SubElement(propgroup2, 'RootNamespace')
    rootnamespace.text = self.RootNamespace
    baseintermediateoutputpath = et.SubElement(propgroup2, 'BaseIntermediateOutputPath')
    baseintermediateoutputpath.set('Condition', "'$(BaseIntermediateOutputPath)'=='' ")
    baseintermediateoutputpath.text = self.BaseIntermediateOutputPath
    targetframeworkversion = et.SubElement(propgroup2, 'TargetFrameworkVersion')
    targetframeworkversion.text = self.TargetFrameworkVersion
    propgroup3 = et.SubElement(xml_projroot, 'PropertyGroup')
    schemaver = et.SubElement(propgroup3, 'SchemaVersion')
    schemaver.text = '2.0'
    import2 = et.SubElement(xml_projroot, 'Import')
    import2.set('Project', '$(VSToolsPath)\DotNet\Microsoft.DotNet.targets')
    import2.set('Condition', "'$(VSToolsPath)' != ''")
    # Serialize and pretty-print (indent() here is yattag's, applied to
    # the raw XML string).
    etstr = et.tostring(xml_projroot, encoding='utf-8', method='xml').decode('utf-8')
    outtxt = indent(etstr)
    return outtxt
def saveHtml(self, filename):
    """Write the accumulated report doc to <basedir>/<filename>.html,
    copying the bundled logo and compiled stylesheet alongside it.
    Returns the path of the written HTML file."""
    target = self.basedir + "/" + filename + ".html"
    self.saveScripts()
    # Copy the packaged Inviwo logo into <basedir>/_images.
    imgdir = mkdir(self.basedir, "_images")
    imgdata = pkgutil.get_data('ivwpy', 'regression/resources/inviwo.png')
    with open(toPath(imgdir, "inviwo.png"), 'wb') as out:
        out.write(imgdata)
    # Compile the packaged LESS stylesheet to CSS next to the report.
    cssdata = pkgutil.get_data('ivwpy', 'regression/resources/report.css')
    with open(toPath(self.basedir, "report.css"), 'w') as out:
        out.write(lesscpy.compile(io.StringIO(cssdata.decode("utf-8"))))
    with open(target, 'w') as out:
        out.write(yattag.indent(self.doc.getvalue()))
    return target
def generate_graph_html():
    """Render per-host bar charts of traffic share as indented HTML.

    For every host in the module-level `hosts`, emits an <h2> with its
    IP and a table with one bar per source host, sized by its share of
    the total bytes received.
    """
    doc, tag, text = Doc().tagtext()
    with tag('h1'):
        text('Percentage of data transferred from each host')
    for host in hosts:
        total_received = host.total_bytes_received
        with tag('h2'):
            text(host.ip)
        with tag('table', border="0", cellpadding="0", cellspacing="0", klass="bar-chart"):
            for src_host in host.hosts:
                share = (src_host.bytes_sent / total_received) * 100
                with tag('tr'):
                    with tag('td', valign="middle", align="left", klass="ip-address"):
                        text(src_host.ip)
                    with tag('td', valign="middle", align="left", klass="bar"):
                        # Bar width encodes the percentage directly.
                        with tag('div', style="width:{}%".format(share)):
                            text('{}%'.format(round(share, 3)))
    return indent(doc.getvalue())
def create_report(self, report_file="Output/report.html"):
    """Render the collected JUnit results as an HTML report (Python 2).

    Reads testcases from self.result_root (an ElementTree root), writes
    one table row per case with start time, duration, result and a log
    link, and saves the page to *report_file*.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    from src.resource.style import css
    with tag('html'):
        with tag('head'):
            with tag('title'):
                text("Automation Test Result")
            with tag('meta', charset="utf-8"):
                pass
            with tag('meta', name="viewport", content="width=device-width, initial-scale=1"):
                pass
            with tag('style'):
                text(css)
        with tag('body'):
            with tag('div', klass='container'):
                pass
            with tag('div', klass='panel'):
                pass
            # Header panel: one <p> per metadata entry; HOST links to
            # the master under test.
            with tag('div', klass='panel-footer'):
                for meta in self.meta_dic:
                    with tag('p'):
                        with tag('b'):
                            text(meta + ": ")
                        if meta == "HOST":
                            with tag('a', href='https://' + self.meta_dic.get(meta) + "/"):
                                text(self.krakken.master)
                        else:
                            text(str(self.meta_dic.get(meta)))
            with tag('div', klass="panel-body"):
                with tag('table'):
                    with tag('tr'):
                        with tag('th'):
                            text("TEST #")
                        with tag('th'):
                            text("TEST NAME")
                        with tag('th'):
                            text("START TIME")
                        with tag('th'):
                            text("DURATION")
                        with tag('th'):
                            text("RESULT")
                        with tag('th'):
                            text("LOG")
                    root = self.result_root
                    cnt = 0
                    for result in root.iter('testcase'):
                        # Classify the case; failures are also appended
                        # to the Output/failure marker file.
                        if result.find("failure") is not None:
                            res = "FAIL"
                            with open("Output/failure", "a+") as r:
                                r.write("failed\n")
                        elif result.find("skipped") is not None:
                            res = "SKIP"
                        else:
                            res = "PASS"
                        print result.attrib.get('name') + " -> " + res
                        # Start time is stored as a <property> on the case.
                        properties = result.find('properties')
                        st_time = ""
                        for property in properties:
                            if property.attrib.get('name') == 'case_start_time':
                                st_time = property.attrib.get('value')
                                break
                        with tag('tr'):
                            with tag('td'):
                                text("TC" + str(cnt))
                            with tag('td'):
                                text(result.attrib.get('name'))
                            with tag('td'):
                                if st_time:
                                    time_string = time.strftime('%H:%M:%S', time.localtime(float(st_time)))
                                else:
                                    time_string = '-'
                                text(time_string)
                            with tag('td'):
                                # Duration formatted as [HHh:][MMm:]SS.SSs,
                                # omitting leading zero components.
                                try:
                                    t = float(result.attrib.get('time'))
                                    time_elapsed = (
                                        "{:0>2}h:".format(int(t // 3600))
                                        if int(t // 3600) > 0 else ""
                                    ) + ("{:0>2}m:".format(
                                        int(t - 3600 * (t // 3600)) // 60)
                                        if (t - 3600 * (t // 3600)) // 60 > 0
                                        or t // 3600 > 0
                                        else "") + "{:05.2f}s".format(t % 60)
                                except:
                                    time_elapsed = "-"
                                text(time_elapsed)
                            with tag('td'):
                                with tag('span', klass=res.lower(),
                                         style="font-family:'Monaco', monospace"):
                                    text(res)
                            with tag('td'):
                                # misc = read_options_as_dict_from_config("misc.ini", "TEST")
                                # Log link: through Jenkins when running
                                # under CI, otherwise a relative path.
                                if "JOB_NAME" in self.meta_dic and "BUILD_URL" in self.meta_dic:
                                    BUILD_ID = filter(None, self.meta_dic.get('BUILD_URL').split("/"))[-1]
                                    with tag('a', klass=res.lower(),
                                             href="http://" + socket.gethostname()
                                             + "/jenkins/" + self.meta_dic.get("JOB_NAME")
                                             + "/" + BUILD_ID + "/Output/" + str(cnt)):
                                        with tag('span', style="font-family: monospace;"):
                                            text(">")
                                else:
                                    with tag('a', klass=res.lower(), href="./" + str(cnt) + "/"):
                                        with tag('span', style="font-family: monospace;"):
                                            text(">")
                        cnt += 1
            # Footer panel: total wall-clock processing time.
            with tag('div', klass="panel-footer"):
                with tag('p'):
                    with tag('b'):
                        text('Total Processing Time: ')
                    end_time = time.time()
                    elapse = end_time - self.start_time_stamp
                    h, r = divmod(elapse, 3600)
                    m, s = divmod(r, 60)
                    text("{:0>2}h:{:0>2}m:{:05.2f}s".format(int(h), int(m), s))
    with open(report_file, 'w+') as f:
        f.write(indent(doc.getvalue(), indentation="  ", newline="\r\n", indent_text=True))
def parse_new_digs_animal_profile(pet):
    """Build the HTML profile section for one "New Digs" adoptable pet.

    Args:
        pet: Airtable-style record dict with "id" and "fields" keys.
             "fields" may contain "Pet Name", "Pictures" (list of dicts
             with a "url" key), "Pet Species" and "Public Description".

    Returns:
        Indented HTML string for the pet profile.

    Fixes vs. previous revision:
      * ``len(pet_photos)`` raised TypeError when "Pictures" was absent
        (``dict.get`` returns None); now a simple truthiness check.
      * Missing space between the two concatenated CSS class literals
        produced the bogus class ``...darket_pb_text_align_center``.
    """
    pet_fields = pet["fields"]
    name = pet_fields.get("Pet Name")
    record_id = pet["id"]
    doc, tag, text, line = Doc().ttl()
    with tag("div", klass="pet-profile"):
        with tag("div", klass="pet-profile-images"):
            # Fotorama image-carousel assets (jQuery + fotorama CSS/JS).
            doc.asis(
                "<script src=\"https://ajax.googleapis.com/ajax/"
                + "libs/jquery/1.11.1/jquery.min.js\"></script>"
            )
            doc.asis(
                "<link href=\"https://cdnjs.cloudflare.com/ajax/"
                + "libs/fotorama/4.6.4/fotorama.css\" rel=\"stylesheet\">"
            )
            doc.asis(
                "<script src=\"https://cdnjs.cloudflare.com/ajax/"
                + "libs/fotorama/4.6.4/fotorama.js\"></script>"
            )
            with tag(
                "div",
                ("data-nav", "thumbs"),
                ("data-allowfullscreen", "true"),
                klass="fotorama"
            ):
                pet_photos = pet_fields.get("Pictures")
                # Truthiness check covers both None (key absent) and [].
                if pet_photos:
                    for photo in pet_photos:
                        doc.stag("img", src=photo["url"])
                else:
                    doc.stag("img", src=PLACEHOLDER_IMAGE)
        with tag("div", klass="pet-profile-data"):
            with tag("div", klass="pet-profile-name"):
                text(name)
            doc.stag("br")
            with tag("div", klass="pet-profile-other-data"):
                new_digs_profile_other_data(pet, doc, text, line)
            # Pre-fill the adoption application with the species group.
            interested_in = "Dogs"
            species = pet_fields.get("Pet Species")
            if species == "Cat":
                interested_in = "Cats"
            adopt_link = (
                NEW_DIGS_ADOPTION_APP_LINK
                + record_id
                + "&prefill_I%27m+interested+in+adopting+this+type+of+pet:="
                + interested_in
            )
            with tag(
                "a",
                href=adopt_link,
                klass="pet-profile-top-adopt-button"
            ):
                text("Apply to Adopt " + name)
        with tag("div", klass="pet-profile-description"):
            with tag("div", klass="pet-profile-description-title"):
                text("Meet " + name + "!")
            doc.stag("br")
            description = pet_fields.get("Public Description")
            if not description or len(description) < 3:
                text(
                    "We don't have much information on this animal yet. "
                    + "If you'd like to find out more, "
                    + "please email [email protected]."
                )
            else:
                # Blank-line-separated description becomes paragraphs.
                with tag("p"):
                    doc.asis(description.replace("\n\n", "</p><p>"))
        with tag(
            "div",
            klass=(
                # BUG FIX: trailing space added so the two classes do
                # not merge into one.
                "et_pb_promo et_pb_bg_layout_dark "
                + "et_pb_text_align_center pet-profile-adopt-bottom"
            ),
            style="background-color: #006cb7;"
        ):
            with tag("div", klass="et_pb_promo_description"):
                line("h2", "Apply to Adopt " + name + " Today")
                with tag("a", klass="et_pb_promo_button", href=adopt_link):
                    text("Go To Adoption Application")
    return indent(doc.getvalue())
def to_xml(self):
    """Serialise this failure as a Last.fm-style XML error document.

    Produces ``<lfm status="failed"><error code="..">msg</error></lfm>``
    preceded by an XML declaration.
    """
    builder, element, put_text = Doc().tagtext()
    with element('lfm', status="failed"):
        with element('error', code=self.api_error.code):
            put_text(self.api_error.message)
    declaration = '<?xml version="1.0" encoding="utf-8"?>\n'
    return declaration + yattag.indent(builder.getvalue())
# Scan one author's directory tree (<myroot>/<author>/<album>/<song>) and
# emit it as nested <autor>/<album>/<pesma> XML elements, one <li> per
# lyric line.  Relies on `doc`/`tag`/`text`, `myroot` and `author` being
# defined earlier in the script (not visible in this excerpt).
# NOTE(review): this fragment may sit inside a loop over authors — the
# final indent/print/write would then flush the document once per pass;
# confirm against the full script.
curr_path = '{}/{}'.format(myroot, author)
# Sub-directories of the author folder are albums (non-files only).
albums = [dir for dir in listdir(curr_path) if not isfile(join(curr_path, dir))]
with tag('autor', ime=author, brojAlbuma=len(albums), pol="", zanr="", rodnoMesto="", tema=""):
    for album in albums:
        with tag('album', naziv=album, godina="", izdavac=""):
            album_path = '{}/{}'.format(curr_path, album)
            songs = [f for f in listdir(album_path) if isfile(join(album_path, f))]
            for song in songs:
                # Skip macOS .DS_Store artefacts.
                if not song.startswith('.DS_S'):
                    # song[:-4] strips the 4-char file extension.
                    with tag('pesma', naslovPesme=song[:-4]):
                        song_path = '{}/{}'.format(album_path, song)
                        # Trailing element of the split (after the final
                        # newline) is dropped.
                        for stih in open(song_path, encoding="utf8", errors='ignore').read().split('\n')[:-1]:
                            with tag('li'):
                                text('{}'.format(replace_char_entities(stih)))
result = indent(
    doc.getvalue(),
    indentation = ' '*4,
    newline = '\r\n'
)
print(result)
with open('ExYuKorpus.xml', 'w') as x:
    x.write(result)
def create_part_html(parts, distributors):
    '''@brief Create HTML page containing info for local (non-webscraped) parts.
       @param parts `list()` of parts.
       @param distributors `list()` of the distributors to check each one is local.
       @return `str()` of the HTML page to be read by `get_part_html_tree()`
    '''
    logger.log(DEBUG_OVERVIEW, 'Create HTML page for parts with custom pricing...')
    doc, tag, text = Doc().tagtext()
    with tag('html'):
        with tag('body'):
            for p in parts:
                # Find the manufacturer's part number if it exists.
                pn = p.fields.get('manf#')  # Returns None if no manf# field.

                # Find the various distributors for this part by
                # looking for leading fields terminated by SEPRTR.
                for key in p.fields:
                    try:
                        dist = key[:key.index(SEPRTR)]
                    except ValueError:
                        continue

                    # If the distributor is not in the list of web-scrapable
                    # distributors, then it's a local distributor.  Copy the
                    # local distributor template and add it to the table of
                    # distributors.
                    # NOTE(review): copy.copy is shallow — nested values are
                    # shared with the template; confirm that is intended.
                    if dist not in distributors:
                        distributors[dist] = copy.copy(
                            distributors['local_template'])
                        distributors[dist][
                            'label'] = dist  # Set dist name for spreadsheet header.

                # Now look for catalog number, price list and webpage link
                # for this part.
                for dist in distributors:
                    cat_num = p.fields.get(dist + ':cat#')
                    pricing = p.fields.get(dist + ':pricing')
                    link = p.fields.get(dist + ':link')
                    if cat_num is None and pricing is None and link is None:
                        continue

                    def make_random_catalog_number(p):
                        # Deterministic pseudo-random cat# derived from all
                        # of the part's fields plus the distributor name.
                        hash_fields = {k: p.fields[k] for k in p.fields}
                        hash_fields['dist'] = dist
                        return '#{0:08X}'.format(
                            abs(hash(tuple(sorted(hash_fields.items())))))

                    cat_num = cat_num or pn or make_random_catalog_number(p)
                    p.fields[dist + ':cat#'] = cat_num  # Store generated cat#.
                    with tag('div', klass=dist + SEPRTR + cat_num):
                        with tag('div', klass='cat#'):
                            text(cat_num)
                        if pricing is not None:
                            with tag('div', klass='pricing'):
                                text(pricing)
                        if link is not None:
                            # Default to http:// when the URL has no scheme.
                            url_parts = list(urlsplit(link))
                            if url_parts[0] == '':
                                url_parts[0] = u'http'
                            link = urlunsplit(url_parts)
                            with tag('div', klass='link'):
                                text(link)

    # Remove the local distributor template so it won't be processed later
    # on.  It has served its purpose.  pop() with a default replaces the
    # previous bare `try/del/except: pass`, which could mask real errors.
    distributors.pop('local_template', None)

    html = doc.getvalue()
    if logger.isEnabledFor(DEBUG_OBSESSIVE):
        print(indent(html))
    return html
def generate_html(output_path: str, docs, search_word: str, search_mode: bool):
    # TODO: implement html generation
    """Render search hits from parsed documents into ``summary.html``.

    For each document, emits paragraphs and tables that contain any of
    the comma-separated ``search_word`` terms (fuzzy threshold 80,
    delegated to find_words_paragraphs / find_words_tables).

    Args:
        output_path: Output file or directory; if not a directory, its
            parent directory is used.
        docs: Iterable of parsed documents exposing ``.paragraphs``,
            ``.tables`` (pandas-like, with ``to_html``) and ``.path``.
        search_word: Comma-separated search terms.
        search_mode: Passed through to the find_words_* helpers —
            presumably exact-vs-fuzzy matching; confirm with their docs.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        with tag('head'):
            # Add css for better looking tables
            # NOTE(review): this payload is SCSS (variables, nesting,
            # @include) embedded directly in a <style> tag — browsers
            # cannot interpret it as-is; confirm whether it should be
            # compiled to plain CSS first.
            with tag('style'):
                doc.asis('''
                // Breakpoints
                $bp-maggie: 15em;
                $bp-lisa: 30em;
                $bp-bart: 48em;
                $bp-marge: 62em;
                $bp-homer: 75em;

                // Styles
                * {
                  @include box-sizing(border-box);

                  &:before,
                  &:after {
                    @include box-sizing(border-box);
                  }
                }

                body {
                  font-family: $helvetica;
                  color: rgba(94,93,82,1);
                }

                a {
                  color: rgba(51,122,168,1);

                  &:hover,
                  &:focus {
                    color: rgba(75,138,178,1);
                  }
                }

                .container {
                  margin: 5% 3%;

                  @media (min-width: $bp-bart) {
                    margin: 2%;
                  }

                  @media (min-width: $bp-homer) {
                    margin: 2em auto;
                    max-width: $bp-homer;
                  }
                }

                .responsive-table {
                  width: 100%;
                  margin-bottom: 1.5em;
                  border-spacing: 0;

                  @media (min-width: $bp-bart) {
                    font-size: .9em;
                  }

                  @media (min-width: $bp-marge) {
                    font-size: 1em;
                  }

                  thead {
                    // Accessibly hide <thead> on narrow viewports
                    position: absolute;
                    clip: rect(1px 1px 1px 1px); /* IE6, IE7 */
                    padding: 0;
                    border: 0;
                    height: 1px;
                    width: 1px;
                    overflow: hidden;

                    @media (min-width: $bp-bart) {
                      // Unhide <thead> on wide viewports
                      position: relative;
                      clip: auto;
                      height: auto;
                      width: auto;
                      overflow: auto;
                    }

                    th {
                      background-color: rgba(29,150,178,1);
                      border: 1px solid rgba(29,150,178,1);
                      font-weight: normal;
                      text-align: center;
                      color: white;

                      &:first-of-type {
                        text-align: left;
                      }
                    }
                  }

                  // Set these items to display: block for narrow viewports
                  tbody,
                  tr,
                  th,
                  td {
                    display: block;
                    padding: 0;
                    text-align: left;
                    white-space: normal;
                  }

                  tr {
                    @media (min-width: $bp-bart) {
                      // Undo display: block
                      display: table-row;
                    }
                  }

                  th,
                  td {
                    padding: .5em;
                    vertical-align: middle;

                    @media (min-width: $bp-lisa) {
                      padding: .75em .5em;
                    }

                    @media (min-width: $bp-bart) {
                      // Undo display: block
                      display: table-cell;
                      padding: .5em;
                    }

                    @media (min-width: $bp-marge) {
                      padding: .75em .5em;
                    }

                    @media (min-width: $bp-homer) {
                      padding: .75em;
                    }
                  }

                  caption {
                    margin-bottom: 1em;
                    font-size: 1em;
                    font-weight: bold;
                    text-align: center;

                    @media (min-width: $bp-bart) {
                      font-size: 1.5em;
                    }
                  }

                  tfoot {
                    font-size: .8em;
                    font-style: italic;

                    @media (min-width: $bp-marge) {
                      font-size: .9em;
                    }
                  }

                  tbody {
                    @media (min-width: $bp-bart) {
                      // Undo display: block
                      display: table-row-group;
                    }

                    tr {
                      margin-bottom: 1em;

                      @media (min-width: $bp-bart) {
                        // Undo display: block
                        display: table-row;
                        border-width: 1px;
                      }

                      &:last-of-type {
                        margin-bottom: 0;
                      }

                      &:nth-of-type(even) {
                        @media (min-width: $bp-bart) {
                          background-color: rgba(94,93,82,.1);
                        }
                      }
                    }

                    th[scope="row"] {
                      background-color: rgba(29,150,178,1);
                      color: white;

                      @media (min-width: $bp-lisa) {
                        border-left: 1px solid rgba(29,150,178,1);
                        border-bottom: 1px solid rgba(29,150,178,1);
                      }

                      @media (min-width: $bp-bart) {
                        background-color: transparent;
                        color: rgba(94,93,82,1);
                        text-align: left;
                      }
                    }

                    td {
                      text-align: right;

                      @media (min-width: $bp-bart) {
                        border-left: 1px solid rgba(29,150,178,1);
                        border-bottom: 1px solid rgba(29,150,178,1);
                        text-align: center;
                      }

                      &:last-of-type {
                        @media (min-width: $bp-bart) {
                          border-right: 1px solid rgba(29,150,178,1);
                        }
                      }
                    }

                    td[data-type=currency] {
                      text-align: right;
                    }

                    td[data-title]:before {
                      content: attr(data-title);
                      float: left;
                      font-size: .8em;
                      color: rgba(94,93,82,.75);

                      @media (min-width: $bp-lisa) {
                        font-size: .9em;
                      }

                      @media (min-width: $bp-bart) {
                        // Don’t show data-title labels
                        content: none;
                      }
                    }
                  }
                }
                ''')
        with tag('body'):
            with tag('h1', id="heading"):
                text('Summary of search results')
            doc_index = 0
            for document in docs:
                with tag('div', id=str(doc_index)):
                    doc_index += 1
                    header_printed = False
                    # output paragraphs containing search words
                    for paragraph in find_words_paragraphs(
                            document.paragraphs, search_mode,
                            search_word.split(","), 80):
                        with tag('p'):
                            if not header_printed:
                                with tag('h2'):
                                    text("Found in document with location: " +
                                         str(document.path))
                                header_printed = True
                            text(paragraph)
                    # output tables containing search words
                    table_index = 0
                    for table in find_words_tables(document.tables,
                                                   search_mode,
                                                   search_word.split(","), 80):
                        with tag('div', id="table" + str(table_index),
                                 klass="container"):
                            table_index += 1
                            # Tables are round-tripped through a temp file
                            # because pandas to_html writes to a path here.
                            tempfile_path = tempfile.gettempdir(
                            ) + "/PDFScraper"
                            try:
                                os.makedirs(tempfile_path)
                            except FileExistsError:
                                pass
                            tempfile_path = tempfile_path + "/table"
                            table.to_html(tempfile_path,
                                          classes="responsive-table",
                                          index=False)
                            with codecs.open(tempfile_path, 'r') as table_file:
                                # replace \n in table to fix formatting
                                tab = re.sub(r'\\n', '<br>',
                                             table_file.read())
                                # NOTE(review): header_printed is never set
                                # True here, so the h2 header may repeat for
                                # every matching table — confirm intent.
                                if not header_printed:
                                    with tag('h2'):
                                        text(
                                            "Found in document with location: "
                                            + str(document.path))
                                doc.asis(tab)
                                os.remove(tempfile_path)
    # write HTML to file
    # check if output path is a directory
    if not os.path.isdir(output_path):
        output_path = str(Path(output_path).parent)
    with open(output_path + "/summary.html", "w", encoding='utf-8') as file:
        file.write(indent(doc.getvalue()))
def GenerateHtml(self):
    """Return the accumulated yattag document rendered as indented HTML."""
    rendered = self.doc.getvalue()
    return indent(rendered, indentation="", newline="\n")
# Emit the <head>/<body> of a word-tone "cheatsheet": a 4x5 grid of
# buttons, one per (first_tone, second_tone) pair.  Each button invokes
# the JS play() helper from code.js and is painted as a left/right
# colour gradient keyed by COLORS.  Relies on `doc`/`tag`/`text`/`line`
# and COLORS defined earlier in the script (not visible here).
with tag("head"):
    with tag("title"):
        doc.asis("Word tones cheatsheet")
    with tag("script", language="JavaScript", src="code.js"):
        pass
with tag("body"):
    line("h1", "Play me.")
    with tag("table", cellpadding=10):
        # First tone 1-4, second tone 1-5 (5 presumably the neutral
        # tone — confirm against COLORS).
        for first_tone in range(1, 5):
            with tag("tr"):
                for second_tone in range(1, 6):
                    with tag("td"):
                        doc.stag(
                            "input",
                            type="submit",
                            value=" ",
                            onclick=f"play({first_tone}, {second_tone})",
                            style=(
                                f"background-image: linear-gradient(to right, {COLORS[first_tone]} 48%, #ffffff 49%, #ffffff 51%, {COLORS[second_tone]} 52%);"
                                "border-radius: 5px;"
                                "border: none;"
                                "min-width: 150px;"
                                "min-height: 80px;"
                            ),
                        )
print(indent(doc.getvalue()))
def _writexml(self, traffic: types.Traffic, route_path: str):
    """Writes a traffic spec into a route file. Typically this would be the
    source data to Sumo's DUAROUTER.

    Emits, in order: one <vType> per unique actor, one <route> per unique
    resolved route, then one <flow> per (flow, actor) pair.  Ordering
    matters: SUMO requires vTypes/routes to be declared before the flows
    that reference them.
    """
    doc = Doc()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with doc.tag(
        "routes",
        ("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance"),
        ("xsi:noNamespaceSchemaLocation", "http://sumo.sf.net/xsd/routes_file.xsd"),
    ):
        # Actors and routes may be declared once then reused. To prevent creating
        # duplicates we unique them here.
        for actor in {
            actor for flow in traffic.flows for actor in flow.actors.keys()
        }:
            # Sampled driver-imperfection must lie in [0, 1]; gap >= 0.
            sigma = min(1, max(0, actor.imperfection.sample()))  # range [0,1]
            min_gap = max(0, actor.min_gap.sample())  # range >= 0
            doc.stag(
                "vType",
                id=actor.id,
                accel=actor.accel,
                decel=actor.decel,
                vClass=actor.vehicle_type,
                speedFactor=actor.speed.mean,
                speedDev=actor.speed.sigma,
                sigma=sigma,
                minGap=min_gap,
                maxSpeed=actor.max_speed,
                **actor.lane_changing_model,
                **actor.junction_model,
            )
        # Make sure all routes are "resolved" (e.g. `RandomRoute` are converted to
        # `Route`) so that we can write them all to file.
        resolved_routes = {}
        for route in {flow.route for flow in traffic.flows}:
            resolved_routes[route] = self.resolve_route(route)
        for route in set(resolved_routes.values()):
            doc.stag("route", id=route.id, edges=" ".join(route.edges))
        # We don't de-dup flows since defining the same flow multiple times should
        # create multiple traffic flows. Since IDs can't be reused, we also unique
        # them here.
        for flow_idx, flow in enumerate(traffic.flows):
            total_weight = sum(flow.actors.values())
            route = resolved_routes[flow.route]
            for actor_idx, (actor, weight) in enumerate(flow.actors.items()):
                doc.stag(
                    "flow",
                    # Index suffixes keep IDs unique across repeated flows.
                    id="{}-{}-{}-{}".format(actor.name, flow.id, flow_idx, actor_idx),
                    type=actor.id,
                    route=route.id,
                    # Flow rate split across actors by relative weight.
                    vehsPerHour=flow.rate * (weight / total_weight),
                    departLane=route.begin[1],
                    departPos=route.begin[2],
                    departSpeed=actor.depart_speed,
                    arrivalLane=route.end[1],
                    arrivalPos=route.end[2],
                    begin=flow.begin,
                    end=flow.end,
                )
    with open(route_path, "w") as f:
        f.write(
            indent(doc.getvalue(), indentation="    ", newline="\r\n",
                   indent_text=True))
def report_generator_comp(
        case_fastqcRes=None,
        case_identifyAdapterRes=None,
        case_bismarkRes=None,
        case_deduplicateRes=None,
        case_rmduplicateRes=None,
        case_fraglenplotRes=None,
        case_DeconCCNRes=None,
        ctrl_fastqcRes=None,
        ctrl_identifyAdapterRes=None,
        ctrl_bismarkRes=None,
        ctrl_deduplicateRes=None,
        ctrl_rmduplicateRes=None,
        ctrl_fraglenplotRes=None,
        ctrl_DeconCCNRes=None,
        OCFRes=None,
        CNVRes=None,
        fraglenplotcompRes=None,
        PCARes=None,
        fragprofplotRes=None,
        outputdir=None,
        label=None,  # list
):
    """Generate the case-vs-control cfDNA WGBS analysis HTML report.

    All *Res arguments are optional step-result objects consumed by
    write_body; ``outputdir`` defaults to ``Configure2.getRepDir()`` and
    ``label`` to ``["Case", "Control"]``.  Writes
    "Cell Free DNA WGBS Analysis Report.html" into ``outputdir``.

    Fix vs. previous revision: the output file is now opened with a
    ``with`` block instead of open/write/close, so it is closed even if
    writing raises.
    """
    if outputdir is None:
        outputdir = Configure2.getRepDir()
    if label is None:
        label = ["Case", "Control"]
    doc, tag, text, line = Doc().ttl()
    write_head(doc, tag, text, line)
    write_body(
        doc,
        tag,
        text,
        line,
        case_fastqcRes,
        case_identifyAdapterRes,
        case_bismarkRes,
        case_deduplicateRes,
        case_rmduplicateRes,
        case_fraglenplotRes,
        case_DeconCCNRes,
        ctrl_fastqcRes,
        ctrl_identifyAdapterRes,
        ctrl_bismarkRes,
        ctrl_deduplicateRes,
        ctrl_rmduplicateRes,
        ctrl_fraglenplotRes,
        ctrl_DeconCCNRes,
        OCFRes,
        CNVRes,
        fraglenplotcompRes,
        PCARes,
        fragprofplotRes,
        outputdir,
        label,
    )
    report_path = os.path.join(outputdir,
                               "Cell Free DNA WGBS Analysis Report.html")
    with open(report_path, "w") as fout:
        fout.write(indent(doc.getvalue()))
def write_html(self,):
    """Indent the accumulated document and write it to ``self.html_file``."""
    markup = indent(self.doc.getvalue())
    with open(self.html_file, 'w') as handle:
        handle.write(markup)
def main(_): opts = flags.FLAGS # pdb.set_trace() vis_dir_names = vis_dir_names_from_file = os.listdir(opts.imgs_root_dir) vis_dir_names.sort() if opts.filename_ordering is not None: vis_dir_names = json.load(open(opts.filename_ordering)) if opts.vis_dirs is not None: vis_dir_names = [] with open(opts.vis_dirs) as f: for line in f: vis_dir_names.append(line.strip()) img_root_rel_path = osp.relpath(opts.imgs_root_dir, opts.html_dir) if not os.path.exists(opts.html_dir): os.makedirs(opts.html_dir) if opts.html_name is None: print('html_name is necessary') return html_file = osp.join(opts.html_dir, opts.html_name + '.html') ctr = 0 img_keys = os.listdir(osp.join(opts.imgs_root_dir, vis_dir_names[0])) img_keys.sort() # pdb.set_trace() img_keys = [ 'img_roi.png', 'img_rel_dir_gt.png', 'img_rel_dir_pred.png', 'bench_iter_0.json', 'c_gt_objects_cam_view.png', 'b_pred_objects_cam_view.png' ] # img_keys = ['img_roi.png'] doc, tag, text = Doc().tagtext() doc_tags = (doc, tag, text) with tag('html'): with tag('body'): with tag('table', style='width:100%', border="1"): with tag('tr'): with tag('td'): text("Filename") for img_name in img_keys: with tag('td'): text("{}".format(img_name)) for img_dir in vis_dir_names: with tag('tr'): with tag('td'): text("{}".format(img_dir)) ## Images from A for img_name in img_keys: if img_dir not in vis_dir_names_from_file: continue if 'json' in img_name: json_path = osp.join(opts.imgs_root_dir, img_dir, img_name) draw_json_table(json_path, doc_tags) continue with tag('td'): with tag('img', width="640px", src=osp.join(img_root_rel_path, img_dir, img_name)): ctr += 1 r1 = doc.getvalue() r2 = indent(r1) with open(html_file, 'wt') as f: f.write(r2)
def record_listens(data):
    """ Submit the listen in the lastfm format to be inserted in db.
        Accepts listens for both track.updateNowPlaying and
        track.scrobble methods.

    Args:
        data: request parameters as a dict; must contain "sk" (session
            key), "api_key" and "method"; listen fields arrive either
            bare ("track") or indexed ("track[0]", "track[1]", ...).

    Returns:
        A formatted <lfm status="ok"> response in the requested output
        format.

    Raises:
        InvalidAPIUsage: on missing/invalid credentials, missing user
            email (when required), or listen validation failure.

    Fix vs. previous revision: the bracket-index pattern is now a raw
    string — ``'\\['`` in a plain literal is an invalid escape sequence
    (DeprecationWarning, future SyntaxError).
    """
    output_format = data.get('format', 'xml')
    try:
        sk, api_key = data['sk'], data['api_key']
    except KeyError:
        raise InvalidAPIUsage(
            CompatError.INVALID_PARAMETERS,
            output_format=output_format)  # Invalid parameters

    session = Session.load(sk)
    if not session:
        if not Token.is_valid_api_key(api_key):
            raise InvalidAPIUsage(
                CompatError.INVALID_API_KEY,
                output_format=output_format)  # Invalid API_KEY
        raise InvalidAPIUsage(
            CompatError.INVALID_SESSION_KEY,
            output_format=output_format)  # Invalid Session KEY

    user = db_user.get(session.user_id, fetch_email=True)
    if mb_engine and current_app.config[
            "REJECT_LISTENS_WITHOUT_USER_EMAIL"] and user["email"] is None:
        raise InvalidAPIUsage(
            CompatError.NO_EMAIL,
            output_format=output_format)  # No email available for user in LB

    # Group indexed parameters ("track[0]", "artist[0]", ...) by index;
    # non-indexed parameters fall under index 0.
    # NOTE(review): the bare-parameter index is the int 0 while bracketed
    # indices are strings ("0", "1", ...), so "track" and "track[0]"
    # would land in different buckets — confirm that is intended.
    lookup = defaultdict(dict)
    for key, value in data.items():
        if key in ["sk", "token", "api_key", "method", "api_sig", "format"]:
            continue
        matches = re.match(r'(.*)\[(\d+)\]', key)
        if matches:
            key = matches.group(1)
            number = matches.group(2)
        else:
            number = 0
        lookup[number][key] = value

    if data['method'].lower() == 'track.updatenowplaying':
        # "Now playing" has no client timestamp; stamp it with now (UTC).
        for i, listen in lookup.items():
            if 'timestamp' not in listen:
                listen['timestamp'] = calendar.timegm(
                    datetime.now().utctimetuple())

    # Convert to native payload then submit 'em after validation.
    listen_type, native_payload = _to_native_api(lookup, data['method'],
                                                 output_format)
    try:
        validated_payload = [
            validate_listen(listen, listen_type) for listen in native_payload
        ]
    except ListenValidationError as err:
        # Unsure about which LastFMError code to use but 5 or 6 probably make the most sense.
        # see listenbrainz.webserver.errors.py for a detailed list of all available codes
        raise InvalidAPIUsage(LastFMError(code=6, message=err.message), 400,
                              output_format)

    user_metadata = SubmitListenUserMetadata(
        user_id=user['id'], musicbrainz_id=user['musicbrainz_id'])
    augmented_listens = insert_payload(validated_payload, user_metadata,
                                       listen_type=listen_type)

    # With corrections than the original submitted listen.
    doc, tag, text = Doc().tagtext()
    with tag('lfm', status='ok'):
        if listen_type == 'playing_now':
            doc.asis(
                create_response_for_single_listen(
                    list(lookup.values())[0], augmented_listens[0],
                    listen_type))
        else:
            accepted_listens = len(lookup.values())
            # Currently LB accepts all the listens and ignores none
            with tag('scrobbles', accepted=accepted_listens, ignored='0'):
                for original_listen, augmented_listen in zip(
                        list(lookup.values()), augmented_listens):
                    doc.asis(
                        create_response_for_single_listen(
                            original_listen, augmented_listen, listen_type))
    return format_response(
        '<?xml version="1.0" encoding="utf-8"?>\n' +
        yattag.indent(doc.getvalue()), output_format)
# Tail of an LBC pickup-request builder: remaining attachment/reference
# elements, destination/client codes, then the yattag fragment is wrapped
# in a PickupRequestEntity envelope and POSTed.  `doc`/`tag`/`text`,
# `url` and `headers` come from earlier code not visible in this excerpt.
# NOTE(review): the first statement belongs to a `with tag(...)` block
# that opens before this excerpt (presumably ReferenceNoTwo — confirm).
text('REGULAR')
with tag('AttachmentNameThree'):
    text('SHOPEESLS')  # Merchant SLS tracking number - check with LBC
with tag('ReferenceNoThree'):
    text('123456789')
with tag('AttachmentNameFour'):
    text('STATUS')  # check with LBC
with tag('ReferenceNoFour'):
    text('0')
with tag('DestinationCode'):
    # Use elastic search - map given values
    text('COP')
with tag('Client'):
    text('SPE')  # Merchants code check with LBC
# Render the inner elements, then splice them into the request envelope.
message = indent(doc.getvalue(), indentation=' ' * 4, newline='\r\n')
xml_payload = """<PickupRequestEntity xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">
{}
<Commodity xsi:nil="true" />
<ForCrating xsi:nil="true" />
</PickupRequestEntity>""".format(message)
print('REQUEST', xml_payload)
response = requests.request("POST", url, headers=headers, data=xml_payload)
json_resp = json.loads(response.text)
print(json_resp)
def main():
    """Aggregate per-profile test reports into an HTML summary table.

    Reads profile directory paths from stdin, loads each profile.yml and
    its closed/open test reports, then prints an HTML <table> (download
    link, language, locale, system, closed/open accuracy stars) to
    stdout.

    Fix vs. previous revision: the "Missing" warning after the
    ``open_path.is_file()`` check printed ``closed_path`` — it now
    reports the file that was actually missing, ``open_path``.
    """
    yaml.SafeLoader.add_constructor("!env", env_constructor)
    base_dir = Path(__file__).parent.parent
    closed_reports = {}
    open_reports = {}
    profile_yml = {}
    for profile_dir in sys.stdin:
        profile_dir = base_dir / profile_dir.strip()
        profile_name = profile_dir.name
        with open(profile_dir / "profile.yml", "r") as yaml_file:
            profile_yml[profile_name] = yaml.safe_load(yaml_file)
        closed_path = profile_dir / "test" / "closed" / "report.json"
        open_path = profile_dir / "test" / "open" / "report.json"
        if not open_path.is_file():
            # Try alternative name
            open_path = profile_dir / "test" / "open" / "report_open.json"
        if closed_path.is_file():
            with open(closed_path, "r") as closed_file:
                closed_reports[profile_name] = json.load(closed_file)
        if open_path.is_file():
            with open(open_path, "r") as open_file:
                open_reports[profile_name] = json.load(open_file)
        else:
            # BUG FIX: previously reported closed_path here.
            print("Missing", open_path, file=sys.stderr)

    rows = []
    for profile_name in profile_yml:
        print("Processing", profile_name, file=sys.stderr)
        profile = profile_yml[profile_name]
        closed_report = closed_reports.get(profile_name)
        open_report = open_reports.get(profile_name)
        row = {
            "name": profile_name,
            "version": profile["version"],
            "language": profile["language"]["name"],
            "locale": profile["language"]["code"],
            "system": profile["speech-to-text"]["acoustic-model-type"],
        }
        if closed_report:
            row["closed_accuracy"] = closed_report["transcription_accuracy"]
            row["closed_speedup"] = closed_report[
                "average_transcription_speedup"]
        if open_report:
            row["open_accuracy"] = open_report["transcription_accuracy"]
            row["open_speedup"] = open_report["average_transcription_speedup"]
        rows.append(row)

    # Convert to HTML, ordered by language then locale.
    rows = sorted(rows, key=lambda r: (r["language"], r["locale"]))
    doc, tag, text = Doc().tagtext()
    with tag("table"):
        # Header
        with tag("thead"):
            with tag("tr"):
                with tag("th"):
                    # Download
                    pass
                with tag("th"):
                    text("Language")
                with tag("th"):
                    text("Locale")
                with tag("th"):
                    text("System")
                with tag("th"):
                    text("Closed")
                with tag("th"):
                    text("Open")
        # Body
        with tag("tbody"):
            for row in rows:
                with tag("tr"):
                    # Download
                    with tag("td"):
                        with tag(
                                "a",
                                href=
                                f'https://github.com/synesthesiam/{row["name"]}/archive/v{row["version"]}.tar.gz',
                        ):
                            text("Download")
                    # Language (with native name when known)
                    with tag("td"):
                        lang = row["language"]
                        lang = lang[0].upper() + lang[1:]
                        native = NATIVE.get(row["locale"])
                        if native:
                            lang = f"{lang} ({native})"
                        text(lang)
                    # Locale
                    with tag("td"):
                        text(row["locale"])
                    # System
                    with tag("td"):
                        text(row["system"])
                    # Closed: star rating plus speed-up multiplier.
                    with tag("td"):
                        closed_accuracy = row.get("closed_accuracy")
                        if closed_accuracy:
                            closed_text = to_stars(closed_accuracy)
                            closed_speedup = row.get("closed_speedup")
                            if closed_speedup:
                                closed_speedx = int(
                                    math.ceil(float(closed_speedup)))
                                closed_text = f"{closed_text} ({closed_speedx}x)"
                            doc.asis(closed_text)
                        else:
                            with tag("strong"):
                                text("UNTESTED")
                    # Open: same formatting as closed.
                    with tag("td"):
                        open_accuracy = row.get("open_accuracy")
                        if open_accuracy:
                            open_text = to_stars(open_accuracy)
                            open_speedup = row.get("open_speedup")
                            if open_speedup:
                                open_speedx = int(
                                    math.ceil(float(open_speedup)))
                                open_text = f"{open_text} ({open_speedx}x)"
                            doc.asis(open_text)
                        else:
                            with tag("strong"):
                                text("UNTESTED")
    print(indent(doc.getvalue()))
def writeReport(self):
    """Write the indented HTML report to <reportDir>/report.html."""
    target = "{}/report.html".format(self.reportDir)
    content = indent(self.doc.getvalue())
    with open(target, 'w') as report:
        report.write(content)
def _write_schedule_time_tag(self, tag, text, row, current, start_string,
                             end_string, start_unix, end_unix):
    """Emit one <time> element for a schedule row.

    Helper for get_xml_from_daily_schedule: `row` is a DB result tuple
    (indices: 3 title, 4 episode, 5 season, 6 show title, 7 duration,
    8 start time string, 11 type, 12 key, 13 library); `current` is the
    string 'true' or 'false'.
    """
    with tag('time',
             ('key', str(row[12])),
             ('current', current),
             ('type', str(row[11])),
             ('show-title', str(row[6])),
             ('show-season', str(row[5])),
             ('show-episode', str(row[4])),
             ('title', str(row[3])),
             ('duration', str(row[7])),
             ('time-start', str(start_string)),
             ('time-end', str(end_string)),
             ('time-start-unix', str(start_unix)),
             ('time-end-unix', str(end_unix)),
             ('library', str(row[13])),
             ):
        text(row[8])

def get_xml_from_daily_schedule(self, currentTime, bgImageURL, datalist):
    """Build the daily <schedule> XML from DB rows.

    Each row is emitted one iteration late (as "previous") so its end
    time can be clipped to one second before the next row's start; the
    final row is flushed after the loop.  A row is marked current='true'
    when `currentTime` matches its start hour and minute.  Times that
    fall after midnight but before the configured reset time
    (config.dailyUpdateTime) are dated tomorrow instead of today.

    Refactor vs. previous revision: the six near-identical <time>
    emission blocks are collapsed into _write_schedule_time_tag; an
    unused `time` local was dropped; `!= None`/`== False` replaced by
    idiomatic forms.  Behavior is otherwise preserved.
    """
    now = datetime.now()
    new_day = now + timedelta(days=1)
    update_time = datetime.strptime(config.dailyUpdateTime, '%H:%M').replace(
        year=int(now.strftime('%Y')), month=int(now.strftime('%m')),
        day=int(now.strftime('%d')))
    doc, tag, text, line = Doc().ttl()
    doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('schedule',
             currently_playing_bg_image=bgImageURL if bgImageURL is not None else ''):
        previous_row = None
        for row in datalist:
            # Anchor the row's start time to today or tomorrow.
            # NOTE(review): strptime with only '%I:%M:%S %p' yields year
            # 1900, so this comparison against today's update_time looks
            # always-False — confirm the intended date handling.
            if datetime.strptime(row[8], '%I:%M:%S %p') > update_time:
                current_start_time = datetime.strptime(
                    row[8], '%I:%M:%S %p').replace(
                        year=int(now.strftime('%Y')),
                        month=int(now.strftime('%m')),
                        day=int(now.strftime('%d')))
            else:
                current_start_time = datetime.strptime(
                    row[8], '%I:%M:%S %p').replace(
                        year=int(new_day.strftime('%Y')),
                        month=int(new_day.strftime('%m')),
                        day=int(new_day.strftime('%d')))
            current_start_time_unix = (
                current_start_time - datetime(1970, 1, 1)).total_seconds()
            current_start_time_string = current_start_time.strftime('%H:%M:%S')
            # End times may or may not carry fractional seconds.
            try:
                if_end_time = datetime.strptime(row[9], '%Y-%m-%d %H:%M:%S.%f')
            except ValueError:
                if_end_time = datetime.strptime(row[9], '%Y-%m-%d %H:%M:%S')
            if if_end_time.replace(
                    year=int(now.strftime('%Y')),
                    month=int(now.strftime('%m')),
                    day=int(now.strftime('%d'))) > update_time:
                current_end_time = if_end_time.replace(
                    year=int(now.strftime('%Y')),
                    month=int(now.strftime('%m')),
                    day=int(now.strftime('%d')))
            else:
                current_end_time = if_end_time.replace(
                    year=int(new_day.strftime('%Y')),
                    month=int(new_day.strftime('%m')),
                    day=int(new_day.strftime('%d')))
            current_end_time_unix = (
                current_end_time - datetime(1970, 1, 1)).total_seconds()
            current_end_time_string = current_end_time.strftime('%H:%M:%S')
            if previous_row is not None:
                # Compare previous end time to current start time.
                try:
                    previous_end_time = datetime.strptime(
                        previous_row[9], '%Y-%m-%d %H:%M:%S.%f')
                except ValueError:
                    previous_end_time = datetime.strptime(
                        previous_row[9], '%Y-%m-%d %H:%M:%S')
                if previous_end_time > update_time:
                    previous_end_time = previous_end_time.replace(
                        year=int(now.strftime('%Y')),
                        month=int(now.strftime('%m')),
                        day=int(now.strftime('%d')))
                else:
                    previous_end_time = previous_end_time.replace(
                        year=int(new_day.strftime('%Y')),
                        month=int(new_day.strftime('%m')),
                        day=int(new_day.strftime('%d')))
                if previous_end_time > current_start_time:
                    # Clip overlap: end one second before the next show.
                    previous_end_time = current_start_time - timedelta(seconds=1)
                previous_end_time_unix = (
                    previous_end_time - datetime(1970, 1, 1)).total_seconds()
                previous_end_time_string = previous_end_time.strftime('%H:%M:%S')
                # Commercials are hidden outside of debug mode.
                # NOTE(review): this `continue` also skips the
                # previous_* bookkeeping below, so previous_row stays on
                # the commercial for the next iteration — confirm this
                # matches the intended behavior.
                if str(previous_row[11]) == "Commercials" and not self.DEBUG:
                    continue
                timeB = datetime.strptime(previous_row[8], '%I:%M:%S %p')
                if (currentTime is not None
                        and currentTime.hour == timeB.hour
                        and currentTime.minute == timeB.minute):
                    current_flag = 'true'
                else:
                    current_flag = 'false'
                self._write_schedule_time_tag(
                    tag, text, previous_row, current_flag,
                    previous_start_time_string,
                    previous_end_time.strftime('%H:%M:%S'),
                    previous_start_time_unix, previous_end_time_unix)
            previous_start_time = current_start_time
            previous_start_time_unix = current_start_time_unix
            previous_start_time_string = current_start_time_string
            previous_row = row
        # Flush the final row (it never becomes "previous").
        # if str(row[11]) == "Commercials" and self.DEBUG == False:
        #     continue
        if previous_row is not None:
            timeB = datetime.strptime(row[8], '%I:%M:%S %p')
            if (currentTime is not None
                    and currentTime.hour == timeB.hour
                    and currentTime.minute == timeB.minute):
                current_flag = 'true'
            else:
                current_flag = 'false'
            self._write_schedule_time_tag(
                tag, text, row, current_flag,
                current_start_time_string, current_end_time_string,
                current_start_time_unix, current_end_time_unix)
    return indent(doc.getvalue())
def render(self):
    """Emit the DOCTYPE followed by the indented document to stdout.

    Fix: the Python-2 print statements are now print() calls, which
    behave identically for a single argument on Python 2 and are valid
    Python 3.
    """
    print(StyleSheet.BATCHPROFILER_DOCTYPE)
    print(yattag.indent(self.doc.getvalue()))
def prepare_order_request_data():
    """Build the LBC pickup-request XML payload from hard-coded sample data.

    Emits every request field as a flat sibling element, indents the result
    with CRLF line endings, wraps it in a ``<PickupRequestEntity>`` envelope,
    prints the payload for debugging and returns it.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<?xml version="1.0" encoding="utf-8"?>')

    # (element name, value) pairs, emitted in order.
    # NOTE(review): 'ACTWTkgs ' carries a trailing space in the element
    # name; reproduced verbatim — confirm against the LBC schema.
    fields = (
        ('AppKey', '2b28bdaf34a34bc88cf0135fae6244c2'),
        ('ShipmentMode', 1),                        # default to 1
        ('Origin', 'MMB'),                          # merchant location - check with LBC
        ('TransactionDate', '11/07/2019 11:28:47'), # ordered date - MM/DD/YYYY HH:MM:SS
        ('ODZ', 'false'),                           # default to false
        ('ShipperAccountNo', 2019100826898887),     # merchant account no - check with LBC
        ('Shipper', 'Merchant seller company name'),  # check with LBC
        ('ShipperStBldg', "37/F Net Park Building, 5th Ave., Bonifacio Global City, Taguig, PH 1634"),
        ('ShipperBrgy', "ANYWHERE"),
        ('ShipperCityMuncipality', "TAGUIG CITY"),
        ('ShipperProvince', "METRO MANIL"),
        ('ShipperContactNumber', 0),
        ('ShipperSendSMS', 0),                      # default to 0
        ('ShipperMobileNumber', 0),                 # 0 if not applicable
        ('ProductLine', 2),                         # only use 2 (Cargo)
        ('ServiceMode', 8),                         # default to 8 - door to door
        ('CODAmount', 5499),
        ('PreferredDate', "11/25/2020 10:10:10"),   # confirm date & time - MM/DD/YYYY HH:MM:SS
        ('Consignee', "mark philip zurbito"),
        ('ConsigneeStBldg', "Test Consignee Address"),
        ('ConsigneeBrgy', "33446"),
        ('ConsigneeCityMuncipality', 1129),
        ('ConsigneeProvince', 42),
        ('ConsigneeContactNumber', '524807617'),
        ('ConsigneeSendSMS', 1),                    # optional
        ('ConsigneeMobileNumber', "524807617"),
        ('Quantity', 1),                            # to check
        ('PKG', 16),                                # default to 16
        ('ACTWTkgs ', '{0:.2f}'.format(0)),
        ('LengthCM', 0),
        ('WidthCM', 0),
        ('HeightCM', 0),
        ('VolWTcbm', 0),                            # volumetric weight - LxWxH/3500 in kg
        ('CBM', 0),                                 # default to 0 (sea cargo)
        ('ChargeableWT', 0),                        # default to 0
        ('DeclaredValue', 2000),                    # package value - to check
        ('Description', 'items description'),       # check example from ams
        ('AttachmentNameOne', 'ORDERNO'),           # merchant order number
        ('ReferenceNoOne', 'SP123456792'),
        ('AttachmentNameTwo', 'MERCHANTS MODE'),    # check with LBC
        ('ReferenceNoTwo', 'REGULAR'),
        ('AttachmentNameThree', 'SHOPEESLS'),       # merchant SLS tracking number
        ('ReferenceNoThree', '123456789'),
        ('AttachmentNameFour', 'STATUS'),           # check with LBC
        ('ReferenceNoFour', '0'),
        ('DestinationCode', 'COP'),                 # use elastic search - map given values
        ('Client', 'SPE'),                          # merchant code - check with LBC
    )
    for element_name, element_value in fields:
        with tag(element_name):
            text(element_value)

    message = indent(doc.getvalue(), indentation=' ' * 4, newline='\r\n')
    xml_payload = """<PickupRequestEntity xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">
{}
<Commodity xsi:nil="true" />
<ForCrating xsi:nil="true" />
</PickupRequestEntity>""".format(message)
    print('REQUEST', xml_payload)
    return xml_payload
def build_behavior_graph(cognitive_model, problem_name: str, var_lists: list):
    """Build the CTAT example-tracing behavior-graph XML for *problem_name*.

    Parameters
    ----------
    cognitive_model : list of dicts, one per state change, each carrying
        'line' (source line), 'value' (entered value) and 'changed'
        (variable name) keys.
    problem_name : str
        Label of the start node and of the StartProblem message.
    var_lists : list
        Variable names, mapped to MyTable columns 1..N ('line' is column 0).

    Returns
    -------
    str
        The indented behavior-graph XML.
    """
    stage_changes = len(cognitive_model)
    node_count = stage_changes + 1

    # Column position of each variable in MyTable ('line' occupies column 0).
    var_pos = {'line': 0}
    for col, var_name in enumerate(var_lists, start=1):
        var_pos[var_name] = col

    doc, tag, text, line = Doc().ttl()
    doc.asis('<?xml version="1.0" standalone="yes"?>')
    with tag('stateGraph',
             firstCheckAllStates="true",
             caseInsensitive="true",
             unordered="false",
             lockWidget="true",
             hintPolicy="Use Both Kinds of Bias",
             version="4.0",
             suppressStudentFeedback="Show All Feedback",
             highlightRightSelection="true",
             startStateNodeName="%(startStateNodeName)%",
             tutorType="Example-tracing Tutor"):
        # startNodeMessage: StartProblem followed by StartStateEnd.
        with tag('startNodeMessage'):
            doc.asis(
                message_section({
                    'verb': 'SendNoteProperty',
                    'properties': {
                        'MessageType': 'StartProblem',
                        'ProblemName': problem_name
                    }
                }))
            doc.asis(
                message_section({
                    'verb': 'SendNoteProperty',
                    'properties': {
                        'MessageType': 'StartStateEnd'
                    }
                }))
        # nodes -> states: start node carries the problem name, the last
        # node is the 'Done' end state, the rest are 'stateN'; all laid
        # out vertically 110px apart.
        for i in range(1, node_count + 2):
            if i == 1:
                is_end, node_text = False, problem_name
            elif i == node_count + 1:
                is_end, node_text = True, 'Done'
            else:
                is_end, node_text = False, 'state' + str(i)
            doc.asis(
                stage_node({
                    'end': is_end,
                    'text': node_text,
                    'id': str(i),
                    'x': str(181),
                    'y': str(30 + 110 * (i - 1))
                }))
        # edges -> state changes; R_value tracks the table row, advancing
        # whenever the cognitive model moves to a new source line.
        R_value = 1
        current_line = 1
        for i in range(1, stage_changes + 1):
            step = cognitive_model[i - 1]
            if current_line != step['line']:
                current_line = step['line']
                R_value += 1
            prop = {
                'id': i,
                'step_value': step['value'],
                'step_var_name': step['changed'],
                'line_num': R_value,
                'target': 'MyTable.R' + str(R_value) + 'C' +
                          str(var_pos[step['changed']]),
                'action': 'UpdateTextArea'
            }
            doc.asis(edge(prop, var_pos))
        # Final edge: pressing the Done button ends the problem.
        # NOTE(review): unlike the edges above, this prop carries no
        # 'line_num' key — confirm edge() tolerates the missing key.
        prop_done = {
            'id': stage_changes + 1,
            'step_value': -1,
            'step_var_name': 'Button',
            'target': 'done',
            'action': 'ButtonPressed'
        }
        doc.asis(edge(prop_done, var_pos))
        # other tags
        with tag('EdgesGroups', ordered="true"):
            pass
    return indent(doc.getvalue())
def to_html(self):
    """Return the whole presentation rendered as an HTML string.

    Builds an impress.js document: <head> with metadata and stylesheets
    (plus the highlight.js style when enabled), a <body> holding the title
    page and one step per slide, then the countdown / impress / MathJax /
    highlight scripts.  Honors ``__config__.indented`` for pretty-printing.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        doc.attr(title=self.metadata.data.data['title'][0])
        with tag('head'):
            doc.stag('meta', charset='utf-8')
            doc.stag('meta', author=' and '.join(
                self.metadata.data.data['authors'][0]))
            with tag('title'):
                text(self.metadata.data.data['title'][0])
            doc.stag('meta', subtitle=self.metadata.data.data['subtitle'][0])
            doc.stag('link', rel='stylesheet', href='css/normalize.css')
            if __config__.highlight:
                # Syntax-highlighting style sheet, only when enabled.
                doc.stag('link', rel='stylesheet',
                         href='js/highlight/styles/' +
                         __config__.highlight_style)
            doc.stag('link', rel='stylesheet', href='css/theme.css')
        # Arm the countdown timer with the talk's maximum time on load.
        with tag('body', onload="resetCountdown(" +
                 self.metadata.data.data['max_time'][0] + ");"):
            with tag('div', id='impress'):
                if self.titlepage.found:
                    # Title-page position: 'svgpath' transitions take the
                    # first point of the path, otherwise the default pos.
                    if self.theme.slide.data.data['slide-transition'][
                            0].lower() == 'svgpath':
                        pos = self.svgpath_pos[0]
                    else:
                        pos = self.pos
                    html = self.titlepage.to_html(position=pos,
                                                  theme=self.theme.slide)
                    html = self.metadata.parse(html)
                    html = self.toc.parse(html)
                    doc.asis(html)
                for section in self.sections:
                    for subsection in section.subsections:
                        for slide in subsection.slides:
                            # '$overview' slides sit at the presentation
                            # center; others follow the same placement
                            # logic as the title page.
                            if slide.title == '$overview':
                                pos = self.center
                            elif self.theme.slide.data.data[
                                    'slide-transition'][0].lower(
                                    ) == 'svgpath':
                                pos = self.svgpath_pos[slide.number - 1]
                            else:
                                pos = self.pos
                            html = slide.to_html(position=pos,
                                                 theme=self.theme.slide)
                            html = self.metadata.parse(html)
                            # TOC is re-parsed per slide so the current
                            # section/subsection/slide can be marked.
                            html = self.toc.parse(
                                html,
                                current=[
                                    int(slide.data['sectionnumber']),
                                    int(slide.data['subsectionnumber']),
                                    slide.number
                                ])
                            doc.asis(html)
            with tag('script'):
                doc.attr(src='js/countDown.js')
            with tag('script'):
                doc.attr(src='js/impress.js')
            with tag('script'):
                doc.asis('impress().init();')
            # MathJax: either the online CDN build, or the local copy with
            # an inline configuration block.
            if __config__.online_mathjax:
                with tag('script'):
                    doc.attr(('type', 'text/javascript'))
                    doc.attr(
                        src=
                        'http://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
                    )
            else:
                with tag('script'):
                    doc.attr(('type', 'text/x-mathjax-config'))
                    doc.text("""
  MathJax.Hub.Config({
    extensions: ["tex2jax.js"],
    jax: ["input/TeX", "output/HTML-CSS"],
    tex2jax: {
      inlineMath: [ ['$','$'], ["\\(","\\)"] ],
      displayMath: [ ['$$','$$'], ["\\[","\\]"] ],
      processEscapes: true
    },
    "HTML-CSS": { availableFonts: ["Neo-Euler"] }
  });
""")
                with tag('script'):
                    doc.attr(('type', 'text/javascript'))
                    doc.attr(src='js/MathJax/MathJax.js')
            if __config__.highlight:
                with tag('script'):
                    doc.attr(src='js/highlight/highlight.pack.js')
                with tag('script'):
                    doc.text("""hljs.initHighlightingOnLoad();""")
    if __config__.indented:
        return indent(doc.getvalue())
    else:
        return doc.getvalue()
def generate_new_digs_pet_list(pets, filename):
    """Render the New Digs pet-list HTML fragment and write it to *filename*.

    Parameters
    ----------
    pets : list
        Records whose "fields" dict is read for "Pet Name",
        "Pet ID - do not edit", "ThumbnailURL" and
        "Made Available for Adoption Date".
    filename : str
        Output path, overwritten on every call.
    """
    doc, tag, text = Doc().tagtext()
    # Empty list: emit only a placeholder message and stop.
    if not pets:
        with tag("div", klass="empty-pet-list"):
            text(
                "We currently have no pets of this type"
                + " in the New Digs program."
            )
        with open(filename, 'w+') as out_file:
            out_file.write(indent(doc.getvalue()))
        return
    # Lazy-loading and jPList assets.
    doc.asis(
        "<script src=\"" + LIST_THEME_PATH
        + "lazy/jquery.min.js\"></script>"
    )
    doc.asis(
        "<script type=\"text/javascript\" src=\"" + LIST_THEME_PATH
        + "lazy/jquery.lazy.min.js\"></script>"
    )
    doc.asis(
        "<link href=\"" + LIST_THEME_PATH
        + "jplist/jplist.styles.css\" rel=\"stylesheet\" type=\"text/css\" />"
    )
    # Sort dropdown driven by jPList data attributes.
    with tag("div", klass="sort-filter-options-parent"):
        with tag("div", klass="sort-filter-options"):
            with tag(
                "div",
                ("data-jplist-control", "dropdown-sort"),
                ("data-group", "newdigsgroup"),
                ("data-name", "sorttitle"),
                klass="jplist-dd",
            ):
                with tag("div", ("data-type", "panel"), klass="jplist-dd-panel"):
                    text(" Sort by ")
                with tag("div", ("data-type", "content"), klass="jplist-dd-content"):
                    # sort by options
                    with tag("div", ("data-path", "default"), klass="jplist-dd-item"):
                        text(" Sort by ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-name"),
                        ("data-order", "asc"),
                        ("data-type", "text"),
                        klass="jplist-dd-item",
                    ):
                        text(" Name A - Z ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-name"),
                        ("data-order", "desc"),
                        ("data-type", "text"),
                        klass="jplist-dd-item",
                    ):
                        text(" Name Z - A ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-intake-date"),
                        ("data-order", "asc"),
                        ("data-type", "number"),
                        ("data-selected", "true"),
                        klass="jplist-dd-item",
                    ):
                        text(" Featured Pets First ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-intake-date"),
                        ("data-order", "desc"),
                        ("data-type", "number"),
                        ("data-selected", "true"),
                        klass="jplist-dd-item",
                    ):
                        text(" Newest Arrivals First ")
    # One card per pet.  (The original kept an unused pet_count counter;
    # removed as dead code.)
    with tag("div", ("data-jplist-group", "newdigsgroup"), klass="pet-list"):
        for pet in pets:
            pet_fields = pet.get("fields", {})
            pet_name = pet_fields.get("Pet Name")
            pet_id = str(pet_fields.get("Pet ID - do not edit"))
            pet_photo = pet_fields.get("ThumbnailURL")
            with tag("div", "data-jplist-item", klass="pet-list-pet"):
                pet_link = PET_LINK_RELATIVE_PATH + "new_digs_pet/" + pet_id
                with tag("a", href=pet_link):
                    with tag("div", klass="pet-list-image"):
                        # Fall back to the placeholder when no photo exists.
                        pet_photo_link = pet_photo
                        if not pet_photo:
                            pet_photo_link = PLACEHOLDER_IMAGE
                        doc.stag(
                            "img",
                            ("data-src", pet_photo_link),
                            src=PLACEHOLDER_IMAGE,
                            alt="Photo",
                            klass="lazy",
                        )
                    with tag("div", klass="pet-list-name"):
                        text(pet_name)
                    # Hidden epoch timestamp used by the jPList sort.
                    with tag("div", klass="pet-list-intake-date hidden"):
                        date = pet_fields.get("Made Available for Adoption Date")
                        if date:
                            date = datetime.strptime(date, "%Y-%m-%d")
                            epoch_time = (date - datetime(1970, 1, 1)).total_seconds()
                            text(epoch_time)
    doc.asis(
        "<script>"
        + " $(function() {"
        + " $('.lazy').lazy();"
        + " });"
        + "</script>"
    )
    doc.asis(
        "<script src=\"//cdnjs.cloudflare.com/ajax/libs/babel-polyfill"
        + "/6.26.0/polyfill.min.js\"></script>"
    )
    doc.asis(
        "<script src=\"" + LIST_THEME_PATH
        + "jplist/jplist.min.js\"></script>"
    )
    with open(filename, 'w+') as out_file:
        out_file.write(indent(doc.getvalue()))
def get_html_from_daily_schedule(self, currentTime, bgImageURL, datalist, nowPlayingTitle):
    """Render today's schedule as a standalone Bootstrap HTML page.

    Parameters
    ----------
    currentTime : datetime or None
        When given, the row whose start/end window contains it is
        highlighted with the 'bg-info' class.
    bgImageURL : str or None
        Optional page background image URL.
    datalist : iterable of rows
        Index 3 = title, 6 = series, 8 = start time ('%I:%M:%S %p'),
        9 = end time, 11 = type ("Commercials" rows are skipped unless
        self.DEBUG).
    nowPlayingTitle : str
        Title shown in the red "Now Playing" header.

    Returns the indented HTML document string.
    """
    now = datetime.now()
    time = now.strftime("%B %d, %Y")
    doc, tag, text, line = Doc().ttl()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        with tag('head'):
            with tag('title'):
                text(time + " - Daily Pseudo Schedule")
            doc.asis('<link href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css" rel="stylesheet">')
            doc.asis('<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>')
            doc.asis('<link rel="shortcut icon" href="https://raw.githubusercontent.com/justinemter/pseudo-channel/master/favicon.ico" type="image/x-icon">')
            # Auto-refresh script: if a controller server is configured,
            # poll pseudo_refresh.txt every second and reload the page when
            # its contents change; otherwise just reload every 30 seconds.
            doc.asis("""
<script>
$(function(){
    var refreshFlag = '';
    """ + """var controllerServerPath ='""" + self.CONTROLLER_SERVER_PATH + ":" + self.CONTROLLER_SERVER_PORT + """';
    if(controllerServerPath != ''){
        console.log("here");
        window.setInterval(function(){
            $.ajax({
                url: controllerServerPath+"/pseudo_refresh.txt",
                async: true,
                cache: false,
                dataType: "text",
                success: function( data, textStatus, jqXHR ) {
                    newFlag = data;
                    if(refreshFlag != ''){
                        if (refreshFlag != newFlag){
                            location.reload();
                        } else {
                            console.log("skip");
                        }
                    } else {
                        refreshFlag = newFlag;
                    }
                }
            });
        }, 1000);
    } else {
        setTimeout(function() {location.reload();}, 30000);
    }
});
</script>
""")
        if bgImageURL != None:
            doc.asis('<style>body{ background:transparent!important; } html { background: url(' + bgImageURL + ') no-repeat center center fixed; -webkit-background-size: cover;-moz-background-size: cover;-o-background-size: cover;background-size: cover;}.make-white { padding: 24px; background:rgba(255,255,255, 0.9); }</style>')
        with tag('body'):
            with tag('div', klass='container mt-3'):
                with tag('div', klass='row make-white'):
                    with tag('div'):
                        with tag('div'):
                            line('h1', self.HTML_PSEUDO_TITLE, klass='col-12 pl-0')
                        with tag('div'):
                            line('h3', time, klass='col-12 pl-1')
                            line('h3', "Now Playing: " + nowPlayingTitle, klass='col-12 pl-1', style="color:red;")
                        with tag('table', klass='col-12 table table-bordered table-hover'):
                            with tag('thead', klass='table-info'):
                                with tag('tr'):
                                    for heading in ('#', 'Type', 'Series', 'Title', 'Start Time'):
                                        with tag('th'):
                                            text(heading)
                            numberIncrease = 0
                            for row in datalist:
                                # Commercials are hidden outside debug mode.
                                if str(row[11]) == "Commercials" and self.DEBUG == False:
                                    continue
                                numberIncrease += 1
                                # NOTE(review): a new <tbody> is opened for
                                # every row (valid HTML, but probably meant
                                # to wrap the whole loop); kept as-is to
                                # preserve the emitted markup.
                                with tag('tbody'):
                                    if currentTime != None:
                                        # Normalize all dates to 1900-01-01 so
                                        # only the time-of-day is compared.
                                        currentTime = currentTime.replace(year=1900, month=1, day=1)
                                        timeBStart = datetime.strptime(row[8], '%I:%M:%S %p').replace(year=1900, month=1, day=1)
                                        try:
                                            timeBEnd = datetime.strptime(row[9], '%Y-%m-%d %H:%M:%S.%f')
                                        except ValueError:
                                            # End times are stored with or
                                            # without fractional seconds.
                                            timeBEnd = datetime.strptime(row[9], '%Y-%m-%d %H:%M:%S')
                                    # Highlight the row whose time window
                                    # contains currentTime.
                                    tr_attrs = {}
                                    if currentTime is not None and \
                                            (currentTime - timeBStart).total_seconds() >= 0 and \
                                            (timeBEnd - currentTime).total_seconds() >= 0:
                                        tr_attrs['klass'] = 'bg-info'
                                    with tag('tr', **tr_attrs):
                                        with tag('th', scope='row'):
                                            text(numberIncrease)
                                        for cell in (row[11], row[6], row[3], row[8]):
                                            with tag('td'):
                                                text(cell)
    return indent(doc.getvalue())
text('Always') with tag('Colpf3Dectection'): with tag('Type'): text('Always') with tag('NinjaEnterCount1'): text(a8mem[MapNinjaEnterCount1 + room.index]) with tag('NinjaEnterCount2'): text(a8mem[MapNinjaEnterCount2 + room.index]) with tag('YamoEnterCount1'): text(a8mem[MapYamoEnterCount1 + room.index]) with tag('YamoEnterCount2'): text(a8mem[MapYamoEnterCount2 + room.index]) with tag('YamoSpawnPosition'): text("1" if a8mem[MapYamoSpawnPosition + room.index] == 0x80 else "2" if a8mem[MapYamoSpawnPosition + room.index] == 2 else "0") with tag('NinjaSpawnPosition'): text("1" if a8mem[MapNinjaSpawnPosition + room.index] == 0x80 else "2" if a8mem[MapNinjaSpawnPosition + room.index] == 2 else "0") result = indent(doc.getvalue(), indentation=' ', newline='\n', indent_text=True) with open(ripPath / "brucelee.xml", "w") as f: f.write(result) f.close() print(result)
def task_to_kml_with_yattag(df_waypoints, outdir, filename_base):
    """Write the waypoints of *df_waypoints* to a KML file and validate it.

    Parameters
    ----------
    df_waypoints : DataFrame with Name, Description, Lat, Lon, Altitude
        (a quantity with units), 'Waypoint style', Code, Country,
        'Runway direction', 'Runway length' and 'Airport Frequency' columns.
    outdir : directory receiving the output file.
    filename_base : used to build the output name ``wpt_<base>.kml``.

    The generated document is validated against the kml22gx schema.
    """
    from yattag import Doc, indent
    from lxml import etree
    from pykml.parser import Schema
    doc, tag, text = Doc().tagtext()
    #doc.asis('<?xml version="1.0" encoding="UTF-8"?>')
    with tag('kml'):
        doc.attr(("xmlns:gx", "http://www.google.com/kml/ext/2.2"),
                 ("xmlns:atom", "http://www.w3.org/2005/Atom"),
                 ("xmlns", "http://www.opengis.net/kml/2.2"))
        with tag('Document'):
            with tag('name'):
                text('Waypoints')
            # One Placemark per waypoint row.
            for i, wpt in df_waypoints.iterrows():
                with tag('Placemark'):
                    with tag('name'):
                        text(wpt.Name)
                    with tag('description'):
                        text("""{desc}
<dl>
<dt>Lat: </dt><dd>{lat}</dd>
<dt>Lon: </dt><dd>{lon}</dd>
<dt>Alt: </dt><dd>{alt}</dd>
</dl>
<dl>
<dt>Waypoint style: </dt><dd>{wpt_style}</dd>
<dt>Code: </dt><dd>{code}</dd>
<dt>Country: </dt><dd>{country}</dd>
<dt>Runway direction : </dt><dd>{runway_dir}</dd>
<dt>Runway length: </dt><dd>{runway_length}</dd>
<dt>Airport Frequency: </dt><dd>{airport_frq}</dd>
</dl>
<dl>
<dt>Google search: </dt><dd><a href="https://www.google.fr/?#safe=off&q={name}">{name}</a></dd>
</dl>
""".format(desc=wpt.Description,
           lat="%.5f" % wpt.Lat,
           lon="%.5f" % wpt.Lon,
           alt="%.1f" % wpt.Altitude.to_base_units().magnitude,
           name=wpt.Name,
           wpt_style=wpt['Waypoint style'],
           code=wpt.Code,
           country=wpt.Country,
           runway_dir=wpt['Runway direction'],
           runway_length=wpt['Runway length'],
           airport_frq=wpt['Airport Frequency']))
                    with tag('Point'):
                        with tag('coordinates'):
                            # KML expects lon,lat,alt order.
                            text("%.5f,%.5f,%.1f" % (wpt.Lon, wpt.Lat,
                                                     wpt.Altitude.to_base_units().magnitude))
    result = indent(doc.getvalue(), indentation=' ' * 4, newline='\r\n')
    filename_out = os.path.join(outdir, "wpt_" + filename_base + '.kml')
    print("Output '%s'" % filename_out)
    # Use a context manager instead of the removed Python-2 ``file()``
    # builtin; this also guarantees the handle is closed (the original
    # leaked it).
    with open(filename_out, 'w') as outfile:
        outfile.write(result)
    # Validate the generated document against the Google-extensions schema.
    doc = etree.fromstring(result)
    assert Schema('kml22gx.xsd').validate(doc)
text('Section Name 6') with tag('div',klass="rectangle"): text("") write_row(2,[19,20],347.7,170) write_row(2,[21,22],347.7,170) with tag('p',style="font-size:16pt;color:#0f4d74;font-family:Century Gothic"): with tag('b'): text('Section Name 7') with tag('div',klass="rectangle"): text("") write_row(1,[23],715,178.8) with tag('p',style="font-size:16pt;color:#0f4d74;font-family:Century Gothic"): with tag('b'): text('Section Name 8') with tag('div',klass="rectangle"): text("") write_row(2,[24,25],347.7,170) doc.asis('<!DOCTYPE html>') with tag('html'): write_head() write_body() html_content=indent(doc.getvalue()) with open('charts.html','w') as temp_file: temp_file.write(html_content)
def generate_pet_list(pets, filename):
    """Render the adoptable pet-list HTML fragment and write it to *filename*.

    Parameters
    ----------
    pets : dict
        Keyed records, each read for 'Name', 'ID', 'CoverPhoto' and
        'LastIntakeUnixTime'.
    filename : str
        Output path, overwritten on every call.
    """
    doc, tag, text = Doc().tagtext()
    # Empty collection: emit only a placeholder message and stop.
    if not pets:
        with tag("div", klass="empty-pet-list"):
            text(
                "We currently have no pets of this type!"
                + " Please check out our other adoptable pets,"
                + " or consider fostering."
            )
        with open(filename, 'w+') as out_file:
            out_file.write(indent(doc.getvalue()))
        return
    # Lazy-loading and jPList assets.
    doc.asis(
        "<script src=\"" + LIST_THEME_PATH
        + "lazy/jquery.min.js\"></script>"
    )
    doc.asis(
        "<script type=\"text/javascript\" src=\"" + LIST_THEME_PATH
        + "lazy/jquery.lazy.min.js\"></script>"
    )
    doc.asis(
        "<link href=\"" + LIST_THEME_PATH
        + "jplist/jplist.styles.css\" rel=\"stylesheet\" type=\"text/css\" />"
    )
    # Sort dropdown driven by jPList data attributes.
    with tag("div", klass="sort-filter-options-parent"):
        with tag("div", klass="sort-filter-options"):
            with tag(
                "div",
                ("data-jplist-control", "dropdown-sort"),
                ("data-group", "group1"),
                ("data-name", "sorttitle"),
                klass="jplist-dd",
            ):
                with tag("div", ("data-type", "panel"), klass="jplist-dd-panel"):
                    text(" Sort by ")
                with tag("div", ("data-type", "content"), klass="jplist-dd-content"):
                    # sort by options
                    with tag("div", ("data-path", "default"), klass="jplist-dd-item"):
                        text(" Sort by ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-name"),
                        ("data-order", "asc"),
                        ("data-type", "text"),
                        klass="jplist-dd-item",
                    ):
                        text(" Name A - Z ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-name"),
                        ("data-order", "desc"),
                        ("data-type", "text"),
                        klass="jplist-dd-item",
                    ):
                        text(" Name Z - A ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-intake-date"),
                        ("data-order", "asc"),
                        ("data-type", "number"),
                        ("data-selected", "true"),
                        klass="jplist-dd-item",
                    ):
                        text(" Featured Pets First ")
                    with tag(
                        "div",
                        ("data-path", ".pet-list-intake-date"),
                        ("data-order", "desc"),
                        ("data-type", "number"),
                        ("data-selected", "true"),
                        klass="jplist-dd-item",
                    ):
                        text(" Newest Arrivals First ")
    # One card per pet.  (The original kept an unused pet_count counter;
    # removed as dead code.)
    with tag("div", ("data-jplist-group", "group1"), klass="pet-list"):
        for pet in pets:
            pet_name = pets[pet]['Name']
            pet_id = pets[pet]['ID']
            pet_photo = pets[pet]['CoverPhoto']
            with tag("div", "data-jplist-item", klass="pet-list-pet"):
                pet_link = PET_LINK_RELATIVE_PATH + "pet/" + pet_id
                with tag("a", href=pet_link):
                    with tag("div", klass="pet-list-image"):
                        # Swap auto-generated default photos for the
                        # placeholder image.
                        pet_photo_link = pet_photo
                        if "default_" in pet_photo:
                            pet_photo_link = PLACEHOLDER_IMAGE
                        doc.stag(
                            "img",
                            ("data-src", pet_photo_link),
                            src=PLACEHOLDER_IMAGE,
                            alt="Photo",
                            klass="lazy",
                        )
                    with tag("div", klass="pet-list-name"):
                        text(pet_name)
                    # Hidden epoch timestamp used by the jPList sort.
                    with tag("div", klass="pet-list-intake-date hidden"):
                        text(pets[pet]['LastIntakeUnixTime'])
    doc.asis(
        "<script>"
        + " $(function() {"
        + " $('.lazy').lazy();"
        + " });"
        + "</script>"
    )
    doc.asis(
        "<script src=\"//cdnjs.cloudflare.com/ajax/libs/babel-polyfill"
        + "/6.26.0/polyfill.min.js\"></script>"
    )
    doc.asis(
        "<script src=\"" + LIST_THEME_PATH
        + "jplist/jplist.min.js\"></script>"
    )
    with open(filename, 'w+') as out_file:
        out_file.write(indent(doc.getvalue()))
def parse_animal_profile(animal):
    """Render one animal's full profile page fragment as indented HTML.

    Parameters
    ----------
    animal : dict
        Read for "Photos", "Name", "ID", "Type" and "Description".

    Returns
    -------
    str
        The indented HTML: a fotorama photo gallery, the data block with
        an adopt link, the description, and a bottom call-to-action.
    """
    doc, tag, text, line = Doc().ttl()
    with tag("div", klass="pet-profile"):
        with tag("div", klass="pet-profile-images"):
            # jQuery + fotorama gallery assets.
            doc.asis(
                "<script src=\"https://ajax.googleapis.com/ajax/"
                + "libs/jquery/1.11.1/jquery.min.js\"></script>"
            )
            doc.asis(
                "<link href=\"https://cdnjs.cloudflare.com/ajax/"
                + "libs/fotorama/4.6.4/fotorama.css\" rel=\"stylesheet\">"
            )
            doc.asis(
                "<script src=\"https://cdnjs.cloudflare.com/ajax/"
                + "libs/fotorama/4.6.4/fotorama.js\"></script>"
            )
            with tag(
                "div",
                ("data-nav", "thumbs"),
                ("data-allowfullscreen", "true"),
                klass="fotorama"
            ):
                # Show every photo, or the placeholder when none exist.
                if len(animal["Photos"]) > 0:
                    for photo in animal["Photos"]:
                        doc.stag("img", src=photo)
                else:
                    doc.stag("img", src=PLACEHOLDER_IMAGE)
        with tag("div", klass="pet-profile-data"):
            with tag("div", klass="pet-profile-name"):
                text(animal["Name"])
            doc.stag("br")
            with tag("div", klass="pet-profile-other-data"):
                profile_other_data(animal, doc, text, line)
            # ShelterLuv application link, qualified by species when known.
            adopt_link = (
                "https://www.shelterluv.com/matchme/adopt/DPA-A-"
                + animal["ID"]
            )
            if animal["Type"] == "Dog":
                adopt_link = adopt_link + "?species=Dog"
            elif animal["Type"] == "Cat":
                adopt_link = adopt_link + "?species=Cat"
            with tag(
                "a",
                href=adopt_link,
                klass="pet-profile-top-adopt-button"
            ):
                text("Apply to Adopt " + animal["Name"])
        with tag("div", klass="pet-profile-description"):
            with tag("div", klass="pet-profile-description-title"):
                text("Meet " + animal["Name"] + "!")
            doc.stag("br")
            if len(animal["Description"]) < 3:
                # Effectively-empty description: show the fallback blurb.
                text(
                    "We don't have much information on this animal yet. "
                    + "If you'd like to find out more, "
                    + "please email [email protected]."
                )
            else:
                with tag("p"):
                    # Blank lines in the description become paragraph breaks.
                    doc.asis(animal["Description"].replace("\n\n", "</p><p>"))
        with tag(
            "div",
            klass=(
                # BUGFIX: the original concatenation lacked a separating
                # space, fusing the two class names into
                # "et_pb_bg_layout_darket_pb_text_align_center".
                "et_pb_promo et_pb_bg_layout_dark "
                + "et_pb_text_align_center pet-profile-adopt-bottom"
            ),
            style="background-color: #006cb7;"
        ):
            with tag("div", klass="et_pb_promo_description"):
                line("h2", "Apply to Adopt " + animal["Name"] + " Today")
                with tag("a", klass="et_pb_promo_button", href=adopt_link):
                    text("Go To Adoption Application")
    return indent(doc.getvalue())
with tag('tbody'): """ For each course in the courseCache, add the course's info as a row in the table """ for code in courseCache: with tag('tr'): with tag('td'): text(code) with tag('td'): text(courseCache[code]['Title']) with tag('td'): text(courseCache[code]['Units']) with tag('td'): text(courseCache[code]['Terms']) if 'Prerequisites' in courseCache[code]: with tag('td'): text(courseCache[code]['Prerequisites']) else: with tag('td'): text('n/a') with tag('td'): text(courseCache[code]['Level']) if 'Grading/Final exam status' in courseCache[code]: with tag('td'): text(courseCache[code]['Grading/Final exam status']) else: with tag('td'): text(courseCache[code]['Grading']) if 'Hours & Format' in courseCache[code]: with tag('td'): text(courseCache[code]['Hours & Format']) else: with tag('td'): text('n/a') if 'Instructors' in courseCache[code]: with tag('td'): text(courseCache[code]['Instructors']) else: with tag('td'): text('n/a') result = indent(doc.getvalue()) # Adds indents to help with readability in .html file html_file.write(result) # Write the string generated above to the .html file; gets overwritten each time courseScraper.py is called webbrowser.open('file://' + os.path.realpath('test.html')) # Open the file in the browser when courseScraper.py is called
def to_html(self, config):
    """Generate a html stream of the whole presentation.

    Walks chapters -> sections -> subsections -> slides, keeping the
    metadata entries (chapter/section/subsection/slide title and number)
    in sync with the slide being rendered, and wraps each slide in a div
    decorated with header, left/right sidebars and footer.

    Parameters
    ----------
    config : MatisseConfig
      MaTiSSe configuration

    Returns
    -------
    str
      The indented HTML document.
    """
    doc, tag, text = Doc().tagtext()
    doc.asis('<!DOCTYPE html>')
    with tag('html'):
        doc.attr(title=self.metadata['title'].value)
        self.__put_html_tag_head(doc=doc, tag=tag, text=text, config=config)
        # Arm the countdown timer with the talk's maximum time on load.
        with tag('body', onload="resetCountdown(" +
                 str(self.metadata['max_time'].value) + ");"):
            with tag('div', id='impress'):
                # numbering: [local_chap, local_sec, local_subsec, local_slide]
                current = [0, 0, 0, 0]
                for chapter in self.chapters:
                    # Entering a new chapter resets all deeper counters.
                    current[0] += 1
                    current[1] = 0
                    current[2] = 0
                    current[3] = 0
                    self.metadata['chaptertitle'].update_value(
                        value=chapter.title)
                    self.metadata['chapternumber'].update_value(
                        value=chapter.number)
                    for section in chapter.sections:
                        current[1] += 1
                        current[2] = 0
                        current[3] = 0
                        self.metadata['sectiontitle'].update_value(
                            value=section.title)
                        self.metadata['sectionnumber'].update_value(
                            value=section.number)
                        for subsection in section.subsections:
                            current[2] += 1
                            current[3] = 0
                            self.metadata['subsectiontitle'].update_value(
                                value=subsection.title)
                            self.metadata['subsectionnumber'].update_value(
                                value=subsection.number)
                            for slide in subsection.slides:
                                current[3] += 1
                                self.metadata['slidetitle'].update_value(
                                    value=slide.title)
                                self.metadata['slidenumber'].update_value(
                                    value=slide.number)
                                # Each slide div carries the attributes of
                                # its whole ancestry, then the decorators
                                # in header / sidebar-L / content /
                                # sidebar-R / footer order.
                                with doc.tag('div'):
                                    chapter.put_html_attributes(doc=doc)
                                    section.put_html_attributes(doc=doc)
                                    subsection.put_html_attributes(doc=doc)
                                    slide.put_html_attributes(doc=doc)
                                    self.__put_html_slide_decorators(
                                        tag=tag,
                                        doc=doc,
                                        decorator='header',
                                        current=current,
                                        overtheme=slide.overtheme)
                                    self.__put_html_slide_decorators(
                                        tag=tag,
                                        doc=doc,
                                        decorator='sidebar',
                                        position='L',
                                        current=current,
                                        overtheme=slide.overtheme)
                                    slide.to_html(doc=doc,
                                                  parser=self.parser,
                                                  metadata=self.metadata,
                                                  theme=self.theme,
                                                  current=current)
                                    self.__put_html_slide_decorators(
                                        tag=tag,
                                        doc=doc,
                                        decorator='sidebar',
                                        position='R',
                                        current=current,
                                        overtheme=slide.overtheme)
                                    self.__put_html_slide_decorators(
                                        tag=tag,
                                        doc=doc,
                                        decorator='footer',
                                        current=current,
                                        overtheme=slide.overtheme)
        # Script tags emitted after the body content.
        self.__put_html_tags_scripts(doc=doc, tag=tag, config=config)
    # source = re.sub(r"<li>(?P<item>.*)</li>", r"<li><span>\g<item></span></li>", source)
    html = indent(doc.getvalue())
    return html