def main():
    parser = ArgumentParser()
    parser.add_argument('-i', '--input', default='-')
    parser.add_argument('-o', '--output', default='-')
    parser.add_argument('-f', '--output-format', choices=('simple', 'latex'), default='simple')
    parser.add_argument('-n', '--table-name', default="timing")
    parser.add_argument('-c', '--table-caption', default="Timing results")
    args = parser.parse_args()

    with fileinput.input(args.input) as i:
        rows = summarize_timing(i)
    if len(rows) == 0:
        print("No results to summarize")

    with fileoutput(args.output) as o:
        w = csv.writer(o, delimiter="\t")
        if args.output_format == 'simple':
            w.writerow(('Program', 'Threads', 'Error Rate', 'Quality Cutoff', 'Time'))
            w.writerows(rows)
        else:
            from mako.template import Template
            template = Template(filename='timing_table_template.latex')
            rows, threads = format_table(rows)
            o.write(template.render(rows=rows, threads=threads,
                                    name=args.table_name,
                                    caption=args.table_caption))
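# Note: `fileinput`, `csv`, `ArgumentParser`, `summarize_timing`, `format_table`
# and `fileoutput` are assumed to be imported or defined elsewhere in the original
# script. `fileoutput` is not a standard-library name; a minimal sketch of what
# such a helper presumably does (write to stdout for '-', otherwise open the named
# file) might look like this:

import sys
from contextlib import contextmanager

@contextmanager
def fileoutput(path):
    # Hypothetical helper mirroring fileinput's '-' convention, but for output.
    if path == '-':
        yield sys.stdout
    else:
        with open(path, 'w', newline='') as fh:
            yield fh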
def generate_scenario_report(self, scenario, stats): """Format a report based on calculated statistics for an executed scenario. :stats: A python data structure with calculated statistics :returns: A report (string) suitable for printing, emailing, etc. """ template = Template(self.scenario_template()) tmpl_vars = { 'size_data': [], 'stat_list': [ ('TOTAL', stats['agg_stats'], stats['size_stats']), ('CREATE', stats['op_stats'][ssbench.CREATE_OBJECT], stats['op_stats'][ssbench.CREATE_OBJECT]['size_stats']), ('READ', stats['op_stats'][ssbench.READ_OBJECT], stats['op_stats'][ssbench.READ_OBJECT]['size_stats']), ('UPDATE', stats['op_stats'][ssbench.UPDATE_OBJECT], stats['op_stats'][ssbench.UPDATE_OBJECT]['size_stats']), ('DELETE', stats['op_stats'][ssbench.DELETE_OBJECT], stats['op_stats'][ssbench.DELETE_OBJECT]['size_stats']), ], 'agg_stats': stats['agg_stats'], 'nth_pctile': stats['nth_pctile'], 'start_time': datetime.utcfromtimestamp( stats['time_series']['start_time'] ).strftime(REPORT_TIME_FORMAT), 'stop_time': datetime.utcfromtimestamp( stats['time_series']['stop']).strftime(REPORT_TIME_FORMAT), 'duration': stats['time_series']['stop'] - stats['time_series']['start_time'], 'weighted_c': 0.0, 'weighted_r': 0.0, 'weighted_u': 0.0, 'weighted_d': 0.0, } for size_data in scenario.sizes_by_name.values(): if size_data['size_min'] == size_data['size_max']: size_range = '%-15s' % ( self._format_bytes(size_data['size_min']),) else: size_range = '%s - %s' % ( self._format_bytes(size_data['size_min']), self._format_bytes(size_data['size_max'])) initial_files = scenario._scenario_data['initial_files'] initial_total = sum(initial_files.values()) pct_total = (initial_files.get(size_data['name'], 0) / float(initial_total) * 100.0) tmpl_vars['size_data'].append({ 'crud_pcts': ' '.join(map(lambda p: '%2.0f' % p, size_data['crud_pcts'])), 'size_range': size_range, 'size_name': size_data['name'], 'pct_total_ops': '%3.0f%%' % pct_total, }) tmpl_vars['weighted_c'] += pct_total * size_data['crud_pcts'][0] / 100.0 tmpl_vars['weighted_r'] += pct_total * size_data['crud_pcts'][1] / 100.0 tmpl_vars['weighted_u'] += pct_total * size_data['crud_pcts'][2] / 100.0 tmpl_vars['weighted_d'] += pct_total * size_data['crud_pcts'][3] / 100.0 return template.render(scenario=scenario, stats=stats, **tmpl_vars)
def test_module_roundtrip(self):
    lookup = TemplateLookup()

    template = Template("""
        <%inherit file="base.html"/>

        % for x in range(5):
            ${x}
        % endfor
    """, lookup=lookup)

    base = Template("""
        This is base.
        ${self.body()}
    """, lookup=lookup)

    lookup.put_template("base.html", base)
    lookup.put_template("template.html", template)

    assert result_lines(template.render()) == [
        "This is base.", "0", "1", "2", "3", "4"
    ]

    lookup = TemplateLookup()
    template = ModuleTemplate(template.module, lookup=lookup)
    base = ModuleTemplate(base.module, lookup=lookup)

    lookup.put_template("base.html", base)
    lookup.put_template("template.html", template)

    assert result_lines(template.render()) == [
        "This is base.", "0", "1", "2", "3", "4"
    ]
def _execute(self):
    # Create Directory
    target = os.path.join(self.drupal_dir, 'sites', 'default', 'modules', 'sbc_version')
    if not os.path.exists(target):
        print " mkdirs", target
        os.makedirs(target)

    # Copy in static assets
    for filename in ['sbc_version.info', 'sbc_version.module']:
        src = os.path.join(self.asset_path, filename)
        dst = os.path.join(target, filename)
        print " cp %s -> %s" % (src, dst)
        shutil.copy(src, dst)

    # Write dynamic CSS
    tpl = None
    path = os.path.join(self.asset_path, 'sbc_version.css.tpl')
    with open(path, 'rt') as fh:
        tpl = MakoTemplate(fh.read())
    src = tpl.render(
        name=self.instance.name,
        instance_desc=self.instance.instance_title,
        instance_color=self.instance.instance_color)
    target_path = os.path.join(target, 'sbc_version.css')
    print " generating", target_path
    with open(target_path, 'wt') as fh:
        fh.write(src)

    return True
def test_strict(self):
    t = Template("""
        % if x is UNDEFINED:
            undefined
        % else:
            x: ${x}
        % endif
    """, strict_undefined=True)

    assert result_lines(t.render(x=12)) == ['x: 12']
    assert_raises(
        NameError,
        t.render, y=12
    )

    l = TemplateLookup(strict_undefined=True)
    l.put_string("a", "some template")
    l.put_string("b", """
        <%namespace name='a' file='a' import='*'/>
        % if x is UNDEFINED:
            undefined
        % else:
            x: ${x}
        % endif
    """)

    assert result_lines(l.get_template("b").render(x=12)) == ['x: 12']
    assert_raises(
        NameError,
        l.get_template("b").render, y=12
    )
def get(self):
    if not validate_user_handles(self.request, self.response):
        return

    twitter_handle = self.request.get('t')
    youtube_handle = self.request.get('yt')
    vimeo_handle = self.request.get('v')
    logging.debug("------------->twitter: %s youtube: %s vimeo: %s",
                  twitter_handle, youtube_handle, vimeo_handle)

    if self.request.headers.get('X-Requested-With') == 'XMLHttpRequest':
        #getGooglePlusOnes()
        youtube_events = getYouTubeEvents(youtube_handle)
        twitter_favorites = getTwitterFavorites(twitter_handle)
        vimeo_likes = getVimeoLikes(vimeo_handle)
        data = youtube_events + twitter_favorites + vimeo_likes
        self.response.set_status(200)
        self.response.out.write(json.dumps(data))
    else:
        data = {
            'twitter_handle_default': twitter_handle,
            'youtube_handle_default': youtube_handle,
            'vimeo_handle_default': vimeo_handle
        }
        makoLookupDir = TemplateLookup(directories=['.'])
        mytemplate = Template(filename="index.html", lookup=makoLookupDir)
        self.response.out.write(mytemplate.render(**data))
def _do_test_traceback(self, utf8, memory, syntax):
    if memory:
        if syntax:
            source = u('## coding: utf-8\n<% print "m’a réveillé. '\
                'Elle disait: « S’il vous plaît… dessine-moi un mouton! » %>')
        else:
            source = u('## coding: utf-8\n<% print u"m’a réveillé. '\
                'Elle disait: « S’il vous plaît… dessine-moi un mouton! »" + str(5/0) %>')
        if utf8:
            source = source.encode('utf-8')
        else:
            source = source
        templateargs = {'text': source}
    else:
        if syntax:
            filename = 'unicode_syntax_error.html'
        else:
            filename = 'unicode_runtime_error.html'
        source = util.read_file(self._file_path(filename), 'rb')
        if not utf8:
            source = source.decode('utf-8')
        templateargs = {'filename': self._file_path(filename)}

    try:
        template = Template(**templateargs)
        if not syntax:
            template.render_unicode()
        assert False
    except Exception:
        tback = exceptions.RichTraceback()
        if utf8:
            assert tback.source == source.decode('utf-8')
        else:
            assert tback.source == source
def sendHTML(siteRecords, NWISqueryURL):
    outTemplate = Template(filename='/var/www/mapper/exporter/Templates/HTMLExport.txt')
    # build a metadata string from the scodes
    metaData = getHTMLMetaData(len(siteRecords), NWISqueryURL)
    htmlOut = outTemplate.render(meta=metaData, sites=siteRecords)
    return htmlOut
def get(self):
    if not validate_userhandle_name(self.request, self.response):
        return

    userhandle_name = self.request.get('name')
    userhandle = db.GqlQuery("SELECT * "
                             "FROM UserHandle "
                             "WHERE ANCESTOR IS :1 ",
                             userhandle_key(userhandle_name)).get()
    if not userhandle:
        self.response.set_status(403)
        self.response.out.write("Page does not exist")
        return

    #for userhandle in userhandles:
    logging.debug("%s", userhandle)
    data = {
        'twitter_handle_default': userhandle.twitter_handle,
        'youtube_handle_default': userhandle.youtube_handle,
        'vimeo_handle_default': userhandle.vimeo_handle
    }
    makoLookupDir = TemplateLookup(directories=['.'])
    mytemplate = Template(filename="index.html", lookup=makoLookupDir)
    self.response.out.write(mytemplate.render(**data))
def create_template_file(self, template, output):
    """Creates template files for ant in `build/temp`."""
    output_dir = os.path.dirname(output)
    tmpl = Template(filename=template)
    output_file = open(output, 'w')
    output_file.write(tmpl.render(**self.options))
    output_file.close()
def generate_index():
    # write template
    tpl = Template(filename=config.template_dir + "/index.tpl",
                   default_filters=['decode.utf8'],
                   input_encoding='utf-8',
                   output_encoding='utf-8',
                   encoding_errors='replace')
    content = tpl.render(profiles=config.profiles)

    f = open(config.output_dir + "/index.htm", 'w')
    f.write(content)
    f.close()
def generateFile(isResponse, className, templatePath, properties, requestConf, headerList):
    templatePathH = templatePath + '.h'
    templatePathM = templatePath + '.m'
    templateH = Template(filename=templatePathH, input_encoding='utf-8', output_encoding='utf-8')
    templateM = Template(filename=templatePathM, input_encoding='utf-8', output_encoding='utf-8')
    writeFile(className + '.h',
              templateH.render(className=className, properties=properties, headerList=headerList))
    writeFile(className + '.m',
              templateM.render(className=className, requestConf=requestConf, headerList=headerList))
def _makeGraphItem(self):
    self.graphItem = QGraphicsWebView()
    self.graphItem.setResizesToContents(True)
    self.graphItem.setPos(0, 0)
    self.graphItem.setZValue(-1)  # put under edges

    tmpl = Template(filename=os.path.join(main.MAIN_DIR, 'graph.html'))
    html = tmpl.render(
        blocks=[
            (self._getBlockElementID(mb.addr), mb)
            for mb in self.myBlocks],
        edges=[
            (self._getEdgeElementID(b1Addr, b2Addr), edgeInfo['type'])
            for (b1Addr, b2Addr, edgeInfo)
            in self.blockGraph.edges_iter(data=True)],
        fmtAddr=self._formatAddr,
        fmtHex=self._formatHex,
        fmtAsm=self._formatAsm)
    self.graphItem.setHtml(html)
    self.addItem(self.graphItem)

    mainFrame = self.graphItem.page().mainFrame()
    self.blockElements = dict(
        (self._blockElementIDToAddr(blockElem.attribute('id')), blockElem)
        for blockElem in mainFrame.findAllElements(".block"))
    self.edgeElements = dict(
        (self._edgeElementIDToAddrs(edgeElem.attribute('id')), edgeElem)
        for edgeElem in mainFrame.findAllElements(".edge"))
def savesettings(self, allopts, token):
    if str(token) != str(self.token):
        return self.error("Invalid token (" + str(self.token) + ").")

    try:
        dbh = SpiderFootDb(self.config)
        # Reset config to default
        if allopts == "RESET":
            dbh.configClear()  # Clear it in the DB
            self.config = deepcopy(self.defaultConfig)  # Clear in memory
        else:
            useropts = json.loads(allopts)
            cleanopts = dict()
            for opt in useropts.keys():
                cleanopts[opt] = self.cleanUserInput([useropts[opt]])[0]

            currentopts = deepcopy(self.config)

            # Make a new config where the user options override
            # the current system config.
            sf = SpiderFoot(self.config)
            self.config = sf.configUnserialize(cleanopts, currentopts)
            dbh.configSet(sf.configSerialize(currentopts))
    except Exception as e:
        return self.error("Processing one or more of your inputs failed: " + str(e))

    templ = Template(filename='dyn/opts.tmpl', lookup=self.lookup)
    self.token = random.randint(0, 99999999)
    return templ.render(opts=self.config, pageid='SETTINGS', updated=True,
                        docroot=self.docroot, token=self.token)
def stopscanmulti(self, ids):
    global globalScanStatus  # running scans
    dbh = SpiderFootDb(self.config)
    error = list()

    for id in ids.split(","):
        errState = False
        scaninfo = dbh.scanInstanceGet(id)

        if globalScanStatus.getStatus(id) == "FINISHED" or scaninfo[5] == "FINISHED":
            error.append("Scan '" + scaninfo[0] + "' is in a finished state. <a href='/scandelete?id=" + \
                         id + "&confirm=1'>Maybe you want to delete it instead?</a>")
            errState = True

        if not errState and (globalScanStatus.getStatus(id) == "ABORTED" or scaninfo[5] == "ABORTED"):
            error.append("Scan '" + scaninfo[0] + "' is already aborted.")
            errState = True

        if not errState and globalScanStatus.getStatus(id) is None:
            error.append("Scan '" + scaninfo[0] + "' is not actually running. A data consistency " + \
                         "error for this scan probably exists. <a href='/scandelete?id=" + \
                         id + "&confirm=1'>Click here to delete it.</a>")
            errState = True

        if not errState:
            globalScanStatus.setStatus(id, "ABORT-REQUESTED")

    templ = Template(filename='dyn/scanlist.tmpl', lookup=self.lookup)
    return templ.render(pageid='SCANLIST', stoppedscan=True, errors=error, docroot=self.docroot)
def extract_rpclib(args):
    """ THE extraction function """
    workdir = tempfile.mkdtemp()

    rpclib_root = args.rpclib_root
    new_rpclib_root = tempfile.mkdtemp()
    copy_tree(rpclib_root, new_rpclib_root)
    patch_rpclib(new_rpclib_root)

    copy_important_files(workdir, new_rpclib_root)
    version = extract_rpclib_version(new_rpclib_root)
    shutil.rmtree(new_rpclib_root)

    cmake_template = Template(
        filename=os.path.join(os.path.dirname(os.path.realpath(__file__)),
                              "rpc_CMakeLists.txt"))
    real_cmake_file = cmake_template.render(
        rpclib_major_version=version[0],
        rpclib_minor_version=version[1],
        rpclib_patch_version=version[2]
    )
    with open(os.path.join(workdir, "CMakeLists.txt"), "w") as cmake_file:
        cmake_file.write(real_cmake_file)
        cmake_file.truncate()

    fix_naming(workdir)
    copy_tree(workdir, args.rpclib_target, update=True)
    shutil.rmtree(workdir)
def newscan(self):
    dbh = SpiderFootDb(self.config)
    types = dbh.eventTypes()
    templ = Template(filename='dyn/newscan.tmpl', lookup=self.lookup)
    return templ.render(pageid='NEWSCAN', types=types, docroot=self.docroot,
                        modules=self.config['__modules__'], scanname="",
                        selectedmods="", scantarget="")
def render_GET(self, request):
    path = request.path[1:]
    uri = request.uri[1:]

    if hasattr(os, 'winver'):
        lookup = TemplateLookup(directories=[self._file_access.path],
                                default_filters=[],
                                input_encoding='utf-8')
    else:
        lookup = TemplateLookup(directories=['/'],
                                default_filters=[],
                                input_encoding='utf-8')

    for regex, f in self._mappings:
        match = regex.match(uri)
        if match:
            vars = match.groupdict()
            self._io.info('matched: ' + regex.pattern)
            self._io.info('vars: ' + str(vars))
            self._io.info('serving: ' + f.path)
            request.setHeader('Content-Type', 'text/html; charset=utf-8')
            t = Template(filename=f.path,
                         default_filters=[],
                         input_encoding='utf-8',
                         lookup=lookup)
            try:
                vars['__repos'] = self._repos
                return t.render(**vars)
            except BaseException, e:
                for num, line in enumerate(t.code.splitlines()):
                    print '%d: %s' % (num, line)
                raise
def GET(self, title=None, page_name='main_page'):
    if cherrypy.request.path_info.count('/') != 3:
        raise cherrypy.HTTPRedirect('/book/{0}/{1}'.format(title, page_name))

    pageData = None
    if os.path.exists(os.path.join(self.root_path, 'books', title, page_name)):
        with codecs.open(os.path.join(self.root_path, 'books', title, page_name),
                         mode='r', encoding='utf-8') as f:
            pageData = {}
            pageData['page_name'] = page_name
            pageData['page_body_raw'] = f.read()

    if page_name == 'main_page':
        template = Template(filename=os.path.join(self.root_path, 'templates/book.html'))
    else:
        template = Template(filename=os.path.join(self.root_path, 'templates/page.html'))

    if pageData is None:
        pageData = self.createPage(title, page_name)
        if page_name == 'main_page':
            pageData['pages'] = []
        pageData['page_body'] = ''
    else:
        if page_name == 'main_page':
            pageData['pages'] = self.buildContents(title)
        pageData['page_body'] = markdown.markdown(pageData['page_body_raw'])

    pageData['title'] = title
    pageData['page_name'] = page_name
    return template.render(**pageData)
def create_configuration(cls, target):
    lib_path = cls.get_path_to_nikola_modules()
    template_path = os.path.join(lib_path, 'conf.py.in')
    conf_template = Template(filename=template_path)
    conf_path = os.path.join(target, 'conf.py')
    with codecs.open(conf_path, 'w+', 'utf8') as fd:
        fd.write(conf_template.render(**cls.SAMPLE_CONF))
def render(self, **kwargs):
    powdict = kwargs["powdict"]
    kwargs["powdict"] = powdict
    kwargs["template"] = powlib.readconfig("pow.cfg", "global", "DEFAULT_TEMPLATE",
                                           powdict["POW_APP_DIR"])
    special_tmpl = None
    if kwargs.has_key("special_tmpl"):
        special_tmpl = kwargs["special_tmpl"]
        del kwargs["special_tmpl"]

    if self.current_action not in self.locked_actions:
        if self.access_granted(**kwargs) == True:
            first_part = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../views/")
            if special_tmpl == None:
                fname = self.modelname + "_" + self.current_action + ".tmpl"
            else:
                fname = special_tmpl
            mytemplate = self.mylookup.get_template(fname)
            #mytemplate = Template(filename=fname, lookup=self.mylookup)
            return mytemplate.render(**kwargs)
        else:
            #self.setCurrentAction("login")
            kwargs["powdict"]["FLASHTEXT"] = "You need to be logged in to access method: %s" % (str(self.current_action))
            kwargs["powdict"]["FLASHTYPE"] = "error"
            fname = os.path.abspath(os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "../views/Appinfo_login.tmpl"))
            mytemplate = Template(filename=fname, lookup=self.mylookup)
            return mytemplate.render(**kwargs)
    else:
        kwargs["ERROR_INFO"] = "The action you have called (%s) is locked from outside access." % str(self.current_action)
        return self.error(**kwargs)
def run(output_file_name):
    lookup = TemplateLookup(directories=['./templates'])
    page = Template(filename='./templates/album_list.mako', lookup=lookup)
    with open(output_file_name, 'w') as f:
        f.write(page.render(albums=Album.select(),
                            tags=popular_tags()).encode('utf8'))
def render_mako_string(tpldir, tplname, data, trust=True):
    """Render a mako template to a string.

    :param tpldir:
    :param tplname:
    :param data:
    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    show_errors = settings.DEBUG_MODE  # thanks to abought
    # TODO: The "trust" flag is expected to be temporary, and should be removed
    #       once all templates manually set it to False.
    lookup_obj = _TPL_LOOKUP_SAFE if trust is False else _TPL_LOOKUP

    tpl = mako_cache.get(tplname)
    if tpl is None:
        with open(os.path.join(tpldir, tplname)) as f:
            tpl_text = f.read()
        tpl = Template(
            tpl_text,
            format_exceptions=show_errors,
            lookup=lookup_obj,
            input_encoding='utf-8',
            output_encoding='utf-8',
            default_filters=lookup_obj.template_args['default_filters'],
            imports=lookup_obj.template_args['imports']
            # FIXME: Temporary workaround for data stored in wrong format in DB.
            # Unescape it before it gets re-escaped by Markupsafe.
        )
    # Don't cache in debug mode
    if not app.debug:
        mako_cache[tplname] = tpl
    return tpl.render(**data)
def test_utf8_html_error_template(self): """test the html_error_template with a Template containing utf8 chars""" if util.py3k: code = """# -*- coding: utf-8 -*- % if 2 == 2: /an error ${'привет'} % endif """ else: code = """# -*- coding: utf-8 -*- % if 2 == 2: /an error ${u'привет'} % endif """ try: template = Template(code) template.render_unicode() except exceptions.CompileException, ce: html_error = exceptions.html_error_template().render() assert ("CompileException: Fragment 'if 2 == 2: /an " "error' is not a partial control " "statement at line: 2 char: 1") in \ html_error.decode('utf-8') if util.py3k: assert u"3 ${'привет'}".encode(sys.getdefaultencoding(), 'htmlentityreplace') in html_error else: assert u"3 ${u'привет'}".encode(sys.getdefaultencoding(), 'htmlentityreplace') in html_error
def result(self, result):
    lookup = TemplateLookup(directories=['houseagent/templates/'])
    template = Template(filename='houseagent/plugins/zwave/templates/add.html', lookup=lookup)
    self.request.write(str(template.render(result=result[1],
                                           locations=result[0],
                                           node=self.node,
                                           pluginid=self.pluginid,
                                           pluginguid=self.pluginguid)))
    self.request.finish()
def test_scope_ten(self):
    t = Template("""
        <%def name="a()">
            <%def name="b()">
                <%
                    y = 19
                %>
                b/c: ${c()}
                b/y: ${y}
            </%def>
            <%def name="c()">
                c/y: ${y}
            </%def>

            <%
                # we assign to "y".  but the 'enclosing scope' of "b"
                # and "c" is from the "y" on the outside
                y = 10
            %>
            a/y: ${y}
            a/b: ${b()}
        </%def>

        <%
            y = 7
        %>
        main/a: ${a()}
        main/y: ${y}
    """)
    assert flatten_result(t.render()) == \
        "main/a: a/y: 10 a/b: b/c: c/y: 10 b/y: 19 main/y: 7"
def test_html_error_template(self):
    """test the html_error_template"""
    code = """
% i = 0
"""
    try:
        template = Template(code)
        template.render_unicode()
    except exceptions.CompileException, ce:
        html_error = exceptions.html_error_template().render_unicode()
        assert ("CompileException: Fragment 'i = 0' is not a partial "
                "control statement") in html_error
        assert '<style>' in html_error
        assert '</style>' in html_error
        html_error_stripped = html_error.strip()
        assert html_error_stripped.startswith('<html>')
        assert html_error_stripped.endswith('</html>')

        not_full = exceptions.html_error_template().\
            render_unicode(full=False)
        assert '<html>' not in not_full
        assert '</html>' not in not_full
        assert '<style>' in not_full
        assert '</style>' in not_full

        no_css = exceptions.html_error_template().\
            render_unicode(css=False)
        assert '<style>' not in no_css
        assert '</style>' not in no_css
def submit(request):
    params = request.params

    emailTemplate = Template(filename='relentless/templates/appointmentEmail.mak')
    outputBuffer = StringIO()
    contextObject = Context(outputBuffer, params=params)
    emailTemplate.render_context(contextObject)
    emailBody = outputBuffer.getvalue()
    msg = MIMEText(emailBody)

    # Email a copy of the booking to our email address
    sender = '*****@*****.**'
    receivers = ['*****@*****.**']
    msg['Subject'] = 'Relentless Cleaning Booking! %s ' % (params['inputAddress'])
    msg['From'] = sender
    msg['To'] = '*****@*****.**'

    server = smtplib.SMTP('smtp.gmail.com:587')
    username = '******'
    password = '******'
    server.ehlo()
    server.starttls()
    server.login(username, password)
    server.sendmail(sender, receivers, msg.as_string())
    server.quit()

    return {'params': params}
def test_scope_five(self):
    """test that variables are pulled from 'enclosing' scope before context."""
    # same as test four, but adds a scope around it.
    t = Template("""
        <%def name="enclosing()">
            <%
                x = 5
            %>
            <%def name="a()">
                this is a. x is ${x}.
            </%def>

            <%def name="b()">
                <%
                    x = 9
                %>
                this is b. x is ${x}. calling a. ${a()}
            </%def>

            ${b()}
        </%def>
        ${enclosing()}
    """)
    assert flatten_result(t.render()) == \
        "this is b. x is 9. calling a. this is a. x is 5."
def test_hanzi(self):
    value = {'rendered_val': u'a test val to render',
             'rendered_hanzi': u'中文'}

    # render() returns a str; render_unicode() returns unicode
    html = Template(filename='hanzi.html', input_encoding="utf-8").render_unicode(**value)
    # check the first part of the output
    self.assertTrue(u"模板里面的中文" in html)
    # check the second part of the output
    self.assertTrue(u'a test val to render' in html)
    self.assertTrue(u'中文' in html)

    # output_encoding was specified, so decode back to unicode
    html = Template(filename='hanzi.html', input_encoding="utf-8",
                    output_encoding='utf-8',
                    encoding_errors='replace').render(**value)
    html = html.decode('utf-8')
    # check the first part of the output
    self.assertTrue(u"模板里面的中文" in html)
    # check the second part of the output
    self.assertTrue(u'a test val to render' in html)
    self.assertTrue(u'中文' in html)
def index(self):
    # Look for referenced templates in the current directory only
    templ = Template(filename='dyn/scanlist.tmpl', lookup=self.lookup)
    return templ.render(pageid='SCANLIST', docroot=self.docroot)
}

# sequence
% for num in sequence:
Z201708010000${num}
% endfor
'''

# process the input
input = getClipboardText()
list = input.split("\r\n")
# print "list", list
if len(list) == 2:
    start = int(list[0])
    end = int(list[1])
    sequence = ['{cnt}'.format(cnt=cnt) for cnt in range(start, end)]

    mytemplate = Template(text=ftl, input_encoding='utf-8', output_encoding='utf-8',
                          default_filters=['decode.utf8'])
    # mytemplate = Template(filename='template/code-clipboard.ftl', input_encoding='utf-8',
    #                       output_encoding='utf-8', default_filters=['decode.utf8'])
    outputContent = mytemplate.render(sequence=sequence, columnList=columnList, kvList=kvList)
    print outputContent

    import os
    desktopPath = os.getenv("USERPROFILE") + "\Desktop"
    outputPath = desktopPath + "\py-controller.txt"
    print "outputPath", outputPath
    fileWrite(outputPath, outputContent)
def render_file(self, tmpl_file: str, ctx: Optional[Dict] = None):
    r'''render plain file ``tmpl_file`` in mako (notice that there is no lookup)'''
    return Template(
        filename=tmpl_file,
        lookup=self.lookup,
        **self.extra_option).render_unicode(**MakoContextFilter(ctx))
def render(self, tmpl: str, ctx: Optional[Dict] = None):
    r'''render raw string ``tmpl`` in mako'''
    return Template(
        tmpl,
        lookup=self.lookup,
        **self.extra_option).render_unicode(**MakoContextFilter(ctx))
# -*- coding: utf-8 -*-
################################################################################
"""
Try to get names defined in a mako template
"""
from mako.template import Template

bar = Template('Basic template with ${name}.')
print('call: bar.render(name=\'...\')')
print(bar.render(name='testing epta'))

#################################################
# how to find identifiers in bar?
from mako import lexer, codegen

lexer = lexer.Lexer(bar.source)
node = lexer.parse()

compiler = lambda: None
compiler.reserved_names = set()
identifiers = codegen._Identifiers(compiler, node)

# All template variables can be found using this object,
# but you are probably interested in the undeclared names,
# i.e. ``identifiers.undeclared``.
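# For completeness, a short continuation of the snippet above. It relies on the
# same private `codegen._Identifiers` helper, so treat it as a sketch against
# mako's internal API rather than a documented interface.

# Undeclared names are the ones the template expects the caller to supply;
# for 'Basic template with ${name}.' this is expected to print {'name'}.
print(identifiers.undeclared)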
def startscan(self, scanname, scantarget, modulelist, typelist):
    global globalScanStatus

    # Snapshot the current configuration to be used by the scan
    cfg = deepcopy(self.config)
    modopts = dict()  # Not used yet as module options are set globally
    modlist = list()
    sf = SpiderFoot(cfg)
    dbh = SpiderFootDb(cfg)
    types = dbh.eventTypes()
    targetType = None
    [scanname, scantarget] = self.cleanUserInput([scanname, scantarget])

    if scanname == "" or scantarget == "":
        return self.error("Form incomplete.")

    if typelist == "" and modulelist == "":
        return self.error("Form incomplete.")

    if modulelist != "":
        modlist = modulelist.replace('module_', '').split(',')
    else:
        typesx = typelist.replace('type_', '').split(',')
        # 1. Find all modules that produce the requested types
        modlist = sf.modulesProducing(typesx)
        newmods = deepcopy(modlist)
        newmodcpy = deepcopy(newmods)
        # 2. For each type those modules consume, get modules producing
        while len(newmodcpy) > 0:
            for etype in sf.eventsToModules(newmodcpy):
                xmods = sf.modulesProducing([etype])
                for mod in xmods:
                    if mod not in modlist:
                        modlist.append(mod)
                        newmods.append(mod)
            newmodcpy = deepcopy(newmods)
            newmods = list()

    # Add our mandatory storage module..
    if "sfp__stor_db" not in modlist:
        modlist.append("sfp__stor_db")
    modlist.sort()

    regexToType = {
        "^\d+\.\d+\.\d+\.\d+$": "IP_ADDRESS",
        "^\d+\.\d+\.\d+\.\d+/\d+$": "NETBLOCK_OWNER",
        "^.[a-zA-Z\-0-9\.]+$": "INTERNET_NAME"
    }

    # Parse the target and set the targetType
    for rx in regexToType.keys():
        if re.match(rx, scantarget, re.IGNORECASE):
            targetType = regexToType[rx]
            break

    if targetType is None:
        return self.error("Invalid target type. Could not recognize it as " + \
                          "an IP address, IP subnet, domain name or host name.")

    # Start running a new scan
    scanId = sf.genScanInstanceGUID(scanname)
    t = SpiderFootScanner(scanname, scantarget.lower(), targetType, scanId,
                          modlist, cfg, modopts)
    t.start()

    # Wait until the scan has initialized
    while globalScanStatus.getStatus(scanId) is None:
        print "[info] Waiting for the scan to initialize..."
        time.sleep(1)

    templ = Template(filename='dyn/scaninfo.tmpl', lookup=self.lookup)
    return templ.render(id=scanId, name=scanname, docroot=self.docroot,
                        status=globalScanStatus.getStatus(scanId), pageid="SCANLIST")
def error(self, message):
    templ = Template(filename='dyn/error.tmpl', lookup=self.lookup)
    return templ.render(message=message, docroot=self.docroot)
def parse_tmpl(_tmpl_text, **kwargs):
    return Template(_tmpl_text).render(**kwargs)
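# A quick usage sketch of the helper above (the greeting text and keyword
# argument are illustrative only):

greeting = parse_tmpl("Hello, ${name}!", name="Mako")
assert greeting == "Hello, Mako!"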
from mako.template import Template
from mako.lookup import TemplateLookup
import pynliner

import logger
from config import config
import sender
import datastore

_SLEEP_TIME_SECS = 300

# Load the templates at startup
_templates = {
    'stats': Template(filename='templates/stats.mako',
                      default_filters=['unicode', 'h'],
                      lookup=TemplateLookup(directories=['.'])),
    'warning': Template(filename='templates/stats_warning.mako',
                        default_filters=['unicode', 'h'],
                        lookup=TemplateLookup(directories=['.'])),
}


def _render_email(template_name, data):
    rendered = _templates[template_name].render(data=data)

    # CSS in email HTML must be inline
    rendered = pynliner.fromString(rendered)

    return rendered
cams = next_line.split(",") cams = [cam_longname(c.strip()) for c in cams if len(c.strip())] cams = [(c, module_get_status(m, c)) for c in cams] cameras += cams if l.strip().startswith("Not checked"): next_line = lines[i+1] cams = next_line.split(",") cams = [cam_longname(c.strip()) for c in cams if len(c.strip())] cams = [(c, "?") for c in cams] cameras += cams return cameras for m in modules: f = "MODULE__" + m MN_DICT[m] = current_menu MN_COUNT[mn] = MN_COUNT.get(mn,0) + 1 ok_cams = module_check_cams(m) for c,s in ok_cams: if s: FD[c,f] = s AF.append(f) version = run("LC_TIME=EN date +'%Y%b%d' && hg id") data = {'FD':FD, 'AF':AF, 'cams':cams, 'shortnames':shortnames, 'menus':menus, 'MN_COUNT': MN_COUNT, 'MN_DICT': MN_DICT, 'porting_threads': porting_threads, 'feature_links': feature_links, 'version': version} mytemplate = Template(filename='features.tmpl') print mytemplate.render(**data)
def default_template():
    here = os.path.dirname(os.path.abspath(__file__))
    tmp_filename = os.path.join(here, 'tmpl8_CUDA.py')
    template = Template(filename=tmp_filename)
    return template
def template(tmpl):
    return Template(filename=os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'templates', tmpl))
def get(self):
    # Add the system information to the webpage.
    index = Template(filename='Web/index.html').render(
        systemConfiguration=configuration().systemConfiguration())
    self.write(index)
def _render_attribute_template(self, template, data):
    t = Template(template, cache_enabled=True,
                 imports=["from satosa.attribute_mapping import scope"])
    try:
        return t.render(**data).split(self.multivalue_separator)
    except (NameError, TypeError) as e:
        return []
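# The NameError branch above works because rendering ${x} without supplying x
# makes mako substitute its UNDEFINED sentinel, and stringifying that sentinel
# raises NameError. A standalone sketch of the same pattern (the separator,
# template text and attribute values are made up for illustration, not SATOSA's
# actual configuration):

from mako.template import Template

SEP = ";"  # hypothetical multi-value separator

def render_attribute(template_text, data):
    # Mirrors the method above, minus the surrounding class and imports.
    try:
        return Template(template_text).render(**data).split(SEP)
    except (NameError, TypeError):
        return []

print(render_attribute("${given_name};${surname}",
                       {"given_name": "Ada", "surname": "Lovelace"}))  # ['Ada', 'Lovelace']
print(render_attribute("${given_name};${surname}",
                       {"given_name": "Ada"}))                         # []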
const_pack_template = Template( dedent("""\ [require] ${func.requirements} [vertex shader] #ifndef GL_ES #extension GL_ARB_shading_language_packing : require #endif const vec4 red = vec4(1, 0, 0, 1); const vec4 green = vec4(0, 1, 0, 1); in vec4 vertex; out vec4 vert_color; void main() { ${func.result_precision} uint actual; gl_Position = vertex; vert_color = green; % for io in func.inout_seq: actual = ${func.name}(${func.vector_type}(${', '.join(io.input)})); if (true % for u in sorted(set(io.valid_outputs)): && actual != ${u} % endfor ) { vert_color = red; } % endfor } [fragment shader] in vec4 vert_color; out vec4 frag_color; void main() { frag_color = vert_color; } [vertex data] vertex/float/2 -1.0 -1.0 1.0 -1.0 1.0 1.0 -1.0 1.0 [test] draw arrays GL_TRIANGLE_FAN 0 4 probe all rgba 0.0 1.0 0.0 1.0 """))
def sysadmin_dash(request):
    output = Template(filename="template.html").render()
    return HttpResponse(output)  # "<html>Hello</html>")
fftsrc = Template(""" #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> typedef struct { %for t, n in parameters: ${t} ${n}; %endfor } param_t; __constant__ param_t callback_params; #define checkCudaErrors(val) __checkCudaErrors__ ( (val), #val, __FILE__, __LINE__ ) template <typename T> inline void __checkCudaErrors__(T code, const char *func, const char *file, int line) { if (code) { fprintf(stderr, "CUDA error at %s:%d code=%d \\"%s\\" \\n", file, line, (unsigned int)code, func); cudaDeviceReset(); exit(EXIT_FAILURE); } } % if input_callback: ${input_callback} __device__ cufftCallbackLoadC input_callback = in_call; % endif % if output_callback: ${output_callback} __device__ cufftCallbackStoreC output_callback = out_call; % endif extern "C" cufftHandle* create_plan(unsigned int size){ cufftHandle* plan = new cufftHandle; size_t work_size; cufftCreate(plan); checkCudaErrors(cufftMakePlan1d(*plan, size, CUFFT_C2C, 1, &work_size)); % if input_callback: cufftCallbackLoadC h_input_callback; checkCudaErrors(cudaMemcpyFromSymbol(&h_input_callback, input_callback, sizeof(h_input_callback))); checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_input_callback, CUFFT_CB_LD_COMPLEX, 0)); % endif % if output_callback: cufftCallbackStoreC h_output_callback; checkCudaErrors(cudaMemcpyFromSymbol(&h_output_callback, output_callback, sizeof(h_output_callback))); checkCudaErrors(cufftXtSetCallback(*plan, (void **) &h_output_callback, CUFFT_CB_ST_COMPLEX, 0)); % endif return plan; } extern "C" void execute(cufftHandle* plan, cufftComplex* in, cufftComplex* out, param_t* p){ if (p != NULL) checkCudaErrors(cudaMemcpyToSymbolAsync(callback_params, p, sizeof(param_t), 0, cudaMemcpyHostToDevice, 0)); checkCudaErrors(cufftExecC2C(*plan, in, out, CUFFT_INVERSE)); } """)
TEMPLATE_H = Template("""\ /* This file generated from ${filename}, don't edit directly. */ struct anv_instance_dispatch_table { union { void *entrypoints[${len(instance_entrypoints)}]; struct { % for e in instance_entrypoints: % if e.guard is not None: #ifdef ${e.guard} PFN_${e.name} ${e.name}; #else void *${e.name}; # endif % else: PFN_${e.name} ${e.name}; % endif % endfor }; }; }; struct anv_physical_device_dispatch_table { union { void *entrypoints[${len(physical_device_entrypoints)}]; struct { % for e in physical_device_entrypoints: % if e.guard is not None: #ifdef ${e.guard} PFN_${e.name} ${e.name}; #else void *${e.name}; # endif % else: PFN_${e.name} ${e.name}; % endif % endfor }; }; }; struct anv_device_dispatch_table { union { void *entrypoints[${len(device_entrypoints)}]; struct { % for e in device_entrypoints: % if e.guard is not None: #ifdef ${e.guard} PFN_${e.name} ${e.name}; #else void *${e.name}; # endif % else: PFN_${e.name} ${e.name}; % endif % endfor }; }; }; extern const struct anv_instance_dispatch_table anv_instance_dispatch_table; %for layer in LAYERS: extern const struct anv_physical_device_dispatch_table ${layer}_physical_device_dispatch_table; %endfor %for layer in LAYERS: extern const struct anv_device_dispatch_table ${layer}_device_dispatch_table; %endfor % for e in instance_entrypoints: % if e.alias and e.alias.enabled: <% continue %> % endif % if e.guard is not None: #ifdef ${e.guard} % endif ${e.return_type} ${e.prefixed_name('anv')}(${e.decl_params()}); % if e.guard is not None: #endif // ${e.guard} % endif % endfor % for e in physical_device_entrypoints: % if e.alias: <% continue %> % endif % if e.guard is not None: #ifdef ${e.guard} % endif % for layer in LAYERS: ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()}); % endfor % if e.guard is not None: #endif // ${e.guard} % endif % endfor % for e in device_entrypoints: % if e.alias and e.alias.enabled: <% continue %> % endif % if e.guard is not None: #ifdef ${e.guard} % endif % for layer in LAYERS: ${e.return_type} ${e.prefixed_name(layer)}(${e.decl_params()}); % endfor % if e.guard is not None: #endif // ${e.guard} % endif % endfor """, output_encoding='utf-8')
def test_names_on_context(self):
    for name in ('context', 'loop', 'UNDEFINED'):
        assert_raises_message(
            exceptions.NameConflictError,
            r"Reserved words passed to render\(\): %s" % name,
            Template("x").render, **{name: 'foo'}
        )
def mako(request):
    # Load the template
    template = request.GET['template']
    mytemplate = Template(template)
    return HttpResponse(mytemplate.render())
def test_future_import(self):
    t = Template("${ x / y }", future_imports=["division"])
    assert result_lines(t.render(x=12, y=5)) == ["2.4"]
_TEMPLATE_H = Template(COPYRIGHT + """ #ifndef RADV_EXTENSIONS_H #define RADV_EXTENSIONS_H enum { RADV_INSTANCE_EXTENSION_COUNT = ${len(instance_extensions)}, RADV_DEVICE_EXTENSION_COUNT = ${len(device_extensions)}, }; struct radv_instance_extension_table { union { bool extensions[RADV_INSTANCE_EXTENSION_COUNT]; struct { %for ext in instance_extensions: bool ${ext.name[3:]}; %endfor }; }; }; struct radv_device_extension_table { union { bool extensions[RADV_DEVICE_EXTENSION_COUNT]; struct { %for ext in device_extensions: bool ${ext.name[3:]}; %endfor }; }; }; const VkExtensionProperties radv_instance_extensions[RADV_INSTANCE_EXTENSION_COUNT]; const VkExtensionProperties radv_device_extensions[RADV_DEVICE_EXTENSION_COUNT]; const struct radv_instance_extension_table radv_supported_instance_extensions; struct radv_physical_device; void radv_fill_device_extension_table(const struct radv_physical_device *device, struct radv_device_extension_table* table); #endif """)
def _compile_from_file(self, path, filename):
    self.path = path
    return Template("foo bar").module
def test_expression_declared(self): t = Template(""" ${",".join([t for t in ("a", "b", "c")])} """, strict_undefined=True) eq_(result_lines(t.render()), ['a,b,c']) t = Template(""" <%self:foo value="${[(val, n) for val, n in [(1, 2)]]}"/> <%def name="foo(value)"> ${value} </%def> """, strict_undefined=True) eq_(result_lines(t.render()), ['[(1, 2)]']) t = Template(""" <%call expr="foo(value=[(val, n) for val, n in [(1, 2)]])" /> <%def name="foo(value)"> ${value} </%def> """, strict_undefined=True) eq_(result_lines(t.render()), ['[(1, 2)]']) l = TemplateLookup(strict_undefined=True) l.put_string("i", "hi, ${pageargs['y']}") l.put_string( "t", """ <%include file="i" args="y=[x for x in range(3)]" /> """) eq_(result_lines(l.get_template("t").render()), ['hi, [0, 1, 2]']) l.put_string( 'q', """ <%namespace name="i" file="${(str([x for x in range(3)][2]) + 'i')[-1]}" /> ${i.body(y='x')} """) eq_(result_lines(l.get_template("q").render()), ['hi, x']) t = Template(""" <% y = lambda q: str(q) %> ${y('hi')} """, strict_undefined=True) eq_(result_lines(t.render()), ["hi"])
unreachable("unknown bit width"); } return _dst_val; } % endfor nir_const_value nir_eval_const_opcode(nir_op op, unsigned num_components, unsigned bit_width, nir_const_value *src) { switch (op) { % for name in sorted(opcodes.iterkeys()): case nir_op_${name}: { return evaluate_${name}(num_components, bit_width, src); break; } % endfor default: unreachable("shouldn't get here"); } }""" from nir_opcodes import opcodes from mako.template import Template print Template(template).render(opcodes=opcodes, type_sizes=type_sizes, type_has_size=type_has_size, type_add_size=type_add_size, get_const_field=get_const_field)
def test_via_template(self):
    t = Template("foo", lexer_cls=self._fixture())
    self._test_custom_lexer(t)
def main(run,project_id,sample_names,config_file,Map_Stat,Read_Dist,FPKM,rRNA_table): TEMPLATE="""\ RNA-seq analysis report for ${project_id} ================================= ${latex_opt} Summary ------------------------- **Project name:** ${project_id} (UPPMAX project ${uppnex}) **Samples:** ${samplenames} **Run name:** ${runname} **Mapping:** ${mapping} **Duplicate removal:** ${dup_rem} **Read count:** ${read_count} **RPKM/FPKM values:** ${quantifyer} **Result directories on UPPMAX:** /proj/${uppnex}/INBOX/${project_id}/analysis/alignments (BAM files), /proj/${uppnex}/INBOX/${project_id}/analysis/quantification (FPKM files) .. raw:: latex \clearpage Results -------------------------""" if Map_Stat: TEMPLATE=TEMPLATE+""" Mapping statistics ^^^^^^^^^^^^^^^ ${Mapping_statistics} Comments ~~~~~~~~ **tot # read pairs:** The total number of read pairs indicates the total number of sequenced paired-end reads. Since a paired-end read is made up of two sequenced fragments (mates), the total number of sequenced 100-bp regions is twice the number shown in this column. **% mapped reads:** The number of fragments that are mapped relative to the total number of sequenced fragments. **% reads left after dup rem:** We remove duplicate reads (paired end reads where both mates map to the same loci as both mates in a different paired-end read because these are likely to be artifacts caused by PCR amplification or over-sequencing. Aligned files in BAM format with duplicates removed can be found in /proj/${uppnex}/INBOX/${project_id}/analysis/alignments. .. raw:: latex \clearpage """ TEMPLATE=TEMPLATE+""" Expression values ^^^^^^^^^^^^^^^^^ The /proj/${uppnex}/INBOX/${project_id}/analysis/quantification folder contains FPKM values calculated using the Cufflinks program using ENSEMBL annotation of genes and transcripts for each sample. These files also contain the upper and lower limits of the confidence interval for the FPKM estimate. FPKM values are the paired-end equivalent of RPKM (Reads Per Kilobase per Million mapped reads; the standard measure for gene expression in RNA-seq.) There is also a single fpkm_table.txt file, which contains all of the FPKM values. This can be opened in Excel or a regular text processing application. For analyzing differential expression of genes or transcripts, it may be useful to have the raw read counts (the number of sequences that map to each gene/transcript) as well. These are calculated using the HTSeq software and are collected into a table called count_table.txt. .. raw:: latex \clearpage """ if FPKM: TEMPLATE=TEMPLATE+""" FPKM heatmap ^^^^^^^^^^^^^^^^^ This heatmap shows the (Pearson) correlation between FPKM values of samples. ${FPKM_heatmap} .. raw:: latex \clearpage FPKM PCA ^^^^^^^^^^^^^^^^^ This PCA (principal component analysis) score plot has the samples plotted according to their scores for the two principal components that explain the largest amount of variance in the FPKM data table. The number after 'expl var' in the axis labels tells you how much of the variance that is explained by each component. Similar samples should, in theory, cluster together in the PCA plot. PCA is a way to compress the information in your high-dimensional data matrix so that it can be plotted in two dimensions. ${FPKM_PCAplot} .. 
raw:: latex \clearpage """ if Read_Dist: TEMPLATE=TEMPLATE+""" Read distribution ^^^^^^^^^^^^^^^^^ This table contain information about the extent to which sequences from each sample mapped to different structural parts of genes, like coding exons, untranslated regions, and transcription start sites. The actual number itself is less important than the relative values for the different kinds of regions. For a normal RNA-seq experiment you should have a higher value in the CDS Exon column than in the others, for example. "CDS Exon" means "coding sequence exons", "UTR" stands for "untranslated region", "TES" stands for "transcription end site", "TSS" stands for "transcription start site". "Intronic regions" should be interpreted as "intronic or intergenic regions". Perhaps the most easily interpretable column is the final column, mRNA fraction, which gives the fraction [0-1] of sequences that mapped to ENSEMBL-annotated mRNA (including coding regions and UTRs). While this fraction is not completely accurate (because ENSEMBL doe not completely describe the transcriptome), it is a useful summary statistic which should be relatively high for an mRNA-seq experiment, typically above 0.8. ${Read_Distribution} .. raw:: latex \clearpage """ if rRNA_table: TEMPLATE=TEMPLATE+""" Quantification of rRNA present in the samples ^^^^^^^^^^^^^^^^^^^^^ ${rRNA_table} .. raw:: latex \clearpage """ sphinx_defs = [] if config_file: config = load_config(config_file) else: config = {} sphinx_defs.append("('%s', '%s_analysis.tex', 'RNA-seq Analysis Report', u'SciLifeLab Stockholm', 'howto'),\n" % (project_id, project_id)) projectfile = "%s.mako" % (project_id) fp = open(projectfile, "w") fp.write(TEMPLATE) fp.close() mylookup = TemplateLookup(directories=['./']) tmpl = Template(filename=projectfile, lookup=mylookup) proj_conf = { 'id' : project_id, 'run':run, 'config' : config, 'samples': sample_names.split(',') } d = generate_report(proj_conf) rstfile = "%s.rst" % (project_id) fp = open(rstfile, "w") fp.write(tmpl.render(**d)) fp.close() sphinxconf = os.path.join(os.getcwd(), "conf.py") if not os.path.exists(sphinxconf): logger.warn("no sphinx configuration file conf.py found: you have to edit conf.py yourself!") else: fp = open(sphinxconf) lines = fp.readlines() fp.close() sdout = [] modify_conf = False for sd in sphinx_defs: if not sd in lines: sdout.append(sd) modify_conf = True if modify_conf: i = lines.index("latex_documents = [\n") newconf = lines[:i+3] + sdout + lines[i+3:] fp = open("conf.py", "w") fp.write("".join(newconf)) fp.close()
def find_template(self, options):
    dirpath = os.path.dirname(os.path.abspath(__file__))
    filename = dirpath + '/gen/{}{}_template'.format(
        'print_' if options.print_wire else '', options.page)
    return Template(filename=filename)
class NewTask(): def __init__(self): self.repository_path = os.environ['SCIPIPE_HEURISTICS'] def parse_command_line(self, argv): script_args = '' for idx, arg in enumerate(argv): if 'new_pipeline_task.py' in arg: script_args = argv[idx + 1:] parser = argparse.ArgumentParser(prog="new_pipeline_task", add_help=False) parser.add_argument( '--package', help= "Pipeline package. One of 'h', 'hif', 'hifa', 'hifa', 'hifv', or 'hsd'.", type=str, choices=['h', 'hif', 'hifa', 'hifa', 'hifv', 'hsd'], required=True) parser.add_argument('--task', help='New task name', type=str, required=True) parser.add_argument( '--module', help="Optional module name. e.g. if task is 'foo' " "and module is 'bar' then 'tasks/foo/bar.py' is created", type=str, default='', required=False) try: args = parser.parse_args(script_args) except: parser.print_help() return '', '', '' area = args.package task_name = args.task if args.module: module_name = args.module else: module_name = task_name return area, task_name, module_name def create(self, area, task_name, module_name): from mako.template import Template task_name = task_name.lower() # ----------------------------------------------------------------------------- # define the directories for new files # ----------------------------------------------------------------------------- task_dir = '{repo}/pipeline/{area}/tasks/{task}'.format( repo=self.repository_path, area=area, task=task_name) cli_dir = '{repo}/pipeline/{area}/cli'.format( repo=self.repository_path, area=area) # ----------------------------------------------------------------------------- # create the task directory # ----------------------------------------------------------------------------- print('1.') print('\tCreating {f}'.format(f=task_dir)) try: os.mkdir(task_dir) except OSError, ee: if ee.errno == 17: pass else: raise # ----------------------------------------------------------------------------- # define the new files # ----------------------------------------------------------------------------- module_file = '{tdir}/{task}.py'.format(tdir=task_dir, task=module_name) init_file = '{tdir}/__init__.py'.format(tdir=task_dir) cli_file = '{cdir}/task_{area}_{task}.py'.format(cdir=cli_dir, area=area, task=task_name) cli_xml = '{cdir}/{area}_{task}.xml'.format(cdir=cli_dir, area=area, task=task_name) weblog_mako = '{repo}/pipeline/{area}/templates/{task}.mako'.format( repo=self.repository_path, area=area, task=task_name) # ----------------------------------------------------------------------------- # instantiate the templates # ----------------------------------------------------------------------------- module_template = Template( filename='{repo}/pipeline/infrastructure/new_pipeline_task/' 'pipeline_task_module.mako'.format(repo=self.repository_path)) cli_template = Template( filename='{repo}/pipeline/infrastructure/new_pipeline_task/' 'pipeline_cli_module.mako'.format(repo=self.repository_path)) cli_xml_template = Template( filename='{repo}/pipeline/infrastructure/new_pipeline_task/' 'pipeline_cli_xml.mako'.format(repo=self.repository_path)) init_template = Template( filename='{repo}/pipeline/infrastructure/new_pipeline_task/' 'pipeline_task_init.mako'.format(repo=self.repository_path)) # ----------------------------------------------------------------------------- # create the files # ----------------------------------------------------------------------------- print('\tCreating {f}'.format(f=module_file)) with open(module_file, 'w+') as fd: fd.writelines(module_template.render(taskname=task_name)) 
print('\tCreating {f}'.format(f=init_file)) with open(init_file, 'w+') as fd: fd.writelines( init_template.render(taskname=task_name, modulename=module_name)) print('\tCreating {f}'.format(f=cli_file)) with open(cli_file, 'w+') as fd: fd.writelines(cli_template.render(package=area, taskname=task_name)) print('\tCreating {f}'.format(f=cli_xml)) with open(cli_xml, 'w+') as fd: fd.writelines( cli_xml_template.render(package=area, taskname=task_name)) # print('\tCreating {f}'.format(f=weblog_mako)) # with open(weblog_mako, 'w+') as fd: # fd.writelines(web_template.render(taskname=task_name)) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- print('''2. Check infrastructure/jobrequest.py to see if it lists all CASA tasks needed by your new pipeline task. If not, add them to the CASATaskGenerator class. ''') # ----------------------------------------------------------------------------- # Add import to package __init__.py # ----------------------------------------------------------------------------- package_init_file = "{repo}/pipeline/{area}/tasks/__init__.py".format( repo=self.repository_path, area=area) with open(package_init_file) as fd: init_file_data = fd.readlines() last_import_line = 0 task_already_in_init = False # look for the last "from " import line and # add the new module import on the next line for idx, line in enumerate(init_file_data): if task_name in line: task_already_in_init = True elif line.startswith('from '): last_import_line = idx if task_already_in_init is False: init_file_data.insert( last_import_line + 1, 'from .{task} import {task_class}\n'.format( task=task_name, task_class=task_name.capitalize())) print('\tAdded "from .{task} import {task_class}" to {pfile}'.format( task=task_name, task_class=task_name.capitalize(), pfile=package_init_file)) temp_init_file = tempfile.NamedTemporaryFile(delete=False) with open(temp_init_file.name, "w+") as fd: fd.writelines(init_file_data) shutil.copy(temp_init_file.name, package_init_file) os.unlink(temp_init_file.name) # ----------------------------------------------------------------------------- # Add task to casataskdict # ----------------------------------------------------------------------------- casa_task_dictionary_file = "{repo}/pipeline/infrastructure/casataskdict.py".format( repo=self.repository_path) with open(casa_task_dictionary_file) as fd: casataskdict_data = fd.readlines() for idx, line in enumerate(casataskdict_data): match = re.match( '(\s+{area}_tasks.)([a-zA-Z]+)([^:]+)(:)'.format(area=area), line) if task_name in line: break if match and match.group(2) > task_name.title(): casataskdict_data.insert( idx, " {area}_tasks.{task_camel:24}" ": '{area}_{task}',\n".format(area=area, task_camel=task_name.title(), task=task_name)) print( '\tAdding {task} to classToCASATask in {ctd_file}'.format( task=task_name, ctd_file=casa_task_dictionary_file)) break temp_taskdict_file1 = tempfile.NamedTemporaryFile(delete=False) temp_taskdict_file2 = tempfile.NamedTemporaryFile(delete=False) with open(temp_taskdict_file1.name, "w+") as fd: fd.writelines(casataskdict_data) with open(temp_taskdict_file1.name) as fd: casataskdict_data = fd.readlines() for idx, line in enumerate(casataskdict_data): match = re.match( "(\s+'{area}_)([a-zA-Z]+)(': ')".format(area=area), line) if match and match.group(2) > task_name.title(): if task_name not in line: casataskdict_data.insert( idx, " '{area}_{task}': " "'{task_camel}',\n".format( 
area=area, task_camel=task_name.title(), task=task_name)) print( '\tAdding {task} to CasaTaskDict in {ctd_file}'.format( task=task_name, ctd_file=casa_task_dictionary_file)) break with open(temp_taskdict_file2.name, "w+") as fd: fd.writelines(casataskdict_data) shutil.copy(temp_taskdict_file2.name, casa_task_dictionary_file) os.unlink(temp_taskdict_file1.name) os.unlink(temp_taskdict_file2.name) # ----------------------------------------------------------------------------- # Say something about call library # ----------------------------------------------------------------------------- print('''3. Consider adding code to infrastructure/callibrary.py if needed. ''') # ----------------------------------------------------------------------------- # Say something about file namer # ----------------------------------------------------------------------------- print('''4. Consider adding code to infrastructure/filenamer.py if needed for calibration tables. ''') # ----------------------------------------------------------------------------- # Web log rendering # ----------------------------------------------------------------------------- print('''5. Consider adding code to infrastructure/displays/ if needed for the web log. ''') print('''6. Later you might need to add a renderer.py file to {area}/tasks/{task}/ if needed for the web log. '''.format(area=area, task=task_name)) # ----------------------------------------------------------------------------- # mako template for weblog # ----------------------------------------------------------------------------- mako_template = '{repo}/pipeline/infrastructure/new_pipeline_task/pipeline_weblog_task.mako'.format( repo=self.repository_path) print('7.') print('\tCreating {f}'.format(f=weblog_mako)) temp_mako = tempfile.NamedTemporaryFile(delete=False) with open(mako_template) as infile, open(temp_mako.name, 'w+') as outfile: for line in infile: # print(line.replace("$$mytaskname$$", task_name.capitalize())) outfile.write( line.replace("$$mytaskname$$", task_name.capitalize())) shutil.copy(temp_mako.name, weblog_mako) os.unlink(temp_mako.name) print( '\n\tNow use runsetup to make the new pipeline task visible within CASA.\n' )
def __call__(self, filename, options, fileobj=None, lineno=0): del fileobj, lineno init_region({"backend": "dogpile.cache.memory"}, "std") init_region({"backend": "dogpile.cache.memory"}, "obj") int_filename = filename if re.match("^" + re.escape("./{}/templates".format(self.config["package"])), filename): try: empty_template = Template("") # nosec class Lookup(TemplateLookup): @staticmethod def get_template(uri): del uri # unused return empty_template class MyTemplate(MakoTemplate): tpl = None def prepare(self, **kwargs): options.update({"input_encoding": self.encoding}) lookup = Lookup(**kwargs) if self.source: self.tpl = Template(self.source, lookup=lookup, **kwargs) # nosec else: self.tpl = Template( # nosec uri=self.name, filename=self.filename, lookup=lookup, **kwargs ) try: processed = template( filename, { "request": _Request(self.config), "lang": "fr", "debug": False, "extra_params": {}, "permalink_themes": "", "fulltextsearch_groups": [], "wfs_types": [], "_": lambda x: x, }, template_adapter=MyTemplate, ) int_filename = os.path.join(os.path.dirname(filename), "_" + os.path.basename(filename)) with open(int_filename, "wb") as file_open: file_open.write(processed.encode("utf-8")) except Exception: print( colorize("ERROR! Occurred during the '{}' template generation".format(filename), RED) ) print(colorize(traceback.format_exc(), RED)) if os.environ.get("IGNORE_I18N_ERRORS", "FALSE") == "TRUE": # Continue with the original one int_filename = filename else: raise except Exception: print(traceback.format_exc()) message_str = subprocess.check_output(["node", "tools/extract-messages.js", int_filename]).decode( "utf-8" ) if int_filename != filename: os.unlink(int_filename) try: messages = [] for contexts, message in json.loads(message_str): for context in contexts.split(", "): messages.append(Message(None, message, None, [], "", "", context.split(":"))) return messages except Exception: print(colorize("An error occurred", RED)) print(colorize(message_str, RED)) print("------") raise