def _render_html_response(self, config):
    """Render the collected results and log lines as a full HTML page.

    Loads the 'html/page' template, executes any '{# set VAR = MODE CMD #}'
    directive lines at the top of it (each one runs a mailpile command in a
    silent session and stores the rendered result as a template variable),
    then expands the remaining template with the results and log output.
    """
    page = self._html_template(config, ['html/page'], elems=['results', 'logged'])
    # A throwaway session with a silent UI, so directive commands render to
    # a string instead of the user's terminal.
    quiet = Session(config)
    quiet.ui = SilentInteraction()
    while page.startswith('{# set'):
        # Consume one directive line from the top of the template.
        load, page = page.split('\n', 1)
        # Strip the leading '{# ' and trailing ' #}', then split into:
        # 'set', variable name, '=', render mode, command (rest of line).
        _set, vname, _eq, mode, cmd = load.strip()[3:-2].split(None, 4)
        # Split the command from its argument string, if any.
        cmd, arg = (' ' in cmd) and cmd.split(' ', 1) or (cmd, '')
        quiet.ui.render_mode = mode
        result = mailpile.commands.Action(quiet, cmd, arg)
        self.html_variables[vname] = quiet.ui.display_result(result)
    return jsontemplate.expand(
        page,
        default_dict(
            self.html_variables,
            {
                # Each queued result becomes its own div.
                'results': '\n'.join([
                    '<div class="result">%s</div>' % r for r in self.results
                ]),
                # self.logged entries are (level, message) pairs fed to the
                # two %s slots.
                'logged': '\n'.join(
                    ['<p class="ll_%s">%s</p>' % l for l in self.logged])
            }),
        **JSONTEMPLATE_ARGS)
def render_html(self, cfg, tpl_names, data):
    """Expand the named HTML templates against *data* and return the markup."""
    template = self._html_template(cfg, tpl_names, elems=data.keys())
    return jsontemplate.expand(template, data, undefined_str='')
def gen_index_entity_global(data_index_entity, global_config): for entity_type in data_index_entity: print 'process {}'.format(entity_type) json_data = {entity_type: [] } for entity_uri in sorted(data_index_entity[entity_type]): entity_conf = data_index_entity[entity_type][entity_uri] json_data_sub = {} json_data_sub["entity_uri"] = entity_uri json_data_sub["conf"] = entity_conf json_data_sub["name"] = entity_conf[0]["name"] json_data[entity_type].append(json_data_sub) filename_output = "{}/data/www/index-{}.json".format(global_config["home"], entity_type) with codecs.open(filename_output,"w","utf-8") as f: json.dump(json_data, f, indent=4) id_html = "index-{}".format(entity_type) json_template = resource_string('resources.files', '{}.jsont'.format(id_html)) content= jsontemplate.expand(json_template, json_data) filename_output = "{}/data/www/index-{}.html".format (global_config["home"], entity_type) with codecs.open(filename_output,"w","utf-8") as f: f.write(u'\ufeff') f.write(content)
def generate_html_report(results_dir, relative_links=True):
    """
    Render a job report HTML. All CSS and javascript are inlined, for more
    convenience.

    :param results_dir: Path to the results directory.
    """
    json_path = generate_json_file(results_dir, relative_links)
    # Context managers close every handle (the original leaked them all).
    with open(json_path, 'r') as json_fo:
        job_data = json.load(json_fo)
    templates_path = settings.settings.get_value("CLIENT", "job_templates_dir",
                                                 default="")
    if not templates_path:
        templates_path = os.path.join(common.client_dir, "shared", "templates")

    def _read_asset(*parts):
        # Read one template asset relative to templates_path, closing it.
        with open(os.path.join(templates_path, *parts), "r") as fo:
            return fo.read()

    context = {
        'css': _read_asset("media", "css", "report.css"),
        # NOTE(review): 'table_js' receives mktree.js and 'maketree_js'
        # receives table.js, mirroring the original assignment — presumably
        # the template key names are just swapped; confirm before renaming.
        'table_js': _read_asset("media", "js", "mktree.js"),
        'maketree_js': _read_asset("media", "js", "table.js"),
        'job_data': job_data,
    }
    return jsontemplate.expand(_read_asset("report.jsont"), context)
def ratesubmit(req):
    """Forward an image rating from the request form to the rating service
    and return the rating echoed back by it."""
    form = util.FieldStorage(req, keep_blank_values=1)
    payload = urllib.urlencode({
        'imagekey': form.get("id", None),
        'rating': form.get("rating", None),
    })
    response = urllib.urlopen("http://imaj.lddi.org:8010/ratesubmit", payload)
    return jsontemplate.expand('{rating}', json.read(response.read()))
def MakeToc(blog_template):
    """Build a table-of-contents fragment from the headings found in
    blog_template."""
    headings = [
        dict(target=match.group('target'), name=match.group('name'))
        for match in _HEADING_RE.finditer(blog_template)
    ]
    return jsontemplate.expand(TOC_TEMPLATE, {'headings': headings})
def index(self):
    """Render the index page listing all servers, striping the rows."""
    message = ""
    row_style = cycle(["odd", "even"])
    try:
        servers = self.list()
        for server in servers:
            server["style"] = row_style.next()
    except Exception as e:
        # Fall back to an empty listing and surface the error text.
        message = "Error: " + str(e)
        servers = []
    template = open(self.base + "/templates/index.html").read()
    return jsontemplate.expand(template, {"servers": servers, "msg": message})
def index(self):
    '''Render the index page listing all servers, striping the rows.'''
    error_message = ''
    stripes = cycle(["odd", "even"])
    try:
        servers = self.list()
        for entry in servers:
            entry['style'] = stripes.next()
    except Exception as exc:
        # Show the failure on the page instead of a blank 500.
        error_message = "Error: " + str(exc)
        servers = []
    page_source = open(self.base + '/templates/index.html').read()
    return jsontemplate.expand(page_source,
                               {'servers': servers, 'msg': error_message})
def build_single_file(infile, outfile, css_directory, extra_extensions):
    """Convert infile (and any files discovered from it) into a single
    self-contained HTML file at outfile, inlining screen and print CSS.

    :param infile: Root input file to collect sources from.
    :param outfile: Destination path for the rendered HTML.
    :param css_directory: Directory holding style.css / print.css.
    :param extra_extensions: Extra markup extensions passed to _convert.
    """
    files = []
    _find_files(files, infile)
    text = _convert(files, extra_extensions)
    html = jsontemplate.expand(html_wrap, {
        'content': text,
        'screen_css': _load_css('style.css', css_directory),
        'print_css': _load_css('print.css', css_directory)
    })
    # Context manager so the handle is flushed/closed even if write fails
    # (the original `open(outfile, 'w').write(...)` leaked it).
    with open(outfile, 'w') as out:
        out.write(html.encode('utf-8'))
def run_generate_csv_and_html(global_config, allowed_year=[]): list_conference =[] data_index_entity ={} for year in range(2001, 2015): #if year not in [2014]: # continue conference = { "id-swsa":"ISWC{}".format(year), "id": "iswc-{}".format(year) } list_conference.append(conference) if len(allowed_year)>0: if year not in allowed_year: continue print "processing {}".format(conference["id"]) for id_query in ["conf-person","conf-paper","conf-event", "index-person", "index-organization"]: filename_rdf = "%s/data/www/%s-complete.ttl" % (global_config["home"], conference["id"]) if "conf-paper" == id_query: filename_rdf = "%s/data/www/%s-conf-paper.ttl" % (global_config["home"], conference["id"]) filename_query = "resources/files/%s.sparql" % (id_query) filename_output = "%s/data/www/%s-%s.csv" % (global_config["home"], conference["id"], id_query) ConfData.sparql_rdf2csv(filename_rdf, filename_query, filename_output) ConfData.csv2html(conference["id"], global_config) data_index_entity_local ={} ConfData.update_index_data(conference["id"], global_config, data_index_entity, data_index_entity_local) ConfData.gen_index_entity_one_conf(data_index_entity_local, global_config, conference['id']) #write index_page filename_output = "{}/data/www/index.html".format (global_config["home"]) json_index = {"conferences": list_conference} id_html = "index" json_template = resource_string('resources.files', '{}.jsont'.format(id_html)) content= jsontemplate.expand(json_template, json_index) with codecs.open(filename_output,"w","utf-8") as f: f.write(u'\ufeff') f.write(content) #write index_person and index_organization ConfData.gen_index_entity_global(data_index_entity, global_config)
def _run(self, options): """Compile templates and run extra code Args: options: The JSON data """ for root, dirs, files in os.walk(self.template_dir): for name in files: newfilename = jsontemplate.expand(name, options) oldpath = join(root, name) newpath = jsontemplate.expand(join(self.project_dir, oldpath.replace(self.template_dir+os.path.sep, '')), options) if os.path.exists(newpath) == False: try: oldfile = open(oldpath, "r") expanded = jsontemplate.FromFile(oldfile).expand(options, undefined_str='') oldfile.close() newfile = open(newpath, "w") newfile.write(expanded) newfile.close() except: print "Failed to compile file: "+oldpath print "\tcreate: "+newpath else: print "\texists: "+newpath for name in dirs: oldpath = join(root, name) newpath = jsontemplate.expand(join(self.project_dir, oldpath.replace(self.template_dir+os.path.sep, '')), options) if os.path.exists(newpath) == False: os.mkdir(newpath) print "\tcreate: "+newpath else: print "\texists: "+newpath print 'Done.'
def generate_html_report(results_dir, relative_links=True):
    """
    Render a job report HTML. All CSS and javascript are inlined, for more
    convenience.

    :param results_dir: Path to the results directory.
    :raise ValueError: If no templates directory can be located.
    """
    json_path = generate_json_file(results_dir, relative_links)
    # Context managers close every handle (the original leaked them all).
    with open(json_path, 'r') as json_fo:
        job_data = json.load(json_fo)
    templates_path = settings.settings.get_value("CLIENT", "job_templates_dir",
                                                 default=None)
    if not templates_path:
        # Fall back to whichever layout this client tree provides.
        if hasattr(common, 'autotest_dir'):
            templates_path = os.path.join(common.autotest_dir, "client",
                                          "shared", "templates")
        elif hasattr(common, 'client_dir'):
            templates_path = os.path.join(common.client_dir, "shared",
                                          "templates")
    if templates_path is None:
        raise ValueError(
            'Could not find json templates directory to create report')

    def _read_asset(*parts):
        # Read one template asset relative to templates_path, closing it.
        with open(os.path.join(templates_path, *parts), "r") as fo:
            return fo.read()

    context = {
        'css': _read_asset("media", "css", "report.css"),
        # NOTE(review): 'table_js' receives mktree.js and 'maketree_js'
        # receives table.js, mirroring the original assignment — presumably
        # the template key names are just swapped; confirm before renaming.
        'table_js': _read_asset("media", "js", "mktree.js"),
        'maketree_js': _read_asset("media", "js", "table.js"),
        'job_data': job_data,
    }
    return jsontemplate.expand(_read_asset("report.jsont"), context)
def _render_html_response(self, config):
    """Render the collected results and log lines as a full HTML page.

    Loads the 'html/page' template, executes any '{# set VAR = MODE CMD #}'
    directive lines at the top of it (each one runs a mailpile command in a
    silent session and stores the rendered result as a template variable),
    then expands the remaining template with the results and log output.
    """
    page = self._html_template(config, ['html/page'], elems=['results', 'logged'])
    # A throwaway session with a silent UI, so directive commands render to
    # a string instead of the user's terminal.
    quiet = Session(config)
    quiet.ui = SilentInteraction()
    while page.startswith('{# set'):
        # Consume one directive line from the top of the template.
        load, page = page.split('\n', 1)
        # Strip the leading '{# ' and trailing ' #}', then split into:
        # 'set', variable name, '=', render mode, command (rest of line).
        _set, vname, _eq, mode, cmd = load.strip()[3:-2].split(None, 4)
        # Split the command from its argument string, if any.
        cmd, arg = (' ' in cmd) and cmd.split(' ', 1) or (cmd, '')
        quiet.ui.render_mode = mode
        result = mailpile.commands.Action(quiet, cmd, arg)
        self.html_variables[vname] = quiet.ui.display_result(result)
    return jsontemplate.expand(page, default_dict(self.html_variables, {
        # Each queued result becomes its own div.
        'results': '\n'.join(['<div class="result">%s</div>' % r
                              for r in self.results]),
        # self.logged entries are (level, message) pairs for the two %s slots.
        'logged': '\n'.join(['<p class="ll_%s">%s</p>' % l
                             for l in self.logged])
    }), undefined_str='')
def gen_index_entity_one_conf(data_index_entity, global_config, id_data):
    """Write one per-conference index HTML table for every entity type."""
    for entity_type in data_index_entity:
        entities = data_index_entity[entity_type]
        rows = []
        # Number the rows 1..N in sorted-URI order.
        for line_number, entity_uri in enumerate(sorted(entities), 1):
            row = entities[entity_uri][0]
            row["line-number"] = line_number
            row["entity_uri"] = entity_uri
            rows.append(row)
        template_id = "index-table-{}".format(entity_type)
        template = resource_string('resources.files',
                                   '{}.jsont'.format(template_id))
        html = jsontemplate.expand(template, {entity_type: rows})
        out_path = "{}/data/www/{}-index-{}.html".format(
            global_config["home"], id_data, entity_type)
        with codecs.open(out_path, "w", "utf-8") as f:
            f.write(u'\ufeff')  # UTF-8 BOM
            f.write(html)
privatekeyfile = self.private_key mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile) ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host, username='******', pkey=mykey, timeout=2) stdin, stdout, stderr = \ ssh.exec_command('tail -n %d "%s"' % (int(size), fn)) log = stdout.read() ssh.close() except Exception, e: log = "Exception: %s" % e else: log = "Slice is not ready to be used, we are currently in status: %s" % status tmpl = open(self.base + '/templates/log.html').read() return jsontemplate.expand(tmpl, {'log': log, 'host': host}) @cherrypy.expose def new( self, name=None, srvtype=None, source_repo=None, source_branch=None, dirname=None ): password = self.gen_password() img = [i for i in self.compute.images.list() if i.name.find("Ubuntu 10.10") != -1][0] flav = [f for f in self.compute.flavors.list() \ if f.ram == int(self.server_size)][0] srvname = self.prefix + "-" + str(uuid4()).replace('-', '') if name is None: name = srvname cron = "* * * * * root /bin/bash /root/install.sh\n" envvars = ''
# Minimal jsontemplate smoke test.
import jsontemplate

# Substitutes {name} from the dictionary; prints "Hello world".
print jsontemplate.expand("Hello {name}", {"name": "world"})
def render_template(self, json_response, template):
    """Expand *template* with *json_response* and write it to the HTTP
    response."""
    body = jsontemplate.expand(template, json_response)
    self.response.write(body)
def render_html(self, cfg, tpl_names, data):
    """Render data as HTML using the shared jsontemplate settings."""
    markup = self._html_template(cfg, tpl_names, elems=data.keys())
    return jsontemplate.expand(markup, data, **JSONTEMPLATE_ARGS)
import time
from boto.sqs.message import RawMessage
from boto.sqs.connection import SQSConnection
import jsontemplate
import json
import boto
import urllib

# Poll the 'commentresult' SQS queue forever, applying each moderation
# verdict to the matching SimpleDB 'comment' item and logging the outcome.
# SECURITY: AWS credentials are hardcoded below (and repeated for the
# SimpleDB connection).  They should be rotated and loaded from the
# environment or an IAM role instead of living in source control.
sqsconn = SQSConnection('AKIAJHJXHTMTVQYVZJOA','2YVZfFXQ7mhdFeUnMjcMOJ8uc5GBjz5LXhmh8LiM')
q = sqsconn.get_queue('commentresult')
q.set_message_class(RawMessage)
while True:
    rs = q.get_messages()
    for item in rs:
        # jsontemplate.expand is (ab)used as a field extractor over the
        # parsed message body.  NOTE(review): json.read is the pre-1.x
        # simplejson API — confirm the pinned json module provides it.
        result = jsontemplate.expand('{approved}', json.read(str(item.get_body())))
        cmtkey = jsontemplate.expand('{commentkey}', json.read(str(item.get_body())))
        sdb = boto.connect_sdb('AKIAJHJXHTMTVQYVZJOA','2YVZfFXQ7mhdFeUnMjcMOJ8uc5GBjz5LXhmh8LiM')
        domain = sdb.get_domain('comment')
        cmt = domain.get_item(cmtkey)
        if result == "true":
            cmt['status'] = "approved"
        else:
            cmt['status'] = "denied"
        cmt.save()
        # Best-effort notification to the central logging service.
        params = urllib.urlencode({'student': 'armstrow', 'type': 'INFO', 'system': 'appserver', 'message': 'Comment result processed: '+cmtkey})
        f = urllib.urlopen("http://imaj.lddi.org:8080/log/submit", params)
        # Delete only after processing so a crash re-delivers the message.
        q.delete_message(item)
    time.sleep(10)
#
def json_conf2html(data_json_conf):
    '''Render the conference JSON data through the bundled proceedings
    template.  Resource loading follows
    http://stackoverflow.com/questions/1395593/managing-resources-in-a-python-project
    '''
    template = resource_string('resources.files', 'proceedings.jsont')
    return jsontemplate.expand(template, data_json_conf)
# Minimal jsontemplate smoke test.
import jsontemplate

# Substitutes {name} from the dictionary; prints "Hello world".
print jsontemplate.expand('Hello {name}', {'name': 'world'})
def render(req):
    """Return a fixed greeting rendered via jsontemplate (req is unused)."""
    context = {'name': 'world'}
    return jsontemplate.expand('Hello {name}', context)
def csv2html(id_data, global_config):
    """Build json_conf from the conference CSV exports and render the
    proceedings, people, program HTML pages, the conf JSON dump, and the
    iCalendar output for conference *id_data*.

    Reads <home>/data/www/<id>-conf-paper.csv, -conf-person.csv and
    -conf-event.csv; writes the corresponding .html/.json/.ics files back
    into <home>/data/www/.
    """
    #create json_conf data
    json_conf ={}

    ######################
    #conf-paper
    filename_csv_conf_paper = "%s/data/www/%s-%s.csv" % (
        global_config["home"], id_data, "conf-paper")
    indexed_proceedings ={}
    list_title = []
    with open(filename_csv_conf_paper) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        # Some exports start with short junk rows before the real header.
        while len(headers)<=1:
            print "skipping header row {0}".format( headers )
            headers = csvreader.next()
        for row in csvreader:
            if len(row)<len(headers):
                print "skipping row {0}".format( row )
                continue
            entry = dict(zip(headers, row))
            # print entry
            # Proceedings with a subtitle are treated as the main volumes
            # and float to the front of list_title; the rest keep CSV order.
            if entry["subtitle_proceedings"]:
                proceeding_title = "{} -- {}".format(entry["label_proceedings"], entry["subtitle_proceedings"])
                if proceeding_title not in list_title:
                    list_title.insert(0, proceeding_title)
            else:
                proceeding_title = "{}".format(entry["label_proceedings"])
                if proceeding_title not in list_title:
                    list_title.append(proceeding_title)
            # Group papers as proceedings -> category -> [entries].
            UtilJson.add_init_list( indexed_proceedings, [proceeding_title], entry["category"], entry)

    #update json_conf
    for proceedings in list_title:
        #print proceedings
        json_proceedings ={}
        json_proceedings["title"] =proceedings
        UtilJson.add_init_list(json_conf, [], "proceedings", json_proceedings)
        for category in sorted(indexed_proceedings[proceedings].keys()):
            #print category
            json_category = {}
            # Only label the category when there is more than one.
            if len(indexed_proceedings[proceedings].keys()) > 1:
                json_category["title"] =category
            UtilJson.add_init_list(json_proceedings, [], "categories", json_category)
            json_category["papers"] =indexed_proceedings[proceedings][category]

    ######################
    #conf-person
    filename_csv_conf_person = "%s/data/www/%s-%s.csv" % (
        global_config["home"], id_data, "conf-person")
    indexed_persons ={}
    with open(filename_csv_conf_person) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row)<len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #print entry
            # Normalise whitespace in the person name, used as the index key.
            name = entry["name"]
            name = name.strip()
            name = re.sub("\s+"," ",name)
            cnt_paper = int(entry["cnt_paper"])
            # Anyone with at least one paper is listed under
            # "[Proceedings] <label>" -> "Authors".
            if cnt_paper >0:
                index_1 = entry["proceedings_label"]
                if len(entry["proceedings_label"])==0:
                    index_1 = "All"
                index_1 = "[Proceedings] {}".format(index_1)
                index_2 = "Authors"
                UtilJson.add_init_dict( indexed_persons, [index_1,index_2], name, entry)

            #consolidate affiliation
            # Keep only the first of multiple ';'-separated organizations.
            organization = entry["organization"]
            if len(entry["organization"])>0:
                entry["organization"] = organization.split(";")[0]

            #only keep direct conference role
            ALLOWED_EVENT_TYPE= []
            ALLOWED_EVENT_TYPE.append("http://data.semanticweb.org/ns/swc/ontology#ConferenceEvent")
            # ALLOWED_EVENT_TYPE.append("http://data.semanticweb.org/ns/swc/ontology#WorkshopEvent")
            if entry["role_event_type"] not in ALLOWED_EVENT_TYPE:
                continue
            # Conference chairs are grouped under an "(organization
            # Committee)" heading; the leading space sorts it first.
            if entry["role_type"].endswith("Chair") and entry["role_event_type"].endswith("ConferenceEvent"):
                entry["role_event_label"] = " {} (organization Committee)".format(entry["role_event_label"])
            UtilJson.add_init_dict( indexed_persons, [entry["role_event_label"],entry["role_label"]], name, entry)

    #update json_conf
    for role_event_label in sorted(indexed_persons.keys()):
        #print role_event_label
        josn_role_event ={}
        josn_role_event["title"] =role_event_label
        UtilJson.add_init_list(json_conf, [], "events", josn_role_event)
        # Chairs and webmasters are listed before the other roles.
        list_role = []
        for role_label in sorted(indexed_persons[role_event_label].keys()):
            if "Chair" in role_label or "Webmaster" in role_label:
                list_role.insert(0, role_label)
            else:
                list_role.append(role_label)
        for role_label in list_role:
            #print role_label
            json_role_label = {}
            json_role_label["title"] =role_label
            UtilJson.add_init_list(josn_role_event, [], "roles", json_role_label)
            json_role_label["persons"] = sorted( indexed_persons[role_event_label][role_label].values())

    ######################
    # write xyz-proceedings
    id_html = "proceedings"
    filename_html = "%s/data/www/%s-%s.html" % (
        global_config["home"], id_data, id_html)
    json_template = resource_string('resources.files', '{}.jsont'.format(id_html))
    content= jsontemplate.expand(json_template, json_conf)
    with codecs.open(filename_html,"w","utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)

    ######################
    # write xyz-people
    id_html = "people"
    filename_html = "%s/data/www/%s-%s.html" % (
        global_config["home"], id_data, id_html)
    json_template = resource_string('resources.files', '{}.jsont'.format(id_html))
    content= jsontemplate.expand(json_template, json_conf)
    with codecs.open(filename_html,"w","utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)

    ######################
    #conf-event
    filename_csv_conf_event = "%s/data/www/%s-%s.csv" % (
        global_config["home"], id_data, "conf-event")
    dict_events ={}
    list_events = []
    conf_event_name =""
    with open(filename_csv_conf_event) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row)<len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #print entry
            dict_events[entry["event_uri"]] = entry
            list_events.append(entry)
            event_type = entry["event_type"].split('#')[-1]
            if event_type in ['ConferenceEvent']:
                conf_event_name = entry["label"]
            elif event_type in ['InvitedTalkEvent', 'PanelEvent']:
                entry['category'] = event_type.replace('Event', '')

    indexed_events ={}
    map_events ={}
    for entry in list_events:
        # Short event-type label, e.g. 'Workshop' from '...#WorkshopEvent'.
        temp = entry["event_type"].split('#')[-1]
        temp = temp.replace("Event","")
        if not temp in ["Tutorial","Talk","Special","Break"]:
            entry["event_type_label"] = temp
        # map_events: super-event URI -> unique list of child event URIs.
        UtilJson.add_init_list( map_events, [], entry["super_event_uri"], entry["event_uri"], True)
        # Group under the workshop/tutorial name when nested, otherwise
        # under the conference itself.
        super_event_name = conf_event_name
        if entry["super_event_uri"] and entry["super_event_uri"] in dict_events:
            super_event_type = dict_events[entry["super_event_uri"]]["event_type"].split('#')[-1].replace("Event","")
            if super_event_type in ['Workshop', 'Tutorial'] :
                super_event_name = dict_events[entry["super_event_uri"]]["label"]
                # Prefix with the type unless already present (Doctoral
                # Consortium keeps its own name).
                if super_event_name.lower().find("Doctoral Consortium".lower()) < 0:
                    if not super_event_name.startswith(super_event_type):
                        super_event_name = "{}: {}".format(super_event_type, super_event_name)
        entry['start_x'] = entry['start']
        entry['end_x'] = entry['end']
        if len(entry['start'])>0:
            #skip talk event
            if len(entry['order_in_super_event'])>0:
                continue
            # start/end look like ISO timestamps: [0:10] is the date,
            # [11:-3] the HH:MM part — TODO confirm against the CSV export.
            date = entry['start'][0:10]
            entry['start_x'] = entry['start'][11:-3]
            date_end = date
            if len(entry['end'])>0:
                date_end = entry['end'][0:10]
                entry['end_x'] = entry['end'][11:-3]
            #only keep same day events
            if date_end == date:
                UtilJson.add_init_list( indexed_events, [super_event_name], date, entry)
    #print json.dumps(map_events, indent=4)

    #update json_conf
    # The conference's own program is listed first.
    list_event_name = []
    for event_name in sorted(indexed_events.keys()):
        if conf_event_name == event_name:
            list_event_name.insert(0, event_name)
        else:
            list_event_name.append(event_name)
    for event_name in list_event_name:
        top_events_in_program = indexed_events[event_name]
        json_program = { 'title': event_name }
        UtilJson.add_init_list(json_conf, [], "top_programs", json_program)
        for date in sorted(top_events_in_program.keys()):
            events_in_program_date = top_events_in_program[date]
            json_date_program ={}
            # Only show a date heading for multi-day programs.
            if len(top_events_in_program) >1:
                json_date_program["title"] = datetime.datetime(*time.strptime(date,"%Y-%m-%d")[0:5]).strftime("%Y-%m-%d (%A)")
            json_date_program["events"] = events_in_program_date
            UtilJson.add_init_list(json_program, [], "date_programs", json_date_program)
            # sorted(events_in_program_date, key=lambda item: item['start'])
            for entry in events_in_program_date:
                entry["super_event_type"] = dict_events[entry["super_event_uri"]]["event_type"]
                if entry["super_event_type"] == "http://data.semanticweb.org/ns/swc/ontology#TrackEvent":
                    entry["track"] = dict_events[entry["super_event_uri"]]["label"]
                else:
                    entry["track"] = ""
                #if entry["event_type"] == "http://data.semanticweb.org/ns/swc/ontology#SessionEvent":
                # Attach child events (talks) to their parent entry.
                if entry["event_uri"] in map_events:
                    for sub_event_uri in map_events[entry["event_uri"]]:
                        UtilJson.add_init_list(entry, [], "talks", dict_events[sub_event_uri])

    ######################
    # write json-data
    #print json.dumps(json_conf, indent=4)
    filename_json = "%s/data/www/%s-conf.json" % (
        global_config["home"], id_data)
    with codecs.open(filename_json,"w","utf-8") as f:
        json.dump(json_conf, f, indent=4)

    ######################
    # write xyz-program
    id_html = "program"
    filename_html = "%s/data/www/%s-%s.html" % (
        global_config["home"], id_data, id_html)
    json_template = resource_string('resources.files', '{}.jsont'.format(id_html))
    content= jsontemplate.expand(json_template, json_conf)
    with codecs.open(filename_html,"w","utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)

    ######################
    # write icalendar
    id_html = "program"
    filename_ics_prefix = "%s/data/www/%s-%s" % (
        global_config["home"], id_data, id_html)
    ConfData.json_conf2ics(json_conf, filename_ics_prefix)
def gen_conf_paper(global_config, local_config):
    """Generate the conference-paper JSON, HTML page, and the USB/open-access
    index HTML from <home>/data/source/<id>-paper.csv.

    Papers are grouped into tracks by their 'category' column; only entries
    whose proceedings URI ends in 'proceedings', 'proceedings-1' or
    'proceedings-2' are kept.
    """
    dir_home = global_config["home"]
    id_data = local_config["id"]
    id_html = "gen_conf_paper"

    #load data
    filename_input = "{0}/data/source/{1}-paper.csv".format( dir_home, id_data)
    data_json = UtilCsv.csv2json(filename_input)
    IswcDirect.debug("load {} entries from [{}]".format(len(data_json), filename_input))

    #prepare json for templating
    p_group = "tracks"
    p_item = "papers"
    data_for_jsont = {p_group: []}
    prev = None
    for entry in data_json:
        x_proceedings = entry["proceedings_uri"].split("/")[-1]
        if x_proceedings not in ["proceedings", "proceedings-1", "proceedings-2"]:
            #skip non conference proceedings paper
            continue
        # Start a new track whenever the category changes; assumes the CSV
        # is sorted by category — TODO confirm.
        if None == prev or entry["category"] != prev["label"]:
            prev = {"label": entry["category"], p_item: []}
            data_for_jsont[p_group].append(prev)
        #copy entry_new metadata
        entry_new = {}
        for p in ["author", "title", "pages", "link_open_access", "abstract"]:
            entry_new[p] = entry[p]
        # link local: last path component of the open-access URL.
        entry_new["link_local"] = "{}".format(entry_new["link_open_access"].split("/")[-1])
        # link local
        if len(entry_new["abstract"]) == 0:
            entry_new["abstract"] = "TBA"
        if entry["pages"]:
            entry_new['page_start'] = entry["pages"].split("-")[0]
        #add uri (in-page anchor derived from the title)
        entry_new["uri"] = "#{}".format(create_ascii_localname(entry_new["title"], escape=True))
        entry_new['author_latex'] = unicode2latex(entry["author"])
        # One {name, uri} record per comma-separated author.
        entry_new['authors'] = []
        for x in entry["author"].split(","):
            person = {"name": x.strip()}
            person["uri"] = "#{}".format(create_ascii_localname(person["name"], escape=True))
            entry_new['authors'].append(person)
        prev[p_item].append(entry_new)

    filename_json = "%s/data/www/%s-%s.json" % ( dir_home, id_data, id_html)
    with codecs.open(filename_json, "w", "utf-8") as f:
        json.dump(data_for_jsont, f, indent=4)

    #write html
    filename_html = "%s/data/www/%s-%s.html" % ( dir_home, id_data, id_html)
    json_template = resource_string('resources.files', '{}.jsont'.format(id_html))
    content = jsontemplate.expand(json_template, data_for_jsont)
    with codecs.open(filename_html, "w", "utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)
    IswcDirect.debug("write to file [{}]".format(filename_html))

    #write index-usb
    filename_html = "%s/data/paper/iswc-2013/index.html" % ( dir_home)
    json_template = resource_string('resources.files', '{}.jsont'.format("gen_open_access_index"))
    content = jsontemplate.expand(json_template, data_for_jsont)
    with codecs.open(filename_html, "w", "utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)
    IswcDirect.debug("write to file [{}]".format(filename_html))
privatekeyfile = self.private_key mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile) ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host, username='******', pkey=mykey, timeout=2) stdin, stdout, stderr = \ ssh.exec_command('tail -n %d "%s"' % (int(size), fn)) log = stdout.read() ssh.close() except Exception, e: log = "Exception: %s" % e else: log = "Slice is not ready to be used, we are currently in status: %s" % status tmpl = open(self.base + '/templates/log.html').read() return jsontemplate.expand(tmpl, {'log': log, 'host': host}) @cherrypy.expose def new( self, name=None, srvtype=None, novascript_url="https://raw.github.com/cloudbuilders/deploy.sh/master/nova.sh" ): password = self.gen_password() img = [ i for i in self.compute.images.list() if i.name.find("Ubuntu 10.10") != -1 ][0] flav = [f for f in self.compute.flavors.list() \ if f.ram == int(self.server_size)][0]
import time
from boto.sqs.message import RawMessage
from boto.sqs.connection import SQSConnection
import jsontemplate
import json
import boto
import urllib

# Poll the 'approvalresult' SQS queue forever, applying each moderation
# verdict to the matching SimpleDB 'picture' item and logging the outcome.
# SECURITY: AWS credentials are hardcoded below (and repeated for the
# SimpleDB connection).  They should be rotated and loaded from the
# environment or an IAM role instead of living in source control.
sqsconn = SQSConnection('AKIAJHJXHTMTVQYVZJOA','2YVZfFXQ7mhdFeUnMjcMOJ8uc5GBjz5LXhmh8LiM')
q = sqsconn.get_queue('approvalresult')
q.set_message_class(RawMessage)
while True:
    rs = q.get_messages()
    for item in rs:
        # jsontemplate.expand is (ab)used as a field extractor over the
        # parsed message body.  NOTE(review): json.read is the pre-1.x
        # simplejson API — confirm the pinned json module provides it.
        result = jsontemplate.expand('{approved}', json.read(str(item.get_body())))
        imgkey = jsontemplate.expand('{imagekey}', json.read(str(item.get_body())))
        sdb = boto.connect_sdb('AKIAJHJXHTMTVQYVZJOA','2YVZfFXQ7mhdFeUnMjcMOJ8uc5GBjz5LXhmh8LiM')
        domain = sdb.get_domain('picture')
        img = domain.get_item(imgkey)
        if result == "true":
            img['status'] = "approved"
        else:
            img['status'] = "denied"
        img.save()
        # Best-effort notification to the central logging service.
        params = urllib.urlencode({'student': 'armstrow', 'type': 'INFO', 'system': 'appserver', 'message': 'Image approval result processed: '+imgkey})
        f = urllib.urlopen("http://imaj.lddi.org:8080/log/submit", params)
        # Delete only after processing so a crash re-delivers the message.
        q.delete_message(item)
    time.sleep(10)
#
def MakeIndexHtml(directory):
    """Write an index.html inside *directory* listing its files (sorted).

    :param directory: Directory whose contents are listed; index.html is
        written into it.
    """
    files = os.listdir(directory)
    files.sort()
    html = jsontemplate.expand(TEST_CASE_INDEX_HTML_TEMPLATE, files)
    # os.path.join instead of naive concatenation: the original
    # `directory + 'index.html'` silently wrote to the wrong path when
    # `directory` lacked a trailing separator.  'with' also closes the
    # handle the original leaked.
    with open(os.path.join(directory, 'index.html'), 'w') as out:
        out.write(html)
def render_html(self, cfg, tpl_names, data):
    """Render data as HTML using the shared jsontemplate settings."""
    elems = data.keys()
    compiled = self._html_template(cfg, tpl_names, elems=elems)
    return jsontemplate.expand(compiled, data, **JSONTEMPLATE_ARGS)
def csv2html(id_data, global_config):
    """Older csv2html variant: build json_conf from the conference CSV
    exports under <home>/data/output/ and render the proceedings and program
    HTML pages for conference *id_data*.
    """
    #create json_conf data
    json_conf = {}

    ######################
    #conf-paper
    filename_csv_conf_paper = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-paper")
    indexed_proceedings = {}
    with open(filename_csv_conf_paper) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            # print entry
            # Group papers as "label -- subtitle" -> category -> [entries].
            proceeding_title = "%s -- %s" % (entry["label_proceedings"],
                                             entry["subtitle_proceedings"])
            UtilJson.add_init_list(indexed_proceedings, [proceeding_title],
                                   entry["category"], entry)

    #update json_conf
    for proceedings in sorted(indexed_proceedings.keys(), reverse=True):
        #print proceedings
        json_proceedings = {}
        json_proceedings["title"] = proceedings
        UtilJson.add_init_list(json_conf, [], "proceedings", json_proceedings)
        for category in sorted(indexed_proceedings[proceedings].keys()):
            #print category
            json_category = {}
            json_category["title"] = category
            UtilJson.add_init_list(json_proceedings, [], "categories",
                                   json_category)
            json_category["papers"] = indexed_proceedings[proceedings][
                category]

    ######################
    #conf-person
    filename_csv_conf_paper = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-person")
    indexed_persons = {}
    with open(filename_csv_conf_paper) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #only keep direct conference role
            if entry["role_event_type"] != "http://data.semanticweb.org/ns/swc/ontology#ConferenceEvent":
                continue
            #print entry
            # Group people as event label -> role label -> [entries].
            UtilJson.add_init_list(indexed_persons,
                                   [entry["role_event_label"]],
                                   entry["role_label"], entry)

    #update json_conf
    for role_event_label in sorted(indexed_persons.keys()):
        #print role_event_label
        josn_role_event = {}
        josn_role_event["title"] = role_event_label
        UtilJson.add_init_list(json_conf, [], "events", josn_role_event)
        for role_label in sorted(indexed_persons[role_event_label].keys()):
            #print role_label
            json_role_label = {}
            json_role_label["title"] = role_label
            UtilJson.add_init_list(josn_role_event, [], "roles",
                                   json_role_label)
            json_role_label["persons"] = indexed_persons[role_event_label][role_label]

    ######################
    # write xyz-proceedings
    filename_html_proceedings = "%s/data/output/%s-%s.html" % (
        global_config["home"], id_data, "proceedings")
    json_template_program = resource_string('resources.files',
                                            'proceedings.jsont')
    content = jsontemplate.expand(json_template_program, json_conf)
    with codecs.open(filename_html_proceedings, "w", "utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)

    ######################
    #conf-event
    filename_csv_conf_event = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-event")
    indexed_events = {}
    dict_events = {}
    map_events = {}
    with open(filename_csv_conf_event) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #print entry
            dict_events[entry["event_uri"]] = entry
            # map_events: super-event URI -> unique list of child URIs.
            UtilJson.add_init_list(map_events, [], entry["super_event_uri"],
                                   entry["event_uri"], True)
            if len(entry['start']) > 0:
                # start/end look like ISO timestamps: [0:10] is the date,
                # [11:-3] the HH:MM part — TODO confirm against the export.
                date = entry['start'][0:10]
                entry['start'] = entry['start'][11:-3]
                date_end = date
                if len(entry['end']) > 0:
                    date_end = entry['end'][0:10]
                    entry['end'] = entry['end'][11:-3]
                #only keep same day events
                if date_end == date:
                    UtilJson.add_init_list(indexed_events, [], date, entry)
    #print json.dumps(map_events, indent=4)

    #update json_conf
    for date in sorted(indexed_events.keys()):
        josn_date_program = {}
        josn_date_program["title"] = datetime.datetime(*time.strptime(date, "%Y-%m-%d")[0:5]).strftime("%Y-%m-%d (%A)")
        UtilJson.add_init_list(json_conf, [], "program", josn_date_program)
        josn_date_program["events"] = indexed_events[date]
        for entry in indexed_events[date]:
            entry["super_event_type"] = dict_events[entry["super_event_uri"]]["event_type"]
            if entry["super_event_type"] == "http://data.semanticweb.org/ns/swc/ontology#TrackEvent":
                entry["track"] = dict_events[entry["super_event_uri"]]["label"]
            else:
                entry["track"] = ""
            # Attach child events (talks) to session entries.
            if entry["event_type"] == "http://data.semanticweb.org/ns/swc/ontology#SessionEvent":
                if entry["event_uri"] in map_events:
                    for sub_event_uri in map_events[entry["event_uri"]]:
                        UtilJson.add_init_list(entry, [], "talks",
                                               dict_events[sub_event_uri])

    ######################
    # write xyz-program
    filename_html_program = "%s/data/output/%s-%s.html" % (
        global_config["home"], id_data, "program")
    json_template_program = resource_string('resources.files',
                                            'program.jsont')
    content = jsontemplate.expand(json_template_program, json_conf)
    with codecs.open(filename_html_program, "w", "utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)
def gen_conf_organizer(global_config, local_config):
    """Generate the conference-organizer JSON and HTML page from
    <home>/data/source/<id>-person.csv.

    Only rows with role_event '[ME]' and role_type 'swc:Chair' are kept;
    consecutive rows sharing a role_label are grouped under one role.
    """
    dir_home = global_config["home"]
    id_data = local_config["id"]
    id_html = "gen_conf_organizer"

    #load data
    filename_input = "{0}/data/source/{1}-person.csv".format( dir_home, id_data)
    data_json = UtilCsv.csv2json(filename_input)
    IswcDirect.debug("load {} entries from [{}]".format(len(data_json), filename_input))

    #only keep organizer
    data_json_new = []
    for entry in data_json:
        if entry["role_event"] == "[ME]" and entry["role_type"] == "swc:Chair":
            data_json_new.append(entry)
    data_json = data_json_new
    IswcDirect.debug("keep {} persons for conf organization".format(len(data_json)))

    #prepare json for templating
    data_for_jsont = {"roles": []}
    role_prev = None
    for entry in data_json:
        # Start a new role group whenever the label changes; assumes the CSV
        # is sorted by role_label — TODO confirm.
        if None == role_prev or entry["role_label"] != role_prev["label"]:
            role_prev = {"label": entry["role_label"], "persons": []}
            data_for_jsont["roles"].append(role_prev)
        #copy person metadata
        person = {}
        for p in ["name", "homepage", "organization", "country"]:
            person[p] = entry[p]
        #add person uri (in-page anchor derived from the name)
        person["uri"] = "#{}".format(create_ascii_localname(person["name"], escape=True))
        role_prev["persons"].append(person)
    #print json.dumps(data_for_jsont, indent=4)

    filename_json = "%s/data/www/%s-%s.json" % ( dir_home, id_data, id_html)
    with codecs.open(filename_json, "w", "utf-8") as f:
        json.dump(data_for_jsont, f, indent=4)

    #write html
    filename_html = "%s/data/www/%s-%s.html" % ( dir_home, id_data, id_html)
    json_template = resource_string('resources.files', '{}.jsont'.format(id_html))
    content = jsontemplate.expand(json_template, data_for_jsont)
    with codecs.open(filename_html, "w", "utf-8") as f:
        f.write(u'\ufeff')
        f.write(content)
    IswcDirect.debug("write to file [{}]".format(filename_html))
def csv2html(id_data, global_config):
    """Render the proceedings and program HTML pages for one conference.

    Reads three CSV exports from ``data/output`` (``<id>-conf-paper``,
    ``<id>-conf-person``, ``<id>-conf-event``), aggregates them into one
    ``json_conf`` dict, and expands the ``proceedings.jsont`` and
    ``program.jsont`` templates into ``data/output/<id>-*.html``.
    """
    #create json_conf data
    json_conf = {}
    ######################
    #conf-paper: group papers by proceedings title, then by category.
    filename_csv_conf_paper = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-paper")
    indexed_proceedings = {}
    with open(filename_csv_conf_paper) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            # Skip truncated rows that cannot be zipped against headers.
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            # print entry
            proceeding_title = "%s -- %s" % (entry["label_proceedings"],
                                             entry["subtitle_proceedings"])
            # NOTE(review): add_init_list appears to append `entry` to a
            # list keyed by category under proceeding_title, creating
            # intermediate containers as needed — confirm in UtilJson.
            UtilJson.add_init_list(indexed_proceedings, [proceeding_title],
                                   entry["category"], entry)
    #update json_conf: flatten the two-level index into template shape.
    for proceedings in sorted(indexed_proceedings.keys(), reverse=True):
        #print proceedings
        json_proceedings = {}
        json_proceedings["title"] = proceedings
        UtilJson.add_init_list(json_conf, [], "proceedings", json_proceedings)
        for category in sorted(indexed_proceedings[proceedings].keys()):
            #print category
            json_category = {}
            json_category["title"] = category
            UtilJson.add_init_list(json_proceedings, [], "categories",
                                   json_category)
            json_category["papers"] = indexed_proceedings[proceedings][
                category]
    ######################
    #conf-person: group people by role event, then by role label.
    filename_csv_conf_paper = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-person")
    indexed_persons = {}
    with open(filename_csv_conf_paper) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #only keep direct conference role
            if entry[
                    "role_event_type"] != "http://data.semanticweb.org/ns/swc/ontology#ConferenceEvent":
                continue
            #print entry
            UtilJson.add_init_list(indexed_persons,
                                   [entry["role_event_label"]],
                                   entry["role_label"], entry)
    #update json_conf with the role index.
    for role_event_label in sorted(indexed_persons.keys()):
        #print role_event_label
        josn_role_event = {}
        josn_role_event["title"] = role_event_label
        UtilJson.add_init_list(json_conf, [], "events", josn_role_event)
        for role_label in sorted(indexed_persons[role_event_label].keys()):
            #print role_label
            json_role_label = {}
            json_role_label["title"] = role_label
            UtilJson.add_init_list(josn_role_event, [], "roles",
                                   json_role_label)
            json_role_label["persons"] = indexed_persons[role_event_label][
                role_label]
    ######################
    # write xyz-proceedings page from the aggregated json_conf.
    filename_html_proceedings = "%s/data/output/%s-%s.html" % (
        global_config["home"], id_data, "proceedings")
    json_template_program = resource_string('resources.files',
                                            'proceedings.jsont')
    content = jsontemplate.expand(json_template_program, json_conf)
    with codecs.open(filename_html_proceedings, "w", "utf-8") as f:
        f.write(u'\ufeff')  # UTF-8 BOM
        f.write(content)
    ######################
    #conf-event: index events by day, remember parent/child links.
    filename_csv_conf_event = "%s/data/output/%s-%s.csv" % (
        global_config["home"], id_data, "conf-event")
    indexed_events = {}
    dict_events = {}   # event_uri -> row
    map_events = {}    # super_event_uri -> [child event_uri, ...]
    with open(filename_csv_conf_event) as f:
        csvreader = UnicodeReader(f)
        headers = csvreader.next()
        for row in csvreader:
            if len(row) < len(headers):
                #print "skipping row %s" % row
                continue
            entry = dict(zip(headers, row))
            #print entry
            dict_events[entry["event_uri"]] = entry
            # NOTE(review): the trailing True presumably requests
            # de-duplication of child URIs — confirm in UtilJson.
            UtilJson.add_init_list(map_events, [], entry["super_event_uri"],
                                   entry["event_uri"], True)
            if len(entry['start']) > 0:
                # Split an ISO-ish timestamp into date and HH:MM parts,
                # e.g. "2013-10-21T09:00:00" -> "2013-10-21" / "09:00".
                date = entry['start'][0:10]
                entry['start'] = entry['start'][11:-3]
                date_end = date
                if len(entry['end']) > 0:
                    date_end = entry['end'][0:10]
                    entry['end'] = entry['end'][11:-3]
                #only keep same day events
                if date_end == date:
                    UtilJson.add_init_list(indexed_events, [], date, entry)
    #print json.dumps(map_events, indent=4)
    #update json_conf: one program section per day, enriched with
    #track labels and session talks.
    for date in sorted(indexed_events.keys()):
        josn_date_program = {}
        # Re-format "YYYY-MM-DD" as "YYYY-MM-DD (Weekday)" for display.
        josn_date_program["title"] = datetime.datetime(*time.strptime(
            date, "%Y-%m-%d")[0:5]).strftime("%Y-%m-%d (%A)")
        UtilJson.add_init_list(json_conf, [], "program", josn_date_program)
        josn_date_program["events"] = indexed_events[date]
        for entry in indexed_events[date]:
            entry["super_event_type"] = dict_events[
                entry["super_event_uri"]]["event_type"]
            if entry[
                    "super_event_type"] == "http://data.semanticweb.org/ns/swc/ontology#TrackEvent":
                entry["track"] = dict_events[
                    entry["super_event_uri"]]["label"]
            else:
                entry["track"] = ""
            # Attach the child talks of each session event.
            if entry[
                    "event_type"] == "http://data.semanticweb.org/ns/swc/ontology#SessionEvent":
                if entry["event_uri"] in map_events:
                    for sub_event_uri in map_events[entry["event_uri"]]:
                        UtilJson.add_init_list(entry, [], "talks",
                                               dict_events[sub_event_uri])
    ######################
    # write xyz-program page.
    filename_html_program = "%s/data/output/%s-%s.html" % (
        global_config["home"], id_data, "program")
    json_template_program = resource_string('resources.files',
                                            'program.jsont')
    content = jsontemplate.expand(json_template_program, json_conf)
    with codecs.open(filename_html_program, "w", "utf-8") as f:
        f.write(u'\ufeff')  # UTF-8 BOM
        f.write(content)
# Worker script: polls the 'imageprocess' SQS queue and downloads the
# referenced image from the 'theimageproject' S3 bucket to local disk.
# NOTE(review): AWS credentials are hardcoded below — these should be
# revoked and moved to environment/config before this ships anywhere.
# NOTE(review): SQSConnection, RawMessage, json.read, strftime and os are
# used but not imported in this chunk — presumably imported elsewhere
# in the file; verify.
import boto
import urllib

fullSize = 600.0   # target full-size dimension (pixels, as float)
thumbSize = 75.0   # target thumbnail dimension (pixels, as float)
AWSKey = 'AKIAJHJXHTMTVQYVZJOA'
AWSSecret = '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'
sqsconn = SQSConnection(AWSKey, AWSSecret)
q = sqsconn.get_queue('imageprocess')
q.set_message_class(RawMessage)
dir_path = "/var/www/python/files/"
# Poll forever; each message body is JSON carrying the image key and
# submit date, extracted here via jsontemplate substitution.
while True:
    rs = q.get_messages()
    for item in rs:
        imgkey = jsontemplate.expand('{imagekey}',
                                     json.read(str(item.get_body())))
        date = jsontemplate.expand('{submitdate}',
                                   json.read(str(item.get_body())))
        curtime = strftime("%Y-%m-%dT%H:%M:%S")
        from boto.s3.connection import S3Connection
        conn = S3Connection(AWSKey, AWSSecret)
        bucket = conn.get_bucket('theimageproject')
        from boto.s3.key import Key
        k = Key(bucket)
        k.key = imgkey + ".jpg"
        # Retry the S3 download until it succeeds; only S3ResponseError
        # is retried, so eventual consistency delays are tolerated.
        tryAgain = True
        while tryAgain:
            try:
                k.get_contents_to_filename(
                    os.path.join(dir_path, imgkey + ".jpg"))
                tryAgain = False
            except boto.exception.S3ResponseError:
                tryAgain = True
def gen_open_access_index(global_config, local_config):
    """Generate per-proceedings open-access index pages.

    Reads the paper CSV export(s), groups papers first by proceedings
    volume (one output page per volume) and then by category/track
    within each volume, copies the source CSV into
    ``data/open_access``, and renders an ``index.html`` per volume from
    the ``gen_open_access_index.jsont`` template.
    """
    dir_home = global_config["home"]
    id_data = local_config["id"]
    id_html = "gen_open_access_index"

    # One entry per proceedings volume: {"label": ..., "data": ...}.
    data_all = []
    list_filename_input = []
    # list_filename_input.append( "{0}/data/source/iswc-all-papers.csv".format(
    #     dir_home))
    list_filename_input.append("{0}/data/source/iswc-2013-paper.csv".format(
        dir_home))
    for filename_input in list_filename_input:
        #load data
        data_json = UtilCsv.csv2json(filename_input)
        IswcDirect.debug("load {} entries from [{}]".format(len(data_json),
                                                            filename_input))
        #prepare json for templating
        p_group = "tracks"
        p_item = "papers"
        data_for_jsont = {p_group: []}
        prev = None
        for entry in data_json:
            # Skip rows without a title (blank/padding rows).
            if len(entry['title']) == 0:
                continue
            # New proceedings volume: start a fresh page payload and
            # register it in data_all.  NOTE(review): this assumes the
            # CSV is ordered by proceedings_uri, then category — confirm.
            if None == prev or entry["proceedings_uri"] != prev["proceedings_uri"]:
                prev = {"label": entry["category"],
                        "proceedings_uri": entry["proceedings_uri"],
                        p_item: []}
                data_for_jsont = {p_group: []}
                # Page label e.g. "2013-<last-URI-segment>" is also the
                # output sub-directory name.
                data_all_entry = {"label": "-".join([entry["year"],
                                                     entry["proceedings_uri"].split("/")[-1]]),
                                  "data": data_for_jsont}
                data_all.append(data_all_entry)
                data_for_jsont[p_group].append(prev)
            # New category/track within the current volume.
            if None == prev or entry["category"] != prev["label"]:
                prev = {"label": entry["category"],
                        "proceedings_uri": entry["proceedings_uri"],
                        p_item: []}
                data_for_jsont[p_group].append(prev)
            #copy entry_new metadata
            entry_new = {}
            for p in ["author", "title", "pages", "link_open_access",
                      "abstract"]:
                entry_new[p] = entry[p]
            # link local: the filename component of the open-access URL.
            entry_new["link_local"] = entry_new["link_open_access"].split("/")[-1]
            #print '---->', entry_new['title']
            #if len(entry_new["link_open_access"])>0:
            #    id_data = entry_new["link_open_access"].split("/")[-2]
            #    dir_paper = os.path.join(dir_home, "data/paper/{}".format(id_data))
            #
            #    temp_filename = entry_new["link_open_access"].split("/")[-1]
            #    #print temp_filename
            #    temp_id, temp_ext = temp_filename.split('.')
            #    pretty_name = IswcDirect.create_pretty_filename(entry_new['title'])
            #    entry_new["link_local"] = '{}-{}.{}'.format(temp_id, pretty_name, temp_ext)
            #    print "{}".format(entry_new["link_local"])
            #
            #    filename_old = os.path.join(dir_paper, temp_filename)
            #    #print os.path.exists(filename_old)
            #    filename_new = os.path.join(dir_paper, entry_new["link_local"])
            #    #print os.path.exists(filename_new)
            #    os.rename(filename_old, filename_new)
            # link local
            # Fall back to a placeholder when no abstract is available.
            if len(entry_new["abstract"]) == 0:
                entry_new["abstract"] = "TBA"
            if entry["pages"]:
                entry_new['page_start'] = entry["pages"].split("-")[0]
            #add uri: in-page anchor derived from the title.
            entry_new["uri"] = "#{}".format(create_ascii_localname(
                entry_new["title"], escape=True))
            entry_new['author_latex'] = unicode2latex(entry["author"])
            # Split the comma-separated author string into person dicts
            # with their own anchor URIs.
            entry_new['authors'] = []
            for x in entry["author"].split(","):
                person = {"name": x.strip()}
                person["uri"] = "#{}".format(create_ascii_localname(
                    person["name"], escape=True))
                entry_new['authors'].append(person)
            prev[p_item].append(entry_new)

    # Copy the (first) source CSV alongside the generated pages.
    filename_input = list_filename_input[0]
    head, tail = os.path.split(filename_input)
    filename_output = "{}/data/open_access/{}".format(
        dir_home, tail)
    shutil.copyfile(filename_input, filename_output)

    # Render one index.html per proceedings volume; directories that do
    # not already exist are silently skipped (no mkdir here).
    for data_all_entry in data_all:
        data_for_jsont = data_all_entry["data"]
        path = data_all_entry["label"]
        #print json.dumps(data_for_jsont, indent=4)
        #write index-usb
        filename_html = "%s/data/open_access/%s/index.html" % (
            dir_home, path)
        json_template = resource_string('resources.files',
                                        '{}.jsont'.format(id_html))
        content = jsontemplate.expand(json_template, data_for_jsont)
        if os.path.exists(os.path.dirname(filename_html)):
            with codecs.open(filename_html, "w", "utf-8") as f:
                f.write(u'\ufeff')  # UTF-8 BOM
                f.write(content)
                IswcDirect.debug("write to file [{}]".format(filename_html))
privatekeyfile = self.private_key mykey = paramiko.RSAKey.from_private_key_file(privatekeyfile) ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(host, username="******", pkey=mykey, timeout=2) stdin, stdout, stderr = ssh.exec_command('tail -n %d "%s"' % (int(size), fn)) log = stdout.read() ssh.close() except Exception, e: log = "Exception: %s" % e else: log = "Slice is not ready to be used, we are currently in status: %s" % status tmpl = open(self.base + "/templates/log.html").read() return jsontemplate.expand(tmpl, {"log": log, "host": host}) @cherrypy.expose def new(self, name=None, srvtype=None): password = self.gen_password() img = [i for i in self.compute.images.list() if i.name.find("Ubuntu 10.10") != -1][0] flav = [f for f in self.compute.flavors.list() if f.ram == int(self.server_size)][0] srvname = self.prefix + "-" + str(uuid4()).replace("-", "") if name is None: name = srvname cron = "* * * * * root /bin/bash /root/install.sh\n" install = ( open(self.base + "/templates/install.sh") .read() .format(password=password, pubkey=self.pubkey, srvtype=srvtype) )