def getDiagnosisICD9Data(self, xml, format):
    """Extract the two highest-ranked ICD9 diagnosis entries from parsed XML.

    xml    -- a minidom document, or a falsy value when retrieval failed.
    format -- output format name ('json' or 'xml').
    Returns the concatenated formatted content, or a Formatter error payload.
    """
    if (xml):
        documentElms = xml.getElementsByTagName('entry')
        if (documentElms):
            #grab two highest ranked content
            content1 = ''
            content2 = ''
            contentNode1 = documentElms[0]
            content1 = APIUtils.parseHealthDiagnosisICD9Content(format, contentNode1)
            #BUGFIX: condition was "> 0", which raised IndexError whenever
            #exactly one entry came back; a second entry needs "> 1"
            if (len(documentElms) > 1):
                contentNode2 = documentElms[1]
                content2 = APIUtils.parseHealthDiagnosisICD9Content(format, contentNode2)
            returnData = "%s%s" % (content1, content2)
            if (format == 'json'):
                #drop the trailing comma so the JSON fragment stays embeddable
                return returnData[:-1]
            else:
                return returnData
        else:
            logging.error('unable to retrieve diagnosis content')
            return Formatter.data(format, 'error', 'No results')
    else:
        logging.error('unable to retrieve diagnosis content')
        return Formatter.data(format, 'error', 'Unable to retrieve content from provider')
def get(self, format, id):
    """Serve a single Hacker News post in the requested format."""
    #content type first so even error payloads are typed correctly
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    #identify the calling app: 'app' overrides 'appid', default 'Unknown'
    appid = 'Unknown'
    query = self.request.GET
    if (query):
        if ('appid' in query):
            appid = query['appid']
        if ('app' in query):
            appid = query['app']
    referer = os.environ['HTTP_REFERER'] if ('HTTP_REFERER' in os.environ) else ''
    post = APIContent.getHackerNewsPost(id, format, self.request.url, referer, self.request.remote_addr)
    #record the request in Google Analytics
    GAHelper.trackGARequests('/post/%s' % (id), appid, referer)
    if (not post):
        post = ''
    #output to the browser
    self.response.out.write(Formatter.dataWrapper(format, post, self.request.get('callback')))
def main(): f = open('data/person_foaf.n3') fresnel_data = f.read() fresnel = Fresnel( fresnel_data ) print "Created Fresnel Graph.." rdf_graph = Graph() print "Downloading resource.." #f = open('data/Tim_Berners-Lee.rdf') #rdf_graph.parse( file=f, format="xml") #rdf_graph.parse(source="http://dbpedia.org/page/A._P._J._Abdul_Kalam") #print "more..." f = open('data/A._P._J._Abdul_Kalam.rdf') rdf_graph.parse( file=f, format="xml") #rdf_graph.parse(source="http://dbpedia.org/page/Tim_Berners-Lee") for term in rdf_graph: print term print "Making selection.." selector = Selector( fresnel , rdf_graph) selector.select() formatter = Formatter( selector ) formatter.format() data = "<html>\n<head><link rel='stylesheet' type='text/css' href='style.css'></head>\n<body>\n" for resource in formatter.result: print resource.render() data += resource.render() data += "</body>\n</html>" f = open('output/test.html', 'w') f.write(data.encode('utf-8'))
def get(self, keyword, state, format='json'):
    """Search ClinicalTrials.gov for *keyword* within a US state and
    accumulate one formatted record per matching study."""
    #set content-type
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    returnData = MutableString()
    returnData = ''
    try:
        #get XML from the service
        #"NA%3AUS%3A" is the URL-encoded "NA:US:" location prefix the API expects
        state = "%s%s" % ("NA%3AUS%3A", state)
        xml = minidom.parse(urllib.urlopen('%s&term=%s&state1=%s' % (AppConfig.clinicalTrialsAPIURL, keyword, state.upper())))
        if (xml):
            documentElms = xml.getElementsByTagName('clinical_study')
            if (documentElms):
                #one formatted record per study node
                for docNode in documentElms:
                    content = APIUtils.parseClinicalTrialsContent(format,docNode)
                    returnData += content
                #NOTE(review): returnData is accumulated but never written to
                #the response on the success path -- confirm against the
                #original file (this block may be truncated)
            else:
                logging.error('unable to retrieve content')
                self.response.out.write(Formatter.error(format, 'No results'))
                return
        else:
            logging.error('unable to retrieve content')
            self.response.out.write(Formatter.error(format, 'Unable to retrieve content from provider'))
            return
    except Exception, e:
        logging.error('GetClinicalTrialsHandler: unable to get health topics or parse XML: %s' % e)
        self.response.out.write(Formatter.error(format, 'Exception: %s' % (e)))
        return
def get(self, format='json', page=''):
    """Serve the Hacker News front-page listing, retrying without the
    page id when the paged request comes back empty."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    referer = os.environ.get('HTTP_REFERER', '')
    listing = APIContent.getHackerNewsPageContent(
        page, format, self.request.url, referer, self.request.remote_addr)
    #a stale/unknown page id yields an empty or 'None' payload
    if not listing or listing == 'None':
        listing = APIContent.getHackerNewsPageContent(
            '', format, self.request.url, referer, self.request.remote_addr)
    #analytics
    GAHelper.trackGARequests('/news', self.request.remote_addr, referer)
    if not listing:
        listing = ''
    self.response.out.write(
        Formatter.dataWrapper(format, listing, self.request.get('callback')))
def getHealthNews(lan='en',format='json'):
    """Fetch the MedlinePlus health-news RSS feed (English or Spanish) and
    return its items serialized as JSON fragments or <record> XML blocks."""
    returnData = MutableString()
    returnData = ''
    #choose the feed by language
    if (lan == 'es'):
        dom = minidom.parse(urllib.urlopen(AppConfig.medlinePlusHealthNewsSpanishURL))
    else:
        dom = minidom.parse(urllib.urlopen(AppConfig.medlinePlusHealthNewsEnglishURL))
    rssTitle = MutableString()
    rssDescription = MutableString()
    rssURL = MutableString()
    for node in dom.getElementsByTagName('item'):
        for item_node in node.childNodes:
            #NOTE(review): these resets run once per CHILD node, so a record
            #is emitted per child, not per item -- confirm intended
            rssTitle = ''
            rssDescription = ''
            rssURL = ''
            #item title (only TEXT_NODE children contribute)
            if (item_node.nodeName == "title"):
                for text_node in item_node.childNodes:
                    if (text_node.nodeType == node.TEXT_NODE):
                        rssTitle += text_node.nodeValue
            #description (no TEXT_NODE filter here, unlike title)
            if (item_node.nodeName == "description"):
                for text_node in item_node.childNodes:
                    rssDescription += text_node.nodeValue
            #link to URL
            if (item_node.nodeName == "link"):
                for text_node in item_node.childNodes:
                    rssURL += text_node.nodeValue
            if (format == 'json'):
                startTag = '{'
                endTag = '},'
                #cleanup: strip newlines/tabs and escape quotes so the text
                #can be embedded in a JSON string
                #rssTitle = re.sub("\"", "'", rssTitle)
                rssTitle = re.sub("\n", "", rssTitle)
                rssTitle = re.sub("\"", "\\\"", rssTitle)
                rssDescription = re.sub("\"", "\\\"", rssDescription)
                rssDescription = re.sub("\n", "", rssDescription)
                rssDescription = re.sub("\t", " ", rssDescription)
                rssDescription = re.sub("\r", "", rssDescription)
                if (len(rssDescription) > 0):
                    #[:-1] drops the trailing comma from the JSON fragment
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))[:-1]
            else:
                startTag = '<record>'
                endTag = '</record>'
                if (len(rssDescription) > 0):
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))
            #emit title/url/description only when present
            if (len(rssTitle) > 0):
                returnData += startTag + Formatter.data(format, 'title', rssTitle)
            if (len(rssURL) > 0):
                returnData += Formatter.data(format, 'url', rssURL)
            if (len(rssDescription) > 0 ):
                returnData += rssDescription + endTag
    return returnData
def get(self, format, id):
    """Return one Hacker News post, tagged with the requesting app id."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    #consumer/client application identifier ('app' wins over 'appid')
    appid = 'Unknown'
    params = self.request.GET
    if (params):
        for key in ('appid', 'app'):
            if (key in params):
                appid = params[key]
    referer = os.environ.get('HTTP_REFERER', '')
    post = APIContent.getHackerNewsPost(id, format, self.request.url, referer,
                                        self.request.remote_addr)
    #analytics
    GAHelper.trackGARequests('/post/%s' % (id), appid, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, post or '', self.request.get('callback')))
def generate(self):
    """Generate text with the Markov bot and send it to the console or a file.

    Reads limit/start/stop/repeat from the UI, validates them, then emits
    `repeat` walks separated by newline or space per the repeat selector.
    """
    # --- validate numeric fields ---
    try:
        count = self.limit.getNumber()
    except ValueError:
        self.cprint('Error: limit field is not a number')
        return
    start = self.start_string.getText()
    if start:
        #a non-empty start string must be a known chain key
        if start not in self.mbot.keys():
            self.cprint("Error: the start string '" + start + "' is not a key")
            return
    else:
        #if the start string is empty set it to none
        start = None
    stop = self.stop_string.getText()
    if not stop:
        stop = None
    try:
        repeat = self.repeat.getNumber()
    except ValueError:
        self.cprint('Error: repeat field is not a number')
        return
    if self.output_select.getSelectedButton() == self.output_select_file:
        fname = self.output_file_name.getText()
        if not fname:
            self.cprint(
                'Error: output to file selected but no output file name given'
            )
            return
        if fname.find('.') == -1:
            fname = fname + '.txt'
        #BUGFIX: 'with' guarantees the handle is closed even if walk() or
        #formatting raises (original leaked it and shadowed the 'file' builtin)
        with open(fname, mode='w') as out:
            self.cprint('Writing result to ' + fname)
            self._emit_walks(repeat, count, start, stop, out.write)
    else:
        self._emit_walks(repeat, count, start, stop, self.console.appendText)
        self.console.appendText('\n')

def _emit_walks(self, repeat, count, start, stop, write):
    """Run the bot *repeat* times and push each result through *write*,
    separating repeats by newline or space per the repeat_select choice."""
    newline_sep = (self.repeat_select.getSelectedButton()
                   == self.repeat_select_new_line)
    for i in range(repeat):
        tokens = self.mbot.walk(count, start, stop)
        result = Formatter.capped_sentences(tokens)
        if i != 0:
            write('\n' if newline_sep else ' ')
        write(result)
def constructFromFile(self):
    """Build a fresh board and populate it with ships parsed from the
    named .battlefield map file under the program's maps directory."""
    self.constructNewBoard()
    try:
        self.file = FileUtil.openForRead(os.path.join(FileUtil.getProgramDirectory(), "maps", self.mapName + ".battlefield"))
    except IOError:
        print "Error loading map"
    #NOTE(review): if the open above failed, self.file may be stale or unset
    #and the lines below still run; the handle is also never closed here --
    #confirm intended
    ships = Formatter.stripShips(Formatter.convertMatrix(self.file))
    self.board.addShips(ships)
def get(self, lan='en', format='json'):
    """Write localized MedlinePlus health-news headlines to the response."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    news = MutableString()
    news = APIUtils.getHealthNews(lan, format)
    #output to the browser
    self.response.out.write(Formatter.dataWrapper(format, news))
def get(self, query, zip, lan='en', type='keyword', format='json'):
    """Combined health lookup: resolve lat/lon/state from *zip* via the
    Google Maps geocoder before querying health services.

    NOTE(review): rpcTopic/rpcDiagnosis/rpcClinicalTrials and the *Data
    accumulators are initialized but unused in this visible portion --
    confirm whether the function was truncated upstream.
    """
    #set content-type
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    logging.debug('Get: Query=%s, Zip=%s, Language=%s, Type=%s, Format=%s' % (query,zip,lan,type,format))
    #data to output
    returnData = MutableString()
    returnData = ''
    rpcTopic = None
    rpcDiagnosis = None
    rpcClinicalTrials = None
    diagnosisData = MutableString()
    topicData = MutableString()
    clinicalTrialsData = MutableString()
    userState = None
    userLat = None
    userLon = None
    #request to get lat,lon,and state based on a zipcode
    try:
        urlStr = 'http://maps.google.com/maps/geo?q=%s&output=json&sensor=true&key=%s' % (zip,AppConfig.googleMapsAPIKey)
        jsonData = UtilLib.reverseGeo(urlStr)
        #lets see if have jsonData from reverse geocoding call
        if (jsonData):
            #Google Maps geocoder payload: Placemark[0] carries the point
            #coordinates (lon first) and the address hierarchy
            userLon = jsonData['Placemark'][0]['Point']['coordinates'][0]
            userLat = jsonData['Placemark'][0]['Point']['coordinates'][1]
            userState = jsonData['Placemark'][0]['AddressDetails']['Country']['AdministrativeArea']['AdministrativeAreaName']
        else:
            logging.error('Unable to retrieve geo information based on zipcode')
    except Exception,exTD1:
        logging.error('Errors getting geo data based on zipcode: %s' % exTD1)
def get(self, format='json'):
    """Serve the Hacker News RSS feed and log the hit to Analytics."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    feed = MutableString()
    feed = APIContent.getHackerNewsRSS(format)
    referer = os.environ.get('HTTP_REFERER', '')
    #analytics
    GAHelper.trackGARequests('/rss', self.request.remote_addr, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, feed, self.request.get('callback')))
def main():
    """Show CMSSW integration-build test logs as a simple HTML page."""
    page_style = """
    <style> table.header { background: #D1F9D1; color: #990033; } </style>
    <style> td { text-align: center; } </style>
    <style> td.cellbold { font-weight : bold; background-color: #FFB875; } </style>
    <style> body { color: #990033; link: navy; vlink: maroon; alink: tomato; background: #D1F9D1; } </style>
    """
    html_formatter = Formatter.SimpleHTMLFormatter(
        title="Test Logs from CMSSW Integration Build", style=page_style)
    viewer = LogViewer(html_formatter)
    viewer.showLogs()
    return
def main():
    """Show integration-build test logs with collapsible detail sections."""
    page_style = """
    <link rel="stylesheet" type="text/css" href="/SDT/html/intbld.css">\n
    <style type="text/css">
    .info { display: none; }
    .mainTable { table-layout:fixed; }
    .showOld { font-size: 18; text-decoration: underline; padding: 20px}
    td.left { text-align: left; }
    </style>
    <script type="text/javascript" src="/SDT/html/jsExt/jquery.js"></script>
    <script>
    function showHide(obj){
        var myname = obj.name;
        $(".detail[name='"+myname+"']").toggle();
        $(".info[name='"+myname+"']").toggle();
    }
    </script>
    """
    html_formatter = Formatter.SimpleHTMLFormatter(
        title="Test Logs from CMSSW Integration Build", style=page_style)
    viewer = LogViewer(html_formatter)
    viewer.showLogs()
    return
def post(self): #set content-type format = "json" formatParamName = "format" if (formatParamName in self.request.params): format = self.request.params[formatParamName] self.response.headers['Content-Type'] = Formatter.contentType(format) #get content textToTranslate = "" textParamName = "data" if (textParamName in self.request.params): textToTranslate = self.request.params[textParamName] if (textToTranslate == None or textToTranslate == ''): logging.error('GetTranslateHandler: invalid parameters') self.response.out.write(Formatter.error(format, 'Invalid parameters')) return #strip tags? stripTagsParam = "striphtml" if (stripTagsParam in self.request.params): if (self.request.params[stripTagsParam] == 'true'): textToTranslate = CharReplacementMap.remove_html_tags(textToTranslate) #language toLanguage = "es" lanParam = "language" if (textParamName in self.request.params): toLanguage = self.request.params[lanParam] #auto-detect language (blank) sourceLanguage = '' params = ({'langpair': '%s|%s' % (sourceLanguage, toLanguage), 'v': '1.0' }) returnData = MutableString() translatedText = '' for textToTranslate in self.getSplits(textToTranslate): params['q'] = textToTranslate resp = simplejson.load(urllib.urlopen('%s' % (AppConfig.googleTranslateAPIURL), data = urllib.urlencode(params))) try: translatedText += resp['responseData']['translatedText'] except Exception, e: logging.error('GetTranslateHandler: error(s) translating data: %s' % e) self.response.out.write(Formatter.error(format, 'Exception: %s' % (e))) return
def get(self, format='json', limit=1):
    """Serve the latest Hacker News items (up to *limit*)."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    referer = os.environ.get('HTTP_REFERER', '')
    latest = APIContent.getHackerNewsLatestContent(
        '', format, self.request.url, referer, self.request.remote_addr, limit)
    #analytics
    GAHelper.trackGARequests('/latest', self.request.remote_addr, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, latest or '', self.request.get('callback')))
def get(self, format, user):
    """Serve the stories submitted by *user*."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    referer = os.environ.get('HTTP_REFERER', '')
    submissions = APIContent.getHackerNewsSubmittedContent(
        user, format, self.request.url, referer, self.request.remote_addr)
    #analytics
    GAHelper.trackGARequests('/submitted/%s' % (user), self.request.remote_addr, referer)
    if not submissions:
        submissions = ''
    self.response.out.write(Formatter.dataWrapper(format, submissions))
def get(self, format='json'):
    """RSS endpoint: fetch the feed, count the hit, write the wrapped payload."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    payload = MutableString()
    payload = APIContent.getHackerNewsRSS(format)
    referer = ''
    if ('HTTP_REFERER' in os.environ):
        referer = os.environ['HTTP_REFERER']
    #analytics
    GAHelper.trackGARequests('/rss', self.request.remote_addr, referer)
    callback = self.request.get('callback')
    self.response.out.write(Formatter.dataWrapper(format, payload, callback))
def get(self, format='json', page=''):
    """Serve the 'newest' Hacker News listing, retrying without the page
    id when the paged request comes back empty."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    referer = os.environ.get('HTTP_REFERER', '')
    listing = APIContent.getHackerNewsNewestContent(
        page, format, self.request.url, referer, self.request.remote_addr)
    if not listing or listing == 'None':
        #stale/unknown page id -- fall back to the first page
        listing = APIContent.getHackerNewsNewestContent(
            '', format, self.request.url, referer, self.request.remote_addr)
    #analytics
    GAHelper.trackGARequests('/newest', self.request.remote_addr, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, listing, self.request.get('callback')))
def get(self, format, id):
    """Serve the nested comment tree for post *id*."""
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    referer = os.environ.get('HTTP_REFERER', '')
    comments = APIContent.getHackerNewsNestedComments(
        id, format, self.request.url, referer, self.request.remote_addr)
    #analytics
    GAHelper.trackGARequests('/nestedcomments/%s' % (id), self.request.remote_addr, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, comments or '', self.request.get('callback')))
def man(self, pattern):
    """Call viewer.sh to view man page.

    Looks *pattern* up in the index database (exact, then std::-qualified,
    then substring), caches the groff page if needed, and forks a viewer.
    Returns the child pid.
    """
    try:
        os.makedirs(Environ.man_dir)
    except OSError:
        #directory already exists (bare except narrowed to OSError)
        pass
    avail = os.listdir(Environ.man_dir)
    if not os.path.exists(Environ.index_db):
        raise RuntimeError("can't find index.db")
    conn = sqlite3.connect(Environ.index_db)
    cursor = conn.cursor()
    try:
        #SECURITY/BUGFIX: bind *pattern* as a query parameter instead of
        #%-formatting it into the SQL text (quotes in pattern broke the
        #query and allowed injection); conn is now closed on every path
        try:
            page_name, url = cursor.execute(
                "SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)",
                (pattern,)).fetchone()
        except TypeError:
            # Try standard library
            try:
                page_name, url = cursor.execute(
                    "SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)",
                    ("std::" + pattern,)).fetchone()
            except TypeError:
                try:
                    page_name, url = cursor.execute(
                        "SELECT name,url FROM CPPMAN WHERE name LIKE ? ORDER BY LENGTH(name)",
                        ("%" + pattern + "%",)).fetchone()
                except TypeError:
                    raise RuntimeError("No manual entry for " + pattern)
    finally:
        conn.close()
    page_name = page_name.replace("/", "_")
    #(re)build the cached groff page when missing or a refresh was forced
    if page_name + ".3.gz" not in avail or self.forced:
        self.cache_man_page(url, page_name)
        self.update_mandb()
    pager = Environ.pager if sys.stdout.isatty() else Environ.renderer
    # Call viewer in a forked child; parent returns its pid
    pid = os.fork()
    if pid == 0:
        os.execl(
            "/bin/sh",
            "/bin/sh",
            pager,
            Environ.man_dir + page_name + ".3.gz",
            str(Formatter.get_width()),
            Environ.pager_config,
            page_name,
        )
    return pid
def get(self,keyword,format='json'): #set content-type self.response.headers['Content-Type'] = Formatter.contentType(format) returnData = MutableString() try: #get XML from the service xml = minidom.parse(urllib.urlopen('%s?db=healthTopics&term=%s' % (AppConfig.medlinePlusHealthTopicURL, keyword))) if (xml): documentElms = xml.getElementsByTagName('document') if (documentElms): #grab two highest ranked content content1 = '' content2 = '' content3 = '' contentNode1 = documentElms[0] content1 = APIUtils.parseHealthTopicContent(format,contentNode1) if (len(documentElms) > 1): contentNode2 = documentElms[1] content2 = APIUtils.parseHealthTopicContent(format,contentNode2) if (len(documentElms) > 2): contentNode3 = documentElms[2] content3 = APIUtils.parseHealthTopicContent(format,contentNode3) returnData = "%s%s%s" % (content1, content2, content3) else: logging.error('unable to retrieve content') self.response.out.write(Formatter.error(format, 'No results')) return else: logging.error('unable to retrieve content') self.response.out.write(Formatter.error(format, 'Unable to retrieve content from provider')) return except Exception, e: logging.error('GetHealthTopicsHandler: unable to get health topics or parse XML: %s' % e) self.response.out.write(Formatter.error(format, 'Exception: %s' % (e))) return
def get(self,code, lan = 'en', format='json'): #set content-type self.response.headers['Content-Type'] = Formatter.contentType(format) returnData = MutableString() try: #webservice conversion if (lan == 'es'): lan = 'sp' #get XML from the service xml = minidom.parse(urllib.urlopen('%s&mainSearchCriteria.v.c=%s&informationRecipient.languageCode.c=%s' % (AppConfig.medlinePlusHealthDiagnosisICD9URL, code, lan))) if (xml): documentElms = xml.getElementsByTagName('entry') if (documentElms): #grab two highest ranked content content1 = '' content2 = '' contentNode1 = documentElms[0] content1 = APIUtils.parseHealthDiagnosisICD9Content(format,contentNode1) if (len(documentElms) > 0): contentNode2 = documentElms[1] content2 = APIUtils.parseHealthDiagnosisICD9Content(format,contentNode2) returnData = "%s%s" % (content1, content2) else: logging.error('unable to retrieve content') self.response.out.write(Formatter.error(format, 'No results')) return else: logging.error('unable to retrieve content') self.response.out.write(Formatter.error(format, 'Unable to retrieve content from provider')) return except Exception, e: logging.error('GetHealthTopicsHandler: unable to get health topics or parse XML: %s' % e) self.response.out.write(Formatter.error(format, 'Exception: %s' % (e))) return
def main():
    """Render CMSSW integration-build scram info as HTML."""
    css = """
    <link rel="stylesheet" type="text/css" href="http://cern.ch/cms-sdt/intbld.css">\n
    """
    page = Formatter.SimpleHTMLFormatter(
        title="CMSSW Integration Build Scram Info", style=css)
    analyzer = ScramAnalyzer(page)
    analyzer.showLog()
def get(self, format, user):
    """Serve the submissions of *user*, honoring an optional JSONP callback."""
    self.response.headers["Content-Type"] = Formatter.contentType(format)
    referer = os.environ.get("HTTP_REFERER", "")
    payload = APIContent.getHackerNewsSubmittedContent(
        user, format, self.request.url, referer, self.request.remote_addr
    )
    # analytics
    GAHelper.trackGARequests("/submitted/%s" % (user), self.request.remote_addr, referer)
    self.response.out.write(
        Formatter.dataWrapper(format, payload or "", self.request.get("callback"))
    )
def main():
    """Render build QA info as HTML using the site-configured stylesheet path."""
    css = """
    <link rel="stylesheet" type="text/css" href="%s/intbld.css">\n
    """ % (config.siteInfo['HtmlPath'])
    page = Formatter.SimpleHTMLFormatter(
        title="CMSSW Integration Build Scram Info", style=css)
    display = QADisplay(page)
    display.showInfo()
def man(self, pattern):
    """Call viewer.sh to view man page.

    Resolves *pattern* against the index database (exact, std::-qualified,
    then substring), refreshes the cached page if needed, then forks the
    pager/renderer. Returns the child pid.
    """
    try:
        os.makedirs(Environ.man_dir)
    except OSError:
        #directory already exists (bare except narrowed to OSError)
        pass
    avail = os.listdir(Environ.man_dir)
    if not os.path.exists(Environ.index_db):
        raise RuntimeError("can't find index.db")
    conn = sqlite3.connect(Environ.index_db)
    cursor = conn.cursor()
    try:
        #SECURITY/BUGFIX: use bound parameters instead of %-formatting the
        #user-supplied pattern into the SQL text; quotes previously broke
        #the query and permitted injection
        try:
            page_name, url = cursor.execute(
                'SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)',
                (pattern,)).fetchone()
        except TypeError:
            # Try standard library
            try:
                page_name, url = cursor.execute(
                    'SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)',
                    ('std::' + pattern,)).fetchone()
            except TypeError:
                try:
                    page_name, url = cursor.execute(
                        'SELECT name,url FROM CPPMAN WHERE name LIKE ? ORDER BY LENGTH(name)',
                        ('%' + pattern + '%',)).fetchone()
                except TypeError:
                    raise RuntimeError('No manual entry for ' + pattern)
    finally:
        conn.close()
    page_name = page_name.replace('/', '_')
    #(re)build the cached groff page when missing or a refresh was forced
    if page_name + '.3.gz' not in avail or self.forced:
        self.cache_man_page(url, page_name)
        self.update_mandb()
    pager = Environ.pager if sys.stdout.isatty() else Environ.renderer
    # Call viewer in a forked child; parent returns its pid
    pid = os.fork()
    if pid == 0:
        os.execl('/bin/sh', '/bin/sh', pager,
                 Environ.man_dir + page_name + '.3.gz',
                 str(Formatter.get_width()), Environ.pager_config,
                 page_name)
    return pid
def get(self, format="json", page=""):
    """Serve the 'best' Hacker News listing, falling back to the first page
    when the paged request yields nothing."""
    self.response.headers["Content-Type"] = Formatter.contentType(format)
    referer = os.environ.get("HTTP_REFERER", "")
    best = APIContent.getHackerNewsBestContent(
        page, format, self.request.url, referer, self.request.remote_addr
    )
    if not best or best == "None":
        # retry without the page id
        best = APIContent.getHackerNewsBestContent(
            "", format, self.request.url, referer, self.request.remote_addr
        )
    # analytics
    GAHelper.trackGARequests("/best", self.request.remote_addr, referer)
    self.response.out.write(Formatter.dataWrapper(format, best))
def man(self, pattern):
    """Render a man page for *pattern* via the pager/renderer subprocess.

    Returns a dict {'page': page_name, 'out': captured stdout of the viewer}.
    """
    try:
        os.makedirs(Environ.man_dir)
    except OSError:
        #directory already exists (bare except narrowed to OSError)
        pass
    if not os.path.exists(Environ.index_db):
        raise RuntimeError("can't find index.db")
    conn = sqlite3.connect(Environ.index_db)
    cursor = conn.cursor()
    try:
        #SECURITY/BUGFIX: bind *pattern* as a parameter instead of
        #%-formatting it into the SQL (quotes broke the query / injection)
        try:
            page_name, url = cursor.execute(
                'SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)',
                (pattern,)).fetchone()
        except TypeError:
            # Try standard library
            try:
                page_name, url = cursor.execute(
                    'SELECT name,url FROM CPPMAN WHERE name = ? ORDER BY LENGTH(name)',
                    ('std::' + pattern,)).fetchone()
            except TypeError:
                try:
                    page_name, url = cursor.execute(
                        'SELECT name,url FROM CPPMAN WHERE name LIKE ? ORDER BY LENGTH(name)',
                        ('%' + pattern + '%',)).fetchone()
                except TypeError:
                    raise RuntimeError('No manual entry for ' + pattern)
    finally:
        conn.close()
    page_name = page_name.replace('/', '_')
    #use the interactive pager only when stdout is genuinely a tty
    pager = Environ.pager if hasattr(sys.stdout, 'isatty') and sys.stdout.isatty() else Environ.renderer
    p = subprocess.Popen([pager, page_name + '.3.gz',
                          str(Formatter.get_width()), Environ.pager_config,
                          page_name], stdout=subprocess.PIPE)
    return {'page': page_name, 'out': p.stdout.read()}
def cache_man_page(self, url, name=None):
    """Fetch *url*, convert it to groff, and store it as a gzipped man page.

    When *name* is omitted it is derived from the downloaded page itself.
    An existing cached page is kept unless the forced flag is set.
    """
    raw = urllib.urlopen(url).read()
    groff = Formatter.cplusplus2groff(raw)
    if not name:
        name = self.extract_name(raw).replace('/', '_')
    #skip when already cached, unless a refresh was forced
    target = Environ.man_dir + name + '.3.gz'
    if os.path.exists(target) and not self.forced:
        return
    out = gzip.open(target, 'w')
    out.write(groff)
    out.close()
def CallFormat(self, TrendReports):
    """Reformat raw trend reports, compute statistics, and save the result."""
    #Formatter.Reformat yields the actual values plus nominal/tolerance
    #measurements as a pandas DataFrame
    self.FormattedObject = Formatter.Reformat(TrendReports)
    #StatsCalc.DataStatistics appends statistical columns to that frame
    self.DataAndStats = StatsCalc.DataStatistics(self.FormattedObject)
    #CompletedData = actual values, nominal/tolerance, and stats combined
    unsaved = self.DataAndStats.CompletedData
    self.CallSave(unsaved)
def index():
    """Login page: scrape the user's course schedule, build an iCal file,
    and e-mail it to the address supplied in the form."""
    import logging  # local import: avoids touching the file header
    username = None
    password = None
    email = None
    form = loginForm()
    if form.validate_on_submit():
        try:
            course_info = GetHTML.htmlHandle(form.username.data, form.password.data)
            timesold = course_info[2]
            names = course_info[1]
            course_info[2] = Formatter.formatTimes(course_info[2])
            #anchor dates for each weekday in the target week
            sdates = {'M': '26', 'T': '20', 'W': '21', 'R': '22', 'F': '23'}
            #meeting-time count per course name
            snames = {}
            for i in range(len(timesold)):
                snames[names[i]] = len(timesold[i])
            #flatten: one entry per meeting time, keyed by running index
            dnames = {}
            k = 0
            for j in range(7):
                for i in range(snames[names[j]]):
                    dnames[k] = names[j]
                    k += 1
            filename = 'OliniCalendar.ics'
            iCalCreation.iCalWrite(course_info[2], "201501", "20150430T000000", sdates, dnames, filename)
            email = form.email.data
            #BUGFIX: close the handle before os.remove -- deleting a file
            #that is still open fails on some platforms
            ical = open('OliniCalendar.ics', 'r')
            try:
                Emailer.iCalCreator(email, ical)
            finally:
                ical.close()
            os.remove('OliniCalendar.ics')
            form.username.data = ''
            form.password.data = ''
            form.email.data = ''
        except Exception:
            #BUGFIX: the original bare except evaluated the string "Oops"
            #(a no-op) and silently swallowed every error; at least log it
            logging.exception('index: failed to build/send calendar')
    return render_template('index.html', form=form)
def disassemble(self, rv, formatter=None):
    """Disassemble the cubin instructions in this kernel.

    rv        -- output sink passed through to the formatter callbacks.
    formatter -- optional Formatter; a fresh one is created per call.
    """
    #BUGFIX: the default was "formatter=Formatter()", a single instance
    #created once at def time and shared by every call; build a fresh one
    if formatter is None:
        formatter = Formatter()
    # Phase 1 -- decode instructions (1 or 2 words each; the low bit of the
    # first word selects the 2-word encoding)
    ptr = 0
    disa = Disassembler()
    instructions = []
    while ptr < len(self.bincode):
        base = ptr*4
        inst = [self.bincode[ptr]]
        ptr += 1
        if inst[0] & 1:
            inst.append(self.bincode[ptr])
            ptr += 1
        instructions.append(disa.decode(base, inst))
    # Phase 2 -- collect immediate code-target addresses and name them in
    # ascending address order
    label_set = set()
    for i in instructions:
        for o in i.dst_operands:
            if o.indirection == OP_INDIRECTION_CODE and o.source == OP_SOURCE_IMMEDIATE:
                label_set.add(o.value)
    labels = list(label_set)
    labels.sort()
    label_map = dict([(l, "label%i" % x) for x,l in enumerate(labels)])
    # Phase 3 -- fill in labels in program arguments
    for i in instructions:
        for o in i.dst_operands:
            if o.indirection == OP_INDIRECTION_CODE and o.source == OP_SOURCE_IMMEDIATE:
                o.label = label_map[o.value]
    # Phase 4 -- print each instruction, preceded by its label when targeted
    for i in instructions:
        formatter.address(rv, i.address)
        formatter.bincode(rv, (" ".join(["%08x" % x for x in i.inst])))
        if i.address in label_map:
            formatter.label(rv, label_map[i.address])
        i.dump(rv, formatter)
        formatter.newline(rv)
    # Phase 5 -- print constant segments
    for seg in self.const:
        formatter.const_hdr(rv, seg.segname, seg.segnum, seg.offset, seg.bytes)
        formatter.const_data(rv, seg.mem)
def _rule_test_helper(self, rule):
    """Run *rule* against its spreadsheet fixture and compare the formatted
    output workbook with the expected workbook."""
    rule_path = os.path.join(self._locate_dir('rules'), rule)
    data_dir = self._locate_dir('data')
    input_fname = rule + '.xlsx'
    input_path = os.path.join(data_dir, input_fname)
    input_fname_prefix, input_type = os.path.splitext(input_fname)
    actual_path = os.path.join(data_dir, input_fname_prefix + '_formatted.xlsx')
    expected_path = os.path.join(data_dir, input_fname_prefix + '_expected.xlsx')
    Formatter(FormatterOptions(rule_path, input_path)).run()
    actual_file_data = SpreadsheetReader(actual_path).get_rows()
    expected_file_data = SpreadsheetReader(expected_path).get_rows()
    try:
        self.assertEqual(actual_file_data, expected_file_data)
    except Exception as e:
        print('Expected:\n', expected_file_data)
        print('Actual:\n', actual_file_data)
        print(e)
        # BUGFIX: re-raise so the test actually fails on a mismatch; the
        # original printed the assertion error and then swallowed it
        raise

def _locate_dir(self, leaf):
    """Return 'test/<leaf>' when running from the repo root, else '<leaf>'."""
    if os.path.exists("test") and os.path.isdir("test"):
        candidate = os.path.join("test", leaf)
        if os.path.exists(candidate) and os.path.isdir(candidate):
            return candidate
    return leaf
'''A terminal program demonstrating the use of Markov Bot Py'''
import Formatter
from MarkovBot import MarkovBot
import Tokenizer

corpus_file = 'seuss.txt'

# tokenize the corpus (group-aware) and build the bot from it
corpus_tokens = Tokenizer.group_aware(Tokenizer.read_all(corpus_file))
bot = MarkovBot(corpus_tokens)

print(
    'Five random Dr. Seuss sentences that start with sam and (most likely) end in a period:'
)
for _ in range(5):
    walk = bot.walk(20, start='sam', stop='.')
    print(Formatter.capped_sentences(walk))
def query(self, start_response, req, path, client, format="", alt_resolver=None,
          do_dnssec=False, tcp=False, cd=False, edns_size=default_edns_size,
          reverse=False):
    """Answer one DNS looking-glass request over WSGI.

    path must starts with a /, then the domain name then an
    (optional) / followed by the QTYPE

    Parameters (reconstructed from their use below -- confirm against callers):
      start_response -- WSGI start_response callable
      req            -- request object; only req.accept is used here
      path           -- "/domain[/qtype[/qclass]]"
      client         -- textual IP address of the client (rate limiting)
      format         -- "", "HTML", "TEXT"/"TXT", "JSON", "ZONE" or "XML";
                        empty means negotiate from the Accept header
      alt_resolver   -- optional alternative name server to query
      do_dnssec      -- request DNSSEC data (DO bit)
      tcp            -- query over TCP instead of UDP
      cd             -- set the Checking Disabled bit
      edns_size      -- EDNS0 payload size, or None to disable EDNS
      reverse        -- treat the path component as an IP address (PTR)

    Returns a one-element list holding the response body (WSGI convention);
    the HTTP status line is emitted through send_response().
    """
    if not path.startswith('/'):
        raise Exception("Internal error: no / at the beginning of %s" % path)
    plaintype = 'text/plain; charset=%s' % self.encoding
    # ---- Output format negotiation ------------------------------------
    if not format:
        # No explicit format requested: pick the best Accept-header match.
        mformat = req.accept.best_match(['text/html', 'application/xml', 'application/json', 'text/dns', 'text/plain'])
        if mformat == "text/html":
            format = "HTML"
        elif mformat == "application/xml":
            format = "XML"
        elif mformat == "application/json":
            format = "JSON"
        elif mformat == "text/dns":
            format = "ZONE"
        elif mformat == "text/plain":
            format = "TEXT"
        if not mformat:
            output = "No suitable output format found\n"
            send_response(start_response, '400 Bad request', output, plaintype)
            return [output]
        mtype = '%s; charset=%s' % (mformat, self.encoding)
    else:
        if format == "TEXT" or format == "TXT":
            format = "TEXT"
            mtype = 'text/plain; charset=%s' % self.encoding
        elif format == "HTML":
            mtype = 'text/html; charset=%s' % self.encoding
        elif format == "JSON":
            mtype = 'application/json'
        elif format == "ZONE":
            mtype = 'text/dns' # RFC 4027
            # TODO: application/dns, "detached" DNS (binary), see issue #20
        elif format == "XML":
            mtype = 'application/xml'
        else:
            output = "Unsupported format \"%s\"\n" % format
            send_response(start_response, '400 Bad request', output, plaintype)
            return [output]
    # ---- Rate limiting, one leaky bucket per client prefix ------------
    ip_client = netaddr.IPAddress(client)
    if ip_client.version == 4:
        ip_prefix = netaddr.IPNetwork(client + "/28")
    elif ip_client.version == 6:
        ip_prefix = netaddr.IPNetwork(client + "/64")
    else:
        output = "Unsupported address family \"%s\"\n" % ip_client.version
        send_response(start_response, '400 Unknown IP version', output, plaintype)
        return [output]
    if ip_client not in self.whitelist:
        if self.buckets.has_key(ip_prefix.cidr):
            if self.buckets[ip_prefix.cidr].full():
                # 429 registered by RFC 6585 in april 2012
                # http://www.iana.org/assignments/http-status-codes
                # Already common
                # http://www.flickr.com/photos/girliemac/6509400997/in/set-72157628409467125
                status = '429 Too many requests'
                output = "%s sent too many requests" % client # TODO: better message
                send_response(start_response, status, output, plaintype)
                return [output]
            else:
                self.buckets[ip_prefix.cidr].add(1)
        else:
            self.buckets[ip_prefix.cidr] = LeakyBucket(size=self.bucket_size)
    # ---- Parse "/domain[/qtype[/qclass]]" -----------------------------
    args = path[1:]
    slashpos = args.find('/')
    if slashpos == -1:
        if reverse:
            domain = str(dns.reversename.from_address(args))
            qtype = 'PTR'
        else:
            domain = args
            qtype = 'ADDR'   # pseudo-qtype: merged A + AAAA, see below
        qclass = 'IN'
    else:
        if reverse:
            domain = str(dns.reversename.from_address(args[:slashpos]))
        else:
            domain = args[:slashpos]
        nextslashpos = args.find('/', slashpos+1)
        if nextslashpos == -1:
            requested_qtype = args[slashpos+1:].upper()
            qclass = 'IN'
        else:
            requested_qtype = args[slashpos+1:nextslashpos].upper()
            qclass = args[nextslashpos+1:].upper()
        # We do not test if the QTYPE exists. If it doesn't
        # dnspython will raise an exception. The formatter will
        # have to deal with the various records.
        if requested_qtype == "":
            if reverse:
                qtype = 'PTR'
            else:
                qtype = 'ADDR'
        else:
            qtype = requested_qtype
        if reverse and qtype != 'PTR':
            output = "You cannot ask for a query type other than PTR with reverse queries\n"
            send_response(start_response, '400 Bad qtype with reverse', output, plaintype)
            return [output]
    # Pseudo-qtype ADDR is handled specially later
    if not domain.endswith('.'):
        domain += '.'
    if domain == 'root.':
        domain = '.'   # convenience alias for the root zone
    domain = unicode(domain, self.encoding)
    for forbidden in self.forbidden_suffixes:
        if domain.endswith(forbidden):
            output = "You cannot query local domain %s" % forbidden
            send_response(start_response, '403 Local domain is private', output, plaintype)
            return [output]
    # IDN handling: send the punycoded (ASCII) form to the resolver.
    punycode_domain = punycode_of(domain)
    if punycode_domain != domain:
        qdomain = punycode_domain.encode("US-ASCII")
    else:
        qdomain = domain.encode("US-ASCII")
    # ---- Resolve, format, and map failures to HTTP statuses -----------
    try:
        if format == "HTML":
            formatter = Formatter.HtmlFormatter(domain)
        elif format == "TEXT":
            formatter = Formatter.TextFormatter(domain)
        elif format == "JSON":
            formatter = Formatter.JsonFormatter(domain)
        elif format == "ZONE":
            formatter = Formatter.ZoneFormatter(domain)
        elif format == "XML":
            formatter = Formatter.XmlFormatter(domain)
        self.resolver.reset()
        if edns_size is None:
            self.resolver.set_edns(version=-1)   # disable EDNS entirely
        else:
            if do_dnssec:
                self.resolver.set_edns(payload=edns_size, dnssec=True)
            else:
                self.resolver.set_edns(payload=edns_size)
        if alt_resolver:
            self.resolver.set_nameservers([alt_resolver,])
        query_start = datetime.now()
        if qtype != "ADDR":
            answer = self.resolver.query(qdomain, qtype, qclass, tcp=tcp, cd=cd)
        else:
            # ADDR: issue A and AAAA separately and merge the rrsets.
            try:
                answer = self.resolver.query(qdomain, "A", tcp=tcp, cd=cd)
            except dns.resolver.NoAnswer:
                answer = None
            try:
                answer_bis = self.resolver.query(qdomain, "AAAA", tcp=tcp, cd=cd)
                if answer_bis is not None:
                    # NOTE(review): if the A query raised NoAnswer above,
                    # `answer` is None here and this append would raise
                    # AttributeError for AAAA-only names -- looks like a
                    # latent bug; confirm against upstream.
                    for rrset in answer_bis.answer:
                        answer.answer.append(rrset)
            except dns.resolver.NoAnswer:
                pass
            # TODO: what if flags are different with A and AAAA? (Should not happen)
        if answer is None:
            # Nothing resolved: let the formatter render an empty answer.
            query_end = datetime.now()
            self.delay = query_end - query_start
            formatter.format(None, qtype, qclass, 0, self)
            output = formatter.result(self)
            send_response(start_response, '200 OK', output, mtype)
            return [output]
        query_end = datetime.now()
        self.delay = query_end - query_start
        formatter.format(answer, qtype, qclass, answer.flags, self)
        output = formatter.result(self)
        send_response(start_response, '200 OK', output, mtype)
    except Resolver.UnknownRRtype:
        output = "Record type %s does not exist\n" % qtype
        output = output.encode(self.encoding)
        send_response(start_response, '400 Unknown record type', output, plaintype)
    except Resolver.UnknownClass:
        output = "Class %s does not exist\n" % qclass
        output = output.encode(self.encoding)
        send_response(start_response, '400 Unknown class', output, plaintype)
    except Resolver.NoSuchDomainName:
        output = "Domain %s does not exist\n" % domain
        output = output.encode(self.encoding)
        # TODO send back in the requested format (see issue #11)
        send_response(start_response, '404 No such domain', output, plaintype)
    except Resolver.Refused:
        output = "Refusal to answer for all name servers for %s\n" % domain
        output = output.encode(self.encoding)
        send_response(start_response, '403 Refused', output, plaintype)
    except Resolver.Servfail:
        output = "Server failure for all name servers for %s (may be a DNSSEC validation error)\n" % domain
        output = output.encode(self.encoding)
        send_response(start_response, '504 Servfail', output, plaintype)
    except Resolver.Timeout:
        output = "No server replies for domain %s\n" % domain
        output = output.encode(self.encoding)
        # TODO issue #11. In that case, do not serialize output.
        send_response(start_response, '504 Timeout', output, "text/plain")
    except Resolver.NoPositiveAnswer:
        output = "No server replies for domain %s\n" % domain
        output = output.encode(self.encoding)
        # TODO issue #11
        send_response(start_response, '504 No positive answer', output, "text/plain")
    except Resolver.UnknownError as code:
        output = "Unknown error %s resolving %s\n" % (dns.rcode.to_text(int(str(code))), domain)
        output = output.encode(self.encoding)
        # TODO issue #11
        send_response(start_response, '500 Unknown server error', output, plaintype)
    return [output]
def getTopicData(self, xml, format, lan='en'):
    """Extract up to the three highest-ranked health-topic records.

    Args:
        xml: parsed DOM of the provider response (or a falsy value when the
            fetch failed).
        format: 'json' or 'xml'; passed through to the per-record parser.
        lan: target language code. Currently unused -- the disabled
            Google-Translate experiment that consumed it was dead code
            (a large inert triple-quoted block) and has been removed.

    Returns:
        The concatenated record payloads. For JSON the trailing ',' emitted
        by the per-record parser is stripped so the result can be embedded
        in a JSON array. On failure, a Formatter error payload.
    """
    if not xml:
        logging.error('unable to retrieve health topic content')
        return Formatter.data(format, 'error', 'Unable to retrieve content from provider')
    documentElms = xml.getElementsByTagName('document')
    if not documentElms:
        logging.error('unable to retrieve health topic content')
        return Formatter.data(format, 'error', 'No results')
    # Parse the (up to) three highest-ranked documents; missing slots
    # simply contribute nothing, exactly like the old empty strings.
    parts = [APIUtils.parseHealthTopicContent(format, node)
             for node in documentElms[:3]]
    returnData = "".join(["%s" % p for p in parts])
    if format == 'json':
        # Drop the per-record trailing comma.
        return returnData[:-1]
    return returnData
import numpy as np from keras.callbacks import TensorBoard, ModelCheckpoint from keras.layers import Dense, LSTM, Merge from keras.models import Sequential, model_from_json from keras.optimizers import RMSprop import keras bin_count = 8 # no of bins BATCH_SIZE = 1000 import Formatter period_sample = Formatter.PeriodSample(60) INPUT_SIZE = 24 def createModel(train_period, target): cost = RMSprop(lr=0.001, rho=0.9, epsilon=None, decay=0.0) EMA_lstm = Sequential() EMA_lstm.add( LSTM(INPUT_SIZE, input_shape=(INPUT_SIZE, 1), batch_input_shape=(BATCH_SIZE, INPUT_SIZE, 1), dropout=0.2, return_sequences=False)) K_lstm = Sequential() K_lstm.add( LSTM(INPUT_SIZE, input_shape=(INPUT_SIZE, 1), batch_input_shape=(BATCH_SIZE, INPUT_SIZE, 1),
def get(self, zip=None, format='json'):
    """HTTP handler: list hospitals near a ZIP code, sorted by distance.

    Geocodes *zip* through the Google Maps API, runs a 100-mile geobox
    query against HospitalInfo, and writes the formatted records to the
    response. *format* is 'json' or anything-else-means-XML.
    """
    #set content-type
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    #validate zip
    if (not zip or zip == ''):
        self.response.out.write(Formatter.error(format, 'invalid zipcode'))
        return
    #get lat/lon from zipcode
    urlStr = 'http://maps.google.com/maps/geo?q=%s&output=json&sensor=true&key=%s' % (zip,AppConfig.googleMapsAPIKey)
    jsonData = UtilLib.reverseGeo(urlStr)
    #lets see if have jsonData from reverse geocoding call
    if (jsonData):
        # Google returns coordinates as [lon, lat].
        lon = jsonData['Placemark'][0]['Point']['coordinates'][0]
        lat = jsonData['Placemark'][0]['Point']['coordinates'][1]
        logging.debug("GPS Coordinates: %s,%s" % (lat,lon))
        gb = geobox2.Geobox(lat, lon)
        #scope 100 miles
        box = gb.search_geobox(100)
        query = HospitalInfo.all().filter("geoboxes IN", [box])
        #get 100 records
        results = query.fetch(100)
        # Key surviving rows by distance so iteration below is nearest-first.
        # NOTE(review): two hospitals at exactly the same distance would
        # overwrite each other in this dict -- confirm acceptable.
        db_recs = {}
        for result in results:
            distance = UtilLib.getEarthDistance(lat, lon, result.location.lat, result.location.lon)
            if (distance and distance > 0):
                db_recs[distance] = result
        returnData = MutableString()
        returnData = ''
        #output this badboy
        if (db_recs and len(db_recs) > 0):
            for key in sorted(db_recs.iterkeys()):
                p = db_recs[key]
                if (format == 'json'):
                    startTag = '{'
                    endTag = '},'
                    # JSON: strip the trailing comma from the distance field.
                    distance = Formatter.data(format, 'distance', '%s %s' % (str(math.ceil(key)), "mi"))[:-1]#'%.2g %s' % (key, "mi"))[:-1]
                else:
                    startTag = '<record>'
                    endTag = '</record>'
                    distance = Formatter.data(format, 'distance', '%s %s' % (str(math.ceil(key)), "mi"))
                #build the string
                # NOTE(review): .replace('&', '&') below is a no-op --
                # presumably this was .replace('&', '&amp;') (XML escaping)
                # before the source was mangled; verify against upstream.
                returnData = "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" % (returnData,startTag,
                    Formatter.data(format, 'hospital_id', p.hospital_id),
                    Formatter.data(format, 'name', p.name.replace('&', '&')),
                    Formatter.data(format, 'address', p.address.replace('&', '&')),
                    Formatter.data(format, 'city', p.city),
                    Formatter.data(format, 'state', p.state),
                    Formatter.data(format, 'zip_code', p.zip_code),
                    Formatter.data(format, 'county', p.county.replace('&', '&')),
                    Formatter.data(format, 'phone', p.phone),
                    Formatter.data(format, 'hospital_type', p.hospital_type.replace('&', '&')),
                    Formatter.data(format, 'hospital_owner', p.hospital_owner.replace('&', '&')),
                    Formatter.data(format, 'emergency_service', p.emergency_service),
                    Formatter.data(format, 'geo_location', p.location),
                    distance,
                    endTag
                )
        #output to the browser
        self.response.out.write(Formatter.dataWrapper(format, returnData))
    else:
        self.response.out.write(Formatter.error(format, 'Unable to perform reverse geoencoding'))
        return
def getHackerNewsRSS(format='json'):
    """Fetch the Hacker News RSS feed and re-emit it as JSON or XML records.

    NOTE(review): the indentation of this function was reconstructed from a
    collapsed source. The reconstruction assumes each <item> child element
    contributes its own fragment (start tag + title, then url, then
    description + end tag) which concatenate into one record across loop
    iterations -- confirm against the original file.
    """
    returnData = MutableString()
    returnData = ''
    dom = minidom.parse(urllib.urlopen(AppConfig.hackerNewsRSSFeed))
    rssTitle = MutableString()
    rssDescription = MutableString()
    rssURL = MutableString()
    for node in dom.getElementsByTagName('item'):
        for item_node in node.childNodes:
            # Reset per child: each child element sets at most one of the
            # three fields, and the guarded appends below emit only the
            # fragment that belongs to that field.
            rssTitle = ''
            rssDescription = ''
            rssURL = ''
            #item title
            if (item_node.nodeName == "title"):
                for text_node in item_node.childNodes:
                    if (text_node.nodeType == node.TEXT_NODE):
                        rssTitle += text_node.nodeValue
            #description
            if (item_node.nodeName == "description"):
                for text_node in item_node.childNodes:
                    rssDescription += text_node.nodeValue
            #link to URL
            if (item_node.nodeName == "link"):
                for text_node in item_node.childNodes:
                    rssURL += text_node.nodeValue
            if (format == 'json'):
                startTag = '{'
                endTag = '},'
                #cleanup: escape quotes and strip whitespace control chars
                #rssTitle = re.sub("\"", "'", rssTitle)
                rssTitle = re.sub("\n", "", rssTitle)
                rssTitle = re.sub("\"", "\\\"", rssTitle)
                rssDescription = re.sub("\"", "\\\"", rssDescription)
                rssDescription = re.sub("\n", "", rssDescription)
                rssDescription = re.sub("\t", " ", rssDescription)
                rssDescription = re.sub("\r", "", rssDescription)
                if (len(rssDescription) > 0):
                    # [:-1] strips the field's trailing comma.
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))[:-1]
            else:
                startTag = '<record>'
                endTag = '</record>'
                if (len(rssTitle) > 0):
                    rssTitle = escape(removeNonAscii(rssTitle))
                if (len(rssURL) > 0):
                    rssURL = escape(rssURL)
                if (len(rssDescription) > 0):
                    rssDescription = Formatter.data(format, 'description', escape(rssDescription))
            # Emit whichever fragment this child produced.
            if (len(rssTitle) > 0):
                returnData += startTag + Formatter.data(format, 'title', rssTitle)
            if (len(rssURL) > 0):
                returnData += Formatter.data(format, 'url', rssURL)
            if (len(rssDescription) > 0 ):
                returnData += rssDescription + endTag
    return returnData
def parseHealthTopicContent(format,node):
    """Render one provider <document> node as a JSON or XML record string.

    Pulls the "title" and "FullSummary" content children (the "altTitle"
    extraction is disabled below), escapes them for the target *format*,
    and wraps them in '{...},' (JSON) or '<record>...</record>' (XML).
    Returns None implicitly when *node* is falsy.
    """
    if (node):
        contentTitle = MutableString()
        contentAltTitle = MutableString()
        contentSummary = MutableString()
        contentTitle = ''
        contentAltTitle = ''
        contentSummary = ''
        for content_node in node.childNodes:
            #get title
            if (content_node and content_node.nodeName == 'content' and content_node.attributes['name'].value == "title"):
                for text_node in content_node.childNodes:
                    contentTitle += CharReplacementMap.remove_html_tags(text_node.nodeValue)
            #get alt title (disabled)
            """if (content_node and content_node.nodeName == 'content' and content_node.attributes['name'].value == "altTitle"):
                for text_node in content_node.childNodes:
                    contentAltTitle += remove_html_tags(text_node.nodeValue)
            """
            #get content
            if (content_node and content_node.nodeName == 'content' and content_node.attributes['name'].value == "FullSummary"):
                for text_node in content_node.childNodes:
                    contentSummary += text_node.nodeValue
        #replace HTML tags with friendly tags (that will be replaced back to HTML later)
        #contentSummary = CharReplacementMap.translate_tags_from_html(contentSummary)
        #now strip HTML garbage
        #contentSummary = CharReplacementMap.remove_html_tags(contentSummary)
        returnData = MutableString()
        returnData = ''
        if (format == 'json'):
            startTag = '{'
            endTag = '},'
            #cleanup: escape quotes, drop newlines/tabs/CRs for JSON safety
            contentTitle = re.sub("\n", "", contentTitle)
            contentTitle = re.sub("\"", "\\\"", contentTitle)
            contentAltTitle = re.sub("\n", "", contentAltTitle)
            contentAltTitle = re.sub("\r", "", contentAltTitle)
            contentAltTitle = re.sub("\"", "\\\"", contentAltTitle)
            contentSummary = re.sub("\"", "\\\"", contentSummary)
            contentSummary = re.sub("\n", "", contentSummary)
            contentSummary = re.sub("\t", " ", contentSummary)
            contentSummary = re.sub("\r", "", contentSummary)
            if (len(contentSummary) > 0):
                # [:-1] strips the field's trailing comma (summary is last).
                contentSummary = Formatter.data(format, 'summary', escape(contentSummary))[:-1]
        else:
            startTag = '<record>'
            endTag = '</record>'
            if (len(contentSummary) > 0):
                contentSummary = Formatter.data(format, 'summary', escape(contentSummary))
        if (len(contentTitle) > 0):
            returnData += startTag + Formatter.data(format, 'title', escape(contentTitle))
        if (len(contentAltTitle) > 0):
            returnData += Formatter.data(format, 'alt_title', escape(contentAltTitle))
        if (len(contentSummary) > 0 ):
            returnData += contentSummary + endTag
        return returnData
'''
author = Megi Beca
'''
import Sales
import Formatter
import Pay
import time

# Main menu loop. The flag is never cleared, so (matching the original
# behavior) the menu repeats until the process is interrupted.
running = True
while running:
    # Prompt text: typos in the original ("thhe persecnt yoy saved",
    # "reseaved") are corrected here.
    choice = input(
        "Pick a letter:\nA = Identify the percent you saved.\nB = Identify what level of savings you got.\n"
        "C = Identify how much you owe or how much change you are owed.\nD = Format a receipt.\nE = Calculate how "
        "much of a bonus you received this year.\nF = Calculate your yearly income.\n"
    ).upper()
    if choice == "A":
        Sales.identifying_percent_off()
    elif choice == "B":
        Sales.identifying_savings()
    elif choice == "C":
        Sales.change()
    elif choice == "D":
        Formatter.formatting_receipts()
    elif choice == "E":
        Pay.bonuses()
    elif choice == "F":
        Pay.income()
    # Brief pause so the sub-menu output can be read before re-prompting.
    time.sleep(5)
# NOTE(review): this chunk opens mid-method -- the enclosing class, the
# start of its gen() method and the branch these first writes belong to
# are outside this view; they are annotated in place without restructuring.
            fd.write(".TE\n")          # end the tbl(1) block
            fd.write(".sp\n.sp\n")     # trailing vertical space
        elif self.name == "tr":
            fd.write("\n")             # row separator
        elif self.name in ["th", "td"]:
            # Close the text block; emit a column separator unless this
            # is the last cell of the row.
            fd.write("\nT}%s" % ("|" if not last else ""))


def parse_table(html):
    """Convert HTML table markup into troff/tbl text.

    Builds a Node tree under a synthetic "root" element and renders it
    into an in-memory buffer (Python 2 StringIO).
    """
    root = Node(None, "root", "", html)
    fd = StringIO.StringIO()
    root.gen(fd)
    return fd.getvalue()

########NEW FILE########
__FILENAME__ = test
#!/usr/bin/env python
# Standalone smoke test: make the current working directory importable,
# then run the formatter's built-in self-test.
import sys
import os
import os.path
sys.path.insert(0, os.path.normpath(os.getcwd()))
from cppman import Formatter
Formatter.func_test()
########NEW FILE########
def dump(self, rv, fmt=None):
    """Emit this decoded instruction through the formatter callbacks.

    Arguments:
        rv  -- output sink passed to every formatter callback
        fmt -- Formatter used for emission; a fresh default Formatter is
               created per call when omitted.  (Replaces the old
               ``fmt=Formatter()`` signature, which shared one mutable
               default instance across every call.)
    """
    if fmt is None:
        fmt = Formatter()
    # Predication: emit "@$p<N>.<cc>" unless unpredicated or the
    # always-true condition (pred_op 15) applies.
    # What do these mean?
    # do we have a zero bit, sign bit?
    # self.pred_op&3 seems straightforward enough
    # but what is self.pred_op>>2 ?
    # NOTE(review): several alternative decodings (true/false-only
    # predicates, signed/unsigned logic-op variants) were explored in
    # commented-out code here and have been removed as dead code.
    if not self.predicated or self.pred_op == 15 or self.pred_op == None:
        pass # No self.pred
    else:
        fmt.pred(rv, "@$p%i.%s" % (self.pred, condition_codes[self.pred_op]))
    # Base mnemonic: known name, system-op placeholder, or raw opcode pair.
    if self.base:
        fmt.base(rv, self.base)
    elif self.system:
        fmt.base(rv, "sop.%01x" % (self.op))
    else:
        fmt.base(rv, "op.%01x%01x" % (self.op, self.subop))
    # Add instruction modifiers
    for m in self.modifiers:
        fmt.modifier(rv, m)
    # Operand types: promote an explicit sign from any operand onto the
    # sign-less ones, then collapse identical type strings.
    srco = self.dst_operands + self.src_operands
    srco = [x.clone() for x in srco]
    sign = OP_SIGN_NONE
    for o in srco:
        if o.sign != OP_SIGN_NONE:
            sign = o.sign
    for o in srco:
        if o.sign == OP_SIGN_NONE:
            o.sign = sign
    optypes = []
    optypes.extend([x.typestr() for x in srco])
    optypes = [x for x in optypes if x != ""] # Filter empty types (predicates)
    oset = set(optypes)
    if len(oset) == 1:
        # There is only one type
        fmt.types(rv, optypes[0])
    else:
        # Show all types
        fmt.types(rv, "".join(optypes))
    # Destination operands, "|"-separated.
    dst_operands = self.dst_operands[:]
    if len(dst_operands):
        operands_str = [x.__repr__() for x in dst_operands]
        fmt.dest_operands(rv, "|".join(operands_str))
    # Source operands, comma-separated; a leading ", " joins them to the
    # destination list when one was emitted.
    if len(self.src_operands):
        pre = ""
        if len(self.dst_operands):
            pre = ", "
        operands_str = [x.__repr__() for x in self.src_operands]
        fmt.src_operands(rv, pre + (", ".join(operands_str)))
    # Disassembler warnings: any encoded bits the decoder never visited
    # are reported as unknown.
    if self.inst != None and self.visited != None:
        unk0 = self.inst[0] & ~(self.visited[0])
        if len(self.inst) == 2:
            unk1 = self.inst[1] & ~(self.visited[1])
        else:
            unk1 = 0
        if unk0:
            fmt.warning(rv, "unk0 %08x" % (unk0))
        if unk1:
            fmt.warning(rv, "unk1 %08x" % (unk1))
    for warn in self.warnings:
        fmt.warning(rv, warn)
def parseCommentsContent(hnAPIUrl, hnAPIUrlBackup, apiURL, page='',format='json'):
    """Scrape a Hacker News comments page and emit JSON or XML records.

    Each record carries id, username, time, reply_id and comment fields.
    Returns the concatenated record string, or None when the remote fetch
    failed.

    NOTE(review): indentation was reconstructed from a collapsed source;
    confirm nesting against the original file.
    """
    returnData = MutableString()
    returnData = ''
    logging.debug('HN URL: %s' % hnAPIUrl)
    result = getRemoteData(hnAPIUrl, hnAPIUrlBackup)
    if (result):
        htmlData = result.content
        soup = BeautifulSoup(htmlData)
        urlLinksContent = soup('table')
        counter = 0
        # Pass 1: collect raw per-comment tuples keyed by discovery order.
        comment_container = {}
        for node in urlLinksContent:
            commentTd = node.first('td', {'class' : 'default'})
            if (commentTd):
                authorSpan = commentTd.first('span', {'class' : 'comhead'})
                #multi-paragraph comments are a bit tricky, parser wont' retrieve them using "span class:comment" selector
                commentSpan = getParagraphCommentSiblings(commentTd.first('span', {'class' : 'comment'}))
                replyLink = commentTd.first('a', {'href' : re.compile('^reply.*')})['href']
                if (replyLink and "reply?id=" in replyLink):
                    replyLink = replyLink.replace('reply?id=', '')
                if (authorSpan and commentSpan):
                    #author span: <span class="comhead"><a href="user?id=dendory">dendory</a> 1 day ago | <a href="item?id=3015166">link</a></span>
                    commentId = authorSpan.first('a', {'href' : re.compile('^item.*')})
                    user = authorSpan.first('a', {'href' : re.compile('^user.*')})
                    #get time posted...lame but works. for some reason authorSpan.string returns NULL
                    timePosted = str(authorSpan).replace('<span class="comhead">', '').replace('</span>', '')
                    #now replace commentId and user blocks
                    timePosted = timePosted.replace(str(user), '').replace('| ', '').replace(str(commentId), '')
                    if (commentId['href'] and "item?id=" in commentId['href']):
                        commentId = commentId['href'].replace('item?id=', '')
                    #cleanup
                    commentString = removeHtmlTags(str(commentSpan))
                    if ('__BR__reply' in commentString):
                        commentString = commentString.replace('__BR__reply', '')
                    comment_container[counter] = [commentId, user.string, timePosted.strip(), commentString, replyLink]
                    counter = counter + 1
        #build up string
        # Pass 2: format each unique comment id exactly once.
        commentKeyContainer = {}
        for key in comment_container.keys():
            listCommentData = comment_container[key]
            if (listCommentData and not commentKeyContainer.has_key(listCommentData[0])):
                commentId = listCommentData[0]
                if (commentId):
                    commentKeyContainer[commentId] = 1
                    userName = listCommentData[1]
                    whenPosted = listCommentData[2]
                    commentsString = listCommentData[3]
                    replyId = listCommentData[4]
                    if (format == 'json'):
                        startTag = '{'
                        endTag = '},'
                        #cleanup: escape quotes, strip control characters
                        if (commentsString):
                            commentsString = re.sub("\"", "\\\"", commentsString)
                            commentsString = re.sub("\n", "", commentsString)
                            commentsString = re.sub("\t", " ", commentsString)
                            commentsString = re.sub("\r", "", commentsString)
                            if (len(commentsString) > 0):
                                commentsString = Formatter.data(format, 'comment', escape(removeNonAscii(commentsString)))
                            else:
                                commentsString = "n/a "
                    else:
                        startTag = '<record>'
                        endTag = '</record>'
                        if (len(userName) > 0):
                            userName = escape(removeNonAscii(userName))
                        if (len(whenPosted) > 0):
                            whenPosted = escape(whenPosted)
                        if (len(commentsString) > 0):
                            commentsString = Formatter.data(format, 'comment', escape(removeNonAscii(commentsString)))
                    # Only emit complete records -- every field must be set.
                    if (commentId and userName and whenPosted and replyId and commentsString):
                        if (len(commentId) > 0):
                            returnData += startTag + Formatter.data(format, 'id', commentId)
                        if (len(userName) > 0):
                            returnData += Formatter.data(format, 'username', userName)
                        if (len(whenPosted) > 0):
                            returnData += Formatter.data(format, 'time', whenPosted)
                        if (len(replyId) > 0):
                            returnData += Formatter.data(format, 'reply_id', escape(replyId))
                        if (len(commentsString) > 0 ):
                            returnData += commentsString + endTag
    else:
        returnData = None
    return returnData
endyearmonthdaytime = '20150430T000000' USERNAME = str(raw_input("Enter my.olin.edu Username: "******"time slots" are made instead. This just maps each class to it's respective slots. snames = {} for i in range(len(timesold)): snames[names[i]] = len(timesold[i]) dnames = {} k = 0 for j in range(7):
def getHospitalData(self,lat,lon,format):
    """Return hospital records within a 100-mile geobox of (lat, lon).

    Queries HospitalInfo via geobox matching, sorts the survivors by
    great-circle distance, and renders each as a JSON ('{...},', with the
    final trailing comma stripped) or XML ('<record>...</record>') string.
    """
    #get hospital data
    gb = geobox2.Geobox(lat, lon)
    # 100-mile search window, capped at 100 rows.
    box = gb.search_geobox(100)
    query = HospitalInfo.all().filter("geoboxes IN", [box])
    results = query.fetch(100)
    # Key surviving rows by distance so iteration below is nearest-first.
    # NOTE(review): two hospitals at exactly the same distance would
    # overwrite each other in this dict -- confirm acceptable.
    db_recs = {}
    for result in results:
        distance = UtilLib.getEarthDistance(lat, lon, result.location.lat, result.location.lon)
        if (distance and distance > 0):
            db_recs[distance] = result
    returnData = MutableString()
    returnData = ''
    #output this badboy
    if (db_recs and len(db_recs) > 0):
        for key in sorted(db_recs.iterkeys()):
            p = db_recs[key]
            if (format == 'json'):
                startTag = '{'
                endTag = '},'
                # JSON: strip the trailing comma from the distance field.
                distance = Formatter.data(format, 'distance', '%s %s' % (str(math.ceil(key)), "mi"))[:-1]#'%.2g %s' % (key, "mi"))[:-1]
            else:
                startTag = '<record>'
                endTag = '</record>'
                distance = Formatter.data(format, 'distance', '%s %s' % (str(math.ceil(key)), "mi"))
            #build the string
            # NOTE(review): .replace('&', '&') below is a no-op --
            # presumably this was .replace('&', '&amp;') (XML escaping)
            # before the source was mangled; verify against upstream.
            returnData = "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" % (returnData,startTag,
                Formatter.data(format, 'hospital_id', p.hospital_id),
                Formatter.data(format, 'name', p.name.replace('&', '&')),
                Formatter.data(format, 'address', p.address.replace('&', '&')),
                Formatter.data(format, 'city', p.city),
                Formatter.data(format, 'state', p.state),
                Formatter.data(format, 'zip_code', p.zip_code),
                Formatter.data(format, 'county', p.county.replace('&', '&')),
                Formatter.data(format, 'phone', p.phone),
                Formatter.data(format, 'hospital_type', p.hospital_type.replace('&', '&')),
                Formatter.data(format, 'hospital_owner', p.hospital_owner.replace('&', '&')),
                Formatter.data(format, 'emergency_service', p.emergency_service),
                Formatter.data(format, 'geo_location', p.location),
                distance,
                endTag
            )
    if (format == 'json'):
        # Strip the trailing ',' of the last record.
        return returnData[:-1]
    else:
        return returnData
# NOTE(review): this chunk opens mid-method -- the enclosing class and the
# start of its gen() method are above this view; annotated in place.
        if self.name == 'table':
            fd.write('.TE\n')          # end the tbl(1) block
            fd.write('.sp\n.sp\n')     # trailing vertical space
        elif self.name == 'tr':
            fd.write('\n')             # row separator
        elif self.name in ['th', 'td']:
            # Close the text block; emit a column separator unless this
            # is the last cell of the row.
            fd.write('\nT}%s' % ('|' if not last else ''))


def parse_table(html):
    """Convert HTML table markup into troff/tbl text.

    Builds a Node tree under a synthetic 'root' element and renders it
    into an in-memory buffer (Python 2 StringIO).
    """
    root = Node(None, 'root', '', html)
    fd = StringIO.StringIO()
    root.gen(fd)
    return fd.getvalue()

########NEW FILE########
__FILENAME__ = test
#!/usr/bin/env python
# Standalone smoke test: make the current working directory importable,
# then run the formatter's built-in self-test.
import sys
import os
import os.path
sys.path.insert(0, os.path.normpath(os.getcwd()))
from cppman import Formatter
Formatter.func_test()
########NEW FILE########
DATA_AMOUNT_PER_SEC = 250                       # samples recorded per second
TWO_SECS_DATA_AMOUNT = 2 * DATA_AMOUNT_PER_SEC
THREE_SECS_DATA_AMOUNT = 3 * DATA_AMOUNT_PER_SEC
FIVE_SECS_DATA_AMOUNT = 5 * DATA_AMOUNT_PER_SEC


def tagAction(channels, startTime, endTime, actionInterval, actingDuration):
    """Split each channel's samples into "action" and "do nothing" segments.

    Starting one sample after *startTime* and stepping every
    *actionInterval* seconds until *endTime*, the first *actingDuration*
    seconds of each window are collected as action data and the remainder
    of the window as rest data.

    Args:
        channels: list of per-channel sample sequences; falsy entries
            (None or empty) are carried through as None placeholders.
        startTime, endTime: tagging range, in seconds.
        actionInterval: window length, in seconds.
        actingDuration: leading portion of each window that counts as the
            action, in seconds.

    Returns:
        (doNothing, action): two lists parallel to *channels*.
    """
    doNothing = [[] if channel else None for channel in channels]
    action = copy.deepcopy(doNothing)
    actionDataAmount = int(actingDuration * DATA_AMOUNT_PER_SEC)
    actionIntervalDataAmount = int(actionInterval * DATA_AMOUNT_PER_SEC)
    # range() instead of the Python-2-only xrange(): identical iteration
    # behavior, and the module now also runs under Python 3.
    for dataPtr in range(int(startTime * DATA_AMOUNT_PER_SEC) + 1,
                         int(endTime * DATA_AMOUNT_PER_SEC),
                         actionIntervalDataAmount):
        for i, channel in enumerate(channels):
            if channel:
                action[i].extend(channel[dataPtr: dataPtr + actionDataAmount])
                doNothing[i].extend(channel[dataPtr + actionDataAmount: dataPtr + actionIntervalDataAmount])
    return doNothing, action


if __name__ == "__main__":
    # Tag wink and step actions in the recorded channels, resample, and
    # write the labelled dataset out.
    channelsWrap = Formatter.format({"step": 1, "wink": 1}, [1, 2, 3, 4])
    winkDoNothing, wink = tagAction(channelsWrap["wink"][0], 10, 101, 5, 1)
    stepDoNothing, leftStep = tagAction(channelsWrap["step"][0], 4.87, 100.87, 5, 0.85)
    stepDoNothing2, rightStep = tagAction(channelsWrap["step"][0], 104.87, 200.87, 5, 0.85)
    channelsWrap.clear()
    channelsWrap["winkdonothing"] = [winkDoNothing]
    channelsWrap["wink"] = [wink]
    channelsWrap["stepdonothing"] = [stepDoNothing]
    channelsWrap["leftstep"] = [leftStep]
    channelsWrap["rightstep"] = [rightStep]
    channelsWrap = Sampler.sampleChannelsWrap(channelsWrap, 2)
    Formatter.writeDataWrap(channelsWrap, 3)
import Formatter
import ANN as nn
import numpy as np

# Training / evaluation windows, in hours.
train_period = 24 * 7  # 7 days
test_period = 24  # 1 day
bin_count = 8  # number of one-hot output bins

# Sample generator over 60-minute periods.
period_class = Formatter.PeriodSample(60)

target = []
change_data = []
# Draw the raw (features, bin-index) samples from the period sampler.
matrix = [
    period_class.getChangeVolData(train_period, test_period)
    for i in range(900000)
]
# For each sample keep its change column as features and build a one-hot
# target vector from the bin index.
for sample in matrix:
    change_data.append(sample[0][:, 1])
    one_hot = np.zeros([bin_count], dtype=float)
    one_hot[sample[1]] = 1.0
    target.append(one_hot)
change_data = np.array(change_data, dtype=float)

# Train the network, then report training cost and test score.
model = nn.ann()
cost = model.train(change_data, target)
print(cost)
print((model.test(change_data, target)))
# NOTE(review): fragment -- this chunk opens midway through an argument
# check; the matching "if" is above this view.
    sys.exit(1)
if not os.path.isfile(sys.argv[1]):
    print("***file not found")
    sys.exit(1)
# Input file named on the command line, plus global formatter state.
file = open(sys.argv[1], "r")
variable_dictionary = {}
# Default formatting settings: flowed text, margins 1..80, left justified,
# "o" as the bullet character.
format_dictionary = {
    "FLOW": "YES",
    "LM": "1",
    "RM": "80",
    "JUST": "LEFT",
    "BULLET": "o"
}
f = Formatter(" ", format_dictionary)


# noinspection PySimplifyBooleanCheck,PyGlobalUndefined
def readCommands(file):
    """
    Parse the command/text lines of *file*, feeding each line to the
    module-level Formatter instance ``f``.

    NOTE(review): this function is truncated by the chunk boundary --
    the handling of token_list continues beyond this view.

    :param file: text file to be parsed
    :return: N/A
    """
    for input_line in file.readlines():
        input_line = input_line.rstrip('\n') # removes newline
        if input_line == "":
            f.getformattedLine(input_line)
        token_list = input_line.split()
import Data
import Formatter
import grouping_changecalc as grp

# Smoke-test the data layer: list the available series names.
print(Data.getNames())

# Build a 10-minute period sampler and pull one change/volume window
# (50 training periods, 5 test periods).
period_sampler = Formatter.PeriodSample(10)
period_sampler.getChangeVolData(50, 5)
# # # train, test = Data.randomSample(10,2)
# print(Data.getNames())
def parsePostContent(hnAPIUrl, hnBackupAPIUrl, apiURL, page='', format='json', limit=0):
    """Fetch a Hacker News listing page and serialize its posts as JSON or XML fragments.

    If the response already looks like output from the HNDroidAPI PHP parser,
    it is returned verbatim; otherwise the raw HTML is scraped with
    BeautifulSoup (classic fallback path).

    :param hnAPIUrl: primary URL to fetch.
    :param hnBackupAPIUrl: unused here — presumably a fallback URL used by the
        caller; verify against call sites.
    :param apiURL: base URL of this API, used to build the 'NextId' paging link.
    :param page: next-page id (currently disabled — see commented block below).
    :param format: 'json' or anything else for XML-style records.
    :param limit: maximum number of posts to collect; 0 means no limit.
    :return: concatenated record string, or None when the fetch failed.
    """
    returnData = MutableString()
    returnData = ''
    logging.debug('HN URL: %s' % hnAPIUrl)
    #next page content (not allowed - robots.txt Disallow)
    #if (page):
    #    hnAPIUrl = '%s/x?fnid=%s' % (AppConfig.hackerNewsURL, page)
    #call HN website to get data
    httpData = getRemoteData(hnAPIUrl)
    if (httpData):
        htmlData = httpData
        #php parser (primary API)
        if ('{"title":"' in htmlData and 'HNDroidAPI PHP Parser' in htmlData):
            # Already pre-parsed upstream; pass through untouched.
            return htmlData
        #classic API fallback
        soup = BeautifulSoup(htmlData)
        # First pass: collect post URLs/titles from the 'title' cells.
        urlLinksContent = soup('td', {'class' : 'title'})
        counter = 0
        url_links = {}
        for node in urlLinksContent:
            if (node.a):
                url_links[counter] = [node.a['href'], node.a.string]
                counter = counter + 1
            if (limit > 0 and counter == limit):
                break;
        #get comments & the rest
        # Second pass: collect score/user/comments metadata from the
        # 'subtext' cells; index parallels url_links by position.
        commentsContent = soup('td', {'class' : 'subtext'})
        counter = 0
        comments_stuff = {}
        for node in commentsContent:
            if (node):
                #parsing this
                #<td class="subtext"><span id="score_3002117">110 points</span> by <a href="user?id=JoelSutherland">JoelSutherland</a> 3 hours ago | <a href="item?id=3002117">36 comments</a></td>
                nodeString = removeHtmlTags(str(node))
                score = node.first('span', {'id' : re.compile('^score.*')}).string
                user = node.first('a', {'href' : re.compile('^user.*')}).string
                itemId = node.first('a', {'href' : re.compile('^item.*')})["href"]
                comments = node.first('a', {'href' : re.compile('^item.*')}).string
                #since 'XX hours ago' string isn't part of any element we need to simply search and replace other text to get it
                timeAgo = nodeString.replace(str(score), '')
                timeAgo = timeAgo.replace('by %s' % str(user), '')
                timeAgo = timeAgo.replace(str(comments), '')
                timeAgo = timeAgo.replace('|', '')
                comments_stuff[counter] = [score, user, comments, timeAgo.strip(), itemId, nodeString]
                counter = counter + 1
            if (limit > 0 and counter == limit):
                break;
        #build up string
        for key in url_links.keys():
            tupURL = url_links[key]
            if (key in comments_stuff):
                tupComments = comments_stuff[key]
            else:
                # No matching subtext row (e.g. the 'More' link) — metadata stays empty.
                tupComments = None
            if (tupURL):
                url = ''
                title = ''
                score = ''
                user = ''
                comments = ''
                timeAgo = ''
                itemId = ''
                itemInfo = ''
                #assign vars
                url = tupURL[0]
                title = tupURL[1]
                # Python-2 str.decode("string-escape"): unescapes backslash sequences.
                if (title):
                    title = title.decode("string-escape")
                if (tupComments):
                    score = tupComments[0]
                    if (score):
                        score = score.decode("string-escape")
                    user = tupComments[1]
                    if (user):
                        user = user.decode("string-escape")
                    comments = tupComments[2]
                    if (comments):
                        comments = comments.decode("string-escape")
                    timeAgo = tupComments[3]
                    if (timeAgo):
                        timeAgo = timeAgo.decode("string-escape")
                    itemId = tupComments[4]
                    if (itemId):
                        itemId = itemId.decode("string-escape")
                    itemInfo = tupComments[5]
                    if (itemInfo):
                        itemInfo = itemInfo.decode("string-escape")
                else:
                    #need this for formatting
                    itemInfo = 'n/a '
                #last record (either news2 or x?fnid)
                # The trailing 'More' link becomes a synthetic 'NextId' record
                # pointing at either this API's paging URL or /news2.
                if (title.lower() == 'more' or '/x?fnid' in url):
                    title = 'NextId'
                    if ('/x?fnid' in url):
                        url = '%s/format/%s/page/%s' % (apiURL, format, url.replace('/x?fnid=', ''))
                    else:
                        url = '/news2'
                    itemInfo = 'hn next id %s ' % tupURL[0]
                if (format == 'json'):
                    startTag = '{'
                    endTag = '},'
                    #cleanup
                    # Strip newlines and escape double quotes so the hand-built
                    # JSON stays parseable.
                    if (title):
                        title = re.sub("\n", "", title)
                        title = re.sub("\"", "\\\"", title)
                        #title = re.sub("€", "", title)
                    if (itemInfo):
                        itemInfo = re.sub("\"", "\\\"", itemInfo)
                        itemInfo = re.sub("\n", "", itemInfo)
                        itemInfo = re.sub("\t", " ", itemInfo)
                        itemInfo = re.sub("\r", "", itemInfo)
                        #itemInfo = re.sub("€", "", itemInfo)
                    if (len(itemInfo) > 0):
                        # [:-1] drops Formatter.data's trailing comma.
                        itemInfo = Formatter.data(format, 'description', escape(itemInfo))[:-1]
                else:
                    startTag = '<record>'
                    endTag = '</record>'
                    # XML path: entity-escape user-controlled fields.
                    if (len(title) > 0):
                        title = escape(removeNonAscii(title))
                    if (len(url) > 0):
                        url = escape(url)
                    if (len(user) > 0):
                        user = escape(user)
                    if (len(itemInfo) > 0):
                        itemInfo = Formatter.data(format, 'description', escape(itemInfo))
                # Emit only non-empty fields; itemInfo closes the record.
                if (len(title) > 0):
                    returnData += startTag + Formatter.data(format, 'title', title)
                if (len(url) > 0):
                    returnData += Formatter.data(format, 'url', url)
                if (len(score) > 0):
                    returnData += Formatter.data(format, 'score', score)
                if (len(user) > 0):
                    returnData += Formatter.data(format, 'user', user)
                if (len(comments) > 0):
                    returnData += Formatter.data(format, 'comments', comments)
                if (len(timeAgo) > 0):
                    returnData += Formatter.data(format, 'time', timeAgo)
                if (len(itemId) > 0):
                    #cleanup
                    if ('item?id=' in itemId):
                        itemId = itemId.replace('item?id=', '')
                    returnData += Formatter.data(format, 'item_id', itemId)
                if (len(itemInfo) > 0 ):
                    returnData += itemInfo + endTag
    else:
        # Fetch failed entirely; caller distinguishes None from ''.
        returnData = None
    return returnData
# NOTE(review): fragment of a larger method — `self`, `returnData`, `lan`,
# `query`, `userLat`, `userLon`, `format` and `rpcClinicalTrials` are all
# defined above this chunk.
try:
    #government webservice conversion (these guys aren't aware of international country codes)
    language = lan
    if (language == 'es'):
        language = 'sp'
    # Kick off the MedlinePlus diagnosis lookup asynchronously (App Engine RPC).
    url = '%s&mainSearchCriteria.v.c=%s&informationRecipient.languageCode.c=%s' % (AppConfig.medlinePlusHealthDiagnosisICD9URL, query, language)
    rpcDiagnosis = urlfetch.create_rpc()
    urlfetch.make_fetch_call(rpcDiagnosis, url)
except Exception,exD:
    # Best-effort: log and continue without diagnosis data.
    logging.error('Errors downloading health diagnosis content: %s' % exD)
#get hospital data
if (userLat and userLon):
    hospitalData = self.getHospitalData(userLat, userLon,format)
    if (hospitalData):
        returnData += Formatter.dataComplex(format, 'hospital-records', hospitalData)
# Collect the clinical-trials RPC result started earlier, if any.
if (rpcClinicalTrials):
    clinicalTrialsData = ''
    try:
        resultClinicalTrials = rpcClinicalTrials.get_result()
        if (resultClinicalTrials and resultClinicalTrials.status_code == 200):
            if (resultClinicalTrials.content):
                clinicalTrialsData = self.getClinicalTrialsData(minidom.parseString(resultClinicalTrials.content), format)
        else:
            logging.error('unable to retrieve clinical trials content')
            clinicalTrialsData = Formatter.data(format, 'error', 'Unable to retrieve content from provider')
    except Exception,ex:
        logging.error('Errors getting clinical trials content: %s' % ex)
        clinicalTrialsData = Formatter.data(format, 'error', 'Error(s) retrieving content from provider: %s' % ex)
def parse_args_and_file():
    """Parse the CLI arguments and the problem-definition input file.

    The file has a preamble of formatter directives (NONUMBER, TITLEPAGE,
    HEADER_L, ...) terminated by a '=====' line; each subsequent '====='
    starts a new problem whose lines (VAR, TOSIM, IMAGE, TEXT, ...) configure
    a Problem_Specs instance.

    :return: Problem_Modifier.Container holding the parsed problems and formatter.
    """
    args = get_args()
    global num_variants
    num_variants = args.num_variants
    print(args.input)
    problems = []
    formatter = Formatter.Formatter()
    with open(args.input, "r") as file:
        problem= None
        preamble_passed = False
        lines = file.readlines()
        for line in lines:
            # '=====' separates the preamble and each problem; the second
            # clause handles a final '=====' with no trailing newline.
            if (line == '=====\n' or (line == '=====' and line is lines[-1])):
                preamble_passed = True
                print('created problem')
                if(problem is not None):
                    problems.append(problem)
                # Bug : doesn't execute if =====\n is on the last line
                # NOTE(review): a problem not followed by a closing '====='
                # is never appended to `problems` — confirm the input format
                # requires a trailing delimiter.
                problem = Problem_Modifier.Problem_Specs()
                continue
            data = line.split()
            if(not preamble_passed):
                # Preamble directives configure the document formatter.
                if(data[0] == 'NONUMBER'):
                    formatter.has_numbebered_pages(False)
                elif(data[0] == 'TITLEPAGE'):
                    formatter.has_title_page(True)
                elif(data[0] == 'TITLETEXT'):
                    text = ' '.join(data[1:])
                    formatter.set_title_text(text)
                elif(data[0] == 'MARKTOTAL'):
                    total_marks = int(data[1])
                    formatter.set_mark_total(total_marks)
                elif(data[0] == 'HEADER_L'):
                    text = ' '.join(data[1:])
                    formatter.set_left_header(text)
                elif(data[0] == 'HEADER_R'):
                    text = ' '.join(data[1:])
                    formatter.set_right_header(text)
                elif(data[0] == 'FOOTER_L'):
                    text = ' '.join(data[1:])
                    formatter.set_left_footer(text)
                elif(data[0] == 'FOOTER_R'):
                    text = ' '.join(data[1:])
                    formatter.set_right_footer(text)
                continue
            # Problem-body directives act on the current Problem_Specs.
            if(data[0] == 'VAR'):
                parse_variable_line(problem, data)
            elif (data[0] == 'TOSIM'):
                if(len(data[1:])< 2):
                    raise ValueError('TOSIM had too few arguments')
                problem.to_sim(data[1], data[2])
            elif (data[0] == 'IMAGE'):
                file_path = data[1]
                # NOTE(review): `display` is created here but never added to
                # `problem` — looks like a missing add_display_to_problem call;
                # confirm intended behavior before fixing.
                display = Displays.Image(file_path)
            elif (data[0] == 'SCHEMATIC'):
                file_path = data[1]
                show = True
                if(len(data[1:]) == 2):
                    # NOTE(review): bool(data[2]) is True for any non-empty
                    # string, including "False" — verify expected input values.
                    show = bool(data[2])
                display = Displays.Schematic(file_path, show)
                problem.add_soldisplay_to_problem(display)
            elif (data[0] == 'TEXT'):
                text = ' '.join(data[1:])
                display = Displays.Text(text)
                problem.add_display_to_problem(display)
            elif (data[0] == 'SOLTEXT'):
                text = ' '.join(data[1:])
                display = Displays.Text(text)
                problem.add_soldisplay_to_problem(display)
            elif (data[0] == 'SOLDISPLAY'):
                problem.show_solution_all_params(True)
            elif (data[0] == 'TITLE'):
                title = data[1]
                problem.set_title(title)
            elif (data[0] == 'MARKS'):
                marks = int(data[1])
                problem.set_marks(marks)
    Contain = Problem_Modifier.Container(problems,formatter)
    return Contain
def get(self,id=None,format='json'):
    """Handle GET for one hospital record: look up HospitalInfo by id and
    emit its fields as JSON or XML.

    :param id: hospital_id to look up; missing/empty id yields an error response.
    :param format: 'json' for a JSON object, anything else for an XML document.
    """
    #set content-type
    self.response.headers['Content-Type'] = Formatter.contentType(format)
    #validate id
    if (not id or id == ''):
        self.response.out.write(Formatter.error(format, 'invalid id'))
        return
    #get the record
    q = db.GqlQuery("SELECT * FROM HospitalInfo WHERE hospital_id = :1", id)
    #call storage and build up result string
    results = q.fetch(1)
    returnData = MutableString()
    returnData = ''
    for p in results:
        if (p):
            # Free-text fields get '&' escaped as '&amp;' so the XML output
            # stays well-formed; the original replace('&', '&') was a no-op,
            # presumably a mangled '&amp;'.
            returnData = "%s%s%s%s%s%s%s%s%s%s%s%s%s" % (
                Formatter.data(format, 'hospital_id', p.hospital_id),
                Formatter.data(format, 'name', p.name.replace('&', '&amp;')),
                Formatter.data(format, 'address', p.address.replace('&', '&amp;')),
                Formatter.data(format, 'city', p.city),
                Formatter.data(format, 'state', p.state),
                Formatter.data(format, 'zip_code', p.zip_code),
                Formatter.data(format, 'county', p.county),
                Formatter.data(format, 'phone', p.phone),
                Formatter.data(format, 'hospital_type', p.hospital_type.replace('&', '&amp;')),
                Formatter.data(format, 'hospital_owner', p.hospital_owner.replace('&', '&amp;')),
                Formatter.data(format, 'emergency_service', p.emergency_service),
                Formatter.data(format, 'geo_location', p.location),
                Formatter.data(format, 'geo_box', string.join(p.geoboxes, ","))
            )
    #output to the browser
    if (format == 'json'):
        self.response.out.write('{')
        # [:-1] drops the trailing comma Formatter.data appends per field.
        self.response.out.write(returnData[:-1])
        self.response.out.write('}')
    else:
        self.response.out.write('<?xml version="1.0"?>')
        self.response.out.write('<root>')
        self.response.out.write(returnData)
        self.response.out.write('</root>')