def test_parse_format_changed(self):
    """Verify make_packet_parser snapshots its header/format arguments.

    After the parser is built, both the (mutable) header and the format
    list are mutated; the parser must still parse the same input to the
    same result, proving it is not affected by later changes.
    """
    # MutableString is used deliberately: a plain str could not be mutated
    # in place, which is exactly the scenario under test.  (Python 2 only;
    # UserString.MutableString was removed in Python 3.)
    from UserString import MutableString
    header = MutableString('lo')
    format = [('field', 2)]
    test_parser = make_packet_parser(header, format)
    self.do_parse_test(
        test_parser,
        ['lo\x00\x00'],
        [{ 'field': '\x00\x00' }],
        ['6C6F 0000'],
    )
    # Mutate both constructor arguments after the parser was built...
    format += [('field2', 2)]
    header.append('l')
    # ...and check the parser still produces the original results.
    self.do_parse_test(
        test_parser,
        ['lo\x00\x00'],
        [{ 'field': '\x00\x00' }],
        ['6C6F 0000'],
    )
def __repr__(self):
    """Render the matrix as text: one line per row, each row the
    concatenation of its (str-converted) cell values, every row
    terminated by a newline."""
    # list + str.join replaces the deprecated UserString.MutableString
    # buffer (removed in Python 3).  MutableString.append() str()-converted
    # each appended value, so str() is applied here too.
    lines = []
    for r in range(self.rows):
        cells = [str(self.__matrix[r][c]) for c in range(self.cols)]
        lines.append("".join(cells) + "\n")
    return "".join(lines)
def testRepr(self):
    """A freshly created 10x12 Board renders as 10 newline-terminated
    rows of ocean characters."""
    board = Board(10, 12)
    ocean = '~'
    # Plain string arithmetic replaces the deprecated
    # UserString.MutableString accumulator (removed in Python 3);
    # the compared value is character-for-character identical.
    expected = (ocean * 12 + '\n') * 10
    self.assertEqual(expected, str(board))
def check_for_urls_in_files(app, reporter):
    """Check that URLs do not include redirect or requests from external
    web sites.

    Every URL-looking match found in the app's files (excluding
    binary/document file types and the ``samples`` directory) is flagged
    as a manual check — once per individual match, then once more
    aggregated per distinct URL.
    """
    # It's a little verbose but with the explicit-ness comes
    # References
    # http://tools.ietf.org/html/rfc3986
    # http://stackoverflow.com/questions/4669692/valid-characters-for-directory-part-of-a-url-for-short-links
    # Raw strings so the regex escapes are not silently mangled by Python
    # string-literal escaping.
    url_regex_pattern = (r"(\w*://)+"                  # Captures protocol
                         r"([\w\d\-]+\.[\w\d\-\.]+)+"  # Captures hostname
                         r"(:\d*)?"                    # Captures port
                         r"(\/[^\s\?]*)?"              # Captures path
                         r"(\?[^\s]*)?")               # Captures query string
    # NOTE: the previously compiled IGNORECASE regex object was never used;
    # app.search_for_pattern receives the raw pattern string.
    excluded_types = [".csv", ".gif", ".jpeg", ".jpg", ".md", ".org",
                      ".pdf", ".png", ".svg", ".txt"]
    excluded_directories = ["samples"]
    url_matches = app.search_for_pattern(url_regex_pattern,
                                         excluded_dirs=excluded_directories,
                                         excluded_types=excluded_types)
    if url_matches:
        # {url_pattern: {filename: [lineno_list]}}
        result_dict = {}
        for (fileref_output, match) in url_matches:
            url_match = match.group()
            # fileref_output looks like "path/to/file:lineno".
            filename, line_number = fileref_output.rsplit(":", 1)
            result_dict.setdefault(url_match, {}) \
                       .setdefault(filename, []) \
                       .append(str(line_number))
            reporter_output = ("A file was detected that contains a url."
                               " Match: {}"
                               " File: {}"
                               " Line: {}"
                               ).format(url_match, filename, line_number)
            reporter.manual_check(reporter_output, filename, line_number)
        # Create some extra manual checks in order to see results in a
        # more convenient way (one aggregated report per distinct URL).
        for (url_match, file_dict) in result_dict.items():
            # list + str.join replaces the deprecated
            # UserString.MutableString accumulator (removed in Python 3).
            parts = ["A url {} was detected in the following files"
                     .format(url_match)]
            for (file_name, lineno_list) in file_dict.items():
                parts.append(", (File: {}, Linenolist: [{}])".format(
                    file_name, ', '.join(lineno_list)))
            # Don't need filename and line_number here, since it is an
            # aggregated result.
            reporter.manual_check("".join(parts))
def toString(self):
    """Return a human-readable description of this archive configuration.

    Fields are concatenated without separators, matching the original
    MutableString-based output exactly.
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = []
    parts.append("Archaive type: %s" % self.myArchaiveType)
    parts.append("Archaive errors allowed: %s" % self.myErrorsAllowed)
    parts.append("Archaive consolidation span: %d" % self.myConsolidationSpan)
    parts.append("Archaive rows to keep: %d" % self.myRowsToKeep)
    return "".join(parts)
def load(durl, greet):
    """Fetch an RSS feed (memcached under key *greet*) and return a list
    of link-info dicts, one per ``channel/item`` entry.

    durl  -- feed URL fetched when the cache misses.
    greet -- memcache key the raw feed body is stored under.
    """
    def remove_tags(text):
        # Strip markup and newlines, escape double quotes, then drop any
        # non-ASCII characters.
        text = TAG_RE.sub('', text)
        text = re.sub("\n", "", text)
        text = re.sub("\"", "\\\"", text)
        return "".join(filter(lambda x: ord(x) < 128, text))

    # Dead `content = MutableString()` initialisation removed: it was
    # immediately rebound to a list.
    content = []
    # NOTE(review): this opener (with its User-agent header) is built but
    # never used — urllib2.urlopen below ignores it; confirm intent.
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/24.0')]
    MEMCACHE_GREETINGS = greet
    data = cache.get(MEMCACHE_GREETINGS)
    time = 1800  # cache TTL in seconds
    if data is None:
        # Cache miss: fetch the feed and store the raw body.  `feed`
        # avoids shadowing the builtin `file`; try/finally guarantees the
        # handle is closed even if read() raises.
        feed = urllib2.urlopen(durl)
        try:
            data = feed.read()
        finally:
            feed.close()
        cache.add(MEMCACHE_GREETINGS, data, time)
    doc = ET.fromstring(data)
    for node in doc.findall('channel/item'):
        title = node.find('./title').text
        description = node.find('./description').text
        url = node.find('./link').text
        info = {
            'title': remove_tags(title),
            'description': remove_tags(description),
            'url': url,
            'submitter': User.objects.filter(username='******')[0].id,
            'linksource': urlparse(url).netloc,
            'votes': randrange(20),
        }
        logging.debug(
            "value of user my var is %s",
            str(User.objects.filter(username='******')[0].get_username()))
        content.append(info)
    return content
def toString(self):
    """Return a human-readable description of this archive configuration.

    Fields are concatenated without separators, matching the original
    MutableString-based output exactly.
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = []
    parts.append("Archaive type: %s" % self.myArchaiveType)
    parts.append("Archaive errors allowed: %s" % self.myErrorsAllowed)
    parts.append("Archaive consolidation span: %d" % self.myConsolidationSpan)
    parts.append("Archaive rows to keep: %d" % self.myRowsToKeep)
    return "".join(parts)
def as_string(self, show_hit=True, show_ship=True, show_miss=True):
    """Render the board as text: one marker character per cell, each row
    terminated by a newline.

    show_hit/show_ship/show_miss -- when False, cells of that kind are
    rendered as ocean instead of their marker.

    Returns a plain str (the previous implementation returned the
    deprecated UserString.MutableString; known callers only use
    splitlines()/str(), which behave identically on str).
    """
    lines = []
    for r in range(self.rows):
        row_chars = []
        for c in range(self.cols):
            coord = r, c
            # Precedence: hit > hidden ship > miss > open ocean.
            if coord in self.hit and show_hit:
                cell = Board.HIT
            elif coord in self.hidden_ships and show_ship:
                cell = Board.SHIP
            elif coord in self.miss and show_miss:
                cell = Board.MISS
            else:
                cell = Board.OCEAN
            row_chars.append(cell)
        lines.append("".join(row_chars) + "\n")
    return "".join(lines)
def load(durl, greet):
    """Fetch an RSS feed (memcached under key *greet*) and return a list
    of link-info dicts, one per ``channel/item`` entry.

    durl  -- feed URL fetched when the cache misses.
    greet -- memcache key the raw feed body is stored under.
    """
    def remove_tags(text):
        # Strip markup and newlines, escape double quotes, then drop any
        # non-ASCII characters.
        text = TAG_RE.sub('', text)
        text = re.sub("\n", "", text)
        text = re.sub("\"", "\\\"", text)
        return "".join(filter(lambda x: ord(x) < 128, text))

    # Dead `content = MutableString()` initialisation removed: it was
    # immediately rebound to a list.
    content = []
    # NOTE(review): this opener (with its User-agent header) is built but
    # never used — urllib2.urlopen below ignores it; confirm intent.
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', 'Mozilla/24.0')]
    MEMCACHE_GREETINGS = greet
    data = cache.get(MEMCACHE_GREETINGS)
    time = 1800  # cache TTL in seconds
    if data is None:
        # Cache miss: fetch the feed and store the raw body.  `feed`
        # avoids shadowing the builtin `file`; try/finally guarantees the
        # handle is closed even if read() raises.
        feed = urllib2.urlopen(durl)
        try:
            data = feed.read()
        finally:
            feed.close()
        cache.add(MEMCACHE_GREETINGS, data, time)
    doc = ET.fromstring(data)
    for node in doc.findall('channel/item'):
        title = node.find('./title').text
        description = node.find('./description').text
        url = node.find('./link').text
        info = {
            'title': remove_tags(title),
            'description': remove_tags(description),
            'url': url,
            'submitter': User.objects.filter(username='******')[0].id,
            'linksource': urlparse(url).netloc,
            'votes': randrange(20),
        }
        logging.debug(
            "value of user my var is %s",
            str(User.objects.filter(username='******')[0].get_username()))
        content.append(info)
    return content
def formatCreateCommand(self, counter, rrdPath):
    """Build the ``rrdtool create`` command line for *counter*.

    counter -- counter definition (sampling rate, DS fields, archives).
    rrdPath -- path of the RRD file to create.
    Returns the complete command string.
    """
    cmd = "create %s --step %d" % (rrdPath, counter.mySamplingRate)
    # Format archives string; str.join replaces the deprecated
    # UserString.MutableString accumulator (removed in Python 3).
    archaivesString = "".join(
        " RRA:%s:%f:%d:%d" % (archaive.myArchaiveType,
                              archaive.myErrorsAllowed,
                              archaive.myConsolidationSpan,
                              archaive.myRowsToKeep)
        for archaive in counter.myArchaives)
    # Format DS
    cmd += " DS:%d:%s:%d:%s:%s %s" % \
        (counter.myCounterId, counter.myCounterType, counter.myMinHeartbeat,
         counter.getMinStr(), counter.getMaxStr(), archaivesString)
    return cmd
def load(durl, greet):
    """Parse the feed at *durl* and return a list of link-info dicts.

    greet -- unused in this loader (kept for interface compatibility with
    the other loaders, which use it as a cache key).
    Returns None when no '******' user exists to attribute links to.
    """
    def remove_tags(text):
        # Strip markup and newlines, escape quotes, normalise curly
        # apostrophes, then drop non-ASCII characters.
        text = TAG_RE.sub('', text)
        text = re.sub("\n", "", text)
        text = re.sub("\"", "\\\"", text)
        text = re.sub(u"(\u2018|\u2019)", "'", text)
        return "".join(filter(lambda x: ord(x) < 128, text))

    def entry_to_link_dict(entry):
        # Flatten one feed entry into the dict shape the caller stores.
        s = MLStripper()
        s.feed(entry.description)
        return {
            "title": remove_tags(entry.title),
            "url": entry.link,
            "linksource": urlparse(entry.link).netloc,
            "votes": "1",
            "description": remove_tags(s.get_data()),
        }

    # Dead `content = MutableString()` initialisation removed: it was
    # immediately rebound to a list.
    content = []
    try:
        user_id = User.objects.filter(username='******')[0].id
    except IndexError:
        # No submitter account: nothing can be attributed, bail out.
        return
    for entry in parse(durl).entries:
        link = entry_to_link_dict(entry)
        link["submitter"] = user_id
        logging.debug(
            "value of user my var is %s",
            str(User.objects.filter(username='******')[0].get_username()))
        content.append(link)
    return content
def test_parse_format_changed(self):
    """Verify make_packet_parser snapshots its header/format arguments.

    After the parser is built, both the (mutable) header and the format
    list are mutated; the parser must still parse the same input to the
    same result, proving it is not affected by later changes.
    """
    # MutableString is used deliberately: a plain str could not be mutated
    # in place, which is exactly the scenario under test.  (Python 2 only;
    # UserString.MutableString was removed in Python 3.)
    from UserString import MutableString
    header = MutableString('lo')
    format = [('field', 2)]
    test_parser = make_packet_parser(header, format)
    self.do_parse_test(test_parser,
                       ['lo\x00\x00'],
                       [{'field': '\x00\x00'}],
                       ['6C6F 0000'],
                       )
    # Mutate both constructor arguments after the parser was built...
    format += [('field2', 2)]
    header.append('l')
    # ...and check the parser still produces the original results.
    self.do_parse_test(test_parser,
                       ['lo\x00\x00'],
                       [{'field': '\x00\x00'}],
                       ['6C6F 0000'],
                       )
def print_board(self):
    # Print the board (ships hidden) preceded by two column-index header
    # rows: the first shows the tens digit (only at multiples of ten from
    # 10 up), the second the units digit.  Each board row is prefixed
    # with its row number via "%2d ".
    raw_string = self.as_string(show_ship=False)
    str_buf_1 = MutableString()
    str_buf_2 = MutableString()
    # Leading padding aligns the headers with the "%2d " row prefix below.
    str_buf_1.append(" ")
    str_buf_2.append(" ")
    for i in range(self.cols):
        if i%10!=0 or i<10:
            # Tens row stays blank except at 10, 20, 30, ...
            str_buf_1.append(' ')
        else:
            str_buf_1.append("%d"%(i/10))
        str_buf_2.append(str(i%10))
    print str_buf_1
    print str_buf_2
    row_num=0
    for row in raw_string.splitlines():
        if len(row)>0:
            print "%2d %s" %(row_num,row)
        row_num+=1
def formatCreateCommand(self, counter, rrdPath):
    """Build the ``rrdtool create`` command line for *counter*.

    counter -- counter definition (sampling rate, DS fields, archives).
    rrdPath -- path of the RRD file to create.
    Returns the complete command string.
    """
    cmd = "create %s --step %d" % (rrdPath, counter.mySamplingRate)
    # Format archives string; str.join replaces the deprecated
    # UserString.MutableString accumulator (removed in Python 3).
    archaivesString = "".join(
        " RRA:%s:%f:%d:%d" % (archaive.myArchaiveType,
                              archaive.myErrorsAllowed,
                              archaive.myConsolidationSpan,
                              archaive.myRowsToKeep)
        for archaive in counter.myArchaives)
    # Format DS
    cmd += " DS:%d:%s:%d:%s:%s %s" % \
        (counter.myCounterId, counter.myCounterType, counter.myMinHeartbeat,
         counter.getMinStr(), counter.getMaxStr(), archaivesString)
    return cmd
class WikiCorpus:
    '''Iterable over the documents of a corpus text file.

    filehandler must be an already-opened file object:
    filehandler = open(filename) where filename is the name of the text
    file storing the corpus.  labeled should be True if the corpus we are
    reading contains the topic labels of the articles.

    Record format: articles are separated by blank lines; the first line
    of a record is the title, the second the category (when labeled),
    and the remaining lines the article text.
    '''

    def __init__(self, filehandler, labeled=False):
        self.filehandler = filehandler
        self.labeled = labeled
        # Cursor position.  These are now per-instance attributes:
        # previously they were class attributes shared between instances
        # until first rebound, and currentText used the deprecated
        # UserString.MutableString (removed in Python 3).
        self.currentTitle = ""
        self.currentCategory = ""
        self.currentText = ""

    def __iter__(self):
        while True:
            nextLine = next(self.filehandler, None)
            if nextLine is None or nextLine == '\n':
                # Record boundary (or end of file): emit the document
                # accumulated so far, if any.
                if self.currentTitle != '':
                    if self.labeled:
                        yield Document(str(self.currentTitle),
                                       str(self.currentText),
                                       str(self.currentCategory))
                    else:
                        yield Document(str(self.currentTitle),
                                       str(self.currentText))
                if nextLine is None:
                    break
                # Reset the cursor for the next record.
                self.currentTitle = ""
                self.currentCategory = ""
                self.currentText = ""
            elif len(self.currentTitle) == 0:
                self.currentTitle = nextLine.strip('\n')
            elif self.labeled and len(self.currentCategory) == 0:
                self.currentCategory = nextLine.strip('\n')
            else:
                self.currentText += nextLine
def toString(self):
    """Return a human-readable description of this property: its name,
    variable type, and value formatted according to that type.

    Fields are concatenated without separators, matching the original
    MutableString-based output exactly.
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = []
    parts.append("Property name: %s" % self.myName)
    parts.append("Property variable type: %s" % self.myVariableType)
    # Get the correct %-format according to the variable type.
    frmt = "Property value: %s" % VariableTypes.FORMAT_BY_NAMES[self.myVariableType]
    parts.append(frmt % self.myValue)
    return "".join(parts)
def toString(self):
    """Return a human-readable description of this property: its name,
    variable type, and value formatted according to that type.

    Fields are concatenated without separators, matching the original
    MutableString-based output exactly.
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = []
    parts.append("Property name: %s" % self.myName)
    parts.append("Property variable type: %s" % self.myVariableType)
    # Get the correct %-format according to the variable type.
    frmt = "Property value: %s" % VariableTypes.FORMAT_BY_NAMES[
        self.myVariableType]
    parts.append(frmt % self.myValue)
    return "".join(parts)
class WikiCorpus:
    '''Iterable over the documents of a corpus text file.

    filehandler must be an already-opened file object:
    filehandler = open(filename) where filename is the name of the text
    file storing the corpus.  labeled should be True if the corpus we are
    reading contains the topic labels of the articles.

    Record format: articles are separated by blank lines; the first line
    of a record is the title, the second the category (when labeled),
    and the remaining lines the article text.
    '''

    def __init__(self, filehandler, labeled=False):
        self.filehandler = filehandler
        self.labeled = labeled
        # Cursor position.  These are now per-instance attributes:
        # previously they were class attributes shared between instances
        # until first rebound, and currentText used the deprecated
        # UserString.MutableString (removed in Python 3).
        self.currentTitle = ""
        self.currentCategory = ""
        self.currentText = ""

    def __iter__(self):
        while True:
            nextLine = next(self.filehandler, None)
            if nextLine is None or nextLine == '\n':
                # Record boundary (or end of file): emit the document
                # accumulated so far, if any.
                if self.currentTitle != '':
                    if self.labeled:
                        yield Document(str(self.currentTitle),
                                       str(self.currentText),
                                       str(self.currentCategory))
                    else:
                        yield Document(str(self.currentTitle),
                                       str(self.currentText))
                if nextLine is None:
                    break
                # Reset the cursor for the next record.
                self.currentTitle = ""
                self.currentCategory = ""
                self.currentText = ""
            elif len(self.currentTitle) == 0:
                self.currentTitle = nextLine.strip('\n')
            elif self.labeled and len(self.currentCategory) == 0:
                self.currentCategory = nextLine.strip('\n')
            else:
                self.currentText += nextLine
def load(durl, greet):
    """Parse the feed at *durl* and return a list of link-info dicts.

    greet -- unused in this loader (kept for interface compatibility with
    the other loaders, which use it as a cache key).
    Returns None when no '******' user exists to attribute links to.
    """
    def remove_tags(text):
        # Strip markup and newlines, escape quotes, normalise curly
        # apostrophes, then drop non-ASCII characters.
        text = TAG_RE.sub('', text)
        text = re.sub("\n", "", text)
        text = re.sub("\"", "\\\"", text)
        text = re.sub(u"(\u2018|\u2019)", "'", text)
        return "".join(filter(lambda x: ord(x) < 128, text))

    def entry_to_link_dict(entry):
        # Flatten one feed entry into the dict shape the caller stores.
        s = MLStripper()
        s.feed(entry.description)
        return {
            "title": remove_tags(entry.title),
            "url": entry.link,
            "linksource": urlparse(entry.link).netloc,
            "votes": "1",
            "description": remove_tags(s.get_data()),
        }

    # Dead `content = MutableString()` initialisation removed: it was
    # immediately rebound to a list.
    content = []
    try:
        user_id = User.objects.filter(username='******')[0].id
    except IndexError:
        # No submitter account: nothing can be attributed, bail out.
        return
    for entry in parse(durl).entries:
        link = entry_to_link_dict(entry)
        link["submitter"] = user_id
        logging.debug(
            "value of user my var is %s",
            str(User.objects.filter(username='******')[0].get_username()))
        content.append(link)
    return content
def toString(self):
    """Return a multi-field human-readable dump of this counter, followed
    by the descriptions of its archives and properties.

    Fields are concatenated exactly as the original MutableString-based
    implementation produced them (trailing spaces included).
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = [
        "Process name: %s " % self.myCounterProcess,
        "Counter path: %s " % self.myCounterPath,
        "Counter name: %s " % self.myCounterName,
        "Counter name: %s " % self.myCounterSamplingName,
        "Sampling rate: %s " % self.mySamplingRate,
        "Counter Type: %s " % self.myCounterType,
        "Counter Units: %s " % self.myMeasuredUnits,
        "Counter present per second is: %s " % str(self.myPresentCounterPerSecond),
        "Counter Id: %d " % self.myCounterId,
        "Is rate? %s" % str(self.myIsRate),
        "Counter short description: %s " % self.myCounterShortDescriptionString,
        "Counter short description override flag: %s " % str(self.myCounterDescriptionIsOverride),
        "Counter value variable type: %s " % self.myVariableType,
        "Meta-Counter arithmetic expression: %s " % self.myMetaCounterExpression,
        "Communication method: %s " % self.myCommMethod,
        "RRD min heartbeat: %s " % self.myMinHeartbeat,
    ]
    parts.extend(arch.toString() for arch in self.myArchaives)
    parts.extend(prop.toString() for prop in self.myProperties)
    return "".join(parts)
def code(question_id, question_title, question_content, question_snippet):
    # Generate a Java source-file stub for one question: strip the HTML
    # from the question text, embed it as a block comment, and wrap or
    # rename the provided code snippet to match the generated file name.
    dr = re.compile(r'<[^>]+>', re.S)  # matches any HTML tag
    question_content = dr.sub('', question_content)
    file_name = rename(question_id, question_title.encode('utf-8'))
    sb = MutableString()
    sb.append(constant.JAVA_FILE_PACKAGE)
    sb.append("\n")
    sb.append("/**\n")
    # Drop characters that would break the Java block comment.
    sb.append(question_content
              .replace('"', '')
              .replace('*/', '')
              )
    sb.append("**/\n")
    # pattern = re.compile(r'(class )(.+)( {)')
    # question_snippet = re.sub(pattern, 'class '+file_name+' {', question_snippet)
    match_obj = re.match(r'(class )(.+)( {)', question_snippet, re.M | re.I)
    class_name = ''
    if match_obj:
        class_name = match_obj.group(2)
    if question_snippet.find('class') < 0:
        # Snippet has no class declaration: wrap it in a new class,
        # keeping the snippet itself commented out.
        sb.append('class ')
        sb.append(str(file_name))
        sb.append(' {\n')
        sb.append('//')
        sb.append(question_snippet)
        sb.append('\n')
        sb.append('}')
    else:
        # Rename the snippet's class to the generated file name.
        sb.append(question_snippet
                  .replace('Solution', str(file_name))
                  .replace(class_name, str(file_name)))
    file_path = constant.JAVA_FILE_PATH
    if not os.path.exists(file_path):
        print 'folder ', file_path, 'not exist, makedir~'
        os.makedirs(file_path)
    temp = str(file_name+'.java')
    if file_exist(file_path, temp):
        # Never overwrite an existing solution file.
        print(file_name, 'already exist')
        return
    with open(file_path + temp, 'wb') as file:
        file.write(str(sb))

# def creator():
#     questions = question_template.questions(constant.QUESTION_LIMIT)
#     for question in questions:
#         code(question[0], question[1], question[4], question[5])
#
# schedule.every(10).second.do(creator())
#
# while True:
#     schedule.run_pending()
def __repr__(self):
    """Render the board as text: each row of the underlying matrix joined
    into one line and terminated with a newline."""
    # list + str.join replaces the deprecated UserString.MutableString
    # buffer (removed in Python 3); output is identical.
    lines = ["".join(row) + "\n" for row in self.__matrix]
    return "".join(lines)
def toString(self):
    """Return a multi-field human-readable dump of this counter, followed
    by the descriptions of its archives and properties.

    Fields are concatenated exactly as the original MutableString-based
    implementation produced them (trailing spaces included).
    """
    # list + str.join replaces the deprecated UserString.MutableString
    # accumulator (removed in Python 3).
    parts = [
        "Process name: %s " % self.myCounterProcess,
        "Counter path: %s " % self.myCounterPath,
        "Counter name: %s " % self.myCounterName,
        "Counter name: %s " % self.myCounterSamplingName,
        "Sampling rate: %s " % self.mySamplingRate,
        "Counter Type: %s " % self.myCounterType,
        "Counter Units: %s " % self.myMeasuredUnits,
        "Counter present per second is: %s " % str(self.myPresentCounterPerSecond),
        "Counter Id: %d " % self.myCounterId,
        "Is rate? %s" % str(self.myIsRate),
        "Counter short description: %s " % self.myCounterShortDescriptionString,
        "Counter short description override flag: %s " % str(self.myCounterDescriptionIsOverride),
        "Counter value variable type: %s " % self.myVariableType,
        "Meta-Counter arithmetic expression: %s " % self.myMetaCounterExpression,
        "Communication method: %s " % self.myCommMethod,
        "RRD min heartbeat: %s " % self.myMinHeartbeat,
    ]
    parts.extend(arch.toString() for arch in self.myArchaives)
    parts.extend(prop.toString() for prop in self.myProperties)
    return "".join(parts)