def __init__(self): """Create our main helpers and compile RE patterns""" self.S = Syllabizer() self.P = Positioner() self.SD = ScanDict(self) self.vowelRE = sre.compile('[aeiouyAEIOUY]') self.wordBoundsRE = sre.compile(r"([-.,;:?!\(\)\"\s]+)") self.possIambRE = sre.compile('(x[x/])+')
def __init__(self): """Create our main helpers and compile RE patterns""" self.LD = LineData() self.S = Syllabizer() self.P = Positioner() self.SD = ScanDict(self) self.vowelRE = sre.compile('[aeiouyAEIOUY]') self.wordBoundsRE = sre.compile(r"([-.,;:?!\(\)\"\s]+)") self.possIambRE = sre.compile('(x[x/])+')
def createCluster(clusterNode, cellName):
    cellConfigNode = getChild(clusterNode, "CellConfig")
    if cellConfigNode:
        createCellConfig(cellConfigNode, cellName)
    for nodeConfigNode in getChildElements(clusterNode, "NodeConfig"):
        createNodeConfig(nodeConfigNode)
    clusterName = clusterNode.attributes['name'].value
    nodesNodeList = getChildElements(clusterNode, 'Node')
    if len(nodesNodeList) > 0:
        clusterId, serverIds, nodeNames = createClusterMembers(clusterName, nodesNodeList, cellName)
        for nodesNode in nodesNodeList:
            nodeName = nodesNode.attributes["name"].value
            for serverNode in getChildElements(nodesNode, "Server"):
                serverName = serverNode.attributes["name"].value
                for serverPortNode in getChildElements(serverNode, "ServerPort"):
                    modifyServerPorts(nodeName, serverName, serverPortNode)
    else:
        clusterId = AdminConfig.getid('/ServerCluster:%s/' % clusterName)
        serverIds, nodeNames = getClusterMembers(clusterId)
    clusterConfigNode = getChild(clusterNode, "ClusterConfig")
    if clusterConfigNode:
        resourcesNode = getChild(clusterConfigNode, "Resources")
        if resourcesNode:
            clusterScope = "Cluster=%s" % clusterName
            clusterPath = "/ServerCluster:%s/" % clusterName
            createResources(clusterPath, clusterScope, resourcesNode)
        modifyServices(clusterConfigNode, clusterId)
        modifyAttributes(clusterConfigNode, clusterId)
    for serverConfigNode in getChildElements(clusterNode, "ServerConfig"):
        if serverConfigNode.getAttributeNode("match"):
            match = sre.compile(serverConfigNode.attributes["match"].value)
        else:
            match = sre.compile(".*")
        for serverItem in serverIds:
            if match.match(serverItem["server"]):
                print "Applying ServerConfig to " + serverItem["server"]
                serverId = serverItem["serverId"]
                modifyServices(serverConfigNode, serverId)
                modifyAttributes(serverConfigNode, serverId)
                resourcesNode = getChild(serverConfigNode, "Resources")
                if resourcesNode:
                    serverScope = "Node=%(node)s,Server=%(server)s" % serverItem
                    serverPath = "/Node:%(node)s/Server:%(server)s/" % serverItem
                    createResources(serverPath, serverScope, resourcesNode)
    print "Saving Config..."
    AdminConfig.save()
    print "Config saved."
    print "Refresh and sync..."
    refreshAndSync(cellName, clusterName, nodeNames)
def main():
    gaia = wikipedia.getSite(code=u'en', fam=u'gaia')
    plot = wikipedia.getSite(code=u'en', fam=u'plotwiki')
    wikipedia.setAction(wikipedia.translate(gaia, msg))
    wikipedia.setAction(wikipedia.translate(plot, msg))

    final = u'<noinclude><!-- Do not edit this page, this page is automatically created by a Bot. -->\n'
    final += u'==Most Recent Events==</noinclude>\n'
    nonrecent = u'<noinclude>==Older Events==\n'
    end = u'\n\'\'View everything here on the [[Plotwiki:|plotwiki...]]\'\'</noinclude>'

    moreYears = True
    year = 04
    events = []
    temp = []
    while moreYears:
        y = str(year)
        page = wikipedia.Page(plot, u'Template:Pnav%s' % y.zfill(2))
        try:
            text = page.get()
            r = sre.compile(u'^.*<span style=".*normal.*">(.*)</span>.*$',
                            sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            text = sre.sub(r, u'\\1', text)
            r = sre.compile(u'\s+\|\s+', sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            pages = sre.split(r, text)
            r = sre.compile(u'\[\[([^|]*)(\|.*)?\]\]', sre.UNICODE)
            for p in pages:
                temp.append(sre.sub(r, u'\\1', p))
            year += 1
        except wikipedia.NoPage:
            moreYears = False

    for e in temp:
        if not e in events:
            events.append(e)

    events = reversed(list(events))
    x = 1
    for e in events:
        final += u'* [[Plotwiki:%s|]]\n' % e
        x += 1
        if x == 6:
            final += nonrecent
    if x <= 6:
        final += end
    final += end

    page = wikipedia.Page(gaia, u'Plotwiki Current Events')
    page.put(final)
def main():
    gaia = wikipedia.getSite(code=u'en', fam=u'gaia')
    plot = wikipedia.getSite(code=u'en', fam=u'plotwiki')
    wikipedia.setAction(wikipedia.translate(gaia, msg))
    wikipedia.setAction(wikipedia.translate(plot, msg))

    final = u'<noinclude><!-- Do not edit this page, this page is automatically created by a Bot. -->\n'
    final+= u'==Most Recent Events==</noinclude>\n'
    nonrecent = u'<noinclude>==Older Events==\n'
    end = u'\n\'\'View everything here on the [[Plotwiki:|plotwiki...]]\'\'</noinclude>'

    moreYears = True
    year = 04
    events = []
    temp = []
    while moreYears:
        y = str(year)
        page = wikipedia.Page(plot, u'Template:Pnav%s' % y.zfill(2))
        try:
            text = page.get()
            r = sre.compile(u'^.*<span style=".*normal.*">(.*)</span>.*$',
                            sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            text = sre.sub(r, u'\\1', text)
            r = sre.compile(u'\s+\|\s+', sre.UNICODE | sre.MULTILINE | sre.DOTALL)
            pages = sre.split(r, text)
            r = sre.compile(u'\[\[([^|]*)(\|.*)?\]\]', sre.UNICODE)
            for p in pages:
                temp.append(sre.sub(r, u'\\1', p))
            year+=1
        except wikipedia.NoPage:
            moreYears = False

    for e in temp:
        if not e in events:
            events.append(e)

    events = reversed(list(events));
    x = 1
    for e in events:
        final+=u'* [[Plotwiki:%s|]]\n' % e
        x+=1
        if x==6:
            final+=nonrecent
    if x<=6:
        final+=end
    final+=end

    page = wikipedia.Page(gaia, u'Plotwiki Current Events')
    page.put(final)
def __init__(self):
    self.suffixes = sre.compile(
        r"""
        [^aeiouhr]y\b | er\b | age | est | ing | ness\b | less | ful | ment\b |
        time\b | [st]ion | [ia]ble\b | [ct]ial | [ctg]iou | [ctg]ious
        """,
        sre.VERBOSE,
    )
    # | ical\b | icle\b | ual\b | ism \b | [ae]ry\b  # don't work (as 2-syl)
    # Note: left out special-character "*ag$" and "tim$" -- don't understand!
    # final syllable spelled with liquid or nasal and silent 'e'
    self.liquidterm = sre.compile(r" [^aeiouy] [rl] e \b", sre.X)
    # the collection of special-character groups
    self.finalE = sre.compile(r" [^aeiouy] e \b ", sre.X)
    self.CiVcomb = sre.compile(r" [st] i [aeiouy] ", sre.X)
    self.CCpair = sre.compile(r" [cgprstw] h | gn | gu[aeiouy] | qu | ck", sre.X)
    self.VyVcomb = sre.compile(r" [aeiou] y [aeiou]", sre.X)
    # vowel pairs reliably disyllabic (not 'ui' ('juice' vs 'intuition'! some
    # 'ue' missed ('constituent'), some 'oe' ('poem'))
    self.sylvowels = sre.compile(r" [aeiu] o | [iu] a | iu", sre.X)
    # divisions should fall before or after, not within, these consonant pairs
    self.splitLeftPairs = sre.compile(
        r""" [bdfk%02] [rl] | g [rln] | [tw] r | p [rlsn] s [nml]""",
        sre.X,
    )
class InpCmdLineOptions:
    r"""Reads a chapter-section style template file constructing the list
    of variables that can be specified on the command prompt.
    """

    # matches "$var"
    varRe = sre.compile('\$(?P<var>\w+)')

    def __init__(self, fname):
        self.fname = fname  # file name
        self.vars = {}      # dictionary of variable -> value

    def parse(self):
        r"""parse() -> None
        """
        fp = open(self.fname, 'r')
        data = fp.read()
        values = self.varRe.findall(data)  # findall all $vars in file
        # loop over values, inserting them in the dictionary
        for v in values:
            self.vars[v] = None

    def getVars(self):
        return self.vars.keys()
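# Hedged usage sketch for InpCmdLineOptions above. The template file name
# "job.tmpl" and its contents are invented for illustration; the sketch only
# assumes the class definition above is in scope.
open('job.tmpl', 'w').write("run $nprocs processes on $queue for $nprocs\n")
opts = InpCmdLineOptions('job.tmpl')
opts.parse()
print sorted(opts.getVars())   # duplicate $vars collapse: ['nprocs', 'queue']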
def _setOkay(self, okay):
    if type(okay) == str:
        self.okay = sre.compile(okay)
    elif type(okay) in (LambdaType, ListType, NoneType):
        self.okay = okay
    else:
        raise TypeError, "okay must be lambda, list, or string"
def add_expression(self, expression, state, action, next_state, flags=0):
    """Adds a transition that activates if the input symbol matches the
    regular expression.  The action callable gets a match object instead
    of the symbol."""
    cre = sre.compile(expression, flags)
    self._expressions.append( (cre, state, action, next_state) )
    self._transitions[(SREType, state)] = (self._check_expression, None)
def init_pattern(key):
    """Return a SRE compiled pattern; the match can be accessed in the
    match object as
       m = P[key].match(string)
       m.group(key)
    """
    return sre.compile('^init\(\): %(key)s: *(?P<%(key)s>.*)$' % locals())
def __init__(self, parent, ID, title):
    wd, ht, fnt = self._setSizes()
    wx.Frame.__init__(self, parent, ID, title, size=(wd, ht))
    # our panels, top to bottom
    self.ScanLine = MyScanTC(self, fnt)
    self.TextLine = MyLineTC(self, fnt)
    self.NotesWindow = MyNotesTC(self, fnt)
    self.WholeText = MyTextSTC(self, -1)
    # line numbers
    self.WholeText.SetMarginType(0, stc.STC_MARGIN_NUMBER)
    self.WholeText.SetMarginType(1, stc.STC_MARGIN_SYMBOL)
    self.WholeText.StyleSetBackground(stc.STC_STYLE_LINENUMBER, (246,246,246))
    # initialize our data members and helpers
    self.SM = ScansionMachine()        # central engine of scansion work
    self.E = Explainer(self.NotesWindow)
    self.lineNum = 0                   # where to put its scansion when done
    self.Metron = 2                    # initial assumption:
    self.LineFeet = 5                  # iambic pentameter
    self.LineFeetSet = True
    self.SM.SetLineFeet(5, True)
    self.forceiamb, self.forceanap = False, False
    self.SetupGUI()                    # buttons, menus . . .
    self.SetupScansionSteps()          # inc some more data items
    self.WholeText.DisplayText(InitialText)   # as a startup . . .
    self.loadedtext = True
    self.WholeText.SetReadOnly(0)      # but allow editing
    self.DisableAllScanButtons()
    self.loadPath, self.savePath = '', ''
    wx.FutureCall(100, self.WholeText.SetFocus)  # Robin Dunn's fix!
    self.leadSpaceRE = sre.compile(r'[ |\t]+')
    self.newFindDialog, self.oldFindDialog = [None for i in range(2)]
    # icon
    ico = robIcon.getrobIcon()
    self.SetIcon(ico)
def parse(self, text):
    linesplitter = sre.compile(r"[ \t=]")
    self.lines = []
    state = 0
    currenttitle = None
    for line in text.splitlines():
        line = line.strip()
        if line.startswith("#"):
            self.lines.append(GrubComment(line[1:]))
            continue
        if not line:
            continue
        if state == 0:
            cmd, rest = linesplitter.split(line, 1)
            CMD = COMMANDS[cmd]
            if CMD.flags & BUILTIN_TITLE:
                state = 1
                currenttitle = GrubTitleEntry(rest)
                self.lines.append(currenttitle)
                continue
            elif CMD.flags & BUILTIN_MENU:
                self.lines.append(GrubEntry(CMD, rest))
        if state == 1:
            cmd, rest = linesplitter.split(line, 1)
            CMD = COMMANDS[cmd]
            if CMD.flags & BUILTIN_TITLE:
                currenttitle = GrubTitleEntry(rest)
                self.lines.append(currenttitle)
            elif CMD.flags & (BUILTIN_CMDLINE | BUILTIN_MENU):
                currenttitle.lines.append(GrubEntry(CMD, rest))
def logall(start, xtra_args=None, verbose=False, dry_run=False):
    paths = []
    svn = Popen(['svn', 'info', start], stdout=PIPE)
    url_p = sre.compile(r'^URL: (.*)')
    url = None
    for line in svn.stdout:
        m = url_p.match(line)
        if m:
            url = m.group(1)
    assert url is not None, "svn info: could not find URL:"
    paths = []
    svn = Popen(['svn', 'list', '--recursive', url], stdout=PIPE)
    [paths.append(L.strip()) for L in svn.stdout]
    r = svn.wait()
    if r != 0:
        print "\n".join(paths)
        sys.exit(r)
    cmd = ['svn']
    if xtra_args:
        cmd.extend(xtra_args)
    cmd.extend( ['log', url] )
    cmd.extend(paths)
    # if verbose or dry_run:
    #     print ' '.join(cmd)
    if dry_run:
        sys.exit(0)
    svn = Popen(cmd, stdout=PIPE)
    for line in svn.stdout:
        print line,
def __init__(self, parent, ID, title, poem):
    wd, ht, fnt = self._setSizes()
    wx.Frame.__init__(self, parent, ID, title, size=(wd, ht))
    # our panels, top to bottom
    self.ScanLine = MyScanTC(self, fnt)
    self.TextLine = MyLineTC(self, fnt)
    self.NotesWindow = MyNotesTC(self, fnt)
    self.WholeText = MyTextSTC(self, -1)
    # line numbers
    self.WholeText.SetMarginType(0, stc.STC_MARGIN_NUMBER)
    self.WholeText.SetMarginType(1, stc.STC_MARGIN_SYMBOL)
    self.WholeText.SetMarginWidth(0, 1)
    self.WholeText.SetMarginWidth(1, 10)
    self.WholeText.StyleSetBackground(stc.STC_STYLE_LINENUMBER, (246,246,246))
    # initialize our data members and helpers
    self.SM = ScansionMachine()        # central engine of scansion work
    self.E = Explainer(self.NotesWindow)
    self.lineNum = 0                   # where to put its scansion when done
    self.loaddir = ''                  # where user gets textfiles to Load
    self.Metron = 2                    # initial assumption:
    self.LineFeet = 5                  # iambic pentameter
    self.LineFeetSet = True
    self.SM.SetLineFeet(5, True)
    self.SetupGUI()                    # buttons, menus . . .
    # self.dwds = []
    self.SetupScansionSteps()          # inc some more data items
    self.WholeText.DisplayText(InitialText)   # as a startup . . .
    self.WholeText.SetReadOnly(0)      # but allow editing
    self.EnableScanButtons(False)
    wx.FutureCall(100, self.WholeText.SetFocus)  # Robin Dunn's fix!
    self.leadSpaceRE = sre.compile(r'[ |\t]+')
    self.poem = poem
def __init__(self, parent, ID, title):
    wd, ht, fnt = self._setSizes()
    wx.Frame.__init__(self, parent, ID, title, size=(wd, ht))
    # our panels, top to bottom
    self.ScanLine = MyScanTC(self, fnt)
    self.TextLine = MyLineTC(self, fnt)
    self.NotesWindow = MyNotesTC(self, fnt)
    self.WholeText = MyTextSTC(self, -1)
    # line numbers
    self.WholeText.SetMarginType(0, stc.STC_MARGIN_NUMBER)
    self.WholeText.SetMarginType(1, stc.STC_MARGIN_SYMBOL)
    self.WholeText.StyleSetBackground(stc.STC_STYLE_LINENUMBER, (246, 246, 246))
    # initialize our data members and helpers
    self.SM = ScansionMachine()        # central engine of scansion work
    self.E = Explainer(self.NotesWindow)
    self.lineNum = 0                   # where to put its scansion when done
    self.Metron = 2                    # initial assumption:
    self.LineFeet = 5                  # iambic pentameter
    self.LineFeetSet = True
    self.SM.SetLineFeet(5, True)
    self.forceiamb, self.forceanap = False, False
    self.SetupGUI()                    # buttons, menus . . .
    self.SetupScansionSteps()          # inc some more data items
    self.WholeText.DisplayText(InitialText)   # as a startup . . .
    self.loadedtext = True
    self.WholeText.SetReadOnly(0)      # but allow editing
    self.DisableAllScanButtons()
    self.loadPath, self.savePath = '', ''
    wx.FutureCall(100, self.WholeText.SetFocus)  # Robin Dunn's fix!
    self.leadSpaceRE = sre.compile(r'[ |\t]+')
    self.newFindDialog, self.oldFindDialog = [None for i in range(2)]
    # icon
    ico = robIcon.getrobIcon()
    self.SetIcon(ico)
def init_pattern(key):
    """Return a SRE compiled pattern; the match can be accessed in the
    match object as
       m = P[key].match(string)
       m.group(key)
    """
    return sre.compile("^init\(\): {key!s}: *(?P<{key!s}>.*)$".format(**locals()))
def _findFiles(suffix):
    """ Builds list of all files which contain suffix.
    Suffix should not contain wildcards; instead, they should be
    like 'crj.fits' or '.dat' or 'coeffs'.
    """
    _ldir = os.listdir('.')

    # format suffix for use in reg expression
    _indx = string.find(suffix, '.')
    if _indx > 0:
        extn = suffix[:_indx] + '[.]' + suffix[_indx + 1:]
    else:
        extn = suffix
    regpatt = '.*' + extn + '.*$'

    # compile regular expression
    _reg = sre.compile(regpatt)

    # build file list
    flist = []
    for file in _ldir:
        if _reg.match(file):
            flist.append(file)
    return flist
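# Hedged usage sketch for _findFiles above: the suffixes are examples only.
# The helper just lists files in the current directory whose names contain
# the suffix, with any '.' in the suffix matched literally.
for suffix in ['.dat', 'crj.fits']:
    print suffix, '->', _findFiles(suffix)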
def listCellConfig(doc, cellConfigElement):
    security = AdminConfig.getid('/Cell:/Security:/')
    securityElement = doc.createElement("Security")
    cellConfigElement.appendChild(securityElement)
    for configClass in ConfigClasses.CellConfigSecurity:
        for configObj in AdminConfig.list(configClass.classname, security).splitlines():
            configElement = doc.createElement(configClass.classname)
            securityElement.appendChild(configElement)
            for attribute in configClass.requiredAttributes():
                configElement.setAttribute(attribute.attributeName,
                                           AdminConfig.showAttribute(configObj, attribute.attributeName))
    signerCertRegExp = sre.compile(
        "\[ \[issuedTo (.*)\] \[fingerPrint .*\] \[signatureAlgorithm .*\] \[serialNumber .*\] \[alias (.*)\] \[validity (.*)\] \[version .*\] \[issuedBy .*\] \[size .*\] \]")
    for keystoreName in ['CellDefaultKeyStore', 'CellDefaultTrustStore']:
        keystoreElement = doc.createElement(keystoreName)
        securityElement.appendChild(keystoreElement)
        for line in AdminTask.listSignerCertificates('[-keyStoreName %s]' % keystoreName).splitlines():
            signerCertMatch = signerCertRegExp.match(line)
            if signerCertMatch:
                signerCertElement = doc.createElement('SignerCertificate')
                keystoreElement.appendChild(signerCertElement)
                signerCertElement.setAttribute("alias", signerCertMatch.group(2))
                signerCertElement.setAttribute("issuedTo", signerCertMatch.group(1))
                signerCertElement.setAttribute("validity", signerCertMatch.group(3))
        for line in AdminTask.listPersonalCertificates('[-keyStoreName %s]' % keystoreName).splitlines():
            signerCertMatch = signerCertRegExp.match(line)
            if signerCertMatch:
                signerCertElement = doc.createElement('PersonalCertificate')
                keystoreElement.appendChild(signerCertElement)
                signerCertElement.setAttribute("alias", signerCertMatch.group(2))
                signerCertElement.setAttribute("issuedTo", signerCertMatch.group(1))
                signerCertElement.setAttribute("validity", signerCertMatch.group(3))
    cellResourcesElement = doc.createElement('Resources')
    cellConfigElement.appendChild(cellResourcesElement)
    listCellResources(doc, cellResourcesElement, AdminConfig.getid('/Cell:/'))
def appendTargets(doc, moduleElement, moduleTargetLine):
    nodeServerRegexp = sre.compile("WebSphere:cell=.*,node=(.*),server=(.*)")
    clusterRegexp = sre.compile("WebSphere:cell=.*,cluster=(.*)")
    for moduleTargetId in moduleTargetLine.split("+"):
        clusterMatch = clusterRegexp.match(moduleTargetId)
        if clusterMatch:
            targetElement = doc.createElement("Target")
            moduleElement.appendChild(targetElement)
            targetElement.setAttribute("cluster", clusterMatch.group(1))
        else:
            nodeServerMatch = nodeServerRegexp.match(moduleTargetId)
            if nodeServerMatch:
                targetElement = doc.createElement("Target")
                moduleElement.appendChild(targetElement)
                targetElement.setAttribute("node", nodeServerMatch.group(1))
                targetElement.setAttribute("server", nodeServerMatch.group(2))
def substitute(self,*args):
    for color in colors:
        self.txt.tag_remove(color,"1.0","end")
        self.txt.tag_remove("emph"+color,"1.0","end")
    self.rex = sre.compile("")  # default value in case of misformed regexp
    self.rex = sre.compile(self.fld.get("1.0","end")[:-1],sre.MULTILINE)
    try:
        sre.compile("(?P<emph>%s)" % self.fld.get(tk.SEL_FIRST,tk.SEL_LAST))
        self.rexSel = sre.compile("%s(?P<emph>%s)%s" % (
            self.fld.get("1.0",tk.SEL_FIRST),
            self.fld.get(tk.SEL_FIRST,tk.SEL_LAST),
            self.fld.get(tk.SEL_LAST,"end")[:-1],
        ),sre.MULTILINE)
    except:
        self.rexSel = self.rex
    self.rexSel.sub(self.addTags,self.txt.get("1.0","end"))
def init_pattern(key):
    """Return a SRE compiled pattern; the match can be accessed in the
    match object as
       m = P[key].match(string)
       m.group(key)
    """
    return sre.compile(
        '^init\(\): {key!s}: *(?P<{key!s}>.*)$'.format(**locals()))
class InpParser:
    # matches "[SectionName]"
    reSection = sre.compile('\[(?P<sn>\S+)\]')
    # matches "key = value"
    reKeyValue = sre.compile('(?P<key>\w+)[ \t]*=[ \t]*(?P<value>\S+)')

    def __init__(self, fname):
        self.fname = fname  # input file name

    def parse(self):
        r"""parse() -> Chapter

        Parses input file and returns chapter object for the complete
        input file."""
        chapter = Chapter()
        # open input file for reading
        fp = open(self.fname, 'r')
        data = fp.read().split('\n')  # split into lines
        currSection = None
        for d in data:
            # check if line is section header
            mo = self.reSection.search(d)
            if mo:
                # yes, it is a section header: extract name and add it to chapter
                sn = mo.group('sn')
                section = Section(sn)
                chapter.addSection(sn, section)
                currSection = section
            else:
                # no it is not: check if it is key=value pair
                mo = self.reKeyValue.search(d)
                if mo:
                    # yes, it is: extract key and pair
                    key = mo.group('key')
                    value = mo.group('value')
                    currSection.addValue(key, value)
                else:
                    # it is neither section nor data line so ignore it
                    pass
        return chapter
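# Hedged usage sketch for InpParser above: the file name and its
# "[Section]" / "key = value" contents are invented, and the sketch assumes
# the Chapter/Section classes that InpParser relies on are importable.
sample = "[Main]\nnprocs = 4\nqueue = batch\n[Output]\ndir = ./results\n"
open('sample.inp', 'w').write(sample)
chapter = InpParser('sample.inp').parse()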
def substitute(self, *args):
    for color in colors:
        self.txt.tag_remove(color, "1.0", "end")
        self.txt.tag_remove("emph" + color, "1.0", "end")
    self.rex = sre.compile("")  # default value in case of misformed regexp
    self.rex = sre.compile(self.fld.get("1.0", "end")[:-1], sre.MULTILINE)
    try:
        sre.compile("(?P<emph>%s)" % self.fld.get(tk.SEL_FIRST, tk.SEL_LAST))
        self.rexSel = sre.compile(
            "%s(?P<emph>%s)%s" % (
                self.fld.get("1.0", tk.SEL_FIRST),
                self.fld.get(tk.SEL_FIRST, tk.SEL_LAST),
                self.fld.get(tk.SEL_LAST, "end")[:-1],
            ), sre.MULTILINE)
    except:
        self.rexSel = self.rex
    self.rexSel.sub(self.addTags, self.txt.get("1.0", "end"))
def main():
    #Setup Families for Wikia Involved
    anime = wikipedia.getSite(code=u'en', fam=u'anime')
    wikipedia.setAction(wikipedia.translate(anime, msg))
    siteList = []

    #Get Project Wiki Listing
    wikiaIds = []
    page = wikipedia.Page(anime, u'Bots/Wiki', None, None, 4)  #4=Project Namespace
    try:
        text = page.get()
        r = sre.compile(u'^.*<!-- \|\|START\|\| -->\n?', sre.UNICODE | sre.DOTALL)
        text = sre.sub(r, u'', text)
        r = sre.compile(u'\n?<!-- \|\|END\|\| -->.*$', sre.UNICODE | sre.DOTALL)
        text = sre.sub(r, u'', text)
        r = sre.compile(u'\n', sre.UNICODE | sre.MULTILINE | sre.DOTALL)
        wikilist = sre.split(r, text)
        for wiki in wikilist:
            if wiki != u'':
                wikiaIds.append(wiki)
    except wikipedia.NoPage:
        moreYears = False

    for wiki in wikiaIds:
        siteList.append(wikipedia.getSite(code=u'en', fam=wiki))

    commonstart = u'@import "http://en.anime.wikia.com/index.php?title=MediaWiki:Anime-Common.css&action=raw&ctype=text/css";'
    monobookstart = u'@import "http://en.anime.wikia.com/index.php?title=MediaWiki:Anime-Monobook.css&action=raw&ctype=text/css";'

    for site in siteList:
        common = wikipedia.Page(site, u'Common.css', None, None, 8)  #8=MediaWiki Namespace
        monobook = wikipedia.Page(site, u'Monobook.css', None, None, 8)  #8=MediaWiki Namespace
        siteSource = u''
        try:
            siteSource = sitePage.get()
        except wikipedia.NoPage:
            wikipedia.output(u'Site %s has no %s template, creating it' % (site, template))
        if siteSource != templateSource:
            wikipedia.output(u'Site \'%s\' template status: Needs Updating' % site)
            wikipedia.output(u'Updating template on %s' % site)
            sitePage.put(templateSource)
        else:
            wikipedia.output(u'Site \'%s\' template status: Ok' % site)
def __init__(self, hunk):
    self.hunk = hunk
    self.lines = self.hunk.split("\n")
    self.linecount = len(self.lines)
    self.insertions = 0
    self.deletions = 0
    self.added = []
    self.removed = []
    self.ID = sre.compile("\$Id:{0,1}.*\$")
    self.Revision = sre.compile("\$Revision:{0,1}.*\$")
    self.Author = sre.compile("\$Author:{0,1}.*\$")
    self.Date = sre.compile("\$Date:{0,1}.*\$")
    self.ok = 0
    for i in range(0, self.linecount):
        if self.lines[i].startswith("+"):
            self.insertions += 1
            self.added.append(self.lines[i])
        if self.lines[i].startswith("-"):
            self.deletions += 1
            self.removed.append(self.lines[i])
    self.checkhunk()
def auto_complete(self):
    pos = self._sc.get_current_pos()
    wpos = self._sc.word_start_position(pos, True)
    linepos, line = self._sc.get_cur_line()
    word = line[linepos - (pos - wpos):linepos]
    if word:
        pattern = sre.compile(r'\W(%s.+?)\W' % word)
        text = self._sc.get_text()
        words = set(pattern.findall(text))
        self._sc.auto_c_set_choose_single(True)
        self._sc.auto_c_set_drop_rest_of_word(True)
        self._sc.auto_c_show(len(word), ' '.join(sorted(list(words))))
def getSuggestionListFile(word, filename):
    try:
        f = file(filename, 'r')
    except IOError:
        return []
    pattern = sre.compile(word+r'.*?\b')
    matches = []
    for line in f.readlines():
        for match in pattern.findall(line):
            if match != word and match not in matches:
                matches.append(match)
    matches.sort()
    return matches
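# Hedged usage sketch for getSuggestionListFile above: the word-list file is
# hypothetical. The helper returns words from the file that start with the
# given prefix (excluding the prefix itself), sorted alphabetically.
open('words.txt', 'w').write("internal interface interact\n")
print getSuggestionListFile('inter', 'words.txt')
# expected something like: ['interact', 'interface', 'internal']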
def from_one_line(msg):
    # (?<!\\) is a lookbehind assertion which asks anything but '\'
    # to match the regexp that follows it
    # So here match '\\n' but not if you have a '\' before that
    re = sre.compile(r"(?<!\\)\\n")
    msg = re.sub("\n", msg)
    msg = msg.replace("\\\\", "\\")
    # s12 = 'test\\ntest\\\\ntest'
    # s13 = re.sub('\n', s12)
    # s14 s13.replace('\\\\', '\\')
    # s14
    # 'test\ntest\\ntest'
    return msg
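# Hedged usage check for from_one_line above, reproducing the example from
# its own comments: an unescaped "\n" becomes a real newline, while "\\n"
# stays a literal backslash followed by 'n'.
s12 = 'test\\ntest\\\\ntest'
print `from_one_line(s12)`   # -> 'test\ntest\\ntest'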
def get_next_image_dir():
    dnbase = "./imgs"
    lst = glob.glob(dnbase + '-[0-9][0-9][0-9][0-9]')
    if (lst):
        lst.sort()
        last = lst[len(lst) - 1]
        m = sre.search(sre.compile(dnbase + "-(\d+)$"), last)
        nlast = int(m.group(1))
        fno = nlast + 1
    else:
        fno = 1
    dirname = (dnbase + "-%04d") % (fno)
    # printf("dirname=%s\n", dirname);
    os.mkdir(dirname)
    return dirname
def loadurl(urlname,context={}):
    """resolves a url and returns the results
    if the url starts with file:// then it will try to find a static file
    if it starts with python:// then it will try to find a python object"""
    parameter_re = "\s*(\w+\s*=|)\s*\w+\s*"
    namedparameter_re = "\s*((?P<key>\w+)\s*=|)\s*(?P<value>\w+)\s*"
    identifier_re = "(\w+[.])+\w+"
    module_re = "(?P<module>(%s))" % identifier_re
    parameters_re = "(?P<params>(%s,)*%s)" % (parameter_re, parameter_re)
    innerid_re = "[.]\w+"
    import_re = "\s*%s\s*(\(%s\)\s*|)" % (module_re, parameters_re)
    import_matcher = sre.compile(import_re)
    import_match = import_matcher.match(urlname)
    if not import_match:
        raise ValueError("Invalid overlay definition: %s" % urlname)
    module = import_match.group("module")
    moduleparts = module.split(".")
    #load the first part as that should be a module
    modulename = moduleparts.pop(0)
    includeobject = imp.load_module(modulename,*imp.find_module(modulename))
    #cycle through the rest of the url parts gathering objects as we go
    for part in moduleparts:
        if hasattr(includeobject,part):
            includeobject = getattr(includeobject,part)
    if callable(includeobject):
        parameters = import_match.group("params")
        includeargs = []
        includekwargs = {}
        if parameters:
            for parameterdef in parameters.split(","):
                namedparameter_match = sre.match(namedparameter_re, parameterdef)
                key = namedparameter_match.group("key")
                value = namedparameter_match.group("value")
                value = context.get(value)
                if key:
                    if isinstance(key, unicode):
                        key = key.encode("utf-8")
                    includekwargs[key] = context.get(value,value)
                else:
                    includeargs.append(context.get(value,value))
        includeobject = includeobject(*includeargs, **includekwargs)
        return str(includeobject)
    includesource = open(os.path.join(namespacedir, filename), "r").read()
    return includesource
def format_directory(self):
    # XXX Unixism
    if self.url and self.url[-1] != '/':
        self.url = self.url + '/'
    fp = os.popen("ls -l -a %s/. 2>&1" % self.pathname, "r")
    lines = fp.readlines()
    fp.close()
    import StringIO
    import sre
    from urllib import quote
    from urlparse import urljoin

    def escape(s):
        if not s:
            return ""
        s = s.replace('&', '&amp;')  # Must be done first
        s = s.replace('<', '&lt;')
        s = s.replace('>', '&gt;')
        return s

    prog = sre.compile(self.listing_pattern)
    data = self.listing_header % {
        'url': self.url,
        'pathname': escape(self.pathname)
    }
    for line in lines:
        if line[-1] == '\n':
            line = line[:-1]
        m = prog.match(line)
        if not m:
            line = escape(line) + '\n'
            data = data + line
            continue
        mode, middle, name = m.group(1, 2, 3)
        rawname = name
        [mode, middle, name] = map(escape, [mode, middle, name])
        href = urljoin(self.url, quote(rawname))
        if len(mode) == 10 and mode[0] == 'd' or name[-1:] == '/':
            if name[-1:] != '/':
                name = name + '/'
            if href[-1:] != '/':
                href = href + '/'
        line = '%s%s<A HREF="%s">%s</A>\n' % (mode, middle, escape(href), name)
        data = data + line
    data = data + self.listing_trailer
    self.fp = StringIO.StringIO(data)
    self.headers['content-type'] = 'text/html'
    self.headers['content-length'] = str(len(data))
class GrailFTP(ftplib.FTP):
    #
    # Hackish subclass of ftplib.FTP to allow the transfer size to be
    # available for the creation of a content-length header.
    #
    import sre
    _size_re = sre.compile("\\(([0-9][0-9]*) bytes\\)", sre.IGNORECASE)
    _xfer_size = None

    def getresp(self):
        resp = ftplib.FTP.getresp(self)
        if len(resp) >= 3 and resp[:3] == "150":
            m = self._size_re.search(resp)
            if m and m.start() >= 0:
                self._xfer_size = string.atoi(m.group(1))
        return resp
def search(self, argv):
    """search <pattern>
    Search the source file for the regular expression pattern."""
    patt = sre.compile(" ".join(argv[1:]))
    filename = self._dbg.curframe.f_code.co_filename
    if self._dbg.lineno is None:
        start = 0
    else:
        start = max(0, self._dbg.lineno - 9)
    lines = linecache.getlines(filename)[start:]
    for lineno, line in enumerate(lines):
        #line = linecache.getline(filename, lineno)
        mo = patt.search(line)
        if mo:
            self._print_source(filename, lineno+start-10, lineno+start+10)
            return
    else:
        self._print("Pattern not found.")
def scan_log(logfile, P):
    STATUS = {"abort": sre.compile("(?P<abort>Abort|abort)")}
    Vars = {}
    StatusVars = {}
    with open(logfile, "r") as log:
        print "== {logfile!s} ==".format(**locals())
        for line in log:
            l = line.strip()
            for key, pattern in P.items():
                m = pattern.match(l)
                if m:
                    Vars[key] = m.group(key)
                    break
            for key, pattern in STATUS.items():
                m = pattern.search(l)
                if m:
                    StatusVars[key] = m.group(key)
    return Vars, StatusVars
def format_directory(self):
    # XXX Unixism
    if self.url and self.url[-1] != '/':
        self.url = self.url + '/'
    fp = os.popen("ls -l -a %s/. 2>&1" % self.pathname, "r")
    lines = fp.readlines()
    fp.close()
    import StringIO
    import sre
    from urllib import quote
    from urlparse import urljoin

    def escape(s):
        if not s:
            return ""
        s = s.replace('&', '&amp;')  # Must be done first
        s = s.replace('<', '&lt;')
        s = s.replace('>', '&gt;')
        return s

    prog = sre.compile(self.listing_pattern)
    data = self.listing_header % {'url': self.url,
                                  'pathname': escape(self.pathname)}
    for line in lines:
        if line[-1] == '\n':
            line = line[:-1]
        m = prog.match(line)
        if not m:
            line = escape(line) + '\n'
            data = data + line
            continue
        mode, middle, name = m.group(1, 2, 3)
        rawname = name
        [mode, middle, name] = map(escape, [mode, middle, name])
        href = urljoin(self.url, quote(rawname))
        if len(mode) == 10 and mode[0] == 'd' or name[-1:] == '/':
            if name[-1:] != '/':
                name = name + '/'
            if href[-1:] != '/':
                href = href + '/'
        line = '%s%s<A HREF="%s">%s</A>\n' % (
            mode, middle, escape(href), name)
        data = data + line
    data = data + self.listing_trailer
    self.fp = StringIO.StringIO(data)
    self.headers['content-type'] = 'text/html'
    self.headers['content-length'] = str(len(data))
def listCluster(doc, clusterElement, cluster):
    clusterName = AdminConfig.showAttribute(cluster, 'name')
    print 'listCluster: ' + clusterName
    clusterElement.setAttribute('name', clusterName)
    members = wsadminToList(AdminConfig.showAttribute(cluster, 'members'))
    for member in members:
        nodeElement = doc.createElement('Node')
        clusterElement.appendChild(nodeElement)
        nodeName = AdminConfig.showAttribute(member, 'nodeName')
        nodeElement.setAttribute('name', nodeName)
        serverElement = doc.createElement('Server')
        nodeElement.appendChild(serverElement)
        serverName = AdminConfig.showAttribute(member, 'memberName')
        serverElement.setAttribute('name', serverName)
        serverPortRegex = sre.compile("\[(.*) \[ \[port (.*)\] \[node (.*)\] \[host (.*)\] \[server (.*)\] \]\]")
        for serverPort in AdminTask.listServerPorts(serverName, '-nodeName %s' % nodeName).splitlines():
            serverPortMatch = serverPortRegex.match(serverPort)
            if serverPortMatch:
                serverPortElement = doc.createElement("ServerPort")
                serverElement.appendChild(serverPortElement)
                serverPortElement.setAttribute("endPointName", serverPortMatch.group(1))
                serverPortElement.setAttribute("host", serverPortMatch.group(4))
                serverPortElement.setAttribute("port", serverPortMatch.group(2))
    clusterConfigElement = doc.createElement('ClusterConfig')
    clusterElement.appendChild(clusterConfigElement)
    clusterResourcesElement = doc.createElement('Resources')
    clusterConfigElement.appendChild(clusterResourcesElement)
    listClusterResources(doc, clusterResourcesElement, cluster)
    for member in members:
        nodeName = AdminConfig.showAttribute(member, 'nodeName')
        serverName = AdminConfig.showAttribute(member, 'memberName')
        print 'listMember: ' + serverName
        server = AdminConfig.getid('/Node:%s/Server:%s/' % (nodeName, serverName))
        serverConfigElement = doc.createElement('ServerConfig')
        clusterElement.appendChild(serverConfigElement)
        serverConfigElement.setAttribute('match', serverName)
        serverResourcesElement = doc.createElement('Resources')
        serverConfigElement.appendChild(serverResourcesElement)
        listClusterResources(doc, serverResourcesElement, server)
        serverServicesElement = doc.createElement('Services')
        serverConfigElement.appendChild(serverServicesElement)
        listServerConfigServices(doc, serverServicesElement, server)
        listConfigAttributes(doc, serverConfigElement, server, ConfigClasses.ServerConfigClass)
def __init__(self):
    self.suffixes = sre.compile(r"""
        [^aeiouhr]y\b | er\b | age | est | ing | ness\b | less | ful | ment\b |
        time\b | [st]ion | [ia]ble\b | [ct]ial | [ctg]iou | [ctg]ious
        """, sre.VERBOSE)
    # | ical\b | icle\b | ual\b | ism \b | [ae]ry\b  # don't work (as 2-syl)
    # Note: left out special-character "*ag$" and "tim$" -- don't understand!
    # final syllable spelled with liquid or nasal and silent 'e'
    self.liquidterm = sre.compile(r" [^aeiouy] [rl] e \b", sre.X)
    # the collection of special-character groups
    self.finalE = sre.compile(r" [^aeiouy] e \b ", sre.X)
    self.CiVcomb = sre.compile(r" [st] i [aeiouy] ", sre.X)
    self.CCpair = sre.compile(r" [cgprstw] h | gn | gu[aeiouy] | qu | ck", sre.X)
    self.VyVcomb = sre.compile(r" [aeiou] y [aeiou]", sre.X)
    # vowel pairs reliably disyllabic (not 'ui' ('juice' vs 'intuition'! some
    # 'ue' missed ('constituent'), some 'oe' ('poem'))
    self.sylvowels = sre.compile(r" [aeiu] o | [iu] a | iu", sre.X)
    # divisions should fall before or after, not within, these consonant pairs
    self.splitLeftPairs = sre.compile(
        r""" [bdfk%02] [rl] | g [rln] | [tw] r | p [rlsn] s [nml]""", sre.X)
def find_function(funcname, filename):
    cre = sre.compile(r'def\s+%s\s*[(]' % funcname)
    try:
        fp = open(filename)
    except IOError:
        return None
    # consumer of this info expects the first line to be 1
    lineno = 1
    answer = None
    while 1:
        line = fp.readline()
        if line == '':
            break
        if cre.match(line):
            answer = funcname, filename, lineno
            break
        lineno = lineno + 1
    fp.close()
    return answer
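# Hedged usage sketch for find_function above: the function and file names
# are hypothetical. The helper returns (funcname, filename, lineno) for the
# first matching "def funcname(" line, or None if nothing matches.
print find_function('scan_log', 'logtools.py')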
def scan_log(logfile, P):
    STATUS = {
        'abort': sre.compile('(?P<abort>Abort|abort)'),
    }
    Vars = {}
    StatusVars = {}
    with open(logfile, "r") as log:
        print "== {logfile!s} ==".format(**locals())
        for line in log:
            l = line.strip()
            for key, pattern in P.items():
                m = pattern.match(l)
                if m:
                    Vars[key] = m.group(key)
                    break
            for key, pattern in STATUS.items():
                m = pattern.search(l)
                if m:
                    StatusVars[key] = m.group(key)
    return Vars, StatusVars
def scan_log(logfile,P):
    STATUS = {'abort':sre.compile('(?P<abort>Abort|abort)'),
              }
    Vars = {}
    StatusVars = {}
    log = open(logfile,"r")
    print "== %(logfile)s ==" % locals()
    for line in log:
        l = line.strip()
        for key,pattern in P.items():
            m = pattern.match(l)
            if m:
                Vars[key] = m.group(key)
                break
        for key,pattern in STATUS.items():
            m = pattern.search(l)
            if m:
                StatusVars[key] = m.group(key)
    log.close()
    return Vars, StatusVars
def __init__(self, poem):
    self.poem = poem
    fnt = 13  # ------- sets the font size...
    self.footlist = []
    self.LineData = []
    self.CurrentStep = 0
    self.mfield = ""
    self.footfield_txt = ""
    self.ScanLine = []
    self.FORKSTEP = 3  # the step at which the two algorithms divide
    self.poem = poem
    self.TextLine = []
    self.NotesWindow = []
    self.WholeText = []
    self.SM = ScansionMachine()
    self.SM.SD.Dict = {}
    self.SM.SD.LoadDictionary()
    self.E = Explainer(self.NotesWindow)
    self.lineNum = 0       # where to put its scansion when done
    self.loaddir = ''      # where user gets textfiles to Load
    self.Metron = 2        # initial assumption:
    self.LineFeet = 5      # iambic pentameter
    self.LineFeetSet = True
    self.SM.SetLineFeet(5, True)
    self.SetupScansionSteps()  # inc some more data items
    self.leadSpaceRE = sre.compile(r'[ |\t]+')
    self.OneIambicAlgFailed = False
    self.SM.ParseLine(str(poem))
    self.SetupScansionSteps()
    laststep = len(self.Steps)
    step = self.CurrentStep
    while step < laststep:
        self.OnStepButton()
        step = self.CurrentStep
    if len(poem) > 10:
        for item in self.NotesWindow:
            print(item)
def scan_log(logfile, P):
    STATUS = {
        'abort': sre.compile('(?P<abort>Abort|abort)'),
    }
    Vars = {}
    StatusVars = {}
    log = open(logfile, "r")
    print "== %(logfile)s ==" % locals()
    for line in log:
        l = line.strip()
        for key, pattern in P.items():
            m = pattern.match(l)
            if m:
                Vars[key] = m.group(key)
                break
        for key, pattern in STATUS.items():
            m = pattern.search(l)
            if m:
                StatusVars[key] = m.group(key)
    log.close()
    return Vars, StatusVars
class RawFeed(FeedPlugin):
    torrent_pattern = sre.compile(
        "(http://[a-zA-Z0-9._~:/?#\[\]@!$&'()*+,;=-]+?\.torrent)")

    def __init__(self, main, url, doc=None):
        FeedPlugin.__init__(self, main, url, url, url, doc)
        self.main = main
        self.data = None

    def _supports(version):
        if version >= '4.3.0':
            return True
        return False
    supports = staticmethod(_supports)

    def _matches_type(mimetype, subtype):
        if (mimetype is None and subtype.lower() == subtype):
            return True
        return False
    matches_type = staticmethod(_matches_type)

    def _update(self, doc=None):
        u = zurllib.urlopen(self.url)
        self.data = u.read()
        u.close()
        self.main.show_status(self.get_items())
        self.main.feed_was_updated(self.url)

    def get_items(self):
        items = []
        if self.data is None:
            return items
        urls = self.torrent_pattern.findall(self.data)
        for u in urls:
            i = (u, u, u)
            items.append(i)
        return items
def colorize_re(regexp):
    r"""
    @return: The HTML code for a colorized version of the pattern for
        the given SRE regular expression.  If C{colorize_re} can't
        figure out how to colorize the regexp, then it will simply
        return the (uncolorized) pattern, with C{'&'}, C{'<'}, and
        C{'>'} escaped as HTML entities.  The colorized expression
        includes spans with the following css classes:
          - X{re}: The entire regular expression.
          - X{re-char}: Special characters (such as C{'.'}, C{'\('}),
            character categories (such as C{'\w'}), and locations
            (such as C{'\b'}).
          - X{re-op}: Operators (such as C{'*'} and C{'|'}).
          - X{re-group}: Grouping constructs (such as C{'(...)'}).
          - X{re-ref} References (such as C{'\1'})
    @rtype: C{string}
    @param regexp: The regular expression to colorize.
    @type regexp: C{SRE_Pattern} or C{string}
    """
    try:
        if type(regexp) == type(''):
            regexp = sre.compile(regexp)
        tree = sre_parse.parse(regexp.pattern, regexp.flags)
        return ('<span class="%s">%s</span>' %
                (RE_TAG, _colorize_re(tree, 1)))
    except:
        try:
            pat = regexp.pattern
            pat = pat.replace('&', '&amp;')
            pat = pat.replace('<', '&lt;')
            pat = pat.replace('>', '&gt;')
            return '<span class="%s">%s</span>' % (RE_TAG, pat)
        except:
            try:
                str = `regexp`
                str = str.replace('&', '&amp;')
                str = str.replace('<', '&lt;')
                str = str.replace('>', '&gt;')
                return str
            except:
                return '<span class="%s">...</span>' % RE_TAG
def getlistingdata(self):
    if not self.lines:
        return ""
    lines, self.lines = self.lines[:-1], self.lines[-1:]
    data = ""
    prog = sre.compile(self.listing_pattern)
    for line in lines:
        if self.debuglevel > 2:
            print "*getl*", `line`
        if line is None:
            data = data + self.listing_header % {
                'url': self.escape(self.url)}
            continue
        if line[-1:] == '\r':
            line = line[:-1]
        m = prog.match(line)
        if not m:
            line = self.escape(line) + '\n'
            data = data + line
            continue
        mode, middle, name, symlink = m.group(1, 2, 3, 5)
        rawname = name
        [mode, middle, name] = map(self.escape, [mode, middle, name])
        href = urljoin(self.url, quote(rawname))
        if len(mode) == 10 and mode[0] == 'd' or name[-1:] == '/':
            if name[-1:] != '/':
                name = name + '/'
            if href[-1:] != '/':
                href = href + '/'
        line = '%s%s<A HREF="%s">%s</A>%s\n' % (
            mode, middle, self.escape(href), name,
            (symlink and symlink or ''))
        data = data + line
    if self.lines == [None]:
        data = data + self.listing_trailer
        self.lines = []
    return data
class NewlineScratcher:
    import sre
    __scratch_re = sre.compile("[ \\t]*\\n")

    # for new version only:
##     __buffer = ''

    def __init__(self, parser, limit=-1):
        self.__limit = limit
        self.__parser = parser

    def __call__(self, data):
        # new version that works better sometimes but can really die badly:
        # (hopefully fixable!)
##         data = self.__buffer + data
##         while "\n" in data and self.__limit != 0:
##             length = self.__scratch_re.match(data)
##             if length >= 0:
##                 data = data[length:]
##                 self.__limit = self.__limit - 1
##         if string.strip(data) or self.__limit == 0:
##             self.__parser.formatter.add_literal_data(data)
##             self.__parser.set_data_handler(
##                 self.__parser.formatter.add_literal_data)
##             self.__parser = None
##         else:
##             self.__buffer = data
        # old version:
        while data and data[0] == "\n" and self.__limit != 0:
            data = data[1:]
            self.__limit = self.__limit - 1
        if data:
            self.__parser.formatter.add_literal_data(data)
            self.__parser.set_data_handler(
                self.__parser.formatter.add_literal_data)
            del self.__parser
def listNodeConfig(doc, nodeElement, node):
    nodeName = AdminConfig.showAttribute(node, 'name')
    print 'listnode: ' + nodeName
    nodeElement.setAttribute('name', nodeName)
    nodeElement.setAttribute('hostName', AdminConfig.showAttribute(node, 'hostName'))
    nodeServicesElement = doc.createElement('Resources')
    nodeElement.appendChild(nodeServicesElement)
    listNodeResources(doc, nodeServicesElement, node)
    serverPortRegex = sre.compile("\[(.*) \[ \[port (.*)\] \[node (.*)\] \[host (.*)\] \[server (.*)\] \]\]")
    for webServer in AdminConfig.list('WebServer', node).splitlines():
        webServerElement = doc.createElement('WebServer')
        nodeElement.appendChild(webServerElement)
        server = AdminConfig.showAttribute(webServer, 'server')
        serverName = AdminConfig.showAttribute(server, 'name')
        webServerElement.setAttribute("name", serverName)
        print 'list: WebServer ' + serverName
        for serverPort in AdminTask.listServerPorts(serverName, '-nodeName %s' % nodeName).splitlines():
            serverPortMatch = serverPortRegex.match(serverPort)
            if serverPortMatch:
                serverPortElement = doc.createElement("ServerPort")
                webServerElement.appendChild(serverPortElement)
                serverPortElement.setAttribute("endPointName", serverPortMatch.group(1))
                serverPortElement.setAttribute("host", serverPortMatch.group(4))
                serverPortElement.setAttribute("port", serverPortMatch.group(2))
# SRE test harness for the Python regression suite
# this is based on test_re.py, but uses a test function instead
# of all those asserts
import sys
sys.path = ['.'] + sys.path
from test_support import verbose, TestFailed, have_unicode
import sre
import sys, os, string, traceback

#
# test support

def test(expression, result, exception=None):
    try:
        r = eval(expression)
    except:
        if exception:
            if not isinstance(sys.exc_value, exception):
                print expression, "FAILED"
                # display name, not actual value
                if exception is sre.error:
                    print "expected", "sre.error"
                else:
                    print "expected", exception.__name__
                print "got", sys.exc_type.__name__, str(sys.exc_value)
        else:
            print expression, "FAILED"
            traceback.print_exc(file=sys.stdout)
    else:
        if exception:
            print expression, "FAILED"
## Sid Meier's Civilization 4
## Copyright Firaxis Games 2005
import CvUtil
from CvPythonExtensions import *
import string
import sre
import Version

ArtFileMgr = CyArtFileMgr()
localText = CyTranslator()
gc = CyGlobalContext()

# sre compile remLinks
remLinks = sre.compile(r'<link=.*?>|</link>')

class CvDawnOfMan:
    "Dawn of man screen"
    def __init__(self, iScreenID):
        self.iScreenID = iScreenID
        self.X_SCREEN = 0
        self.Y_SCREEN = 0
        self.W_SCREEN = 1024
        self.H_SCREEN = 768
        # < Dawn Of Man Start >
        self.W_TECH = 425
        self.H_TECH = 80
def error(msg):
    sys.stderr.write("\033[31;1mERROR: "+msg+"\033[0m\n")
    sys.exit(1)

if len(sys.argv) != 2:
    DIR=path.normpath(sys.argv[1])+'/'
    banfile=sys.argv[2]
    pref_len=len(DIR)
else:
    error("No directory and banfile given to the "+sys.argv[0]+" script")

file=open(banfile,"r")
ban=[]
for i in file.readlines():
    ban+=[sre.compile(i.strip())]
file.close()

lib_re=sre.compile("^.*\.so(\.[0-9]+)*$")

def judgement(suspect):
    """Checks if a file isn't banned."""
    for i in ban:
        if i.match(suspect):
            return 1
    return 0

hardlinked_inodes=[]
for top, dirs, files in walk(DIR):
    for i in files:
        file=path.join(top,i)
def init_pattern(key):
    """Return a SRE compiled pattern; the match can be accessed in the
    match object as
       m = P[key].match(string)
       m.group(key)
    """
    return sre.compile(
        '^init\(\): {key!s}: *(?P<{key!s}>.*)$'.format(**locals()))

INIT_KEYS = ['hostname', 'stagedir', 'JOB_ID', 'JOB_NAME']
P = {key: init_pattern(key) for key in INIT_KEYS}
Q = {
    'hostname': sre.compile('^host: *(?P<hostname>.*)$'),
    'stagedir': sre.compile('^\+\+ temp_dir=(?P<stagedir>/scratch/oliver/.*)$'),
    'WDIR': sre.compile('^WDIR: *(?P<WDIR>.*)$'),
}
# note: the SRE's are matched patterns, ie anchored at beginning of line!!

def scan_log(logfile, P):
    STATUS = {
        'abort': sre.compile('(?P<abort>Abort|abort)'),
    }
    Vars = {}
    StatusVars = {}
    with open(logfile, "r") as log:
        print "== {logfile!s} ==".format(**locals())
        # body completed from the identical scan_log variant earlier in this
        # collection: match each stripped line against P and STATUS
        for line in log:
            l = line.strip()
            for key, pattern in P.items():
                m = pattern.match(l)
                if m:
                    Vars[key] = m.group(key)
                    break
            for key, pattern in STATUS.items():
                m = pattern.search(l)
                if m:
                    StatusVars[key] = m.group(key)
    return Vars, StatusVars
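# Hedged usage sketch tying the pieces above together: the log file name and
# its contents are invented. scan_log returns whatever the anchored patterns
# in P captured, plus any status matches (lines containing 'Abort'/'abort').
open('example.log', 'w').write(
    "init(): hostname: node042\ninit(): stagedir: /tmp/stage\n")
Vars, StatusVars = scan_log('example.log', P)
print Vars        # e.g. {'hostname': 'node042', 'stagedir': '/tmp/stage'}
print StatusVars  # empty here, since no line matched the abort pattern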
def main():
    finished = False
    gaia = wikipedia.getSite(code=u'en', fam=u'gaia')
    wikipedia.setAction(wikipedia.translate(gaia, msg))
    wikipedia.output(u'Welcome to the Gaiabot Item Creator.')
    blank = sre.compile(u'^$', sre.UNICODE)
    while not finished:
        wikipedia.output(u'Beginning Process...')
        choice = wikipedia.inputChoice(u'Do you wish to create an item?',
                                       ['Yes', 'No'], ['y', 'N'], 'N')
        if choice in ['n', 'N']:
            finished = True
        else:
            wikipedia.output(u'Please enter the asked values to create an Item partial.')
            name = u''
            image = u''
            thumb = False
            price = u''
            store = u''
            gender = u''
            description = u''
            month = u''
            year = u''
            isDonation = False
            alt = u''
            otheritem = u''
            intro = u''
            trivia = []
            external = []
            groups = []
            animalHats = False
            animalMasks = False
            masks = False
            headgear = False
            while name.isspace() or sre.match(blank, name) != None:
                name = wikipedia.input(u'Please enter the item name: ').strip(u' ').strip(u' ')
            choice = wikipedia.inputChoice(u'Is this a donation Item?',
                                           ['Yes', 'No'], ['y', 'N'], 'N')
            if choice in ['y', 'Y']:
                isDonation = True
                alt = wikipedia.input(
                    u'(Optional)Please enter the item\'s alternate name if it has one: '
                ).strip(u' ').strip(u' ')
                otheritem = wikipedia.input(
                    u'(Optional)Please enter the name of other item released this month: '
                ).strip(u' ').strip(u' ')
            else:
                intro = wikipedia.input(
                    u'Please enter the intro statement for the item: ').strip(u' ').strip(u' ')
            image = wikipedia.input(
                u'If it has one, please enter the name of the image for the Item. No Item: Prefix: '
            ).strip(u' ').strip(u' ')
            if not image.isspace() and sre.match(blank, image) == None:
                choice = wikipedia.inputChoice(
                    u'Does this image need to become a thumbnail for space (Large Images only.)?',
                    ['Yes', 'No'], ['y', 'N'], 'N')
                if choice in ['y', 'Y']:
                    thumb = True
            price = wikipedia.input(
                u'Please enter the item\'s price, if it is not sold in stores leave blank: '
            ).strip(u' ').strip(u' ')
            store = wikipedia.input(
                u'Please enter the name of the store that the item is sold at: '
            ).strip(u' ').strip(u' ')
            gender = wikipedia.input(
                u'Please enter the gender that the item can be equipped to, leave blank for Any: '
            ).strip(u' ').strip(u' ')
            description = wikipedia.input(
                u'Please enter the item description: ').strip(u' ').strip(u' ')
            choice = wikipedia.inputChoice(
                u'What Month was this item released in?',
                ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                 'August', 'September', 'October', 'November', 'December', 'Unknown'],
                ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july', 'aug',
                 'sept', 'oct', 'nov', 'dec', 'u'], 'u')
            if choice in ['jan', 'Jan', 'january', 'January']:
                month = u'January'
            elif choice in ['feb', 'Feb', 'feburary', 'Febuary']:
                month = u'February'
            elif choice in ['mar', 'Mar', 'march', 'March']:
                month = u'March'
            elif choice in ['apr', 'Apr', 'april', 'April']:
                month = u'April'
            elif choice in ['may', 'May']:
                month = u'May'
            elif choice in ['june', 'June']:
                month = u'June'
            elif choice in ['july', 'July']:
                month = u'July'
            elif choice in ['aug', 'Aug', 'august', 'August']:
                month = u'August'
            elif choice in ['sep', 'Sep', 'sept', 'Sept', 'september', 'September']:
                month = u'September'
            elif choice in ['oct', 'Oct', 'october', 'October']:
                month = u'October'
            elif choice in ['nov', 'Nov', 'november', 'November']:
                month = u'November'
            elif choice in ['dec', 'Dec', 'december', 'December']:
                month = u'December'
            year = wikipedia.input(
                u'What year was this item released in? ').strip(u' ').strip(u' ')
            incomplete = True
            while incomplete:
                t = wikipedia.input(
                    u'You may enter multiple entries for the trivia section here. Leave the box blank when you are finished: '
                ).strip(u' ').strip(u' ')
                if not t.isspace() and sre.match(blank, t) == None:
                    trivia.append(t)
                else:
                    incomplete = False
            incomplete = True
            while incomplete:
                e = wikipedia.input(
                    u'Please enter a URL for the External links section. Use Blank when finished: '
                ).strip(u' ').strip(u' ')
                if not e.isspace() and sre.match(blank, e) == None:
                    c = wikipedia.input(
                        u'Please enter a caption for this URL if you wish: '
                    ).strip(u' ').strip(u' ')
                    external.append((e, c))
                else:
                    incomplete = False
            incomplete = True
            while incomplete:
                g = wikipedia.input(
                    u'Please enter the names of the normal Item Groups this Item is in. Leave the box blank when you are finished (Do not use special groups): '
                ).strip(u' ').strip(u' ')
                if not g.isspace() and sre.match(blank, g) == None:
                    groups.append(g)
                else:
                    incomplete = False
            choice = wikipedia.inputChoice(
                u'Is this in the Special group "Animal Hats"?',
                ['Yes', 'No'], ['y', 'N'], 'N')
            if choice in ['y', 'Y']:
                animalHats = True
            choice = wikipedia.inputChoice(
                u'Is this in the Special group "Animal Masks"?',
                ['Yes', 'No'], ['y', 'N'], 'N')
            if choice in ['y', 'Y']:
                animalMasks = True
            choice = wikipedia.inputChoice(
                u'Is this in the Special group "Masks"?',
                ['Yes', 'No'], ['y', 'N'], 'N')
            if choice in ['y', 'Y']:
                masks = True
            choice = wikipedia.inputChoice(
                u'Is this in the Special group "Headgear"?',
                ['Yes', 'No'], ['y', 'N'], 'N')
            if choice in ['y', 'Y']:
                headgear = True
            outputTo = wikipedia.input(
                u'(Required)Please enter the article name you wish to output the Item Partial to: '
            ).strip(u' ').strip(u' ')
            if not outputTo.isspace() and sre.match(blank, outputTo) == None:
                pageData = u''
                if isDonation:
                    pageData += u'{{donationIntro|name=%s|' % name
                    if not alt.isspace() and sre.match(blank, alt) == None:
                        pageData += u'alt=%s|' % alt
                    pageData += u'month=%s|year=%s|other=%s}}\n' % (month, year, otheritem)
                else:
                    pageData += u'%s\n' % intro
                pageData += u'{{item|name=%s' % name
                if not image.isspace() and sre.match(blank, image) == None:
                    pageData += u'|image=%s' % image
                    if thumb:
                        pageData += u'|thumb=true'
                if not price.isspace() and sre.match(blank, price) == None:
                    pageData += u'|price=%s' % price
                if not store.isspace() and sre.match(blank, store) == None:
                    pageData += u'|store=%s' % store
                if not gender.isspace() and sre.match(blank, gender) == None:
                    pageData += u'|gender=%s' % gender
                pageData += u'|description=%s' % description
                if not month.isspace() and sre.match(blank, month) == None:
                    pageData += u'|month=%s' % month
                if not year.isspace() and sre.match(blank, year) == None:
                    pageData += u'|year=%s' % year
                pageData += u'}}\n'
                pageData += u'==Trivia==\n'
                if len(trivia) > 0:
                    for t in trivia:
                        pageData += u'%s\n\n' % t
                else:
                    pageData += u'???\n\n'
                pageData += u'==External Links==\n'
                for url, comment in external:
                    pageData += u'* %s' % url
                    if not comment.isspace() and sre.match(blank, comment) == None:
                        pageData += u' - %s' % comment
                    pageData += u'\n'
                pageData += u'\n'
                pageData += u'{{itemGroups'
                for g in groups:
                    pageData += u'|%s' % g
                if animalHats:
                    pageData += u'|Animal Hats'
                if animalMasks:
                    pageData += u'|Animal Masks'
                if masks:
                    pageData += u'|Masks'
                if headgear:
                    pageData += u'|Headgear'
                pageData += u'}}'
                page = wikipedia.Page(gaia, u'%s' % outputTo)
                old = u''
                try:
                    old = page.get()
                except wikipedia.NoPage:
                    old = u''
                page.put(old + pageData)
("text/x-perl", ".pl"), ("text/x-ruby", ".rb"), ("text/x-tex", ".tex"), # this was created by me in order for it to be picked up later ("text/x-ocaml", ".ml"), ("text/x-apache-conf", ".conf"), ("text/x-lua", ".lua"), ("text/x-yaml", ".yml"), ) for key, val in MIMETYPES: mimetypes.add_type(key, val) # ! import sre seps = r'[.\W]' word_re = sre.compile(r'(\w+)$') class Scintilla(scintilla.Scintilla): def get_text(self): return super(Scintilla, self).get_text(self.get_length() + 1)[1] def append_text(self, text): super(Scintilla, self).append_text(len(text), text) def get_line(self, number): return super(Scintilla, self).get_line(number)[1] class Pscyntilla(gobject.GObject): gsignal('mark-clicked', gobject.TYPE_INT, gobject.TYPE_BOOLEAN)