def get_color(self, name, is_fg=False):
    """Build a shell-escaped ANSI color escape sequence.

    `name` may be 'default', a key of self.colors, a 0-255 color number,
    or an 'R;G;B' triple; anything else falls back to the default color.
    The sequence is wrapped in zero-width markers appropriate for the
    current SHELL so the prompt width is computed correctly.
    """
    # 3x = foreground codes, 4x = background codes
    kind = 3 if is_fg else 4
    if name == 'default':
        # Default
        seq = '\x1b[%s9m' % kind
    elif name in self.colors:
        # Named color
        seq = '\x1b[%s8;5;%sm' % (kind, self.colors[name])
    elif rematch(r'^\d{1,3}$', name):
        # Color number
        seq = '\x1b[%s8;5;%sm' % (kind, name)
    elif rematch(r'^\d{1,3};\d{1,3};\d{1,3}$', name):
        # RGB color
        seq = '\x1b[%s8;2;%sm' % (kind, name)
    else:
        # If anything else, use default
        seq = '\x1b[%s9m' % kind
    # zsh uses %{...%}; other shells use \001...\002 to mark zero-width text
    if SHELL == 'zsh':
        return "%%{%s%%}" % seq
    return "\001%s\002" % seq
def color(name='gray'):
    """
    Return a color from a curated list of colors. Can take a name, an
    integer, or can pass through an RGB[A] or hex color. Default is gray.
    """
    colors = {
        'orange': '#E86E0A',
        'red': '#D61E21',
        'gray': '#7C7C7C',
        'black': '#000000',
        'green': '#75D977',
        'mint': '#47D1A8',
        'purple': '#C880D1',
        'indigo': '#5E5AE6',
        'blue': '#47AEED',  # previously 4087DD
        'yellow': '#F2E205',
    }
    # user-configured colors from the project configuration
    named_colors = config.params()['colors']
    if isinstance(name, int):
        # wrap integers around the curated palette
        clrs = [colors[key] for key in colors]
        return clrs[name % len(clrs)]
    elif name in colors:
        return colors[name]
    elif name in named_colors:
        return named_colors[name]
    elif rematch(r'^(#[0-9A-Fa-f]{6}|rgba?\([0-9,]+\))$', name):
        # BUG FIX: the old pattern '^(#...)|(rgb...)$' bound '^' to the hex
        # alternative only and '$' to the rgb alternative only, so strings
        # like '#AABBCCzz' were accepted. Grouping the alternation applies
        # both anchors to both forms; also now a raw string.
        return name
    else:
        return colors['gray']
def run(self): ''' Main loop that searches directories and retrieves the data ''' # the socket for outgoing cache-update-requests to FSCache context = zmq.Context() socket = context.socket(zmq.REQ) socket.setsockopt(zmq.LINGER, 100) socket.connect("ipc:///tmp/fsc_upd") time.sleep(1) data = {} # create shortcut to prevent manymany dot-lookups in the loop dir_n = os.path.dirname if self.verify(): print "WORKER({0}): {1} running in dir {2}".format( self.pid, self.name, self.path) for fn, _ in Statwalker(self.path): # add a few more checks data: # - dont open empty files # - what to add to the dict for empty files? if rematch(self.pattern, fn): data[fn] = {} #data[fn] = 'test' data[fn] = salt.utils.fopen(fn, 'rb').read() # send the data back to the caller socket.send(self.serial.dumps(data)) ack = self.serial.loads(socket.recv()) if ack == 'OK': print "WORKER: {0} finished".format(self.name) else: # directory does not exist, return empty result dict socket.send(self.serial.dumps({self.path: None}))
def validate_address_arg(inputDict):
    """
    Validate a cryptocurrency address against a per-coin regex and prompt
    the user (via validation_error_out) when it looks wrong.

    :param inputDict: dict with keys "address", "coin", "type" and
                      "exchange" (the latter read only on the segwit path)
    :return: None; exits the process if validation aborts
    """
    # BUG FIX: ETH and BCH patterns previously lacked a trailing "$"
    # (re.match only anchors the start), so addresses with trailing
    # garbage such as "0x<40 hex>XYZ" were accepted as valid.
    reDict = {
        "XMR": "^(4|8)[1-9A-HJ-NP-Za-km-z]{94}([1-9A-HJ-NP-Za-km-z]{11})?$",
        "BTC": "^(bc1|[13])[a-zA-HJ-NP-Z0-9]{25,39}$",
        "LTC": "^[LM3][a-km-zA-HJ-NP-Z1-9]{26,33}$",
        "ETH": "^0x[a-fA-F0-9]{40}$",
        "BCH": "^[13][a-km-zA-HJ-NP-Z1-9]{33}$",
        "DASH": "^X[1-9A-HJ-NP-Za-km-z]{33}$",
    }
    address = inputDict["address"]
    coin = inputDict["coin"]
    tx_type = inputDict["type"]
    try:
        # reDict.get(coin) is None for an unsupported coin; rematch then
        # raises TypeError, which (as before) aborts via the except below.
        if not rematch(reDict.get(coin), address) and "bitpay.com" not in address:
            validation_error_out(
                "invalid_address {} {}".format(tx_type, coin),
                "\nThe {} address you entered may not be a valid {} address.\n"
                .format(tx_type, coin) +
                "Do you want to continue anyway?",
            )
        elif address.lower().startswith("bc1") and coin == "BTC":
            validation_error_out(
                "segwit_address {}".format(inputDict["exchange"]),
                "The Morphtoken and XMR.to APIs do not support segwit addresses.\n" +
                "If you receive an error try again with a standard address instead.\n" +
                "Do you want to continue with segwit address anyway?")
    except Exception:
        # Was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt; narrowed so those propagate normally.
        exit(0)
def makebattercsv(info: CricketInfo):
    # Scans match archives and extracts the batting side per innings,
    # using a pickled ground cache at data.files/grounds.dict.
    # NOTE(review): the print/break below look like leftover debug code --
    # only the first single-table innings of each match is processed.
    grounds = dict()
    if exists("data.files/grounds.dict"):
        # load the cached ground-id -> Ground mapping
        with open("data.files/grounds.dict", 'rb') as f:
            grounds = load(f)
    for match_file in match_file_generator():
        soup: Bs = __get_match_soup(match_file=match_file)
        # each score has innings cards for the innings played and the last one for the match summary
        match_cards: ResultSet = soup.find_all(class_='match-scorecard-table')
        match_summary_card: Tag = match_cards.pop(-1)
        (test_number, ground, played_in_country) = __get_match_details(match_summary_card, grounds)
        innings_number: int = 0
        innings_card: Tag = None
        # since the last match card is the summary card, we're iterating till the second last
        for innings_number, innings_card in enumerate(match_cards):
            # NOTE(review): enumerate() makes innings_number 0-based here,
            # unlike the i + 1 numbering used elsewhere -- confirm intent
            batter_score_tables = innings_card.find_all('table', class_='batsman')
            if len(batter_score_tables) == 1:
                # find the team playing
                batting_side: str = None
                # team name = leading text of the h5 before the first digit
                m = rematch(pattern=r'^(.+?)\s+\d',
                            string=innings_card.find('h5').get_text())
                batting_side: str = m.group(1)
                print(batting_side, innings_number)
                break
    # persist the (possibly updated) ground cache
    with open("data.files/grounds.dict", 'wb') as f:
        dump(grounds, f)
async def emoji(self, ctx, emoji):
    """Gives as much information on a custom emoji as possible.

    Accepts a raw emoji ID, a <name:id> / <a:name:id> mention, or a plain
    name (looked up in the local guild first, then across all guilds the
    bot can see), and replies with an embed showing guild, ID and image.
    """
    # Use Rapptz's EmojiConverter code for first leg.
    match = rematch(r'([0-9]{15,21})$', emoji) or rematch(
        r'<a?:[a-zA-Z0-9_]+:([0-9]+)>$', emoji)
    result = None
    emoji_guild = "Unknown"
    emoji_id = None
    bot = ctx.bot
    guild = ctx.guild
    if match is None:
        # Try to get the emoji by name. Try local guild first.
        if guild:
            result = get(guild.emojis, name=emoji)
        if result is None:
            result = get(bot.emojis, name=emoji)
    else:
        # (removed leftover debug loop that printed match.groups())
        emoji_id = int(match.group(1))
        # Try to look up emoji by id.
        if guild:
            result = get(guild.emojis, id=emoji_id)
        if result is None:
            result = get(bot.emojis, id=emoji_id)
    # We should hotlink at least the ID and URL of all emojis.
    if result is not None:
        emoji_guild = result.guild
        emoji_id = result.id
        emoji_name = result.name
        emoji_type = "gif" if result.animated else "png"
    elif emoji_id is not None:
        # Should work because Discord disallows ":" in emoji names.
        # NOTE(review): if `emoji` is a bare numeric ID with no ":", this
        # split raises IndexError -- confirm callers always pass a mention
        # when the emoji is unknown to the bot.
        emoji_name = emoji.split(":", 1)[1].split(":")[0]
        emoji_type = "gif" if emoji.startswith("<a:") else "png"
    else:
        await reply(ctx, "I couldn't find such a custom emoji...")
        return
    url = "https://cdn.discordapp.com/emojis/{}.{}?v=1".format(
        emoji_id, emoji_type)
    embed = Embed(title=emoji_name, url=url)
    embed.add_field(name="Guild:", value=emoji_guild)
    embed.add_field(name="ID:", value=str(emoji_id))
    embed.set_image(url=url)
    await ctx.send(embed=embed)
def regex_match(self, element, pattern, attr_name=False):
    """
    Check to see if an attribute matches a regular expression.

    When both an `attribute` and an `inner_text` search are configured,
    the method recurses once for each, so BOTH must match. Otherwise the
    value tested is, in order of precedence: the configured attribute,
    the element's inner text, then the explicit `attr_name` attribute.

    :param element: element exposing `get_attribute()` and `.text`
    :param pattern: regular expression matched from the start of the value
    :param attr_name: optional attribute name to read directly
    :return: True if the resolved value matches, False otherwise
    """
    attr_value = ''
    # Handle `attribute` and `inner_text` if both are set.
    attr_search = self._attribute_search
    if attr_search and self._inner_text_search:
        self._reset()
        # Check if attribute matches.
        self.attribute(attr_search)
        if self.regex_match(element, pattern):
            # Don't return because we still need to check to see
            # if the inner_text matches.
            self._reset()
        else:
            self._reset()
            return False
        # Check to see if the inner text matches.
        self.inner_text()
        if self.regex_match(element, pattern):
            self._reset()
            # Return since both checks passed.
            return True
        else:
            self._reset()
            return False
    # Get the element attribute based on the input to the `attribute` method.
    # (removed leftover debug print of attr_value -- the twin
    # wildcard_match method has no such print)
    if self._attribute_search:
        attr_value = element.get_attribute(self._attribute_search)
    # Get the elements inner text.
    if self._inner_text_search:
        attr_value = element.text
    # Get the element by attr_name kwarg.
    if attr_name:
        attr_value = element.get_attribute(attr_name)
    # No value resolved means no match.
    if not attr_value:
        return False
    # Reset for reuse.
    self._reset()
    # Perform the regex match.
    if not rematch(pattern, attr_value):
        return False
    return True
def sniff_dims(x):
    """Parse a '#dims=a,b,...' marker line into a list of ints.

    Returns the list of dimensions when the line starts with one or more
    '#' followed by 'dims=', otherwise None.
    """
    marker = r"\#+dims\="
    if not rematch(marker, x):
        return None
    payload = resub(marker, "", x)
    return [int(token) for token in payload.split(",")]
def get_ground_by_id(self, ground_row: Tag):
    """Resolve the Ground for a scorecard row, caching lookups by ground id.

    The ground id comes from the row's link; on a cache miss the linked
    page is fetched and its title parsed as "<name> | <country> | ...".
    """
    href = ground_row.find('a').attrs['href']
    gid = research('\/([^\/]+?)\.html$', href).group(1)
    if not self.__grounds.get(gid, False):
        # cache miss: fetch the ground page and parse the title
        page = Bs(markup=get(url=href).text, features='html.parser')
        parts = rematch(pattern='^(.+?)\s*\|\s*(.+?)\s*\|',
                        string=page.title.get_text())
        self.__grounds[gid] = Ground(parts.group(1), parts.group(2))
    return self.__grounds[gid]
def refullmatch(regex, string):
    """re.fullmatch re-implemented for the old Python shipped with wheezy.

    :param regex: regular expression to test
    :param string: string tested against the expression
    :return: True only if the whole string matches, else False
    """
    hit = rematch(regex, string)
    if hit is None:
        return False
    # a start-anchored match counts as full only if it consumed everything
    return hit.end() == len(string)
def __get_ground_name_and_country(groundTag: Tag, grounds: dict) -> Ground:
    """Resolve a Ground from a linked tag, caching results in `grounds`.

    The ground id comes from the link href; on a cache miss the linked
    page is fetched and its title parsed as "<name> | <country> | ...".
    """
    href = groundTag.find('a').attrs['href']
    gid = research('\/([^\/]+?)\.html$', href).group(1)
    cached = grounds.get(gid, False)
    if not cached:
        # cache miss: fetch the ground page and parse the title
        page = Bs(markup=get(url=href).text, features='html.parser')
        parts = rematch(pattern='^(.+?)\s*\|\s*(.+?)\s*\|',
                        string=page.title.get_text())
        grounds[gid] = Ground(parts.group(1), parts.group(2))
    return grounds[gid]
def __is_match_played(soup: Bs) -> bool:
    '''Check if match was played. Maybe abandoned or cancelled or something'''
    status = soup.find(class_='status-text')
    if not status:
        # no status-text was found so don't collect data
        return False
    # abandoned/cancelled matches were never actually played
    return not rematch('Match (abandoned|cancelled) without a ball bowled',
                       status.get_text())
def buildUrlFilter(urls, logger=lambda _: None):
    """Given a tuple of urls with similar pattern, computes a filtering
    function that accepts similar urls and rejects others.

    >>> times = buildUrlFilter([
    ...     "http://www.thetimes.co.uk/tto/news/world/europe/article3844546.ece",
    ...     "http://www.thetimes.co.uk/tto/business/industries/leisure/"
    ...     "article3843571.ece" ], printf)
    Url regex: ^http://www.thetimes.co.uk/[^/]+/[^/]+/[^/]+/[^/]+/[^/]+$
    >>> times(u"http://www.thetimes.co.uk/tto/opinion/columnists/"
    ...     "philipcollins/article3844110.ece")
    True
    >>> times(u"http://www.thetimes.co.uk/tto/public/article2582551.ece")
    False
    >>> engadget = buildUrlFilter([
    ...     "http://www.engadget.com/2013/08/14/back-to-school-guide-tablets/",
    ...     "http://www.engadget.com/2013/08/15/we-can-do-this-hyperloop/" ]
    ...     , printf)
    Url regex: ^http://www.engadget.com/\\d+/\\d+/\\d+/[^/]+/$
    >>> engadget(u"http://www.engadget.com/2013/08/15/yahoo-weather-android-"
    ...     "redesign/")
    True
    >>> engadget(u"http://www.engadget.com/THATSNAN/08/15/title/")
    False

    @type urls: collections.Iterable of strings
    @param urls: urls with a similar pattern
    @rtype: function of string => bool
    @return: the function filtering blog posts
    """
    # sentinel appended to every url so the regex can assert "end of url"
    eol = "#"
    urlsTuple = tuple(urls)
    # beginsWith(regex) -> predicate testing a url (plus "/" + sentinel)
    # against the start-anchored regex
    beginsWith = lambda regex: (
        lambda str: bool(rematch(regex, str + "/" + eol)))
    (scheme, netloc, _, _, _) = urlsplit(urlsTuple[0])
    # Note that something like "[a-zA-Z]" would not be safe as it could append
    # that only one article out of many contains a digit in the title. Also if
    # we try to match more precisely the urls we might find a temporary pattern
    # like /2013/.
    # candidate regex fragments, tried in order from most to least specific
    patterns = "{0}://".format(scheme), netloc, eol + "$", "/", "\\d+/", "[^/]+/"

    def bestRegex(current):
        """Recursively compute the best regex."""
        # greedily extend `current` with the first fragment that still
        # matches every sample url, until no fragment fits
        for pattern in patterns:
            if all(imap(beginsWith(current + pattern), urlsTuple)):
                return bestRegex(current + pattern)
        return current

    # NOTE(review): `imap` is Python 2 itertools; this function will not run
    # unmodified on Python 3 -- confirm the target interpreter.
    logger("Url regex: {0}".format(bestRegex("^").replace("/" + eol, "")))
    return beginsWith(bestRegex("^"))
def makebattercsv():
    '''Walk every archived match, extract each batter's scorecard row and
    write it to CSV via SaveCsv.'''
    info: CricketInfo = CricketInfo()
    grounds = MatchGrounds()
    savecsv = SaveCsv()
    for match_file in match_file_generator():
        print(match_file)
        unzipped = ZipFile(file=match_file, mode='r')
        soup: Bs = Bs(markup=unzipped.read('matcharchive'),
                      features='html.parser')
        # skip abandoned/cancelled matches
        if not __is_match_played(soup):
            continue
        # each score has innings cards for the innings played and the last one for the match summary
        match_cards: ResultSet = soup.find_all(class_=info.Scorecard)
        match_summary_card: Tag = match_cards.pop(-1)
        (test_number, ground, played_in_country) = __get_match_details(
            match_summary_card, grounds)
        innings_card: Tag = None
        for i, innings_card in enumerate(match_cards):
            # 1-based innings numbering
            innings_number: int = i + 1
            # team name = leading text of the h5 before the first digit
            # NOTE(review): m is None if the h5 text has no digits -- the
            # next line would raise AttributeError; confirm inputs
            m = rematch(pattern=r'^(.+?)\s+\d',
                        string=innings_card.find('h5').get_text())
            batting_side: str = m.group(1)
            batter_title_rows: Tag = innings_card.find_all(
                class_=info.BatterTitleCell)
            for batter_title_row in batter_title_rows:
                batter_row = batter_title_row.parent
                # in later times, they've added a cell for the video of how batter outed. So, it's previously 8 now 9 cells
                batter_cells = batter_row.find_all('td')
                # NOTE(review): if a row has neither 8 nor 9 cells, the
                # unpacked names below stay unbound from the previous row
                # (or raise NameError on the first row) -- confirm inputs
                if len(batter_cells) == 8:
                    (namestr, outedstr, runs, balls, mins, fours, sixes,
                     _) = (x.get_text() for x in batter_row.find_all('td'))
                elif len(batter_cells) == 9:
                    (namestr, outedstr, runs, balls, mins, fours, sixes,
                     _, _) = (x.get_text() for x in batter_row.find_all('td'))
                outed_how: OutedHow = get_outed_how(outedstr)
                (name, is_cap, is_wk) = __get_cleaned_name(namestr=namestr)
                savecsv.writerow(name=name, playedin=played_in_country,
                                 side=batting_side, captain=is_cap,
                                 outedhow=outed_how.value, runs=runs,
                                 wicketkeeper=is_wk, innings=innings_number,
                                 testnumber=test_number)
def run(self):
    '''
    main loop that searches directories and retrieves the data
    '''
    # NOTE: Python 2 code (print statements below). Looks like a test/stub
    # variant of the zmq-based worker: file contents are replaced by the
    # literal 'test' and results go to a queue instead of a socket.
    data = {}
    # NOTE(review): dir_n is assigned but never used in this body
    dir_n = os.path.dirname
    if self.verify():
        print "WORKER: running in dir {0}".format(self.path)
        for fn, _ in Statwalker(self.path):
            # add a few more checks data:
            # - dont open empty files
            # - what to add to the dict for empty files?
            if rematch(self.pattern, fn):
                # NOTE(review): the empty dict is immediately overwritten
                data[fn] = {}
                data[fn] = 'test'
                #data[fn] = salt.utils.fopen(fn, 'rb').read()
        # send the data back to the caller
        self.queue.put({self.name: data})
def matches(self, nick, ident, hostname, command, argument, message):
    """Return True when every non-None field matches this rule's pattern.

    Each argument that is not None is tested with rematch against the
    same-named pattern attribute on self; a single failure returns False.
    """
    checks = (
        ("nick", nick),
        ("ident", ident),
        ("hostname", hostname),
        ("command", command),
        ("argument", argument),
        ("message", message),
    )
    for attr, value in checks:
        if value is None:
            continue
        # getattr keeps the pattern lookup lazy, like the original chain
        if not rematch(getattr(self, attr), value):
            return False
    return True
def __init__(self, parentio, name: str, frm: str, **kwargs):
    """
    Create an IO with struct formatting.

    :param parentio: parent IO object that gets replaced
    :param name: name of the new IO
    :param frm: struct format (single char) or 'COUNTs' e.g. '8s'
    :param kwargs: additional parameters:
        - bmk: label for the IO
        - bit: registers the IO as <class 'bool'> at the given bit in the byte
        - byteorder: byte order for the IO, default taken from the replaced IO
        - defaultvalue: default value for the IO, default from the replaced IO
    """
    # Validate the struct format: either a byte count + 's' or one code char
    regex = rematch("^([0-9]*s|[cbB?hHiIlLqQefd])$", frm)
    if regex is not None:
        # Validate and adopt the byte order
        byteorder = kwargs.get("byteorder", parentio._byteorder)
        if not (byteorder == "little" or byteorder == "big"):
            raise ValueError("byteorder must be 'little' or 'big'")
        bofrm = "<" if byteorder == "little" else ">"

        # Remember the parent's name for export
        self._parentio_name = parentio._name

        if frm == "?":
            # Boolean IO: addressed as a single bit inside the parent bytes
            bitaddress = kwargs.get("bit", 0)
            max_bits = parentio._length * 8
            if not (0 <= bitaddress < max_bits):
                raise ValueError(
                    "bitaddress must be a value between 0 and {0}"
                    "".format(max_bits - 1))
            bitlength = 1

            # Bit-wise replacement additionally needs this information
            if parentio._byteorder == byteorder:
                self._parentio_defaultvalue = parentio._defaultvalue
            else:
                # Reverse the default value to the requested byte order
                self._parentio_defaultvalue = parentio._defaultvalue[::-1]
            self._parentio_address = parentio.address
            self._parentio_length = parentio._length
        else:
            bitaddress = ""
            bitlength = struct.calcsize(bofrm + frm) * 8
            self._parentio_address = None
            self._parentio_defaultvalue = None
            self._parentio_length = None

        # [name,default,anzbits,adressbyte,export,adressid,bmk,bitaddress]
        valuelist = [
            name,
            # May be None only for StructIO; it is computed in that case
            kwargs.get("defaultvalue", None),
            bitlength,
            parentio._slc_address.start,
            False,
            str(parentio._slc_address.start).rjust(4, "0"),
            kwargs.get("bmk", ""),
            bitaddress
        ]
    else:
        # BUG FIX: this message literal was corrupted by a stray control
        # character in the source; restored as one coherent string.
        raise ValueError(
            "parameter frm has to be a single sign from [cbB?hHiIlLqQefd] "
            "or 'COUNTs' e.g. '8s'")

    # Instantiate the base class
    super().__init__(
        parentio._parentdevice, valuelist, parentio._iotype, byteorder,
        frm == frm.lower())
    self.__frm = bofrm + frm

    if "export" in kwargs:
        # Use export property to remember given value for export
        self.export = kwargs["export"]
    else:
        # User could change parent IO settings before replace to force
        # export, so use parent settings for the new IO
        self._export = parentio._export

    # Check that the new IO fits into the process image
    if not (self._slc_address.start >=
            parentio._parentdevice._dict_slc[parentio._iotype].start
            and self._slc_address.stop <=
            parentio._parentdevice._dict_slc[parentio._iotype].stop):
        raise BufferError(
            "registered value does not fit process image scope")
def wildcard_match(self, element, pattern, attr_name=False):
    """
    Check to see if an attribute matches a wildcard expression.

    The wildcard pattern is converted to a regular expression via
    `_prepare_wildcard_pattern` before matching. When both an `attribute`
    and an `inner_text` search are configured, the method recurses once
    for each, so BOTH must match for a True result.

    :param element: element exposing `get_attribute()` and `.text`
    :param pattern: wildcard pattern to test
    :param attr_name: optional attribute name to read directly
    :return: True on match, False otherwise
    """
    attr_value = ''
    # Handle `attribute` and `inner_text` if both are set.
    attr_search = self._attribute_search
    if attr_search and self._inner_text_search:
        self._reset()
        # Check if attribute matches.
        self.attribute(attr_search)
        if self.wildcard_match(element, pattern):
            # Don't return because we still need to check to see
            # if the inner_text matches.
            self._reset()
        else:
            self._reset()
            return False
        # Check to see if the inner text matches.
        self.inner_text()
        if self.wildcard_match(element, pattern):
            self._reset()
            # Return since both checks passed.
            return True
        else:
            self._reset()
            return False
    # Get the element attribute.
    if self._attribute_search:
        attr_value = element.get_attribute(self._attribute_search)
    # Get the elements inner text.
    if self._inner_text_search:
        attr_value = element.text
    # Get the element based on attr_name.
    if attr_name:
        attr_value = element.get_attribute(attr_name)
    # If the attribute doesn't exist, then obviously it doesn't match.
    if not attr_value:
        self._reset()
        return False
    # Translate the wildcard expression into a regex.
    pattern = self._prepare_wildcard_pattern(pattern)
    # Reset for reuse.
    self._reset()
    # Perform the regex match.
    if not rematch(pattern, attr_value):
        return False
    return True
def get_outed_how(outedhowstr: str) -> OutedHow:
    '''Get how batter is outed. Bowled or Hit wicket or whatnot'''
    # Ordered (pattern, outcome) pairs: the FIRST start-anchored,
    # case-insensitive match wins, preserving the original elif order.
    dismissals = (
        (r'\s*c\s+', OutedHow.CAUGHT),
        (r'\s*b\s+', OutedHow.BOWLED),
        (r'lbw+', OutedHow.LBW),
        (r'run out', OutedHow.RUNOUT),
        (r'not out', OutedHow.NOTOUT),
        (r'\s*st\s+', OutedHow.STUMPED),
        (r'retired out', OutedHow.RETIREDOUT),  # TODO: What is "retired out"
        (r'hit wicket', OutedHow.HITWICKET),
        (r'retired hurt', OutedHow.RETIREDHURT),
        (r'handled the ball', OutedHow.HANDLEDTHEBALL),
        (r'absent hurt', OutedHow.ABSENTHURT),
        (r'absent ill', OutedHow.ABSENTILL),
        (r'retired not out', OutedHow.RETIREDNOTOUT),
        # BUG FIX: 'obstructing the field' previously returned RETIREDILL,
        # and a duplicate (dead) branch further down held the correct
        # OBSTRUCTINGTHEFIELD mapping. The duplicate is removed here.
        (r'obstructing the field', OutedHow.OBSTRUCTINGTHEFIELD),
        (r'retired ill', OutedHow.RETIREDILL),
    )
    for pattern, how in dismissals:
        if rematch(pattern=pattern, string=outedhowstr, flags=IGNORECASE):
            return how
    raise Exception(f'Unknowed outed method: {outedhowstr}')