def _TimeRE__seqToRE(self, to_convert, directive): to_convert = sorted(to_convert, key = len, reverse = True) for value in to_convert: if value != '': break continue else: return '' regex = '|'.join(lambda [outmost-iterable]: for stuff in [outmost-iterable]: re_escape(stuff)(to_convert)) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex
def __seqToRE(self, to_convert, directive): """Convert a list to a regex string for matching a directive.""" def sorter(a, b): """Sort based on length. Done in case for some strange reason that names in the locale only differ by a suffix and thus want the name with the suffix to match first. """ try: a_length = len(a) except TypeError: a_length = 0 try: b_length = len(b) except TypeError: b_length = 0 return cmp(b_length, a_length) to_convert = to_convert[:] # Don't want to change value in-place. for value in to_convert: if value != '': break else: return '' to_convert.sort(sorter) regex = '|'.join([re_escape(stuff) for stuff in to_convert]) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex
def search(request, value='', form=None):
    """
    Search through nodes either from a POSTed search query or through an URL
    like /slug/key/value/ or /slug/value/.
    """
    result = []
    posted = False
    # A POSTed form query overrides any value taken from the URL.
    if request.POST:
        value = request.POST.get('q', '')
        posted = True
    if value:
        # Case-insensitive "contains" regex; user input is escaped so it is
        # matched literally.
        query = u'(?i).*{}.*'.format(re_escape(value))
        # nodes = nc.search_nodes_by_value(nc.graphdb.manager, query)
        # TODO: when search uses the above go back to that
        q = """
            match (n:Node)
            where any(prop in keys(n) where n[prop] =~ {search})
            return n
            """
        nodes = nc.query_to_list(nc.graphdb.manager, q, search=query)
        # Export formats bypass the HTML rendering entirely.
        if form == 'csv':
            return helpers.dicts_to_csv_response([n['n'] for n in nodes])
        elif form == 'xls':
            return helpers.dicts_to_xls_response([n['n'] for n in nodes])
        for node in nodes:
            nh = get_object_or_404(NodeHandle, pk=node['n']['handle_id'])
            item = {'node': node['n'], 'nh': nh}
            result.append(item)
    # A single hit goes straight to the node's own page.
    if len(result) == 1:
        return redirect(result[0]['nh'].get_absolute_url())
    return render(request, 'noclook/search_result.html',
                  {'value': value, 'result': result, 'posted': posted})
def WildcardToRegex(pattern):
    """Translate a pattern containing {?} / {*} wildcards into a regex.

    The pattern is first regex-escaped, then the escaped wildcard markers
    are rewritten: '{?}' -> '.' (any single character) and '{*}' -> '.*'.
    A pattern starting with '{?}' is additionally anchored at the start.

    FIX: the markers are now raw strings. The original "\{\?\}" relied on
    Python keeping invalid escape sequences verbatim, which raises a
    DeprecationWarning and becomes an error in future Python versions.
    """
    res = re_escape(pattern)
    if res.startswith(r"\{\?\}"):
        res = "^" + res
    res = res.replace(r"\{\?\}", ".")
    res = res.replace(r"\{\*\}", ".*")
    return res
def register(key, stem):
    """Register a rename mapping for *key*.

    Stores the new full name under the original key, and a substitution
    callable (old basename -> new basename, whole words only) under a
    sentinel key.

    BUG FIX: the word-boundary pattern must be a raw string. In the
    original non-raw '\b%s\b' the '\b' was a literal backspace character,
    so the compiled regex could never match at word boundaries.
    """
    newfullname: str = replace_stem(key, stem, '/')
    orgname: str = path.basename(key)
    newname: str = path.basename(newfullname)
    # Substitute the old basename only when it appears as a whole word.
    repfunc: Callable = re_compile(r'\b%s\b' % re_escape(orgname)).sub
    key_newname_map[key] = newfullname
    # NOTE(review): the '\b' prefix below is a literal backspace used as a
    # sentinel in the map key (not a regex); left unchanged -- confirm this
    # is how lookups elsewhere expect it.
    key_newname_map[
        '\b' + orgname] = lambda s, *, _r=repfunc, _rp=newname: _r(_rp, s)
def configStrings(iface):
    """Build the interfaces(5) extra lines configuring WLAN for *iface*."""
    driver = iNetwork.detectWlanModule(iface)
    ret = ""
    # madwifi needs the (hidden) ESSID forced before the interface comes up.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(
            config.plugins.wlan.essid.value) + "\" || true\n"
    if driver == 'wl':
        # Broadcom 'wl' driver uses its own helper scripts instead of
        # wpa_supplicant.
        ret += '\tpre-up wl-config.sh -m %s -k %s -s "%s" \n' % (
            config.plugins.wlan.encryption.value.lower(),
            re_escape(config.plugins.wlan.psk.value),
            config.plugins.wlan.essid.value)
        ret += '\tpost-down wl-down.sh\n'
    else:
        # Everything else goes through wpa_supplicant with the detected
        # driver backend.
        ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(
            iface) + " -B -dd -D" + driver + " || true\n"
        ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def _punct(self): """Get the list of punction as an re escaped string. """ if not hasattr(self, '_punct_initialized'): self._punct_initialized = re_escape(punctuation) return self._punct_initialized
def configStrings(iface):
    """Return interfaces(5) stanza lines starting wpa_supplicant for *iface*."""
    driver = iNetwork.detectWlanModule(iface)
    ret = ""
    # madwifi needs the hidden ESSID set explicitly before wpa_supplicant runs.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -B -dd -D" + driver + " || true\n"
    ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
async def prefix_wrapper_sync_callable(prefix_factory, re_flags, message):
    """
    Executes a synchronous (not asynchronous) callable prefix factory.

    This function is a coroutine.

    Parameters
    ----------
    prefix_factory : `callable`
        Plain synchronous callable returning the prefix. Its result is used
        directly -- it is NOT awaited. (DOC FIX: previously documented as
        `async-callable`, contradicting the code.)
    re_flags : `int`
        Regex matching flags.
    message : ``Message``
        The received message to parse the prefix from.

    Returns
    -------
    prefix : `None`, `str`
        The prefix used by the user. Returned as `None` if parsing failed.
    end : `int`
        The start of the content after the prefix. Returned as `-1` if
        parsing failed.
    """
    prefix = prefix_factory(message)
    if isinstance(prefix, str):
        escaped_prefix = re_escape(prefix)
    elif isinstance(prefix, tuple) and (len(prefix) > 0):
        # Any of the tuple's prefixes may match; escape each alternative.
        escaped_prefix = '|'.join(
            re_escape(prefix_part) for prefix_part in prefix)
    else:
        # Unsupported factory result (neither str nor non-empty tuple).
        return None, -1
    content = message.content
    if content is None:
        prefix = None
        end = -1
    else:
        parsed = re_match(escaped_prefix, content, re_flags)
        if parsed is None:
            prefix = None
            end = -1
        else:
            prefix = parsed.group(0)
            end = parsed.end()
    return prefix, end
def expand_var(string: str, expand_map: Dict[str, Any], var_char: str = '%',
               end_var_char: Optional[str] = '', case_sensitive: bool = True,
               exception_on_unexpanded: bool = False) -> str:
    """
    Expand variables in a string.

    If an encountered variable name is not in the provided map, the function
    either leaves the reference in place or raises, depending on
    `exception_on_unexpanded`. If `end_var_char` is an empty string,
    `var_char` is used in its place; if it is `None`, no right-hand
    delimiter is used and the name ends at a word boundary.

    :param string: The string to be expanded.
    :param expand_map: A name-to-value map of variable names and values.
    :param var_char: Character to the left of (or surrounding) the name.
    :param end_var_char: Character to the right of the name, '' to reuse
        `var_char`, or `None` for no right delimiter.
    :param case_sensitive: Whether names are matched case-sensitively; if
        not, they are lower-cased before the map lookup.
    :param exception_on_unexpanded: Raise `KeyError` for unknown names.
    :return: The variable-expanded string.
    """
    escaped_var_char: str = re_escape(var_char)
    if end_var_char is None:
        # Open-ended form: the name runs up to the next word boundary.
        var_pattern: Pattern = re_compile(
            f'{escaped_var_char}(?P<variable_name>.+?)\\b')
    else:
        # NOTE: `end_var_char` should not be escaped within the bracket
        # expression ("[]"), as characters within such expressions are
        # parsed literally.
        var_pattern: Pattern = re_compile(
            f'{escaped_var_char}(?P<variable_name>[^{end_var_char or var_char}]+){re_escape(end_var_char or var_char)}'
        )
    search_start_offset = 0
    while match := var_pattern.search(string=string, pos=search_start_offset):
        var_pos_start, var_pos_end = match.span(0)
        variable_name: str = match.groupdict()['variable_name']
        if not case_sensitive:
            variable_name = variable_name.lower()
        if variable_name in expand_map:
            expanded_head: str = string[:var_pos_start] + str(
                expand_map[variable_name])
            string = expanded_head + string[var_pos_end:]
            # Resume after the replacement so expanded values are never
            # re-scanned (prevents runaway recursion through the map).
            search_start_offset = len(expanded_head)
        elif exception_on_unexpanded:
            raise KeyError(
                f'The variable name {variable_name} is not in the expand map.')
        else:
            # Leave the unknown reference in place and skip past it.
            search_start_offset = var_pos_end
    # BUG FIX: the function is annotated `-> str` but previously fell off
    # the end without a return statement; return the expanded string.
    return string
def escape(pattern):
    """Return *pattern* with every regex metacharacter backslash-escaped.

    :param pattern: an `re` pattern
    :type pattern: str
    :rtype: str
    """
    escaped = re_escape(pattern)
    return escaped
def token_spec(self):
    """The definitions used for English tokens.

    Returns a tuple of ``(NAME, pattern)`` pairs. Order matters: specific
    tokens must precede general ones. Punctuation contains a period, but
    ellipses must win over a lone period, so ELLIPSES is listed before
    PUNCTUATION.
    """
    punct_class = r'[{}]'.format(re_escape(punctuation))
    return (
        ('WORD', r'\w+'),
        ('ELLIPSES', re_escape(r'...')),
        ('QUOTATION', re_escape(r'"')),
        ('PUNCTUATION', punct_class),
    )
def sub_dict(text, subst):
    """
    Replace all instances of multiple words in a string. Prevents problems
    with overlapping keys and substitutions.

    :param subst: something that behaves like a dictionary, where each key
        is converted to its value
    """
    # Keys are tried longest-first so a shorter overlapping key cannot
    # shadow a longer one; each key is regex-escaped and matched literally.
    # NOTE(review): Python 2 only (`unicode`); `compile` here is presumably
    # `re.compile` imported under that name -- confirm against the module's
    # imports.
    regex = compile('|'.join(
        re_escape(unicode(key))
        for key in sorted(subst.keys(), key=lambda item: -len(item))))
    return regex.sub(lambda match: subst[match.group()], unicode(text))
def delete_recipe(urn):
    '''
    Delete recipe page.

    Post route deletes recipe.
    '''
    # Fetch only the fields needed to authorize and to fix up relations.
    recipe_data = mongo.db.recipes.find_one({'urn': urn}, {
        'title': 1,
        'username': 1,
        'parent': 1,
        'children': 1,
        'deleted': 1
    })
    username = session.get('username')
    # Missing or already soft-deleted recipes look like 404s.
    if recipe_data is None or recipe_data.get('deleted', False):
        abort(404)
    elif username == recipe_data[
            'username'] or username == 'Admin':  # Only the author and admins can delete
        if request.method == 'POST':
            # Delete recipe and update authors recipe count, and remove
            # references from parents and children.
            # Deletion must be confirmed by re-typing the recipe title.
            if request.form.get('confirm') == recipe_data['title']:
                # Soft delete: replace the document with a tombstone.
                mongo.db.recipes.replace_one({'urn': urn}, {
                    'urn': urn,
                    'deleted': True
                })
                mongo.db.users.update_one(
                    {'username': recipe_data['username']},
                    {'$inc': {
                        'recipe-count': -1
                    }})
                if recipe_data.get('parent') is not None:
                    # Remove this recipe from its parent's children list.
                    mongo.db.recipes.update_one(
                        {'urn': recipe_data['parent']}, {
                            '$pull': {
                                'children': {
                                    'urn': urn,
                                    'title': recipe_data['title']
                                }
                            }
                        })
                if recipe_data.get('children') is not None:
                    # Orphan any forks of this recipe.
                    mongo.db.recipes.update_many({'parent': urn},
                                                 {'$set': {
                                                     'parent': None
                                                 }})
                flash('Successfully deleted recipe "{}".'.format(
                    recipe_data['title']))
                return redirect(url_for('index'))
            else:
                # Wrong confirmation text: flash and fall through to re-render.
                flash('Failed to delete recipe "{}".'.format(
                    recipe_data['title']))
        # GET, or POST with a failed confirmation: show the confirm page.
        return render_template('delete-recipe.html',
                               title=recipe_data['title'],
                               title_pattern=re_escape(recipe_data['title']),
                               urn=urn,
                               username=username)
    else:
        abort(403)
def __seqToRE(self, to_convert, directive): to_convert = sorted(to_convert, key=len, reverse=True) for value in to_convert: if value != '': break else: return '' regex = '|'.join((re_escape(stuff) for stuff in to_convert)) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex
def __init__(self, defaults=None, dict_type=_default_dict, allow_no_value=False,
             *, delimiters=('=', ':'), comment_prefixes=('#', ';'),
             inline_comment_prefixes=None, strict=True,
             empty_lines_in_values=True, default_section=DEFAULTSECT,
             interpolation=_UNSET, converters=_UNSET):
    # replacement of ConfigParser.__init__, do not call super-class constructor
    self._dict = dict_type            # factory used for all internal mappings
    self._defaults = dict_type()
    self._sections = dict_type()
    self._proxies = dict_type()
    self._cache = dict()              # extra cache, not in stock ConfigParser
    self._comment_prefixes = tuple(comment_prefixes or ())
    self._inline_comment_prefixes = tuple(inline_comment_prefixes or ())
    self._strict = strict
    self._allow_no_value = allow_no_value
    self._empty_lines_in_values = empty_lines_in_values
    self.default_section = default_section
    self._converters = ConverterMapping(self)
    if (converters is not _UNSET):
        self._converters.update(converters)
    self._proxies[default_section] = SectionProxy(self, default_section)
    if defaults:
        # Defaults go through optionxform just like regular options.
        for key, value in defaults.items():
            self._defaults[self.optionxform(key)] = value
    self._delimiters = tuple(delimiters)
    # The precompiled class regexes cover the default delimiters; any other
    # delimiter set needs a pattern built with the delimiters escaped.
    if delimiters == ('=', ':'):
        self._optcre = self.OPTCRE_NV if allow_no_value else self.OPTCRE
    else:
        d = "|".join(re_escape(d) for d in delimiters)
        if allow_no_value:
            self._optcre = re_compile(self._OPT_NV_TMPL.format(delim=d),
                                      RE_VERBOSE)
        else:
            self._optcre = re_compile(self._OPT_TMPL.format(delim=d),
                                      RE_VERBOSE)
    # NOTE(review): _UNSET selects ExtendedInterpolation here, which differs
    # from stdlib ConfigParser's default of BasicInterpolation -- confirm
    # this is intentional for this subclass.
    if (interpolation is None):
        self._interpolation = Interpolation()
    elif (interpolation is _UNSET):
        self._interpolation = ExtendedInterpolation()
    else:
        self._interpolation = interpolation
def configStrings(iface):
    # NOTE(review): Python 2 code (print statement). Builds the
    # interfaces(5) lines for a WLAN interface, but first verifies the box
    # through its TPM certificate chain; if the challenge fails, the
    # 'dreambox' driver name is used instead of the detected module.
    try:
        device = open("/proc/stb/info/model", "r").readline().strip()
    except:
        device = ""
    if device != "dm7025":
        # Root public key used to validate the level-2 certificate.
        rootkey = [
            '\x9f', '|', '\xe4', 'G', '\xc9', '\xb4', '\xf4', '#', '&',
            '\xce', '\xb3', '\xfe', '\xda', '\xc9', 'U', '`', '\xd8', '\x8c',
            's', 'o', '\x90', '\x9b', '\\', 'b', '\xc0', '\x89', '\xd1',
            '\x8c', '\x9e', 'J', 'T', '\xc5', 'X', '\xa1', '\xb8', '\x13',
            '5', 'E', '\x02', '\xc9', '\xb2', '\xe6', 't', '\x89', '\xde',
            '\xcd', '\x9d', '\x11', '\xdd', '\xc7', '\xf4', '\xe4', '\xe4',
            '\xbc', '\xdb', '\x9c', '\xea', '}', '\xad', '\xda', 't', 'r',
            '\x9b', '\xdc', '\xbc', '\x18', '3', '\xe7', '\xaf', '|', '\xae',
            '\x0c', '\xe3', '\xb5', '\x84', '\x8d', '\r', '\x8d', '\x9d',
            '2', '\xd0', '\xce', '\xd5', 'q', '\t', '\x84', 'c', '\xa8',
            ')', '\x99', '\xdc', '<', '"', 'x', '\xe8', '\x87', '\x8f',
            '\x02', ';', 'S', 'm', '\xd5', '\xf0', '\xa3', '_', '\xb7', 'T',
            '\t', '\xde', '\xa7', '\xf1', '\xc9', '\xae', '\x8a', '\xd7',
            '\xd2', '\xcf', '\xb2', '.', '\x13', '\xfb', '\xac', 'j',
            '\xdf', '\xb1', '\x1d', ':', '?'
        ]
        etpm = eTPM()
        # Walk the certificate chain: level-2 cert validated by the root
        # key, level-3 cert validated by the level-2 key. Any failure
        # returns None (no config at all).
        l2cert = etpm.getCert(eTPM.TPMD_DT_LEVEL2_CERT)
        if l2cert is None:
            return
        l2key = validate_certificate(l2cert, rootkey)
        if l2key is None:
            return
        l3cert = etpm.getCert(eTPM.TPMD_DT_LEVEL3_CERT)
        if l3cert is None:
            return
        l3key = validate_certificate(l3cert, l2key)
        if l3key is None:
            return
        # Challenge/response against the TPM with a fresh random nonce.
        rnd = get_random()
        if rnd is None:
            return
        val = etpm.challenge(rnd)
        result = decrypt_block(val, l3key)
    # The nonce must round-trip through the TPM for the real driver to be
    # used (dm7025 is exempt from the check).
    if device == "dm7025" or result[80:88] == rnd:
        driver = iNetwork.detectWlanModule(iface)
    else:
        driver = 'dreambox'
    print 'Using "%s" as wpa-supplicant driver' % (driver)
    ret = ""
    # madwifi needs the hidden ESSID set before wpa_supplicant starts.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(
            config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(
        iface) + " -B -dd -D" + driver + " || true\n"
    ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def configStrings(iface):
    """Build the interfaces(5) extra lines configuring WLAN for *iface*."""
    driver = iNetwork.detectWlanModule(iface)
    ret = ""
    # madwifi needs the (hidden) ESSID forced before the interface comes up.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
    if driver == 'wl':
        # Broadcom 'wl' driver is configured via its own helper scripts
        # rather than wpa_supplicant.
        ret += '\tpre-up wl-config.sh -m %s -k %s -s "%s" \n' % (
            config.plugins.wlan.encryption.value.lower(),
            re_escape(config.plugins.wlan.psk.value),
            config.plugins.wlan.essid.value)
        ret += '\tpost-down wl-down.sh\n'
    else:
        ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -B -dd -D" + driver + " || true\n"
        ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def search(request, value='', form=None, permission_filter=False):
    """
    Search through nodes either from a POSTed search query or through an URL
    like /slug/key/value/ or /slug/value/.
    """
    result = []
    posted = False
    # A POSTed form query overrides any value taken from the URL.
    if request.POST:
        value = request.POST.get('q', '')
        posted = True
    if value:
        # Case-insensitive "contains" regex; user input is escaped so it is
        # matched literally.
        query = u'(?i).*{}.*'.format(re_escape(value))
        permission_clause = ''
        if permission_filter:
            # Restrict hits to handle_ids the requesting user may read.
            readable_ids = sriutils.get_ids_user_canread(request.user)
            readable_ids = [str(x) for x in readable_ids]  # string conversion
            ids = ', '.join(readable_ids)
            # NOTE(review): ids are interpolated directly into the Cypher
            # text rather than passed as a parameter; they come from str()
            # over internal ids, not user input -- confirm they are ints.
            permission_clause = 'AND n.handle_id in [{ids}]'.format(ids=ids)
        # nodes = nc.search_nodes_by_value(nc.graphdb.manager, query)
        # TODO: when search uses the above go back to that
        # {{search}} survives .format() as the {search} query parameter.
        q = """
            MATCH (n:Node)
            WHERE any(prop in keys(n) WHERE n[prop] =~ {{search}}) {}
            WITH n, [prop IN keys(n) WHERE n[prop] =~ {{search}}] AS match_props
            RETURN n, reduce(s = "", prop IN match_props | s + n[prop] + ' [..] ') AS match_txt
            """.format(permission_clause)
        nodes = nc.query_to_list(nc.graphdb.manager, q, search=query)
        # Export formats bypass the HTML rendering entirely.
        if form == 'csv':
            return helpers.dicts_to_csv_response([n['n'] for n in nodes])
        elif form == 'xls':
            return helpers.dicts_to_xls_response([n['n'] for n in nodes])
        elif form == 'json':
            return helpers.dicts_to_json_response([(n['n'], n['match_txt'])
                                                   for n in nodes])
        for node in nodes:
            nh = get_object_or_404(NodeHandle, pk=node['n']['handle_id'])
            item = {'node': node['n'], 'nh': nh}
            result.append(item)
    # A single hit goes straight to the node's own page.
    if len(result) == 1:
        return redirect(result[0]['nh'].get_absolute_url())
    return render(request, 'noclook/search_result.html', {
        'value': value,
        'result': result,
        'posted': posted
    })
def init_keywords() -> tuple:
    """Initialize keywords to look for, extracted from files and arguments.

    Associated options::

        --word=test OR --word=test1,test2
        --dict=my_dictionary

    Using the option ``word`` alone replace the default dictionary with words.
    Using the option ``dict`` alone replace the default dictionary with dict.
    Using both ``word`` and ``dict`` combines both.

    Returns a ``(keywords, compiled_regexes)`` pair.
    FIX: the return annotation said ``-> dict`` but a 2-tuple has always
    been returned; the annotation now matches the behavior.
    """
    # '*' in a keyword acts as a wildcard; everything else is matched
    # literally (regex-escaped). A def replaces the original lambda
    # assignments (PEP 8 / E731).
    if OPTIONS.sensitive:
        def make_regex(word):
            return re_compile(re_escape(word).replace("*", r".*"))
    else:
        def make_regex(word):
            return re_compile(re_escape(word).replace("*", r".*"), IGNORECASE)
    keywords = []
    k_regex = []  # keywords compiled regex
    categories = []
    # OPTION LIST
    if OPTIONS.list:
        categories += OPTIONS.list.split(",")
        VERBOSE("Using keywords from lists: {0}".format(", ".join(categories)))
    # OPTION WORD
    if OPTIONS.word:
        keywords += OPTIONS.word.split(",")
        k_regex += [make_regex(x) for x in keywords]
    # OPTION DICT
    # NOTE(review): compiling over the full `keywords` list here re-compiles
    # entries already handled by the WORD branch, producing duplicates in
    # k_regex when both options are used -- preserved as-is; confirm intent.
    if OPTIONS.dict:
        dictionaries = OPTIONS.dict.split(",")
        for dictfile in dictionaries:
            keywords += parse_dict(dictfile, categories)
        k_regex += [make_regex(x) for x in keywords]
    # DEFAULT DICTIONARY
    if not keywords:
        keywords += parse_dict(DEFAULT_DICTIONARY, categories)
        k_regex += [make_regex(x) for x in keywords]
    return keywords, k_regex
def configStrings(iface):
    """Build the interfaces(5) extra lines configuring WLAN for *iface*."""
    driver = iNetwork.detectWlanModule(iface)
    ret = ""
    if driver == "brcm-wl":
        # Broadcom wl: normalize the encryption label for wl-config.sh.
        encryption = config.plugins.wlan.encryption.value
        if encryption == "WPA/WPA2":
            encryption = "WPA2"
        encryption = encryption.lower()
        if encryption == "unencrypted":
            encryption = "None"
        # NOTE(review): psk and essid are inserted unescaped here, unlike
        # the madwifi branch below -- confirm whether that is intended.
        ret += '\tpre-up wl-config.sh -m ' + encryption + ' -k ' + config.plugins.wlan.psk.value + ' -s \"' + config.plugins.wlan.essid.value + '\" || true\n'
        ret += '\tpost-down wl-down.sh || true\n'
        return ret
    # madwifi needs the hidden ESSID forced before wpa_supplicant starts.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(
            config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(
        iface) + " -B -dd -D" + driver + " || true\n"
    # NOTE(review): for madwifi with a hidden ESSID this emits the iwconfig
    # line a second time -- confirm the duplication is intentional.
    if config.plugins.wlan.hiddenessid.value == True:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(
            config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def __init_subclass__(
    cls,
    bblfsh_language: str,
    reserved: List[str],
    uast_fixers: Optional[Dict[str, Callable[[BblfshNode], None]]] = None,
    convert_to_utf8: bool = True,
) -> None:
    # Per-subclass, one-time parser configuration.
    cls._bblfsh_language = bblfsh_language
    # Reverse lexicographic order guarantees that a reserved word which is
    # a prefix of another (e.g. 'in' vs 'int') appears after the longer one
    # in the alternation, so the longer token wins the match.
    cls._parser_reserved = re_compile("|".join(
        re_escape(i) for i in sorted(reserved, reverse=True)))
    cls._parser_space = re_compile(r"\s+")
    # Optional per-node-type post-processing hooks for the UAST.
    cls._uast_fixers = uast_fixers if uast_fixers else {}
    cls._convert_to_utf8 = convert_to_utf8
    cls._logger = getLogger(cls.__name__)
def ordered_permutation_regex(sentence): """ Builds a regex that matches 'ordered permutations' of a sentence's words. Args: sentence (str): The sentence to build a match pattern to Returns: regex (re object): Compiled regex object represented the possible ordered permutations of the sentence, from longest to shortest. Example: The sdesc_regex for an sdesc of " very tall man" will result in the following allowed permutations, regex-matched in inverse order of length (case-insensitive): "the very tall man", "the very tall", "very tall man", "very tall", "the very", "tall man", "the", "very", "tall", and "man". We also add regex to make sure it also accepts num-specifiers, like /2-tall. """ # escape {#nnn} markers from sentence, replace with nnn sentence = _RE_REF.sub(r"\1", sentence) # escape {##nnn} markers, replace with nnn sentence = _RE_REF_LANG.sub(r"\1", sentence) # escape self-ref marker from sentence sentence = _RE_SELF_REF.sub(r"", sentence) # ordered permutation algorithm words = sentence.split() combinations = itertools.product((True, False), repeat=len(words)) solution = [] for combination in combinations: comb = [] for iword, word in enumerate(words): if combination[iword]: comb.append(word) elif comb: break if comb: solution.append(_PREFIX + r"[0-9]*%s*%s(?=\W|$)+" % (_NUM_SEP, re_escape(" ".join(comb)).rstrip("\\"))) # combine into a match regex, first matching the longest down to the shortest components regex = r"|".join(sorted(set(solution), key=lambda o: len(o), reverse=True)) return regex
def mktoken(name, prec='none', re=None, s=None, tokens=None, keyword=None,
            between=None, escape=None, convert=None):
    """
    creates a token class (that is then converted into a TokenSpec), i.e. this
    is a Token factory factory.

    :param name: the name of the token class
    :param prec: the precedence
    :param re: a regular expression describing the token
    :param s: a fixed string for the token
    :param tokens: a string containing a space-separated list of matching tokens
    :param keyword: a keyword is a string that is also matched by another RE
    :param between: a (start, end) delimiter pair the token's text lies between
    :param escape: currently unused in this factory
    :param convert: the function used to construct the semantic function
    :return: a TokenBuilder
    """
    # NOTE(review): Python 2 code (`xrange`). Exactly one pattern source is
    # used, in priority order: re > s > tokens > between.
    token_re = None
    if re is not None:
        token_re = re
    elif s is not None:
        # Fixed string: match it literally.
        token_re = re_escape(s)
    elif tokens is not None:
        # Alternation of literal tokens.
        token_re = '(?:%s)' % ('|'.join(
            [re_escape(tok) for tok in tokens.split()]))
    elif between is not None:
        if len(between) != 2:
            raise SpecError("Need exactly two items for between: %s" %
                            (between, ))
        starter, ender = between
        # Build alternatives that rule out the terminator occurring inside
        # the token: for each proper prefix of `ender`, match that prefix
        # followed by any character that breaks it.
        not_enders = []
        for i in xrange(len(ender)):
            not_enders.append('{}[^{}]'.format(re_escape(ender[:i]),
                                               re_escape(ender[i])))
        token_re = '{}(?:{})*{}'.format(re_escape(starter),
                                        '|'.join([x for x in not_enders]),
                                        re_escape(ender))
        #print(token_re)
        if convert is None:
            # Default semantic action: strip the delimiters.
            def my_convert(s):
                assert s.startswith(starter) and s.endswith(ender)
                return s[len(starter):-len(ender)]
            convert = my_convert
    else:
        token_re = None
    return TokenBuilder(token_re, prec, convert, keyword=keyword, name=name)
def __seqToRE(self, to_convert, directive): """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This prevents the possibility of a match occuring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). """ to_convert = sorted(to_convert, key=len, reverse=True) for value in to_convert: if value != '': break else: return '' regex = '|'.join(re_escape(stuff) for stuff in to_convert) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex
def search_room_by_title(self, keyword="", search_mode=None, req_seq=0, limit=20):
    """Find non-support rooms whose title contains *keyword* (case-insensitive).

    Results are sorted newest-first, skipping *req_seq* entries and returning
    at most *limit*. On any database error an empty list is returned.

    BUG FIXES: the mutable default ``search_mode={}`` was shared between
    calls; the ``limit`` argument was silently ignored (hard-coded 20).
    """
    if search_mode is None:
        search_mode = {}  # NOTE(review): currently unused -- confirm intent
    try:
        # Keyword is regex-escaped so user input is matched literally.
        return Mongo_Wisewolf.rooms.find({
            "room_title": {
                "$regex": "(?i).*" + re_escape(keyword) + ".*"
            },
            "room_kind": {
                "$ne": "support"
            }
        }).sort("_id", -1).skip(req_seq).limit(limit)
    except Exception as e:
        print("Exception: MongoDao.MongoDao.Mongo_search_room_by_title:", e)
        return []
def load_tests(_, tests, __):  # pylint: disable=redefined-outer-name,unused-argument
    """Collect doctests from every module under each entry in `roots`."""
    finder = DocTestFinder(exclude_empty=False)
    for root_mod in roots:
        # Roots may be module objects or (path, name) pairs.
        if isinstance(root_mod, ModuleType):
            root_mod_path, root_mod_name = root_mod.__file__, root_mod.__name__
        else:
            root_mod_path, root_mod_name = root_mod
        # A package's __init__.py stands for its whole directory.
        if splitext(basename(root_mod_path))[0] == "__init__":
            root_mod_path = dirname(root_mod_path)
        if isfile(root_mod_path):
            # Single-file module: fake a one-entry os.walk() result.
            root_mod_iter = ((dirname(root_mod_path), None, (basename(root_mod_path),)),)
        else:
            root_mod_iter = os_walk(root_mod_path)
        for dir_name, _, file_names in root_mod_iter:
            # Skip directories that are not valid sub-packages of the root.
            if not re_match(re_escape(root_mod_path) + _PATH_RE, dir_name):
                continue
            # Translate the path below the root into a dotted module prefix.
            mod_name = dir_name[len(root_mod_path) :].replace(ospath_sep, ".").strip(".")
            if mod_name:
                mod_name = root_mod_name + "." + mod_name
            else:
                mod_name = root_mod_name
            for file_name in file_names:
                if not file_name.endswith(".py"):
                    continue
                if file_name == "__init__.py":
                    test_mod_name = mod_name
                else:
                    test_mod_name = mod_name + "." + splitext(file_name)[0]
                try:
                    tests.addTest(DocTestSuite(test_mod_name, test_finder=finder))
                except Exception as err:  # pylint: disable=broad-except
                    # Best effort: a module that fails to import is logged,
                    # not fatal to the whole collection.
                    _LOGGER.warning("unable to load doctests from %s (%s)",
                                    test_mod_name, err, exc_info=True)
    return tests
async def filters_watcher(c: Alita, m: Message):
    """Reply with the stored filter content when a message matches a trigger.

    Triggers may be stored as 'a|b|c'; they are flattened to individual
    triggers and each is matched as a whole word against the lower-cased
    message text. Only the first matching trigger gets a reply.

    FIX: removed the unreachable `continue` that followed `break` in the
    original loop body (dead code) and restructured with a guard clause.
    """
    chat_filters = db.get_all_filters(m.chat.id)
    actual_filters = {j for i in chat_filters for j in i.split("|")}
    for trigger in actual_filters:
        # Whole-word match: the trigger must be bounded by non-word
        # characters or the string ends.
        pattern = r"( |^|[^\w])" + re_escape(trigger) + r"( |$|[^\w])"
        # NOTE(review): m.text may be None for non-text messages -- confirm
        # upstream handlers guarantee text here.
        match = await regex_searcher(pattern, m.text.lower())
        if not match:
            continue
        try:
            msgtype = await send_filter_reply(c, m, trigger)
            LOGGER.info(f"Replied with {msgtype} to {trigger} in {m.chat.id}")
        except Exception as ef:
            await m.reply_text(f"Error: {ef}")
            LOGGER.error(ef)
            LOGGER.error(format_exc())
        # Only the first matching trigger is answered.
        break
    return
def configStrings(iface):
    """Build the interfaces(5) extra lines configuring WLAN for *iface*.

    FIX: one branch used `re.escape(...)` while this module imports the
    function as `re_escape`; unless `re` itself is also imported, that line
    raised NameError at runtime. Both call sites now use `re_escape`.
    """
    driver = iNetwork.detectWlanModule(iface)
    ret = ""
    if driver == "brcm-wl":
        # Broadcom wl: normalize the encryption label for wl-config.sh.
        encryption = config.plugins.wlan.encryption.value
        if encryption == "WPA/WPA2":
            encryption = "WPA2"
        encryption = encryption.lower()
        if encryption == "unencrypted":
            encryption = "None"
        ret += '\tpre-up wl-config.sh -m ' + encryption + ' -k ' + config.plugins.wlan.psk.value + ' -s \"' + config.plugins.wlan.essid.value + '\" || true\n'
        ret += '\tpost-down wl-down.sh || true\n'
        return ret
    # madwifi needs the hidden ESSID set before wpa_supplicant starts.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -B -dd -D" + driver + " || true\n"
    # NOTE(review): for madwifi with a hidden ESSID this emits the iwconfig
    # line a second time -- confirm whether the duplication is intended.
    if config.plugins.wlan.hiddenessid.value == True:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
    ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def configStrings(iface):
    # NOTE(review): Python 2 code (print statement). The TPM validation
    # below was disabled by commenting it out; `if True:` keeps the
    # original indentation of the surviving branch.
    #--->
    #- try:
    #-     device = open("/proc/stb/info/model", "r").readline().strip()
    #- except:
    #-     device = ""
    #- if device != "dm7025":
    #-     rootkey = ['\x9f', '|', '\xe4', 'G', '\xc9', '\xb4', '\xf4', '#', '&', '\xce', '\xb3', '\xfe', '\xda', '\xc9', 'U', '`', '\xd8', '\x8c', 's', 'o', '\x90', '\x9b', '\\', 'b', '\xc0', '\x89', '\xd1', '\x8c', '\x9e', 'J', 'T', '\xc5', 'X', '\xa1', '\xb8', '\x13', '5', 'E', '\x02', '\xc9', '\xb2', '\xe6', 't', '\x89', '\xde', '\xcd', '\x9d', '\x11', '\xdd', '\xc7', '\xf4', '\xe4', '\xe4', '\xbc', '\xdb', '\x9c', '\xea', '}', '\xad', '\xda', 't', 'r', '\x9b', '\xdc', '\xbc', '\x18', '3', '\xe7', '\xaf', '|', '\xae', '\x0c', '\xe3', '\xb5', '\x84', '\x8d', '\r', '\x8d', '\x9d', '2', '\xd0', '\xce', '\xd5', 'q', '\t', '\x84', 'c', '\xa8', ')', '\x99', '\xdc', '<', '"', 'x', '\xe8', '\x87', '\x8f', '\x02', ';', 'S', 'm', '\xd5', '\xf0', '\xa3', '_', '\xb7', 'T', '\t', '\xde', '\xa7', '\xf1', '\xc9', '\xae', '\x8a', '\xd7', '\xd2', '\xcf', '\xb2', '.', '\x13', '\xfb', '\xac', 'j', '\xdf', '\xb1', '\x1d', ':', '?']
    #-     etpm = eTPM()
    #-     l2cert = etpm.getCert(eTPM.TPMD_DT_LEVEL2_CERT)
    #-     if l2cert is None:
    #-         return
    #-     l2key = validate_certificate(l2cert, rootkey)
    #-     if l2key is None:
    #-         return
    #-     l3cert = etpm.getCert(eTPM.TPMD_DT_LEVEL3_CERT)
    #-     if l3cert is None:
    #-         return
    #-     l3key = validate_certificate(l3cert, l2key)
    #-     if l3key is None:
    #-         return
    #-     rnd = get_random()
    #-     if rnd is None:
    #-         return
    #-     val = etpm.challenge(rnd)
    #-     result = decrypt_block(val, l3key)
    #- if device == "dm7025" or result[80:88] == rnd:
    #---<
    if True:
        driver = iNetwork.detectWlanModule(iface)
    else:
        driver = 'dreambox'
    print 'Using "%s" as wpa-supplicant driver' % (driver)
    ret = ""
    # madwifi needs the hidden ESSID set before wpa_supplicant starts.
    if driver == 'madwifi' and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + " essid \"" + re_escape(config.plugins.wlan.essid.value) + "\" || true\n"
    # NOTE(review): this string literal was split across lines in the
    # collapsed source; reconstructed as " -B -dd -D" -- confirm against
    # the sibling configStrings implementations.
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -B -dd -D" + driver + " || true\n"
    ret += "\tpre-down wpa_cli -i" + iface + " terminate || true\n"
    return ret
def __seqToRE(self, to_convert, directive): """Convert a list to a regex string for matching a directive. Want possible matching values to be from longest to shortest. This prevents the possibility of a match occurring for a value that also a substring of a larger value that should have matched (e.g., 'abc' matching when 'abcdef' should have been the match). """ try: from re import escape as re_escape except ImportError: raise ImportError('cosmopolitan _strptime.TimeRE() ' 'requires manually yoinking re') to_convert = sorted(to_convert, key=len, reverse=True) for value in to_convert: if value != '': break else: return '' regex = '|'.join(re_escape(stuff) for stuff in to_convert) regex = '(?P<%s>%s' % (directive, regex) return '%s)' % regex
def magic_from_file(filename, mime=False): """ Fallback function to retrieve file type when python-magic is missing Parameters ---------- filename : str File path to read mime : bool Retrieve the file mimetype, otherwise a readable name (Default: False) Returns ------- str File type result as human readable name or mimetype """ # Use dereference to follow symlink commands = ["file", "--dereference", str(filename)] if mime: commands.insert(1, "--mime-type") with Popen(commands, stdout=PIPE, stdin=PIPE, stderr=STDOUT, universal_newlines=True) as pipe: output, error_output = pipe.communicate() result = re_findall(r"^%s\:\s+(.*)$" % re_escape(str(filename)), output[:-1]) if len(result) > 0: return result[0] return str()
def format_to_re(format):
    """Return a regex matching every value the given Python 2 %(name)s-style
    format string could resolve to; each placeholder becomes a named group.

    Literal text is regex-escaped while the placeholder positions are left
    as raw regex. A unique marker string separates the two kinds of segment
    during construction. Background:
    http://stackoverflow.com/questions/2654856/python-convert-format-string-to-regular-expression
    """
    marker = uuid1().hex
    assert marker not in format

    class _Placeholders(dict):
        # Substituting through % wraps each placeholder's group pattern in
        # the marker so the result can be split back apart afterwards.
        def __getitem__(self, key):
            return '{0}(?P<{1}>.*?){0}'.format(marker, key)

    segments = (format % _Placeholders()).split(marker)
    # Even indices hold literal text; odd indices hold the injected groups.
    escaped = [re_escape(seg) if idx % 2 == 0 else seg
               for idx, seg in enumerate(segments)]
    return ''.join(escaped)
def configStrings(iface):
    # NOTE(review): Python 2 code (print statement). Same TPM-validation
    # scheme as the other configStrings variants, but reading the model
    # from /usr/local/etc/stb/info and exempting "e2pc" from the check.
    try:
        device = open("/usr/local/etc/stb/info/model", "r").readline().strip()
    except:
        device = ""
    if device != "e2pc":
        # Root public key used to validate the level-2 certificate.
        rootkey = [
            "\x9f", "|", "\xe4", "G", "\xc9", "\xb4", "\xf4", "#", "&",
            "\xce", "\xb3", "\xfe", "\xda", "\xc9", "U", "`", "\xd8", "\x8c",
            "s", "o", "\x90", "\x9b", "\\", "b", "\xc0", "\x89", "\xd1",
            "\x8c", "\x9e", "J", "T", "\xc5", "X", "\xa1", "\xb8", "\x13",
            "5", "E", "\x02", "\xc9", "\xb2", "\xe6", "t", "\x89", "\xde",
            "\xcd", "\x9d", "\x11", "\xdd", "\xc7", "\xf4", "\xe4", "\xe4",
            "\xbc", "\xdb", "\x9c", "\xea", "}", "\xad", "\xda", "t", "r",
            "\x9b", "\xdc", "\xbc", "\x18", "3", "\xe7", "\xaf", "|", "\xae",
            "\x0c", "\xe3", "\xb5", "\x84", "\x8d", "\r", "\x8d", "\x9d",
            "2", "\xd0", "\xce", "\xd5", "q", "\t", "\x84", "c", "\xa8",
            ")", "\x99", "\xdc", "<", '"', "x", "\xe8", "\x87", "\x8f",
            "\x02", ";", "S", "m", "\xd5", "\xf0", "\xa3", "_", "\xb7", "T",
            "\t", "\xde", "\xa7", "\xf1", "\xc9", "\xae", "\x8a", "\xd7",
            "\xd2", "\xcf", "\xb2", ".", "\x13", "\xfb", "\xac", "j",
            "\xdf", "\xb1", "\x1d", ":", "?",
        ]
        etpm = eTPM()
        # Certificate chain: level-2 cert validated by the root key,
        # level-3 cert by the level-2 key; any failure aborts (returns None).
        l2cert = etpm.getCert(eTPM.TPMD_DT_LEVEL2_CERT)
        if l2cert is None:
            return
        l2key = validate_certificate(l2cert, rootkey)
        if l2key is None:
            return
        l3cert = etpm.getCert(eTPM.TPMD_DT_LEVEL3_CERT)
        if l3cert is None:
            return
        l3key = validate_certificate(l3cert, l2key)
        if l3key is None:
            return
        # Challenge/response against the TPM with a fresh random nonce.
        rnd = get_random()
        if rnd is None:
            return
        val = etpm.challenge(rnd)
        result = decrypt_block(val, l3key)
    # The nonce must round-trip through the TPM for the detected driver to
    # be used ("e2pc" is exempt from the check).
    if device == "e2pc" or result[80:88] == rnd:
        driver = iNetwork.detectWlanModule(iface)
    else:
        driver = "dreambox"
    print 'Using "%s" as wpa-supplicant driver' % (driver)
    ret = ""
    # Hidden ESSIDs must be forced via iwconfig before wpa_supplicant runs.
    if driver != "dreambox" and config.plugins.wlan.hiddenessid.value:
        ret += "\tpre-up iwconfig " + iface + ' essid "' + re_escape(config.plugins.wlan.essid.value) + '" || true\n'
    ret += "\tpre-up wpa_supplicant -i" + iface + " -c" + getWlanConfigName(iface) + " -D" + driver + " -B\n"
    ret += "\tpost-down killall -q wpa_supplicant\n"
    return ret
def search_room_by_title(self, keyword="", search_mode=None, req_seq=0, limit=20):
    """Find non-support rooms whose title contains *keyword* (case-insensitive),
    newest first, skipping *req_seq* and returning at most *limit* results.

    Returns [] on any database error.

    BUG FIXES: the mutable default ``search_mode={}`` was shared between
    calls; the ``limit`` argument was silently ignored (hard-coded 20).
    """
    if search_mode is None:
        search_mode = {}  # NOTE(review): currently unused -- confirm intent
    try:
        # Keyword is regex-escaped so user input is matched literally.
        return Mongo_Wisewolf.rooms.find(
            {"room_title": {"$regex": "(?i).*" + re_escape(keyword) + ".*"},
             "room_kind": {"$ne": "support"}}
        ).sort("_id", -1).skip(req_seq).limit(limit)
    except Exception as e:
        print("Exception: MongoDao.MongoDao.Mongo_search_room_by_title:", e)
        return []
def find_recipes(page='1', tags=None, exclude=None, meals=None, username=None,
                 forks=None, search=None, featured=None, following=None,
                 favourites=None, preferences=None, sort='views', order='-1',
                 **kwargs):
    '''
    Search function to find recipes based on a set of queries.

    Builds a MongoDB query from the given filters (plus the session user's
    stored preferences/exclusions unless preferences == '-1'), pages the
    results 10 at a time and returns a dict with the cursor, the total
    match count and the resolved page number. Raises 404 for out-of-bounds
    pages.
    '''
    query = {'deleted': {'$ne': True}}
    user = session.get('username')
    if preferences == '-1':
        # Explicitly ignore the stored preferences/exclusions.
        user_preferences = None
        user_exclusions = None
    else:
        user_preferences = session.get('preferences')
        user_exclusions = session.get('exclusions')
    if exists(user_preferences):
        # If user preferences are set, add them to the tags.
        if exists(tags):
            tags = tags + ' ' + user_preferences
        else:
            tags = user_preferences
    if exists(tags):
        # All requested tags must be present on a recipe.
        query['tags'] = {'$all': tags.split(' ')}
    if exists(user_exclusions):
        # If user exclusions are set, add them to the exclusions.
        if exists(exclude):
            exclude = exclude + ' ' + user_exclusions
        else:
            exclude = user_exclusions
    if exists(exclude):
        # None of the excluded tags may be present.
        exclude = exclude.split(' ')
        if query.get('tags'):
            query['tags']['$nin'] = exclude
        else:
            query['tags'] = {'$nin': exclude}
    if query.get('tags'):
        # A tag explicitly asked for wins over the same tag excluded.
        for tag in query['tags'].get('$all', []):
            if tag in query['tags'].get('$nin', []):
                query['tags']['$nin'].remove(tag)
    if exists(meals):
        query['meals'] = {'$all': meals.split(' ')}
    if exists(favourites):
        query['favouriting-users'] = user
    if following is not None:
        # Restrict to authors the session user follows.
        following_user = mongo.db.users.find_one({'username': user},
                                                 {'following': 1})
        if isinstance(following_user.get('following'), list):
            query['username'] = {'$in': following_user['following']}
        else:
            # Not following anybody: page 1 is an empty result set, any
            # other page is out of bounds.
            if page != '1':
                abort(404)
            else:
                return {'recipes': [], 'no_recipes': 0, 'page': 1}
    elif exists(username):
        query['username'] = username
    if exists(forks):
        query['parent'] = forks
    if exists(featured):
        query['featured'] = {'$exists': True}
    if exists(search):
        # Separate out any parts of the search string in double quotes.
        # Bug fix: the quantifier must be non-greedy ('".+?"') — the old
        # greedy '".+"' swallowed everything between the FIRST and LAST
        # quote, merging multiple quoted phrases into one.
        search_strings = None
        if '"' in search:
            search_strings = findall('".+?"', search)
            for search_string in search_strings:
                search = sub(' *' + re_escape(search_string) + ' *', '',
                             search)
        if ' ' in search:
            # Quote each remaining word so $text requires all of them.
            search = split(' +', search)
            search = '"' + '" "'.join(search) + '"'
        if search_strings is not None:
            search += ' ' + ' '.join(search_strings)
        query['$text'] = {'$search': search}
    try:
        order = int(order)
        if order != 1 and order != -1:
            order = -1
    except ValueError:
        order = -1
    try:
        page = int(page)
    except ValueError:
        page = 1
    offset = (page - 1) * 10
    # Count matches first so page bounds can be validated before querying.
    no_recipes = mongo.db.recipes.count_documents(query)
    if page < 1 or (page != 1 and offset >= no_recipes):
        abort(404)  # Out of bounds error
    if no_recipes > 0:
        recipes = (mongo.db.recipes.find(
            query, {'urn': 1, 'title': 1, 'username': 1, 'image': 1,
                    'comment-count': 1, 'favourites': 1})
            .sort(sort, order).skip(offset).limit(10))
    else:
        recipes = []
    return {'recipes': recipes, 'no_recipes': no_recipes, 'page': page}
def normalizePunctuation(self, value):
    """Return *value* with every ASCII punctuation character replaced
    by a single space (one space per character)."""
    import re
    import string
    # Character class covering all of string.punctuation, escaped so
    # regex metacharacters are treated literally.
    pattern = "[%s]" % re.escape(string.punctuation)
    return re.sub(pattern, " ", value)
def __init__(self, regex, escape=False):
    """Store *regex* on the instance, regex-escaping it first when
    *escape* is true."""
    self.regex = re_escape(regex) if escape else regex
def escape_tags(tags_list):
    """Return a new list with every tag in *tags_list* regex-escaped."""
    return list(map(re_escape, tags_list))
# ---- Imports ------------------------------------------------------------ from doctest import DocTestFinder, DocTestSuite from logging import DEBUG, getLogger from os import walk as os_walk from os.path import basename, dirname, isfile, sep as ospath_sep, splitext from re import escape as re_escape, match as re_match from types import ModuleType import dimgx import tests # ---- Constants ---------------------------------------------------------- __all__ = ("load_tests", "mkloadtests") _PATH_RE = r"(" + re_escape(ospath_sep) + r".*)?$" _DOCTEST_ROOTS = (dimgx, tests) _LOGGER = getLogger(__name__) _LOGGER.setLevel(DEBUG) # ---- Functions ---------------------------------------------------------- # ========================================================================= def load_tests(_, tests, __): # pylint: disable=redefined-outer-name """ >>> True is not False True """ in_len = tests.countTestCases()