async def ybdl(context):
    """Download a YouTube or Bilibili video from the URL in *context.arguments*
    and send it to the current chat (as a reply when the command replied to a
    message).  Edits the command message with progress/error text.
    """
    url = context.arguments
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("获取视频中 . . .")
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit("出错了呜呜呜 ~ 无效的参数。")
        return
    bilibili_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?bilibili(\.com)?/.+")
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        if not await fetch_video(url, context.chat_id, reply_id):
            # Fix: previously fell through and logged success / deleted the
            # error message even when the download failed.
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            return
        await log(f"已拉取UTB视频,地址: {url}.")
        await context.delete()
    elif bilibili_pattern.match(url):
        if not await fetch_video(url, context.chat_id, reply_id):
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            return
        await log(f"已拉取 Bilibili 视频,地址: {url}.")
        await context.delete()
class ConsoleInputConverter: _INPUTPATTERN = regex_compile(r'''((?:[^ "']|"[^"]*"|'[^']*')+)''') _INPUTPATTERN2 = regex_compile(r'''(["]*["]|[']*['])''') def __init__(self, logger=None): if not logger: logger = get_default_logger(self.__class__.__name__) self.log = logger def convert(self, user_input: str, to_type='OBJECT'): if len(user_input) < 1: self.log.debug('User input is empty!') return False else: input_list = self._INPUTPATTERN.split(user_input) if len(input_list) < 2: self.log.debug('Invalid input!') return False if input_list[1] in ['p', 'parent', self.parent.name]: commands = self.parent.commands i = 3 else: commands = self.commands i = 1 if len(input_list) < i + 1: self.logger.info('No command given!') continue
def read_module_info(self):
    """Query the modem over AT commands and cache its identity.

    Populates ``self.module_info`` with manufacturer, model, firmware
    revision, IMEI, current operator (COPS) and subscriber number (CNUM).
    Each response is validated with ``err_check`` before use.
    """
    cmd = 'at+gsv'
    readbk = self.write_cmd(cmd=cmd)
    err_check(readbk, cmd)
    manufacturer = readbk[0]
    model = readbk[1]
    # Fix: str.strip('Revision:') removes any of the characters
    # R,e,v,i,s,o,n,: from BOTH ends -- e.g. a revision starting with 'R'
    # would lose it.  Remove the literal prefix exactly once instead.
    rev = readbk[2].replace('Revision:', '', 1)
    cmd = 'at+gsn'
    readbk = self.write_cmd(cmd=cmd)
    err_check(readbk, cmd)
    imei = readbk[0]
    cmd = 'at+cops?'
    readbk = self.write_cmd(cmd=cmd)
    err_check(readbk, cmd)
    # Operator name is reported inside double quotes.
    cops_regex = regex_compile(r'"(.*)"')
    cops = cops_regex.search(readbk[0])
    cops = cops.group(1)
    cmd = 'at+cnum'
    readbk = self.write_cmd(cmd=cmd)
    err_check(readbk, cmd)
    # Subscriber number: first run of 12 digits in the response.
    num_regex = regex_compile(r'\d{12}')
    num = num_regex.search(readbk[0])
    num = num.group()
    self.module_info = MODULE_INFO(manufacturer, model, rev, imei, cops, num)
def _validate_regex(regex):
    """Return a compiled pattern for *regex*.

    Pattern objects are passed through unchanged; strings are anchored with
    ``^``/``$`` before compiling.  Raises ``ConfigurationError`` when the
    string is not a valid regular expression.
    """
    compiled_type = type(regex_compile(''))
    if isinstance(regex, compiled_type):
        # Caller already handed us a compiled pattern -- nothing to do.
        return regex
    anchored = '^{regex}$'.format(regex=regex)
    try:
        return regex_compile(anchored)
    except regex_error:
        raise ConfigurationError('Invalid regex: {regex}'.format(regex=regex))
def test_shibmd_scope_no_regex_all_descriptors():
    """All shibmd:Scope elements of the IdP SSO descriptor are returned,
    with regexp scopes surfaced as compiled patterns."""
    mds = MetadataStore(ATTRCONV, sec_config,
                        disable_ssl_certificate_validation=True)
    mds.imp(METADATACONF["15"])
    scopes = mds.sbibmd_scopes(entity_id='http://example.com/saml2/idp.xml',
                               typ="idpsso_descriptor")
    all_scopes = list(scopes)
    expected = [
        {
            "regexp": False,
            "text": "descriptor-example.org",
        },
        {
            "regexp": True,
            # Fix: raw string -- the old non-raw "\." relied on an invalid
            # string escape (DeprecationWarning on Python 3).  The compiled
            # pattern is identical, so re's pattern cache still makes the
            # equality comparison hold.
            "text": regex_compile(r"descriptor-example[^0-9]*\.org"),
        },
        {
            "regexp": False,
            "text": "idpssodescriptor-example.org",
        },
    ]
    assert len(all_scopes) == 3
    assert all_scopes == expected
async def fetchaudio(context):
    """Fetches audio from provided URL."""
    # Fix: the docstring used to sit in the middle of the function body,
    # where it is just a dead expression; moved to the top.
    if context.arguments:
        if ',' in context.arguments:
            # "<url>,<caption>" -- everything after the first comma is the caption.
            url, string_2 = context.arguments.split(',', 1)
        else:
            url = context.arguments
            string_2 = "#audio "
    else:
        await context.edit(lang('fetchaudio_error_grammer'))
        return
    reply = await context.get_reply_message()
    reply_id = None
    # NOTE(review): `silent` is not defined in this function -- presumably a
    # module-level flag; confirm it exists at runtime.
    if not silent:
        await context.edit(lang('fetchaudio_processing'))
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit(lang('arg_error'))
        return
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        if not await fetch_youtube_audio(url, context.chat_id, reply_id, string_2):
            await context.edit(lang('fetchaudio_error_downloading'))
            # Fix: don't log success or delete the error message on failure.
            return
        await log(
            f"{lang('fetchaudio_success')}, {lang('fetchaudio_link')}: {url}.")
        await context.delete()
def index_generator(
    self,
    folder_ids,
    add_non_nsw_files: bool,
    add_nsw_files_without_title_id: bool,
    success: str = None,
) -> None:
    """Populate ``self.index["files"]`` with entries for files found in the
    given Google Drive folders.

    :param folder_ids: iterable of Drive folder ids to scan
    :param add_non_nsw_files: include files without a switch package extension
    :param add_nsw_files_without_title_id: include files lacking a
        URL-encoded "[16-hex title id]" marker in their name
    :param success: optional message stored under ``index["success"]``
    """
    title_id_pattern = r"\%5B[0-9A-Fa-f]{16}\%5D"
    pattern = regex_compile(title_id_pattern)
    for folder_id in folder_ids:
        files = self.gdrive_service.get_files_in_folder_id(folder_id)
        for (file_id, file_details) in files.items():
            if add_non_nsw_files or file_details["name"][-4:] in (
                ".nsp",
                ".nsz",
                ".xci",
                ".xcz",
            ):
                file_name = url_encode(file_details["name"], safe="")
                # Fix: the old call pattern.search(title_id_pattern, file_name)
                # passed the pattern string as the text and the file name as
                # the ``pos`` argument -- a TypeError at runtime.  Search the
                # URL-encoded file name instead.
                if add_nsw_files_without_title_id or pattern.search(file_name):
                    size = int(file_details["size"])
                    self.index["files"].append({
                        "url": f"gdrive:{file_id}#{file_name}",
                        "size": size,
                    })
    if success is not None:
        self.index.update({"success": success})
def remove_unwanted_chars(text):
    """Return *text* with emoji, pictographs and related special characters
    (joiners, directional marks, astral-plane symbols) removed."""
    emoji_ranges = (
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
        u"\U00002702-\U000027B0"
        u"\U000024C2-\U0001F251"
        u"\U0001f926-\U0001f937"
        u'\U00010000-\U0010ffff'
        u"\u200d"
        u"\u2640-\u2642"
        u"\u2600-\u2B55"
        u"\u23cf"
        u"\u23e9"
        u"\u231a"
        u"\u3030"
        u"\ufe0f"
        u"\u2069"
        u"\u2066"
        u"\u200c"
        u"\u2068"
        u"\u2067"
    )
    pattern = regex_compile("[" + emoji_ranges + "]+", flags=regex_UNICODE)
    return pattern.sub('', text)
def _sanity_check(yamlstring):
    """Sanity check to identify duplicate top-level keys

    If there are duplicate top-level keys, they will simply overwrite
    one-another as they are loaded into a Python dict() so it is a good
    idea to keep this logic, though it isn't very pretty.

    :param yamlstring: raw YAML text
    :return: ``(yamlstring, top_level_keys)`` -- the unchanged text and the
        list of unique top-level keys found.  Exits the process when a
        duplicate key is detected.
    """
    lines = yamlstring.splitlines()
    top_level_keys = []
    duped_keys = []
    # Top-level keys start at column 0: word characters, optional spaces, ':'.
    yaml_key_compiled = regex_compile(r'^([A-Za-z0-9_]+) *:')
    for line in lines:
        matched = yaml_key_compiled.search(line)
        if matched:
            key = matched.group(1)
            if key in top_level_keys:
                duped_keys.append(key)
            else:
                top_level_keys.append(key)
    if duped_keys:
        # Fix: file.write() takes a single string; the old call passed the
        # key list as a second positional argument (TypeError at runtime).
        stderr.write('YaML file %s contains duplicate top-level keys\n'
                     % duped_keys)
        exit(1)
    return yamlstring, top_level_keys
async def fetchaudio(context):
    """Fetches audio from provided URL."""
    # Fix: the docstring used to sit in the middle of the function body,
    # where it is just a dead expression; moved to the top.
    if context.arguments:
        if ',' in context.arguments:
            # "<url>,<caption>" -- everything after the first comma is the caption.
            url, string_2 = context.arguments.split(',', 1)
        else:
            url = context.arguments
            string_2 = "#audio "
    else:
        await context.edit("出错了呜呜呜 ~ 错误的语法。")
        return
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("拉取音频中 . . .")
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit("出错了呜呜呜 ~ 无效的参数。")
        return
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        if not await fetch_youtube_audio(url, context.chat_id, reply_id, string_2):
            await context.edit("出错了呜呜呜 ~ 原声带下载失败。")
            # Fix: don't log success or delete the error message on failure.
            return
        await log(f"从链接中获取了一条音频,链接: {url}.")
        await context.delete()
def ip(self, dc, vm):
    """
    Fetch the IP address of a VM in a datacenter based on configuration
    :param dc: datacenter the VM belongs to
    :type dc: str
    :param vm: the vm instance
    :type vm: ovirtsdk.infrastructure.brokers.VM
    :return: IP address string based on any NIC and REGEX conditions
        configured for the datacenter, or None when nothing matches
    """
    nic_name = self.config.get(dc, OVIRT_NIC_NAME)
    ip_regex = self.config.get(dc, OVIRT_IP_REGEX)
    # No configured regex means "match anything" (empty pattern).
    pattern = regex_compile(ip_regex if ip_regex is not None else '')
    if nic_name is None:
        nics = vm.get_nics().list()
    else:
        nics = [vm.get_nics().get(name='nic_{0:s}'.format(nic_name))]
    addresses = []
    for nic in nics:
        for device in nic.get_reported_devices().get_reported_device():
            addresses.extend(device.get_ips().get_ip())
    # First reported address that satisfies the configured pattern wins.
    for address in addresses:
        candidate = address.get_address()
        if pattern.match(candidate) is not None:
            return candidate
    return None
def is_message_signature_valid(msg):
    """Verify an AWS SNS message signature.

    Checks the signature version, that the signing certificate is hosted on
    an AWS SNS HTTPS URL, downloads the certificate and verifies the SHA1
    signature over the canonical string for the message type.

    :param msg: decoded SNS message dict
    :return: True when the signature verifies; raises Exception otherwise.
    """
    if msg[u'SignatureVersion'] != '1':
        raise Exception('Wrong signature version')
    signing_url = msg[u'SigningCertURL']
    prog = regex_compile(r'^https://sns\.[-a-z0-9]+\.amazonaws\.com/.*$',
                         IGNORECASE)
    if not prog.match(signing_url):
        # Fix: Exception() does not %-format extra arguments the way the
        # logging module does; build the message explicitly.
        raise Exception('Cert is not hosted at AWS URL (https): %s'
                        % signing_url)
    r = rget(signing_url)
    cert = X509.load_cert_string(str(r.text))
    str_to_sign = None
    if msg[u'Type'] == 'Notification':
        str_to_sign = build_notification_string(msg)
    elif any(msg[u'Type'] == s for s in
             ['SubscriptionConfirmation', 'UnsubscribeConfirmation']):
        str_to_sign = build_subscription_string(msg)
    if str_to_sign is None:
        # Fix: unknown message types previously crashed with an
        # AttributeError on None.encode(); raise a clear error instead.
        raise Exception('Unrecognised message type: %s' % msg[u'Type'])
    pubkey = cert.get_pubkey()
    pubkey.reset_context(md='sha1')
    pubkey.verify_init()
    pubkey.verify_update(str_to_sign.encode())
    result = pubkey.verify_final(b64decode(msg['Signature']))
    if result != 1:
        raise Exception('Notification could not be confirmed')
    return True
async def badword_create(
    values: BodyBadWord, response: Response, hood=Depends(get_hood)
):
    """Creates a new badword for hood with id **hood_id**.

    - **pattern**: Regular expression which is used to match a badword.
    """
    try:
        # Reject syntactically invalid regexes before touching the database.
        regex_compile(values.pattern)
        badword = await BadWord.objects.create(hood=hood, **values.__dict__)
        response.headers['Location'] = '%d' % badword.id
        return badword
    except RegexError:
        # Pattern failed to compile -> unprocessable entity.
        raise HTTPException(status_code=status.HTTP_422_UNPROCESSABLE_ENTITY)
    except IntegrityError:
        # Duplicate badword for this hood -> conflict.
        raise HTTPException(status_code=status.HTTP_409_CONFLICT)
def check_match(self, string, pattern):
    """
    Test whether a string matches a string pattern
    """
    # Empty or missing strings never match anything.
    if not string:
        return False
    # Comparison is case-insensitive: normalise both sides first.
    string = string.lower()
    pattern = pattern.lower()
    self.logger.debug(
        'Checking whether string pattern \'{}\' matches to string \'{}\''.
        format(pattern, string))
    # Cheap substring check first, regex as the fallback.
    if pattern in string:
        self.logger.debug('Pattern matches!')
        return True
    if regex_compile(pattern).match(string):
        self.logger.debug('Pattern matches!')
        return True
    self.logger.debug('Pattern does NOT match!')
    return False
def __init__(self):
    # Grammar pieces, assembled into one record-scanning regex.
    quote_rule = r'["]'                   # a quote character
    non_quote_rule = r'(?:[^"]|(?:\"))'   # non-quote, or backslash-escaped quote
    whitespace_rule = r'[ \t\r\n]'        # a whitespace character
    non_whitespace_rule = r'[^ \t\r\n]'   # a non-whitespace character
    # A quoted string cell: quote, any number of permitted chars, quote.
    string_rule = '{0}{1}*{0}'.format(quote_rule, non_quote_rule)
    # A cell is either a quoted string or a run of non-whitespace.
    cell_rule = '(?:{0})|(?:{1}+)'.format(string_rule, non_whitespace_rule)
    # A record captures one cell followed by whitespace or end of input.
    record_rule = '({0})(?:(?:{1}+)|$)'.format(cell_rule, whitespace_rule)
    self.engine = regex_compile(record_rule)
def scan_folder(self, folder_id: str, files_progress_bar: tqdm, recursion: bool,
                add_nsw_files_without_title_id: bool, add_non_nsw_files: bool):
    """Scans the folder id for files and updates the instance index"""
    # URL-encoded "[16-hex-digit title id]" marker inside a file name.
    title_id_pattern = r"\%5B[0-9A-Fa-f]{16}\%5D"
    files = self.gdrive_service.get_all_files_in_folder(
        folder_id, recursion, files_progress_bar)
    pattern = regex_compile(title_id_pattern)
    for (file_id, file_details) in files.items():
        url_encoded_file_name = url_encode(file_details["name"], safe="")
        # Accept when non-NSW files are allowed, or the name has a switch
        # package extension.
        file_valid_nsw_check = add_non_nsw_files or \
            url_encoded_file_name[-4:] in (".nsp", ".nsz", ".xci", ".xcz")
        # Accept when title-id-less files are allowed, or the name carries one.
        file_title_id_check = add_nsw_files_without_title_id or \
            pattern.search(url_encoded_file_name)
        if file_title_id_check and file_valid_nsw_check:
            file_entry_to_add = {
                "url": f"gdrive:{file_id}#{url_encoded_file_name}",
                "size": int(file_details["size"])
            }
            # Avoid duplicate index entries across repeated scans.
            if file_entry_to_add not in self.index["files"]:
                self.index["files"].append(file_entry_to_add)
            # NOTE(review): shared status is recorded only for files passing
            # the filters above -- confirm it shouldn't run for every file.
            self.files_shared_status.update(
                {file_id: file_details["shared"]})
class StoreItem(object):
    """A single store entry loaded from YAML metadata.

    Validation problems are collected in ``self.errors`` / ``self.warnings``
    (instead of raising) so callers can report them all at once; only a
    structurally unusable item raises ``StoreError``.
    """

    # Keys are alphanumeric runs separated by single '-' or '.' characters.
    # Fix: raw string -- the old non-raw '\-'/'\.' relied on invalid string
    # escapes (DeprecationWarning on Python 3).
    validate_key = regex_compile(r'^[A-Za-z0-9]+([\-\.][A-Za-z0-9]+)*$')

    def __init__(self, game, meta_data, existing_keys):
        """Validate *meta_data* and build the item.

        :param game: owning game object (stored as-is)
        :param meta_data: dict parsed from the YAML file
        :param existing_keys: set of keys seen so far; mutated to include
            this item's key so later items can detect duplicates
        :raises StoreError: when meta_data is not a dict or lacks a key
        """
        self.errors = []
        self.warnings = []
        self.path = None
        self.game = game
        self.index = None
        if not isinstance(meta_data, dict):
            raise StoreError('YAML file item must be a dictionary')
        try:
            key = meta_data['key']
        except KeyError:
            raise StoreError('YAML file item missing key property')
        if not self.validate_key.match(key):
            self.error('invalid key format')
        self.key = key
        if key in existing_keys:
            self.error('duplicate key "%s"' % key)
        existing_keys.add(key)
        if 'title' not in meta_data or meta_data['title'] is None:
            self.error('title property missing for store item "%s"' % key)
            self.title = ''
        else:
            self.title = meta_data['title']
        if 'description' not in meta_data or meta_data['description'] is None:
            self.error('description property missing for store item "%s"' % key)
            self.description = ''
        else:
            self.description = meta_data['description']
        if 'icon' in meta_data:
            self.warning('"icon" yaml property has been deprecated please use '
                         '"icon256", "icon48" or "icon32" for store key "%s"' % key)
        self.images = {
            'img256': meta_data.get('icon256', ''),
            'img48': meta_data.get('icon48', ''),
            'img32': meta_data.get('icon32', '')
        }

    def error(self, msg):
        # Record a validation problem without aborting construction.
        self.errors.append(msg)

    def warning(self, msg):
        # Record a non-fatal issue (e.g. deprecated properties).
        self.warnings.append(msg)
async def vdl(context):
    """Download a YouTube (via pytube) or Bilibili video from the URL in
    *context.arguments* and send it to the current chat."""
    url = context.arguments
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("视频获取中 . . .")
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit("出错了呜呜呜 ~ 无效的参数。")
        return
    bilibili_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?bilibili(\.com)?/.+")
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        try:
            from pytube import YouTube
        except ImportError:
            await context.edit(
                '`pytube`支持库未安装,YouTube视频无法下载\n请使用 `-sh pip3 install --user '
                'git+https://github.com/nficano/pytube 或 -sh pip3 install pytube --upgrade ` '
                '安装,或自行ssh安装\n\n已安装过 `pytube3` 的用户请使用 `-sh pip3 '
                'uninstall pytube3 -y` 进行卸载')
            return
        # pytube prefers the short URL form.
        url = url.replace('www.youtube.com/watch?v=', 'youtu.be/')
        try:
            if not await youtube_dl(url, context, reply_id):
                await context.edit("出错了呜呜呜 ~ 视频下载失败。")
                sleep(3)
                # Fix: previously fell through, logging success and deleting
                # the error message even when the download failed.
                return
        except Exception:
            # Fix: narrowed from a bare except, which also swallowed
            # SystemExit/KeyboardInterrupt; and stop instead of logging success.
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            sleep(3)
            return
        await log(f"已拉取 YouTube 视频,地址: {url}.")
        await context.delete()
    elif bilibili_pattern.match(url):
        if not await bilibili_dl(url, context, reply_id):
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            sleep(3)
            return
        await log(f"已拉取 bilibili 视频,地址: {url}.")
        await context.delete()
    else:
        await context.edit("出错了呜呜呜 ~ 无效的网址。")
def _list_modules(self):
    """Walk the current directory tree and collect modules matching the
    configured path/name regexes.

    Fires ``onFound`` for each hit and ``onError`` if the walk fails.

    :return: dict mapping module name -> absolute path
    """
    # Unset filters default to match-everything.
    regex_path = regex_compile(self.regex_path or '.*')
    regex_name = regex_compile(self.regex_name or '.*')
    module_list = {}
    try:
        for root, dirs, files in os.walk("."):
            if self._is_module(files) and self._is_ported(root):
                path = os.path.abspath(root)
                name = os.path.basename(root)
                if regex_path.match(path) and regex_name.match(name):
                    module_list[name] = path
                    self.onFound.fire(FoundEvent(path, name))
    except Exception as ex:
        # Fix: Exception.message was removed in Python 3; use str(ex).
        self.onError.fire(ErrorEvent(str(ex)))
    return module_list
def detect_keyboards():
    """Return /dev/input event device paths for all attached keyboards.

    Parses /proc/bus/input/devices, looking for 'Handlers' lines that list
    the 'kbd' handler, and extracts the associated eventN device name.
    """
    keyboards = []
    # Fix: raw string -- non-raw "\d" is an invalid string escape on Python 3.
    event_regex = regex_compile(r"event\d{0,3}")
    # Fix: use a context manager so the file handle is closed even on error.
    with open('/proc/bus/input/devices', 'r') as devices_file:
        for line in devices_file:
            if 'Handlers' in line and 'kbd' in line:
                keyboards.append('/dev/input/' + event_regex.findall(line)[0])
    return keyboards
def __init__(self):
    # A filter takes the form "<tag>=<value>"; both sides are captured.
    tag_rule = r'[a-zA-Z0-9_]+'   # tag: word characters only
    value_rule = r'.+'            # value: any non-empty text
    self.engine = regex_compile(
        '({tag})=({value})'.format(tag=tag_rule, value=value_rule))
def gprs_get_ip(self):
    """Return the module's IP address for the active GPRS bearer profile.

    :raises TimeoutExpired: when the AT+SAPBR response carries no quoted IP.
    """
    cmd = 'at+sapbr=2,{}'.format(self.gprs_profile.cid)
    readbk = self.write_cmd(cmd=cmd)
    logging.debug('Getting Modules IP Address: %s' % (' '.join(readbk)))
    err_check(readbk=readbk, cmd=cmd)
    # The IP is reported inside double quotes.
    ip_regex = regex_compile(r'"(.*)"')
    match = ip_regex.search(readbk[0])
    # Fix: the old code called .group(1) before checking for a match, so a
    # missing IP raised AttributeError instead of the intended error below.
    if not match or not match.group(1):
        raise TimeoutExpired('Unable to get valid IP address!')
    return match.group(1)
def noop(self):
    """
    Do a noop to test login status
    """
    try:
        raw = self.conn.noop()
        response_text = Helper().byte_to_str(raw[0])
        # An authenticated session answers "Success" or "NOOP completed".
        matched = regex_compile('^(Success|NOOP completed)').match(response_text)
        return self.Retval(True, matched)
    except IMAPClient.Error as error:
        return self.process_error(error)
def gps_stat(self):
    """Query the modem and return the GPS fix status text from AT+CGPSSTATUS."""
    cmd = 'at+cgpsstatus?'
    readbk = self.write_cmd(cmd=cmd)
    logging.debug(msg=str(readbk))
    err_check(readbk, cmd)
    joined = ' '.join(readbk)
    # Status text sits between "+CGPSSTATUS: " and the following space.
    match = regex_compile(r'\+CGPSSTATUS: (.*) ').search(joined)
    return match.group(1)
class Time(object):
    """Validator converting "HH:MM" / "HH.MM" strings to seconds since midnight."""

    # Hours 0-23 (one or two digits), ':' or '.' separator, minutes 00-59.
    TIME_REGEXP = regex_compile(
        r"^(?P<hours>20|21|22|23|[01]\d|\d)([:.](?P<minutes>[0-5]\d))$")

    def __call__(self, value):
        parsed = Time.TIME_REGEXP.match(value)
        if not parsed:
            raise ValidateError(_("Incorrect time format"))
        parts = parsed.groupdict()
        hours = int(parts["hours"])
        minutes = int(parts["minutes"])
        return hours * 3600 + minutes * 60
def __init__(self):
    # A classification takes the form "<tag>=<vtype>"; both sides captured.
    tag_rule = r'[a-zA-Z0-9_]+'               # variable tag
    vtype_rule = r'(?:CTL)|(?:IND)|(?:DEP)'   # control / independent / dependent
    self.engine = regex_compile(
        '({tag})=({vtype})'.format(tag=tag_rule, vtype=vtype_rule))
def parse_optional_condition(tokenized_query):
    """
    Looks for a "where" clause in the input clause.
    :param tokenized_query: the pre-parsed query
    :return: updated parsed query (the same dict, mutated in place)
    """
    result = tokenized_query
    # Pattern queries carry their own "where"; leave them untouched.
    if "pattern" in result["input"].lower():
        return result
    pieces = regex_compile("(WHERE|Where|where)").split(result["input"])
    result["input"] = pieces[0].strip()
    if len(pieces) > 1:
        # pieces == [head, matched-keyword, tail]; the tail is the condition.
        result["condition"] = QueryParser.parse_timer_window(pieces[2].strip())
    return result
class Email(object):
    """Validator for e-mail addresses: checks the local part, then the domain
    (with a whitelist and an IDN-encoding fallback)."""

    USER_REGEX = regex_compile(
        r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"  # dot-atom
        r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"$)',  # quoted-string
        IGNORECASE)
    DOMAIN_REGEX = regex_compile(
        r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'  # domain
        # literal form, ipv4 address (SMTP 4.1.3)
        r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
        IGNORECASE)
    # Domains accepted without further validation.
    DOMAIN_WHITELIST = ["localhost"]

    def __call__(self, value):
        if not value or '@' not in value:
            raise ValidateError(_(u"Email must include @ symbol"))
        user_part, domain_part = value.rsplit('@', 1)
        if not self.USER_REGEX.match(user_part):
            raise ValidateError(_(u"mail box in email is incorrect"))
        if domain_part in self.DOMAIN_WHITELIST:
            return value
        if self.DOMAIN_REGEX.match(domain_part):
            return value
        # Try for possible IDN domain-part
        try:
            idn_domain = domain_part.encode('idna').decode('ascii')
            if self.DOMAIN_REGEX.match(idn_domain):
                return value
        except UnicodeError:
            pass
        raise ValidateError(_(u"Domain part of email is incorrect"))
async def vdl(context):
    """Download a YouTube (via pytube3) or Bilibili video from the URL in
    *context.arguments* and send it to the current chat."""
    url = context.arguments
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("视频获取中 . . .")
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit("出错了呜呜呜 ~ 无效的参数。")
        return
    bilibili_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?bilibili(\.com)?/.+")
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        try:
            from pytube import YouTube
        except ImportError:
            await context.edit(
                '(`pytube3`支持库未安装,YouTube视频无法下载\n请使用 `-sh` `pip3` `install` `pytube3` 安装,或自行ssh安装)'
            )
            return
        # pytube prefers the short URL form.
        url = url.replace('www.youtube.com/watch?v=', 'youtu.be/')
        if not await youtube_dl(url, context, reply_id):
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            sleep(3)
            # Fix: previously fell through, logging success and deleting the
            # error message even when the download failed.
            return
        await log(f"已拉取 YouTube 视频,地址: {url}.")
        await context.delete()
    elif bilibili_pattern.match(url):
        if not await bilibili_dl(url, context, reply_id):
            await context.edit("出错了呜呜呜 ~ 视频下载失败。")
            sleep(3)
            return
        await log(f"已拉取 bilibili 视频,地址: {url}.")
        await context.delete()
    else:
        await context.edit("出错了呜呜呜 ~ 无效的网址。")
async def fetchaudio(context):
    """ Fetches audio from provided URL. """
    url = context.arguments
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("Fetching audio . . .")
    if reply:
        reply_id = reply.id
    if url is None:
        await context.edit("Invalid argument.")
        return
    youtube_pattern = regex_compile(
        r"^(http(s)?://)?((w){3}.)?youtu(be|.be)?(\.com)?/.+")
    if youtube_pattern.match(url):
        if not await fetch_youtube_audio(url, context.chat_id, reply_id):
            await context.edit("The soundtrack failed to download.")
            # Fix: don't log a successful fetch after a failed download.
            return
        await log(f"Fetched audio from {url}.")
def search_for_request(self, fragment: str) -> str: """Return a completed request name.""" # convert the fragment to a useable form. search_reg = regex_compile(escape(fragment.lower())) # get a list of all names from the database self.post_sql("SELECT name FROM requests WHERE completed=0") # iterate over the list and find all matches. returns: List[str] = [] for row in self.cursor.fetchall(): req = search_reg.search(row[0]) if req is not None: returns.append(row[0]) # simpler implementation using list comprehension. # returns = [ # row[0] for row in self.cursor.fetchall() # if search_reg.search(row[0]) is not None # ] # return empty string if no matches are found. if len(returns) < 1: return "" # ask the user for further input if more than one request is found elif len(returns) > 1: print("Possible requests:") for index, item in enumerate(returns): print(f" {index + 1})", item.title()) print("Please enter the number of the desired request:") while True: try: x = int(input(self.prompt)) % len(returns) except ValueError: continue try: return returns[x - 1] except IndexError: continue # otherwise, return the only result else: return returns[0]
def regexp(self, regexp, flags=0):
    """
    Filters stream according to the regular expression using
    :py:func:`re.match`. It also supports the same flags as
    :py:func:`re.match`.

    :param str regexp: Regular expression for filtering.
    :param int flags: Flags from :py:mod:`re`.
    :return: new processed :py:class:`Stream` instance.

    >>> stream = Stream.range(100)
    >>> stream = stream.strings()
    >>> stream = stream.regexp(r"^1")
    >>> list(stream)
    ... ['1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19']
    """
    # Compile once; the bound match method becomes the filter predicate.
    compiled = regex_compile(regexp, flags)
    return self.filter(compiled.match)
def split_input(line):
    # Tokenise *line* into terms, keeping single- or double-quoted spans
    # together.  `_get_from_parsed` (defined elsewhere) presumably normalises
    # each raw fragment into a list of terms -- TODO confirm.
    terms = []
    if line:
        # Matches a quoted span, or (only for a line with no whitespace)
        # a single bare token.
        rprs = regex_compile('\'.*?\'|".*?"|^[\S]*$')
        trivial_parts, interesting_parts = rprs.split(line), rprs.findall(line)
        # re.split yields exactly one more element than findall here.
        assert(len(trivial_parts) == 1 + len(interesting_parts))
        for i, tpart in enumerate(trivial_parts):
            part = _get_from_parsed(tpart)
            if part:
                terms += part
            try:
                part = _get_from_parsed(interesting_parts[i])
            except IndexError:
                # No quoted part follows the final trivial part.
                break
            if part:
                if tpart and not tpart[-1].endswith(' '):
                    # Quoted text glued to the previous fragment (no space
                    # in between): append it onto the last term.
                    terms[-1] += ' '.join(part)
                else:
                    terms += part
    return terms
def check_match(self, string, pattern):
    """
    Test whether a string matches a string pattern
    """
    # Empty or missing strings never match anything.
    if not string:
        return False
    self.logger.debug("Checking whether string pattern '{}' matches to string '{}'".format(pattern, string))
    # Cheap substring check first, regex as the fallback.
    if pattern in string:
        self.logger.debug("Pattern matches!")
        return True
    if regex_compile(pattern).match(string):
        self.logger.debug("Pattern matches!")
        return True
    self.logger.debug("Pattern does NOT match!")
    return False
self._processing_deferreds.append(d) def _process_queued_docs(self): assert(self._has_configured_account()) pending = self._pending_docs log.msg("Mail post-sync hook: processing queued docs") def remove_pending_docs(res): self._pending_docs = [] return res d = self.process_received_docs(pending) d.addCallback(remove_pending_docs) return d _mbox_uuid_regex = regex_compile(constants.METAMSGID_MBOX_RE) _mdoc_chash_regex = regex_compile(constants.METAMSGID_CHASH_RE) def _get_mbox_uuid(doc_id): matches = _mbox_uuid_regex.findall(doc_id) if matches: return matches[0].replace('_', '-') def _get_chash_from_mdoc(doc_id): matches = _mdoc_chash_regex.findall(doc_id) if matches: return matches[0]
from ines.convert import camelcase
from ines.convert import force_string
from ines.convert import force_unicode
from ines.convert import make_sha256
from ines.convert import maybe_unicode
from ines.i18n import translate_factory

# Process-level constants resolved once at import time.
NOW_DATE = datetime.datetime.now
PROCESS_ID = getpid()
DOMAIN_NAME = maybe_unicode(getfqdn())

# See: http://www.regular-expressions.info/email.html
# Fix: raw strings -- the originals relied on invalid "\." string escapes
# being passed through literally (DeprecationWarning on modern Python).
# The resulting pattern is unchanged.
EMAIL_REGEX = regex_compile(
    r"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*"
    r"@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?")


class WarningDict(dict):
    """A dict that emits a UserWarning whenever an existing key is overwritten."""

    def __init__(self, message='Duplicate item "{key}" with value "{value}"'):
        # ``message`` is str.format()-ed with the duplicate key and new value.
        self.message = message

    def __setitem__(self, key, value):
        if key in self:
            warnings.warn(
                self.message.format(key=key, value=value),
                UserWarning,
                stacklevel=2)
        super(WarningDict, self).__setitem__(key, value)
Contains PT-specific Django form helpers. """ from __future__ import unicode_literals from re import compile as regex_compile from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, Select from django.utils.translation import ugettext_lazy as _ from .pt_regions import REGION_CHOICES CITIZEN_CARD_NUMBER_REGEX = regex_compile(r'^(\d{8})-?(\d[A-Z0-9]{2}\d)$') SOCIAL_SECURITY_NUMBER_MULTIPLIERS = [29, 23, 19, 17, 13, 11, 7, 5, 3, 2] SOCIAL_SECURITY_NUMBER_REGEX = regex_compile(r'^[12]\d{10}$') ZIP_CODE_REGEX = regex_compile(r'^[1-9]\d{3}-\d{3}$') class PTCitizenCardNumberField(Field): """ A field which validates Portuguese Citizen Card numbers (locally CC - 'Cartão do Cidadão'). - Citizen Card numbers have the format XXXXXXXXXYYX or XXXXXXXX-XYYX (where X is a digit and Y is an alphanumeric character). - Citizen Card numbers validate as per http://bit.ly/RP0BzW. - The input string may or may not have an hyphen separating the identity number from the document's check-digits. - This field does NOT validate old ID card numbers (locally BI - 'Bilhete de Identidade'). """
return arg else: return Invalid inner.__name__ = str("int_between_{}_and_{}_coerce".format(min, max)) inner.__doc__ = inner.__doc__.format(min, max) return inner else: msg = '"{}" must be less than or equal "{}"' raise ValueError(msg.format(min, max)) # Identifiers and strings # TODO: In Py3k "ña" is a valid identifier and this regex won't allow it _IDENTIFIER_REGEX = regex_compile("(?i)^[_a-z][\w]*$") @coercer def identifier_coerce(arg): """Check if `arg` is a valid Python identifier. .. note:: Only Python 2's version of valid identifier. This means that some Python 3 valid identifiers are not considered valid. This helps to keep things working the same in Python 2 and 3. """ from xoutil.eight import string_types ok = isinstance(arg, string_types) and _IDENTIFIER_REGEX.match(arg) return str(arg) if ok else Invalid
def load(self):
    # Load server configuration from self.configFile into instance
    # attributes (paths, client reject rules, DMS credentials, storage).
    configParser = SafeConfigParser()

    def readConfig(configFile):
        # Logs once per successfully-read file; silent when nothing is read.
        for okFile in configParser.read((configFile.path,)):
            log.msg("Read configuration file: {0}".format(configFile.path))

    def valueFromConfig(section, option, default):
        # Missing section/option (or an empty value) falls back to *default*.
        try:
            value = configParser.get(section, option)
            if value:
                return value
            else:
                return default
        except (NoSectionError, NoOptionError):
            return default

    def filePathFromConfig(section, option, root, segments):
        # Resolve a configured path: absolute paths taken verbatim, relative
        # ones joined under *root*; unset -> root joined with *segments*.
        if section is None:
            path = None
        else:
            path = valueFromConfig(section, option, None)
        if path is None:
            fp = root
            for segment in segments:
                fp = fp.child(segment)
        elif path.startswith("/"):
            fp = FilePath(path)
        else:
            fp = root
            for segment in path.split(os.path.sep):
                fp = fp.child(segment)
        return fp

    readConfig(self.configFile)

    self.ServerRoot = filePathFromConfig(
        "Core", "ServerRoot",
        self.configFile.parent().parent(), ()
    )
    log.msg("Server root: {0}".format(self.ServerRoot.path))

    self.ConfigRoot = filePathFromConfig("Core", "ConfigRoot", self.ServerRoot, ("conf",))
    log.msg("Config root: {0}".format(self.ConfigRoot.path))

    self.UserDB = filePathFromConfig("Core", "UserDB", self.ConfigRoot, ("users.pwdb",))
    log.msg("User DB: {0}".format(self.UserDB.path))

    self.DataRoot = filePathFromConfig("Core", "DataRoot", self.ServerRoot, ("data",))
    log.msg("Data root: {0}".format(self.DataRoot.path))

    self.Resources = filePathFromConfig("Core", "Resources", self.ServerRoot, ("resources",))
    log.msg("Resources: {0}".format(self.Resources.path))

    # Newline-separated client patterns; pre-compiled once at load time.
    rejectClients = valueFromConfig("Core", "RejectClients", "")
    rejectClients = tuple([e for e in rejectClients.split("\n") if e])
    self.RejectClients = rejectClients
    self.RejectClientsRegex = tuple([regex_compile(e) for e in rejectClients])
    log.msg("RejectClients: {0}".format(self.RejectClients))

    # Duty Management System connection settings.
    self.DMSHost = valueFromConfig("DMS", "Hostname", None)
    self.DMSDatabase = valueFromConfig("DMS", "Database", None)
    self.DMSUsername = valueFromConfig("DMS", "Username", None)
    self.DMSPassword = valueFromConfig("DMS", "Password", None)

    # Known incident types offered by the application.
    self.IncidentTypes = (
        "Admin",
        "Art",
        "Assault",
        "Commerce",
        "Echelon",
        "Eviction",
        "Fire",
        "Gate",
        "Green Dot",
        "HQ",
        "Law Enforcement",
        "Lost Child",
        "Medical",
        "Mental Health",
        "MOOP",
        "SITE",
        "Staff",
        "Theme Camp",
        "Vehicle",
        "Junk",
    )

    #
    # Persist some objects
    #
    self.dms = DutyManagementSystem(
        host=self.DMSHost,
        database=self.DMSDatabase,
        username=self.DMSUsername,
        password=self.DMSPassword,
    )

    storage = Storage(self.DataRoot)
    storage.provision()
    self.storage = storage

    self.IncidentTypesJSON = to_json_text(self.IncidentTypes)
import datetime from decimal import Decimal from decimal import InvalidOperation from functools import lru_cache from json import dumps from re import compile as regex_compile from time import mktime from pyramid.compat import is_nonstr_iter from ines import CAMELCASE_UPPER_WORDS DATE = datetime.date DATETIME = datetime.datetime REPLACE_CAMELCASE_REGEX = regex_compile('[^A-Z0-9_.]').sub CLEAR_SPACES_REGEX = regex_compile(' +').sub NULLS = frozenset(['null', '', 'none']) BYTES_REFERENCES = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] VOWEL = frozenset(('a', 'e', 'i', 'o', 'u')) IGNORE_WORDS = frozenset(('by', )) def to_string(value, encoding='utf-8', errors='strict'): if isinstance(value, str): return value elif isinstance(value, bytes): return value.decode(encoding, errors)
return False def is_list(arg): if not isinstance(arg, list): log.error('List value was expected, got %s', str(type(arg))) return False return True # http://stackoverflow.com/a/7160778/1874604 URL_REGEX = regex_compile( r'^(?:http|ftp)s?://' r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' r'localhost|' r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' r'(?::\d+)?' r'(?:/?|[/?]\S+)$', IGNORECASE) def is_url(arg): if not URL_REGEX.match(arg): log.error('URL expected, got "%s"', arg) return False return True def is_references(arg): if not is_list(arg):
############################################################################### if not PY2: from .py3 import capitalize as specific_capitalize else: try: from .py2 import capitalize as specific_capitalize except LookupError: from .py3 import capitalize as specific_capitalize ############################################################################### FIX_PUNCTUATION = regex_compile(r"\s+(?=[\.,!?:;])") SEPARATORS = regex_compile(r"([\[\]\(\)'\"\{\}\.,?!:;])") FIX_LEFT_QUOTES = regex_compile(r"(?<=['\"])\s+") FIX_RIGHT_QUOTES = regex_compile(r"(?<!\w|\d)\s+(?=['\"])", re_UNICODE) FIX_LEFT_BRACES = regex_compile(r"(?<=[\[\(\}])\s+") FIX_RIGHT_BRACES = regex_compile(r"\s+(?=[\]\}\)])") FIX_SPACES = regex_compile(r"\s{2,}") FIX_QUOTES = regex_compile(r"'{2,}") FIX_SHORT_FORMS = regex_compile(r"\s+'(re|s|t)\b", re_IGNORECASE) FIX_ROMAN_NUMERALS = regex_compile( r""" \b M{0,4} (CM|CD|D?C{0,3}) (XC|XL|L?X{0,3}) (IX|IV|V?I{0,3})
from __future__ import unicode_literals from re import compile as regex_compile from re import sub as regex_replace from django.core.validators import EMPTY_VALUES from django.forms import ValidationError from django.forms.fields import Field, RegexField, Select from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from .pt_regions import REGION_CHOICES CITIZEN_CARD_NUMBER_REGEX = regex_compile(r"^(\d{8})-?(\d[A-Z0-9]{2}\d)$") PHONE_NUMBER_REGEX = regex_compile(r"^((00|\+)351)?\d{3,9}$") SOCIAL_SECURITY_NUMBER_MULTIPLIERS = [29, 23, 19, 17, 13, 11, 7, 5, 3, 2] SOCIAL_SECURITY_NUMBER_REGEX = regex_compile(r"^[12]\d{10}$") ZIP_CODE_REGEX = regex_compile(r"^[1-9]\d{3}-\d{3}$") class PTCitizenCardNumberField(Field): """ A field which validates Portuguese Citizen Card numbers (locally CC - 'Cartão do Cidadão'). - Citizen Card numbers have the format XXXXXXXXXYYX or XXXXXXXX-XYYX (where X is a digit and Y is an alphanumeric character). - Citizen Card numbers validate as per http://bit.ly/RP0BzW. - The input string may or may not have an hyphen separating the identity number from the document's check-digits. - This field does NOT validate old ID card numbers (locally BI - 'Bilhete de Identidade').
from os import scandir, remove from os.path import join from werkzeug.utils import secure_filename from re import compile as regex_compile from uchicagoldrapicore.app import app from uchicagoldrapicore.responses.apiresponse import APIResponse from uchicagoldrapicore.lib.apiexceptionhandler import APIExceptionHandler from hierarchicalrecord.hierarchicalrecord import HierarchicalRecord from hierarchicalrecord.recordconf import RecordConf from hierarchicalrecord.recordvalidator import RecordValidator # Globals _ALPHANUM_PATTERN = regex_compile("^[a-zA-Z0-9]+$") _NUMERIC_PATTERN = regex_compile("^[0-9]+$") _EXCEPTION_HANDLER = APIExceptionHandler() _STORAGE_ROOT = app.config['STORAGE_ROOT'] # Most of these are abstracted because they should be hooked # to some kind of database model in the future # # TODO # Probably make these base functions delegators to # implementation specific functions def only_alphanumeric(x):
def __init__(self, regexp, flags=0):
    """Store a compiled regular expression.

    Accepts either a pattern string (compiled here with *flags*) or an
    already-compiled pattern object (stored unchanged; *flags* is then
    ignored).
    """
    compiled = (
        regex_compile(regexp, flags)
        if isinstance(regexp, str)
        else regexp
    )
    self.regexp = compiled
def load(self):
    """Read the service configuration file and populate settings on self.

    Parses the INI file at ``self.configFile``, derives the file-system
    roots (server/config/data/resources), client-reject patterns, time
    zone, feature flags and DMS credentials, then instantiates the DMS
    client and the storage backend (read-only or writable depending on
    the ReadOnly flag).
    """
    configParser = SafeConfigParser()

    def readConfig(configFile):
        # ConfigParser.read() returns the list of files it parsed
        # successfully.  BUG FIX: the original used ``for/else`` with no
        # ``break``, so "No configuration file read." was logged even
        # when a file *was* read; now it is logged only when nothing was.
        okFiles = configParser.read((configFile.path,))
        for okFile in okFiles:
            log.msg("Read configuration file: {0}".format(configFile.path))
        if not okFiles:
            log.msg("No configuration file read.")

    def valueFromConfig(section, option, default):
        # Return the configured value, or ``default`` when the option is
        # missing, empty, or its section does not exist.
        try:
            value = configParser.get(section, option)
            if value:
                return value
            else:
                return default
        except (NoSectionError, NoOptionError):
            return default

    def filePathFromConfig(section, option, root, segments):
        # Resolve a path option: absolute paths are used verbatim;
        # relative paths (and the ``segments`` fallback when the option
        # is unset) are joined under ``root`` one component at a time.
        if section is None:
            path = None
        else:
            path = valueFromConfig(section, option, None)

        if path is None:
            fp = root
            for segment in segments:
                fp = fp.child(segment)
        elif path.startswith("/"):
            fp = FilePath(path)
        else:
            fp = root
            for segment in path.split(pathsep):
                fp = fp.child(segment)

        return fp

    readConfig(self.configFile)

    self.ServerRoot = filePathFromConfig(
        "Core", "ServerRoot",
        self.configFile.parent().parent(), ()
    )
    log.msg("Server root: {0}".format(self.ServerRoot.path))

    self.ConfigRoot = filePathFromConfig(
        "Core", "ConfigRoot",
        self.ServerRoot, ("conf",)
    )
    log.msg("Config root: {0}".format(self.ConfigRoot.path))

    self.UserDB = filePathFromConfig(
        "Core", "UserDB",
        self.ConfigRoot, ("users.pwdb",)
    )
    log.msg("User DB: {0}".format(self.UserDB.path))

    self.DataRoot = filePathFromConfig(
        "Core", "DataRoot",
        self.ServerRoot, ("data",)
    )
    log.msg("Data root: {0}".format(self.DataRoot.path))

    self.Resources = filePathFromConfig(
        "Core", "Resources",
        self.ServerRoot, ("resources",)
    )
    log.msg("Resources: {0}".format(self.Resources.path))

    self.CachedResources = filePathFromConfig(
        "Core", "CachedResources",
        self.ServerRoot, ("cached",)
    )
    log.msg("CachedResources: {0}".format(self.CachedResources.path))

    # RejectClients is a newline-separated list of regex patterns; keep
    # both the raw strings and the compiled forms.
    rejectClients = valueFromConfig("Core", "RejectClients", "")
    rejectClients = tuple([e for e in rejectClients.split("\n") if e])
    self.RejectClients = rejectClients
    self.RejectClientsRegex = tuple([
        regex_compile(e) for e in rejectClients
    ])
    log.msg("RejectClients: {0}".format(self.RejectClients))

    timeZoneName = valueFromConfig(
        "Core", "TimeZone", "America/Los_Angeles"
    )
    environ["TZ"] = timeZoneName
    tzset()  # apply the TZ environment variable to this process
    self.TimeZone = FixedOffsetTimeZone.fromLocalTimeStamp(time())

    # BUG FIX: this previously read the "TimeZone" option (copy/paste
    # error), so the ReadOnly flag could never be enabled via config.
    self.ReadOnly = (
        valueFromConfig("Core", "ReadOnly", "false") == "true"
    )
    log.msg("ReadOnly: {0}".format(self.ReadOnly))

    self.Debug = (
        valueFromConfig("Core", "Debug", "false") == "true"
    )
    log.msg("Debug: {0}".format(self.Debug))

    self.DMSHost = valueFromConfig("DMS", "Hostname", None)
    self.DMSDatabase = valueFromConfig("DMS", "Database", None)
    self.DMSUsername = valueFromConfig("DMS", "Username", None)
    self.DMSPassword = valueFromConfig("DMS", "Password", None)

    self.IncidentTypes = (
        u"Art",
        u"Assault",
        u"Commerce",
        u"Echelon",
        u"Eviction",
        u"Fire",
        u"Gate",
        u"Green Dot",
        u"HQ",
        u"Law Enforcement",
        u"Lost Child",
        u"Medical",
        u"Mental Health",
        u"MOOP",
        u"SITE",
        u"Staff",
        u"Theme Camp",
        u"Vehicle",
        IncidentType.Admin.value,
        IncidentType.Junk.value,
    )

    #
    # Persist some objects
    #

    self.dms = DutyManagementSystem(
        host=self.DMSHost,
        database=self.DMSDatabase,
        username=self.DMSUsername,
        password=self.DMSPassword,
    )

    if self.ReadOnly:
        storageClass = ReadOnlyStorage
    else:
        storageClass = Storage

    storage = storageClass(self.DataRoot)
    self.storage = storage

    self.IncidentTypesJSON = json_as_text(self.IncidentTypes)