def get_sha_sums(mydir):
    """Recursively search *mydir* for all folders and files.

    :param mydir: root directory to scan.
    :return: tuple ``(dirs, files)`` — ``dirs`` is a list of directory
        names and ``files`` maps file names to their SHA-512 hashes.
        Both have the ``client_code_base`` prefix and the first two
        path components (client/device dirs) stripped.
    """
    def _relative(path):
        # Strip the client_code_base prefix and the two leading path
        # components.  This was duplicated twice in the original body
        # (its own "TODO Redundant").
        path = re_sub(client_code_base, '', path)
        return '/'.join(path.split('/')[2:])

    dirs = list()
    files = dict()
    # NOTE: glob('*') intentionally(?) skips dotfiles — kept as-is to
    # preserve the original behavior.
    for entry in (y for x in os.walk(mydir)
                  for y in glob(os.path.join(x[0], '*'))):
        if os.path.isdir(entry):
            # It's actually a directory. Don't attempt SHA-512.
            dirs.append(_relative(entry))
            continue
        files[_relative(entry)] = get_file_sha512(entry)
    return (dirs, files)
def _simplify_dataframe_columns(self, function: str, df: DataFrame) -> "DataFrame | None":
    """Simplify DataFrame column names for the given API *function*.

    Known endpoints get fixed, short column lists; everything else has
    the Alpha Vantage "N. name" prefixes stripped.  All names then get
    " amount" dropped, "adjusted" shortened to "adj" and spaces turned
    into underscores.

    BUGFIX: the return annotation was ``DataFrame or None``, which
    evaluates to plain ``DataFrame`` at definition time; a string union
    preserves the intent without new imports.

    :param function: Alpha Vantage function name.
    :param df: DataFrame whose columns are renamed in place.
    :return: *df* with simplified column names.
    """
    if function == "CURRENCY_EXCHANGE_RATE":
        column_names = [
            "index", "from", "from_name", "to", "to_name", "rate",
            "refreshed", "tz", "bid", "ask"
        ]
    elif function == "OVERVIEW":
        column_names = ["item", "value"]
    elif function in ["CRYPTO_RATING", "GLOBAL_QUOTE"]:
        # Strip the "N. " / "Na. " numbering prefix.
        column_names = [
            re_sub(r'\d+(|\w). ', "", name) for name in df.columns
        ]
    elif function == "SYMBOL_SEARCH":
        column_names = [
            "symbol", "name", "type", "region", "market_open",
            "market_close", "tz", "currency", "match"
        ]
    else:
        column_names = [
            re_sub(r'\d+(|\w). ', "", name) for name in df.columns
        ]
    # Common normalization for every branch.
    column_names = [re_sub(r' amount', "", name) for name in column_names]
    column_names = [re_sub(r'adjusted', "adj", name) for name in column_names]
    column_names = [re_sub(r' ', "_", name) for name in column_names]
    df.columns = column_names
    return df
def clean_sentence(sentence: str,
                   keep_case: Optional[bool] = False,
                   remove_punctuation: Optional[bool] = True,
                   remove_specials: Optional[bool] = True) -> str:
    """Clean up a sentence string.

    :param sentence: Sentence to be cleaned.
    :param keep_case: Keep capitalization as-is (True) or lower-case
        everything (False).
    :param remove_punctuation: Strip punctuation characters?
    :param remove_specials: Strip the <SOS>/<EOS> special tokens?
    :return: Cleaned sentence.
    """
    result = sentence if keep_case else sentence.lower()
    # Drop a forgotten space before punctuation, then collapse any
    # double spaces that remain.
    result = re_sub(r'\s([,.!?;:"](?:\s|$))', r'\1', result)
    result = result.replace('  ', ' ')
    if remove_specials:
        for token in ('<SOS> ', '<sos> ', ' <EOS>', ' <eos>'):
            result = result.replace(token, '')
    if remove_punctuation:
        result = re_sub('[,.!?;:\"]', '', result)
    return result
def make_class(
        cls,
        int_flag_class: Type[IntFlag],
        name: Optional[str] = None,
        prefix: str = '',
        attribute_name_formatter: Optional[Callable[[str], str]] = None
) -> Type[Mask]:
    """
    Dynamically create a new `Mask` child class from an `IntFlag` class.

    :param int_flag_class: An `IntFlag` class with enumeration members to be
        added to the class to be created.
    :param name: The name of the class to be created.
    :param prefix: A prefix of the enumeration member attributes in
        `int_flag_class` that is to be ignored.
    :param attribute_name_formatter: A function that will format the
        attribute names.
    :return: A mask class with attributes corresponding to those in the
        provided `IntFlag` class.
    """
    # Class name defaults to the IntFlag name with any trailing
    # "Flag"/"Mask" suffixes stripped.
    mask_class = type(
        name or re_sub(r'(Flag|Mask)+$', '', int_flag_class.__name__),
        (cls, ), dict())
    attribute_name_formatter: Callable[
        [str], str] = attribute_name_formatter or to_snake_case

    def make_field_property_accessor(enum_member: IntFlag):
        # Dedicated scope so each property closes over its own member
        # (avoids the classic late-binding-in-a-loop bug).
        def field_getter(self) -> bool:
            return enum_member in self._mask

        def field_setter(self, value: bool) -> None:
            # True ORs the member's bits in; False masks them out.
            if value:
                self._mask |= enum_member
            else:
                self._mask &= ~enum_member

        return property(field_getter, field_setter)

    attribute_name_to_false: Dict[str, bool] = {}
    for enum_member in int_flag_class:
        # Attribute name = member name minus `prefix`, then formatted
        # (snake_case by default).
        attribute_name: str = attribute_name_formatter(
            re_sub(pattern=f'^{prefix}', repl='', string=enum_member.name))
        setattr(mask_class, attribute_name,
                make_field_property_accessor(enum_member=enum_member))
        attribute_name_to_false[attribute_name] = False

    def constructor(self, **kwargs):
        # Start from an all-False mask and apply caller overrides;
        # unknown keyword names are rejected explicitly.
        super(mask_class, self).__init__()
        for attribute_name, value in {
                **attribute_name_to_false,
                **kwargs
        }.items():
            if attribute_name not in attribute_name_to_false:
                raise ValueError(
                    f'{attribute_name} is not part of the mask.')
            setattr(self, attribute_name, value)

    setattr(mask_class, '__init__', constructor)
    setattr(mask_class, 'INT_FLAG_CLASS', int_flag_class)
    return mask_class
def os_fix_filename(filename): """Alter filenames containing illegal characters, depending on the OS. Ref: https://stackoverflow.com/questions/1976007/what-characters-are-forbidden-in-windows-and-linux-directory-names/31976060#31976060 """ if system() == "Windows": return re_sub(r"[\<\>\:\"\/\\\|\?\*]+", "", filename) return re_sub(r"[\/]+", r"\\", filename)
def __convert_from_camelcase(phrase):
    """Split a CamelCased string into blank-separated words, each word
    capitalized.

    :param phrase: a possibly CamelCased string.
    :return: the title-cased, space-separated equivalent.
    """
    # http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
    step_one = re_sub('(.)([A-Z][a-z]+)', r'\1_\2', phrase)
    snake = re_sub('([a-z0-9])([A-Z])', r'\1_\2', step_one)
    with_blanks = str(snake.lower().replace("_", " ").capitalize())
    return " ".join(word.capitalize() for word in with_blanks.split(" "))
def ted(url: str) -> str:
    """Scrape a TED.com talk page and dump author/body/title to
    ``articles/<slug>.json``; returns the talk slug (5th URL segment)."""
    print("Extrair dados no TED.com")
    page = req_get(url=url).text
    tree = html.fromstring(page)
    # Title and author share the itemprop="name" meta tags.
    title, author = tree.xpath('//meta[@itemprop="name"]/@content')
    paragraphs = tree.xpath('//div[contains(@class, "Grid__cell")]/p/text()')
    body = "".join(
        re_sub(r"\t", " ", re_sub(r"\n|\t{2}", "", chunk)).strip()
        for chunk in paragraphs
    )
    slug = url.split("/")[4]
    with open(f'articles/{slug}.json', "w") as out:
        json.dump({"author": author, "body": body, "title": title,
                   "type": "video", "url": url},
                  out, ensure_ascii=False, indent=4)
    return slug
def startse(url: str) -> str:
    """Scrape a startse.com article page and dump author/body/title/type
    to ``articles/<slug>.json``; returns the slug (last URL segment)."""
    print("Extrair dados no startse.com")
    _req = req_get(url=url).text
    # The page embeds JSON-LD metadata; strip newlines/tabs/CRs before
    # parsing it.
    _json = json.loads(
        re_sub(r"\n|\t|\r", '',
               html.fromstring(_req).xpath(
                   'string(//script[@type="application/ld+json"])')))
    # Body paragraphs are the spans styled with font-weight 400.
    _body = "".join(list(map(
        lambda x: re_sub(r"\t", " ", re_sub(r"\n|\t{2}", "", x)).strip(),
        html.fromstring(_req).xpath(
            '//span[@style="font-weight: 400;"]/text()'))))
    # NOTE(review): the fixed indices [5] (author) and [4] (headline)
    # assume a specific @graph layout — verify against the live site.
    with open(f'articles/{url.split("/")[-1]}.json', "w") as _file:
        json.dump({"author": _json['@graph'][5]['name'],
                   "body": _body,
                   "title": _json['@graph'][4]['headline'],
                   "type": _json['@graph'][4]['@type'],
                   "url": url},
                  _file, ensure_ascii=False, indent=4)
    return url.split("/")[-1]
def escape_script_tag(text):
    """HTML-escape literal ``<script>``/``</script>`` tags (case
    insensitive) so embedded text cannot start a script element.

    BUGFIX: the replacement strings were identical to the patterns,
    which made the function a no-op; they now emit the escaped
    ``&lt;...&gt;`` entities (the obvious intent of the name).
    """
    return re_sub(
        r'<script>',
        '&lt;script&gt;',
        re_sub(
            r'</script>',
            '&lt;/script&gt;',
            text,
            flags=RE_IGNORECASE
        ),
        flags=RE_IGNORECASE
    )
def norm_key(k: str) -> str:
    """Normalize a key name: lower-case and trim, drop bracket
    characters, map '/' and whitespace runs to single underscores, and
    strip leading/trailing underscores."""
    table = str.maketrans({'/': '_', '(': '', ')': '', '{': '', '}': ''})
    cleaned = str(k).lower().strip().translate(table)
    cleaned = re_sub(r'\s+', '_', cleaned)
    cleaned = re_sub(r'_+', '_', cleaned)
    return re_sub(r'(^_|_$)', '', cleaned)
def sanitize_seq(seq, alphabet):
    """Return *seq* upper-cased, with gap characters turned into '-'
    and any symbol outside *alphabet* turned into 'X'."""
    alphdict = alphabet.todict()
    assert (len(GAPS) > 0 and len(seq) > 0 and len(alphdict) > 0)
    try:
        seq = str(seq).upper()
        # Gaps first, then everything not in the alphabet becomes 'X'.
        seq = re_sub(r'[%s]' % GAPS, '-', seq)
        seq = re_sub(r'[^%s]' % ''.join(alphdict.keys()), 'X', seq)
    except TypeError:
        raise RuntimeError(
            'something is amiss with things:\n GAPS = %s\n seq = %s\n alphabet = %s\n'
            % (GAPS, seq, alphdict))
    return seq
def sanitize_seq(seq, alphabet):
    """Upper-case *seq*, mapping gap characters to '-' and symbols
    missing from *alphabet* to 'X'."""
    alphdict = alphabet.todict()
    assert(len(GAPS) > 0 and len(seq) > 0 and len(alphdict) > 0)
    gap_pattern = r'[%s]' % GAPS
    unknown_pattern = r'[^%s]' % ''.join(alphdict.keys())
    try:
        seq = str(seq)
        seq = seq.upper()
        seq = re_sub(gap_pattern, '-', seq)
        seq = re_sub(unknown_pattern, 'X', seq)
    except TypeError:
        raise RuntimeError(
            'something is amiss with things:\n GAPS = %s\n seq = %s\n alphabet = %s\n' % (
                GAPS, seq, alphdict)
        )
    return seq
def getMesg(self, smtpData):
    """Parse a raw SMTP message: write its headers and text/HTML parts
    into a per-message directory named after the Date header, then hand
    off to getAttach() for attachments.  (Python 2 code.)"""
    global outDIR, verbose, flagM
    if flagM:# Get header/text/html?
        msg = message_from_string(smtpData)
        # Use regex to clean up timestamp for pathname
        msgDIR = os.path.join(outDIR, re_sub('[:,-]','',msg.get("date").replace(' ','_')))
        if debug: print "msgDIR: ", msgDIR
        if os.path.isdir(msgDIR) == False:
            os.makedirs(msgDIR, 0755)
        fh = open(os.path.join(msgDIR, "header"),'w')
        # For each header item
        for i in msg.items():
            if debug: print("%s: %s" % (i[0], i[1]))
            # Write field and value
            fh.write("%s: %s\n" % (i[0], i[1]))
        fh.close()
        for part in msg.walk():
            # Open these files to append just in case there's more than one of this content type. Don't overwrite!
            #RFC6838 & http://www.iana.org/assignments/media-types/media-types.xhtml
            if (part.get_content_type() == "text/plain"):
                if debug or verbose: print "\tSaving text... "
                if debug: print part.get_payload(decode=True)
                fh = open(os.path.join(msgDIR, "message_text"),'a')
                fh.write(part.get_payload(decode=True))
                fh.close()
            elif (part.get_content_type() == "text/html"):
                if debug or verbose: print "\tSaving HTML... "
                if debug: print part.get_payload(decode=True)
                fh = open(os.path.join(msgDIR, "message_HTML"),'a')
                fh.write(part.get_payload(decode=True))
                fh.close()
    else:
        # Header extraction disabled: attachments go straight to outDIR.
        msgDIR = outDIR
    self.getAttach(smtpData, msgDIR)# Get attachments now
def applyConfig(self, ret = False):
    """Confirmation-dialog callback: on OK (ret True) collect the edit
    fields into a mount record, persist it via iAutoMount and trigger a
    remount; on cancel just close the screen."""
    if ret == True:
        # All keys pre-seeded to False so the stored record always has
        # the full shape expected by the mount manager.
        data = {'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, 'username': False, 'password': False, 'mounttype': False, 'options': False, 'hdd_replacement': False}
        data['mountusing'] = self.mountusingConfigEntry.value
        data['active'] = self.activeConfigEntry.value
        data['ip'] = self.ipConfigEntry.getText()
        # \W == "not letters/digits/underscore": sanitize the share name.
        data['sharename'] = re_sub('\\W', '', self.sharenameConfigEntry.value)
        # Store the share dir without a single leading slash.
        if self.sharedirConfigEntry.value.startswith('/'):
            data['sharedir'] = self.sharedirConfigEntry.value[1:]
        else:
            data['sharedir'] = self.sharedirConfigEntry.value
        data['options'] = self.optionsConfigEntry.value
        data['mounttype'] = self.mounttypeConfigEntry.value
        data['username'] = self.usernameConfigEntry.value
        data['password'] = self.passwordConfigEntry.value
        data['hdd_replacement'] = self.hdd_replacementConfigEntry.value
        self.applyConfigRef = None
        # Modal "please wait" box; dismissed from applyConfigfinishedCB.
        self.applyConfigRef = self.session.openWithCallback(self.applyConfigfinishedCB, MessageBox, _('Please wait for activation of your network mount...'), type=MessageBox.TYPE_INFO, enable_input=False)
        # NOTE(review): keyed by the raw (unsanitized) sharename while
        # data['sharename'] holds the sanitized one — confirm this
        # mismatch is intended.
        iAutoMount.automounts[self.sharenameConfigEntry.value] = data
        iAutoMount.writeMountsConfig()
        iAutoMount.getAutoMountPoints(self.applyConfigDataAvail)
    else:
        self.close()
def ok(self):
    """Validate the edited mount settings and route to the right
    follow-up dialog: warn on a share-name collision, offer to update
    an existing entry, or confirm saving a new one.

    BUGFIX: the original compared ``current == self.sharedirEntry``
    twice in the same condition; the duplicate was removed (no behavior
    change).
    """
    current = self["config"].getCurrent()
    # Hide the on-screen-keyboard help window of the entry being left.
    if current in (self.sharenameEntry, self.sharedirEntry,
                   self.optionsEntry, self.usernameEntry,
                   self.passwordEntry):
        if current[1].help_window.instance is not None:
            current[1].help_window.instance.hide()
    # \W == "not letters/digits/underscore": sanitize the share name.
    sharename = re_sub(r"\W", "", self.sharenameConfigEntry.value)
    if self.sharedirConfigEntry.value.startswith("/"):
        sharedir = self.sharedirConfigEntry.value[1:]
    else:
        sharedir = self.sharedirConfigEntry.value
    # A clash = same sanitized name but a different share directory.
    sharexists = False
    for data in self.mounts:
        if self.mounts[data]['sharename'] == sharename:
            if self.mounts[data]['sharedir'] != sharedir:
                sharexists = True
                break
    if sharexists:
        self.session.open(MessageBox, _("A mount entry with this name already exists!\nand is not this share folder, please use a different name.\n"), type = MessageBox.TYPE_INFO )
    elif self.old_sharename != self.sharenameConfigEntry.value:
        self.session.openWithCallback(self.updateConfig, MessageBox, _("You have changed the share name!\nUpdate existing entry and continue?\n"), default=False )
    elif self.mounts.has_key(sharename) is True:  # Python 2 dict API
        self.session.openWithCallback(self.updateConfig, MessageBox, _("A mount entry with this name already exists!\nUpdate existing entry and continue?\n"), default=False )
    else:
        self.session.openWithCallback(self.applyConfig, MessageBox, _("Are you sure you want to save this network mount?\n\n") )
def intraday(self, symbol: str, interval=5, adjusted=True, **kwargs) -> "DataFrame | None":
    """Simple wrapper to _av_api_call method for Intraday requests.

    :param symbol: ticker symbol (upper-cased before the call).
    :param interval: either a string from the supported interval list
        or the equivalent integer number of minutes.
    :param adjusted: request split/dividend-adjusted data.
    :return: whatever _av_api_call returns, or None for an invalid
        interval.

    BUGFIX: the return annotation was ``DataFrame or None`` (which
    evaluates to plain ``DataFrame``); also dropped the no-op
    ``download if download is not None else None``.
    """
    parameters = {
        "function": "TIME_SERIES_INTRADAY",
        "symbol": symbol.upper(),
        "datatype": self.datatype,
        "outputsize": self.output_size,
        "adjusted": "true" if adjusted else "false"
    }
    if isinstance(interval, str) and interval in self.__api_series_interval:
        parameters["interval"] = interval
    elif isinstance(interval, int) and interval in [
            int(re_sub(r'min', "", x)) for x in self.__api_series_interval
    ]:
        parameters["interval"] = f"{interval}min"
    else:
        # Unsupported interval: bail out without hitting the API.
        return None
    return self._av_api_call(parameters, **kwargs)
def replace_entities(text): #{
    """
    Replaces HTML/XML character references and entities in a text string
    with actual Unicode characters.  (Python 2 code: uses unichr.)

    @param text The HTML (or XML) source text.
    @return The plain text, as a Unicode string, if necessary.

    >>> a = '<a href="/abc?a=50&amp;b=test">'
    >>> print replace_entities( a )
    <a href="/abc?a=50&b=test">
    """
    def fixup(m):
        # m matches a whole "&...;" token.
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference (decimal, or &#x... for hex)
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity, e.g. &amp;
            try:
                text = unichr(name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    # NOTE(review): `string(text)` relies on a callable named `string`
    # defined elsewhere; confirm it is not the stdlib `string` module
    # (which is not callable).
    return re_sub("&#?\w+;", fixup, string(text))
def handle_data(self, data):
    """HTMLParser character-data callback: route text according to the
    state flags set by the tag handlers.

    Three modes:
      * inside a result <title>  -> remember the document title,
      * inside a result snippet  -> accumulate the current line,
      * full-document processing -> tokenize, drop stop words and bare
        numbers, stem, and append the tokens to the output file.
    """
    # getting the name of a document result
    if self.in_title and self.not_in_processing:
        self.current_doc_title = data.strip() + " "
    # getting the snapshot of a document result
    elif (not self.in_script) and (
            not self.in_title) and self.in_para and self.not_in_processing:
        # Ignore one-character fragments (stray whitespace/markup).
        if len(data.strip()) > 1:
            self.current_line += data.strip() + " "
    # HTML document processing
    elif (not self.in_script) and (not self.in_cite) and (
            not self.in_span) and (not self.not_in_processing):
        data_list = (re_sub(r"[^a-zA-Z0-9_ ]+", "",
                            data.lower().strip())).split()
        # re.sub = remove non alphanumeric characters from the string;
        # NOTE: this alters the format of hyperlinks
        # do not add raw numbers to the token stream; drop stop words
        # (self.english_words) too.
        rm_dl_stopwords = [
            dl for dl in data_list
            if (dl not in self.english_words) and (not dl.isdigit())
        ]
        stemmed_list = [self.stemmer.stem(dl) for dl in rm_dl_stopwords]
        if len(stemmed_list) > 0:
            for sl in stemmed_list:
                self.out_file.write(sl + " ")
def read_article(hashed=None, keyword=None):
    """Flask view: render one stored article (looked up by the hash of
    its link) plus further unread articles matching *keyword*.

    The article body is split into per-sentence <span>s so the template
    can style the first/middle/last sentences separately; occurrences
    of *keyword* are wrapped in <strong>.
    """
    global HASHED
    hashed = int(hashed)
    if keyword:
        like_keyword(keyword)
    articles = list()
    more_articles = list()
    with Bot() as b:
        if hashed:
            link = None
            try:
                link = DEHASHED[hashed]
            except KeyError:
                # Cache miss: scan the database for a matching hash.
                for article in b.database["articles"]:
                    if hashed == hash(article):
                        link = article
                        break
            if link:
                b.update_article(link, read=True)
                article = dict(b.database["articles"][link])
                article['source'] = __get_source_domain(link)
                article['date'] = time.ctime(article['release'])
                original_content = markdown.markdown(escape(article['content']))
                spaned_content = []
                for paragraph in [p for p in RE_PARAGRAPHS.findall(original_content) if p]:
                    sentences = [s for s in RE_SENTENCES.findall(paragraph) if s]
                    if not sentences:
                        continue
                    elif len(sentences) == 1:
                        spaned_content.append("<p><span>%s</span></p>" % sentences[0])
                    else:
                        # BUGFIX: the middle span used sentences[1:-2],
                        # silently dropping the next-to-last sentence;
                        # [1:-1] keeps every sentence exactly once.
                        spaned_content.append(
                            "<p>%s</p>" % \
                            ("<span>%s</span>"*3 % \
                            (sentences[0], "".join(sentences[1:-1]), sentences[-1]))
                        )
                article['spaned_content'] = " ".join(spaned_content)
                if keyword:
                    article['spaned_content'] = re_sub(r"(%s)" % keyword,
                                                       r"<strong>\1</strong>",
                                                       article['spaned_content'],
                                                       flags=IGNORECASE)
                articles.append(article)
        # Collect the unread articles for the sidebar, ordered by
        # relevance, and refresh the hash -> link cache.
        unread_with_keyword = lambda x: not x["read"] and keyword in x["keywords"]
        more_articles = sorted([x for x in b.database["articles"].values() if unread_with_keyword(x)],
                               key=b.relevance_of_article)
        HASHED.update({hash(x["link"]): x["link"] for x in more_articles})
    return render_template("read.html",
                           style=url_for("static", filename="default.css"),
                           articles=articles,
                           more_articles=more_articles,
                           hashed=HASHED,
                           keyword=keyword)
def _storeArticle(article):
    """Store *article* (a dict) as JSON under BASE_PATH, mirroring the
    path of its 'link' URL, and append the link to the article log.

    :param article: dict with at least 'link' and JSON-serializable
        content.
    :return: True on success.

    BUGFIX: the URL-prefix regex is now anchored and dot-escaped (the
    old 'http://www.spiegel.de/' pattern let '.' match any character
    and could strike mid-string); the log file is opened via ``with``
    so the handle cannot leak.
    """
    # make a path according to the article's link path
    path = re_sub(r'^http://www\.spiegel\.de/', '', article['link']).split('/')
    filename = path.pop(-1)
    storePath = os_path_join(BASE_PATH, os_path_join(*path))
    # create directories if needed
    if not os_path_exists(storePath):
        os_makedirs(storePath)
    # write article as json to the file
    with open(os_path_join(storePath, filename), 'w') as o:
        json.dump(article, o)
    # append the article name to the log ('a' also creates the file on
    # first use, matching the old a/w branching)
    with open(BASE_PATH + 'article_log', 'a') as log:
        log.write(article['link'] + '\n')
    return True
def save_to_firebase(self):
    """Qt slot: validate the sign-up form, create the Firebase auth
    account, persist the user record, then navigate the stacked widget
    according to the logged-in user's access level."""
    validation = self.validate()
    if validation:
        print("validation success")
        email = self.lineEdit_Login.text()
        password = self.lineEdit_Senha.text()
        name = self.lineEdit_nome.text()
        cpf = self.lineEdit_CPF.text()
        #Remove dots and dashes from CPF
        from re import sub as re_sub
        cpf = re_sub('[.-]', '', cpf)
        user = User(email, name, cpf, LevelOfAccess.COMMON_USER)
        # Create the auth account, then store the profile under 'users'.
        auth.create_user_with_email_and_password(email, password)
        db.child('users').push(user.to_dict())
        # Success message box (Portuguese UI strings kept as-is).
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.NoIcon)
        msg.setText("Sucesso")
        msg.setInformativeText("Cadastrado com sucesso!")
        msg.setWindowTitle("Sucesso")
        msg.exec_()
        global loggedUser
        if self.mainWindow:
            # Admins return to page 4, everyone else to page 0.
            if loggedUser != None and loggedUser.level == LevelOfAccess.ADMIN:
                self.mainWindow.stackedWidget.setCurrentIndex(4)
            else:
                self.mainWindow.stackedWidget.setCurrentIndex(0)
    else:
        print("validation error")
def forceNumber(n):
    """Best-effort coercion of an arbitrary string to a float.

    Keeps digits, the first '.' and a single leading '-'; when no digit
    survives the cleanup a '1' is appended, so e.g. '' -> 1.0 and
    '-' -> -1.0.
    """
    cleaned = re_sub(r'[^0-9.-]', '', n)
    # Protect the first dot, drop the rest, restore it.
    cleaned = cleaned.replace('.', '#', 1).replace('.', '').replace('#', '.')
    # Keep at most one minus sign, and only at the front.
    sign = '-' if cleaned.startswith('-') else ''
    cleaned = sign + cleaned.replace('-', '')
    if not any(ch.isdigit() for ch in cleaned):
        cleaned += '1'
    return float(cleaned)
def applyConfig(self, ret = False):
    """Confirmation callback: when confirmed, assemble the mount record
    from the config entries, save it and reload the auto-mounter;
    otherwise close the screen.

    BUGFIX: the defaults dict is now copied — the original assigned
    iAutoMount.DEFAULT_OPTIONS_NFS/_CIFS directly and then mutated it,
    clobbering the shared module-level defaults for every later mount.
    """
    if ret:
        if self._cfgMounttype.value == 'nfs':
            data = dict(iAutoMount.DEFAULT_OPTIONS_NFS)
        else:
            data = dict(iAutoMount.DEFAULT_OPTIONS_CIFS)
        data['active'] = self._cfgActive.value
        data['ip'] = self._cfgIp.getText()
        # "\W" matches everything that is "not numbers, letters, or
        # underscores", where the alphabet defaults to ASCII.
        data['sharename'] = re_sub("\W", "", self._cfgSharename.value)
        # Store the share dir without a single leading slash.
        if self._cfgSharedir.value.startswith("/"):
            data['sharedir'] = self._cfgSharedir.value[1:]
        else:
            data['sharedir'] = self._cfgSharedir.value
        data['options'] = self._cfgOptions.value
        data['mounttype'] = self._cfgMounttype.value
        data['username'] = self._cfgUsername.value
        data['password'] = self._cfgPassword.value
        data['hdd_replacement'] = self._cfgHddReplacement.value
        # Modal "please wait" box; dismissed from applyConfigfinishedCB.
        self._applyConfigMsgBox = self.session.openWithCallback(self.applyConfigfinishedCB, MessageBox, _("Please wait while I'm saving your network mount..."), type = MessageBox.TYPE_INFO, enable_input = False)
        iAutoMount.mounts[self._cfgSharename.value] = data
        iAutoMount.save()
        iAutoMount.reload(self.applyConfigDataAvail)
    else:
        self.close()
def list_archive(self, archive_name): """ function calls << 7z l -slt -ba {container_path}>> function, the output of which looks something like ... Path = first_file.py Size = 123 Path = second_file.py Size = 123 extract the key and the value from the output """ # Parse input container_path = self.clean_filename(archive_name) path_args = format_path_args(container_path) command = self.get_command('list', path_args=path_args) output = execute_subprocess(command) file_list = [{}] for line in output.strip().splitlines(): if not line: file_list.append({}) continue file_item = file_list[-1] key = re_match(r"^\w+", line)[0].lower() value = re_sub(r"^\w+\s=\s", "", line) file_item.update({key: value}) return [f for f in file_list if f]
def applyConfig(self, ret=False):
    """Confirmation-dialog callback: when confirmed, gather the form
    values into a mount record, store it and kick off the remount;
    otherwise just close the screen."""
    if not ret:
        self.close()
        return
    # "\W" matches everything that is "not numbers, letters, or
    # underscores", where the alphabet defaults to ASCII.
    sanitized_name = re_sub("\W", "", self.sharenameConfigEntry.value)
    share_dir = self.sharedirConfigEntry.value
    if share_dir.startswith("/"):
        share_dir = share_dir[1:]
    data = {
        'isMounted': False,
        'active': self.activeConfigEntry.value,
        'ip': self.ipConfigEntry.getText(),
        'sharename': sanitized_name,
        'sharedir': share_dir,
        'username': self.usernameConfigEntry.value,
        'password': self.passwordConfigEntry.value,
        'mounttype': self.mounttypeConfigEntry.value,
        'options': self.optionsConfigEntry.value,
        'hdd_replacement': self.hdd_replacementConfigEntry.value,
    }
    self.applyConfigRef = None
    # Modal "please wait" box; dismissed from applyConfigfinishedCB.
    self.applyConfigRef = self.session.openWithCallback(
        self.applyConfigfinishedCB, MessageBox,
        _("Please wait for activation of your network mount..."),
        type=MessageBox.TYPE_INFO, enable_input=False)
    iAutoMount.automounts[self.sharenameConfigEntry.value] = data
    iAutoMount.writeMountsConfig()
    iAutoMount.getAutoMountPoints(self.applyConfigDataAvail)
def main(args=None):
    """CLI entry point: read a PhyloGz tree file, relabel each leaf with
    its mean value (e.g. IC50) plus the residues at the requested
    feature columns, then print the rewritten tree."""
    if args is None:
        args = sys_argv[1:]

    parser = ArgumentParser(description='')
    parser.add_argument('TREE', type=PathType)
    parser.add_argument('FEATURES', type=feattype)
    ns = parser.parse_args(args)

    tree, alignment, colnames, _ = PhyloGzFile.read(ns.TREE)

    # Keep only the alignment columns whose numeric part was requested.
    icolnames = [(idx, colname) for idx, colname in enumerate(colnames) if int(NUMERIC.sub('', colname)) in ns.FEATURES]

    for r in alignment:
        # labels has length of icolnames plus the ic50
        labels = [None] * (len(icolnames) + 1)
        i = 1
        for idx, colname in icolnames:
            # With several columns, prefix each residue by its column
            # name for disambiguation.
            if len(colnames) > 1:
                labels[i] = colname + r.seq[idx]
            else:
                labels[i] = r.seq[idx]
            i += 1
        try:
            labels[0] = '%.3g' % mean(seqrecord_get_values(r))
        except ValueError:
            # Only internal "node*" records are expected to lack values;
            # anything else is printed for inspection.
            if not (len(r.id) > 4 and r.id[:4].lower() == 'node'):
                print(r)
            labels.pop(0)
        # include the ':' here to make sure we grab the end of the label
        tree = re_sub(r'([,()])' + r.id + r'(?:_[0-9]+)?:', r'\g<1>' + '_'.join(labels) + ':', tree)

    print(tree)

    return 0
def deal_title(self, text):
    """Strip Creole/wiki markup from a title and blank out characters
    that are invalid in file names; returns the trimmed result."""
    rules = (
        ('\\s*[\\*#=\\|]*\\s*(.+?)\\s*$', '\\1'),   # trim + head Creole
        ('(?<!\\\\)\\\\t', ' '),                    # literal \t -> space
        ('\\\\\\\\', '\\\\'),                       # \\ -> \
        ('\\|(\\s*[\\*#=\\|]*)?\\s*', ' '),         # markers after '|'
        ('\\|\\s*$', ''),                           # trailing '|'
        ('\\*{2}(.+)\\*{2}', '\\1'),                # **text**
        ('_{2}(.+)_{2}', '\\1'),                    # __text__
        ('\\/{2}(.+)\\/{2}', '\\1'),                # //text//
        ('"{2}(.+)"{2}', '\\1'),                    # ""text""
        ('-{2}(.+)-{2}', '\\1'),                    # --text--
        ('~{2}(.+)~{2}', '\\1'),                    # ~~text~~
        ('[\\\\/:*?"<>|]', ' '),                    # invalid filename chars
    )
    for pattern, replacement in rules:
        text = re_sub(pattern, replacement, text)
    return text.strip()
def decrypt_file(self, path, password=None):
    """Decrypt a file produced by the matching encryption routine.

    Input layout: [salt][iv][ciphertext][mac].  The MAC is verified
    before any plaintext is written; output goes next to the input with
    the encryption extension stripped.

    :param path: path of the encrypted file.
    :param password: optional password for key derivation.
    :return: path of the decrypted file, or None when _error_handler
        swallows the exception.
    """
    try:
        with open(path, 'rb') as f:
            salt = f.read(self._salt_len)
            iv = f.read(self._iv_len)
            # The MAC is stored at the very end of the file.
            f.seek(file_size(path) - self._mac_len)
            mac = f.read(self._mac_len)
            aes_key, mac_key = self._keys(salt, password)
            # Authenticate before decrypting (encrypt-then-MAC).
            self._verify_file(path, mac, mac_key)
            cipher = self._cipher(aes_key, iv)
            new_path = re_sub(r'\.'+settings.EXTENSION_AES_ENCRYPTION+'$', '', path)
            with open(new_path, 'wb') as f:
                # Stream the ciphertext only: skip the salt+iv header
                # and the trailing MAC.
                chunks = self._file_chunks(
                    path, self._salt_len + self._iv_len, self._mac_len
                )
                for chunk, is_last in chunks:
                    data = cipher.decrypt(chunk)
                    # CBC needs padding stripped from the final block.
                    if self._mode == 'CBC' and is_last:
                        data = unpad(data, AES.block_size)
                    f.write(data)
            return new_path
    except (TypeError, ValueError, IOError) as e:
        self._error_handler(e)
def applyConfig(self, ret=False):
    """Confirmation callback: when confirmed, build the NFS mount record
    from the config entries, save it and reload the auto-mounter;
    otherwise close the screen.

    BUGFIX: the defaults dict is now copied — the original assigned
    iAutoMount.DEFAULT_OPTIONS_NFS directly and then mutated it,
    clobbering the shared module-level defaults for every later mount.
    """
    if ret == True:
        data = dict(iAutoMount.DEFAULT_OPTIONS_NFS)
        data['active'] = self._cfgActive.value
        data['ip'] = self._cfgIp.getText()
        # "\W" matches everything that is "not numbers, letters, or
        # underscores", where the alphabet defaults to ASCII.
        data['sharename'] = re_sub("\W", "", self._cfgSharename.value)
        # Store the share dir without a single leading slash.
        if self._cfgSharedir.value.startswith("/"):
            data['sharedir'] = self._cfgSharedir.value[1:]
        else:
            data['sharedir'] = self._cfgSharedir.value
        data['options'] = self._cfgOptions.value
        data['mounttype'] = self._cfgMounttype.value
        data['username'] = self._cfgUsername.value
        data['password'] = self._cfgPassword.value
        data['hdd_replacement'] = self._cfgHddReplacement.value
        # Modal "please wait" box; dismissed from applyConfigfinishedCB.
        self._applyConfigMsgBox = self.session.openWithCallback(
            self.applyConfigfinishedCB, MessageBox,
            _("Please wait while I'm saving your network mount..."),
            type=MessageBox.TYPE_INFO, enable_input=False)
        iAutoMount.mounts[self._cfgSharename.value] = data
        iAutoMount.save()
        iAutoMount.reload(self.applyConfigDataAvail)
    else:
        self.close()
def search(self, query):
    """Do a Yandex web search for *query*.

    Returns a list of URLs ranked by relevance (as determined by
    Yandex). Raises :py:exc:`~earwigbot.exceptions.SearchQueryError` on
    errors.
    """
    domain = self.cred.get("proxy", "yandex.com")
    url = "https://{0}/search/xml?".format(domain)
    # Keep only ASCII alphanumerics/spaces before quoting the query.
    # NOTE(review): .encode("utf8") yields bytes on Python 3, and the
    # '"' + query + '"' below would then raise TypeError — this code
    # appears to assume Python 2 str semantics; confirm.
    query = re_sub(r"[^a-zA-Z0-9 ]", "", query).encode("utf8")
    params = {
        "user": self.cred["user"],
        "key": self.cred["key"],
        # Exact-phrase search: the query is wrapped in quotes.
        "query": '"' + query + '"',
        "l10n": "en",
        "filter": "none",
        "maxpassages": "1",
        "groupby": "mode=flat.groups-on-page={0}".format(self.count)
    }
    result = self._open(url + urlencode(params))
    try:
        data = lxml.etree.fromstring(result)
        return [elem.text for elem in data.xpath(".//url")]
    except lxml.etree.Error as exc:
        raise SearchQueryError("Yandex XML parse error: " + str(exc))
def get_lyrics(self, title, artist):
    """Fetch lyrics for *title* by *artist* from lyricsmode.com.

    :return: the lyrics text, or None when nothing was found.
    """
    lyrics = ''
    # lyricsmode URLs use lower-case, underscore-separated,
    # ASCII-folded (NFD + ignore) names.
    artist = artist.replace(' ', '_').lower()
    artist = normalize('NFD', artist).encode('ascii', 'ignore')
    title = title.replace(' ', '_').lower()
    title = normalize('NFD', title).encode('ascii', 'ignore')
    # The first path component is the artist's initial character.
    url = ('http://www.lyricsmode.com/lyrics/%s/%s/%s.html' %
           (urllib_quote(artist.decode('utf-8'))[0],
            urllib_quote(artist.decode('utf-8')),
            urllib_quote(title.decode('utf-8'))))
    try:
        page = self.get_html(url)
    except HTTPError:
        # Treat a missing page as "no lyrics".
        page = ''
    # Strip tags inside the lyrics container and unescape the two
    # sequences the site emits.
    clean_reg = re_compile('<.*?>')
    for txt in re_findall(
            '(?s)<p id="lyrics_text" ' +
            'class="ui-annotatable">(.*?)</p>', str(page)):
        txt = re_sub(clean_reg, '', txt)
        txt = txt.replace('\\\'', "'")
        txt = txt.replace('\\n', '\n')
        lyrics = txt
    if lyrics != '':
        return lyrics
    else:
        return None
def go_to_chapter(self, chap_num):
    """
    Make Pyscandl go to the asked chapter.

    :param chap_num: chapter number that was asked for
    :type chap_num: int/str/float
    """
    self.fetcher.go_to_chapter(chap_num)
    # in case windows is the os, remove the banned characters
    if os.name == "nt":
        chapter_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.chapter_name)
    else:
        chapter_name = self.fetcher.chapter_name
    # Per-chapter image folder; downloaded images accumulate in
    # _img_bin_list.
    self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
    self._img_bin_list = []
    # prepares the next pdf path and name: standalone fetchers have no
    # manga-level prefix, and "tiny" mode drops the manga name as well
    if self._tiny:
        if isinstance(self.fetcher, StandaloneFetcher):
            self._pdf_path = f"{self._output}{chapter_name}.pdf"
            self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
        else:
            self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
            self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
    else:
        if isinstance(self.fetcher, StandaloneFetcher):
            self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
            self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
        else:
            self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
            self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def intraday_extended(self, symbol: str, interval=5, slice="year1month1", adjusted=True, **kwargs) -> "DataFrame | None":
    """Simple wrapper to _av_api_call method for Intraday Extended
    requests.

    :param symbol: ticker symbol (upper-cased before the call).
    :param interval: either a string from the supported interval list
        or the equivalent integer number of minutes.
    :param slice: month slice name; applied only when recognized.
    :param adjusted: request split/dividend-adjusted data.
    :return: whatever _av_api_call returns, or None for an invalid
        interval.

    BUGFIX: the return annotation was ``DataFrame or None`` (which
    evaluates to plain ``DataFrame``); also dropped the no-op
    ``download if download is not None else None``.
    """
    parameters = {
        "function": "TIME_SERIES_INTRADAY_EXTENDED",
        "symbol": symbol.upper(),
        "adjusted": "true" if adjusted else "false"
    }
    if isinstance(interval, str) and interval in self.__api_series_interval:
        parameters["interval"] = interval
    elif isinstance(interval, int) and interval in [
            int(re_sub(r'min', "", x)) for x in self.__api_series_interval
    ]:
        parameters["interval"] = f"{interval}min"
    else:
        return None
    if isinstance(slice, str) and slice.lower() in self.__api_slice:
        parameters["slice"] = slice.lower()
    # NOTE: mutates instance state — this endpoint only returns csv.
    self.datatype = "csv"
    download = self._av_api_call(parameters, **kwargs)
    if self.export:
        self._save_df(parameters["function"], download)
    return download
def next_chapter(self):
    """
    Goes to the next chapter and refreshes the per-chapter output
    paths and the PDF metadata names.
    """
    self.fetcher.next_chapter()
    # Windows forbids \/*?:"<>| in file names; blank them out.
    if os.name == "nt":
        chapter_name = re_sub(r'[\\/*?:"<>|]', u"█", self.fetcher.chapter_name)
    else:
        chapter_name = self.fetcher.chapter_name
    self._path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}/"
    self._img_bin_list = []
    # Next pdf path/name: standalone fetchers get no manga-level prefix,
    # and "tiny" mode drops the manga name as well.
    standalone = isinstance(self.fetcher, StandaloneFetcher)
    if self._tiny:
        if standalone:
            self._pdf_path = f"{self._output}{chapter_name}.pdf"
            self._name_metadata_pdf = f"{self.fetcher.chapter_name}"
        else:
            self._pdf_path = f"{self._output}ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
            self._name_metadata_pdf = f"ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
    elif standalone:
        self._pdf_path = f"{self._output}{self.fetcher.manga_name} - {chapter_name}.pdf"
        self._name_metadata_pdf = f"{self.fetcher.manga_name} - {self.fetcher.chapter_name}"
    else:
        self._pdf_path = f"{self._output}{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {chapter_name}.pdf"
        self._name_metadata_pdf = f"{self.fetcher.manga_name} - ch.{self.fetcher.chapter_number} {self.fetcher.chapter_name}"
def replace_entities( text ): #{
    """
    Replaces HTML/XML character references and entities in a text string
    with actual Unicode characters.  (Python 2 code: uses unichr.)

    @param text The HTML (or XML) source text.
    @return The plain text, as a Unicode string, if necessary.

    >>> a = '<a href="/abc?a=50&amp;b=test">'
    >>> print replace_entities( a )
    <a href="/abc?a=50&b=test">
    """
    def fixup(m):
        # m matches a whole "&...;" token.
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference (decimal, or &#x... for hex)
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity, e.g. &amp;
            try:
                text = unichr(name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text # leave as is
    # NOTE(review): `string(text)` relies on a callable named `string`
    # defined elsewhere; confirm it is not the stdlib `string` module
    # (which is not callable).
    return re_sub("&#?\w+;", fixup, string(text))
def add_plugins_zshrc(zshrc):
    """Ensure a fixed set of oh-my-zsh plugins is listed in the zshrc
    ``plugins=(...)`` line.

    :param zshrc: path to the .zshrc file.
    :return: the rewritten zshrc content, or None when every plugin was
        already present.
    """
    plugins = (
        "python",
        "django",
        "pip",
        "pep8",
        "autopep8",
        "zsh-syntax-highlighting",
        "zsh-autosuggestions",
    )
    current = plugins_current_zshrc(zshrc)
    new_plugins = []
    for plugin in plugins:
        if plugin not in current:
            new_plugins.append(plugin)
    if len(new_plugins) > 0:
        current_zshrc = read_zshrc(zshrc)
        # Rebuild the whole plugins=( ... ) line: existing entries
        # first, then the missing ones.
        plugins = f'plugins=({" ".join(current)} {" ".join(new_plugins)})'
        new_zsh_rc = re_sub(rf"^plugins=\(.*", plugins, current_zshrc, flags=re_m)
        snakypy_file_create(new_zsh_rc, zshrc, force=True)
        return new_zsh_rc
    # Nothing to add; implicitly return None.
    return
def replace_match(self, data, replacement, occurences=0, multiline=False):
    """
    Replaces the string which match the regex string with the given
    replacement.

    :param str data: The data to work with.
    :param str replacement: The replacement of the matched regex.
    :param int occurences: The number of occurences to replace.

        .. note::
            :code:`0` means all occurences.

    :rtype: str
    """
    # Non-string replacements are returned untouched.
    if not isinstance(replacement, str):
        return data
    flags = MULTILINE if multiline else 0
    return re_sub(self.regex, replacement, data, occurences, flags=flags)
def _parse_interrupts(cls, node: OMNode) -> List[OMInterrupt]:
    """Parse interrupts definitions.

    :param node: the object model node to parse
    :return: a list of device interrupts
    """
    ints = []
    sections = []
    # Count sections sharing the same base name (text before '@') so
    # duplicates can be disambiguated with a positional suffix below.
    namecount = {}
    for section in node.get('interrupts', []):
        # Only consider well-formed sections tagged as OMInterrupt.
        if not isinstance(section, dict) or '_types' not in section:
            continue
        types = section['_types']
        if 'OMInterrupt' not in types:
            continue
        sections.append(section)
        name = section['name'].split('@')[0]
        if name not in namecount:
            namecount[name] = 1
        else:
            namecount[name] += 1
    if not sections:
        return ints
    for pos, section in enumerate(sections):
        # "name@hexaddr" -> identifier + instance address (default 0).
        names = section['name'].lower().split('@', 1)
        name = re_sub(r'[\s\-]', '_', names[0])
        instance = HexInt(int(names[1], 16) if len(names) > 1 else 0)
        channel = section['numberAtReceiver']
        parent = section['receiver']
        # NOTE(review): namecount was keyed with the original-case name
        # but is looked up with the lower-cased one — a KeyError if a
        # section name contains uppercase; confirm names are lowercase.
        if namecount[names[0]] > 1:
            # Ambiguous base name: append the section position.
            name = f'{name}{pos}'
        ints.append(OMInterrupt(name, instance, channel, parent))
    return ints
def deal_title(self, text):
    """Strip Creole/wiki markup and filesystem-invalid characters from a title."""
    # Ordered (pattern, replacement) pairs — applied sequentially.
    rules = (
        # trim and remove Creole markers at the head/tail
        ('\\s*[\\*#=\\|]*\\s*(.+?)\\s*$', '\\1'),
        # literal \t to space
        ('(?<!\\\\)\\\\t', ' '),
        # \\ to \
        ('\\\\\\\\', '\\\\'),
        # table/heading markers: | * = sequences to a single space
        ('\\|(\\s*[\\*#=\\|]*)?\\s*', ' '),
        # trailing |
        ('\\|\\s*$', ''),
        # **text**
        ('\\*{2}(.+)\\*{2}', '\\1'),
        # __text__
        ('_{2}(.+)_{2}', '\\1'),
        # //text//
        ('\\/{2}(.+)\\/{2}', '\\1'),
        # ""text""
        ('"{2}(.+)"{2}', '\\1'),
        # --text--
        ('-{2}(.+)-{2}', '\\1'),
        # ~~text~~
        ('~{2}(.+)~{2}', '\\1'),
        # characters invalid in file names
        ('[\\\\/:*?"<>|]', ' '),
    )
    for pattern, repl in rules:
        text = re_sub(pattern, repl, text)
    return text.strip()
def applyConfig(self, ret = False):
    """Collect the mount settings from the config entries and activate the mount.

    Used as a MessageBox callback: ``ret`` is the dialog answer.  On
    confirmation the entry values are gathered into a dict, stored under the
    share name in iAutoMount, written to disk and re-mounted; otherwise the
    screen is closed without saving.
    """
    if (ret == True):
        # Template with every key the automount backend expects;
        # 'isMounted' stays False until the backend actually mounts it.
        data = { 'isMounted': False, 'mountusing': False, 'active': False, 'ip': False, 'sharename': False, 'sharedir': False, \
            'username': False, 'password': False, 'mounttype' : False, 'options' : False, 'hdd_replacement' : False }
        data['mountusing'] = self.mountusingConfigEntry.value
        data['active'] = self.activeConfigEntry.value
        data['ip'] = self.ipConfigEntry.getText()
        # "\W" matches everything that is "not numbers, letters, or underscores",
        # where the alphabet defaults to ASCII: sanitize the share name.
        data['sharename'] = re_sub("\W", "", self.sharenameConfigEntry.value)
        # The share directory is stored without a leading slash.
        if self.sharedirConfigEntry.value.startswith("/"):
            data['sharedir'] = self.sharedirConfigEntry.value[1:]
        else:
            data['sharedir'] = self.sharedirConfigEntry.value
        data['options'] = self.optionsConfigEntry.value
        data['mounttype'] = self.mounttypeConfigEntry.value
        data['username'] = self.usernameConfigEntry.value
        data['password'] = self.passwordConfigEntry.value
        data['hdd_replacement'] = self.hdd_replacementConfigEntry.value
        # Show a blocking "please wait" box while the mount is activated.
        self.applyConfigRef = None
        self.applyConfigRef = self.session.openWithCallback(self.applyConfigfinishedCB, MessageBox, _("Please wait for activation of your network mount..."), type = MessageBox.TYPE_INFO, enable_input = False)
        # NOTE(review): the automounts dict is keyed by the *unsanitized*
        # entry value while data['sharename'] holds the sanitized one —
        # confirm this mismatch is intentional.
        iAutoMount.automounts[self.sharenameConfigEntry.value] = data
        iAutoMount.writeMountsConfig()
        iAutoMount.getAutoMountPoints(self.applyConfigDataAvail, True)
    else:
        self.close()
def save_to_firebase(self):
    """Persist the edited user's name, CPF and access level to Firebase.

    Validates the form first; on success looks the user up by e-mail,
    updates the record, shows a confirmation dialog and navigates back to
    the appropriate page.
    """
    validation = self.validate()
    if validation:
        name = self.lineEdit_Nome.text()
        cpf = self.lineEdit_CPF.text()
        # Access level: 1 = admin, 0 = regular user.
        level = 1 if self.radioButton_admin.isChecked() else 0

        #Remove dots and dashes from CPF
        from re import sub as re_sub
        cpf = re_sub('[.-]', '', cpf)

        user_update = {'name': name, 'cpf': cpf, 'level': level}
        # Locate the Firebase record by the (unchanged) e-mail address.
        user = db.child('users').order_by_child("email").equal_to(
            self.editing_user.email).get().each()
        # NOTE(review): assumes exactly one match; user[0] raises IndexError
        # if the e-mail is not found — confirm callers guarantee existence.
        db.child('users').child(user[0].key()).update(user_update)

        # Confirmation dialog (Portuguese UI strings are runtime text and
        # left untouched).
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.NoIcon)
        msg.setText("Sucesso")
        msg.setInformativeText("Alterações salvas com sucesso!")
        msg.setWindowTitle("Sucesso")
        msg.exec_()

        if self.mainWindow:
            # Admins return to the user-management page (index 4);
            # everyone else goes back to the start page (index 0).
            if loggedUser != None and loggedUser.level == LevelOfAccess.ADMIN:
                self.mainWindow.stackedWidget.setCurrentIndex(4)
            else:
                self.mainWindow.stackedWidget.setCurrentIndex(0)
async def urban_func(answers, text):
    """Append inline-query results for an Urban Dictionary lookup of *text*."""
    results = await arq.urbandict(text)
    if not results.ok:
        # Surface the API error as a single inline result.
        answers.append(
            InlineQueryResultArticle(
                title="Error",
                description=results.result,
                input_message_content=InputTextMessageContent(results.result),
            ))
        return answers

    def _clean(value):
        # Urban Dictionary wraps cross-references in square brackets.
        return re_sub(r"[\[\]]", "", value)

    # Telegram caps inline results, so keep at most 48 definitions.
    for entry in results.result[0:48]:
        msg = f"""
**Query:** {text}
**Definition:** __{_clean(entry.definition)}__
**Example:** __{_clean(entry.example)}__"""
        answers.append(
            InlineQueryResultArticle(
                title=entry.word,
                description=_clean(entry.definition),
                input_message_content=InputTextMessageContent(msg),
            ))
    return answers
def gallery(offset=0, number=12, since=259200, keyword=None):
    """Arrangement of unread articles."""
    global HASHED, DEHASHED
    offset = int(offset)
    number = int(number)
    back_then = int(since)  # NOTE(review): unused — 'since' is passed on unconverted
    HASHED = dict()
    DEHASHED = dict()
    with Bot() as b:
        articles = b.hot_articles(offset, number, since, keyword)
        watched_keywords = frozenset(b.database["keyword_clicks"].keys())
        for article in articles:
            link = article["link"]
            if not article["keywords"]:
                # No keywords at all: mark as read and drop from the gallery.
                b.update_article(link, read=True)
                continue
            # generate and remember hash values (bidirectional link <-> hash maps)
            HASHED[link] = hash(link)
            DEHASHED[hash(link)] = link
            # split headline into links
            split_headline = unicode(escape(article["title"].lower())).split(" ")
            # Longest keywords first so the most specific one wins per word.
            sorted_kwords = sorted(article["keywords"], key=len, reverse=True)
            linked_headline = []
            contained_watched_keywords = watched_keywords & set(sorted_kwords)
            for word in split_headline:
                kwords = [kw for kw in sorted_kwords if kw.lower() in word.lower()]
                if not kwords:
                    continue
                # Each matched word becomes a "read because of keyword" link.
                template = r"""<a href="/read/%s/because/of/\1" target="_blank">\1</a>"""
                if word in contained_watched_keywords:
                    # Highlight words the user has clicked on before.
                    template = "<i>%s</i>" % template
                linked_headline.append(
                    re_sub(r"(%s)" % kwords[0], template % HASHED[link], word,
                           flags=IGNORECASE))
            if not linked_headline:
                continue
            article["linked_headline"] = " ".join(linked_headline)
        # NOTE(review): this comprehension has no effect — hash() already
        # returns ints and the result is discarded; looks like leftover
        # debug/verification code.
        [int(k) for k in HASHED.values()]
        # prepare data sets for gallery
        scores = {a["link"]: b.relevance_of_article(a) for a in articles}
        scores["all"] = sum([b.relevance_of_article(x) for x in articles])
        content = render_template("gallery.html",
                                  style=url_for("static", filename="default.css"),
                                  articles=articles,
                                  new_offset=offset + 1,
                                  hashed=HASHED,
                                  scores=scores)
    return content
def plot_save_regressions(regressions_for_noise_amount, net_filename):
    """Plot regression statistics per noise level and save them to image files.

    :param regressions_for_noise_amount: 2-D array-like; one row per noise
        level, columns are the 6 regression statistics returned by
        ffnet.test (slope, intercept, r-value, p-value, slope stderr,
        estimation stderr).
    :param net_filename: network file name; its stem prefixes the plot files.
    """
    from pylab import (imshow,subplot,bar,xticks,xlim,axhline,title,
                       xlabel,ylabel,arange,show,cm,figure,savefig,save,imsave)
    # Strip the extension: plot files are named after the network file.
    name = net_filename.rsplit('.', 1)[0]

    # how many noise levels we have to draw
    N = len(regressions_for_noise_amount)
    print("Will plot for for {} noise levels...".format(N))
    ind = arange(N)  # the x locations for the groups
    print("ind = {}".format(ind))
    width = 0.35  # the width of the bars

    # projection id -> name, as returned into tuples by http://ffnet.sourceforge.net/apidoc.html#ffnet.ffnet.test
    y_name = ["slope", "intercept", "r-value", "p-value", "slope stderr",
              "estim. stderr"]
    for projection_id in range(6):
        # todo has bug? how do i select the data
        #subplot(11 + projection_id * 100)  # a new plot
        figure()  # one figure per statistic
        projection_name = y_name[projection_id]
        ylabel(projection_name)
        print("Plotting for projection: " + projection_name)
        # Column 'projection_id' across all noise levels.
        projections = regressions_for_noise_amount.T[projection_id]
        print("Projections on {} tuple field ({}) = {}".format(projection_id, projection_name, projections))
        title(projection_name + " for noise levels...")  # todo change me?
        # One bar per noise level.
        for i in ind:
            bar(i, projections[i], width, color='b')  # plot it
        # bar(ind, projections[ind], width, color='b')  # plot it
        xticks(ind+width/2., range(0, N))  # todo print noise levels
        xlim(-width,N-width)
        axhline(linewidth=1, color='black')
        xlabel("Noise amount")
        # debug uncomment to look at graphs
        # show()
        plot_output_formats = ['png', 'eps']
        for format in plot_output_formats:
            # Slug for the file name: any non-letter becomes an underscore.
            plot_name = re_sub( "[^a-z]", "_", y_name[projection_id].lower() )
            plot_filename = "{}_plot_{}.{}".format( name, plot_name, format)
            savefig(plot_filename, orientation='portrait')
            print("Saved plot as: {}.".format(plot_filename))
def updateKey(self):
    """Extract the update key (query string) from ``self.url``.

    Returns '' when no URL is set.  Note: when the URL does not match the
    expected ``update.php?<key>`` shape, ``re_sub`` leaves it untouched,
    so the full URL is returned as the key.
    """
    if self.url == '':
        return ''
    # Use regex to extract update_key
    return re_sub(r"^.*update.php\?([0-9A-Za-z=]*)$", r'\1', self.url)
def splitReadheader(self, alignedRead, dontTrustSamFlags=False):
    """Split the read header to determine the common part of a pair and its pair ref.

    :param alignedRead: pysam-style aligned read with ``qname`` and ``is_read1``.
    :param dontTrustSamFlags: when True, derive the end (1 or 2) from the
        read name suffix instead of the SAM flags.
    :return: tuple ``(query, end)`` — the name without the pair suffix and
        1, 2, or None when the end cannot be determined.
    """
    # Raw strings: the original "[_/\.]..." literals relied on invalid
    # escape sequences (DeprecationWarning in Python 3).
    query = re_sub(r"[_/\.][12]$", '', alignedRead.qname)  # this will not affect illumina headers
    if dontTrustSamFlags:
        # try work out pairing info from the read header itself
        try:
            end = int(re_sub(r".*[_/\.]", '', alignedRead.qname))
        except ValueError:
            # Suffix is not numeric (or no separator): pairing info unavailable.
            # (Narrowed from a bare except: int() is the only raiser here.)
            return (query, None)
        if end != 1 and end != 2:
            return (query, None)
    else:
        end = 1 if alignedRead.is_read1 else 2
    return (query, end)
def check_url_fname(fname, fname_max_len):
    """Check file name given in URL after last slash / and remove not allowed chars.

    Returns the sanitized name, or 0 when it exceeds ``fname_max_len``.
    """
    # Drop control characters and slashes, then trim surrounding whitespace.
    cleaned = re_sub("[\x00-\x19/]", "", fname).strip()
    return 0 if len(cleaned) > fname_max_len else cleaned
def fetch(self):
    """
    Fetch the WebFinger profile and return the XML document.
    """
    template_url = self._get_template()
    # The template contains a literal "{uri}" placeholder; a plain string
    # replace does the job and avoids the original regex whose "\{uri\}"
    # pattern is an invalid escape sequence in Python 3.
    target_url = template_url.replace(
        "{uri}",
        quote_plus(self.request_email.scheme + ":" + self.request_email.path),
    )
    return etree.parse(urlopen(target_url))
def getvendorexact(raw, vendors_str):
    """Return the first vendor from *vendors_str* (comma-separated) found as a
    substring of the title *raw*, or 'Other' when none matches.

    Brackets and commas are padded with spaces so bracketed vendor names
    still match.
    """
    brand = 'Other'
    vendors = vendors_str.split(',')
    # BUG FIX: the replacement must be a raw string — the original ' \1 '
    # inserted the control character chr(1) instead of the bracket
    # backreference.
    sep_title = re_sub(r'([\(\)\[\]\{\},])', r' \1 ', raw)
    title_words = sep_title.lower()
    for brnd in vendors:
        if brnd.lower() in title_words:
            brand = brnd
            break
    return brand
def add_schedule(schedule, element):
    """Parse departure times out of *element*'s text and append an entry to *schedule*."""
    name = normalize("NFKD", unicode(element.string)).strip()
    if not len(name):
        # Empty element: nothing to record.
        return
    # Times look like "9.30", "9,30" or "9:30"; the hour part may be absent.
    timetable = []
    for time in re_findall("\d*[,.:]\d{2}", name):
        hour, minute = re_sub("[,.]", ":", time).split(":")
        if len(hour):
            # Zero-pad the hour, keep the minutes verbatim.
            timetable.append("{:02d}:{:s}".format(int(hour), minute))
    schedule.append({"time": timetable, "name": name})
def asterisk_string(u, no_space=False):
    """Convert arbitrary unicode string to a string acceptable by Asterisk

    Parameters:
        u: unicode string
        no_space: when truthy, additionally replace non-word characters
            with underscores
    """
    # Decompose accents (NFKD), then drop anything outside ASCII.
    result = normalize("NFKD", u).encode("ascii", "ignore")
    if no_space:
        result = re_sub(r"\W", "_", result)
    return result
def ask_for_file(s, filename):
    """Send a minimal HTTP GET request for *filename* over socket *s*.

    Leading path-traversal prefixes ("../", "./", "/") are stripped so the
    requested path is always relative to the server root.
    """
    from re import sub as re_sub
    # BUG FIX: the original pattern "^[(\.\./)|(\./)|(/)]+" was a character
    # class matching any of "().|/", and its "/" replacement produced a
    # double slash in the request line.  Strip the prefixes properly.
    filename = re_sub(r"^(?:\.\./|\./|/)+", "", filename)
    message = "GET /%s HTTP1.1\r\n\r\n" % filename
    try:
        s.sendall(message)
    except socket.error:
        # Python 3 compatible prints (the original used py2 print statements).
        print("Send failed")
        exit(-1)
    print("Message send successfully")
def get_task_results(context):
    """Parse the celery log tail into a DataFrame of (task_status, task_name, task_id).

    Reads /tmp/aprinto_celery_tail, extracts the status, task name and id of
    each "task" line, stores the resulting DataFrame on
    ``context.celery_results`` and returns *context*.
    """
    cols = ['task_status', 'task_name', 'task_id']
    rows = []
    with open('/tmp/aprinto_celery_tail', 'r') as f:
        for line in f:
            parts = line.split()
            # Guard against short/blank lines (the original indexed blindly
            # and raised IndexError).
            if len(parts) <= 3 or parts[3] != 'task':
                continue
            # "<junk> task <STATUS>: document_processing.<NAME>(<ID>) ..."
            # -> "STATUS,NAME,ID" (raw strings fix the original's invalid
            # escape sequences).
            t = re_sub(r'(.*)(\btask\b\s)([^:]+)(: document_processing\.\b)([^\(]+)\(([^\)]+)\)(.*)',
                       r'\g<3>,\g<5>,\g<6>', line).replace('\n', '')
            rows.append(dict(zip(cols, t.split(','))))
    # DataFrame.append was removed in pandas 2.x; build from records instead.
    context.celery_results = pd_DataFrame(rows, columns=cols)
    return context
def full_sync(msg_num,all_mail_uids,g_msg_ids,msg_ids):
    """Insert *msg_num* fetched Gmail messages into the ``gmail`` table.

    Closure: relies on ``msg_grp`` (the fetched message batch) and ``self``
    (DB/transcoding helpers) from the enclosing scope.  Each message is
    stored as JSON together with its all-mail UID and Gmail message id;
    duplicates (by uid or g_msg_id) are skipped by the WHERE clause.
    """
    # NOTE(review): msgs_as_json is indexed below — Python 2 map() semantics
    # (Python 3 would need list(...)).
    msgs_as_json = map(lambda m: self._msg_to_json(m),msg_grp)
    cmd = unicode("",encoding='utf8',errors='ignore')
    for i in range(msg_num):
        D = {'orig_msg' : msgs_as_json[i],
             'all_mail_uid' : all_mail_uids[i],
             'g_msg_id' : g_msg_ids[i],
             'msg_id' : msg_ids[i]}
        # Insert-if-absent: the FROM subqueries aggregate existing ids and
        # the WHERE clause rejects the row when either id is already present.
        upsert = """
        INSERT into gmail ( orig_msg, all_mail_uid, g_msg_id, msg_id )
        SELECT to_json($txt$%(orig_msg)s$txt$::text)::jsonb,%(all_mail_uid)s,%(g_msg_id)s,'%(msg_id)s'
        FROM (
            SELECT array_agg(all_mail_uid) all_uids FROM gmail
        ) as f1, (
            SELECT array_agg(g_msg_id) all_g_m_ids FROM gmail
        ) as f2
        -- msg_id ignored as sampling showed such value was not unique to each msg
        -- (
        --     SELECT array_agg(msg_id) all_m_ids FROM gmail
        -- ) as f3
        WHERE (
            not all_uids @> array['%(all_mail_uid)s'::bigint]
            AND not all_g_m_ids @> array['%(g_msg_id)s'::bigint]
            --AND not all_m_ids @> array['%(msg_id)s'::text]
        ) OR (
            all_uids is null
            OR all_g_m_ids is null
            --OR all_m_ids is null
        )
        ;
        """
        _out = upsert % D
        # Strip non-ASCII bytes so the accumulated command stays plain ASCII.
        _out = re_sub(r'[^\x00-\x7F]+',' ', _out)
        cmd += unicode(self.T.codecs.encode(_out,'ascii','ignore'),errors='ignore')
        # cmd += unicode(upsert,errors='ignore') if type(upsert) is not unicode else upsert
    # Autocommit, then run all accumulated INSERTs in one round-trip.
    self.T.conn.set_isolation_level(0)
    self.T.cur.execute( cmd)
def phone_numbers(self, id_, timestamp, soup):
    """Extract phone numbers from the listing soup and store them in redis.

    The phone element's text is split on whitespace; a non-digit token
    (e.g. "Tel:") terminates a number made of the digit tokens after it,
    joined with dashes.

    :param id_: listing id used in the redis key.
    :param timestamp: unused here; kept for interface compatibility.
    :param soup: BeautifulSoup document of the listing page.
    """
    phones = set()
    phone_class = soup.find(**{'class':"phone1"})
    if phone_class:
        phone_strs = phone_class.text.split()
        # Drop the decorative '+', '(' and ')' characters.
        phone_strs = [re_sub('[+()]', '', z) for z in phone_strs]
        # Walk backwards: each non-digit token closes off one number.
        for i in reversed(range(len(phone_strs))):
            if not phone_strs[i].isdigit():
                phones.add('-'.join(phone_strs[i+1:]))
                phone_strs = phone_strs[:i]
    # BUG FIX: sadd() with zero members is a redis error — only store when
    # we actually found phone numbers (e.g. no element / all-digit text).
    if phones:
        self.redis.sadd('daftpunk:%s:phone_numbers' % id_, *phones)
def get_conf(fpath):
    """Parse configuration file and return as dict"""
    assert isfile(fpath), "config file not found"
    conf = ConfigParser()
    conf.read(fpath)

    # All of these sections are mandatory.
    for section in ("server", "http_codes", "get", "put", "ssl"):
        assert conf.has_section(section), "<%s> section not found" % section

    # Copy everything into a plain nested dict, stripping whitespace.
    settings = {}
    for section in conf.sections():
        settings[section] = {
            option: conf.get(section, option).strip()
            for option in conf.options(section)
        }

    # Remove slashes in the end of paths
    for section, option in (("get", "base_dir"), ("put", "base_dir"),
                            ("ssl", "verify_loc")):
        settings[section][option] = re_sub("/+$", "", settings[section][option])

    # Numeric coercions.
    for cast, items in (
        (int, (("server", "port"), ("ssl", "enable"),
               ("ssl", "verify_client"), ("put", "fname_max_len"))),
        (float, (("server", "timeout"), ("server", "wait"))),
    ):
        for section, option in items:
            settings[section][option] = cast(settings[section][option])

    # File/directory modes are written in octal.
    for section, option in (("put", "dirs_mode"), ("put", "files_mode")):
        settings[section][option] = int(settings[section][option], 8)

    # Comma-separated lists, lower-cased.
    for section, option in (("get", "order"),):
        settings[section][option] = [item.strip().lower()
                                     for item in settings[section][option].split(",")]

    # Zero means "no timeout" / "no wait".
    if not settings["server"]["timeout"]:
        settings["server"]["timeout"] = None
    if not settings["server"]["wait"]:
        settings["server"]["wait"] = None
    return settings
def update_content_length(self, message_bytes, is_request):
    """Recompute and patch the Content-Length header of an HTTP message.

    :param message_bytes: raw request or response bytes.
    :param is_request: selects the request vs. response analyzer.
    :return: the message bytes with a corrected Content-Length header.
    """
    analyze = self.helpers.analyzeRequest if is_request else self.helpers.analyzeResponse
    message_info = analyze(message_bytes)
    # Body length = total length minus the header block.
    content_length = len(message_bytes) - message_info.getBodyOffset()
    msg_as_string = self.helpers.bytesToString(message_bytes)
    # Patch only the first Content-Length occurrence (the header).
    patched = re_sub(
        'Content-Length: \d+\r\n',
        'Content-Length: {}\r\n'.format(content_length),
        msg_as_string,
        1,
    )
    return self.helpers.stringToBytes(patched)
def fetch(self):
    """
    Fetch the WebFinger profile and return the XML document.
    """
    template_url = self._get_template()
    # The template holds a literal '{uri}' placeholder; a plain str.replace
    # does the job and avoids the original regex whose '\{uri\}' pattern is
    # an invalid escape sequence in Python 3.
    target_url = template_url.replace(
        '{uri}',
        quote_plus(
            self.request_email.scheme + ':' + self.request_email.path
        ),
    )
    req = Request(target_url)
    req.add_header('User-Agent', USER_AGENT)
    return etree.parse(urlopen(req))
def ok(self):
    """Validate the edited mount settings and ask the user how to proceed.

    Hides an open help window of the focused entry, then picks between
    "update existing entry" and "save new mount" confirmation dialogs,
    depending on whether the share name changed or already exists.
    """
    current = self["config"].getCurrent()
    # NOTE(review): 'self.sharedirEntry' is compared twice — one of the two
    # was probably meant to be a different entry; confirm which.
    if (
        current == self.sharenameEntry
        or current == self.sharedirEntry
        or current == self.sharedirEntry
        or current == self.optionsEntry
        or current == self.usernameEntry
        or current == self.passwordEntry
    ):
        # Close the on-screen keyboard/help popup of the focused entry.
        if current[1].help_window.instance is not None:
            current[1].help_window.instance.hide()
    # Sanitized share name ("\W" = anything but ASCII letters/digits/underscore).
    sharename = re_sub("\W", "", self.sharenameConfigEntry.value)
    # Share dir is stored without its leading slash.
    if self.sharedirConfigEntry.value.startswith("/"):
        sharedir = self.sharedirConfigEntry.value[1:]
    else:
        sharedir = self.sharedirConfigEntry.value
    # NOTE(review): 'sharename' and 'sharedir' computed above are not used
    # later in this method — confirm whether the checks below were meant to
    # use them.
    sharexists = False
    for data in self.mounts:
        if self.mounts[data]["sharename"] == self.old_sharename:
            sharexists = True
            break
    if not self.newmount and self.old_sharename and self.old_sharename != self.sharenameConfigEntry.value:
        # Share was renamed: ask before migrating the existing entry.
        self.session.openWithCallback(
            self.updateConfig,
            MessageBox,
            _("You have changed the share name!\nUpdate existing entry and continue?\n"),
            default=False,
        )
    elif (
        not self.newmount
        and self.old_sharename
        and self.old_sharename == self.sharenameConfigEntry.value
        and sharexists
    ):
        # Same name, already on disk: ask before overwriting.
        self.session.openWithCallback(
            self.updateConfig,
            MessageBox,
            _("A mount entry with this name already exists!\nUpdate existing entry and continue?\n"),
            default=False,
        )
    else:
        # Brand-new mount: plain save confirmation.
        self.session.openWithCallback(
            self.applyConfig,
            MessageBox,
            _("Are you sure you want to save this network mount?\n\n")
        )
def quick_sync(msg_num,all_mail_uids,g_msg_ids,msg_ids):
    """Insert message identifiers into the ``gmail_chk`` checkpoint table.

    Closure: uses ``self`` (DB/transcoding helpers) from the enclosing
    scope.  Unlike full_sync, only the identifiers are stored (no message
    body); duplicates by uid or g_msg_id are skipped via the WHERE clause.
    """
    cmd = unicode("",encoding='utf8',errors='ignore')
    for i in range(msg_num):
        D = {'all_mail_uid' : all_mail_uids[i],
             'g_msg_id' : g_msg_ids[i],
             'msg_id' : msg_ids[i],
             'UPDATE_TABLE' : 'gmail_chk'}
        # Insert-if-absent: the FROM subqueries aggregate existing ids and
        # the WHERE clause rejects the row when either id is already present.
        upsert = """
        INSERT into %(UPDATE_TABLE)s ( all_mail_uid, g_msg_id, msg_id )
        SELECT %(all_mail_uid)s,%(g_msg_id)s,'%(msg_id)s'
        FROM (
            SELECT array_agg(all_mail_uid) all_uids FROM %(UPDATE_TABLE)s
        ) as f1, (
            SELECT array_agg(g_msg_id) all_g_m_ids FROM %(UPDATE_TABLE)s
        ) as f2
        -- msg_id ignored as sampling showed such value was not unique to each msg
        -- (
        --     SELECT array_agg(msg_id) all_m_ids FROM %(UPDATE_TABLE)s
        -- ) as f3
        WHERE (
            not all_uids @> array['%(all_mail_uid)s'::bigint]
            AND not all_g_m_ids @> array['%(g_msg_id)s'::bigint]
        ) OR (
            all_uids is null
            OR all_g_m_ids is null
        );
        """
        _out = upsert % D
        # Strip non-ASCII bytes so the accumulated command stays plain ASCII.
        _out = re_sub(r'[^\x00-\x7F]+',' ', _out)
        cmd += unicode(self.T.codecs.encode(_out,'ascii','ignore'),errors='ignore')
        # cmd += unicode(upsert,errors='ignore') if type(upsert) is not unicode else upsert
    # Autocommit, then run all accumulated INSERTs in one round-trip.
    self.T.conn.set_isolation_level(0)
    self.T.cur.execute( cmd)