def _decode_single_body(self):
    """Decode a non-multipart message body in place.

    Steps: undo the Content-Transfer-Encoding (quoted-printable and/or
    base64), decode the raw bytes to unicode using the detected charset
    (falling back to ASCII when the charset is invalid), then parse as
    HTML when the content looks like HTML.
    """
    self.body = self.body.strip()
    cte = self.headers.get('Content-Transfer-Encoding', '').lower()
    if 'quoted-printable' in cte:
        LOG.debug("Detected quoted-printable encoding, decoding")
        self.body = quopri.decodestring(self.body)
    if 'base64' in cte:
        LOG.debug("Detected base64 encoding, decoding")
        try:
            self.body = base64.decodestring(self.body)
        except base64.binascii.Error:
            # Truncated/corrupt base64 payload — salvage what we can
            # instead of dropping the whole body.
            LOG.info("base64 decoder failed, trying partial decoding")
            self.body = base64_partial_decode(self.body)
    LOG.debug("Detected charset: %s", self.charset)
    try:
        # Use the declared charset only when validate_charset() accepts it;
        # otherwise fall back to 'ascii'. First attempt is strict.
        self.body = self.body.decode(
            validate_charset(self.charset) and self.charset or 'ascii',
            'strict'
        )
    except UnicodeDecodeError:
        # Strict decoding failed: record the charset problem and retry,
        # dropping undecodable bytes.
        LOG.info('Error during strict decoding')
        self.email_stats['charset_errors'] = 1
        self.body = self.body.decode(
            validate_charset(self.charset) and self.charset or 'ascii',
            'ignore'
        )
    if self._guess_html():
        LOG.debug("Message recognized as HTML")
        self._parse_html()
    else:
        LOG.debug("Message recognized as plaintext")
def filtered_files(self):
    """Return a flat list of every file that belongs to a duplicate group.

    A group is any char_table bucket holding more than one file.
    """
    LOG.debug("%s filtered_files", self.__class__.__name__)
    duplicates = []
    for group in self.char_table.values():
        if len(group) > 1:
            duplicates.extend(group)
    return duplicates
def dup_files(self):
    """Return the duplicate groups: each char_table bucket with 2+ files."""
    LOG.debug("%s dup_files", self.__class__.__name__)
    return [group for group in self.char_table.values() if len(group) > 1]
def find(self):
    """Bucket every file by its size into char_table, advancing progress."""
    LOG.debug("%s find", self.__class__.__name__)
    for candidate in self.files:
        # Files with equal size land in the same bucket.
        self.char_table.setdefault(candidate.size, []).append(candidate)
        self.progress += 1
def detail(id):
    """Render the detail view for the blog post with primary key *id*.

    Raises HTTPError 404 when no matching row exists.  Returns the template
    context dict with the post row, a sample translated message, and the
    active gettext function.
    """
    blogs = execute_sql('select id, title, created_time, content from blog where id =?', (id,))
    if not len(blogs):
        raise HTTPError(404, 'Blog does not exist.')
    LOG.debug('column created time type: %s', type(blogs[0]['created_time']))
    #myapp.set_lang(['jp'])
    # NOTE(review): i18n experiment — the message is translated with the
    # current language, and only afterwards is the language switched to
    # Japanese ('ja'); confirm this translate-then-switch ordering is intended.
    msg = myapp._('test i18n in py')
    LOG.debug('i18n msg: %s', msg)
    myapp.set_lang(['ja'])
    return {'blog': blogs[0], 'msg':msg, '_': myapp._}
def dump2csv(self, output_csv):
    """Write the duplicate groups to *output_csv*, one row per group.

    Each row starts with the human-readable size of the group, followed by
    the path of every file in the group.
    """
    LOG.debug("%s dump2csv", self.__class__.__name__)
    rows = [
        [utils.size_renderer(group[0].size)] + [item.path for item in group]
        for group in self.sorted_dup_files
    ]
    with open(output_csv, 'wb') as f:
        writer = UnicodeCSVWriter(f)
        writer.writerows(rows)
def handle_endtag(self, tag):
    """Pop *tag* from the open-tag stack, recovering from mis-nested HTML.

    The stack keeps the innermost open tag at index 0.  Tags listed in
    HTML_PARSER_IGNORE_TAGS are skipped entirely.
    """
    if tag in HTML_PARSER_IGNORE_TAGS:
        return
    if self.tag_stack and tag == self.tag_stack[0]:
        # Well-formed case: the closing tag matches the innermost open tag.
        del self.tag_stack[0]
    else:
        LOG.debug("Invalid closing tag at %r", self.getpos())
        if tag in self.tag_stack:
            # Recover by unwinding the stack down to (and including) the
            # matching open tag; each implicitly-closed tag counts as an
            # error in the parser statistics.
            idx = self.tag_stack.index(tag)
            del self.tag_stack[:idx + 1]
            self.stats['errors_count'] += idx + 1
def find(self):
    """Bucket files by MD5 checksum, skipping files with an unknown digest."""
    LOG.debug("%s find", self.__class__.__name__)
    for candidate in self.files:
        digest = candidate.md5sum
        if digest == setting.UNKNOWN_SYMBOL:
            # Unreadable files carry the sentinel digest; they cannot be
            # grouped (and do not advance progress, matching the original).
            continue
        self.char_table.setdefault(digest, []).append(candidate)
        self.progress += 1
def find(self):
    """Bucket files by their character signature, skipping unknown ones."""
    LOG.debug("%s find", self.__class__.__name__)
    for candidate in self.files:
        signature = candidate.character
        if signature == setting.UNKNOWN_SYMBOL:
            # Sentinel signature: the file could not be characterized,
            # so it is excluded from grouping and from the progress count.
            continue
        self.char_table.setdefault(signature, []).append(candidate)
        self.progress += 1
def _decode_body(self): if self.mime_type and (self.mime_type.startswith('image/') or self.mime_type.startswith('application/')): LOG.info("Body marked as image, skipping body") self.email_stats['attached_images'] += 1 self.body = "" return if self.is_multipart: LOG.debug("Detected multipart/* content-type") self._decode_multipart_body() else: self._decode_single_body()
def start_find(self):
    """Kick off the duplicate search in background threads.

    Collects the user-entered paths, confirms with the user, builds the
    filter chain, and starts the finder and status-update threads.
    """
    LOG.debug("start_find button click")
    self.disable_all()
    # The first path is always included; the optional extra path fields
    # are added only when the user filled them in.  (Was four copy-pasted
    # if-blocks.)
    paths = [self.path_field1.get()]
    for field in (self.path_field2, self.path_field3, self.path_field4):
        value = field.get()
        if value:
            paths.append(value)
    LOG.debug(paths)
    LOG.debug("Full Scan {0}".format(str(self.full_scan.get())))
    # Fixed typo: "Ouput" -> "Output".
    LOG.debug("Output csv {0}".format(str(self.output_csv.get())))
    do_it = messagebox.askyesno('', 'It may take several minutes to complete please wait')
    if not do_it:
        self.enable_all()
        return
    self.find_complete = False
    filters = [
        core.algorithm.SizeFilter(),
        core.algorithm.CharacterFilter()
    ]
    if self.full_scan.get():
        # Full scan adds the expensive content comparison pass on top of
        # the cheap size/character filters.
        filters.append(core.algorithm.FullScanner())
    dup_finder = core.dup_finder.DupFinder(paths, filters)
    self.status_thread = threading.Thread(target=self.update_status, args=(dup_finder,))
    self.find_thread = threading.Thread(target=self.background_find, args=(dup_finder,))
    self.find_thread.start()
    self.status_thread.start()
def dump2file(self, output_file):
    """Write the duplicate groups to *output_file* as human-readable text.

    Groups are separated by a '================' line; each file is listed
    with its rendered size and path.
    """
    LOG.debug("%s dump2file", self.__class__.__name__)
    # Python 3 needs an explicit UTF-8 text wrapper; Python 2 writes bytes.
    if utils.get_python_version() == 3:
        fp = codecs.open(output_file, "w", "utf-8")
    else:
        fp = open(output_file, 'w')
    with fp:
        for group in self.sorted_dup_files:
            fp.write("================\n")
            for item in group:
                rendered = utils.size_renderer(item.size)
                fp.write("Size: {0}, File: {1}\n".format(rendered, item.path))
def start_find(self):
    """Kick off the duplicate search in background threads.

    Collects the user-entered paths, confirms with the user, builds the
    filter chain, and starts the finder and status-update threads.
    """
    LOG.debug("start_find button click")
    self.disable_all()
    # The first path is always included; the optional extra path fields
    # are added only when the user filled them in.  (Was four copy-pasted
    # if-blocks.)
    paths = [self.path_field1.get()]
    for field in (self.path_field2, self.path_field3, self.path_field4):
        value = field.get()
        if value:
            paths.append(value)
    LOG.debug(paths)
    LOG.debug("Full Scan {0}".format(str(self.full_scan.get())))
    # Fixed typo: "Ouput" -> "Output".
    LOG.debug("Output csv {0}".format(str(self.output_csv.get())))
    do_it = messagebox.askyesno(
        '', 'It may take several minutes to complete please wait')
    if not do_it:
        self.enable_all()
        return
    self.find_complete = False
    filters = [
        core.algorithm.SizeFilter(),
        core.algorithm.CharacterFilter()
    ]
    if self.full_scan.get():
        # Full scan adds the expensive content comparison pass on top of
        # the cheap size/character filters.
        filters.append(core.algorithm.FullScanner())
    dup_finder = core.dup_finder.DupFinder(paths, filters)
    self.status_thread = threading.Thread(target=self.update_status,
                                          args=(dup_finder, ))
    self.find_thread = threading.Thread(target=self.background_find,
                                        args=(dup_finder, ))
    self.find_thread.start()
    self.status_thread.start()
def do_post():
    """Create or update a blog post from the submitted form, then redirect.

    An empty form id means a new post (insert + redirect to '/'); otherwise
    the existing post is updated and the client is redirected to its page.
    """
    title = request.forms.title
    content = request.forms.content
    # Renamed local from `id` (shadowed the builtin).
    post_id = request.forms.id
    if not post_id:
        # Fix: the original passed `id` as a lazy-format argument with no
        # placeholder in the message, causing a logging formatting error.
        LOG.debug('add new post...')
        created_time = datetime.now()
        modified_time = created_time
        execute_sql('insert into blog values (?,?,?,?,?)',
                    (None, title, content, created_time, modified_time))
        redirect('/')
    else:
        LOG.debug('post id is: %s', post_id)
        modified_time = datetime.now()
        execute_sql('update blog set title=?, content=?, last_modified_time=? where id=?',
                    (title, content, modified_time, post_id))
        redirect('/post/%s' % post_id)
def get_language_list(self):
    """Return candidate locale codes for the client.

    A configured lang_code wins outright.  Otherwise each accepted
    'lang-COUNTRY' entry yields two candidates — one with the country part
    as sent, one with its case swapped — and plain 'lang' entries pass
    through unchanged.
    """
    if self.lang_code is not None:
        return [self.lang_code]
    expected_langs = self.extra_client_expected_langs()
    LOG.debug("web client accept langs: %s", expected_langs)
    codes = []
    for lang, _priority in expected_langs:
        parts = lang.split("-")
        if len(parts) == 1:
            codes.append(lang)
        else:
            base, country = parts[0], parts[1]
            codes.append("%s_%s" % (base, country))
            codes.append("%s_%s" % (base, country.swapcase()))
    return codes
def check_alive_thread():
    """Watchdog loop: age every client's keep-alive counter each interval.

    A client whose counter reaches zero is considered offline and removed
    from the client group.
    """
    while True:
        # Imported lazily to avoid a circular import at module load time.
        from protocol import CLIENT_GROUP
        # First pass: decrement counters and collect ids that just expired.
        expired = []
        for member in CLIENT_GROUP.get_members().values():
            member.keep_alive_count -= 1
            if member.keep_alive_count == 0:
                expired.append(member.id)
        # Second pass: evict the expired clients.
        for stale_id in expired:
            if CLIENT_GROUP.remove(stale_id):
                LOG.debug('id: {}的客户端已经离线'.format(stale_id))
        sleep(CHECK_INTERVAL)
def check_alive_thread():
    """Watchdog loop: age every client's keep-alive counter each interval.

    A client whose counter reaches zero is considered offline and removed
    from the client group.
    """
    while True:
        # Imported lazily to avoid a circular import at module load time.
        from protocol import CLIENT_GROUP
        # First pass: decrement counters and collect ids that just expired.
        expired = []
        for member in CLIENT_GROUP.get_members().values():
            member.keep_alive_count -= 1
            if member.keep_alive_count == 0:
                expired.append(member.id)
        # Second pass: evict the expired clients.
        for stale_id in expired:
            if CLIENT_GROUP.remove(stale_id):
                LOG.debug('id: {} client is offline'.format(stale_id))
        sleep(CHECK_INTERVAL)
def prepare(self, langs=None):
    """Install the gettext translation for *langs*, caching the result.

    When *langs* is None the candidate list is derived from the client's
    Accept-Language header.  Successful translators are cached per language
    tuple; failures cache None so the identity translator is reused.
    """
    LOG.debug("bottle request.headers.keys %s", request.headers.get("Accept-Language", None))
    if langs is None:
        langs = self.get_language_list()
    LOG.debug("web client accept langs: %s", langs)
    prepared_key = tuple(langs)
    if prepared_key in self.prepared:
        # Cache hit: reinstall the cached translator, or fall back to the
        # identity function when preparation previously failed (None).
        trans = self.prepared.get(prepared_key)
        if trans:
            trans.install(True)
            self.app._ = trans.gettext
        else:
            self.app._ = lambda s: s
        return
    LOG.debug("setup i18n ...")
    try:
        trans = gettext.translation(self.domain, self.locale_dir, languages=langs)
        trans.install(True)
        self.app._ = trans.gettext
        self.prepared[prepared_key] = trans
    except Exception as e:
        # Fix: was `except Exception, e:` — Python-2-only syntax.
        # `as e` is valid on Python 2.6+ and Python 3.
        LOG.warn('can not install application for language "%s" with locale path as "%s"', langs, self.locale_dir)
        LOG.warn(e)
        self.app._ = lambda s: s
        self.prepared[prepared_key] = None
def set_files(self, files):
    """Record *files* as the working set for subsequent find() passes."""
    LOG.debug("%s set_files", self.__class__.__name__)
    self.files = files