def GET(self):
    """Render the source code of the file at the `path` request argument.

    Renders code/view_source.html with the file content and line count;
    on any read failure, renders the same template with the error text.
    The `key` argument is accepted for search-keyword highlighting, which
    is handled client-side in JavaScript.
    """
    template_name = "code/view_source.html"
    path = xutils.get_argument("path", "")
    key = xutils.get_argument("key", "")
    if path == "":
        return xtemplate.render(template_name, error="path is empty")
    else:
        error = ""
        try:
            path = xutils.get_real_path(path)
            content = xutils.readfile(path)
            # Search-keyword highlighting is done in the browser (JS),
            # so no server-side escaping/replacement is needed here.
            return xtemplate.render(template_name,
                pathlist=xutils.splitpath(path),
                name=os.path.basename(path),
                path=path,
                content=content,
                lines=content.count("\n") + 1)
        except Exception as e:
            xutils.print_stacktrace()
            # FIX: pass the message string, not the exception object,
            # consistent with the other handlers in this file.
            error = str(e)
        return xtemplate.render(template_name, error=error, lines=0, content="")
def load_mapping(pattern, func_str):
    """Register a search handler for `pattern`.

    `func_str` is a dotted path "module.func"; the module is imported,
    the function resolved, and the (anchored regex, function) pair is
    appended to the flat global `_mappings` list, which `full_search`
    consumes as consecutive pairs. Errors are logged and swallowed so
    one bad registration does not break startup.
    """
    global _mappings
    try:
        # FIX: use the public importlib API instead of six's private
        # `_import_module` helper.
        from importlib import import_module
        mod_name, func_name = func_str.rsplit('.', 1)
        mod = import_module(mod_name)
        func = getattr(mod, func_name)
        # \Z anchors at end-of-string, so the whole key must match.
        _mappings.append(r"^%s\Z" % pattern)
        _mappings.append(func)
    except Exception:
        xutils.print_stacktrace()
def POST(self):
    """Create a new note record from the submitted form.

    On success, redirects to the note's view page (or returns a JSON
    dict when _type == "json"); on failure, re-renders the add form
    with the error message.
    """
    name = xutils.get_argument("name", "")
    tags = xutils.get_argument("tags", "")
    key = xutils.get_argument("key", "")
    content = xutils.get_argument("content", "")
    # Renamed local (was `type`) so the builtin is not shadowed.
    note_type = xutils.get_argument("type", "post")
    _type = xutils.get_argument("_type", "")
    parent_id = xutils.get_argument("parent_id", 0, type=int)
    if key == "":
        key = time.strftime("%Y.%m.%d")

    file = FileDO(name)
    file.atime = dateutil.get_seconds()
    file.satime = dateutil.format_time()
    file.mtime = dateutil.get_seconds()
    file.smtime = dateutil.format_time()
    file.ctime = dateutil.get_seconds()
    file.sctime = dateutil.format_time()
    file.creator = xauth.get_current_name()
    # Private by default: visible only to the creator's own group.
    file.groups = file.creator
    file.parent_id = parent_id
    file.type = note_type
    file.content = content
    code = "fail"
    error = ""
    try:
        if name != '':
            f = dao.get_by_name(name)
            if f is not None:
                # Duplicate name: echo it back into the form via `key`.
                key = name
                raise Exception(u"%s 已存在" % name)
        # Groups are sorted ahead of ordinary notes.
        if file.type == "group":
            file.priority = 1
        f = dao.insert(file)
        inserted = dao.get_by_name(name)
        if _type == "json":
            return dict(code="success", id=inserted.id)
        raise web.seeother("/file/view?id={}".format(inserted.id))
    except web.HTTPError as e1:
        # web.seeother is raised as an HTTPError; let it propagate.
        raise e1
    except Exception as e:
        xutils.print_stacktrace()
        error = str(e)
    return xtemplate.render("file/add.html",
        key="", name=key, tags=tags,
        error=error, message=error, code=code)
def POST(self):
    """Create directory `dirname` under `path`; return a code/message dict."""
    path = xutils.get_argument("path", "")
    dirname = xutils.get_argument("dirname", "")
    # Guard clause: refuse to create anything without a parent path.
    if path == "":
        return dict(code="fail", message="path is empty")
    target = os.path.join(path, dirname)
    try:
        os.makedirs(target)
    except Exception as e:
        xutils.print_stacktrace()
        return dict(code="fail", message=str(e))
    return dict(code="success")
def default_request(self):
    """Speak the `content` argument aloud via xutils.say.

    Returns a code dict: success when the call completes, fail with the
    error message otherwise.
    """
    content = xutils.get_argument("content")
    try:
        xutils.say(content)
    except Exception as e:
        xutils.print_stacktrace()
        return dict(code="fail", message=str(e))
    return dict(code="success")
def POST(self):
    """Analyze an HTML page, either fetched from `address` or uploaded.

    Extracts images, links, css, and scripts, converts the body to
    Markdown text via HTML2Text, optionally downloads referenced
    images (download_res == "on"), and renders the analysis template.
    On any error renders the template with the error message.
    """
    try:
        args = web.input(file={}, download_res="off")
        file = args.file
        download_res = args.download_res == "on"
        address = xutils.get_argument("address", "")
        filename = file.filename
        if not isempty(address):
            html = readhttp(address)
        else:
            # FIX: accumulate raw bytes and decode once. Decoding each
            # chunk separately can split a multi-byte UTF-8 sequence at
            # a chunk boundary and raise UnicodeDecodeError (and the
            # old `str +=` loop was quadratic).
            parts = []
            for chunk in file.file:
                parts.append(chunk)
            html = b"".join(parts).decode("utf-8")

        print("Read html, filename={}, length={}".format(filename, len(html)))

        soup = BeautifulSoup(html, "html.parser")
        images = soup.find_all("img")
        links = soup.find_all("a")
        csses = soup.find_all("link")
        scripts = soup.find_all("script")

        # Convert the page body to Markdown text.
        h = HTML2Text(baseurl=address)
        text = h.handle(html)
        texts = [text]

        images = get_addr_list(images)
        scripts = get_addr_list(scripts)

        if download_res:
            download_res_list(images, filename)
        return xtemplate.render("tools/analyze_html.html",
            images=images,
            links=links,
            csses=csses,
            scripts=scripts,
            texts=texts,
            address=address)
    except Exception as e:
        xutils.print_stacktrace()
        return xtemplate.render("tools/analyze_html.html", error=str(e))
def read_range(self, path, http_range, blocksize):
    """Stream a byte range of `path` as an HTTP 206 partial response.

    Parses a Range header value of the form "bytes=start-end" (end may
    be empty, meaning end of file), emits 206 headers, and yields the
    requested bytes in `blocksize` chunks. If the header is malformed
    or any error occurs, falls back to streaming the whole file.
    """
    xutils.trace("Download", "==> HTTP_RANGE %s" % http_range)
    range_list = http_range.split("bytes=")
    if len(range_list) != 2:
        # Malformed header: send everything.
        # FIX: read_all is a generator -- yield its chunks, not the
        # generator object itself.
        for chunk in self.read_all(path, blocksize):
            yield chunk
        return
    range_list = range_list[1]
    try:
        range_start, range_end = range_list.split('-')
        range_start = int(range_start)
        total_size = os.stat(path).st_size
        if range_end != "":
            range_end = int(range_end)
        else:
            # No explicit end: serve through the last byte.
            range_end = total_size - 1
        # FIX: Content-Length of a 206 response is the number of bytes
        # actually sent, not the full file size (RFC 7233).
        content_length = range_end - range_start + 1
        web.header("Content-Length", content_length)
        content_range = "bytes %s-%s/%s" % (range_start, range_end, total_size)
        # Partial-content status plus the range headers.
        web.ctx.status = "206 Partial Content"
        web.header("Accept-Ranges", "bytes")
        web.header("Content-Range", content_range)
        xutils.trace("Download", "<== Content-Range:%s" % content_range)
        fp = open(path, "rb")
        try:
            fp.seek(range_start)
            rest = content_length
            readsize = min(rest, blocksize)
            while readsize > 0:
                yield fp.read(readsize)
                rest -= readsize
                readsize = min(rest, blocksize)
        finally:
            # Essentially equivalent to `with`; traced for debugging.
            xutils.trace("Download", "close %s" % path)
            fp.close()
    except Exception:
        # Unknown failure: fall back to the whole file.
        xutils.print_stacktrace()
        for chunk in self.read_all(path, blocksize):
            yield chunk
def full_search(self, key):
    """Run every registered search handler whose pattern matches `key`.

    `_mappings` is a flat list of alternating (regex, handler) entries;
    each handler whose regex matches is invoked with the match groups
    and its results (if any) are concatenated into the returned list.
    Handler failures are logged and skipped.
    """
    global _mappings
    mappings = _mappings
    words = textutil.split_words(key)
    files = []
    # Walk the flat list as (pattern, handler) pairs.
    for pattern, func in zip(mappings[0::2], mappings[1::2]):
        match = re.match(pattern, key)
        if not match:
            continue
        try:
            results = func(*match.groups())
            if results is not None:
                files += results
        except Exception:
            xutils.print_stacktrace()
    return files
def POST(self):
    """Import a web page: fetch (or read uploaded) HTML, extract
    resources and plain text, and optionally archive a Markdown
    rendering under DATA_DIR/archive/YYYY/MM/DD.

    Renders the template with images/links/css/scripts, the Markdown
    text, and the cleaned plain text; on error renders the template
    with the error message.
    """
    try:
        file = xutils.get_argument("file", {})
        address = xutils.get_argument("url", "")
        name = xutils.get_argument("name", "")
        filename = ""
        if hasattr(file, "filename"):
            filename = file.filename
        plain_text = ""
        if not isempty(address):
            html = readhttp(address)
        else:
            # Read the uploaded file.
            # FIX: join raw bytes first, decode once -- per-chunk
            # decoding can split a multi-byte UTF-8 sequence at a chunk
            # boundary and raise UnicodeDecodeError.
            parts = []
            for chunk in file.file:
                parts.append(chunk)
            html = b"".join(parts).decode("utf-8")
        print("Read html, filename={}, length={}".format(filename, len(html)))

        soup = BeautifulSoup(html, "html.parser")
        # Strip scripts/styles so get_text() returns only visible text.
        for element in soup.find_all(["script", "style"]):
            element.extract()
        plain_text = clean_whitespace(soup.get_text(separator=" "))

        images = soup.find_all("img")
        links = soup.find_all("a")
        csses = soup.find_all("link")
        scripts = soup.find_all("script")

        h = HTML2Text(baseurl=address)
        text = "From %s\n\n" % address + h.handle(html)
        texts = [text]
        images = get_addr_list(images)
        scripts = get_addr_list(scripts)

        # FIX: truthiness covers both "" and None.
        if name:
            dirname = os.path.join(xconfig.DATA_DIR,
                                   time.strftime("archive/%Y/%m/%d"))
            xutils.makedirs(dirname)
            path = os.path.join(
                dirname, "%s_%s.md" % (name, time.strftime("%H%M%S")))
            xutils.savetofile(path, text)
            print("save file %s" % path)
        # Removed dead `if False:` block; it referenced an undefined
        # `content` variable and would NameError if ever enabled.

        return xtemplate.render(self.template_path,
            show_aside=False,
            images=images,
            links=links,
            csses=csses,
            scripts=scripts,
            texts=texts,
            address=address,
            url=address,
            plain_text=plain_text)
    except Exception as e:
        xutils.print_stacktrace()
        return xtemplate.render(self.template_path,
            show_aside=False, error=str(e))
def POST(self):
    """Import a web page: fetch (or read uploaded) HTML, extract
    resources, title, and plain text, and optionally archive it via
    save_to_archive_dir.

    Renders the template with images/links/css/scripts, the Markdown
    text, the page title, and the cleaned plain text; on error renders
    the template with the error message.
    """
    try:
        file = xutils.get_argument("file", {})
        address = xutils.get_argument("url", "")
        name = xutils.get_argument("name", "")
        filename = ""
        if hasattr(file, "filename"):
            filename = file.filename
        plain_text = ""
        if not isempty(address):
            html = readhttp(address)
        else:
            # Read the uploaded file.
            # FIX: join raw bytes first, decode once -- per-chunk
            # decoding can split a multi-byte UTF-8 sequence at a chunk
            # boundary and raise UnicodeDecodeError.
            parts = []
            for chunk in file.file:
                parts.append(chunk)
            html = b"".join(parts).decode("utf-8")
        print("Read html, filename={}, length={}".format(filename, len(html)))

        soup = BeautifulSoup(html, "html.parser")
        # Strip scripts/styles so get_text() returns only visible text.
        for element in soup.find_all(["script", "style"]):
            element.extract()
        plain_text = clean_whitespace(soup.get_text(separator=" "))

        images = soup.find_all("img")
        links = soup.find_all("a")
        csses = soup.find_all("link")
        scripts = soup.find_all("script")
        title = get_html_title(soup)

        h = HTML2Text(baseurl=address)
        text = "From %s\n\n" % address + h.handle(html)
        texts = [text]
        images = get_addr_list(images)
        scripts = get_addr_list(scripts)

        # FIX: truthiness covers both "" and None.
        if name:
            save_to_archive_dir(name)

        return xtemplate.render(self.template_path,
            show_aside=False,
            images=images,
            links=links,
            csses=csses,
            scripts=scripts,
            texts=texts,
            address=address,
            url=address,
            article_title=title,
            plain_text=plain_text)
    except Exception as e:
        xutils.print_stacktrace()
        return xtemplate.render(self.template_path,
            show_aside=False, error=str(e))