def parse_doc(url):
    """Fetch *url* and extract the docstring section of the page.

    Returns a one-element list: the documentation text split on newlines,
    or a fallback message when no docstring <pre> block is present.
    """
    page = content_request(url)
    soup = BeautifulSoup(page)
    # Guard the chained lookup: the original called
    # soup.find("div", "docstring").find("pre") unconditionally and raised
    # AttributeError when the "docstring" div was absent — before the
    # "No documentation available!" fallback below could ever run.
    container = soup.find("div", "docstring")
    stuff = container.find("pre") if container else None
    lines = []
    if not stuff:
        ret = " \n" + "No documentation available!\n"
        lines.append(ret.split("\n"))
        return lines
    ret = "Documentation: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines
def parse_source(url):
    """Fetch *url* and extract the Clojure source listing from the page.

    Returns a one-element list: the source text split on newlines, or a
    fallback message when the source section (or its <pre> block) is missing.
    """
    page = content_request(url)
    soup = BeautifulSoup(page)
    container = soup.find("div", "source_content")
    # The inner lookup can also come back empty; the original dereferenced
    # .text on it and raised AttributeError when the <pre class="brush: clojure">
    # block was absent. Route that case to the same fallback message.
    stuff = container.find("pre", "brush: clojure") if container else None
    lines = []
    if not stuff:
        ret = " \n" + "No source code available!\n"
        lines.append(ret.split("\n"))
        return lines
    ret = "Source: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines
def parse_doc(url):
    """Fetch *url* and extract the documentation content of the page.

    Converts <br> tags to newlines so the text splits cleanly, then returns
    a one-element list: the documentation text split on newlines, or a
    fallback message when the doc/content divs are missing.

    NOTE(review): this re-defines ``parse_doc`` — if it shares a module with
    the earlier definition it silently shadows it; confirm intent.
    """
    page = content_request(url)
    soup = BeautifulSoup(page)
    # Guard the chained lookup: the original called
    # soup.find("div", "doc").find("div", "content") and then iterated
    # findAll("br") BEFORE the emptiness check, so a missing div raised
    # AttributeError instead of returning the fallback message.
    container = soup.find("div", "doc")
    stuff = container.find("div", "content") if container else None
    lines = []
    if not stuff:
        ret = " \n" + "No documentation available!\n"
        lines.append(ret.split("\n"))
        return lines
    # Preserve line breaks: <br> tags carry the formatting in this markup.
    for br in stuff.findAll("br"):
        br.replace_with("\n")
    ret = "Documentation: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines