コード例 #1
0
ファイル: cljdocs.py プロジェクト: deflexor/ClojureDoc-Search
def parse_doc(url):
    """Fetch a ClojureDocs page and extract its documentation text.

    Args:
        url: URL of the documentation page to fetch.

    Returns:
        A list containing a single list of output lines: the docstring
        text split on newlines, or a "not available" message when the
        page has no docstring section.
    """
    v = content_request(url)
    soup = BeautifulSoup(v)
    # Guard the chained lookup: the "docstring" div may be absent, and
    # calling .find("pre") on None would raise AttributeError before the
    # fallback branch below could run.
    container = soup.find("div", "docstring")
    stuff = container.find("pre") if container else None
    lines = []
    if not stuff:
        ret = " \n" + "No documentation available!\n"
        lines.append(ret.split("\n"))
        return lines
    ret = "Documentation: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines
コード例 #2
0
ファイル: cljdocs.py プロジェクト: deflexor/ClojureDoc-Search
def parse_source(url):
    """Fetch a ClojureDocs page and extract its source-code listing.

    Args:
        url: URL of the source page to fetch.

    Returns:
        A list containing a single list of output lines: the source text
        split on newlines, or a "not available" message when the page has
        no source section.
    """
    v = content_request(url)
    soup = BeautifulSoup(v)
    stuff = soup.find("div", "source_content")
    lines = []
    if not stuff:
        ret = " \n" + "No source code available!\n"
        lines.append(ret.split("\n"))
        return lines
    # The inner <pre> may be missing even when the container div exists;
    # without this guard, .text on None raises AttributeError.
    pre = stuff.find("pre", "brush: clojure")
    if not pre:
        ret = " \n" + "No source code available!\n"
        lines.append(ret.split("\n"))
        return lines
    ret = "Source:        \n" + pre.text
    lines.append(ret.split("\n"))
    return lines
コード例 #3
0
ファイル: cljdocs.py プロジェクト: Foxboron/ClojureDoc-Search
def parse_source(url):
    """Fetch a ClojureDocs page and extract its source-code listing.

    Args:
        url: URL of the source page to fetch.

    Returns:
        A list containing a single list of output lines: the source text
        split on newlines, or a "not available" message when the page has
        no source section.
    """
    v = content_request(url)
    soup = BeautifulSoup(v)
    stuff = soup.find("div", "source_content")
    lines = []
    if not stuff:
        ret = " \n" + "No source code available!\n"
        lines.append(ret.split("\n"))
        return lines
    # The inner <pre> may be missing even when the container div exists;
    # without this guard, .text on None raises AttributeError.
    pre = stuff.find("pre", "brush: clojure")
    if not pre:
        ret = " \n" + "No source code available!\n"
        lines.append(ret.split("\n"))
        return lines
    ret = "Source:        \n" + pre.text
    lines.append(ret.split("\n"))
    return lines
コード例 #4
0
ファイル: cljdocs.py プロジェクト: Foxboron/ClojureDoc-Search
def parse_doc(url):
    """Fetch a ClojureDocs page and extract its documentation text.

    Args:
        url: URL of the documentation page to fetch.

    Returns:
        A list containing a single list of output lines: the documentation
        text split on newlines, or a "not available" message when the page
        has no documentation section.
    """
    v = content_request(url)
    soup = BeautifulSoup(v)
    # Guard the chained lookup: the "doc" div may be absent, and calling
    # .find("div", "content") on None would raise AttributeError.
    doc_div = soup.find("div", "doc")
    stuff = doc_div.find("div", "content") if doc_div else None
    lines = []
    # Check for a missing section BEFORE iterating its <br> tags — in the
    # original, findAll ran first and crashed on None, making the fallback
    # branch unreachable on the failure path.
    if not stuff:
        ret = " \n" + "No documentation available!\n"
        lines.append(ret.split("\n"))
        return lines
    # Turn <br> tags into newlines so the split below yields real lines.
    for e in stuff.findAll("br"):
        e.replace_with("\n")
    ret = "Documentation: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines
コード例 #5
0
ファイル: cljdocs.py プロジェクト: Foxboron/ClojureDoc-Search
def parse_doc(url):
    """Fetch a ClojureDocs page and extract its documentation text.

    Args:
        url: URL of the documentation page to fetch.

    Returns:
        A list containing a single list of output lines: the documentation
        text split on newlines, or a "not available" message when the page
        has no documentation section.
    """
    v = content_request(url)
    soup = BeautifulSoup(v)
    # Guard the chained lookup: the "doc" div may be absent, and calling
    # .find("div", "content") on None would raise AttributeError.
    doc_div = soup.find("div", "doc")
    stuff = doc_div.find("div", "content") if doc_div else None
    lines = []
    # Check for a missing section BEFORE iterating its <br> tags — in the
    # original, findAll ran first and crashed on None, making the fallback
    # branch unreachable on the failure path.
    if not stuff:
        ret = " \n" + "No documentation available!\n"
        lines.append(ret.split("\n"))
        return lines
    # Turn <br> tags into newlines so the split below yields real lines.
    for e in stuff.findAll("br"):
        e.replace_with("\n")
    ret = "Documentation: \n" + stuff.text
    lines.append(ret.split("\n"))
    return lines