#!/usr/bin/python
import os
import parse_utils

# delete previous output
if os.path.exists('output.txt'):
    os.remove('output.txt')

# delete previous methods coverage
if os.path.exists('./cover/methods.txt'):
    os.remove('./cover/methods.txt')

# iterate package "java"
for f in parse_utils.collectDocFilesFrom('./docs/api/java'):
    parse_utils.output("output.txt", parse_utils.getDocs(f))
    parse_utils.getClassMethods("./cover/methods.txt", f)

# iterate package "javax"
for f in parse_utils.collectDocFilesFrom('./docs/api/javax'):
    parse_utils.output("output.txt", parse_utils.getDocs(f))
    parse_utils.getClassMethods("./cover/methods.txt", f)
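
The script above imports a local parse_utils module that is not included in this listing. As a rough sketch of the interface it appears to rely on (the function names and call signatures come from the calls above; the bodies below are assumptions, not the original implementation), the helpers might look like this:

# parse_utils.py -- hypothetical sketch; only the names and signatures are
# taken from the scripts in this listing, the bodies are guesses.
import os
from bs4 import BeautifulSoup


def collectDocFilesFrom(root):
    """Walk a Javadoc tree and yield the per-class HTML files."""
    for dirpath, _dirs, files in os.walk(root):
        for name in files:
            if name.endswith('.html'):
                yield os.path.join(dirpath, name)


def getDocs(path, classUrl=""):
    """Return the plain text of one Javadoc page, optionally prefixed with its URL."""
    with open(path) as fh:
        soup = BeautifulSoup(fh.read(), 'html.parser')
    text = soup.get_text(separator=' ', strip=True)
    return (classUrl + '\n' if classUrl else '') + text + '\n'


def output(path, text):
    """Append extracted text to the aggregate output file."""
    with open(path, 'a') as fh:
        fh.write(text)


def getClassMethods(outPath, docFile):
    """Append the method names found in a Javadoc page to the coverage file."""
    with open(docFile) as fh:
        soup = BeautifulSoup(fh.read(), 'html.parser')
    with open(outPath, 'a') as out:
        # crude heuristic: method links typically appear inside summary tables
        for anchor in soup.select('table a'):
            out.write(anchor.get_text(strip=True) + '\n')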
Example 2
#!/usr/bin/python
import os
import re
import parse_utils
from bs4 import BeautifulSoup

# delete previous output
if os.path.exists('output.txt'):
    os.remove('output.txt')

# Parse root file
content = BeautifulSoup(parse_utils.readRootFile(), 'html.parser')

# iterate classes
for f in parse_utils.collectDocFilesFrom('./docs/javadoc-api/org'):
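    # find the anchor in the root index whose href matches this class's relative path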
    result = content.find_all(
        href=re.compile(f.replace("./docs/javadoc-api/", "")))
    classUrl = ""
    if len(result) != 0:
        classUrl = result[0].get('href')
    parse_utils.output("output.txt", parse_utils.getDocs(f, classUrl))
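
readRootFile is likewise part of the local parse_utils module; presumably it returns the HTML of the Javadoc root index (for example an all-classes or overview frame) so each class URL can be looked up by href. The lookup itself works as in this self-contained illustration (the index snippet and paths below are invented for demonstration):

# Stand-alone illustration of the href lookup above; the HTML and paths are made up.
import re
from bs4 import BeautifulSoup

index_html = '<a href="org/example/Foo.html">Foo</a><a href="org/example/Bar.html">Bar</a>'
content = BeautifulSoup(index_html, 'html.parser')

f = './docs/javadoc-api/org/example/Foo.html'
result = content.find_all(href=re.compile(f.replace('./docs/javadoc-api/', '')))
print(result[0].get('href'))  # prints: org/example/Foo.html

Note that the relative path is passed to re.compile unescaped, so the '.' in file names acts as a regex wildcard; wrapping the path in re.escape would make the match strictly literal.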
Example 3
#!/usr/bin/python
import os
import parse_utils

# delete previous output
if os.path.exists('output.txt'):
    os.remove('output.txt')

# iterate package "java"
for f in parse_utils.collectDocFilesFrom('./docs/api/java'):
    parse_utils.output("output.txt", parse_utils.getDocs(f))

# iterate package "javax"
for f in parse_utils.collectDocFilesFrom('./docs/api/javax'):
    parse_utils.output("output.txt", parse_utils.getDocs(f))