Beispiel #1
0
def mostUsed(fileName, top=10):
    """Return the most frequent sanitized words in a file.

    Args:
        fileName: path handed to taxonomy.readFile.
        top: number of most-common words to return (default 10).

    Returns:
        A list of {'label': word, 'value': count} dicts sorted by
        descending count, truncated to `top` entries.
    """
    from collections import Counter  # local import: file's import block is not visible here

    fileData = taxonomy.readFile(fileName)
    counts = Counter(taxonomy.sanitize(word) for word in fileData.split())
    # most_common(top) sorts by descending count and does the top-k
    # selection in one step, replacing the manual dict + full sort.
    return [{'label': word, 'value': count}
            for word, count in counts.most_common(top)]
Beispiel #2
0
def mostUsed(fileName, top=10):
    """Return the most frequent sanitized words in a file.

    Args:
        fileName: path handed to taxonomy.readFile.
        top: number of most-common words to return (default 10).

    Returns:
        A list of {'label': word, 'value': count} dicts sorted by
        descending count, truncated to `top` entries.
    """
    from collections import Counter  # local import: file's import block is not visible here

    fileData = taxonomy.readFile(fileName)
    counts = Counter(taxonomy.sanitize(word) for word in fileData.split())
    # most_common(top) sorts by descending count and does the top-k
    # selection in one step, replacing the manual dict + full sort.
    return [{'label': word, 'value': count}
            for word, count in counts.most_common(top)]
Beispiel #3
0
def main():
    """Graph sentence statistics for the input file.

    Reads the file named by args.args.file, splits it into sentences on
    '.', graphs sentence lengths and word use, runs handleSentence on
    each not-yet-recorded sentence, then graphs parts of speech.
    """
    allLines = taxonomy.readFile(args.args.file)
    sentences = [sentence.strip() for sentence in allLines.split('.')]
    graphSentenceLength(sentences)
    graphSentenceWordUse(sentences)
    graphSentenceWordUse(sentences, top=5)
    # num is the 1-based index of non-empty sentences; it keys the
    # taxonomy.word lookups, so it only advances for real sentences.
    num = 1
    for sentence in sentences:
        if sentence:  # skip empty fragments (e.g. after a trailing '.')
            # Only process sentences taxonomy has no record for yet.
            if taxonomy.word(num) is None:
                handleSentence(sentence, num)
            num += 1
    graphSentencePartsOfSpeech(sentences)
Beispiel #4
0
def main():
    """Graph clause and sentence-kind statistics for the input file.

    Reads the file named by args.args.file, splits it into sentences on
    '.', re-runs handleSentence on any sentence whose taxonomy record is
    missing or lacks clause data, then graphs clauses and kinds.
    """
    allLines = taxonomy.readFile(args.args.file)
    # num is the 1-based index of non-empty sentences; it keys the
    # taxonomy.word lookups, so it only advances for real sentences.
    num = 1
    sentences = [sentence.strip() for sentence in allLines.split('.')]
    for sentence in sentences:
        if sentence:  # skip empty fragments (e.g. after a trailing '.')
            alreadyDone = taxonomy.word(num)
            # Re-handle unless the record exists and has both clause kinds.
            if (alreadyDone is None
                    or 'independent' not in alreadyDone
                    or 'dependent' not in alreadyDone):
                handleSentence(sentence, num)
            num += 1
    graphSentenceClauses(sentences)
    graphSentenceKinds(sentences)
Beispiel #5
0
def main():
    """Graph clause and sentence-kind statistics for the input file.

    Reads the file named by args.args.file, splits it into sentences on
    '.', re-runs handleSentence on any sentence whose taxonomy record is
    missing or lacks clause data, then graphs clauses and kinds.
    """
    allLines = taxonomy.readFile(args.args.file)
    # num is the 1-based index of non-empty sentences; it keys the
    # taxonomy.word lookups, so it only advances for real sentences.
    num = 1
    sentences = [sentence.strip() for sentence in allLines.split('.')]
    for sentence in sentences:
        if sentence:  # skip empty fragments (e.g. after a trailing '.')
            alreadyDone = taxonomy.word(num)
            # Re-handle unless the record exists and has both clause kinds.
            if (alreadyDone is None
                    or 'independent' not in alreadyDone
                    or 'dependent' not in alreadyDone):
                handleSentence(sentence, num)
            num += 1
    graphSentenceClauses(sentences)
    graphSentenceKinds(sentences)
Beispiel #6
0
def main():
    """Graph sentence statistics for the input file.

    Reads the file named by args.args.file, splits it into sentences on
    '.', graphs sentence lengths and word use, runs handleSentence on
    each not-yet-recorded sentence, then graphs parts of speech.
    """
    allLines = taxonomy.readFile(args.args.file)
    sentences = [sentence.strip() for sentence in allLines.split('.')]
    graphSentenceLength(sentences)
    graphSentenceWordUse(sentences)
    graphSentenceWordUse(sentences, top=5)
    # num is the 1-based index of non-empty sentences; it keys the
    # taxonomy.word lookups, so it only advances for real sentences.
    num = 1
    for sentence in sentences:
        if sentence:  # skip empty fragments (e.g. after a trailing '.')
            # Only process sentences taxonomy has no record for yet.
            if taxonomy.word(num) is None:
                handleSentence(sentence, num)
            num += 1
    graphSentencePartsOfSpeech(sentences)
Beispiel #7
0
def main():
    """Read the input file, split it into '.'-delimited sentences, and
    graph all punctuation usage."""
    text = taxonomy.readFile(args.args.file)
    stripped = []
    for fragment in text.split('.'):
        stripped.append(fragment.strip())
    # NOTE(review): "Puntuation" looks like a typo for "Punctuation" in the
    # callee's name — confirm against its definition before renaming.
    graphAllPuntuation(stripped)