Example No. 1
def save_analysis_to_file():
    # Use a context manager so the output file is closed even on error
    with open(fileName_analysis.get(), "w") as wr:
        tagged_file = read_file(directory.get())
        if not tagged_file:
            wr.write("ERROR: Empty file.")
        else:
            sent_length = sent_length_average(tagged_file)
            word_length = word_length_average(tagged_file)
            pron_count = count_pronouns_per_sentence(tagged_file)
            ttr = ttr_tagged_sents(tagged_file)
            wr.write("The average sentence length is " + str(sent_length) + "\n")
            wr.write("The average word length is " + str(word_length) + "\n")
            wr.write("The number of pronouns per number of sentences is " + str(pron_count) + "\n")
            wr.write("The type-token ratio is " + str(ttr) + "\n")
            trad_complexity = (sent_length + word_length) / 2
            lex_complexity = (pron_count + (1 / ttr)) / 2
            complexity = (sent_length + word_length + pron_count + (1 / ttr)) / 4
            if complexity < 5:
                wr.write("->> The OVERALL COMPLEXITY of the given text is EASY \n")
            elif 5 <= complexity < 8:
                wr.write("->> The OVERALL COMPLEXITY of the given text is ADVANCED \n")
            else:
                wr.write("->> The OVERALL COMPLEXITY of the given text is DIFFICULT \n")
            wr.write("<-- The TRADITIONAL complexity is " + str(trad_complexity) + "\n")
            wr.write("<-- The LEXICAL complexity is " + str(lex_complexity) + "\n")
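
The helper functions above (also used in Example No. 4) are defined elsewhere in that project. A minimal sketch of two of them, assuming tagged_file is a list of sentences where each sentence is a list of (word, tag) tuples:

# Sketch only, not the original implementations: assumes tagged_file is a
# list of sentences, each a list of (word, tag) tuples.
def sent_length_average(tagged_file):
    # Average number of tokens per sentence
    return sum(len(sent) for sent in tagged_file) / len(tagged_file)


def word_length_average(tagged_file):
    # Average number of characters per token
    words = [word for sent in tagged_file for word, tag in sent]
    return sum(len(word) for word in words) / len(words)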
Example No. 2
def main():
    """Read file input
       Convert adjacency matrix to adjacency list and use it as main graph data structure
       Call Hierholzer Algorithm"""

    graph_size, adj_matrix = read_file()
    graph = matrix_to_list(adj_matrix)
    hierholzer(graph, 0)
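
matrix_to_list is not shown in this example; one plausible minimal version, assuming adj_matrix is a square list of lists whose non-zero entries mark edges (the variant in Example No. 7 also returns edge weights):

# Sketch only: convert a square adjacency matrix to an adjacency list.
# Assumes a non-zero entry adj_matrix[u][v] marks an edge u -> v.
def matrix_to_list(adj_matrix):
    graph = {u: [] for u in range(len(adj_matrix))}
    for u, row in enumerate(adj_matrix):
        for v, entry in enumerate(row):
            if entry:
                graph[u].append(v)
    return graph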
Example No. 3
def main():
    dir_ = dirname(dirname(realpath(__file__)))
    dir_ = abspath(dir_ + "/data")
    filename = dir_ + "/" + selection_menu("Select data file", listdir(dir_))

    description, endpoints, requests, video_sizes = read_file(filename)
    # Useful abbreviation
    derv = description, endpoints, requests, video_sizes

    # Keys are human-readable names for the corresponding functions;
    # using a dict imitates switch/case functionality
    search_methods = OrderedDict()
    search_methods['Hill Climbing'] = hill_climb
    search_methods['Genetic Algorithm'] = genetic_search
    search_methods['Random Search'] = random_search
    search_methods['Simulated Annealing'] = simulated_annealing
    search_methods['Run all'] = None

    method = selection_menu("Select search method", search_methods.keys())

    # Time To Run
    ttr = int(input("How many seconds should the algorithm run for?\n--> "))

    if method == "Run all":
        best = {'score': -1, 'method': None, 'solution': None}

        for k, search_method in search_methods.items():
            if search_method:  # "Run all" is None
                print("\n{}...".format(k))
                n_attempts, score, solution = search_method(*derv, ttr)
                display_results(n_attempts, score, solution)

                if score > best['score']:
                    best['score'] = score
                    best['method'] = k
                    best['solution'] = solution

        s = "\nThe best solution was found by {}, with a score of {}"
        print(s.format(best['method'], best['score']))

        verbose = input("Show winning solution? y/N \n -->")
        if verbose == 'y':
            for s in best['solution']:
                print(s)

    else:
        n_attempts, score, solution = search_methods[method](*derv, ttr)
        display_results(n_attempts, score, solution)
        if score != -1:
            verbose = input("Show winning solution? y/N \n -->")
            if verbose == 'y':
                for s in solution:
                    print(s)
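
selection_menu is defined elsewhere in that project; a minimal sketch consistent with how it is called above (a prompt plus an iterable of options, returning the chosen entry):

# Sketch only: a hypothetical selection_menu matching the calls above.
def selection_menu(prompt, options):
    options = list(options)
    print(prompt)
    for i, option in enumerate(options, 1):
        print("{}. {}".format(i, option))
    choice = int(input("--> "))
    return options[choice - 1]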
Example No. 4
def print_analysis():
    tagged_file = read_file(directory.get())
    if not tagged_file:
        print "ERROR: Empty file."
    else:
        sent_length = sent_length_average(tagged_file)
        word_length = word_length_average(tagged_file)
        pron_count = count_pronouns_per_sentence(tagged_file)
        ttr = ttr_tagged_sents(tagged_file)
        print "The average sentence length is "+str(sent_length)
        print "The average word length is "+str(word_length)
        print "The number of pronouns per number of sentences is "+str(pron_count)
        print "The type-token ratio is "+str(ttr)
        trad_complexity = (sent_length+word_length)/2
        lex_complexity = (pron_count+(1/ttr))/2
        complexity = (sent_length+word_length+pron_count+(1/ttr))/4
        if complexity < 5:
            print "\033[1m ->> The overall complexity of the given text is easy \033[0m \n"
        elif 5 <= complexity < 8:
            print "\033[1m ->> The overall complexity of the given text is advanced \033[0m \n"
        else:
            print "\033[1m ->> The overall complexity of the given text is difficult \033[0m \n"
        print "<-- The TRADITIONAL complexity is "+str(trad_complexity)+"\n"
        print "<-- The LEXICAL complexity is "+str(lex_complexity)+"\n"
Example No. 5
from read_input import read_file
import re
from functools import lru_cache

lines = read_file('input9.txt')


def twosum(lst, target):

    d = {}
    for num in lst:
        if target - num in d:
            return True
        d[num] = 1

    return False


# Part 1
def process(lines, preamble=25):

    lst = []
    for i, line in enumerate(lines):

        num = int(line)
        if i >= preamble:
            if not twosum(lst, num):
                print('Found it', num)
                break
        lst.append(num)
        if len(lst) > preamble:
            # Keep a sliding window of the last `preamble` numbers
            lst.pop(0)


process(lines)
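
The read_input.read_file helper shared by these puzzle solutions is not shown; a plausible minimal version, assuming it simply returns the file's lines with trailing newlines stripped:

# Sketch only: a plausible read_input.read_file.
def read_file(filename):
    with open(filename) as f:
        return [line.rstrip("\n") for line in f]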
Example No. 6
from read_input import read_file
import re
from functools import lru_cache

lines = read_file('input10.txt')


# Part 1
def process(lines):

    nums = [int(line) for line in lines]
    num_set = set(nums)

    # counts[d] = number of joltage jumps of exactly d (only 1-3 occur)
    counts = [0] * 4
    curr, prev = 1, 0
    # Walk upward from the outlet (0 jolts) until no adapter is in reach
    while curr - prev <= 3:
        if curr in num_set:
            counts[curr - prev] += 1
            prev, curr = curr, curr + 1
        else:
            curr += 1
    counts[3] += 1  # the device's built-in adapter is always 3 jolts higher
    print(counts)
    print(counts[3] * counts[1])


process(lines)
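
Only Part 1 appears in this snippet; the unused lru_cache import suggests a memoized Part 2. A sketch of how the number of valid adapter arrangements could be counted with it (an assumption, not the original code):

# Sketch only: memoized count of distinct adapter chains from the outlet
# (0 jolts) up to the highest adapter.
def count_arrangements(lines):
    nums = sorted(int(line) for line in lines)
    valid = set(nums)
    target = max(nums)

    @lru_cache(maxsize=None)
    def ways(joltage):
        if joltage == target:
            return 1
        if joltage != 0 and joltage not in valid:
            return 0
        # An adapter can feed any adapter 1-3 jolts higher
        return sum(ways(joltage + step) for step in (1, 2, 3))

    return ways(0)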

Example No. 7
def main():
    graph_size, adj_matrix = read_file()
    graph, edges_weights = matrix_to_list(adj_matrix)
    print("Graph: ", graph)
    print("Edges: ", edges_weights)
    mst_alt(graph, edges_weights)
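
mst_alt is that project's MST routine and is not reproduced here; a minimal Prim's-algorithm sketch over the same structures, assuming graph maps each node to its neighbour list and edges_weights maps a (u, v) tuple (in both directions) to a weight:

import heapq

# Sketch only: Prim's algorithm, not the original mst_alt.
def mst_prim(graph, edges_weights, start=0):
    visited = {start}
    heap = [(edges_weights[(start, v)], start, v) for v in graph[start]]
    heapq.heapify(heap)
    mst = []
    while heap and len(visited) < len(graph):
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        mst.append((u, v, weight))
        for w in graph[v]:
            if w not in visited:
                heapq.heappush(heap, (edges_weights[(v, w)], v, w))
    return mst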
Example No. 8
        s = ""
        for word in ts:
            if is_op(word):
                continue
            s += word + " "  # read the sentences and generate human readable sentences without the classifying tags eg. '+Noun dog' -> 'dog'

        pchart, n = cyk_parse.cyk_parse(
            s, gram)  # run the cyk_parse on all the generated grammar
        orig = orig.replace("*", "").replace("+", "")
        parsed_sentence = pchart.retrieve_sentence(n)
        if verbose:  # verbose output
            print(parsed_sentence, end=' ')
            if orig == parsed_sentence:  # if the cyk generated parse matches the labelled parses then we have a correct label
                print("Right")
                accuracy += 1
            else:
                print("Wrong")
    n = len(test_sentences)
    if verbose:  # determine the accuracy of the training of the data
        print(
            "\nAccuracy: The parser was tested on %d sentences. It got %d right, for an accuracy of %.2f."
            % (n, accuracy, accuracy / n))


if __name__ == "__main__":
    args = get_args()
    n = args.n
    training_set, test_set = read_file(n)
    train(training_set, args.v)
    test(test_set, args.v)
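
get_args is not included in this fragment; a minimal argparse-based sketch consistent with the args.n and args.v accesses above (the flag names and default are assumptions):

import argparse

# Sketch only: argument parsing matching args.n and args.v above.
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("-n", type=int, default=100,
                        help="number of sentences to read (assumed default)")
    parser.add_argument("-v", action="store_true",
                        help="verbose output")
    return parser.parse_args()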
Example No. 9
from read_input import read_file

lines = [int(line) for line in read_file('input1.txt')]


# Part 1
def find_pair(lines, target):
    d = {}
    for num in lines:
        # Hash lookup: have we already seen target - num?
        if target - num in d:
            return num * (target - num)
        d[num] = 1
    return None


print(find_pair(lines, 2020))

# Part 2
sorted_nums = sorted(lines)
n = len(sorted_nums)
for i in range(n):
    tmp = find_pair(sorted_nums[i:], 2020 - sorted_nums[i])
    if tmp:
        print(sorted_nums[i] * tmp)
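
Part 2 reuses find_pair with each number fixed in turn. The same idea wrapped in a function, as a sketch rather than part of the original:

# Sketch only: three-sum by fixing one number and reusing find_pair.
def find_triple(nums, target):
    sorted_nums = sorted(nums)
    for i, num in enumerate(sorted_nums):
        pair_product = find_pair(sorted_nums[i + 1:], target - num)
        if pair_product:
            return num * pair_product
    return None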