def __init__(self, maxlen, num_tags, word_index, embeddings, model_type,
             texts_to_eval_dir, dumpPath):
    """Wire together the neural model builder, evaluator and post-processor.

    Args:
        maxlen: maximum sequence length passed through to ``nm.NeuralModel``.
        num_tags: number of distinct tags in the tag set.
        word_index: vocabulary index passed through to ``nm.NeuralModel``.
        embeddings: embedding matrix/lookup passed through to ``nm.NeuralModel``.
        model_type: accepted but not used here — presumably consumed by a
            method elsewhere in this class; TODO confirm.
        texts_to_eval_dir: directory of texts to evaluate (stored as-is).
        dumpPath: output path (stored as-is).
    """
    self.num_tags = num_tags
    self.word_index = word_index
    self.texts_to_eval_dir = texts_to_eval_dir
    self.dumpPath = dumpPath

    # The model maker must exist first: both the evaluator and the
    # post-processor are handed its tag inventory.
    self.model_maker = nm.NeuralModel(maxlen, num_tags, word_index,
                                      embeddings)

    # 1 + 3*(num_tags - 2): looks like one aggregate measure plus three
    # measures for each non-special tag — confirm against Evaluator.
    measure_count = 1 + 3 * (num_tags - 2)
    self.evaluator = ev.Evaluator(num_tags, measure_count,
                                  self.model_maker.tags)
    self.postprocessing = pp.PostProcessing(num_tags,
                                            self.model_maker.tags)
import ast
import copy
import statistics
import Evaluator as ev
import PostProcessing as pp
import text_to_model_output as ttmo
import statistics

# Tag inventory used throughout this module, in the order the models emit.
tags = ['(I)', '(O)', '(P)', '(C)']
num_tags = len(tags)

# 1 + 3*(num_tags - 2): presumably one aggregate measure plus three
# measures for each of the non-special tags — confirm against Evaluator.
num_measures = 1 + 3 * (num_tags - 2)

# Shared helper instances for the functions below.
evaluator = ev.Evaluator(num_tags, num_measures, tags)
postprocessing = pp.PostProcessing(num_tags, tags)

def cluster_counter(unencodedY, y_test_dist, y_pred_dist):
    (true_spans, pred_spans) = postprocessing.replace_argument_tag(unencodedY)

    true_spans_dict_list = evaluator.spanCreator(true_spans)

    true_spans_closure = evaluator.edge_closure(true_spans, true_spans_dict_list, y_test_dist)

    true_edges = evaluator.remove_redundant_edges(true_spans_closure, true_spans, 'true')

    cluster_counter = []
    cluster_sizes = []
    for text in range(0, len(true_edges)):
        cluster_sets = []
        cluster_size = []
        for node, links in true_edges[text].items():
            new_cluster = set(links)