Example #1
def edge_direction_evaluation(direction):
    """
    Evaluate impact of using different edge directions on dependency networks.

    Values for *direction*: ``forward``, ``backward``, and ``undirected``.
    """
    results = {'_edge-direction': direction}

    print '------ CLASSIFICATION EVALUATION --------'

    print '> Reading cases..'
    descriptions_path = '../data/tasa/TASA900_dependencies'
    texts, labels = data.read_files(descriptions_path)

    print '> Creating representations..'
    rep = []
    for i, text in enumerate(texts):
        if i%100==0: print '   ',str(i)+'/'+str(len(texts))
        g = graph_representation.construct_dependency_network(text, direction=direction)
        metric = graph.GraphMetrics.CLOSENESS
        d = graph_representation.graph_to_dict(g, metric)
        rep.append(d)
        g = None # just to make sure..
    rep = graph_representation.dicts_to_vectors(rep)

    print '> Evaluating..'
    score = evaluation.evaluate_classification(rep, labels)
    print '   score:', score
    results['classification'] = score

    print '------ RETRIEVAL EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)
    solutions_path = '../data/air/solutions_preprocessed'
    solution_texts, labels = data.read_files(solutions_path)
    solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)

    print '> Creating representations..'
    rep = []
    for i, text in enumerate(description_texts):
        if i%100==0: print '   ',str(i)+'/'+str(len(description_texts))
        g = graph_representation.construct_dependency_network(text, direction=direction)
        metric = graph.GraphMetrics.EIGENVECTOR
        d = graph_representation.graph_to_dict(g, metric)
        rep.append(d)
        g = None # just to make sure..
    rep = graph_representation.dicts_to_vectors(rep)

    print '> Evaluating..'
    score = evaluation.evaluate_retrieval(rep, solution_vectors)
    print '   score:', score
    results['retrieval'] = score

    # NB: output path reuses the stop-word experiment's prefix
    data.pickle_to_file(results, 'output/dependencies/stop_words_retr_'+direction)

    pp.pprint(results)
    return results
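
A minimal driver for this experiment, assuming the function lives in a module named dependency_experiments (an assumption, though that module name does appear in the term_centrality_study example below) with the ../data and output directories in place:

import dependency_experiments as de

for direction in ['forward', 'backward', 'undirected']:
    de.edge_direction_evaluation(direction)
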
Example #2
def print_common_hub_words(rem_stop_words):
    """
    Print a list of the most common hub words in the created networks.
    Purpose of experiment was to show that hub words typically are stop words.

    The *rem_stop_words* parameter determines whether stop words are removed before creating
    the networks.
    """
    results = {'_removing stop-words': rem_stop_words}

    print '------ CLASSIFICATION EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/tasa/TASA900_dependencies'
    texts, labels = data.read_files(descriptions_path)

    print '> Creating representations..'
    fd = nltk.probability.FreqDist()
    for i, text in enumerate(texts):
        if i % 100 == 0: print '   ', str(i) + '/' + str(len(texts))
        g = graph_representation.construct_dependency_network(
            text, remove_stop_words=rem_stop_words)
        hubs = graph.get_hubs(g, 10)
        for h in hubs:
            fd.inc(h[0])
        g = None  # just to make sure..

    # FreqDist.keys() in NLTK 2.x returns samples sorted by decreasing frequency
    results['tasa'] = fd.keys()

    print '------ RETRIEVAL EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)

    print '> Creating representations..'
    fd = nltk.probability.FreqDist()
    for i, text in enumerate(description_texts):
        if i % 100 == 0:
            print '   ', str(i) + '/' + str(len(description_texts))
        g = graph_representation.construct_dependency_network(
            text, remove_stop_words=rem_stop_words)
        hubs = graph.get_hubs(g, 10)
        for h in hubs:
            fd.inc(h[0])
        g = None  # just to make sure..

    results['air'] = fd.keys()

    if rem_stop_words:
        modifier = 'without'
    else:
        modifier = 'with'
    data.pickle_to_file(
        results, 'output/dependencies/common_hubs_' + modifier + '_stop_words')

    pp.pprint(results)
    return results
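
The hub counting above uses NLTK 2.x's FreqDist.inc, and fd.keys() relies on that version returning samples sorted by decreasing frequency. The same bookkeeping can be sketched with the standard library alone (toy data, not the experiment's):

from collections import Counter

fd = Counter()
# each network contributes its top hubs as (word, score) pairs
for hubs in [[('the', 0.9), ('of', 0.7)], [('the', 0.8), ('a', 0.6)]]:
    for word, _score in hubs:
        fd[word] += 1
# most_common() plays the role of NLTK 2.x's frequency-sorted keys()
print [w for w, _n in fd.most_common()]
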
Example #3
def corpus_dependency_properties(dataset='air/problem_descriptions'):
    """
    Identify and pickle to file various properties of the given dataset.
    These can later be converted to pretty tables using
    :func:`~experiments.print_network_props`.
    """
    print '> Reading data..', dataset
    corpus_path = '../data/'+dataset+'_dependencies'
    (documents, labels) = data.read_files(corpus_path)

    props = {}
    giant = nx.DiGraph()
    print '> Building networks..'
    for i, text in enumerate(documents):
        if i%10==0: print '   ',str(i)+'/'+str(len(documents))
        g = graph_representation.construct_dependency_network(text, remove_stop_words=True)
        giant.add_edges_from(g.edges())
        p = graph.network_properties(g)
        for k,v in p.iteritems():
            if i==0: props[k] = []
            props[k].append(v)
        g = None # just to make sure..

    print '> Calculating means and deviations..'
    props_total = {}
    for key in props:
        props_total[key+'_mean'] = numpy.mean(props[key])
        props_total[key+'_std'] = numpy.std(props[key])

    # NB: output paths are hard-coded for the air corpus regardless of *dataset*
    data.pickle_to_file(giant, 'output/properties/dependency/corpus_network_air_all_no_stop_words')
    data.pickle_to_file(props, 'output/properties/dependency/docs_air_all_no_stop_words')
    data.pickle_to_file(props_total, 'output/properties/dependency/docs_air_all_no_stop_words_total')
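
In isolation, the aggregation step at the end collapses the per-document property lists into corpus-level means and standard deviations (toy values):

import numpy

props = {'nodes': [10, 12, 9], 'avg_degree': [2.1, 2.4, 1.9]}
props_total = {}
for key in props:
    props_total[key + '_mean'] = numpy.mean(props[key])
    props_total[key + '_std'] = numpy.std(props[key])
print props_total
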
Example #4
def retrieval_demo():
    """Function intended to illustrate retrieval in the experimental framework.

    Intended as a basis for new experiments for those not intimately
    familiar with the code.
    """
    print 'Evaluation type: Retrieval'
    print 'Graph type:      Dependency'
    print 'Centrality:      PageRank'
    print
    print '> Reading data..'
    desc_path = '../data/air/problem_descriptions_dependencies'
    sol_path = '../data/air/solutions_preprocessed'
    problems, _ = data.read_files(desc_path)
    solutions, _ = data.read_files(sol_path)

    print '> Creating solution representations..'
    metric = freq_representation.FrequencyMetrics.TF_IDF
    sol_vectors = freq_representation.text_to_vector(solutions, metric)

    print '> Creating problem description representations..'
    dicts = []
    for i, doc in enumerate(problems):
        print '   ',str(i)+'/'+str(len(problems))
        g = graph_representation.construct_dependency_network(doc)
        d = graph_representation.graph_to_dict(g, graph.GraphMetrics.PAGERANK)
        dicts.append(d)
    desc_vectors = graph_representation.dicts_to_vectors(dicts)

    print '> Evaluating..'
    score = evaluation.evaluate_retrieval(desc_vectors, sol_vectors)
    print '    score:', score
    print
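
Since the demo takes no arguments, running it is a one-liner once the data directories exist (same hypothetical module name as above):

import dependency_experiments as de

de.retrieval_demo()
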
Example #5
def print_degree_distributions(dataset):
    """
    Extract degree distribution values from the networks and write them to a
    CSV file.

    **Warning:** overwrites the output file if it already exists.
    """
    print '> Reading data..', dataset
    corpus_path = '../data/' + dataset + '_dependencies'
    (documents, labels) = data.read_files(corpus_path)

    degsfile = open(
        'output/properties/dependency/degrees_docs_' +
        dataset.replace('/', '.'), 'w')

    giant = nx.DiGraph()
    print '> Building networks..'
    for i, text in enumerate(documents):
        if i % 10 == 0: print '   ', str(i) + '/' + str(len(documents))
        g = graph_representation.construct_dependency_network(text)
        giant.add_edges_from(g.edges())
        degs = nx.degree(g).values()
        degs = [str(d) for d in degs]
        degsfile.write(','.join(degs) + '\n')
    degsfile.close()

    print '> Writing giant\'s distribution'
    with open(
            'output/properties/dependency/degrees_giant_' +
            dataset.replace('/', '.'), 'w') as f:
        ds = nx.degree(giant).values()
        ds = [str(d) for d in ds]
        f.write(','.join(ds))
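
The degree-dumping step can be tried on a toy graph to see the CSV line format it produces; dict(g.degree()) works with both the networkx 1.x and 2.x degree APIs:

import networkx as nx

g = nx.DiGraph()
g.add_edges_from([('a', 'b'), ('b', 'c'), ('a', 'c')])
# one comma-separated line of degree values per document network
degs = [str(d) for d in dict(g.degree()).values()]
print ','.join(degs)
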
Example #6
def centrality_weights_retrieval(weighted=True):
    """
    Evaluate whether edge weights are beneficial to the dependency
    network representation for the retrieval task.
    """
    results = {'_is_weighted': weighted, '_evaluation': 'retrieval'}
    graph_metrics = graph_representation.get_metrics(weighted)

    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)

    solutions_path = '../data/air/solutions_preprocessed'
    solution_texts, labels = data.read_files(solutions_path)
    solution_vectors = freq_representation.text_to_vector(
        solution_texts, freq_representation.FrequencyMetrics.TF_IDF)

    rep = {}
    for metric in graph_metrics:
        rep[metric] = []

    print '> Creating graph representations..'
    for i, text in enumerate(description_texts):
        if i % 10 == 0: print '   ', str(i) + '/' + str(len(description_texts))
        g = graph_representation.construct_dependency_network(
            text, weighted=weighted)
        for metric in graph_metrics:
            d = graph_representation.graph_to_dict(g, metric)
            rep[metric].append(d)
        g = None  # just to make sure..
        if i % 100 == 0:
            if weighted:
                postfix = '_weighted'
            else:
                postfix = '_unweighted'
            data.pickle_to_file(
                rep,
                'output/dependencies/exp1_retr_tmp_' + str(i) + postfix)

    print '> Creating vector representations..'
    for metric in graph_metrics:
        rep[metric] = graph_representation.dicts_to_vectors(rep[metric])

    print '> Evaluating..'
    for metric in graph_metrics:
        vectors = rep[metric]
        score = evaluation.evaluate_retrieval(vectors, solution_vectors)
        print '   ', metric, score
        results[metric] = score

    if weighted:
        postfix = '_weighted'
    else:
        postfix = '_unweighted'
    data.pickle_to_file(results, 'output/dependencies/exp1_retr' + postfix)

    pp.pprint(results)
    return results
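
A comparison run calls the experiment once per setting (hypothetical module name as above):

import dependency_experiments as de

for weighted in (True, False):
    de.centrality_weights_retrieval(weighted=weighted)
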
Example #7
def evaluate_tc_icc_retrieval():
    """Evaluate TC-ICC weighted dependency representations on the retrieval task."""
    graph_metrics = graph_representation.get_metrics(False, exclude_flow=True)

    print '> Reading cases..'
    corpus = 'air/problem_descriptions'
    solutions_path = '../data/air/solutions_preprocessed'
    path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(path)

    rep = {}
    icc = {}
    print '> Calculating ICCs..'
    for metric in graph_metrics:
        print
        print metric
        rep[metric] = []
        centralities = retrieve_centralities(corpus, metric)
        if centralities:
            icc[metric] = graph_representation.calculate_icc_dict(centralities)
        else:
            icc[metric] = None

    print '> Creating solution representations..'
    solutions_texts, labels = data.read_files(solutions_path)
    solutions_rep = freq_representation.text_to_vector(
        solutions_texts, freq_representation.FrequencyMetrics.TF_IDF)

    print '> Creating problem description representations..'
    for i, text in enumerate(description_texts):
        if i % 10 == 0:
            print '    document', str(i) + '/' + str(len(description_texts))
        g = graph_representation.construct_dependency_network(text)
        for metric in graph_metrics:
            if not icc[metric]: continue
            d = graph_representation.graph_to_dict(g, metric, icc[metric])
            rep[metric].append(d)
        g = None  # just to make sure..

    print '> Creating vector representations..'
    for metric in graph_metrics:
        if not icc[metric]: continue
        rep[metric] = graph_representation.dicts_to_vectors(rep[metric])

    print '> Evaluating..'
    results = {}
    for metric in graph_metrics:
        if not icc[metric]:
            results[metric] = None
            continue
        vectors = rep[metric]
        score = evaluation.evaluate_retrieval(vectors, solutions_rep)
        print '   ', metric, score
        results[metric] = score

    pp.pprint(results)
    data.pickle_to_file(results, 'output/tc_icc/dependency/retrieval.res')
    return results
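
Note that metrics lacking precomputed corpus centralities are skipped throughout via the icc[metric] guard; the same pattern in isolation (toy values):

icc = {'degree': {'a': 0.5}, 'closeness': None}
results = {}
for metric in icc:
    if not icc[metric]:
        results[metric] = None  # no stored centralities for this metric
        continue
    results[metric] = 'evaluated'
print results
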
Example #8
def term_centrality_study(doc='air/reports_text/2005/a05a0059.html', num=20):
    def _print_terms(cents, rep, num):
        ts = _top_cents(cents, num)
        terms = []
        for t in ts:
            terms.append(t[0])
        print rep + ' & ' + ', '.join(terms) + ' \\\\'

    def _top_cents(cents, num):
        return sorted(cents.iteritems(),
                      key=operator.itemgetter(1),
                      reverse=True)[0:num]

    def _calc_cents(g, metric, gcents=None):
        if gcents: icc = graph_representation.calculate_icc_dict(gcents)
        else: icc = None
        return graph_representation.graph_to_dict(g, metric, icc)

    import operator
    import dependency_experiments
    import co_occurrence_experiments

    dataset = 'air/reports'
    path = '../data/' + doc
    doc = data.read_file(path)

    metric = graph.GraphMetrics.DEGREE
    context = 'window'
    g = graph_representation.construct_cooccurrence_network(doc,
                                                            context=context)
    cents = _calc_cents(g, metric)
    _print_terms(cents, 'Co-occurrence TC', num)
    gcents = co_occurrence_experiments.retrieve_centralities(
        dataset, context, metric)
    cents = _calc_cents(g, metric, gcents)
    _print_terms(cents, 'Co-occurrence TC-ICC', num)

    metric = graph.GraphMetrics.EIGENVECTOR
    deps = data._text_to_dependencies(doc)
    g = graph_representation.construct_dependency_network(deps)
    cents = _calc_cents(g, metric)
    _print_terms(cents, 'Dependency TC', num)
    gcents = dependency_experiments.retrieve_centralities(dataset, metric)
    cents = _calc_cents(g, metric, gcents)
    _print_terms(cents, 'Dependency TC-ICC', num)

    fdict = freq_representation.text_to_dict(
        [doc], freq_representation.FrequencyMetrics.TF_IDF)[0]
    _print_terms(fdict, 'TF-IDF', num)

    fdict = freq_representation.text_to_dict(
        [doc], freq_representation.FrequencyMetrics.TF)[0]
    _print_terms(fdict, 'TF', num)
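
The _top_cents helper is an ordinary sort of centrality dict entries by value (Python 2, as in the surrounding code):

import operator

cents = {'engine': 0.9, 'failure': 0.7, 'runway': 0.95}
top = sorted(cents.iteritems(), key=operator.itemgetter(1), reverse=True)[0:2]
print top  # [('runway', 0.95), ('engine', 0.9)]
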
Example #9
def evaluate_tc_icc_classification():
    """Evaluate TC-ICC weighted dependency representations on the classification task."""
    graph_metrics = graph_representation.get_metrics(False, exclude_flow=True)

    print '> Reading cases..'
    corpus = 'tasa/TASA900'
    path = '../data/'+corpus+'_dependencies'
    #~ path = '../data/tasa/TASATest_dependencies'
    texts, labels = data.read_files(path)

    rep = {}
    icc = {}
    print '> Calculating ICCs..'
    for metric in graph_metrics:
        print
        print metric
        rep[metric] = []
        centralities = retrieve_centralities(corpus, metric)
        if centralities:
            icc[metric] = graph_representation.calculate_icc_dict(centralities)
        else:
            icc[metric] = None

    print '> Creating graph representations..'
    for i, text in enumerate(texts):
        if i%10==0: print '   ',str(i)+'/'+str(len(texts))
        g = graph_representation.construct_dependency_network(text)
        for metric in graph_metrics:
            if not icc[metric]: continue
            d = graph_representation.graph_to_dict(g, metric, icc[metric])
            rep[metric].append(d)
        g = None # just to make sure..

    print '> Creating vector representations..'
    for metric in graph_metrics:
        if not icc[metric]: continue
        rep[metric] = graph_representation.dicts_to_vectors(rep[metric])

    print '> Evaluating..'
    results = {}
    for metric in graph_metrics:
        if not icc[metric]:
            results[metric] = None
            continue
        vectors = rep[metric]
        score = evaluation.evaluate_classification(vectors, labels)
        print '   ', metric, score
        results[metric] = score

    pp.pprint(results)
    data.pickle_to_file(results, 'output/tc_icc/dependency/classification.res')
    return results
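
The two TC-ICC experiments are typically run back to back (hypothetical module name as above):

import dependency_experiments as de

de.evaluate_tc_icc_classification()
de.evaluate_tc_icc_retrieval()
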
Example #10
def store_corpus_network(corpus):
    """Build and pickle the giant dependency network for *corpus*, unless already stored."""
    print '> Constructing corpus network for', corpus
    path = '../data/'+corpus+'_dependencies'
    store_path = 'output/giants/dependency/'+corpus+'/graph.net'
    if data.pickle_from_file(store_path, suppress_warning=True):
        print '    already present, skipping'
        return
    texts, labels = data.read_files(path)
    gdeps = {}
    for i, text in enumerate(texts):
        print '   ',str(i)+'/'+str(len(texts))
        d = pickle.loads(text)
        for dep in d.keys():
            gdeps[dep] = gdeps.get(dep, []) + d[dep]
    giant = graph_representation.construct_dependency_network(gdeps,verbose=True,unpickle=False)
    print '> Serializing and saving..'
    data.pickle_to_file(giant, store_path)
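
The accumulation loop merges per-document dependency dicts into one corpus-wide dict of lists; the merge pattern on toy data:

gdeps = {}
docs = [{'nsubj': [('cat', 'sat')]},
        {'nsubj': [('dog', 'ran')], 'dobj': [('ate', 'fish')]}]
for d in docs:
    for dep in d.keys():
        gdeps[dep] = gdeps.get(dep, []) + d[dep]
# both documents' 'nsubj' edges end up in a single list
print gdeps
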
Example #11
def evaluate_dep_types():
    """
    Leave-one-out evaluation of the various dependency types from the Stanford parser.
    """
    exclude_list = ['dep', 'aux', 'auxpass', 'cop', 'agent', 'acomp',
                    'attr', 'ccomp', 'xcomp', 'complm', 'dobj', 'iobj',
                    'pobj', 'mark', 'rel', 'nsubj', 'nsubjpass', 'csubj',
                    'csubjpass', 'cc', 'conj', 'expl', 'abbrev', 'amod',
                    'appos', 'advcl', 'purpcl', 'det', 'predet', 'preconj',
                    'infmod', 'mwe', 'partmod', 'advmod', 'neg', 'rcmod',
                    'quantmod', 'tmod', 'nn', 'npadvmod', 'num', 'number',
                    'prep', 'poss', 'possessive', 'prt', 'parataxis',
                    'punct', 'ref', 'xsubj', 'pcomp', 'prepc']
    results = {'classification':[], 'retrieval':[]}

    print '------ CLASSIFICATION EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/tasa/TASA900_dependencies'
    texts, labels = data.read_files(descriptions_path)
    print '> Creating representations..'
    rep = {}
    for exclude_label in exclude_list:
        rep[exclude_label] = []
    metric = graph.GraphMetrics.CLOSENESS
    for i, text in enumerate(texts):
        if i%10==0: print '   ',str(i)+'/'+str(len(texts))
        full_graph = graph_representation.construct_dependency_network(text)
        for exclude_label in exclude_list:
            g = graph.reduce_edge_set(full_graph, exclude_label)
            d = graph_representation.graph_to_dict(g, metric)
            rep[exclude_label].append(d)
            g = None # just to make sure..
        full_graph = None
    for exclude_label in exclude_list:
        rep[exclude_label] = graph_representation.dicts_to_vectors(rep[exclude_label])
    print '> Evaluating..'
    for exclude_label in exclude_list:
        score = evaluation.evaluate_classification(rep[exclude_label], labels)
        print '  ', exclude_label, score
        results['classification'].append(score)

    data.pickle_to_file(results, 'output/dependencies/types_eval_tmp')

    print '------ RETRIEVAL EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)
    solutions_path = '../data/air/solutions_preprocessed'
    solution_texts, labels = data.read_files(solutions_path)
    solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)
    print '> Creating representations..'
    rep = {}
    for exclude_label in exclude_list:
        rep[exclude_label] = []
    metric = graph.GraphMetrics.EIGENVECTOR
    for i, text in enumerate(description_texts):
        print '   ',str(i)+'/'+str(len(description_texts))
        full_graph = graph_representation.construct_dependency_network(text)
        for exclude_label in exclude_list:
            g = graph.reduce_edge_set(full_graph, exclude_label)
            d = graph_representation.graph_to_dict(g, metric)
            rep[exclude_label].append(d)
            g = None # just to make sure..
        full_graph = None
        #~ if i%100==0: data.pickle_to_file(rep, 'output/dependencies/types_eval_rep_'+str(i))
    for exclude_label in exclude_list:
        rep[exclude_label] = graph_representation.dicts_to_vectors(rep[exclude_label])
    print '> Evaluating..'
    for exclude_label in exclude_list:
        score = evaluation.evaluate_retrieval(rep[exclude_label], solution_vectors)
        print '  ', exclude_label, score
        results['retrieval'].append(score)

    pp.pprint(results)
    data.pickle_to_file(results, 'output/dependencies/types_eval')

    return results
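
When reading the pickled results back, note that the score lists line up positionally with exclude_list (hypothetical module name as above):

import dependency_experiments as de

results = de.evaluate_dep_types()
# results['classification'][i] is the score with exclude_list[i] removed
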
Example #12
def stop_word_evaluation(rem_stop_words):
    """
    Experiment for determining what effect removing stop words has on
    dependency networks.
    """
    results = {'_removing stop-words':rem_stop_words}

    print '------ CLASSIFICATION EVALUATION --------'

    print '> Reading cases..'
    descriptions_path = '../data/tasa/TASA900_dependencies'
    texts, labels = data.read_files(descriptions_path)

    print '> Creating representations..'
    rep = []
    total_nodes = 0
    for i, text in enumerate(texts):
        if i%100==0: print '   ',str(i)+'/'+str(len(texts))
        g = graph_representation.construct_dependency_network(text, remove_stop_words=rem_stop_words)
        total_nodes += len(g.nodes())
        metric = graph.GraphMetrics.CLOSENESS
        d = graph_representation.graph_to_dict(g, metric)
        rep.append(d)
        g = None # just to make sure..
    rep = graph_representation.dicts_to_vectors(rep)

    print '> Evaluating..'
    score = evaluation.evaluate_classification(rep, labels)
    print '   score:', score
    print '(the networks had a total of',total_nodes,'nodes)'
    results['classification'] = score

    print '------ RETRIEVAL EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)
    solutions_path = '../data/air/solutions_preprocessed'
    solution_texts, labels = data.read_files(solutions_path)
    solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)

    print '> Creating representations..'
    rep = []
    total_nodes = 0
    for i, text in enumerate(description_texts):
        if i%100==0: print '   ',str(i)+'/'+str(len(description_texts))
        g = graph_representation.construct_dependency_network(text, remove_stop_words=rem_stop_words)
        total_nodes += len(g.nodes())
        metric = graph.GraphMetrics.EIGENVECTOR
        d = graph_representation.graph_to_dict(g, metric)
        rep.append(d)
        g = None # just to make sure..
    rep = graph_representation.dicts_to_vectors(rep)

    print '> Evaluating..'
    score = evaluation.evaluate_retrieval(rep, solution_vectors)
    print '   score:', score
    print '(the networks had a total of',total_nodes,'nodes)'
    results['retrieval'] = score

    if rem_stop_words:
        postfix = '_without'
    else:
        postfix = '_with'
    data.pickle_to_file(results, 'output/dependencies/stop_words_retr'+postfix)

    pp.pprint(results)
    return results
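
Both conditions of the experiment would be run as (hypothetical module name as above):

import dependency_experiments as de

for rem in (False, True):
    de.stop_word_evaluation(rem)
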
Example #13
def evaluate_dep_type_sets():
    """
    Evaluation of various sets of dependency relations.

    Each set is excluded from the representation, and the performance recorded.
    The best strategy is to exclude those dependencies whose removal leads to
    the greatest improvement for the representation.
    """
    strategies = {
        'defensive': ['agent', 'advcl', 'parataxis'],
        'aggressive': [
            'agent', 'advcl', 'parataxis', 'dep', 'aux', 'ccomp', 'xcomp',
            'dobj', 'pobj', 'nsubj', 'nsubjpass', 'cc', 'abbrev', 'purpcl',
            'predet', 'preconj', 'advmod', 'neg', 'rcmod', 'tmod', 'poss',
            'prepc'],
        'compromise_1': [
            'agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass',
            'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc'],
        'compromise_2': [
            'agent', 'advcl', 'parataxis', 'aux', 'xcomp', 'pobj', 'nsubjpass',
            'cc', 'abbrev', 'purpcl', 'predet', 'neg', 'tmod', 'poss', 'prepc',
            'attr', 'csubj', 'csubjpass', 'number', 'possessive', 'punct',
            'ref']
    }
    results = {'classification':{}, 'retrieval':{}}

    print '------ CLASSIFICATION EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/tasa/TASA900_dependencies'
    texts, labels = data.read_files(descriptions_path)
    print '> Creating representations..'
    rep = {}
    for strategy in strategies:
        rep[strategy] = []
    metric = graph.GraphMetrics.CLOSENESS
    for i, text in enumerate(texts):
        if i%10==0: print '   ',str(i)+'/'+str(len(texts))
        for strategy in strategies:
            g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])
            d = graph_representation.graph_to_dict(g, metric)
            rep[strategy].append(d)
            g = None # just to make sure. I don't trust this damn garbage collector...
    for strategy in strategies:
        rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])
    print '> Evaluating..'
    for strategy in strategies:
        score = evaluation.evaluate_classification(rep[strategy], labels)
        print '  ', strategy, score
        results['classification'][strategy] = score

    data.pickle_to_file(results, 'output/dependencies/types_set_eval_tmp')

    print '------ RETRIEVAL EVALUATION --------'
    print '> Reading cases..'
    descriptions_path = '../data/air/problem_descriptions_dependencies'
    description_texts, labels = data.read_files(descriptions_path)
    solutions_path = '../data/air/solutions_preprocessed'
    solution_texts, labels = data.read_files(solutions_path)
    solution_vectors = freq_representation.text_to_vector(solution_texts, freq_representation.FrequencyMetrics.TF_IDF)
    print '> Creating representations..'
    rep = {}
    for strategy in strategies:
        rep[strategy] = []
    metric = graph.GraphMetrics.EIGENVECTOR
    for i, text in enumerate(description_texts):
        print '   ',str(i)+'/'+str(len(description_texts))
        for strategy in strategies:
            g = graph_representation.construct_dependency_network(text, exclude=strategies[strategy])
            d = graph_representation.graph_to_dict(g, metric)
            rep[strategy].append(d)
            g = None # just to make sure..
        #~ if i%100==0: data.pickle_to_file(rep, 'output/dependencies/types_eval_rep_'+str(i))
    for strategy in strategies:
        rep[strategy] = graph_representation.dicts_to_vectors(rep[strategy])
    print '> Evaluating..'
    for strategy in strategies:
        score = evaluation.evaluate_retrieval(rep[strategy], solution_vectors)
        print '  ', strategy, score
        results['retrieval'][strategy] = score

    pp.pprint(results)
    data.pickle_to_file(results, 'output/dependencies/types_set_eval')

    return results
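
Unlike evaluate_dep_types, the results here are keyed by strategy name rather than by position (hypothetical module name as above):

import dependency_experiments as de

results = de.evaluate_dep_type_sets()
print results['retrieval']['defensive']
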