Example #1
        else:
            continue
        if m_score > 0.0:
            machine_score.append(m_score)
            human_score.append(float(p[2]))
    # spearmanr returns (correlation, p-value); the correlation is what gets reported here.
    corr, p_value = sci.stats.spearmanr(human_score, machine_score)
    print "Simple Linear Approach", corr
if __name__ == "__main__":
    word_vectors = nlp.read_word_vectors(VECTOR_DIR + VECTOR_NAME)
    word_pairs = nlp.read_csv(CSV_DIR + CSV_NAME)
    vocab = []
    for p in word_pairs:
        vocab.append(p[0].lower())
        vocab.append(p[1].lower())
    vocab = list(set(vocab))
    for w in vocab:
        word_hypernyms[w] = nlp.read_hypernyms(w)
        word_hyponyms[w] = nlp.read_hyponyms(w)
        word_synonyms[w] = nlp.read_synonyms(w)
        senses[w] = nlp.read_senses(w)
        for s in senses[w]:
            sense_vectors[s] = np.zeros(VECTOR_DIM)
            sense_hypernyms[s] = nlp.read_hypernyms_by_sense(s)
            sense_hyponyms[s] = nlp.read_hyponyms_by_sense(s)
            sense_synonyms[s] = nlp.read_synonyms_by_sense(s)
            sense_vectors[s] = nlp.get_pooling(s, sense_hypernyms, sense_synonyms,
                                               sense_hyponyms, word_vectors, VECTOR_DIM)
            if w in word_vectors:
                sense_vectors[s] = sense_vectors[s] + word_vectors[w]
        word_pool[w] = nlp.get_pooling(w, word_hypernyms, word_synonyms, word_hyponyms, word_vectors, VECTOR_DIM)
    test_sense_vectors()
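
Example #1 relies on nlp.get_pooling(...), whose body is not shown in this excerpt. Below is a minimal sketch of what such a pooling step could look like, assuming it simply averages the embeddings of the key's hypernyms, synonyms, and hyponyms that have a vector; the helper name and the averaging choice are assumptions, not the project's actual implementation.

import numpy as np

def get_pooling_sketch(key, hypernyms, synonyms, hyponyms, word_vectors, dim):
    # Collect the related words recorded for this key (a word or a sense).
    related = hypernyms.get(key, []) + synonyms.get(key, []) + hyponyms.get(key, [])
    pooled = np.zeros(dim)
    count = 0
    for r in related:
        if r in word_vectors:
            pooled = pooled + word_vectors[r]
            count += 1
    # Average over the related words that actually have an embedding.
    return pooled / count if count > 0 else pooled
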
Example #2
File: 100_7.py Project: YuXunLu/nlp
            print "pre_cost",pre_cost, "cost", cost
            if ( (pre_cost - cost) <= 1e-8 ):
                s_vecs = pre_vecs
                break
            pre_vecs = s_vecs
        # Copy the converged vectors back onto their senses.
        for i, s in enumerate(senses[w]):
            sense_vectors[s] = s_vecs[i]
    test_sense_vectors()
if __name__ == "__main__":
    word_vectors = nlp.read_word_vectors(VECTOR_DIR + VECTOR_NAME)
    print "LEARNING_RATE", L_RATE
    word_pairs = nlp.read_csv(CSV_DIR + CSV_NAME)
    vocab = []
    for p in word_pairs:
        vocab.append(p[0])
        vocab.append(p[1])
    vocab = list(set(vocab))
    for w in vocab:
        word_hypernyms[w] = nlp.read_hypernyms(w)
        word_hyponyms[w] = nlp.read_hyponyms(w)
        word_synonyms[w] = nlp.read_synonyms(w)
        senses[w] = nlp.read_senses(w)
        for s in senses[w]:
            sense_hypernyms[s] = nlp.read_hypernyms_by_sense(s)
            sense_hyponyms[s] = nlp.read_hyponyms_by_sense(s)
            sense_synonyms[s] = nlp.read_synonyms_by_sense(s)
        word_pool[w] = nlp.get_pooling(w, word_hypernyms, word_synonyms, word_hyponyms, word_vectors)
    train_NN()
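
The training fragment at the top of Example #2 stops once the cost improvement falls below 1e-8 and keeps the previous iterate. Below is a generic sketch of that early-stopping pattern, with update_step() and cost_of() as hypothetical stand-ins for the project's calc_NN / cost_function.

def train_until_converged(vecs, update_step, cost_of, tol=1e-8, max_iter=1000):
    # Mirrors the stopping rule in Example #2: quit when the cost stops improving.
    pre_vecs = vecs
    pre_cost = float("inf")
    for _ in range(max_iter):
        vecs = update_step(pre_vecs)   # hypothetical gradient/update step
        cost = cost_of(vecs)
        if pre_cost - cost <= tol:     # improvement below tolerance: converged
            vecs = pre_vecs            # keep the last accepted iterate
            break
        pre_vecs, pre_cost = vecs, cost
    return vecs
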
Example #3
    for p in word_pairs:
        vocab.append(p[0].lower())
        vocab.append(p[1].lower())
    vocab = list(set(vocab))
    for w in vocab:
        word_hypernyms[w] = nlp.read_hypernyms(w)
        word_hyponyms[w] = nlp.read_hyponyms(w)
        word_synonyms[w] = nlp.read_synonyms(w)
        senses[w] = nlp.read_senses(w)
        for s in senses[w]:
            sense_vectors[s] = np.zeros(VECTOR_DIM)
            sense_hypernyms[s] = nlp.read_hypernyms_by_sense(s)
            sense_hyponyms[s] = nlp.read_hyponyms_by_sense(s)
            sense_synonyms[s] = nlp.read_synonyms_by_sense(s)
            sense_pool[s] = nlp.get_pooling(s, sense_hypernyms, sense_synonyms,
                                            sense_hyponyms, word_vectors,
                                            VECTOR_DIM)
            if w in word_vectors:
                sense_pool[s] = sense_pool[s] + word_vectors[w]
            sense_vectors[s] = sense_pool[s]
            for l in s.lemmas():
                word = str(l.name())
                word_hypernyms[word] = nlp.read_hypernyms(word)
                word_hyponyms[word] = nlp.read_hyponyms(word)
                word_synonyms[word] = nlp.read_synonyms(word)
                word_pool[word] = nlp.get_pooling(word, word_hypernyms,
                                                  word_synonyms, word_hyponyms,
                                                  word_vectors, VECTOR_DIM)

        word_pool[w] = nlp.get_pooling(w, word_hypernyms, word_synonyms,
                                       word_hyponyms, word_vectors, VECTOR_DIM)
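
The s.lemmas() / l.name() calls in Example #3 indicate that the senses are NLTK WordNet synsets. Below is a minimal sketch, assuming the nlp.read_* helpers wrap NLTK WordNet; the project's actual readers are not shown in this excerpt.

from nltk.corpus import wordnet as wn

def read_senses_sketch(word):
    # All WordNet synsets (senses) of a word, analogous to nlp.read_senses(word).
    return wn.synsets(word)

def read_hypernyms_by_sense_sketch(sense):
    # Lemma names of the direct hypernym synsets of one sense.
    return [str(l.name()) for h in sense.hypernyms() for l in h.lemmas()]

def read_synonyms_by_sense_sketch(sense):
    # The other lemma names inside the same synset.
    return [str(l.name()) for l in sense.lemmas()]
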
Example #4
File: f1.py Project: xu94-nlp/nlp
            s_vecs, s_star = calc_NN(w, p_w=para_w, p_u=para_u, p_v=para_v, p_b=para_b)
            cost = cost_function(word_pool[w], s_star)
            print "pre_cost", pre_cost, "cost", cost
            # The cost went up: roll back to the previous vectors and stop.
            if cost > pre_cost:
                s_vecs = pre_vecs
                break
        # Copy the converged vectors back onto their senses.
        for i, s in enumerate(senses[w]):
            sense_vectors[s] = s_vecs[i]
if __name__ == "__main__":
    word_vectors = nlp.read_word_vectors(VECTOR_DIR + VECTOR_NAME)
    print "LEARNING_RATE", L_RATE
    word_pairs = nlp.read_csv(CSV_DIR + CSV_NAME)
    vocab = []
    for p in word_pairs:
        vocab.append(p[0])
        vocab.append(p[1])
    vocab = list(set(vocab))
    for w in vocab:
        word_hypernyms[w] = nlp.read_hypernyms(w)
        word_hyponyms[w] = nlp.read_hyponyms(w)
        word_synonyms[w] = nlp.read_synonyms(w)
        senses[w] = nlp.read_senses(w)
        for s in senses[w]:
            sense_hypernyms[s] = nlp.read_hypernyms_by_sense(s)
            sense_hyponyms[s] = nlp.read_hyponyms_by_sense(s)
            sense_synonyms[s] = nlp.read_synonyms_by_sense(s)
        word_pool[w] = nlp.get_pooling(w, word_hypernyms, word_synonyms, word_hyponyms, word_vectors)
    train_NN()
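
After train_NN(), the project scores word pairs and compares them with the human judgments via Spearman correlation (Example #1 shows that step). Below is a minimal sketch of such an evaluation, assuming each row of word_pairs is (word1, word2, human_score) and using cosine similarity between pooled vectors; the helper names are illustrative, not the project's.

import numpy as np
from scipy import stats

def cosine(a, b):
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(np.dot(a, b) / denom) if denom > 0 else 0.0

def evaluate_pairs(word_pairs, vectors):
    human, machine = [], []
    for w1, w2, score in word_pairs:
        if w1 in vectors and w2 in vectors:
            machine.append(cosine(vectors[w1], vectors[w2]))
            human.append(float(score))
    # spearmanr returns (correlation, p-value); the correlation is the reported metric.
    corr, p_value = stats.spearmanr(human, machine)
    return corr
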