# LEARNING
#read_terms(open('../data/mbto.obo'))
#read_heads(open('../data/mbto.heads'), action='learn')
#test.cleaning_helper()
print('#' * 100)
#test.cleaning_helper()

# LEARNING: load the lemma, type and head resources, then persist the model.
print('LEARNING')
print("Reading lemma")
# Use context managers so each data file is closed promptly instead of
# leaking the handle until interpreter exit (the original passed open(...)
# straight to the reader and never closed it).
with open('../data/expe1_20120910/lemma') as f:
    read_lemma(f)
print("Reading types")
with open('../data/expe1_20120910/types') as f:
    read_types(f)
print("Reading heads")
with open('../data/expe1_20120910/heads_tolearn') as f:
    read_heads(f, action='learn')
print("Saving")
test.save(prefix='../dumps/expe1_20120910/expe1_20120910_after_learning')

# TAGGING: apply what was just learned to the heads-to-tag file, then persist.
print("\nTAGGING")
print("Tagging heads")
with open('../data/expe1_20120910/heads_totag') as f:
    read_heads(f, action='tag')
print("Saving")
test.save(prefix='../dumps/expe1_20120910/expe1_20120910_after_tagging')

#trouves = 0
#non_trouves = 0
#for k,v in test.terms.items():
#    print k, ':', v
#    if hasattr(v, 'head') and hasattr(v, '_subsets'):
# ----- Example #2 (extraction marker from the original code-sharing page) -----
if ONTO_OK:
    #############################################################################
    # LEARNING ONTO: read the ontology terms and the ontology heads, report
    # inconsistencies after each step, then save the learned state.
    print('LEARNING ONTO')
    print('-' * 80)
    #############################################################################

    print("Reading onto")
    # encoding='UTF-8' added for consistency with the heads_tolearn_onto call
    # below; without it codecs.open() yields undecoded byte strings.
    # NOTE(review): assumes the 'onto' file really is UTF-8 -- confirm.
    with codecs.open(BASE_DATA + u'onto', encoding='UTF-8') as f:
        read_terms(f)
    print('%d terms currently in memory.\n' % len(test.terms))
    print('The following inconsistencies were found in the ontology:')
    test.cleaning_helper()
    print('')

    print("Reading onto heads")
    # Context manager closes the handle instead of leaking it.
    with codecs.open(BASE_DATA + u'heads_tolearn_onto', encoding='UTF-8') as f:
        read_heads(f, action='learn')
    print('%d terms currently in memory.\n' % len(test.terms))
    print('The following inconsistencies were found in the ontology:')
    test.cleaning_helper()
    print('')

    print("Saving after learning onto")
    test.save(prefix=BASE_DUMPS + EXPE + u'_after_learning_onto')
    print('-' * 80)
# NOTE(review): this FLAT_OK branch is truncated in this chunk -- only the
# banner is visible; the body that reads the flat resources presumably
# follows in the original file. Left byte-identical.
if FLAT_OK:
    #############################################################################
    # LEARNING FLAT RESOURCES
    print 'LEARNING FLAT RESOURCES'
    print '-' * 80
    #############################################################################
# ----- Example #3 (extraction marker from the original code-sharing page) -----
import test
from obo import read_terms
from onto_utils import read_heads, read_blacklist, read_types, read_lemma

# First: suppress blacklisted entries before any learning takes place.
# Context managers close every data file promptly (the original passed
# open(...) straight to the readers and leaked the handles).
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/blacklist.txt') as f:
    read_blacklist(f)

# LEARNING: ontology terms and ontology heads.
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/bacteria_habitat_OntoBiotope-34') as f:
    read_terms(f)
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/heads_tolearn_onto') as f:
    read_heads(f, action='learn')
#test.cleaning_helper()
print('#' * 100)
#test.cleaning_helper()

# LEARNING: lemma, types and dictionary heads, then persist the model.
print('LEARNING')
print("Reading lemma")
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/lemma') as f:
    read_lemma(f)
print("Reading types")
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/types') as f:
    read_types(f)
print("Reading heads")
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/heads_tolearn_dico') as f:
    read_heads(f, action='learn')
print("Saving")
test.save(prefix='/bibdev/travail/typage/typage_biotope_task3.4/dumps/expe1_20120912/expe1_20120912_after_learning')

# TAGGING: apply the learned model, then persist the tagged state.
print("\nTAGGING")
print("Tagging heads")
with open('/bibdev/travail/typage/typage_biotope_task3.4/data/expe_20120912/heads_totag') as f:
    read_heads(f, action='tag')
print("Saving")
test.save(prefix='/bibdev/travail/typage/typage_biotope_task3.4/dumps/expe1_20120912/expe1_20120912_after_tagging')
# LEARNING
# read_terms(open('../data/mbto.obo'))
# read_heads(open('../data/mbto.heads'), action='learn')
# test.cleaning_helper()
print("#" * 100)
# test.cleaning_helper()

# LEARNING: lemma, types and heads for expe1_20120908, then save the model.
# Context managers close each data file instead of leaking the handle.
print("LEARNING")
print("Reading lemma")
with open("../data/expe1_20120908/lemma") as f:
    read_lemma(f)
print("Reading types")
with open("../data/expe1_20120908/types") as f:
    read_types(f)
print("Reading heads")
with open("../data/expe1_20120908/heads_tolearn") as f:
    read_heads(f, action="learn")
print("Saving")
test.save(prefix="../dumps/expe1/expe1_20120908_after_learning")

# TAGGING: apply the learned model to the heads to tag, then persist.
print("\nTAGGING")
print("Tagging heads")
with open("../data/expe1_20120908/heads_totag") as f:
    read_heads(f, action="tag")
print("Saving")
test.save(prefix="../dumps/expe1/expe1_20120908_after_tagging")

# trouves = 0
# non_trouves = 0
# for k,v in test.terms.items():
#    print k, ':', v
#    if hasattr(v, 'head') and hasattr(v, '_subsets'):