#!/usr/bin/python
# -*- coding: latin-1 -*-

'''
Experiments with apriori
'''

import apriori
import random
import loadText

support = 0.4
loadText.importFromFile('spanish_db.txt')
dataset = loadText.rawPriori
#print dataset
C1 = apriori.createC1(dataset)                      # candidate 1-itemsets
#print 'C1', C1
D = map(set, dataset)                               # each transaction as a set for subset tests
#print 'D', D
L1, support_data = apriori.scanD(D, C1, support)    # frequent 1-itemsets and their supports
#print 'L1', L1
#print 'support_data', support_data
k_length = 2
transactions = apriori.aprioriGen(L1, k_length)     # candidate 2-itemsets built from L1
#print 'transactions', transactions
#print '\n*** *** ***'
L, support_data = apriori.apriori(dataset, support) # all frequent itemsets at this support
#print 'L', L
#print 'support_data', support_data
rules = apriori.generateRules(L, support_data, min_confidence=0.7)  # rules at 70% confidence
#print 'rules', rules
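## The script above assumes the local apriori module exposes createC1 and
## scanD. The helpers below are a minimal sketch of what those two steps
## could look like, inferred only from how this script calls them; the names
## with the _sketch suffix are hypothetical, not the module's actual code.

def createC1_sketch(dataset):
    '''Hypothetical helper: every distinct item as a frozenset of size 1.'''
    items = sorted(set(item for transaction in dataset for item in transaction))
    return [frozenset([item]) for item in items]

def scanD_sketch(D, Ck, min_support):
    '''Hypothetical helper: count candidates in D and filter by support.'''
    D = list(D)
    counts = {}
    for transaction in D:
        for candidate in Ck:
            if candidate.issubset(transaction):
                counts[candidate] = counts.get(candidate, 0) + 1
    retained, support_data = [], {}
    for candidate, count in counts.items():
        support = count / float(len(D))
        support_data[candidate] = support
        if support >= min_support:
            retained.append(candidate)
    return retained, support_data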
Example #2
#!/usr/bin/python
# -*- coding: latin-1 -*-
'''
Experiments with markov models
'''

import random
import loadText

loadText.importFromFile('snowflakes_db.txt')
words = loadText.words
neighbors = loadText.neighbors

## testing
if __name__ == '__main__':
    #print '\n\n***\n'
    predicate = random.choice(words.keys())       # random seed word
    sentence = [predicate]
    for i in range(8):
        c1 = set(words[sentence[-1]])              # association candidates for the last word
        nextTo = neighbors[sentence[-1]]           # words observed right after the last word
        i_c_n = c1.intersection(nextTo)            # candidates that are also neighbors
        if not i_c_n:
            print 'no intersection of candidates and neighbors'
            association = random.choice(nextTo)    # fall back to any neighbor
        else:
            association = random.choice(tuple(i_c_n))
        print '\t association:', association
        sentence.append(association)
        print 'the current sentence is:', sentence
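## The generator above relies on two tables built by loadText.importFromFile:
## `words`, mapping each word to its association candidates, and `neighbors`,
## mapping each word to the words seen immediately after it. The function
## below is a rough sketch of how such tables could be built from plain
## sentences; it is an assumption about loadText's behaviour, not its code.

def build_tables_sketch(sentences):
    '''Hypothetical helper: co-occurrence and right-neighbor tables.'''
    words, neighbors = {}, {}
    for sentence in sentences:
        tokens = sentence.lower().split()
        for i, token in enumerate(tokens):
            others = [t for t in tokens if t != token]
            words.setdefault(token, set()).update(others)          # same-sentence co-occurrences
            if i + 1 < len(tokens):
                neighbors.setdefault(token, []).append(tokens[i + 1])  # immediate successor
    return words, neighbors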
#!/usr/bin/python
# -*- coding: latin-1 -*-

'''
Experiments with apriori
'''

import apriori
import random
import loadText

support = 0.1
loadText.importFromFile('snowflakes_db.txt')
dataset = loadText.rawPriori
#print dataset
C1 = apriori.createC1(dataset)
#print 'C1', C1
D = map(set, dataset)
#print 'D', D
L1, support_data = apriori.scanD(D, C1, support)
#print 'L1', L1
#print 'support_data', support_data
print 'support_data'
for k,v in support_data.iteritems():
    print k,v
k_length = 2
transactions = apriori.aprioriGen(L1, k_length)
#print 'transactions', transactions
#print '\n*** *** ***'
L, support_data = apriori.apriori(dataset, support)
#print 'L', L
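## aprioriGen is called above with k_length = 2 to grow the frequent
## 1-itemsets into candidate 2-itemsets. The function below is a compact
## sketch of that join step (merge two frequent (k-1)-itemsets when their
## first k-2 items match); it is an assumption about the helper, not the
## module's actual code.

def aprioriGen_sketch(Lk_prev, k):
    '''Hypothetical helper: candidate k-itemsets from frequent (k-1)-itemsets.'''
    candidates = []
    for i in range(len(Lk_prev)):
        for j in range(i + 1, len(Lk_prev)):
            left = sorted(Lk_prev[i])[:k - 2]
            right = sorted(Lk_prev[j])[:k - 2]
            if left == right:
                candidates.append(Lk_prev[i] | Lk_prev[j])   # union of the two itemsets
    return candidates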
Example #4
#!/usr/bin/python
# -*- coding: latin-1 -*-
'''
Experiments with markov models
'''

import random
import loadText

loadText.importFromFile('association_test_db_full.txt')
words = loadText.words
neighbors = loadText.neighbors

## testing
if __name__ == '__main__':
    #print '\n\n***\n'
    predicate = random.choice(words.keys())
    sentence = [predicate]
    for i in range(8):
        c1 = set(words[sentence[-1]])
        nextTo = neighbors[sentence[-1]]
        i_c_n = c1.intersection(nextTo)
        if not i_c_n:
            print 'no intersection of candidates and neighbors'
            association = random.choice(nextTo)
        else:
            association = random.choice(tuple(i_c_n))
        print '\t association:', association
        sentence.append(association)
        print 'the current sentence is:', sentence
#!/usr/bin/python
# -*- coding: latin-1 -*-

'''
Experiments with markov models
'''

import random
import loadText

loadText.importFromFile('association_test_db_full.txt')
words = loadText.words
neighbors = loadText.neighbors


## testing
if __name__ == '__main__':
    #print '\n\n***\n'
    predicate = random.choice(words.keys())
    sentence = [predicate]
    for i in range(8):
        c1 = set(words[sentence[-1]])
        nextTo = neighbors[sentence[-1]]
        i_c_n = c1.intersection(nextTo)
        if not i_c_n:
            print 'no intersection of candidates and neighbors'
            association = random.choice(nextTo)
        else:
            association = random.choice(tuple(i_c_n))
        print '\t association:', association
        sentence.append(association)
        print 'the current sentence is:', sentence
Example #6
#!/usr/bin/python
# -*- coding: latin-1 -*-
'''
Experiments with apriori
'''

import apriori
import random
import loadText

support = 0.4
loadText.importFromFile('spanish_db.txt')
dataset = loadText.rawPriori
#print dataset
C1 = apriori.createC1(dataset)
#print 'C1', C1
D = map(set, dataset)
#print 'D', D
L1, support_data = apriori.scanD(D, C1, support)
#print 'L1', L1
#print 'support_data', support_data
k_length = 2
transactions = apriori.aprioriGen(L1, k_length)
#print 'transactions', transactions
#print '\n*** *** ***'
L, support_data = apriori.apriori(dataset, support)
#print 'L', L
#print 'support_data', support_data
rules = apriori.generateRules(L, support_data, min_confidence=0.7)
#print 'rules', rules
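## generateRules turns the frequent itemsets into association rules, keeping
## a rule antecedent -> consequent when its confidence,
## support(itemset) / support(antecedent), reaches min_confidence. The
## function below is a simplified sketch restricted to single-item
## consequents; it assumes L is a list of itemset levels of frozensets keyed
## in support_data, and it is not the module's actual code.

def generateRules_sketch(L, support_data, min_confidence=0.7):
    '''Hypothetical helper: (antecedent, consequent, confidence) triples.'''
    rules = []
    for level in L[1:]:                        # assume L[0] holds the 1-itemsets
        for freq_set in level:
            for item in freq_set:
                consequent = frozenset([item])
                antecedent = freq_set - consequent
                confidence = support_data[freq_set] / support_data[antecedent]
                if confidence >= min_confidence:
                    rules.append((antecedent, consequent, confidence))
    return rules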