Example #1
0
def maketeams(request):
    """Create the team submitted from the addteams page.

    Unauthenticated requests are redirected to the login page; on
    success the user is sent back to the admin page.
    """
    if not auth(request):
        return redirect('/login/')
    team_url = request.POST['url']
    # Ingest/parse the submitted URL (input sanitisation happens here).
    parse.getData(team_url)
    # Fetch the team record for the same link; the result is unused.
    Team.objects.get(link=team_url)
    return redirect('/admin/')
def main():
    """Train per-word decision lists on the senseval-3 training split and
    report test-set accuracy for three classifier variants: the full
    decision list, the list truncated to its first 100 rules, and the
    list restricted to rules scoring above 1.0.
    """
    decision_lists = {}  # word -> its learned decision list
    trainingFile = '/data/cs65/senseval3/train/EnglishLS.train'
    trainData = getData(trainingFile)
    testFile = '/data/cs65/senseval3/test/EnglishLS.test'
    testData = getData(testFile)
    k = 10  # forwarded to build_decision_list/classify -- presumably a context-window size; confirm
    
    # Learn one decision list per ambiguous word in the training data.
    for word in trainData.keys():
        decision_lists[word] = build_decision_list(trainData, word, k)

    total = 0
    correct = 0         # full decision list
    correct_less = 0    # first 100 rules only
    correct_cutoff = 0  # rules with score > 1.0 only
    for word in testData.keys():
        MFS = most_frequent_sense(trainData, word)  # fallback sense for classify
        # Keep only rules whose score (index 2 of each rule tuple) exceeds 1.0.
        modified_declist = [x for x in decision_lists[word] if x[2] > 1.0]
        for instance in testData[word].keys():
            total += 1
            instanceData = testData[word][instance]
            classification = classify(instanceData, decision_lists[word], k, MFS)
            less_rules = classify(instanceData, decision_lists[word][:100], k, MFS)
            cutoff = classify(instanceData, modified_declist, k, MFS)
            # An instance may carry several gold answers; any match counts.
            if classification in instanceData['answers']:
                correct += 1
            if less_rules in instanceData['answers']:
                correct_less += 1
            if cutoff in instanceData['answers']:
                correct_cutoff += 1
    accuracy = float(correct) / float(total)
    accuracy_less = float(correct_less) / float(total)
    accuracy_cutoff = float(correct_cutoff) / float(total)
    print "Accurately classified %f of all words" % (accuracy)
    print "%d correct of %d total" % (correct, total)
    print "With 100 rules, accurately classified %f of all words" % (accuracy_less)
    print "%d correct of %d total" % (correct_less, total)
    print "With cutoff at 1.0, Accurately classified %f of all words" % (accuracy_cutoff)
    print "%d correct of %d total" % (correct_cutoff, total)

    # Assignment write-up notes, kept verbatim.
    """
    12. Got 0.593 accuracy, slightly better than the 0.571 accuracy
    of the MFS baseline.
    14. The modified classifications perform approximately the same; see 
    output
    """

    # print decision_lists['organization.n'][:100]
    """
def classify(train, test):
	trainData, testData = getData(train, test);
	#print len(trainData[0]), len(testData[0])
	#print len(trainData[0][0]), len(trainData[1]), len(testData[0][0]), len(testData[1])
	#annClass = learnANN(trainData[0], trainData[1]);
	#annRes = annClass.predict(testData[0]);
	#annAcc = 0;
	bayesAcc = 0;
	svmAcc = 0;
	bayesClass = learnBayes(trainData[0], trainData[1]);
	bayesRes = bayesClass.predict(testData[0]);
	bayesRes = map(lambda x: 0 if x < 0.5 else 1, bayesRes)
	svmClass = learnSVM(trainData[0], trainData[1]);
	svmRes = svmClass.predict(testData[0]);
	svmRes = map(lambda x: 0 if x < 0.5 else 1, svmRes)
	for i in xrange(len(testData[1])):
		#if annRes[i] == testData[1][i]:
		#	annAcc += 1
		if bayesRes[i] == testData[1][i]:
			bayesAcc += 1
		if svmRes[i] == testData[1][i]:
			svmAcc += 1
	#print "ANN Accuracy:", annAcc/(len(testData[1])*1.0);
	print "Bayes Accuracy:", bayesAcc/(len(testData[1])*1.0);
	print "SVM Accuracy:", svmAcc/(len(testData[1])*1.0);
Example #4
0
def updateDB():
    """Export the data returned by getData('data.pickle') to data.csv.

    Writes a header row, then one CSV row per (company, keyword,
    salience) entry.
    """
    data = getData('data.pickle')

    with open('data.csv', 'w') as out:
        sheet = csv.writer(out)
        sheet.writerow(['Company', 'Keyword', 'Salience'])
        for company, pairs in data.items():
            # Each pair carries (keyword, salience) in its first two slots.
            sheet.writerows([company, pair[0], pair[1]] for pair in pairs)
Example #5
0
def buildBook(filename):
	"""Parse *filename* into (TITLE, AUTHOR, SECTIONS) to build a book.

	NOTE(review): this snippet appears truncated -- the parsed values
	are never used in the visible lines.
	"""
	# Bail out early if the file is missing or unreadable.
	if not os.access(filename, os.R_OK):
		print filename, 'is not exist'
		return

	basename = os.path.basename(filename)
	WORKDIR = basename  # presumably a per-book working directory name -- TODO confirm

	try:
		data = parse.getData(filename)
		TITLE, AUTHOR, SECTIONS = parse.parse(data)
	except ParseError, e:  # Python 2 except syntax
		print 'parse error', filename, e
		return
Example #6
0
 
  for rule in decList[key]:
    if rule[1][1] in words:
      return rule[1][0]
    if rule[0] < 0:
      break
  
  tsenses = getSenses(testData, key)
  tsenseFreq = map(lambda x: (x, tsenses.count(x)), set(tsenses))
  mfs = freqSense(tsenseFreq)[0]
  return mfs


if __name__=='__main__':
    # Train on the senseval-3 training split; test split is loaded but
    # not yet used in the visible lines (snippet looks truncated).
    trainingFile = '/data/cs65/senseval3/train/EnglishLS.train'
    data = getData(trainingFile)
    testingFile = '/data/cs65/senseval3/test/EnglishLS.test'
    testData = getData(testingFile)
    k = 10  # forwarded to countf -- presumably a window/smoothing parameter; confirm
    mfs = {}  #values are tuples of mfs, count of mfs
    decList = {}  # word -> decision list (NOTE(review): never filled in the visible lines)
    for key in data.keys():
      # NOTE(review): set() already dedupes, so senses.count(x) below is
      # always 1 -- senseFreq degenerates to (sense, 1) pairs here.
      senses = list(set(getSenses(data, key)))
      #below is list of frequencies
      senseFreq = map(lambda x: (x, senses.count(x)), set(senses))
      mfs[key] = freqSense(senseFreq)[0]
      scores = []  # NOTE(review): unused in the visible lines
      counts = {}  # sense -> counts from countf for this word
      for sense in senses:
        senseInst = instanceSense(data, key, sense)
        counts[sense] = countf(data, key, k, senseInst)
def doTest():
	"""Spot-check parse.getData() damage values against known-good numbers.

	Prints one "<label> -- PASS/FAIL" line per checked (character, move)
	pair, bracketed by DAMAGE TEST header/footer lines.
	"""
	damage_data = parse.getData()
	# (data key, move name, expected damage, printed label).  Labels are
	# kept verbatim from the original output, typos included
	# ("jiggylypuff", "village").
	cases = [
		("jigglypuff", "Jab1", 3.0, "jiggylypuff Jab1"),
		("robin", "U-smash", 15.0, "robin U-smash"),
		("pikachu", "Nair", 8.5, "pikachu Nair"),
		("marth", "Final Smash", 60.0, "marth Final Smash"),
		("metaknight", "U-throw", 10.0, "metaknight U-throw"),
		("falco", "Reflector (ground)", 5.0, "falco Reflector (ground)"),
		("villager", "Timber (axe)", 14.0, "village Timber (axe)"),
		("gamewatch", "F-smash (normal)", 18.0, "gamewatch F-smash (normal)"),
		("wiifit", "Header (head spike)", 15.0, "wiifit Header (head spike)"),
		("zelda", "Phantom Strike (fully charged uppercut slash)", 12.0,
		 "zelda Phantom Strike (fully charged uppercut slash)"),
	]
	print("#### DAMAGE TEST ####")
	for character, move, expected, label in cases:
		if damage_data[character][move] == expected:
			print(label + " -- PASS")
		else:
			print(label + " -- FAIL")
	print("#### DAMAGE TEST END  ####")
Example #8
0
import sys
import config
import parse
import nn

# Require a data-directory argument; otherwise exit with a usage message.
if len(sys.argv) < 2:
	sys.exit('Usage: %s directory-name' % sys.argv[0])

d = sys.argv[1]  # directory holding the training data
#try:
translate = parse.buildTranslate(d)
data = parse.getData(d, "training", True)

data['translate'] = translate
# NOTE(review): rebinds the imported module name ``nn`` to the trained
# network -- after this line ``nn`` is the instance, not the module.
nn = nn.neuralNetwork( data )
#print nn

#data = parse.getData(d, "test")

#for row in data['inputs']:
#	print nn.predict( row )
#except Exception as error:
#	print error
Example #9
0
#! coding: UTF-8

import numpy as np
import parse as Parser
import kmeans as km

# Prefecture names, indexed in the same order as the rows that
# Parser.getData() returns.  Renamed from ``list`` so the builtin is
# no longer shadowed.
PREFECTURES = [
    "北海道", "青森県", "岩手県", "宮城県", "秋田県", "山形県", "福島県", "茨城県", "栃木県", "群馬県",
    "埼玉県", "千葉県", "東京", "神奈川県", "新潟県", "山梨県", "長野県", "富山県", "石川県", "福井県",
    "岐阜県", "静岡県", "愛知県", "三重県", "滋賀県", "京都府", "大阪府", "兵庫県", "奈良県", "和歌山県",
    "鳥取県", "島根県", "岡山県", "広島県", "山口県", "徳島県", "香川県", "愛媛県", "高知県", "福岡県",
    "佐賀県", "長崎県", "熊本県", "大分県", "宮崎県", "鹿児島県", "沖縄"
]

# Ask the user for the number of clusters. ("Enter the number of clusters")
print("クラスタ数を入力してください")
cluster = int(input())

result = km.kmeans(Parser.getData(), cluster)

# Print the prefectures assigned to each cluster.
for cluster_num in range(cluster):
    cluster_list = [name for idx, name in enumerate(PREFECTURES)
                    if result[idx] == cluster_num]
    print("クラスタ" + str(cluster_num))
    print(cluster_list)
    print("--------------")
Example #10
0
import sys
import config
import parse
import nn

# Require a data-directory argument; otherwise exit with a usage message.
if len(sys.argv) < 2:
    sys.exit('Usage: %s directory-name' % sys.argv[0])

d = sys.argv[1]  # directory holding the training data
#try:
translate = parse.buildTranslate(d)
data = parse.getData(d, "training", True)

data['translate'] = translate
# NOTE(review): rebinds the imported module name ``nn`` to the trained
# network -- after this line ``nn`` is the instance, not the module.
nn = nn.neuralNetwork(data)
#print nn

#data = parse.getData(d, "test")

#for row in data['inputs']:
#	print nn.predict( row )
#except Exception as error:
#	print error
Example #11
0
    trainSenses = retrieveMostCommonSenses(trainData)
    lexelts = testData.keys()
    for lexelt in lexelts:
        instances = testData[lexelt].keys()
        for instance in instances:
            senses = testData[lexelt][instance]["answers"]
            if trainSenses[lexelt][0] in senses:
                totalCorrect += 1
            totalGuesses += 1

    print float(totalCorrect)/totalGuesses * 100
        

if __name__=='__main__':
    trainingFile = '/data/cs65/senseval3/train/EnglishLS.train'
    trainData = getData(trainingFile)
    testingFile = '/data/cs65/senseval3/test/EnglishLS.test'
    testData = getData(testingFile)
    
    # questions on training data
    print "\nQuestion 1:\n"
    question1(trainData)
    print "\nQuestion 2:\n"
    question2(trainData)
    print "\nQuestion 3:\n"
    question3(trainData)
    print "\nQuestion 4:\n"
    question4(trainData)
    print "\nQuestion 5:\n"
    question5(trainData)
    print "\nQuestion 6:\n"