# numpy plus the project's CrossValidation and ExperimentDecisionTree modules are needed by
# this wrapper; the aliases follow the imports used in Example #3.
import numpy as np
import CrossValidation as cross
import ExperimentDecisionTree as exp4


def run(dominios, targets, anotacoes, atributos, incluiParticipante):
    folds = cross.crossValidationParticipant(6, anotacoes)
    
    # per-expression metric lists and summed per-fold accuracy, accumulated over all folds
    diceTotal = []
    masiTotal = []
    acuraciaTotal = 0.0
    results = []
    
    # per-attribute hit counts (acertos) and totals, accumulated over all folds
    acertosT = {}
    totalT = {}
    
    # one experiment run per participant-keyed fold
    for participante in folds.keys():
        resultadoTotal, dice, masi, acuracia = exp4.run(dominios, targets, folds[participante], atributos, {}, incluiParticipante)
        
        diceTotal.extend(dice)
        masiTotal.extend(masi)
        acuraciaTotal = acuraciaTotal + acuracia
        
        for resultados in resultadoTotal:
            acertos = resultados[0]
            total = resultados[1]
            
            for atributo in acertos.keys():
                if atributo not in acertosT:
                    acertosT[atributo] = 0.0
                    totalT[atributo] = 0.0
                
                acertosT[atributo] = acertosT[atributo] + acertos[atributo]
                totalT[atributo] = totalT[atributo] + total[atributo]
        # snapshot the running per-attribute totals for this fold; without copies every entry
        # in results would reference the same two dicts and end up identical
        results.append([dict(acertosT), dict(totalT)])
    
    print "\n"
    print "General:"
    print 50 * "*"
    print "Expressions: "
    print "Dice: " + str(np.mean(diceTotal))
    print "Masi: " + str(np.mean(masiTotal))
    print "Accuracy: " + str(acuraciaTotal / len(diceTotal))
    print "\n"       
    
    print "Attributes:"
    print 15 * "-"     
    for atributo in acertosT.keys():
        print "Attribute: " + str(atributo)
        print "Accuracy: " + str(acertosT[atributo] / totalT[atributo])
        print 10 * "-" 
    
    return results, diceTotal, masiTotal, acuraciaTotal
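
# A minimal sketch of the kind of structure cross.crossValidationParticipant(6, anotacoes)
# is assumed to return for the loop above: a dict keyed by participant, where each value is
# that participant's own list of (train, test) folds. Illustration only; the field name
# "participant" and the split strategy are assumptions, not the project's actual
# CrossValidation implementation.
def crossValidationParticipantSketch(numFolds, anotacoes):
    byParticipant = {}
    for anotacao in anotacoes:
        byParticipant.setdefault(anotacao["participant"], []).append(anotacao)

    folds = {}
    for participante, itens in byParticipant.items():
        # k-fold split restricted to this participant's annotations
        folds[participante] = [
            ([a for j, a in enumerate(itens) if j % numFolds != i],   # train
             [a for j, a in enumerate(itens) if j % numFolds == i])   # test
            for i in range(numFolds)
        ]
    return folds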
Example #2
import numpy as np
from scipy.stats import wilcoxon, chisquare
# import matplotlib.pyplot as plt
# from hcluster import *
import Assurance as ass
import Parser as parser
import Experiment1 as exp1
import Experiment5 as exp5
import Experiment6 as exp6
import CrossValidation as cross
import SVMValidatedExperiment as exp7

def initialize():
    trials = parser.parse()
    atributos = ["type", "orientation", "age", "hairColour", "hasBeard", "hasHair", "hasGlasses", "hasShirt", "hasTie", "hasSuit", "x-dimension", "y-dimension"]
    return trials, atributos

if __name__ == '__main__':
    trials, atributos = initialize()
    
#     trials = exp1.run(trials, atributos)
    
    folds = cross.crossValidation(10, trials)
      
#     exp5.run(folds, atributos, 0.7)
      
#     exp6.run(folds, atributos, 0.7)
    
#     exp7.run(trials, folds, atributos, {}, False)
    
    exp7.run(trials, folds, atributos, {}, True)
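
# For reference, a minimal sketch of the plain k-fold split that cross.crossValidation(10, trials)
# is assumed to perform: partition the trials into k parts and pair each part (test) with the
# remaining trials (train). Illustration only, not the project's actual CrossValidation module.
def crossValidationSketch(k, trials):
    folds = []
    for i in range(k):
        test = [t for j, t in enumerate(trials) if j % k == i]    # every k-th trial, offset i
        train = [t for j, t in enumerate(trials) if j % k != i]
        folds.append((train, test))
    return folds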
Example #3
# from hcluster import *
import ParserStars as parser
import CrossValidation as cross
import Experiment1 as exp1
import SVMValidatedExperiment as exp2
import SVMValidatedExperiment2 as exp3
import ExperimentDecisionTree as exp4
import ValidatedExperimentIndividual as exp5

def initialize():
    anotacoes = parser.parseAnnotation()
    dominios = parser.parseDominio()
    participantes = {}
    atributos = ["type", "size", "colour", "hpos", "vpos", "near", "left", "right", "below", "above", "in-front-of"]
    targets = {"01f-t1n":"h", "01f-t1r":"h", "01f-t2n":"h", "01f-t2r":"h", "01o-t1n":"h", "01o-t1r":"h", "01o-t2n":"h", "01o-t2r":"h", "02f-t1n":"o", "02f-t1r":"o", "02f-t2n":"o", "02f-t2r":"o", "02o-t1n":"o", "02o-t1r":"o", "02o-t2n":"o", "02o-t2r":"o", "03f-t1n":"m", "03f-t1r":"m", "03f-t2n":"m", "03f-t2r":"m", "03o-t1n":"m", "03o-t1r":"m", "03o-t2n":"m", "03o-t2r":"m", "04f-t1n":"a", "04f-t1r":"a", "04f-t2n":"a", "04f-t2r":"a", "04o-t1n":"a", "04o-t1r":"a", "04o-t2n":"a", "04o-t2r":"a", "05f-t1n":"m", "05f-t2n":"m", "05f-t1r":"m", "05f-t2r":"m", "05o-t1n":"m", "05o-t1r":"m", "05o-t2n":"m", "05o-t2r":"m", "06f-t1n":"h", "06f-t1r":"h", "06f-t2n":"h", "06f-t2r":"h", "06o-t1n":"h", "06o-t1r":"h", "06o-t2n":"h", "06o-t2r":"h", "07f-t1n":"i", "07f-t1r":"i", "07f-t2n":"i", "07f-t2r":"i", "07o-t1n":"i", "07o-t1r":"i", "07o-t2n":"i", "07o-t2r":"i", "08f-t1n":"a", "08f-t1r":"a", "08f-t2n":"a", "08f-t2r":"a", "08o-t1n":"a", "08o-t1r":"a", "08o-t2n":"a", "08o-t2r":"a" }
    return dominios, targets, anotacoes, atributos, participantes


if __name__ == '__main__':
    dominios, targets, anotacoes, atributos, participantes = initialize()
    
    folds = cross.crossValidation(10, anotacoes)
    
    print "Machine Learning sem ID"
#     exp5.run(dominios, targets, anotacoes, atributos, False)
    exp2.run(dominios, targets, folds, atributos, {}, False)
    
    print "\n\n"
    print "Machine Learning com ID"
#     exp5.run(dominios, targets, anotacoes, atributos, True)
    exp2.run(dominios, targets, folds, atributos, {}, True)
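
    # The wilcoxon import in Example #2 suggests comparing the two configurations above
    # statistically. If exp2.run returned per-expression Dice lists the way the wrapper in
    # Example #1 does (an assumption; its return value is not shown here), a paired test
    # could be added along these lines:
    #
    # from scipy.stats import wilcoxon
    # resultsSem, diceSem, masiSem, accSem = exp2.run(dominios, targets, folds, atributos, {}, False)
    # resultsCom, diceCom, masiCom, accCom = exp2.run(dominios, targets, folds, atributos, {}, True)
    # stat, pValue = wilcoxon(diceSem, diceCom)   # paired over the same expressions
    # print "Wilcoxon on Dice: statistic=%s, p=%s" % (stat, pValue)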
Example #4
import sys
import random

# Data, CV, Graph and Regression are assumed to be the project's local helper modules
# (names inferred from the calls below); adjust these imports to wherever they live.
import Data
import CV
import Graph
import Regression

#Generate the data from the basis function
if(len(sys.argv) == 1):
	#Generate the order of the random true polynomial function
	trueOrder = random.randint(1,10)	
	D = Data.genData(trueOrder)
elif(sys.argv[1] == "nonpoly"):
	D = Data.genNonPoly()
else:
	raise Exception("Invalid command line argument")


#In the following, D is the data set which has all the x values as its first entry and the y values as its second.

error,order = CV.kFoldErrorChoose(D[0],D[1],10,5)

#Graph the points on the base polynomial
Graph.lineColor(D[0],D[1],'red')

#Add Gaussian noise to the data outputs
D[1] = Data.addGaussianNoise(D[1],1.0/2000)

#Graph them as points in blue
Graph.pointsSimple(D[0],D[1])

#Estimate the coefficients of the polynomial with best order
fit = Regression.polyTrain(D[0],D[1],order)

#Get the function's estimates for the training x values
z = [fit(i) for i in D[0]]
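
# A rough sketch of the order selection that CV.kFoldErrorChoose(x, y, k, maxOrder) is assumed
# to perform: for each candidate polynomial order, average the held-out squared error over k
# folds and return the (error, order) pair with the lowest error. numpy's polyfit/polyval stand
# in for the project's own fitting code; the real CV module may differ.
import numpy as np

def kFoldErrorChooseSketch(x, y, k, maxOrder):
    x, y = np.asarray(x, dtype=float), np.asarray(y, dtype=float)
    bestError, bestOrder = float("inf"), None
    for order in range(1, maxOrder + 1):
        foldErrors = []
        for i in range(k):
            test = np.arange(len(x)) % k == i          # hold out every k-th point
            coeffs = np.polyfit(x[~test], y[~test], order)
            pred = np.polyval(coeffs, x[test])
            foldErrors.append(np.mean((pred - y[test]) ** 2))
        if np.mean(foldErrors) < bestError:
            bestError, bestOrder = np.mean(foldErrors), order
    return bestError, bestOrder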
Example #5
        print("Then press Enter to continue...")
        raw_input()
        GetGenes.Sort(fps, labels)
        GetGenes.getDiff_Badge(fps, labels)
        GetGenes.nuID2enterzID(fps, labels)


    import David
    David.davidCall(fps, labels)

    import String
    String.stringCall(path, fps, labels)
    String.genEdgeList(path, fps, labels)
    String.genNetworkInput(path, fps, labels)
    String.genNetwork(path, progpath)
    String.annoNetwork(path, progpath, fps, labels)

    import CrossValidation
    CrossValidation.exprToArff(path, fps, labels)
    CrossValidation.syncArffFeatures(path, fps, labels)
    CrossValidation.callWeka(fps, labels)

    import WriteReport
    WriteReport.writeDocReport(path, IOpath, fps, labels)
    WriteReport.writeXlsReport(path, IOpath, fps, labels)





    
import time
import numpy as np
# Softmax and the CrossValidation fold generator used below are assumed to be classes
# defined elsewhere in this project; their imports are not shown in this snippet.

def execute_softmax(X_train,y_train,OX_test,oy_test):

    learning_rates = [1e-5, 1e-8]
    regularization_strengths = [10e2, 10e4]
    results = {}
    best_val = -1
    best_softmax = None
    # X_train = getCIFAR_as_32Pixels_Image(X_train)
    # OX_test = getCIFAR_as_32Pixels_Image(OX_test)
    accuracy = []
    totalAccuracy = 0.0

    ## Implementing Cross Validation
    crossValidObj = CrossValidation(5, X_train, y_train)
    foldsGen = crossValidObj.generateTrainAndTest()
    for i in range(5):
        next(foldsGen)
        X_test = OX_test.copy()   # work on a copy so the in-place centering below cannot mutate the caller's test set across folds
        X_train = crossValidObj.train
        y_train = crossValidObj.labels_train
        X_val = crossValidObj.test
        y_val = crossValidObj.labels_test

        # Preprocessing: reshape the image data into rows
        X_train = np.reshape(X_train, (X_train.shape[0], -1))
        X_val = np.reshape(X_val, (X_val.shape[0], -1))
        X_test = np.reshape(X_test, (X_test.shape[0], -1))

        # Normalize the data: subtract the mean image
        mean_image = np.mean(X_train, axis = 0)
        X_train -= mean_image
        X_val -= mean_image
        X_test -= mean_image

        # Add bias dimension and transform into columns
        X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]).T
        X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]).T
        X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]).T

        softmax_sgd = Softmax()
        tic = time.time()
        losses_sgd = softmax_sgd.train(X_train, y_train, method='sgd', batch_size=200, learning_rate=1e-6,
                      reg = 1e5, num_iters=1000, verbose=False, vectorized=True)
        toc = time.time()


        y_train_pred_sgd = softmax_sgd.predict(X_train)[0]
        print('Training accuracy: %f' % (np.mean(y_train == y_train_pred_sgd)))
        y_val_pred_sgd = softmax_sgd.predict(X_val)[0]
        print('Validation accuracy: %f' % (np.mean(y_val == y_val_pred_sgd)))


        # Choose the best hyperparameters by tuning on the validation set
        step = 0                  # separate counter so the outer fold index i is not shadowed
        interval = 5
        for learning_rate in np.linspace(learning_rates[0], learning_rates[1], num=interval):
            step += 1
            print('The current iteration is %d/%d' % (step, interval))
            for reg in np.linspace(regularization_strengths[0], regularization_strengths[1], num=interval):
                softmax = Softmax()
                softmax.train(X_train, y_train, method='sgd', batch_size=200, learning_rate=learning_rate,
                      reg = reg, num_iters=1000, verbose=False, vectorized=True)
                y_train_pred = softmax.predict(X_train)[0]
                y_val_pred = softmax.predict(X_val)[0]
                train_accuracy = np.mean(y_train == y_train_pred)
                val_accuracy = np.mean(y_val == y_val_pred)
                results[(learning_rate, reg)] = (train_accuracy, val_accuracy)
                if val_accuracy > best_val:
                    best_val = val_accuracy
                    best_softmax = softmax

        # Print out the results
        for learning_rate, reg in sorted(results):
            train_accuracy,val_accuracy = results[(learning_rate, reg)]
            print('learning rate %e and regularization %e, \n \
            the training accuracy is: %f and validation accuracy is: %f.\n' % (learning_rate, reg, train_accuracy, val_accuracy))

        y_test_predict_result = best_softmax.predict(X_test)
        y_test_predict = y_test_predict_result[0]
        test_accuracy = np.mean(oy_test == y_test_predict)
        accuracy.append(test_accuracy)
        totalAccuracy+=test_accuracy
        print('The test accuracy is: %f' % test_accuracy)
    print(accuracy)
    avgAccuracy = totalAccuracy / 5.0
    print('Average Accuracy: %f' % avgAccuracy)
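
# A minimal sketch of the fold-generator interface that execute_softmax relies on: each time the
# generator returned by generateTrainAndTest() is advanced, the object exposes the current fold
# through its train/labels_train/test/labels_test attributes. This illustrates the assumed
# interface only; it is not the project's actual CrossValidation class.
import numpy as np

class CrossValidationSketch(object):
    def __init__(self, k, data, labels):
        self.k = k
        self.data = np.asarray(data)
        self.labels = np.asarray(labels)

    def generateTrainAndTest(self):
        indices = np.arange(len(self.data))
        for i in range(self.k):
            testMask = indices % self.k == i            # every k-th sample forms the test fold
            self.train, self.labels_train = self.data[~testMask], self.labels[~testMask]
            self.test, self.labels_test = self.data[testMask], self.labels[testMask]
            yield i                                     # caller advances the folds with next(...)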