def crossvalidation(self, fold, v, k):
		"""Estimate the generalization error of a SquaredErrorLinearClassifier
		by ``fold``-fold cross-validation on the (shuffled) training set.

		fold -- number of cross-validation folds
		v    -- parameter forwarded to SquaredErrorLinearClassifier
		        (presumably a regularization term -- TODO confirm)
		k    -- number of classes, forwarded to Data and the classifier

		The mean weighted fold error is appended to results/crossvalidation.txt.
		"""
		error = 0
		data = Data(k, 0, 0)
		data.importDataFromMat()
		data.normalize()

		n = data.train_cat.shape[1]
		all_indices = sp.arange(n)
		data.shuffleData()

		sq = SquaredErrorLinearClassifier(v, k)

		# array_split (unlike split) also handles n not divisible by fold,
		# producing near-equal folds instead of raising an error.
		dataset_indices = sp.array_split(all_indices, fold)

		for i in range(fold):
			# Train on every fold except the i-th; validate on the i-th.
			set_without_D_i_indices = sp.concatenate(dataset_indices[0:i] + dataset_indices[i+1:fold])

			sq.train(data.train_left[:, set_without_D_i_indices], data.train_right[:, set_without_D_i_indices], data.train_cat[:, set_without_D_i_indices])
			results, classes = sq.classify(data.train_left[:, dataset_indices[i]], data.train_right[:, dataset_indices[i]])

			err, _ = sq.error(results, data.train_cat[:, dataset_indices[i]].T)

			# Weight each fold's error by fold/n (inverse of the fold size share).
			error += fold / (n * 1.0) * err

		error = error / fold

		with open("results/crossvalidation.txt", "a") as myfile:
			# Bug fix: append a newline so successive results land on separate lines.
			toWrite = "v=" + str(v) + " error=" + str(error) + "\n"
			myfile.write(toWrite)
	def findNuMuLinearClass(self, batchsize, k):
		"""Grid-search the learning rate (nu) and momentum (mu) of a
		LogisticLinearClassifier over {0.05, 0.1} x {0.05, 0.1} and plot the
		training/validation error curves of each combination in one figure.

		batchsize -- NOTE(review): currently unused -- TODO confirm intent
		k         -- NOTE(review): overridden to 5 below, so the argument is
		             ignored -- TODO confirm intent
		"""
		k = 5

		data = Data(k, 0, 0)
		data.importDataFromMat()
		data.normalize()

		error_fig = plt.figure()
		ax1 = error_fig.add_subplot(111)

		for nu in [0.05, 0.1]:
			for mu in [0.05, 0.1]:
				lg = LogisticLinearClassifier(nu, mu, 576, k, data)
				err_train, miss_train, err_val, miss_val = lg.train(30)
				label1 = "validation error mu=" + str(mu) + " nu=" + str(nu)
				label2 = "training error mu=" + str(mu) + " nu=" + str(nu)
				# Bug fix: labels were swapped -- the training curve carried
				# the "validation error" label and vice versa.
				ax1.plot(err_train, label=label2)
				ax1.plot(err_val, label=label1)

		ax1.set_ylabel('error')
		ax1.set_xlabel('epoch')

		title = "Validation and training errors"
		error_fig.suptitle(title)

		plt.legend()
# Example #3
def testLogisticError():
    """Train a LogisticLinearClassifier (nu=mu=0.03, 576 inputs, 5 classes)
    for 30 epochs, plot the error and misclassification curves, and report
    the error and misclassification ratio on the test set.
    """
    n_classes = 5

    data = Data(n_classes, 0, 0)
    data.importDataFromMat()
    data.normalize()

    classifier = LogisticLinearClassifier(0.03, 0.03, 576, n_classes, data)
    err_train, miss_train, err_val, miss_val = classifier.train(30)

    # Error curves (validation vs. training).
    fig_err = plt.figure()
    axis = fig_err.add_subplot(111)
    axis.plot(err_val, label="error (validation)")
    axis.plot(err_train, label="error (training)")
    title = "std(val)=%f std(err)=%f" % (sp.std(err_val), sp.std(err_train))
    fig_err.suptitle(title)
    axis.set_ylabel("error")
    axis.set_xlabel("epoch")
    plt.legend()

    # Misclassification-ratio curves in a second figure, same title.
    fig_miss = plt.figure()
    axis = fig_miss.add_subplot(111)
    axis.plot(miss_val, label="misclassification ratio (validation)")
    axis.plot(miss_train, label="misclassification ratio (training)")
    fig_miss.suptitle(title)
    axis.set_ylabel("misclassification ratio")
    axis.set_xlabel("epoch")
    plt.legend()

    # Evaluate on the held-out test set.
    results, cat = classifier.classify(data.test_left, data.test_right)
    classifier.confusion_matrix(cat, data.test_cat.argmax(axis=0))

    err = Error()
    err, misclass = err.norm_total_error(results.T, data.test_cat, n_classes)
    print "Error on the test set " + str(err)
    print "Misclassification ratio on the test set " + str(misclass)
# Example #4
def compareParameters():
    """Train three MLPs (k=5, H1=80, H2=60, mu=0.1, batchsize=1) that differ
    only in the learning rate nu, and plot all validation/training error
    curves in a single figure for comparison.
    """
    k = 5

    data = Data(k, 0, 0)
    data.importDataFromMat()
    data.normalize()

    # One trainer per learning rate, trained in order.
    learning_rates = [0.001, 0.04, 0.1]
    trainers = []
    for nu in learning_rates:
        trainer = TrainerValidator(k, 40, 80, 60, nu, 0.1, 1, data)
        trainer.trainAndClassify()
        trainers.append(trainer)

    error_fig = plt.figure()
    ax1 = error_fig.add_subplot(111)
    for nu, trainer in zip(learning_rates, trainers):
        ax1.plot(trainer.validation_error, label="validation error mu=0.1 nu=" + str(nu))
        ax1.plot(trainer.training_error, label="training error mu=0.1 nu=" + str(nu))
    ax1.set_ylabel("error")
    ax1.set_xlabel("epoch")

    error_fig.suptitle("Validation and training errors k=5 H1=80 H2=60 batchsize=1")

    plt.legend()
	def createMLPsH(self, H1, nu, mu, batchsize, k):
		"""Train and plot ten MLPs whose second hidden layer grows from
		10 to 100 units in steps of 10, each on a freshly loaded data set.
		"""
		for h2_units in range(10, 101, 10):
			dataset = Data(k, 0, 0)
			dataset.importDataFromMat()
			dataset.normalize()
			trainer = TrainerValidator(k, 5, H1, h2_units, nu, mu, batchsize, dataset)
			trainer.trainAndClassify()
			trainer.plotResults()
	def createMLPsP(self, H1, H2, nu, batchsize, k):
		"""Train and plot MLPs with momentum mu in {0.4, 0.5, 0.6, 0.7},
		each for 50 epochs on a freshly loaded data set.
		"""
		for tenths in (4, 5, 6, 7):
			dataset = Data(k, 0, 0)
			dataset.importDataFromMat()
			dataset.normalize()
			trainer = TrainerValidator(k, 50, H1, H2, nu, tenths / 10.0, batchsize, dataset)
			trainer.trainAndClassify()
			trainer.plotResults()
# Example #7
def testBinary():
    """Train a two-class MLP (H1=100, H2=10, nu=0.1, mu=0.2, batchsize=1)
    for 70 epochs, plot its learning curves, then classify the test set and
    show example outputs plus the confusion matrix.
    """
    n_classes = 2

    dataset = Data(n_classes, 0, 0)
    dataset.importDataFromMat()
    dataset.normalize()

    trainer = TrainerValidator(n_classes, 70, 100, 10, 0.1, 0.2, 1, dataset)
    trainer.trainAndClassify()
    trainer.plotResults()

    tester = Test(trainer.getMLP(), dataset, n_classes)
    tester.classify()
    tester.examples()
    tester.plot_confusion_matrix()
# Example #8
def testSquaredError():
    """Train a SquaredErrorLinearClassifier (v=1024, 5 classes) on the full
    training set, then report the error and misclassification ratio on the
    test set together with its confusion matrix.
    """
    n_classes = 5

    dataset = Data(n_classes, 0, 0)
    dataset.importDataFromMat()
    dataset.normalize()

    classifier = SquaredErrorLinearClassifier(1024, n_classes)  # v = 2**10
    classifier.train(dataset.train_left, dataset.train_right, dataset.train_cat)
    results, cat = classifier.classify(dataset.test_left, dataset.test_right)
    classifier.confusion_matrix(cat, dataset.test_cat.argmax(axis=0))

    error_metric = Error()
    err, misclass = error_metric.norm_total_error(results.T, dataset.test_cat, n_classes)
    print "Error on the test set " + str(err)
    print "Misclassification ratio on the test set " + str(misclass)