Esempio n. 1
0
    def test_regularization(self):
        """Stronger L2 regularization must shrink the coefficient magnitudes.

        Trains on data1() with lambda in {0, 10, 100}; each increase must
        strictly reduce every coefficient's absolute value (bias excluded).
        """
        X, y = data1()

        # |theta| (without the intercept th[0]) for increasing lambda.
        magnitudes = [
            np.abs(solution.LogRegLearner(lam)(X, y).th[1:])
            for lam in (0.0, 10.0, 100.0)
        ]

        self.assertTrue(np.all(magnitudes[0] > magnitudes[1]))
        self.assertTrue(np.all(magnitudes[1] > magnitudes[2]))
Esempio n. 2
0
            ind = numpy.argsort(dists)
            prob[yi, xi] = classifier(X[ind[0]])[1]

    pylab.imshow(prob, extent=(minx, maxx, maxy, miny))

    pylab.xlim(minx, maxx)
    pylab.ylim(miny, maxy)
    pylab.xlabel(at1)
    pylab.ylabel(at2)

    pylab.show()


X, y = solution.load('reg.data')

# Draw the decision boundary for increasingly strong regularization;
# each iteration trains a fresh classifier on the same data.
for lam in (0., 0.01, 0.3):
    learner = solution.LogRegLearner(lambda_=lam)
    classifier = learner(X, y)

    draw_decision(X, y, classifier, 0, 1)

for lam in [0.0, 0.0001, 0.0005, 0.001, 0.01, 0.05, 0.1, 0.2, 0.3, 0.5]:
Esempio n. 3
0
 def test_logreg_noreg_learning_ca(self):
     """Unregularized logistic regression should classify data1() perfectly."""
     learner = solution.LogRegLearner(lambda_=0)
     X, y = data1()
     predictions = solution.test_learning(learner, X, y)
     # Classification accuracy on the training data must be 1.0.
     self.assertAlmostEqual(solution.CA(y, predictions), 1.)
Esempio n. 4
0
#
#
#     s.draw_decision(i, X, y, classifier, 0, 1)


# Part 3 test_cv:

X, y = s.load('reg.data')

lambdas = [10, 1, 0.5, 0.1, 0.075, 0.05, 0.03, 0.01, 0.001, 0.0001, 0.]

# Accumulated classification accuracy per lambda over 20 CV repetitions.
lambdas_ca = dict.fromkeys(lambdas, 0)

for seed in range(1, 21):
    for lam in lambdas:
        # Cross-validated predictions with this seed, scored against y.
        predictions = s.test_cv(s.LogRegLearner(lambda_=lam), X, y, seed=seed)
        lambdas_ca[lam] += s.CA(y, predictions)

# Report the mean accuracy for every lambda.
for lam in lambdas_ca:
    print('Lambda:', lam)
    print('Tocnost', lambdas_ca[lam]/20)


# # Part 3 test_learning:
#
# X, y = s.load('reg.data')
#