Beispiel #1
0
def q12():
    """Train a decision stump on the AdaBoost dataset and report errors.

    Prints Ein (score on the training split) and Eout (score on the
    test split) of a single fitted DecisionStump.
    """
    clf = DecisionStump()
    # Fit on the training split and report in-sample score.
    X, y = load_ada_boost_train()
    clf.fit(X, y)
    print('Ein:', clf.score(X, y))
    # Report out-of-sample score on the held-out test split.
    X, y = load_ada_boost_test()
    print('Eout:', clf.score(X, y))
Beispiel #2
0
	def boostRound(self):
		"""Run one boosting round and return (alpha, bestLearner).

		Fits one decision stump per remaining feature, keeps the stump
		with the lowest weighted error, removes that feature from the
		pool, and re-weights the training samples.
		"""
		# Normalize sample weights so they form a distribution.
		self.weights /= self.weights.sum()

		# Fit a candidate stump for every feature still in the table.
		learners = []
		for feat in self.featuretbl:
			stump = DecisionStump(feat[:4], feat[2:])
			stump.fit(self.iimages, self.labels, self.weights)
			learners.append(stump)

		# Pick the stump with the smallest weighted error.
		errs = np.array([s.error for s in learners])
		best_idx = errs.argmin()
		best = learners[best_idx]
		err = best.error

		# beta < 1 whenever err < 0.5; alpha is the learner's vote weight.
		beta = err / (1 - err)
		alpha = np.log(1 / beta)

		# Down-weight correctly classified samples and retire the used feature.
		preds = best.predict(self.iimages)
		self.featuretbl = np.delete(self.featuretbl, best_idx, 0)
		self.weights *= np.power(beta, 1 - np.equal(preds, self.labels))

		return alpha, best
Beispiel #3
0
                        required=True,
                        choices=["1.1", "1.2", "1.3", "1.4", "1.5"])

    io_args = parser.parse_args()
    # Selected exercise/module id, e.g. "1.1" (restricted by argparse choices).
    module = io_args.module

    # Decision Stump using inequalities/threshold
    if module == "1.1":
        # 1. Load citiesSmall dataset
        dataset = load_dataset("citiesSmall.pkl")
        X = dataset["X"]  # feature matrix
        y = dataset["y"]  # target labels

        # 2. Evaluate decision stump
        model = DecisionStump()
        model.fit(X, y)
        y_pred = model.predict(X)

        # Training error: fraction of misclassified examples.
        error = np.mean(y_pred != y)
        print("Decision Stump with inequality rule error: %.3f" % error)

        # PLOT RESULT: draw the classifier's decision boundary over the data.
        utils.plotClassifier(model, X, y)

        # Save the figure under ../figs relative to the working directory.
        fname = os.path.join("..", "figs", "decision_stump_boundary.pdf")
        plt.savefig(fname)
        print("\nFigure saved as '%s'" % fname)

    # Simple decision tree using decision stumps
    elif module == "1.2":
        # 1. Load citiesSmall dataset