def test_gradient_descent(self):
        scale = Scale.scale_from_data(self.test_dataset)
        # self.logreg = LogisticRegressor(self.train_dataset, self.train_labels, scale=scale)

        self.logreg = LogisticRegressor(self.train_dataset, self.train_labels)

        gr = GradientDescentRunner(self.logreg.get_gradient(), len(self.samples[0]) + 1,
                                   self.logreg.get_error_function(), alpha=1e-8, max_iter=300)
        weights = gr.run()
        self.logreg.weights = weights

        mismatches = 0
        predictions = [self.logreg.predict(test_data) for test_data in self.test_dataset]
        print "All predictions: ", predictions

        for test_data, label in zip(self.test_dataset, self.test_labels):
            prediction = self.logreg.predict(test_data)
            if prediction != label:
                mismatches +=1
                # print "Mismatch! Predicted ", prediction, ", True ", label

        total = len(self.test_dataset)
        print "total data: ", total
        print "total mismatch: ", mismatches
        print "percentage success: ", (100 - float(mismatches) / float(total) * 100), "%"
Example #2
0
    def test_gradient_descent(self):
        scale = Scale.scale_from_data(self.test_dataset)
        # self.logreg = LogisticRegressor(self.train_dataset, self.train_labels, scale=scale)

        self.logreg = LogisticRegressor(self.train_dataset, self.train_labels)

        gr = GradientDescentRunner(self.logreg.get_gradient(),
                                   len(self.samples[0]) + 1,
                                   self.logreg.get_error_function(),
                                   alpha=1e-8,
                                   max_iter=300)
        weights = gr.run()
        self.logreg.weights = weights

        mismatches = 0
        predictions = [
            self.logreg.predict(test_data) for test_data in self.test_dataset
        ]
        print "All predictions: ", predictions

        for test_data, label in zip(self.test_dataset, self.test_labels):
            prediction = self.logreg.predict(test_data)
            if prediction != label:
                mismatches += 1
                # print "Mismatch! Predicted ", prediction, ", True ", label

        total = len(self.test_dataset)
        print "total data: ", total
        print "total mismatch: ", mismatches
        print "percentage success: ", (
            100 - float(mismatches) / float(total) * 100), "%"
Example #3
0
def test_gradient_descent(samples, test_data, scikit_pred):
    # Smoke-test one gradient-descent step for linear regression and print
    # the resulting prediction for `test_data`.
    #
    # NOTE(review): `labels` is neither a parameter nor a local -- it must be
    # supplied by an enclosing/global scope or this raises NameError; confirm.
    # NOTE(review): `scikit_pred` is accepted but never used (presumably meant
    # for comparison against the printed prediction).
    reg = LinearRegressor(samples, labels)
    # Weight vector has one entry per feature plus a bias term.
    gr = GradientDescentRunner(reg.get_gradient(), len(samples[0]) + 1)
    # run_once() yields a 2-tuple; only its second element (the weights) is kept.
    _, weights = gr.run_once()
    reg.weights = weights
    p = reg.predict(test_data)
    print "Prediction from gradient descent: ", p
    def test_lets_just_look_at_the_outputs(self):
        """Run a single descent step, then pretty-print each training sample's
        predicted probability next to its true label for manual inspection.
        """
        runner = GradientDescentRunner(self.logreg.get_gradient(),
                                       len(self.samples[0]) + 1,
                                       self.logreg.get_error_function(),
                                       alpha=1e-8,
                                       max_iter=300)

        _, learned_weights = runner.run_once()
        self.logreg.weights = learned_weights

        probabilities = []
        for sample in self.train_dataset:
            probabilities.append(self.logreg.get_probability(sample))
        import pprint
        pprint.pprint(zip(probabilities, self.train_labels))
Example #5
0
    def test_lets_just_look_at_the_outputs(self):
        """Take one gradient-descent step and dump (probability, label) pairs
        for the training data so the outputs can be eyeballed.
        """
        descent = GradientDescentRunner(
            self.logreg.get_gradient(), len(self.samples[0]) + 1,
            self.logreg.get_error_function(), alpha=1e-8, max_iter=300)

        _, w = descent.run_once()
        self.logreg.weights = w

        import pprint
        probs = [self.logreg.get_probability(x) for x in self.train_dataset]
        pprint.pprint(zip(probs, self.train_labels))