Example #1
def main():
    import models
    # Assumption: the dataset classes come from the project's backend
    # module, as they do in Example #3.
    from backend import (PerceptronDataset, RegressionDataset,
                         FashionClassificationDataset)

    model = models.PerceptronModel(3)
    dataset = PerceptronDataset(model)
    model.train(dataset)

    model = models.RegressionModel()
    dataset = RegressionDataset(model)
    model.train(dataset)

    model = models.FashionClassificationModel()
    dataset = FashionClassificationDataset(model)
    model.train(dataset)
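
Example #3 below spells out the interface these calls exercise: a weight vector of shape (1, dimensions), a run() method returning a dot-product score node, a get_prediction() returning 1 or -1, and a train() loop. As a reference point, here is a minimal sketch of what models.PerceptronModel might look like. It assumes the course's nn framework (nn.Parameter, nn.DotProduct, nn.as_scalar, Parameter.update) and the dataset's iterate_once() method, so treat it as an illustration rather than the project's actual solution:

import nn

class PerceptronModel(object):
    def __init__(self, dimensions):
        # A 1 x dimensions weight vector, matching the shape that
        # verify_node() checks in Example #3.
        self.w = nn.Parameter(1, dimensions)

    def get_weights(self):
        return self.w

    def run(self, x):
        # A node of shape (1, 1) holding the score: the dot product of the
        # input features and the weights.
        return nn.DotProduct(x, self.w)

    def get_prediction(self, x):
        # Points exactly on the decision boundary (score == 0) map to +1.
        return 1 if nn.as_scalar(self.run(x)) >= 0 else -1

    def train(self, dataset):
        # Classic perceptron rule: sweep the data until a full pass makes no
        # mistakes, nudging the weights by y * x on each misclassified point.
        converged = False
        while not converged:
            converged = True
            for x, y in dataset.iterate_once(1):
                if self.get_prediction(x) != nn.as_scalar(y):
                    self.w.update(x, nn.as_scalar(y))
                    converged = False

The update w <- w + y * x is the standard perceptron rule, and it matches the expected weights computed in Example #3's sanity check (orig_weights - point when y = -1).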
Example #2
def main():
    import models
    # Assumption: the dataset classes come from the project's backend
    # module, as they do in Example #3.
    from backend import (PerceptronDataset, RegressionDataset,
                         DigitClassificationDataset, LanguageIDDataset)

    model = models.PerceptronModel(3)
    dataset = PerceptronDataset(model)
    model.train(dataset)

    model = models.RegressionModel()
    dataset = RegressionDataset(model)
    model.train(dataset)

    model = models.DigitClassificationModel()
    dataset = DigitClassificationDataset(model)
    model.train(dataset)

    model = models.LanguageIDModel()
    dataset = LanguageIDDataset(model)
    model.train(dataset)
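
Example #3 also shows that backend.Dataset can be constructed directly from numpy arrays, which is handy for quick smoke tests outside the packaged datasets. A minimal sketch (the point values below are made up):

import numpy as np

import backend
import models

# 100 copies of a single made-up 3-D point, all labeled -1, mirroring the
# weight-update sanity check in Example #3.
x = np.tile(np.array([[1.0, -2.0, 0.5]]), (100, 1))
y = -np.ones((100, 1))
smoke_dataset = backend.Dataset(x=x, y=y)

model = models.PerceptronModel(3)
model.train(smoke_dataset)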
Example #3
import numpy as np

import backend
import nn


# verify_node() is a helper defined elsewhere in the autograder; it checks
# that a value is a node of the given kind and shape.
def check_perceptron(tracker):
    import models

    print("Sanity checking perceptron...")
    np_random = np.random.RandomState(0)
    # Check that the perceptron weights are initialized to a vector with `dimensions` entries.
    for dimensions in range(1, 10):
        p = models.PerceptronModel(dimensions)
        p_weights = p.get_weights()
        verify_node(p_weights, 'parameter', (1, dimensions),
                    "PerceptronModel.get_weights()")

    # Check that run returns a node, and that the score in the node is correct
    for dimensions in range(1, 10):
        p = models.PerceptronModel(dimensions)
        p_weights = p.get_weights()
        verify_node(p_weights, 'parameter', (1, dimensions),
                    "PerceptronModel.get_weights()")
        point = np_random.uniform(-10, 10, (1, dimensions))
        score = p.run(nn.Constant(point))
        verify_node(score, 'node', (1, 1), "PerceptronModel.run()")
        calculated_score = nn.as_scalar(score)
        expected_score = float(
            np.dot(point.flatten(), p_weights.data.flatten()))
        assert np.isclose(calculated_score, expected_score), (
            "The score computed by PerceptronModel.run() ({:.4f}) does not match the expected score ({:.4f})"
            .format(calculated_score, expected_score))

    # Check that get_prediction returns the correct values, including the
    # case when a point lies exactly on the decision boundary
    for dimensions in range(1, 10):
        p = models.PerceptronModel(dimensions)
        random_point = np_random.uniform(-10, 10, (1, dimensions))
        for point in (random_point, np.zeros_like(random_point)):
            prediction = p.get_prediction(nn.Constant(point))
            assert prediction == 1 or prediction == -1, (
                "PerceptronModel.get_prediction() should return 1 or -1, not {}"
                .format(prediction))

            # np.asscalar() was removed in NumPy 1.23; .item() is the
            # supported equivalent.
            expected_prediction = np.where(
                np.dot(point, p.get_weights().data.T) >= 0, 1, -1).item()
            assert prediction == expected_prediction, (
                "PerceptronModel.get_prediction() returned {}; expected {}".
                format(prediction, expected_prediction))

    tracker.add_points(2)  # Partial credit for passing sanity checks

    print("Sanity checking perceptron weight updates...")

    # Test weight updates. This involves constructing a dataset that
    # requires 0 or 1 updates before convergence, and testing that weight
    # values change as expected. Note that (multiplier < -1 or multiplier > 1)
    # must be true for the testing code to be correct.
    dimensions = 2
    for multiplier in (-5, -2, 2, 5):
        p = models.PerceptronModel(dimensions)
        orig_weights = p.get_weights().data.reshape((1, dimensions)).copy()
        if np.abs(orig_weights).sum() == 0.0:
            # This autograder test doesn't work when weights are exactly zero
            continue
        point = multiplier * orig_weights
        sanity_dataset = backend.Dataset(x=np.tile(point, (500, 1)),
                                         y=np.ones((500, 1)) * -1.0)
        p.train(sanity_dataset)
        new_weights = p.get_weights().data.reshape((1, dimensions))

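        # Why these expectations hold: point = multiplier * orig_weights, so
        # the initial score is multiplier * ||w||^2. For multiplier < 0 the
        # score is negative, the point is already classified as y = -1, and
        # no update happens. For multiplier > 1 the point is misclassified
        # once; a single update w <- w + y*x = w - point makes the new score
        # (1 - multiplier) * multiplier * ||w||^2 < 0, so training stops
        # after exactly one update.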
        if multiplier < 0:
            expected_weights = orig_weights
        else:
            expected_weights = orig_weights - point

        if not np.all(new_weights == expected_weights):
            print()
            print("Initial perceptron weights were: [{:.4f}, {:.4f}]".format(
                orig_weights[0, 0], orig_weights[0, 1]))
            print("All data points in the dataset were identical and had:")
            print("    x = [{:.4f}, {:.4f}]".format(point[0, 0], point[0, 1]))
            print("    y = -1")
            print("Your trained weights were: [{:.4f}, {:.4f}]".format(
                new_weights[0, 0], new_weights[0, 1]))
            print("Expected weights after training: [{:.4f}, {:.4f}]".format(
                expected_weights[0, 0], expected_weights[0, 1]))
            print()
            assert False, "Weight update sanity check failed"

    print("Sanity checking complete. Now training perceptron")
    model = models.PerceptronModel(3)
    dataset = backend.PerceptronDataset(model)

    model.train(dataset)
    backend.maybe_sleep_and_close(1)

    assert dataset.epoch != 0, "Perceptron code never iterated over the training data"

    accuracy = np.mean(
        np.where(
            np.dot(dataset.x,
                   model.get_weights().data.T) >= 0.0, 1.0, -1.0) == dataset.y)
    if accuracy < 1.0:
        print(
            "The weights learned by your perceptron correctly classified {:.2%} of training examples"
            .format(accuracy))
        print(
            "To receive full points for this question, your perceptron must converge to 100% accuracy"
        )
        return

    tracker.add_points(4)
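
Finally, a hedged sketch of driving this check by hand. check_perceptron() only ever calls tracker.add_points(), so any object exposing that method will do; the Tracker class below is a hypothetical stand-in, not the autograder's real tracker:

class Tracker(object):
    # Hypothetical stand-in: it only needs the add_points() method that
    # check_perceptron() calls.
    def __init__(self):
        self.points = 0

    def add_points(self, amount):
        self.points += amount

if __name__ == '__main__':
    tracker = Tracker()
    check_perceptron(tracker)
    # 2 points for the sanity checks plus 4 for converging to 100% accuracy.
    print("check_perceptron awarded {} / 6 points".format(tracker.points))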