Example #1
def check_digit_classification(tracker):
    import models, backend
    # nn, np, verify_node and trace_node come from the surrounding autograder module
    model = models.DigitClassificationModel()
    dataset = backend.DigitClassificationDataset(model)

    detected_parameters = None
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        output_node = model.run(inp_x)
        verify_node(output_node, 'node', (batch_size, 10),
                    "DigitClassificationModel.run()")
        trace = trace_node(output_node)
        assert inp_x in trace, "Node returned from DigitClassificationModel.run() does not depend on the provided input (x)"

        if detected_parameters is None:
            detected_parameters = [
                node for node in trace if isinstance(node, nn.Parameter)
            ]

        for node in trace:
            assert not isinstance(
                node, nn.Parameter
            ) or node in detected_parameters, (
                "Calling DigitClassificationModel.run() multiple times should always re-use the same parameters, but a new nn.Parameter object was detected"
            )

    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        loss_node = model.get_loss(inp_x, inp_y)
        verify_node(loss_node, 'loss', None,
                    "DigitClassificationModel.get_loss()")
        trace = trace_node(loss_node)
        assert inp_x in trace, "Node returned from DigitClassificationModel.get_loss() does not depend on the provided input (x)"
        assert inp_y in trace, "Node returned from DigitClassificationModel.get_loss() does not depend on the provided labels (y)"

        for node in trace:
            assert not isinstance(
                node, nn.Parameter
            ) or node in detected_parameters, (
                "DigitClassificationModel.get_loss() should not use additional parameters not used by DigitClassificationModel.run()"
            )

    tracker.add_points(2)  # Partial credit for passing sanity checks

    model.train(dataset)

    test_logits = model.run(nn.Constant(dataset.test_images)).data
    test_predicted = np.argmax(test_logits, axis=1)
    test_accuracy = np.mean(test_predicted == dataset.test_labels)

    accuracy_threshold = 0.97
    if test_accuracy >= accuracy_threshold:
        print("Your final test set accuracy is: {:%}".format(test_accuracy))
        tracker.add_points(4)
    else:
        print(
            "Your final test set accuracy ({:%}) must be at least {:.0%} to receive full points for this question"
            .format(test_accuracy, accuracy_threshold))
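Examples 1 and 4 call two helpers, trace_node and verify_node, that are defined elsewhere in the autograder and not shown on this page. Below is a minimal sketch of what they might look like, assuming each nn node exposes the nodes it was built from via a parents attribute and its computed value via data (both assumptions, not part of the snippets above):

def trace_node(node_to_trace):
    """Return the node plus every ancestor in its computation graph."""
    nodes = set()

    def visit(node):
        if node not in nodes:
            nodes.add(node)
            for parent in node.parents:  # assumed attribute on every node
                visit(parent)

    visit(node_to_trace)
    return nodes


def verify_node(node, expected_type, expected_shape, method_name):
    """Basic sanity checks on a node returned by a model method."""
    assert node is not None, "{} should return a node object".format(method_name)
    if expected_type == 'loss' or expected_shape is None:
        return  # loss nodes are scalars; there is no output shape to verify
    assert node.data.shape == expected_shape, (
        "{} should return a node with shape {}, got {}".format(
            method_name, expected_shape, node.data.shape))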
Example #2
def check_digit_classification(tracker):
    import models, backend
    model = models.DigitClassificationModel()
    assert model.get_data_and_monitor == backend.get_data_and_monitor_digit_classification, "DigitClassificationModel.get_data_and_monitor is not set correctly"
    assert model.learning_rate > 0, "DigitClassificationModel.learning_rate is not set correctly"
    model.train()

    stats = backend.get_stats(model)
    accuracy_threshold = 0.97
    if stats['dev_accuracy'] >= accuracy_threshold:
        tracker.add_points(1)
    else:
        print(
            "Your final validation accuracy ({:%}) must be at least {:.0%} to receive points for this question"
            .format(stats['dev_accuracy'], accuracy_threshold))
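Example 2 targets an older project interface in which the model itself carries its data/monitor hook and learning rate. A hypothetical skeleton that would pass the two assertions above (only the attribute names come from the checker; everything else is assumed):

import backend

class DigitClassificationModel(object):
    """Hypothetical skeleton matching the attribute checks in Example 2."""

    def __init__(self):
        # Compared by identity against the backend function in the checker
        self.get_data_and_monitor = backend.get_data_and_monitor_digit_classification
        # Any positive value satisfies the learning_rate assertion
        self.learning_rate = 0.1

    def train(self):
        # Real training loop omitted; the checker only requires that train()
        # completes and that backend.get_stats(model) then reports a
        # dev_accuracy of at least 0.97
        pass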
Example #3
def main():
    import models, backend
    model = models.PerceptronModel(3)
    dataset = backend.PerceptronDataset(model)
    model.train(dataset)

    model = models.RegressionModel()
    dataset = backend.RegressionDataset(model)
    model.train(dataset)

    model = models.DigitClassificationModel()
    dataset = backend.DigitClassificationDataset(model)
    model.train(dataset)

    model = models.LanguageIDModel()
    dataset = backend.LanguageIDDataset(model)
    model.train(dataset)
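Example 3 is a plain smoke-test driver rather than a grader: it instantiates each model with its dataset and trains it. Assuming it sits next to models.py and backend.py, the usual entry-point guard would run it directly:

if __name__ == "__main__":
    main()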
Example #4
def check_digit_classification(tracker):
    import models, backend
    import signal
    from sys import platform
    # the SIGALRM callback `handler` is assumed module-level (see the sketch after this example)
    model = models.DigitClassificationModel()
    dataset = backend.DigitClassificationDataset(model)

    detected_parameters = None
    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        output_node = model.run(inp_x)
        verify_node(output_node, 'node', (batch_size, 10), "DigitClassificationModel.run()")
        trace = trace_node(output_node)
        assert inp_x in trace, "Node returned from DigitClassificationModel.run() does not depend on the provided input (x)"

        if detected_parameters is None:
            detected_parameters = [node for node in trace if isinstance(node, nn.Parameter)]

        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "Calling DigitClassificationModel.run() multiple times should always re-use the same parameters, but a new nn.Parameter object was detected")

    for batch_size in (1, 2, 4):
        inp_x = nn.Constant(dataset.x[:batch_size])
        inp_y = nn.Constant(dataset.y[:batch_size])
        loss_node = model.get_loss(inp_x, inp_y)
        verify_node(loss_node, 'loss', None, "DigitClassificationModel.get_loss()")
        trace = trace_node(loss_node)
        assert inp_x in trace, "Node returned from DigitClassificationModel.get_loss() does not depend on the provided input (x)"
        assert inp_y in trace, "Node returned from DigitClassificationModel.get_loss() does not depend on the provided labels (y)"

        for node in trace:
            assert not isinstance(node, nn.Parameter) or node in detected_parameters, (
                "DigitClassificationModel.get_loss() should not use additional parameters not used by DigitClassificationModel.run()")

    tracker.add_points(0.5) # Partial credit for passing sanity checks

    time_out = False
    if platform == "linux" or platform == "linux2" or platform == "darwin":
        # Linux / macOS: enforce a 10-minute timeout on training via SIGALRM
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(600)

        try:
            model.train(dataset)
        except Exception as exc:
            print(exc)
            time_out = True
        signal.alarm(0)
    elif platform == "win32":
        # Windows: SIGALRM is not available, so training runs without a timeout
        print("Using Windows, no automatic timeout included")
        model.train(dataset)

    if not time_out:
        test_logits = model.run(nn.Constant(dataset.test_images)).data
        test_predicted = np.argmax(test_logits, axis=1)
        test_accuracy = np.mean(test_predicted == dataset.test_labels)

        accuracy_threshold = 0.97
        if test_accuracy >= accuracy_threshold:
            print("Your final test set accuracy is: {:%}".format(test_accuracy))
            tracker.add_points(1.5)
        else:
            print("Your final test set accuracy ({:%}) must be at least {:.0%} to receive full points for this question".format(test_accuracy, accuracy_threshold))
    else:
        print("Your training timed out, therefore no test set accuracy is computed or reported.")