Example #1
    X_train = pre_process_images(X_train, mean, std)

    X_val = pre_process_images(X_val, mean, std)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

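    ## RUN WITHOUT MOMENTUM (baseline; use_momentum is presumably False here)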
    model = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model, learning_rate, batch_size, shuffle_data, early_stop,
        X_train, Y_train, X_val, Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    ## RUN WITH MOMENTUM
    use_momentum = True
    model_momentum = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer_momentum = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model_momentum, learning_rate, batch_size, shuffle_data, early_stop,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_momentum, val_history_momentum = trainer_momentum.train(
        num_epochs)
Example #2
    X_train, Y_train, X_val, Y_val = utils.load_full_mnist()
    X_train = pre_process_images(X_train)
    X_val = pre_process_images(X_val)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

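    # Baseline model, trained before the Task 3 improvement below.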
    model = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    ### Task 3. ###
    # Add improved weight init
    use_improved_weight_init = True
    model = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_w, val_history_w = trainer.train(num_epochs)
Example #3
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Example created in assignment text - Comparing with and without shuffling.

    # FIRST CASE (weights)
    use_improved_weight_init = True

    model_weights = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                                 use_improved_weight_init)
    trainer_weights = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_weights,
        learning_rate,
        batch_size,
        shuffle_data,
Example #4
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )

    train_history_single, val_history_single = trainer.train(num_epochs)
    print("---------", neurons_per_layer, "----------")
    print("Final Train Cross Entropy Loss:",
          cross_entropy_loss(Y_train, model.forward(X_train)))
    print("Final Validation Cross Entropy Loss:",
          cross_entropy_loss(Y_val, model.forward(X_val)))
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val, model))

    # Ten hidden layers
    neurons_per_layer = [64] * 10
    neurons_per_layer.append(10)
    print(neurons_per_layer)
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
Example #5
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Example created in assignment text - Comparing with and without shuffling.
    learning_rate = .02
    shuffle_data = True

    use_improved_weight_init = True
    use_improved_sigmoid = True
    use_momentum = True

    model_shuffle = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                                 use_improved_weight_init)
    trainer_shuffle = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_shuffle,
Example #6
    X_train = pre_process_images(X_train)
    X_val = pre_process_images(X_val)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

    # 32 neurons per layer
    model_a = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer_a = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model_a, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_a, val_history_a = trainer_a.train(num_epochs)

    # 128 neurons per layer
    neurons_per_layer = [128, 10]
    model_b = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer_b = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model_b, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_b, val_history_b = trainer_b.train(num_epochs)

Example #7
    # 64 hidden units
    model64 = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                           use_improved_weight_init)
    trainer64 = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model64,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history64, val_history64 = trainer64.train(num_epochs)

    # 32 hidden units
    neurons_per_layer = [32, 10]
    model32 = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                           use_improved_weight_init)
    trainer32 = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model32,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
Example #8
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Two hidden layers
    neurons_per_layer = [59, 59, 10]
    model_two_hidden_layers = SoftmaxModel(neurons_per_layer,
                                           use_improved_sigmoid,
                                           use_improved_weight_init)
    trainer_two_hidden_layers = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_two_hidden_layers,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
Example #9
    model_two = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                             use_improved_weight_init)
    trainer_two = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_two,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )

    train_history, val_history = trainer.train(num_epochs)
    train_history_two, val_history_two = trainer_two.train(num_epochs)

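    # Compare training (left) and validation (right) accuracy for the two models.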
    plt.figure(figsize=(20, 10))
    plt.subplot(1, 2, 1)
    utils.plot_loss(train_history["accuracy"], "Base model task 3 ")
    utils.plot_loss(train_history_two["accuracy"], "Ten layer model")
    plt.ylim([0.85, 1.0])
    plt.ylabel("Training Accuracy")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.ylim([0.85, 1.0])
    utils.plot_loss(val_history["accuracy"], "Base model task 3")
    utils.plot_loss(val_history_two["accuracy"], "Ten layer model")
    plt.ylabel("Accuracy")
    plt.legend()
Example #10
    neurons_per_layer = [64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 10]
    model_64_layers = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                                   use_improved_weight_init)
    trainer_64_layers = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_64_layers,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history_64_layers, val_history_64_layers = trainer_64_layers.train(
        num_epochs)

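    # Compare loss curves: 10 hidden layers vs. the 2-hidden-layer baseline.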
    plt.figure(figsize=(20, 12))
    plt.subplot(1, 2, 1)

    plt.ylim([0.00, 1.00])
    utils.plot_loss(train_history_64_layers["loss"],
                    "Training Loss 10 hidden layers",
                    npoints_to_average=10)
    utils.plot_loss(val_history_64_layers["loss"],
                    "Validation Loss 10 hidden layers")
    plt.ylabel("Cross Entropy Loss")
    utils.plot_loss(train_history["loss"],
                    "Training Loss 2 hidden layers",
                    npoints_to_average=10)
Example #11
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Example created in assignment text - Comparing with and without shuffling.
    # YOU CAN DELETE EVERYTHING BELOW!
    use_improved_weight_init = True
    use_improved_sigmoid = True
    use_momentum = True
    learning_rate = 0.02

    new_model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                             use_improved_weight_init)
    new_trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        new_model,
        learning_rate,
Example #12
    ###### Naked model - no improvements ######
    model_naked = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                               use_improved_weight_init)
    trainer_naked = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_naked,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history_naked, val_history_naked = trainer_naked.train(num_epochs)

    print("just basic")
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model_naked))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val,
                                                     model_naked))
    print("Final Validation Cross Entropy Loss:",
          cross_entropy_loss(Y_val, model_naked.forward(X_val)))
    print("Final Train Cross Entropy Loss:",
          cross_entropy_loss(Y_train, model_naked.forward(X_train)))

    ###### 1st model - improved weights ######
    use_improved_sigmoid = False
    use_improved_weight_init = True
    use_momentum = False
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
Example #13
    sd = X_train.std()
    X_train = pre_process_images(X_train, mean, sd)
    X_val = pre_process_images(X_val, mean, sd)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

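    # Baseline model, before enabling improved weight init below.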
    model = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)


    # Improved weights
    use_improved_weight_init = True

    model_improved_w = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer_improved_w = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model_improved_w, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_improved_w, val_history_improved_w = trainer_improved_w.train(
Example #14
    X_train = pre_process_images(X_train)
    X_val = pre_process_images(X_val)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

    print("Training standard model:\n")
    model = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

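    # Report final losses and accuracies for the standard model.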
    print("Final Train Cross Entropy Loss:",
        cross_entropy_loss(Y_train, model.forward(X_train)))
    print("Final Validation Cross Entropy Loss:",
        cross_entropy_loss(Y_val, model.forward(X_val)))
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val, model))
    print("\n\n")

    # Example created in assignment text - Comparing with and without shuffling.
    # YOU CAN DELETE EVERYTHING BELOW!

    # model with improved sigmoid
    use_improved_sigmoid = True
Example #15
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    print(
        f'Training the original model with {neurons_per_layer[0]} neurons in the hidden layer'
    )
    train_history, val_history = trainer.train(num_epochs)

    # Task 4
    # TASK 4 - a) 32 neurons for hidden layer
    neurons_per_layer = [32, 10]
    model_32neu = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                               use_improved_weight_init)
    trainer_32neu = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_32neu,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
Example #16
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    if compare_improved_weight_init:
        # Task 3a
        # Comparing baseline model to improved_weight_init
        print("Comparing baseline model to improved_weight_init")
        use_improved_weight_init = True

        model_improved_weight = SoftmaxModel(neurons_per_layer,
                                             use_improved_sigmoid,
                                             use_improved_weight_init)
        trainer_improved_weight = SoftmaxTrainer(
            momentum_gamma,
            use_momentum,
            model_improved_weight,
            learning_rate,
Example #17
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history_64, val_history_64 = trainer.train(num_epochs)
    print("\n\n --------------------------")
    print(neurons_per_layer)
    print("Final Train Cross Entropy Loss:",
          cross_entropy_loss(Y_train, model.forward(X_train)))
    print("Final Validation Cross Entropy Loss:",
          cross_entropy_loss(Y_val, model.forward(X_val)))
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val, model))
    print("\n\n --------------------------")

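    # Repeat with a smaller hidden layer (32 units) for comparison.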
    neurons_per_layer = [32, 10]

    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
Example #18
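    # Baseline: all improvements disabled (cf. train_history_nothing).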
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )

    train_history_nothing, val_history_nothing = trainer.train(num_epochs)

    # Adding improved weights
    use_improved_weight_init = True
    use_improved_sigmoid = False
    use_momentum = False

    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
Example #19
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )

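    # Time the full training run (assumes "import time" earlier in the file).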
    start = time.time()
    train_history, val_history = trainer.train(num_epochs)
    end = time.time()

    print("Elapsed training time (s): ", end - start)

    # Plot accuracy
    plt.figure(figsize=(20, 12))
    plt.ylim([0.90, 1.01])
    utils.plot_loss(train_history["accuracy"], "Training accuracy")
    utils.plot_loss(val_history["accuracy"], "Validation accuracy")
    plt.xlabel("Number of Training Steps")
    plt.ylabel("Accuracy")
    plt.legend()
    plt.show()

    # Plot loss
Example #20
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Example created in assignment text - Comparing with and without shuffling.
    # YOU CAN DELETE EVERYTHING BELOW!
    use_improved_weight_init = True
    """
    model_improved_weights = SoftmaxModel(
        neurons_per_layer,
        use_improved_sigmoid,
        use_improved_weight_init)
    trainer_improved_weights = SoftmaxTrainer(
        momentum_gamma, use_momentum,
        model_improved_weights, learning_rate, batch_size, shuffle_data,
        X_train, Y_train, X_val, Y_val,
    )
    train_history_improved_weights, val_history_improved_weights = trainer_improved_weights.train(
Example #21
    print("Training model:\n")
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    plt.figure(figsize=(20, 12))
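    # Plot loss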
    plt.subplot(1, 2, 1)
    plt.ylim([0., .5])
    utils.plot_loss(train_history["loss"],
                    "Training Loss",
                    npoints_to_average=10)
    utils.plot_loss(val_history["loss"], "Validation Loss")
    plt.legend()
    plt.xlabel("Number of Training Steps")
    plt.ylabel("Cross Entropy Loss - Average")
    # Plot accuracy
    plt.subplot(1, 2, 2)
    plt.ylim([0.90, .99])
    utils.plot_loss(train_history["accuracy"], "Training Accuracy")
Example #22
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    neurons_per_layer = [64, 64, 10]
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
Example #23
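    # Baseline model, before adding improved weights below.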
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    # Adding improved weights
    use_improved_weight_init = True

    model_weights = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                                 use_improved_weight_init)
    trainer_weights = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model_weights,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
Example #24
        model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                             use_improved_weight_init)
        trainer = SoftmaxTrainer(
            momentum_gamma,
            use_momentum,
            model,
            learning_rate,
            batch_size,
            shuffle_data,
            X_train,
            Y_train,
            X_val,
            Y_val,
        )
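        # Store this model's histories under model_name for the plots below.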
        current_train_history, current_val_history = trainer.train(num_epochs)
        train_history[model_name] = current_train_history
        val_history[model_name] = current_val_history

    plt.figure(figsize=(16, 10))
    plt.subplot(1, 2, 1)
    plt.ylim([0, .5])
    for model_name in train_history.keys():
        utils.plot_loss(train_history[model_name]["loss"],
                        model_name,
                        npoints_to_average=10)
    for model_name in train_history.keys():
        utils.plot_loss(val_history[model_name]["loss"],
                        f'Validation {model_name[10:]}',
                        npoints_to_average=10)
    plt.xlabel("Number of Training Steps")
Example #25
    ###Model with 32 neurons in hidden layer###
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    print("32 neurons")
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val, model))
    print("Final Validation Cross Entropy Loss:",
          cross_entropy_loss(Y_val, model.forward(X_val)))

    ###Model with 128 neurons in hidden layer###
    neurons_per_layer = [128, 10]
    model1 = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                          use_improved_weight_init)
    trainer1 = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model1,
Example #26
    use_momentum = True
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )
    train_history, val_history = trainer.train(num_epochs)

    ###### 2nd model - network from task 4d ######
    neurons_per_layer = [60, 60, 10]
    model1 = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                          use_improved_weight_init)
    trainer1 = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model1,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
Example #27
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,
        use_momentum,
        model,
        learning_rate,
        batch_size,
        shuffle_data,
        X_train,
        Y_train,
        X_val,
        Y_val,
    )

    train_history_single, val_history_single = trainer.train(num_epochs)
    print("---------", neurons_per_layer, "----------")
    print("Final Train Cross Entropy Loss:",
          cross_entropy_loss(Y_train, model.forward(X_train)))
    print("Final Validation Cross Entropy Loss:",
          cross_entropy_loss(Y_val, model.forward(X_val)))
    print("Train accuracy:", calculate_accuracy(X_train, Y_train, model))
    print("Validation accuracy:", calculate_accuracy(X_val, Y_val, model))

    # Two hidden layers
    neurons_per_layer = [59, 59, 10]

    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
    trainer = SoftmaxTrainer(
        momentum_gamma,