Example No. 1
# Network, create_ELU_model_logistic_output and load_weights are assumed
# to be imported from the surrounding project.
from typing import Optional


def test_model(model_name: str,
               hidden_layers: int,
               nodes_per_layer: int,
               elu_alpha: float,
               epoch_number: Optional[int] = None):

    network = Network()
    create_ELU_model_logistic_output(network, 2, 1, hidden_layers,
                                     nodes_per_layer, elu_alpha)

    epoch = load_weights(network, model_name, epoch_number)

    print(f"Loaded epoch {epoch} of {model_name}. Ready to test:")

    while True:
        a = float(input('Enter a: '))
        b = float(input('Enter b: '))

        [output] = network.predict([a, b])

        if output >= 0.5:
            print(
                f"Network is {round(output * 100, 2)}% sure that {a} is perfectly divisible by {b}"
            )
        else:
            print(
                f"Network is {round(100 - output * 100, 2)}% sure that {a} is NOT perfectly divisible by {b}"
            )
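
A hypothetical invocation of the tester above; the model name and hyperparameters below are placeholders, not values from the original project:

# Hypothetical usage; 'divisible_check', 2, 8 and 1.0 are placeholder values.
test_model('divisible_check', hidden_layers=2, nodes_per_layer=8,
           elu_alpha=1.0)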
Example No. 2
from typing import Optional


def test_model(model_name: str,
               hidden_layers: int,
               nodes_per_layer: int,
               elu_alpha: float,
               epoch_number: Optional[int] = None):
    network = Network()
    create_elu_logistic_layers_model_linear_output(network, 1, 1,
                                                   hidden_layers,
                                                   nodes_per_layer, elu_alpha)

    epoch = load_weights(network, model_name, epoch_number)

    print(f"Loaded epoch {epoch} of {model_name}. Ready to test:")

    while True:
        x = float(input('Enter x: '))

        [output] = network.predict([x])

        print(f'>> f(x) = {output}')
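
A hypothetical invocation of this single-input variant, showing the optional epoch pin; all values below are placeholders:

# Hypothetical usage; the model name, hyperparameters and epoch number
# are placeholder values.
test_model('cubic_estimator', hidden_layers=3, nodes_per_layer=16,
           elu_alpha=1.0, epoch_number=500)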
Example No. 3
from typing import Optional


def train_model(model_name: str,
                hidden_layers: int,
                nodes_per_layer: int,
                elu_alpha: float,
                stop_after_epoch: int,
                save_every_n_epochs: int = 1,
                a: float = 1,
                b: float = 0,
                c: float = 0,
                d: float = 0,
                training_min: float = -5,
                training_max: float = 3,
                val_min: float = -3,
                val_max: float = 5,
                training_data_size: int = 800,
                val_data_size: int = 200,
                training_iterations_per_epoch: int = 80,
                val_iterations_per_epoch: int = 40,
                step_size: float = 0.0005,
                momentum: float = 0.1,
                decay: float = 0.00005,
                log_level: int = 0,
                pause_after_iter: Optional[float] = None):
    """
    Scaffolds the network model, loads previous weights (if any), trains, validates and saves new weights & losses

    :param model_name: Used to save & load the model

    :param hidden_layers: The number of hidden layers.

    :param nodes_per_layer: The number of fully connected nodes per layer

    :param elu_alpha: The alpha constant used in the ELU activation function for the ELU nodes.

    :param stop_after_epoch: Stop training once this epoch number has been trained

    :param save_every_n_epochs: Save only every nth epoch (i.e. whenever epoch `mod` n == 0)

    :param a: Coefficient of x^3 in the cubic a*x^3 + b*x^2 + c*x + d being estimated.

    :param b: Coefficient of x^2.

    :param c: Coefficient of x.

    :param d: Constant term.

    :param training_min: Minimum value of training data

    :param training_max: Maximum value of training data

    :param val_min: Minimum value of the validation data

    :param val_max: Maximum value of the validation data

    :param training_data_size: Number of training data samples to generate.

    :param val_data_size: Number of validation data samples to generate.

    :param training_iterations_per_epoch: Number of samples to train on per epoch.

    :param val_iterations_per_epoch: Number of samples to perform validation on per epoch.

    :param step_size:
            The multiple of the d(loss)/d(weight) gradient by which each weight is nudged per iteration.

    :param momentum:
            How much of the previous weight update is carried over into the current weight update.
            (1 --> 100%, 0 --> 0%)
            Momentum helps the network avoid getting stuck in local minima.
            (Imagine a ball rolling down the loss curve: if there is a pothole in the curve,
            momentum may carry the ball through it instead of letting it settle.)

    :param decay:
            The fraction of the current weight that is subtracted at each update.
            (1 --> 100%, 0 --> 0%)
            This makes the weights gravitate towards zero, so they won't explode to NaN.
            (The update-rule sketch after this example shows how step_size, momentum and decay combine.)
    :param log_level: Verbosity of logging during training (0 = quiet).

    :param pause_after_iter: Optional pause between iterations (e.g. to inspect the output).
    """
    print('Training cubic estimation model')

    training_data = generate_data_cubic_equation(training_data_size,
                                                 training_min, training_max, a,
                                                 b, c, d)
    val_data = generate_data_cubic_equation(val_data_size, val_min, val_max, a,
                                            b, c, d)
    network = Network()
    create_elu_logistic_layers_model_linear_output(network, 1, 1,
                                                   hidden_layers,
                                                   nodes_per_layer, elu_alpha)
    # Load the most recently trained epoch
    # If no trained weights exist, start training from epoch 1
    prev_epoch = load_weights(network, model_name) or 0

    if prev_epoch >= stop_after_epoch:
        print(
            f"WARN: Not training anything! Epoch {stop_after_epoch} was already trained."
        )

    curr_epoch = prev_epoch + 1

    while curr_epoch <= stop_after_epoch:
        print(f"\nTraining new epoch: {curr_epoch} / {stop_after_epoch}")
        train_one_epoch(network,
                        training_data,
                        val_data,
                        model_name,
                        curr_epoch,
                        step_size,
                        momentum,
                        decay,
                        training_iterations_per_epoch,
                        val_iterations_per_epoch,
                        save_weights=curr_epoch % save_every_n_epochs == 0,
                        log_level=log_level,
                        pause_after_iter=pause_after_iter)
        curr_epoch += 1
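
The step_size, momentum and decay parameters documented above imply a weight update of roughly the following shape. This is a minimal sketch with hypothetical names, not the project's actual train_one_epoch internals:

# Sketch of the update rule the docstring describes; all names are hypothetical.
def sgd_update(weight, grad, prev_update, step_size, momentum, decay):
    # Momentum carries over a fraction of the previous update.
    update = momentum * prev_update - step_size * grad
    # Decay subtracts a fraction of the weight, pulling it towards zero.
    weight = weight * (1.0 - decay) + update
    return weight, update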
Example No. 4
import network
import utilities

if __name__ == '__main__':

	''' 0. Set Hyperparameters '''

	num_epochs = 100
	batch_size = 64
	shape = 100
	folder_path = 'cs-ioc5008-hw1'

	''' 1. Load and Preprocess data '''

	classes = utilities.load_classes(folder_path)						# load classes
	generators = utilities.load_data(folder_path, batch_size, shape, classes)		# load and augment data
	testX = utilities.load_test(folder_path, shape)						# load test data

	class_weight = utilities.weight_classes(folder_path, classes)		# weight classes so that minority classes
																		# get extra emphasis during training
																		# (see the sketch after this script)

	''' 2. Build and Fit the model '''

	model = network.build_model(len(classes), shape)					# build model using transfer learning

	model = network.fit_model(generators, model, num_epochs, batch_size, class_weight)	# fit model

	''' 3. Restore best model and output predictions '''

	model = network.load_weights(model, 'weights.best.hdf5')				# load best model found

	predictions = model.predict(testX)				# compute output predictions for Kaggle challenge
	utilities.output_predictions(predictions, classes, testX)
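
A minimal sketch of how such class weights are commonly derived (inverse class frequency). The directory layout and function below are assumptions for illustration, not the actual utilities.weight_classes implementation:

import os

# Hypothetical sketch: inverse-frequency class weights of the kind that
# Keras' fit(..., class_weight=...) accepts, keyed by class index.
def inverse_frequency_weights(train_dir, classes):
    counts = {c: len(os.listdir(os.path.join(train_dir, c))) for c in classes}
    total = sum(counts.values())
    # Rarer classes get a larger weight so their errors count for more.
    return {i: total / (len(classes) * counts[c])
            for i, c in enumerate(classes)}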
Example No. 5
def train_model(model_name: str,
                hidden_layers: int,
                nodes_per_layer: int,
                elu_alpha: float,
                stop_after_epoch: int,
                save_every_n_epochs: int = 1,
                training_min: int = -1,
                training_max: int = 1,
                training_max_mult: int = 100,
                val_min: int = -2,
                val_max: int = 2,
                val_max_mult: int = 200,
                bidirectional: bool = True,
                training_data_size: int = 800,
                val_data_size: int = 200,
                training_iterations_per_epoch: int = 80,
                val_iterations_per_epoch: int = 40,
                step_size: float = 0.0005,
                momentum: float = 0.1,
                decay: float = 0.0005):
    """
    Scaffolds the network model, loads previous weights (if any), trains, validates and saves new weights & losses

    :param model_name: Used to save & load the model

    :param hidden_layers: The number of hidden layers.

    :param nodes_per_layer: The number of fully connected nodes per layer

    :param elu_alpha: The alpha constant used in the ELU activation function for the ELU nodes.

    :param stop_after_epoch: Stop training once this epoch number has been trained

    :param save_every_n_epochs: Save only every nth epoch (i.e. whenever epoch `mod` n == 0)

    :param training_min: Minimum value of training data

    :param training_max: Maximum value of training data

    :param training_max_mult:
            Maximum integer value of c in the training data where x = c * y & {x, y} are the two input nodes.

    :param val_min: Minimum value of the validation data

    :param val_max: Maximum value of the validation data

    :param val_max_mult:
            Maximum integer value of c in the validation data where x = c * y & {x, y} are the two input nodes.

    :param bidirectional:
            If True, either x or y being perfectly divisible by the other yields a [1] output.
            Otherwise, only x divisible by y counts, not the other way around.
            This applies to both training and validation data (see the labelling sketch after this example).

    :param training_data_size: Number of training data samples to generate.

    :param val_data_size: Number of validation data samples to generate.

    :param training_iterations_per_epoch: Number of samples to train on per epoch.

    :param val_iterations_per_epoch: Number of samples to perform validation on per epoch.

    :param step_size:
            The multiple of the d(loss)/d(weight) gradient by which each weight is nudged per iteration.

    :param momentum:
            How much of the previous weight update is carried over into the current weight update.
            (1 --> 100%, 0 --> 0%)
            Momentum helps the network avoid getting stuck in local minima.
            (Imagine a ball rolling down the loss curve: if there is a pothole in the curve,
            momentum may carry the ball through it instead of letting it settle.)

    :param decay:
            The fraction of the current weight that is subtracted at each update.
            (1 --> 100%, 0 --> 0%)
            This makes the weights gravitate towards zero, so they won't explode to NaN.
            (See the update-rule sketch after Example No. 3.)
    """
    training_data = generate_data_divisible_check(training_data_size,
                                                  training_min, training_max,
                                                  training_max_mult,
                                                  bidirectional)
    val_data = generate_data_divisible_check(val_data_size, val_min, val_max,
                                             val_max_mult, bidirectional)
    network = Network()
    create_ELU_model_logistic_output(network, 2, 1, hidden_layers,
                                     nodes_per_layer, elu_alpha)
    # Load most recently trained epoch
    # If no trained weights exist, start training from epoch 1
    prev_epoch = load_weights(network, model_name) or 0

    if prev_epoch >= stop_after_epoch:
        print(
            f"WARN: Not training anything! Epoch {stop_after_epoch} was already trained."
        )

    curr_epoch = prev_epoch + 1

    while curr_epoch <= stop_after_epoch:
        print(f"Training new epoch: {curr_epoch} / {stop_after_epoch}")
        train_one_epoch(network,
                        training_data,
                        val_data,
                        model_name,
                        curr_epoch,
                        step_size,
                        momentum,
                        decay,
                        training_iterations_per_epoch,
                        val_iterations_per_epoch,
                        save_weights=curr_epoch % save_every_n_epochs == 0)
        curr_epoch += 1
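
A hypothetical sketch of the labelling rule implied by the bidirectional flag; this is not the project's generate_data_divisible_check:

# Hypothetical labelling rule for integer inputs; illustrates only what
# the bidirectional flag controls.
def divisible_label(x: int, y: int, bidirectional: bool) -> int:
    forward = y != 0 and x % y == 0    # x is perfectly divisible by y
    backward = x != 0 and y % x == 0   # y is perfectly divisible by x
    return int(forward or (bidirectional and backward))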