Example #1
    def test_model_training(self):
        # Create nn object
        nn_object = Sequential()
        nn_object.add_layer(
            DenseLayer(units=16,
                       activation="sigmoid",
                       input_dimension=self.input_data_dimension))
        nn_object.add_layer(
            DenseLayer(units=self.output_data_dimension, activation="softmax"))
        nn_object.compile(loss="cross_entropy")

        # Train the neural network
        nn_object.train(self.input_data, self.target_data, max_epochs=1)
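The test above relies on a unittest fixture that defines self.input_data_dimension, self.output_data_dimension, self.input_data and self.target_data. A minimal setUp sketch, with a hypothetical test-case name and illustrative dimensions (inputs laid out features-by-samples, as in the main() example at the end of this section):

import unittest
import numpy as np

class ModelTrainingTest(unittest.TestCase):  # hypothetical class name
    def setUp(self):
        # Small random fixture so max_epochs=1 acts as a quick smoke test
        self.input_data_dimension = 10
        self.output_data_dimension = 3
        number_of_samples = 20

        self.input_data = np.random.rand(self.input_data_dimension,
                                         number_of_samples)
        # one_hot_encoding is the same NN-library helper used in main() below
        self.target_data = one_hot_encoding(
            np.random.randint(0, self.output_data_dimension,
                              number_of_samples))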
Example #2
    def construct_nn(self):
        # First construct an optimizer to use
        optimizer = ADAM(lr=self.learning_rate)

        # Create L2 regularizer
        regularizer = L2Regularizer(self.regularization_coefficient)

        self.nn = Sequential()
        self.nn.add_layer(
            DenseLayer(units=32,
                       activation="tanh",
                       input_dimension=self.input_data_dimension,
                       regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=32, activation="relu", regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=32, activation="tanh", regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=self.output_data_dimension,
                       activation="softmax",
                       regularizer=regularizer))
        self.nn.compile(loss="cross_entropy",
                        error_threshold=self.error_threshold,
                        optimizer=optimizer)
Example #3
    def train_network(self, x_train, y_train):
        # Create a logger for training; guard so repeated calls do not
        # attach duplicate file handlers (which would double every log line)
        logger = logging.getLogger("cifar10")
        logger.setLevel(logging.INFO)

        if not logger.handlers:
            logger.addHandler(logging.FileHandler("cifar10_training.log"))

        # Construct the network
        optimizer = ADAM(lr=0.001)

        regularizer = L2Regularizer(0.01)

        self.nn = Sequential()
        self.nn.add_layer(
            Convolution2D(4, (3, 3),
                          input_shape=(3, 32, 32),
                          regularizer=regularizer))
        self.nn.add_layer(MaxPooling(pool_size=(2, 2)))
        self.nn.add_layer(Convolution2D(8, (3, 3), regularizer=regularizer))
        self.nn.add_layer(MaxPooling(pool_size=(2, 2)))
        self.nn.add_layer(Convolution2D(16, (3, 3), regularizer=regularizer))
        self.nn.add_layer(Flatten())
        self.nn.add_layer(DenseLayer(units=10, activation="softmax"))
        self.nn.compile(loss="cross_entropy",
                        error_threshold=0.01,
                        optimizer=optimizer)

        self.nn.train(x_train,
                      y_train,
                      logging_frequency=1,
                      max_epochs=20,
                      training_logger=logger,
                      update_frequency=100,
                      layers_filename=self.weights_filename,
                      mini_batch_size=1024)
Example #4
import gzip
import logging
import pickle

# Sequential, DenseLayer, ADAM, L2Regularizer, one_hot_encoding,
# vector_to_label and categorical_accuracy come from the project's own
# NN library; the exact import path depends on the package layout.


class MNISTNN:
    def __init__(self):
        self.nn = None
        self.input_data_dimension = 784
        self.output_data_dimension = 10

        self.learning_rate = 0.001
        self.error_threshold = 0.1
        self.momentum = 0.9
        self.regularization_coefficient = 0.01

        self.logging_frequency = 10
        self.update_frequency = 100

        self.layers_filename = "mnist_nn_layers.pkl"
        self.test_data_filename = "mnist_test_data.pkl"

        self.mnist_logger = logging.getLogger("mnist")
        self.mnist_logger.setLevel(logging.INFO)

        fh = logging.FileHandler("mnist_training.log")
        self.mnist_logger.addHandler(fh)

    def construct_nn(self):
        # First construct an optimizer to use
        optimizer = ADAM(lr=self.learning_rate)

        # Create L2 regularizer
        regularizer = L2Regularizer(self.regularization_coefficient)

        self.nn = Sequential()
        self.nn.add_layer(
            DenseLayer(units=32,
                       activation="tanh",
                       input_dimension=self.input_data_dimension,
                       regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=32, activation="relu", regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=32, activation="tanh", regularizer=regularizer))
        self.nn.add_layer(
            DenseLayer(units=self.output_data_dimension,
                       activation="softmax",
                       regularizer=regularizer))
        self.nn.compile(loss="cross_entropy",
                        error_threshold=self.error_threshold,
                        optimizer=optimizer)

    def train(self, train_data, train_labels):
        """
        Train the constructed neural network
        """

        # Convert target to one hot encoding vectors
        train_targets = one_hot_encoding(train_labels)

        self.nn.train(train_data,
                      train_targets,
                      logging_frequency=self.logging_frequency,
                      update_frequency=self.update_frequency,
                      layers_filename=self.layers_filename,
                      training_logger=self.mnist_logger)

    def store_test_data(self, x_test, y_test, training_mean):
        """
        Store the test data in a file
        """
        logging.info("Storing test data")

        with gzip.open(self.test_data_filename, "wb") as file:
            pickle.dump((x_test, y_test, training_mean), file)

        logging.info("Test data saved")

    def load_test_data(self):
        """
        Load test data from pickle file
        """
        with gzip.open(self.test_data_filename, "rb") as file:
            x_test, y_test, training_mean = pickle.load(file)

        return x_test, y_test, training_mean

    def evaluate_performance(self, x_test, y_test):
        """
        Predict labels for the test input and evaluate categorical accuracy
        """
        predicted_output = self.predict(x_test)
        target_output = y_test

        return categorical_accuracy(predicted_output, target_output)

    def load_pretrained_network(self):
        self.nn = Sequential()
        self.nn.load_layer_weights(self.layers_filename)

    def predict(self, input_matrix):
        predicted_output = self.nn.predict(input_matrix)

        labels = vector_to_label(predicted_output)

        return labels
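A minimal end-to-end usage sketch for MNISTNN (load_mnist_data is a hypothetical loader name; the evaluation step assumes store_test_data was called at some earlier point):

x_train, y_train = load_mnist_data()  # hypothetical loader: 784-by-N images, labels

mnist_nn = MNISTNN()
mnist_nn.construct_nn()
mnist_nn.train(x_train, y_train)

# Assumes test data was previously saved with store_test_data
x_test, y_test, _training_mean = mnist_nn.load_test_data()
print(mnist_nn.evaluate_performance(x_test, y_test))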
Example #5
import logging
import pickle

# Sequential, Convolution2D, MaxPooling, Flatten, DenseLayer, ADAM,
# L2Regularizer, vector_to_label and categorical_accuracy come from the
# project's own NN library; the exact import path depends on the package
# layout.


class CIFAR10CNN:
    def __init__(self):
        self.test_data_filename = "cifar10_test.pkl"
        self.weights_filename = "cifar10_cnn_weights.pkl"

        self.nn = None

    def train_network(self, x_train, y_train):
        # Create a logger for training; guard so repeated calls do not
        # attach duplicate file handlers (which would double every log line)
        logger = logging.getLogger("cifar10")
        logger.setLevel(logging.INFO)

        if not logger.handlers:
            logger.addHandler(logging.FileHandler("cifar10_training.log"))

        # Construct the network
        optimizer = ADAM(lr=0.001)

        regularizer = L2Regularizer(0.01)

        self.nn = Sequential()
        self.nn.add_layer(
            Convolution2D(4, (3, 3),
                          input_shape=(3, 32, 32),
                          regularizer=regularizer))
        self.nn.add_layer(MaxPooling(pool_size=(2, 2)))
        self.nn.add_layer(Convolution2D(8, (3, 3), regularizer=regularizer))
        self.nn.add_layer(MaxPooling(pool_size=(2, 2)))
        self.nn.add_layer(Convolution2D(16, (3, 3), regularizer=regularizer))
        self.nn.add_layer(Flatten())
        self.nn.add_layer(DenseLayer(units=10, activation="softmax"))
        self.nn.compile(loss="cross_entropy",
                        error_threshold=0.01,
                        optimizer=optimizer)

        self.nn.train(x_train,
                      y_train,
                      logging_frequency=1,
                      max_epochs=20,
                      training_logger=logger,
                      update_frequency=100,
                      layers_filename=self.weights_filename,
                      mini_batch_size=1024)

    def store_test_data(self, x_test, y_test):
        logging.info("Storing test data")
        # Store the test data
        with open(self.test_data_filename, "wb") as file:
            pickle.dump((x_test, y_test), file)

    def load_test_data(self):
        with open(self.test_data_filename, "rb") as file:
            x_test, y_test = pickle.load(file)
            return x_test, y_test

    def load_pretrained_model(self):
        logging.info("Trying to load pretrained model")
        self.nn = Sequential()
        self.nn.load_layer_weights(self.weights_filename)

    def test_network(self, x_test, y_test):
        predicted_output = self.nn.predict(x_test)

        # Convert predicted output vector to labels
        predicted_labels = vector_to_label(predicted_output)
        return categorical_accuracy(predicted_labels, y_test)
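A minimal usage sketch for CIFAR10CNN (load_cifar10_data is a hypothetical loader; images must match the input_shape=(3, 32, 32) expected by the first convolutional layer):

(x_train, y_train), (x_test, y_test) = load_cifar10_data()  # hypothetical

cnn = CIFAR10CNN()
cnn.train_network(x_train, y_train)
cnn.store_test_data(x_test, y_test)

# train_network checkpoints weights to cifar10_cnn_weights.pkl via the
# layers_filename argument, so a later session can restore and evaluate:
cnn.load_pretrained_model()
print(cnn.test_network(*cnn.load_test_data()))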
Example #6
import logging

import numpy as np

# Sequential, DenseLayer, one_hot_encoding, vector_to_label and
# categorical_accuracy come from the project's own NN library.


def main():
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)

    number_of_samples = 50

    input_data_dimension = 10
    output_data_dimension = 3

    nn_object = Sequential()
    nn_object.add_layer(
        DenseLayer(units=32,
                   activation="tanh",
                   input_dimension=input_data_dimension))
    nn_object.add_layer(DenseLayer(units=64, activation="tanh"))
    nn_object.add_layer(
        DenseLayer(units=output_data_dimension, activation="softmax"))
    nn_object.compile(loss="cross_entropy", error_threshold=0.001)

    input_data = -0.5 + np.random.rand(input_data_dimension, number_of_samples)
    targets = np.random.randint(0, output_data_dimension, number_of_samples)

    # Convert labels to one hot encoding
    output_data = one_hot_encoding(targets)

    nn_object.train(input_data, output_data)

    predicted_output = nn_object.predict(input_data)
    predicted_labels = vector_to_label(predicted_output)

    print("Accuracy: {}%".format(
        100 * categorical_accuracy(predicted_labels, targets)))
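With a standard entry-point guard (not part of the original snippet), the example runs as a script:

if __name__ == "__main__":
    main()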