Example #1
    def __init__(self,
                 train,
                 valid,
                 test,
                 learningRate=0.01,
                 epochs=50,
                 activation='sigmoid',
                 error='mse'):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        # Initialize the weight vector with small values and prepend a
        # bias weight of 1
        self.weight = 0.01 * np.random.randn(1,
                                             self.trainingSet.input.shape[1])
        weight_plus_bias = np.insert(self.weight, 0, 1, axis=1)
        self.weight = weight_plus_bias

        # Choose the error function
        self.errorString = error
        self._initialize_error(error)

        # Initialize the layer as well
        self.layer = LogisticLayer(nIn=self.trainingSet.input.shape[1],
                                   nOut=1,
                                   activation=activation,
                                   weights=weight_plus_bias)
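
Every snippet on this page calls a LogisticLayer constructor from the KIT NNPraktikum exercise code, which is not itself shown. The call sites mix two keyword spellings (isClassifierLayer and is_classifier_layer), so the examples likely come from different versions of the repository. Below is a minimal sketch of a constructor that would accept the calls above, reconstructed from the call sites alone; the real signature may differ.

import numpy as np

class LogisticLayer:
    def __init__(self, nIn, nOut, weights=None,
                 activation='sigmoid', isClassifierLayer=False):
        self.nIn = nIn
        self.nOut = nOut
        self.activationString = activation
        self.isClassifierLayer = isClassifierLayer
        if weights is None:
            # One extra weight row for the bias column ("1"s) that the
            # examples prepend to their input data
            self.weights = np.random.rand(nIn + 1, nOut) / 10.0
        else:
            self.weights = weights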
Example #2
    def __init__(self, train, valid, test, learning_rate=0.01, epochs=50):

        self.learning_rate = learning_rate
        self.epochs = epochs

        self.training_set = train
        self.validation_set = valid
        self.test_set = test

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        # Use a logistic layer as a one-neuron classification (output) layer
        self.layer = LogisticLayer(train.input.shape[1],
                                   1,
                                   is_classifier_layer=True)

        # add bias values ("1"s) at the beginning of all data sets
        self.training_set.input = np.insert(self.training_set.input,
                                            0,
                                            1,
                                            axis=1)
        self.validation_set.input = np.insert(self.validation_set.input,
                                              0,
                                              1,
                                              axis=1)
        self.test_set.input = np.insert(self.test_set.input, 0, 1, axis=1)
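
The np.insert calls above implement the usual bias trick: a constant column of ones is prepended to each data matrix so the first weight of every neuron acts as a bias term. A small demonstration of what a single call does:

import numpy as np

X = np.arange(6.0).reshape(2, 3)     # shape (2, 3)
X_bias = np.insert(X, 0, 1, axis=1)  # insert the value 1 before column 0
print(X_bias.shape)                  # (2, 4)
print(X_bias)
# [[1. 0. 1. 2.]
#  [1. 3. 4. 5.]]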
Example #3
    def _constructNetwork(self, netStruct, activationFunctions):

        prevSize = self.trainingSet.input.shape[1] - 1
        for (size, func) in zip(netStruct, activationFunctions):
            self.layers.append(LogisticLayer(prevSize, size, None, func, False))
            prevSize = size

        self.layers[-1].isClassifierLayer = True
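
The zip pairs each layer size in netStruct with its activation function, and the final assignment marks the last layer as the classifier layer. A hypothetical call (the instance name net and the 784-pixel input size are assumed for illustration):

# Builds input(784) -> sigmoid(100) -> softmax(10), with the softmax layer
# flagged as the classifier layer
net._constructNetwork(netStruct=[100, 10],
                      activationFunctions=['sigmoid', 'softmax'])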
Example #4
def main(args):
    hidden_layers = [
        LogisticLayer(128, 128, isClassifierLayer=True)
        for _ in range(args.num_layers)
    ]
    data = MNISTSeven(args.dataset, 3000, 1000, 1000, oneHot=True)
    mlp = MultilayerPerceptron(data.trainingSet, data.validationSet,
                               data.testSet, hidden_layers)
    mlp.train(verbose=True)
Example #5
    def __init__(self, train, valid, test, learningRate=0.01, epochs=50):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test
        
        self.logisticLayer = LogisticLayer(len(self.trainingSet.input[0]),
                                           1)
Example #6
    def __init__(self, data, learningRate=0.01, epochs=50, hiddensize=50):

        self.learningRate = learningRate
        self.epochs = epochs
        self.trainingSet = data.trainingSet
        self.validationSet = data.validationSet
        self.testSet = data.testSet
        self.data = data
        self.layer = LogisticLayer(data.trainingSet.input.shape[1],
                                   hiddensize,
                                   learningRate)

        # Initialize the weight vector with small values
        self.weight = 0.01 * np.random.randn(self.layer.size)
Example #7
    def __init__(self,
                 train,
                 valid,
                 test,
                 learningRate=0.01,
                 epochs=50,
                 loss='bce'):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        if loss == 'bce':
            self.loss = BinaryCrossEntropyError()
        elif loss == 'sse':
            self.loss = SumSquaredError()
        elif loss == 'mse':
            self.loss = MeanSquaredError()
        elif loss == 'different':
            self.loss = DifferentError()
        elif loss == 'absolute':
            self.loss = AbsoluteError()
        else:
            raise ValueError('There is no predefined loss function named ' +
                             loss)

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        # Use a logistic layer as a one-neuron classification (output) layer
        self.layer = LogisticLayer(train.input.shape[1],
                                   1,
                                   activation='sigmoid',
                                   isClassifierLayer=True)

        # add bias values ("1"s) at the beginning of all data sets
        self.trainingSet.input = np.insert(self.trainingSet.input,
                                           0,
                                           1,
                                           axis=1)
        self.validationSet.input = np.insert(self.validationSet.input,
                                             0,
                                             1,
                                             axis=1)
        self.testSet.input = np.insert(self.testSet.input, 0, 1, axis=1)
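
The if/elif chain above can also be written as a lookup table, which keeps the name-to-class mapping and the error message in one place. A sketch, assuming the same error classes are imported as in the example:

LOSSES = {
    'bce': BinaryCrossEntropyError,
    'sse': SumSquaredError,
    'mse': MeanSquaredError,
    'different': DifferentError,
    'absolute': AbsoluteError,
}

def make_loss(name):
    # Instantiate the requested loss, mirroring the chain above
    try:
        return LOSSES[name]()
    except KeyError:
        raise ValueError('There is no predefined loss function named ' + name)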
Example #8
    def __init__(self,
                 train,
                 valid,
                 test,
                 layers=None,
                 input_weights=None,
                 output_task='classification',
                 output_activation='sigmoid',
                 cost='mse',
                 learning_rate=0.01,
                 epochs=50):
        """
        A digit-7 recognizer based on the logistic regression algorithm

        Parameters
        ----------
        train : list
        valid : list
        test : list
        layers: list
            List of layers
        input_weights: list
            weight layer
        learning_rate : float
        epochs : positive int

        Attributes
        ----------
        training_set : list
        validation_set : list
        test_set : list
        learning_rate : float
        epochs : positive int
        performances: array of floats
        """

        self.learning_rate = learning_rate
        self.epochs = epochs
        self.output_task = output_task  # Either classification or regression
        self.output_activation = output_activation
        self.cost_string = cost
        self.cost = loss_functions.get_loss(cost)

        print("Task: {}, Activation Function {}, Error Function: {}".format(
            self.output_task, self.output_activation, self.cost_string))

        self.training_set = train
        self.validation_set = valid
        self.test_set = test

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        self.layers = layers
        self.input_weights = input_weights

        if layers is None:
            if output_task == 'classification':
                self.layers = []
                output_activation = "sigmoid"
                self.layers.append(
                    LogisticLayer(train.input.shape[1],
                                  10,
                                  activation=output_activation,
                                  is_classifier_layer=False))
                self.layers.append(
                    LogisticLayer(10,
                                  10,
                                  activation=output_activation,
                                  is_classifier_layer=False))
                self.layers.append(
                    LogisticLayer(10,
                                  1,
                                  activation=output_activation,
                                  is_classifier_layer=True))
            elif output_task == 'classify_all':
                self.layers = []
                self.layers.append(
                    Layer(train.input.shape[1],
                          100,
                          activation='sigmoid',
                          is_classifier_layer=False))
                self.layers.append(
                    Layer(100,
                          10,
                          activation='softmax',
                          is_classifier_layer=True))

        else:
            self.layers = layers

        # add bias values ("1"s) at the beginning of all data sets
        self.training_set.input = np.insert(self.training_set.input,
                                            0,
                                            1,
                                            axis=1)
        self.validation_set.input = np.insert(self.validation_set.input,
                                              0,
                                              1,
                                              axis=1)
        self.test_set.input = np.insert(self.test_set.input, 0, 1, axis=1)
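
When no layers are passed, the constructor falls back to one of the two built-in topologies selected by output_task. A hypothetical call choosing the 10-class variant (the class name MultilayerPerceptron and the data attribute names are borrowed from the other examples on this page, since only the __init__ body is shown above):

mlp = MultilayerPerceptron(data.training_set,
                           data.validation_set,
                           data.test_set,
                           output_task='classify_all')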
Example #9
    def __init__(self,
                 train,
                 valid,
                 test,
                 layers=None,
                 input_weights=None,
                 output_task='classification',
                 output_activation='softmax',
                 cost='crossentropy',
                 learning_rate=0.01,
                 epochs=50):
        """
        A digit-7 recognizer based on the logistic regression algorithm

        Parameters
        ----------
        train : list
        valid : list
        test : list
        learning_rate : float
        epochs : positive int

        Attributes
        ----------
        training_set : list
        validation_set : list
        test_set : list
        learning_rate : float
        epochs : positive int
        performances: array of floats
        """

        self.learning_rate = learning_rate
        self.epochs = epochs
        self.output_task = output_task  # Either classification or regression
        self.output_activation = output_activation
        self.cost = cost
        # Should polish the loss_function a little bit more
        self.error = CrossEntropyError()

        self.training_set = train
        self.validation_set = valid
        self.test_set = test

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        self.layers = layers
        self.input_weights = input_weights

        # Build up the network from specific layers
        if layers is None:
            self.layers = []

            # First hidden layer
            number_of_1st_hidden_layer = 100

            self.layers.append(
                LogisticLayer(train.input.shape[1],
                              number_of_1st_hidden_layer,
                              None,
                              activation="sigmoid",
                              is_classifier_layer=False))

            # Output layer
            self.layers.append(
                LogisticLayer(number_of_1st_hidden_layer,
                              10,
                              None,
                              activation="softmax",
                              is_classifier_layer=True))

        else:
            self.layers = layers

        # add bias values ("1"s) at the beginning of all data sets
        self.training_set.input = np.insert(self.training_set.input,
                                            0,
                                            1,
                                            axis=1)
        self.validation_set.input = np.insert(self.validation_set.input,
                                              0,
                                              1,
                                              axis=1)
        self.test_set.input = np.insert(self.test_set.input, 0, 1, axis=1)
Example #10
    def __init__(self,
                 train,
                 valid,
                 test,
                 layers=None,
                 inputWeights=None,
                 outputTask='classification',
                 outputActivation='softmax',
                 loss='bce',
                 learningRate=0.01,
                 epochs=50):
        """
        An MNIST recognizer based on the multi-layer perceptron algorithm

        Parameters
        ----------
        train : list
        valid : list
        test : list
        learningRate : float
        epochs : positive int

        Attributes
        ----------
        trainingSet : list
        validationSet : list
        testSet : list
        learningRate : float
        epochs : positive int
        performances: array of floats
        """

        self.learningRate = learningRate
        self.epochs = epochs
        self.outputTask = outputTask  # Either classification or regression
        self.outputActivation = outputActivation

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        if loss == 'bce':
            self.loss = BinaryCrossEntropyError()
        elif loss == 'sse':
            self.loss = SumSquaredError()
        elif loss == 'mse':
            self.loss = MeanSquaredError()
        elif loss == 'different':
            self.loss = DifferentError()
        elif loss == 'absolute':
            self.loss = AbsoluteError()
        else:
            raise ValueError('There is no predefined loss function named ' +
                             loss)

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        # Build up the network from specific layers
        # (note: the layers argument is ignored here; this network is
        # always built)
        self.layers = []

        self.hiddenNeurons = 50

        # Input layer
        inputActivation = "sigmoid"
        self.layers.append(
            LogisticLayer(train.input.shape[1], 128, None, inputActivation,
                          False))

        # Hidden layers - slightly increased accuracy with 2 hidden layers
        hiddenActivation = "sigmoid"
        self.layers.append(
            LogisticLayer(128, self.hiddenNeurons, None, hiddenActivation,
                          False))

        self.layers.append(
            LogisticLayer(self.hiddenNeurons, self.hiddenNeurons, None,
                          hiddenActivation, False))

        # Output layer
        outputActivation = "softmax"
        self.layers.append(
            LogisticLayer(self.hiddenNeurons, 10, None, outputActivation,
                          True))

        self.inputWeights = inputWeights

        # add bias values ("1"s) at the beginning of all data sets
        self.trainingSet.input = np.insert(self.trainingSet.input,
                                           0,
                                           1,
                                           axis=1)
        self.validationSet.input = np.insert(self.validationSet.input,
                                             0,
                                             1,
                                             axis=1)
        self.testSet.input = np.insert(self.testSet.input, 0, 1, axis=1)
Example #11
def main():
    data = MNISTSeven("../data/mnist_seven.csv",
                      3000,
                      1000,
                      1000,
                      oneHot=False)

    # myLRClassifier = LogisticRegression(data.trainingSet,
    #                                     data.validationSet,
    #                                     data.testSet,
    #                                     learningRate=0.005,
    #                                     epochs=30)
    hidden_layers = [
        LogisticLayer(128, 32, isClassifierLayer=True) for _ in range(1)
    ]
    mlp = MultilayerPerceptron(data.trainingSet,
                               data.validationSet,
                               data.testSet,
                               hidden_layers,
                               learningRate=0.005,
                               epochs=30)

    # Train the classifiers
    #print("=========================")
    print("Training...")

    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")

    print("Training MLP...")
    mlp.train()
    print("Done.")

    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    # perceptronPred = myPerceptronClassifier.evaluate()
    # lrPred = myLRClassifier.evaluate()
    mlpPred = mlp.evaluate()

    # Report the result
    print("=========================")
    evaluator = Evaluator()

    # print("Result of the stupid recognizer:")
    #     # #evaluator.printComparison(data.testSet, stupidPred)
    #     # evaluator.printAccuracy(data.testSet, stupidPred)
    #     #
    #     # print("\nResult of the Perceptron recognizer:")
    #     # #evaluator.printComparison(data.testSet, perceptronPred)
    #     # evaluator.printAccuracy(data.testSet, perceptronPred)
    #     #
    #     # print("\nResult of the Logistic Regression recognizer:")
    #     # #evaluator.printComparison(data.testSet, lrPred)
    #     # evaluator.printAccuracy(data.testSet, lrPred)

    print("Result of the MLP recognizer:")
    evaluator.printComparison(data.testSet, mlpPred)
    evaluator.printAccuracy(data.testSet, mlpPred)

    # Draw
    plot = PerformancePlot("MLP validation")
    # plot.draw_performance_epoch(myLRClassifier.performances,
    #                             myLRClassifier.epochs)
    plot.draw_performance_epoch(mlp.performances, mlp.epochs)
Example #12
File: Run.py  Project: mctigger/NNPraktikum
def main():
    #data = MNISTSeven("../data/mnist_seven.csv", 3000, 1000, 1000,
    #                 one_hot=True, target_digit='7')

    # NOTE:
    # Comment out the MNISTSeven instantiation above and
    # uncomment the following to work with the full MNIST task
    data = MNISTSeven("../data/mnist_seven.csv",
                      3000,
                      1000,
                      1000,
                      one_hot=False)

    # # NOTE:
    # # Other 1-digit classifiers do not make sense for comparison now,
    # # so comment them out, leaving only the MLP training and evaluation
    #
    # # Train the classifiers #
    # print("=========================")
    # print("Training..")
    #
    # # Stupid Classifier
    # myStupidClassifier = StupidRecognizer(data.training_set,
    #                                       data.validation_set,
    #                                       data.test_set)
    #
    # print("\nStupid Classifier has been training..")
    # myStupidClassifier.train()
    # print("Done..")
    # # Do the recognizer
    # # Explicitly specify the test set to be evaluated
    # stupidPred = myStupidClassifier.evaluate()
    #
    # # Perceptron
    # myPerceptronClassifier = Perceptron(data.training_set,
    #                                     data.validation_set,
    #                                     data.test_set,
    #                                     learning_rate=0.005,
    #                                     epochs=10)
    #
    # print("\nPerceptron has been training..")
    # myPerceptronClassifier.train()
    # print("Done..")
    # # Do the recognizer
    # # Explicitly specify the test set to be evaluated
    # perceptronPred = myPerceptronClassifier.evaluate()
    #
    # # Logistic Regression
    # myLRClassifier = LogisticRegression(data.training_set,
    #                                     data.validation_set,
    #                                     data.test_set,
    #                                     learning_rate=0.005,
    #                                     epochs=30)
    #
    # print("\nLogistic Regression has been training..")
    # myLRClassifier.train()
    # print("Done..")
    # # Do the recognizer
    # # Explicitly specify the test set to be evaluated
    # lrPred = myLRClassifier.evaluate()

    # Build up the network from specific layers
    # Here is an example of an MLP acting like Logistic Regression
    layers = []
    layers.append(LogisticLayer(784, 5, None, "sigmoid", True))
    layers.append(LogisticLayer(5, 10, None, "softmax", False))

    myMLPClassifier = MultilayerPerceptron(data.training_set,
                                           data.validation_set,
                                           data.test_set,
                                           learning_rate=0.5,
                                           epochs=30,
                                           layers=layers)
    print("\nLogistic Regression has been training..")
    myMLPClassifier.train()
    print("Done..")
    # Do the recognizer
    # Explicitly specify the test set to be evaluated
    mlpPred = myMLPClassifier.evaluate()
    #
    # Report the result #
    print("=========================")
    evaluator = Evaluator()
    #
    # # print("Result of the stupid recognizer:")
    # # evaluator.printComparison(data.testSet, stupidPred)
    # evaluator.printAccuracy(data.test_set, stupidPred)
    # #
    # # print("\nResult of the Perceptron recognizer (on test set):")
    # # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, perceptronPred)
    # #
    # # print("\nResult of the Logistic Regression recognizer (on test set):")
    # # evaluator.printComparison(data.testSet, perceptronPred)
    # evaluator.printAccuracy(data.test_set, lrPred)
    #
    print("\nResult of the Multi-layer Perceptron recognizer (on test set):")
    # evaluator.printComparison(data.testSet, perceptronPred)
    evaluator.printAccuracy(data.test_set, mlpPred)
    #
    # # Draw
    # plot = PerformancePlot("Logistic Regression")
    # plot.draw_performance_epoch(myLRClassifier.performances,
    #                             myLRClassifier.epochs)

    # 3D Plot learning_rates + epochs -> accuracies
    print("Creating 3D plot. This may take some minutes...")
    learning_rate_sample_count = 5
    epochs_sample_count = 20
    xticks = np.logspace(-10.0,
                         0,
                         base=10,
                         num=learning_rate_sample_count,
                         endpoint=False)
    accuracies = []
    learning_rates = []
    epoch_values = []

    for i in range(learning_rate_sample_count):
        learning_rate = 100 / np.exp(i)
        print("Calculating accuracy for: learning rate = %s" % (learning_rate))
        myMLPClassifier = MultilayerPerceptron(data.training_set,
                                               data.validation_set,
                                               data.test_set,
                                               learning_rate=learning_rate,
                                               epochs=epochs_sample_count,
                                               layers=layers)
        epoch_accuracies = myMLPClassifier.train(False)
        lrPred = myMLPClassifier.evaluate()
        epoch_values.append([e for e in range(epochs_sample_count)])
        learning_rates.append(
            [learning_rate for _ in range(epochs_sample_count)])
        accuracies.append(epoch_accuracies)

    accuracies_merged = list(itertools.chain(*accuracies))
    epochs_merged = list(itertools.chain(*epoch_values))
    learning_rates_merged = list(itertools.chain(*learning_rates))
    print(accuracies_merged)
    print(epochs_merged)
    print(learning_rates_merged)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(np.log10(learning_rates_merged), epochs_merged,
               accuracies_merged)
    ax.set_xlabel("Learning Rate")

    ax.set_xticks(np.log10(xticks))
    ax.set_xticklabels(xticks)
    ax.set_ylabel('Epochs')
    ax.set_zlabel('Accuracy')
    plt.show()
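
Note that the sweep samples its learning rates as 100 / exp(i), while the x-tick labels come from np.logspace, so the plotted points do not fall on the labeled grid. Sampling directly from the same log grid would keep the two consistent; a small sketch:

import numpy as np

learning_rate_sample_count = 5
grid = np.logspace(-10.0, 0, base=10,
                   num=learning_rate_sample_count, endpoint=False)
print(grid)  # [1.e-10 1.e-08 1.e-06 1.e-04 1.e-02]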
Example #13
    def __init__(self,
                 train,
                 valid,
                 test,
                 layers=None,
                 input_weights=None,
                 output_task='classification',
                 output_activation='softmax',
                 inputActivation='sigmoid',
                 cost='crossentropy',
                 learning_rate=0.01,
                 epochs=50,
                 learningRateReductionFactor=1.0,
                 layerNeurons=[10]):
        """
        A digit-7 recognizer based on the logistic regression algorithm

        Parameters
        ----------
        train : list
        valid : list
        test : list
        learning_rate : float
        epochs : positive int

        Attributes
        ----------
        training_set : list
        validation_set : list
        test_set : list
        learning_rate : float
        epochs : positive int
        performances: array of floats
        """

        self.learning_rate = learning_rate
        self.epochs = epochs
        self.output_task = output_task  # Either classification or regression
        self.output_activation = output_activation
        self.cost = cost

        self.training_set = train
        self.validation_set = valid
        self.test_set = test

        # Record the performance of each epoch for later use,
        # e.g. plotting or reporting
        self.performances = []

        self.layers = layers
        self.input_weights = input_weights

        # activation function for the hidden layers
        self.inputActivation = inputActivation
        # reduction factor of learning rate per epoch
        self.learningRateReductionFactor = learningRateReductionFactor

        #######################
        #    CREATE LAYERS    #
        #######################
        # Build up the network from specific layers
        self.layers = []
        # check for correct argument
        if (len(layerNeurons) < 1):
            raise ValueError(
                'Error: layerNeurons must contain at least one layer with neurons!'
            )
        # if there is only one layer it is an output layer
        if (len(layerNeurons) == 1):
            self.layers.append(
                LogisticLayer(train.input.shape[1], layerNeurons[0], None,
                              self.output_activation, True))
        # if there are more than one layer
        else:
            # first layer (hidden layer)
            self.layers.append(
                LogisticLayer(train.input.shape[1], layerNeurons[0], None,
                              self.inputActivation, False))
            # rest of the hidden layers
            for i in xrange(1, len(layerNeurons) - 1):
                self.layers.append(
                    LogisticLayer(layerNeurons[i - 1], layerNeurons[i], None,
                                  self.inputActivation, False))
            # output layer
            self.layers.append(
                LogisticLayer(layerNeurons[len(layerNeurons) - 2],
                              layerNeurons[len(layerNeurons) - 1], None,
                              self.output_activation, True))

        # total number of output neurons
        self.totalOutputs = layerNeurons[len(layerNeurons) - 1]
        # total number of layers
        self.totalLayers = len(self.layers)

        # add bias values ("1"s) at the beginning of all data sets
        self.training_set.input = np.insert(self.training_set.input,
                                            0,
                                            1,
                                            axis=1)
        self.validation_set.input = np.insert(self.validation_set.input,
                                              0,
                                              1,
                                              axis=1)
        self.test_set.input = np.insert(self.test_set.input, 0, 1, axis=1)
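
A hypothetical call (the class name and data attribute names are assumed from the Run.py example above): layerNeurons=[100, 50, 10] yields sigmoid hidden layers of 100 and 50 neurons followed by a 10-neuron softmax output layer.

mlp = MultilayerPerceptron(data.training_set,
                           data.validation_set,
                           data.test_set,
                           layerNeurons=[100, 50, 10])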