Example #1
    def train(self, verbose=True):
        """Train the perceptron with the perceptron learning algorithm.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        # Use the loss-function abstraction provided by the framework
        from util.loss_functions import DifferentError
        loss = DifferentError()

        learned = False
        iteration = 0

        # Keep training full epochs until the error is zero or the epoch limit is reached
        while not learned:
            totalError = 0
            for input, label in zip(self.trainingSet.input,
                                    self.trainingSet.label):
                output = self.fire(input)
                if output != label:
                    error = loss.calculateError(label, output)
                    self.updateWeights(input, error)
                    totalError += error

            iteration += 1
            
            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, -totalError)
            
            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached
                learned = True
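
The loop above leans on two helpers that are not part of the snippet, `fire` and `updateWeights`. A minimal sketch of how a perceptron of this kind is usually wired, assuming a weight vector `self.weight`, a learning rate `self.learningRate` and a Heaviside step activation (all names below are assumptions, not taken from the framework):

    import numpy as np

    class PerceptronSketch:
        """Hypothetical minimal perceptron matching the loop above."""

        def __init__(self, nFeatures, learningRate=0.01):
            self.learningRate = learningRate
            self.weight = np.zeros(nFeatures)

        def fire(self, input):
            # Heaviside step on the weighted sum: 1 if w . x > 0, else 0
            return int(np.dot(self.weight, input) > 0)

        def updateWeights(self, input, error):
            # Classic perceptron rule: w <- w + eta * (label - output) * x
            self.weight += self.learningRate * error * np.asarray(input)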
Example #2
    def train(self, verbose=True):
        """Train the perceptron with the perceptron learning algorithm.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        from util.loss_functions import DifferentError

        loss = DifferentError()

        learned = False
        iteration = 0

        while not learned:
            totalError = 0
            for input, label in zip(self.trainingSet.input, self.trainingSet.label):
                output = self.fire(input)
                if output != label:
                    error = loss.calculateError(label, output)
                    self.updateWeights(input, error)
                    totalError += error

            iteration += 1

            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, -totalError)

            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached
                learned = True
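
Both of the snippets above also depend on `util.loss_functions.DifferentError`. Its implementation is not shown, but the way its return value is fed straight into the weight update suggests it simply returns the signed difference between target and output; a sketch under that assumption:

    class DifferentError:
        """Assumed behaviour: signed difference between target and output."""

        def calculateError(self, target, output):
            # With 0/1 labels this is +1 when the output is too low
            # and -1 when it is too high.
            return target - output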
Example #3
    def train(self, verbose=True):
        """Train the Logistic Regression.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        from util.loss_functions import DifferentError
        loss = DifferentError()

        learned = False
        iteration = 0

        while not learned:

            self.shuffle()

            totalError = 0

            for input, label in zip(self.trainingSet.input,
                                    self.trainingSet.label):
                # feedforward
                inputarray = input.reshape(1, len(input))
                layeroutput = self.layer.forward(inputarray)
                output = self.fire(layeroutput)
                # compute the gradient of the regression output
                delta = label - output
                grad = delta * self.layer.output
                # backpropagation
                self.layer.computeDerivative(delta, self.weight)
                # update all weights
                self.updateWeights(grad)
                self.layer.updateWeights()

            # compute the classification error (not BCE) on the validation data
            for input, label in zip(self.validationSet.input,
                                    self.validationSet.label):
                predictedLabel = self.classify(input)
                error = loss.calculateError(label, predictedLabel)
                totalError += error

            totalError = abs(totalError)

            iteration += 1

            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, totalError)


            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached
                learned = True
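
This variant pushes each input through a hidden layer object (`self.layer`) before the output unit, but `fire` and `classify` are again outside the snippet. A plausible reading, assuming a sigmoid output unit with its own weight vector and the usual 0.5 decision threshold (class and attribute names below are illustrative only):

    import numpy as np

    def sigmoid(x):
        # Logistic activation assumed for the output unit
        return 1.0 / (1.0 + np.exp(-x))

    class OutputUnitSketch:
        """Hypothetical output unit mirroring fire()/classify() above."""

        def __init__(self, nHidden):
            self.weight = np.zeros(nHidden)

        def fire(self, layerOutput):
            # Weighted sum of the hidden layer's output, squashed by a sigmoid
            return sigmoid(np.dot(self.weight, np.ravel(layerOutput)))

        def classify(self, layerOutput):
            # 0/1 decision at the usual 0.5 threshold
            return 1 if self.fire(layerOutput) >= 0.5 else 0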
Example #4
    def train(self, verbose=True):
        """
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """
        DE = DifferentError()  # Working
        AE = AbsoluteError()  # nope
        BCE = BinaryCrossEntropyError()  # Working(?)
        CE = CrossEntropyError()  # Not implemented
        SSE = SumSquaredError()  # Working(?)
        MSE = MeanSquaredError()  # Working(?)
        # ----------------------------------
        # use loss to choose error function
        # ----------------------------------
        loss = DE

        learned = False
        it = 0
        totE = 0  # total error
        errHist = []  # error history

        # Stack the labels into an [L x 1] matrix, where L is the number of labels
        labels = np.matrix(np.column_stack((self.trainingSet.label, )))
        # add bias (column of ones)
        inputs = np.matrix(
            np.append(np.ones((self.trainingSet.input.shape[0], 1)),
                      self.trainingSet.input,
                      axis=1))

        while not learned:
            # shuffle the (input, label) pairs together
            # (list() is needed so that random.shuffle can operate in place)
            data = list(zip(inputs, labels))
            shuffle(data)

            for input, label in data:
                # forward pass
                output = self._forwardPass(input)
                # compute the error
                totE += abs(loss.calculateError(label, output))
                # backward pass
                self._backwardPass(label, output)
                # update weights
                self._updateWeights()

            # update local variables
            errHist.append(totE)
            it += 1

            # compute error difference for logging purposes
            deltaE = 0 if it < 2 else abs(errHist[-1] - errHist[-2])

            if verbose:
                logging.info("Epoch: %i; Error: %i; ΔE: %f", it, totE, deltaE)

            # convergence criteria
            learned = (totE == 0 or it >= self.epochs)
            totE = 0

        self._plotResults(errHist)
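
The data preparation at the top of this example (labels stacked into an L x 1 matrix, a bias column of ones prepended to the inputs) can be checked in isolation. A small stand-alone demonstration with made-up data; np.matrix is kept only to mirror the example, plain ndarrays with np.hstack would do the same job:

    import numpy as np

    # Three fake samples with two features each, plus their 0/1 labels
    features = np.array([[0.2, 0.7],
                         [0.9, 0.1],
                         [0.4, 0.4]])
    rawLabels = np.array([1, 0, 1])

    # Labels as an L x 1 column matrix, as in the example above
    labels = np.matrix(np.column_stack((rawLabels, )))

    # Prepend a bias column of ones to the inputs
    inputs = np.matrix(np.append(np.ones((features.shape[0], 1)),
                                 features, axis=1))

    print(labels.shape)   # (3, 1)
    print(inputs[:, 0])   # first column is all ones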
Example #5
    def train(self, verbose=False):
        """Train the Logistic Regression.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """
        from util.loss_functions import DifferentError
        loss = DifferentError()

        learned = False
        iteration = 0
        yzhou = []
        while not learned:
            totalError = 0

            grad = np.zeros(self.trainingSet.input[0].shape)
            for input, label in zip(self.trainingSet.input,
                                    self.trainingSet.label):
                output = self.fire(input)

                error = loss.calculateError(label, output)
                grad = grad + error * input
                if label == 1 and output < 0.5:
                    totalError += 1
                if label == 0 and output >= 0.5:
                    totalError += 1

            yzhou.append(totalError)
            self.updateWeights(grad)
            iteration += 1

            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, totalError)

            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached; plot the error history
                xzhou = np.arange(iteration)
                plt.plot(xzhou, np.array(yzhou))
                plt.title("Learning rate %f" % self.learningRate)
                plt.xlabel('epoch')
                plt.ylabel('Error')
                plt.show()
                learned = True
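
The gradient here is accumulated over the whole training set before a single `updateWeights(grad)` call, i.e. one batch step per epoch. The update itself is not shown; assuming the usual logistic-regression gradient-ascent rule on a weight vector `self.weight` (names below are assumptions), it would amount to something like:

    import numpy as np

    class LogisticRegressionSketch:
        """Hypothetical carrier for the batch update assumed above."""

        def __init__(self, nFeatures, learningRate=0.005):
            self.learningRate = learningRate
            self.weight = np.zeros(nFeatures)

        def updateWeights(self, grad):
            # grad already sums (label - output) * input over the epoch,
            # so one gradient-ascent step is a single scaled addition.
            self.weight += self.learningRate * grad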
Example #6
    def train(self, verbose=True):
        """Train the Logistic Regression.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        from util.loss_functions import DifferentError
        loss = DifferentError()

        learned = False
        iteration = 0

        while not learned:
            grad = 0
            totalError = 0
            for input, label in zip(self.trainingSet.input,
                                    self.trainingSet.label):
                output = self.fire(input)
                # compute gradient
                grad += -(label - output) * input

                # compute the classification error (not BCE)
                predictedLabel = self.classify(input)
                error = loss.calculateError(label, predictedLabel)
                totalError += error

            self.updateWeights(grad)
            totalError = abs(totalError)

            iteration += 1

            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, totalError)

            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached
                learned = True
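
Note that, unlike the previous example, the gradient is accumulated here with a minus sign, `-(label - output) * input`, so the matching `updateWeights` would have to step against it rather than along it. A short sketch under that assumption, written as a method of the same kind of classifier class as sketched above:

    def updateWeights(self, grad):
        # grad sums -(label - output) * input, i.e. the gradient of the
        # negative log-likelihood, so we subtract it from the weights.
        self.weight -= self.learningRate * grad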
Example #7
    def train(self, verbose=True):
        """Train the Logistic Regression.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        from util.loss_functions import DifferentError

        loss = DifferentError()

        learned = False
        iteration = 0

        while not learned:
            grad = 0
            totalError = 0
            for input, label in zip(self.trainingSet.input, self.trainingSet.label):
                output = self.fire(input)
                # compute gradient
                grad += -(label - output) * input

                # compute the classification error (not BCE)
                predictedLabel = self.classify(input)
                error = loss.calculateError(label, predictedLabel)
                totalError += error

            self.updateWeights(grad)
            totalError = abs(totalError)

            iteration += 1

            if verbose:
                logging.info("Epoch: %i; Error: %i", iteration, totalError)

            if totalError == 0 or iteration >= self.epochs:
                # stopping criterion reached
                learned = True
Example #8
    def __init__(self,
                 train,
                 valid,
                 test,
                 layers=None,
                 inputWeights=None,
                 outputTask='classification',
                 outputActivation='softmax',
                 loss='bce',
                 learningRate=0.01,
                 epochs=50):
        """
        A MNIST recognizer based on multi-layer perceptron algorithm

        Parameters
        ----------
        train : list
        valid : list
        test : list
        learningRate : float
        epochs : positive int

        Attributes
        ----------
        trainingSet : list
        validationSet : list
        testSet : list
        learningRate : float
        epochs : positive int
        performances : array of floats
        """

        self.learningRate = learningRate
        self.epochs = epochs
        self.outputTask = outputTask  # Either classification or regression
        self.outputActivation = outputActivation
        #self.cost = cost

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        if loss == 'bce':
            self.loss = BinaryCrossEntropyError()
        elif loss == 'sse':
            self.loss = SumSquaredError()
        elif loss == 'mse':
            self.loss = MeanSquaredError()
        elif loss == 'different':
            self.loss = DifferentError()
        elif loss == 'absolute':
            self.loss = AbsoluteError()
        else:
            raise ValueError('There is no predefined loss function ' +
                             'named ' + str(loss))

        # Record the performance of each epoch for later usages
        # e.g. plotting, reporting..
        self.performances = []

        # Build up the network from specific layers
        # (note: the layers argument is currently ignored; a fixed 128-unit
        #  sigmoid hidden layer and a 10-unit softmax output layer are used)
        self.layers = []

        # Input layer
        inputActivation = "sigmoid"
        self.layers.append(
            LogisticLayer(train.input.shape[1], 128, None, inputActivation,
                          False))

        # Output layer
        outputActivation = "softmax"
        self.layers.append(LogisticLayer(128, 10, None, outputActivation,
                                         True))

        self.inputWeights = inputWeights

        # add bias values ("1"s) at the beginning of all data sets
        self.trainingSet.input = np.insert(self.trainingSet.input,
                                           0,
                                           1,
                                           axis=1)
        self.validationSet.input = np.insert(self.validationSet.input,
                                             0,
                                             1,
                                             axis=1)
        self.testSet.input = np.insert(self.testSet.input, 0, 1, axis=1)
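
As a design note, the elif chain that maps the `loss` string to a loss object could also be written as a dictionary lookup, which keeps the mapping in one place. A sketch of that alternative, assuming the same classes from util.loss_functions and meant as a drop-in for the constructor body:

    from util.loss_functions import (AbsoluteError, BinaryCrossEntropyError,
                                     DifferentError, MeanSquaredError,
                                     SumSquaredError)

    lossFunctions = {
        'bce': BinaryCrossEntropyError,
        'sse': SumSquaredError,
        'mse': MeanSquaredError,
        'different': DifferentError,
        'absolute': AbsoluteError,
    }
    try:
        self.loss = lossFunctions[loss]()
    except KeyError:
        raise ValueError('There is no predefined loss function named '
                         + str(loss))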