Example #1
    def __init__(self, nIn, nOut, weights=None,
                 activation='softmax', isClassifierLayer=True):

        # Get activation function from string
        # Notice the functional programming paradigms of Python + Numpy
        self.activationString = activation
        self.activation = Activation.getActivation(self.activationString)
        self.activationPrime = Activation.getDerivative(self.activationString)

        self.nIn = nIn
        self.nOut = nOut

        # Adding bias
        self.input = np.ndarray((nIn+1, 1))
        self.input[0] = 1
        self.output = np.ndarray((nOut, 1))
        self.delta = np.zeros((nOut, 1))

        # You can have better initialization here
        # w[i][j] is the weight from input j to output i
        if weights is None:
            rns = np.random.RandomState(int(time.time()))
            self.weights = rns.uniform(size=(nOut, nIn + 1)) - 0.5
        else:
            self.weights = weights

        self.isClassifierLayer = isClassifierLayer

        # Some handy properties of the layers
        self.size = self.nOut
        self.shape = self.weights.shape
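The comment "You can have better initialization here" invites something smarter than a plain uniform draw. A minimal sketch of a Xavier/Glorot-style alternative, keeping the same (nOut, nIn + 1) layout with the bias column included; the helper name xavier_uniform is hypothetical:

import numpy as np

def xavier_uniform(nIn, nOut, seed=None):
    # Hypothetical helper: draw weights uniformly from [-limit, +limit]
    # with limit = sqrt(6 / (fan_in + fan_out)), so a sigmoid or softmax
    # layer starts in its responsive range. The extra +1 column holds
    # the bias weights.
    rng = np.random.RandomState(seed)
    limit = np.sqrt(6.0 / (nIn + nOut))
    return rng.uniform(-limit, limit, size=(nOut, nIn + 1))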
Example #2
    def __init__(self,
                 n_in,
                 n_out,
                 weights=None,
                 activation='sigmoid',
                 is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)
        self.activation_derivative = Activation.get_derivative(
            self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray((n_in + 1, 1))
        self.inp[0] = 1
        self.outp = np.ndarray((n_out, 1))
        self.deltas = np.zeros((n_out, 1))

        # You can have better initialization here
        if weights is None:
            self.weights = np.random.rand(n_in + 1, n_out) / 10
        else:
            assert (weights.shape == (n_in + 1, n_out))
            self.weights = weights

        self.is_classifier_layer = is_classifier_layer

        # Some handy properties of the layers
        self.size = self.n_out
        self.shape = self.weights.shape
Example #3
    def __init__(self,
                 nIn,
                 nOut,
                 weights=None,
                 activation='sigmoid',
                 isClassifierLayer=False):

        # Get activation function from string
        self.activationString = activation
        self.activation = Activation.getActivation(self.activationString)
        self.activationDerivative = Activation.getDerivative(
            self.activationString)

        self.nIn = nIn
        self.nOut = nOut

        self.inp = np.ndarray((nIn + 1, 1))
        self.inp[0] = 1  # bias input
        self.outp = np.ndarray((nOut, 1))
        self.deltas = np.zeros((nOut, 1))

        # You can have better initialization here
        if weights is None:
            rns = np.random.RandomState(int(time.time()))
            self.weights = rns.uniform(size=(nIn + 1, nOut)) - 0.5
        else:
            assert (weights.shape == (nIn + 1, nOut))
            self.weights = weights

        self.isClassifierLayer = isClassifierLayer

        # Some handy properties of the layers
        self.size = self.nOut
        self.shape = self.weights.shape
Example #4
    def __init__(self, n_in, n_out, weights=None,
                 activation='sigmoid', is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)
        self.activation_derivative = Activation.get_derivative(
                                    self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray(n_in + 1)
        self.inp[0] = 1
        self.outp = np.ndarray(n_out)
        self.deltas = np.zeros(n_out)

        # You can have better initialization here
        if weights is None:
            self.weights = np.random.rand(n_in + 1, n_out) / 10 - 0.05

            # Shift each output unit's weights to zero mean
            # (columns correspond to output units in this layout)
            for i in range(n_out):
                self.weights[:, i] -= self.weights[:, i].mean()
        else:
            assert(weights.shape == (n_in + 1, n_out))
            self.weights = weights

        self.is_classifier_layer = is_classifier_layer

        # Some handy properties of the layers
        self.size = self.n_out
        self.shape = self.weights.shape
Example #5
 def compute_output(self, input):
     if len(input) != len(self.weights):
         raise ValueError("MLPNeuron: Bad input dimensions: "
                          "Got vector of length {}, expected {}".format(
                              len(input), len(self.weights)))
     weighted_sum = np.dot(input, self.weights) + self.bias
     return Activation.getActivation(self.activation)(weighted_sum)
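To make the arithmetic concrete, here is a self-contained sketch of the value compute_output produces for a three-input neuron; the inline expression stands in for the sigmoid that Activation.getActivation('sigmoid') presumably returns, and all values are illustrative:

import numpy as np

weights = np.array([0.1, -0.2, 0.3])
bias = 0.05
x = np.array([1.0, 0.5, -1.0])
weighted_sum = np.dot(x, weights) + bias   # 0.1 - 0.1 - 0.3 + 0.05 = -0.25
out = 1.0 / (1.0 + np.exp(-weighted_sum))  # sigmoid(-0.25), roughly 0.438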
Example #6
    def __init__(self, nIn, nOut, weights=None,
                 activation='softmax', isClassifierLayer=True):

        # Get activation function from string
        # Notice the functional programming paradigms of Python + Numpy
        self.activationString = activation
        self.activation = Activation.getActivation(self.activationString)

        self.nIn = nIn
        self.nOut = nOut

        self.input = np.ndarray((nIn+1, 1))
        self.input[0] = 1
        self.output = np.ndarray((nOut, 1))
        self.delta = np.zeros((nOut, 1))

        # You can have better initialization here
        if weights is None:
            rns = np.random.RandomState(int(time.time()))
            self.weights = rns.uniform(size=(nOut, nIn + 1)) - 0.5
        else:
            self.weights = weights

        self.isClassifierLayer = isClassifierLayer

        # Some handy properties of the layers
        self.size = self.nOut
        self.shape = self.weights.shape
Example #7
    def __init__(self, n_in, n_out, weights=None,
                 activation='sigmoid', is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray((n_in+1, 1))
        self.inp[0] = 1
        self.outp = np.ndarray((n_out, 1))
        self.deltas = np.zeros((n_out, 1))

        # You can have better initialization here
        if weights is None:
            # n_in + 1 rows to cover the bias input; the attribute must be
            # named self.weights for self.shape below to work
            self.weights = np.random.rand(n_in + 1, n_out) / 10
        else:
            self.weights = weights

        self.is_classifier_layer = is_classifier_layer

        # Some handy properties of the layers
        self.size = self.n_out
        self.shape = self.weights.shape
Example #8
    def __init__(self, train, valid, test, layers=None, input_weights=None,
                 output_task='classification', output_activation='softmax',
                 cost='crossentropy', learning_rate=0.01, epochs=50):

        """
        A MNIST recognizer

        Parameters
        ----------
        train : list
        valid : list
        test : list
        learning_rate : float
        epochs : positive int

        Attributes
        ----------
        training_set : list
        validation_set : list
        test_set : list
        learning_rate : float
        epochs : positive int
        performances : array of floats
        """

        self.learning_rate = learning_rate
        self.epochs = epochs
        self.classification_task = (output_task == 'classification')  # classification vs. regression
        self.output_activation = output_activation
        self.output_activation_func = Activation.get_activation(self.output_activation)
        self.cost = cost

        if self.cost == 'crossentropy':
            self.cost_function = CrossEntropyError()
        else:
            # no other cost function is supported
            raise ValueError('Cost function not supported: ' + cost)

        self.training_set = train
        self.validation_set = valid
        self.test_set = test

        # Record the performance of each epoch for later usages
        # e.g. plotting, reporting..
        self.performances = []

        self.layers = layers
        self.input_weights = input_weights

        # add bias values ("1"s) at the beginning of all data sets
        self.training_set.input = np.insert(self.training_set.input, 0, 1,
                                            axis=1)
        self.validation_set.input = np.insert(self.validation_set.input, 0, 1,
                                              axis=1)
        self.test_set.input = np.insert(self.test_set.input, 0, 1, axis=1)
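The three np.insert calls above prepend a constant 1 to every row, so each sample gains a bias component that multiplies the first weight. A self-contained sketch of the effect:

import numpy as np

X = np.array([[0.2, 0.7],
              [0.9, 0.1]])
X = np.insert(X, 0, 1, axis=1)  # insert a column of 1s at index 0
# X is now:
# [[1.  0.2  0.7]
#  [1.  0.9  0.1]]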
Example #9
 def _train_one_epoch(self):
     """
     Train one epoch, seeing all input instances
     """
     for img in self.training_set.input:
         self.noise = 0.1
         noisy = img + self.noise * np.random.uniform(-1.0, 1.0)
         normalized = Activation.tanh(noisy)
         self.MLP._feed_forward(normalized)
         self.MLP._compute_error(normalized[1:])
         self.MLP._update_weights()
Example #10
    def __init__(self, train, valid, test,
                 learningRate=0.01, epochs=50,
                 activation='sigmoid',
                 error='mse'):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test

        # Initialize the weight vector with small random values
        # between -0.3 and 0.3 to encourage sigmoid function learning
        self.weight = (np.random.rand(self.trainingSet.input.shape[1])
                       * 0.6 - 0.3)

        self.activation = Activation.getActivation(activation)
        self.activationPrime = Activation.getDerivative(activation)
        self.activationString = activation[0].upper() + activation[1:]

        self.erString = error

        if error == 'absolute':
            self.erf = erf.AbsoluteError()
        elif error == 'different':
            self.erf = erf.DifferentError()
        elif error == 'mse':
            self.erf = erf.MeanSquaredError()
        elif error == 'sse':
            self.erf = erf.SumSquaredError()
        elif error == 'bce':
            self.erf = erf.BinaryCrossEntropyError()
        elif error == 'crossentropy':
            self.erf = erf.CrossEntropyError()
        else:
            raise ValueError('Cannot instantiate the requested '
                             'error function: ' + error + ' not available')
Example #11
    def classify(self, testInstance):
        """Classify a single instance.

        Parameters
        ----------
        testInstance : list of floats

        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """
        # Write your code to do the classification on an input image
        return Activation.sign(np.dot(testInstance, self.weight))
Example #12
    def train(self, verbose=True):
        """Train the perceptron with the perceptron learning algorithm.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """

        # Perceptron learning: update on every training instance,
        # then measure validation accuracy after each epoch
        for epoch in range(self.epochs):
            for i in range(self.trainingSet.input.shape[0]):
                pre_result = dot(self.weight, self.trainingSet.input[i, :])
                result = Activation.sign(pre_result, threshold=0)
                error = self.trainingSet.label[i] - result
                self.updateWeights(self.trainingSet.input[i, :], error)
            correct = 0.0
            for j in range(self.validationSet.input.shape[0]):
                valid_result = Activation.sign(
                    dot(self.weight, self.validationSet.input[j, :]))
                if valid_result == self.validationSet.label[j]:
                    correct += 1.0
            accuracy = correct / self.validationSet.input.shape[0]
            # print('after %d epochs, validation accuracy: %.4f (%d correct)'
            #       % (epoch, accuracy, correct))
            # The early-stopping threshold of 0.98 is my own choice
            if accuracy >= 0.98:
                if verbose:
                    print(
                        'After %d epochs, validation accuracy %.4f >= 0.98'
                        % (epoch, accuracy))
                    print('Stopping training early to avoid overfitting.')
                break
        if epoch == self.epochs - 1:
            print('Validation accuracy never reached the threshold; '
                  'trained for all %d epochs' % self.epochs)
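The updateWeights method called in the loop above is not shown in this example; under the classic perceptron learning rule it reduces to one line. A sketch, assuming self.weight and self.learningRate are initialized as in the constructor of Example #10:

    def updateWeights(self, input, error):
        # Perceptron rule: nudge the weight vector along the input,
        # scaled by the learning rate and the signed error
        self.weight += self.learningRate * error * input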
Example #13
    def forward(self, input):
        """
        Compute forward step over the input using its weights

        Parameters
        ----------
        input : ndarray
            a numpy array (1, nIn) containing the input of the layer

        Returns
        -------
        ndarray :
            a numpy array (nOut,1) containing the output of the layer
        """
        self.input[1:, :] = input.T
        self.output = Activation.sigmoid(np.dot(self.weights, self.input))
        return self.output
Example #14
    def classify(self, testInstance):
        """Classify a single instance.

        Parameters
        ----------
        testInstance : list of floats

        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """
        # Write your code to do the classification on an input image
        testResult = Activation.sign(dot(self.weight, testInstance),
                                     threshold=0)
        return testResult
Example #15
    def __init__(self, nIn, nOut, weights=None, activation='sigmoid'):

        # Get activation function from string
        # Notice the functional programming paradigms of Python + Numpy
        self.activationString = activation
        self.activation = Activation.getActivation(self.activationString)

        self.nIn = nIn
        self.nOut = nOut

        # You can have better initialization here
        if weights is None:
            rns = np.random.RandomState(int(time.time()))
            self.weights = rns.uniform(size=(nOut, nIn + 1))
        else:
            self.weights = weights

        # Some handy properties of the layers
        # (set after the weights so that self.weights.shape exists)
        self.size = self.nOut
        self.shape = self.weights.shape
Example #16
    def computeDerivative(self, nextDerivatives, nextWeights):
        """
        Compute the derivatives (back)

        Parameters
        ----------
        nextDerivatives: ndarray
            a numpy array containing the derivatives from next layer
        nextWeights : ndarray
            a numpy array containing the weights from next layer

        Returns
        -------
        ndarray :
            a numpy array containing the partial derivatives on this layer
        """
        temp1 = np.dot(nextDerivatives.reshape(nextDerivatives.size, 1),
                       nextWeights.reshape(1, nextWeights.size))
        temp2 = (np.sum(temp1, axis=0)).reshape(self.size, 1)
        self.delta = (Activation.sigmoidPrime(self.output)) * temp2
        return self.delta
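The reshape-and-sum above computes a form of the backpropagation recurrence delta_j = sigmoid'(o_j) * sum_k w_kj * delta_k (exactly so when the next layer has a single neuron). If nextWeights were the full (nextSize, self.size) weight matrix, the same recurrence could be one matrix product; a sketch under that assumption:

    def computeDerivative(self, nextDerivatives, nextWeights):
        # Backpropagate: weight each next-layer delta by its connection
        # into this layer, then scale by the local sigmoid derivative.
        backprop = np.dot(nextWeights.T, nextDerivatives.reshape(-1, 1))
        self.delta = Activation.sigmoidPrime(self.output) * backprop
        return self.delta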
Example #17
    def __init__(self, n_in, n_out, weights=None,
                 activation='sigmoid', is_classifier_layer=False):

        # Get activation function from string
        self.activation_string = activation
        self.activation = Activation.get_activation(self.activation_string)

        self.n_in = n_in
        self.n_out = n_out

        self.inp = np.ndarray(n_in + 1)
        self.inp[0] = 1
        self.outp = np.ndarray(n_out)
        self.deltas = np.zeros(n_out)

        # You can have better initialization here
        if weights is None:
            self.weights = np.random.rand(n_in+1, n_out)/10
        else:
            self.weights = weights
Example #18
 def _fire(self, inp):
     return Activation.sigmoid(np.dot(np.array(inp), self.weight))
Example #19
 def fire(self, input):
     """Fire the output of the perceptron corresponding to the input """
     return Activation.sign(np.dot(np.array(input), self.weight))
Example #20
 def fire(self, input):
     """Fire the output of the perceptron corresponding to the input """
     # I already implemented it for you to see how you can work with numpy
     return Activation.sign(np.dot(np.array(input), self.weight[1:]) + self.weight[0])
Example #21
    def _fire(self, inp):
        return Activation.sigmoid(np.dot(np.array(inp), self.weights))
Example #22
 def _fire(self, inp, weightsOfNeuron):
     return Activation.sigmoid(np.dot(np.array(inp), np.array(weightsOfNeuron)))
Example #23
 def _fire(self, inp):
     return Activation.sigmoid(np.dot(np.array(inp), self.weights))
Example #24
 def fire(self, input):
     return Activation.sigmoid(np.dot(np.array(input), self.weight))
Example #25
 def fire(self, input):
     # input has shape (n, 1)
     return Activation.sigmoid(np.dot(self.weight, input))
Example #26
 def fire(self, input):
     """Fire the output of the perceptron corresponding to the input """
     return Activation.sign(np.dot(np.array(input), self.weight))
Example #27
 def _fire(self, inp):
     # Compute the sigmoid of every neuron over the bias-extended input
     ret = np.zeros(self.n_out)
     for i in range(0, self.n_out):
         ret[i] = Activation.sigmoid(np.dot(np.append(1, inp), self.weights[:, i]))
     return ret
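The per-neuron loop above can be collapsed into one matrix product, as Examples #21 and #23 do; a sketch assuming the same (n_in + 1, n_out) weight layout:

 def _fire(self, inp):
     # One dot product over the bias-extended input; Activation.sigmoid
     # applies element-wise, giving all n_out outputs at once.
     return Activation.sigmoid(np.dot(np.append(1, inp), self.weights))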
Example #28
 def _fire(self, input):
     """Fire the output of the perceptron corresponding to the input """
     # I already implemented it for you to see how you can work with numpy
     return Activation.sign(np.dot(np.array(input), self.weight))
 def fire(self, input):
     # Note the change of activation function compared with the
     # perceptron: sigmoid instead of Activation.sign
     return Activation.sigmoid(np.dot(np.array(input), self.weight))