Example #1
    def forwardPropagate(self, inputValue, output):
        #one-hot encode the target label against the full label range
        outputVec = 1 * (self.labelRange == output)

        #propagate to the hidden layer; the bias corresponds to a unit with constant output 1
        self.hiddenLayer.inputValue = inputValue
        self.hiddenLayer.linearOutput = np.dot(self.hiddenLayer.inputValue,
                                               self.hiddenLayer.W)
        self.hiddenLayer.y = self.hiddenLayer.activation(
            self.hiddenLayer.linearOutput)
        self.hiddenLayer.y[0] = 1

        self.hiddenLayer.yPrime = self.hiddenLayer.activationGradient(
            self.hiddenLayer.linearOutput)
        #the bias unit is constant, so its derivative is clamped to zero
        self.hiddenLayer.yPrime[0] = 0

        #propagate to the top layer; the bias corresponds to a unit with constant output 1
        self.softmaxTopLayer.inputValue = self.hiddenLayer.y
        self.softmaxTopLayer.linearOutput = np.dot(
            self.softmaxTopLayer.inputValue, self.softmaxTopLayer.W)
        self.softmaxTopLayer.y = SoftMaxFunc(self.softmaxTopLayer.linearOutput)
        self.softmaxTopLayer.predict = np.argmax(self.softmaxTopLayer.y)

        #error signal at the softmax output: (target - y)
        self.softmaxTopLayer.delta = outputVec - self.softmaxTopLayer.y
        #accumulate the cross-entropy gradient dE/dW = -x (target - y)^T
        self.softmaxTopLayer.weightGradient -= \
            np.outer(self.softmaxTopLayer.inputValue, self.softmaxTopLayer.delta)
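Every example on this page calls a module-level SoftMaxFunc helper that is not shown here. A minimal sketch of what it could look like, assuming a plain numerically stabilized softmax over a 1-D score vector (the stabilization detail is an assumption, not taken from the original code):

    import numpy as np

    def SoftMaxFunc(scores):
        #subtract the max score before exponentiating to avoid overflow
        exps = np.exp(scores - np.max(scores))
        #normalize so the outputs sum to 1 and read as class probabilities
        return exps / np.sum(exps)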
Example #2
    def forward(self, inputValue, output):
        t = self.t
        self.x_t[t, :] = inputValue
        self.preHidden[t, :] = self.hiddenLayer.preHidden

        #propagate to the hidden layer; the bias corresponds to a unit with constant output 1
        #(3.30)
        linearOutput = np.dot(inputValue, self.hiddenLayer.W_ih) \
            + np.dot(self.hiddenLayer.preHidden, self.hiddenLayer.W_hh) \
            + self.hiddenLayer.bias

        self.a_h_t[t, :] = linearOutput

        #(3.31)
        y = self.hiddenLayer.activation(linearOutput)
        self.b_h_t[t, :] = y
        self.hiddenLayer.preHidden = y

        #propagate to the top layer
        linearOutput = np.dot(y, self.softmaxLayer.W) + self.softmaxLayer.bias
        y = SoftMaxFunc(linearOutput)
        #predict = np.argmax(y)

        #error signal at the softmax outputs, buffered for the backward pass
        delta = output - y
        self.delta_k_t[t, :] = delta

        #advance the time step; once the full length-T sequence has been
        #consumed, run backpropagation through time and reset the counter
        self.t = t + 1
        if self.t == self.T:
            self.backward()
            self.t = 0
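One way this step function might be driven, assuming the surrounding class exposes the sequence length as self.T and expects one target vector per time step (the names rnn, xs, and targets are illustrative, not from the source):

    #feed one (input, target) pair per step; forward() buffers the activations
    #and output deltas, and triggers backward() itself once t reaches T
    for x_step, target_step in zip(xs, targets):
        rnn.forward(x_step, target_step)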
Example #3
    def onePass(self, preHidden, inputValue):
        linearOutput = np.dot(inputValue, self.hiddenLayer.W_ih) \
            + np.dot(preHidden, self.hiddenLayer.W_hh) \
            + self.hiddenLayer.bias

        y = self.hiddenLayer.activation(linearOutput)
        preHidden = y
        linearOutput = np.dot(y, self.softmaxLayer.W) + self.softmaxLayer.bias
        y = SoftMaxFunc(linearOutput)
        return (y, preHidden)
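onePass is the inference-time counterpart of the forward method above: instead of buffering state for a later backward pass, it threads the previous hidden activation through explicitly. A sketch of unrolling it over a sequence (n_hidden, rnn, and xs are illustrative names, not from the source):

    import numpy as np

    preHidden = np.zeros(n_hidden)   #initial hidden state
    predictions = []
    for x_step in xs:
        y, preHidden = rnn.onePass(preHidden, x_step)
        predictions.append(np.argmax(y))   #most probable class per step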
Example #4
    def __init__(self, rng, n_in, n_out):
        #Glorot/Xavier uniform initialization over U(-sqrt(6/(n_in+n_out)), sqrt(6/(n_in+n_out)))
        self.W = np.asarray(
            rng.uniform(low=-np.sqrt(6.0 / (n_in + n_out)),
                        high=np.sqrt(6.0 / (n_in + n_out)),
                        size=(n_in, n_out)))
        #deltaW stores the previous weight update, W(t) - W(t - 1)
        self.deltaW = np.zeros((n_in, n_out))

        self.delta = np.zeros(n_out)
        #accumulated gradient \partial E / \partial W
        self.weightGradient = np.zeros((n_in, n_out))
        self.inputValue = np.zeros(n_in)
        self.y = SoftMaxFunc(np.dot(self.inputValue, self.W))
        self.predict = np.argmax(self.y)
        self.params = self.W
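A sketch of constructing the layer with a seeded NumPy generator, matching the constructor's (rng, n_in, n_out) signature (the class name SoftmaxLayer is assumed for illustration; it is not given on this page):

    import numpy as np

    rng = np.random.RandomState(1234)   #seeded for reproducibility
    layer = SoftmaxLayer(rng, n_in=784, n_out=10)
    assert layer.W.shape == (784, 10)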
Example #5
    def accuracy(self, INPUT, OUTPUT):
        count = 0
        for inputValue, output in zip(INPUT, OUTPUT):
            linearOutputHidden = np.dot(inputValue, self.hiddenLayer.W)
            yHidden = self.hiddenLayer.activation(linearOutputHidden)
            #bias: the first hidden unit is clamped to constant output 1
            yHidden[0] = 1

            linearOutput = np.dot(yHidden, self.softmaxTopLayer.W)
            y = SoftMaxFunc(linearOutput)
            predict = np.argmax(y)

            if predict == output:
                count += 1

        return float(count) / len(OUTPUT)
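A sketch of how accuracy might be called, assuming INPUT rows are feature vectors whose first component is the constant bias unit and OUTPUT holds integer class labels (net, X_test, and y_test are illustrative names):

    test_accuracy = net.accuracy(X_test, y_test)
    print("test accuracy: %.3f" % test_accuracy)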