Example #1
    def _gradient(self, weights, lambda_value):
        thetas = list(reshape_vector(weights, self.theta_shapes))
        activations = self._activations(thetas)
        l = lambda_value

        # sigmas = [sigma3, sigma2, ...]
        # Output-layer error first, then propagate backwards through the hidden layers.
        sigmas = [activations[-1] - self.Y]
        for ind, layer in enumerate(self.z[-2::-1]):
            if self.add_bias:
                layer = add_bias(layer)
            sigma = np.dot(sigmas[-1],
                           thetas[-1 - ind]) * sigmoid_gradient(layer)
            if self.add_bias:
                # Drop the error term that corresponds to the bias unit.
                sigma = del_bias(sigma)
            sigmas.append(sigma)

        # deltas = [delta1, delta2, ...]
        deltas = []
        for activation, sigma in zip(activations, sigmas[::-1]):
            deltas.append(np.dot(sigma.T, activation))

        # gradients = [theta1_grad, theta2_grad, ...]
        gradients = []
        for theta, delta in zip(thetas, deltas):
            theta = del_bias(theta)
            theta = add_bias(theta, values_function=np.zeros)
            gradient = delta / self.m + np.dot((l / self.m), theta)
            gradients.append(gradient.T.ravel())

        return np.concatenate(gradients)
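
The example above leans on several helpers (reshape_vector, add_bias, del_bias, sigmoid_gradient) whose definitions are not shown. Below is a minimal sketch of what they plausibly look like, inferred only from how they are called here; treat the bodies as assumptions rather than the project's actual implementation.

import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def sigmoid_gradient(z):
    s = sigmoid(z)
    return s * (1.0 - s)


def add_bias(a, values_function=np.ones):
    # Prepend a bias column: ones for activations, zeros when masking the
    # bias weights out of the regularization term (values_function=np.zeros).
    return np.hstack([values_function((a.shape[0], 1)), a])


def del_bias(a):
    # Drop the bias column again.
    return a[:, 1:]


def reshape_vector(vector, shapes):
    # Cut the flat weight vector back into one (rows, cols) matrix per layer.
    start = 0
    for rows, cols in shapes:
        yield vector[start:start + rows * cols].reshape(rows, cols)
        start += rows * cols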
Example #2
    def _activations(self, thetas):
        if self.add_bias:
            input_layer = add_bias(self.X)
        else:
            input_layer = self.X

        # activations = [a1, a2, ...]
        activations = [input_layer]
        self.z = []

        # Process hidden layers
        for i in range(len(thetas)):
            self.z.append(np.dot(activations[-1], thetas[i].T))
            activations.append(sigmoid(self.z[-1]))

            # Don't add bias terms on the last layer
            if self.add_bias and i < len(thetas) - 1:
                activations[-1] = add_bias(activations[-1])

        return activations
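
To make the shape bookkeeping in _activations concrete, here is a small standalone walkthrough with made-up layer sizes (a 400-25-10 network and 5 samples); the sizes and the helper bodies are illustrative assumptions only.

import numpy as np


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def add_bias(a):
    return np.hstack([np.ones((a.shape[0], 1)), a])


m, n_in, n_hidden, n_out = 5, 400, 25, 10
X = np.random.rand(m, n_in)
theta1 = np.random.rand(n_hidden, n_in + 1)   # (25, 401)
theta2 = np.random.rand(n_out, n_hidden + 1)  # (10, 26)

a1 = add_bias(X)              # (5, 401)
z2 = np.dot(a1, theta1.T)     # (5, 25)
a2 = add_bias(sigmoid(z2))    # (5, 26)  bias added: not the last layer
z3 = np.dot(a2, theta2.T)     # (5, 10)
a3 = sigmoid(z3)              # (5, 10)  output layer, no bias column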
# A separate script: a manual forward pass through a small pretrained CNN (model_16).
import numpy as np

# Pretrained bias vectors (conv1, conv2, dense and output layer, judging by the file names).
bc1 = np.load('../../Predictor/server_net/models/model_16/biases/bc1.npy')
bc2 = np.load('../../Predictor/server_net/models/model_16/biases/bc2.npy')
bf1 = np.load('../../Predictor/server_net/models/model_16/biases/bd1.npy')
otb = np.load('../../Predictor/server_net/models/model_16/biases/bout.npy')

# test_im (the input image) and the kernels used below (e.g. wc1) are assumed
# to be loaded earlier in the full script.
test_im = np.reshape(test_im, (1, 28, 28, 1))


# conv2d takes the (1,28,28,1) image and a (5,5,1,5) kernel and gives a (1,28,28,5) output
# (a hedged sketch of such a function follows this step).
con1 = conv2d(test_im, wc1)
# np.save('con1.npy', con1)
print("conv1")
print(con1)
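
# --- Hedged sketch: one way conv2d could be implemented, assuming 'same' zero
# --- padding, stride 1, NHWC input and HWIO kernels. Named conv2d_sketch so it
# --- does not shadow whatever conv2d is actually imported by this script.
def conv2d_sketch(image, kernel):
    n, h, w, c_in = image.shape          # e.g. (1, 28, 28, 1)
    kh, kw, _, c_out = kernel.shape      # e.g. (5, 5, 1, 5)
    pad_h, pad_w = kh // 2, kw // 2
    padded = np.pad(image, ((0, 0), (pad_h, pad_h), (pad_w, pad_w), (0, 0)), mode='constant')
    out = np.zeros((n, h, w, c_out))
    for i in range(h):
        for j in range(w):
            patch = padded[:, i:i + kh, j:j + kw, :]   # (n, kh, kw, c_in)
            # Contract over the window and the input channels for every output channel.
            out[:, i, j, :] = np.tensordot(patch, kernel, axes=([1, 2, 3], [0, 1, 2]))
    return out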
# add_bias takes the 5 (1,28,28) feature maps and 5 biases, and adds each bias to
# every pixel of its feature map (sketched below).
con1 = add_bias(con1, bc1)
# np.save('con1bias.npy', con1)
print("conv1addbias")
print(con1)
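
# --- Hedged sketch: here add_bias appears to add one scalar bias per channel to
# --- every pixel of that channel, which broadcasting does directly. Named
# --- add_bias_sketch so it does not shadow the real helper.
def add_bias_sketch(tensor, biases):
    # tensor: (1, 28, 28, 5), biases: (5,)
    return tensor + np.reshape(biases, (1, 1, 1, -1))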
# activ_fun takes the (1,28,28,5) input and squares each and every element (sketched below).
con1 = activ_fun(con1)
# np.save('con1act.npy', con1)
print("conv1act")
print(con1)
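
# --- Hedged sketch: per the comment above, activ_fun simply squares every
# --- element (an x**2 activation). Named activ_fun_sketch to avoid shadowing.
def activ_fun_sketch(tensor):
    return tensor ** 2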
# meanpool2 takes the (1,28,28,5) input, performs mean pooling on each of the 5
# 28x28 matrices separately, and gives a (1,14,14,5) output (sketched below).
mean1 = meanpool2(con1)
# np.save('con1mean.npy', mean1)
print("mean pooling 1")
print(mean1)
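
# --- Hedged sketch: 2x2 mean pooling per channel, turning (1, 28, 28, 5) into
# --- (1, 14, 14, 5). Named meanpool2_sketch to avoid shadowing the real helper.
def meanpool2_sketch(tensor):
    n, h, w, c = tensor.shape
    # Group the pixels into non-overlapping 2x2 blocks and average each block.
    return tensor.reshape(n, h // 2, 2, w // 2, 2, c).mean(axis=(2, 4))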
# conv2d takes the (1,14,14,5) input and a (5,5,5,10) kernel and gives a (1,14,14,10) output