def main():
    """Entry point: load the problem data, train the network, dump debug output.

    The legacy gradient-descent experiment (theta initialisation, the
    commented-out ``stochasticGradientDescent`` driver and its prints) has
    been removed as dead code; ``Controller.solve()`` now drives training.
    """
    p = Problem("database.txt")
    y = p.getY()

    # BUG FIX: the old call was ``Controller(10, p)``, which does not match
    # ``Controller.__init__(self, filename)`` and raised TypeError before any
    # training ran.  Controller builds its own Problem from the filename.
    c = Controller("database.txt")
    c.solve()

    # Debug output retained from the original script.
    X = 2 * np.random.rand(100, 5)
    print(X)
    print("bine")
    print("bine")
    print("bine")
    print(y)
class Controller:
    """Wires a Problem's data set into an ArtificialNeuralNetwork and trains it."""

    def __init__(self, filename):
        # Load the data once; the network is built over the full X/Y set.
        self.__problem = Problem(filename)
        self.__x = self.__problem.getX()
        self.__y = self.__problem.getY()
        self.__nn = ArtificialNeuralNetwork(self.__x, self.__y, 2)

    def solve(self):
        """Run 4000 feedforward/backpropagation epochs, then plot the loss curve."""
        self.__nn.loss = []
        epochs = 4000
        learning_rate = 0.5  # constant step size for every epoch
        for _ in range(epochs):
            self.__nn.feedforward()
            self.__nn.backpropagation(learning_rate)
        print(self.__nn.output)
        # One loss value is recorded per epoch, so the x-axis is just 0..epochs-1.
        mpl.pyplot.plot(list(range(epochs)), self.__nn.loss, label='loss value vs iteration')
        mpl.pyplot.xlabel('Iterations')
        mpl.pyplot.ylabel('loss function')
        mpl.pyplot.legend()
        mpl.pyplot.show()