import numpy as np
import pylab
# hL (linear hypothesis / cost helpers), th (theta container), grad (this
# gradient-descent code) and the loaded `data` object are project modules,
# assumed to be imported or constructed elsewhere.


def gradientDescentLinear(theta, data, learnRate=1.0):
    """Batch gradient descent for a linear hypothesis: keep updating theta
    until the cost drops below a small threshold or an iteration cap is hit."""
    counter = 0
    # print("Gradient descent running!\n")
    # print("Initial theta:\n{}".format(theta.getData()))
    while hL.costFunction(theta, data) > 0.0001:
        # Compute every parameter's update from the same (old) theta before
        # writing any of them back.
        tmpTheta = []
        for index, t in enumerate(theta.getData()):
            tmpTheta.append(gradientDescentLinearIteration(theta, data,
                                                           index, learnRate))
        theta.read(tmpTheta)
        # print("Theta after {0} iterations:\n{1}".format(counter,
        #                                                 theta.getData()))
        counter += 1
        if counter > 10000:  # safety cap so the loop cannot run forever
            break
    return theta
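

# gradientDescentLinearIteration is called above but not defined in this
# excerpt. The sketch below is only an assumption of what the per-parameter
# step could look like (theta_j := theta_j - learnRate * dJ/dtheta_j), reusing
# hL.costFunctionDerivative from the demo code further down; it is not
# necessarily the project's actual helper.
def gradientDescentLinearIteration(theta, data, index, learnRate):
    # Move the parameter at `index` a small step against the cost gradient.
    current = theta.getData()[index]
    return current - learnRate * hL.costFunctionDerivative(theta, data, index)

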
# Collect the feature values (x[1]; x[0] is the bias term) and the targets so
# the training data can be plotted later.
X = []
Y = []
for item in data.getData():
    x = item.getX()
    y = item.getY()
    X.append(x[1])
    Y.append(y)
    print(x, y)
# Initialise a two-parameter theta (intercept and slope) and show its contents.
theta = th.theta(dimension=2)
theta.printData()


# Grab the first training example and pick a parameter index for the demo
# printouts below.
ex1 = data.getExample(0)
index = 0

print("Linear hypothesis value:\n{}".format(hL.hypothesisLinearTrain(theta,
                                                                     ex1)))
print("Simple cost for hypothesis:\n{}".format(hL.cost(theta, ex1)))
print("Number of examples:\n{}".format(data.getNumberOfExamples()))
print("Total cost for hypothesis:\n{}".format(hL.costFunction(theta, data)))
print("Cost function derivative for index {0}:\n{1}\
      ".format(index, hL.costFunctionDerivative(theta, data, index)))

# Fit theta with a learning rate of 0.01, then evaluate the fitted line
# y = t[0] + t[1] * x across the observed x range for plotting.
theta = grad.gradientDescentLinear(theta, data, 0.01)
print(theta.getData())
t = theta.getData()
a = np.arange(min(X), max(X), 0.1)
b = list(map(lambda x: t[0] + t[1] * x, a))
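# Equivalent, more idiomatic NumPy alternative: since `a` is already a NumPy
# array, the fitted line can be evaluated in one broadcast expression instead
# of mapping a lambda over it. Shown under a separate, hypothetical name
# (b_vec) so the `b` computed above is untouched.
b_vec = t[0] + t[1] * a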

# Scatter the training data and overlay the fitted regression line.
pylab.plot(X, Y, 'o')
pylab.plot(a, b)
pylab.show()