Example #1
print('****Minibatch Gradient Descent****')
print('\n--Training--\n')
hyperparam = {'eta': 0.3,        # learning rate
              'epochs': 300,     # passes over the training set
              'minibatches': 1,  # minibatches per epoch (1 = full batch)
              'adaptive': 0.99}  # presumably a per-epoch decay applied to eta
print('\nHyperparameters\n')
for k, v in hyperparam.items():
    print(k, '\t', v)
print('\nNumber of Training Examples: ', X_train.shape[0], '\n')

h_thetaf, cost = glm.fit(lr.J,
                         lr.gradJ,
                         hyperparam,
                         scaledtrain_data)
plot_cost(cost)
h_thetad = scale.denormalize(h_thetaf)  # map coefficients back to the original units
print('Coefficients\t', h_thetaf)       # coefficients in the scaled feature space
for i, h_theta in enumerate(h_thetad):
    print('h_theta' + str(i), '\t', h_theta)
yp_train = glm.predict(identity, X_train, h_thetaf)  # identity link: plain linear regression
plot_errors(y_train, yp_train)
corr_train = metrics.r2(X_train, y_train, h_thetaf)
print('R**2\t', corr_train)

print('\n--Testing--')
yp_test = glm.predict(identity, X_test, h_thetaf)
plot_errors(y_test, yp_test)
corr_test = metrics.r2(X_test, y_test, h_thetaf)
print('R**2\t', corr_test)
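The helper names above (glm, lr, scale, metrics, identity, plot_cost, plot_errors) come from the surrounding project and are not shown here. As a rough guide only, here is a minimal sketch of what the lr.J, lr.gradJ, identity, and metrics.r2 calls suggest for ordinary least-squares regression; the signatures are inferred from the calls, not taken from the project's actual code:

import numpy as np

def identity(z):
    # Link function: the identity link gives plain linear regression.
    return z

def J(h_theta, X, y):
    # Mean squared error cost of the hypothesis X @ h_theta.
    r = X @ h_theta - y
    return (r @ r) / (2 * len(y))

def gradJ(h_theta, X, y):
    # Gradient of J with respect to the coefficient vector.
    return X.T @ (X @ h_theta - y) / len(y)

def r2(X, y, h_theta):
    # Coefficient of determination: 1 - SS_res / SS_tot.
    yp = X @ h_theta
    return 1 - np.sum((y - yp) ** 2) / np.sum((y - np.mean(y)) ** 2)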
Example #2
from tabulate import tabulate

scaledtrain_data = zip(X_train, y_train)  # pair each training example with its target
# Scale the testing data using the same scaling parameters
# used for the training data
Z_test, y_test = zip(*test_data)
X_test = transform(Z_test)

h_theta0 = [0., 0., 0., 0., 0.]
print('****Gradient Descent****')
h_thetaf, cost = glm.fit(lr.J,
                         lr.gradJ,
                         h_theta0,
                         eta=0.3,
                         it_max=5000,
                         gf='gd')(scaledtrain_data)
lr.plot_cost(cost)
h_thetad = scale.denormalize(h_thetaf)
yp_train = glm.predict(identity, X_train, h_thetaf)

print('\n--Training--')
print('Coefficients\t', h_thetaf)  # coefficients in the scaled feature space
print(
    tabulate(list(zip(yp_train, y_train)),
             headers=['yp', 'yi'],
             tablefmt='fancy_grid'))

print('Coefficients\t', h_thetad)  # denormalized, in the original units
for i, h_theta in enumerate(h_thetad):
    print('h_theta' + str(i), '\t', h_theta)

corr_train = metrics.r2(X_train, y_train, h_thetaf)
print('R**2\t', corr_train)
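Unlike Example #1, which passes a hyperparameter dict, this example calls glm.fit(...) with keyword arguments and then applies the result to the data, so fit here apparently returns a closure. A minimal sketch of full-batch gradient descent under that curried-API assumption (the real glm.fit is project code and may differ):

import numpy as np

def fit(J, gradJ, h_theta0, eta=0.3, it_max=5000, gf='gd'):
    # Returns a function that runs gradient descent on (x, y) training pairs.
    def run(train_data):
        X, y = (np.array(v) for v in zip(*train_data))
        h_theta = np.array(h_theta0, dtype=float)
        cost = []
        for _ in range(it_max):
            h_theta = h_theta - eta * gradJ(h_theta, X, y)  # full-batch step ('gd')
            cost.append(J(h_theta, X, y))
        return h_theta, cost
    return run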