Example #1
import numpy as np
from numpy import concatenate


def main(rate):
    # Gradient descent: start from (3, 4) and repeatedly step against the
    # gradient, whose components come from the external functions funcx and funcy.
    result = [np.array([[3], [4]])]
    dem = 0  # iteration counter
    while True:
        step = rate * concatenate((funcx(result[-1]), funcy(result[-1])))
        xnew = result[-1] - np.reshape(step, result[-1].shape)
        # Stop once the update falls below the tolerance.
        if np.linalg.norm(result[-1] - xnew) < 1e-3:
            break
        result.append(xnew)
        dem += 1
    print(dem)
    return result[-1]
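
The snippet assumes two externally defined functions, funcx and funcy, returning the partial derivatives of the objective at the current point, plus a learning rate. A minimal usage sketch under that assumption, with f(x, y) = x^2 + y^2 chosen purely for illustration:

# Hypothetical partial derivatives of f(x, y) = x**2 + y**2;
# each takes the current 2x1 point and returns a 1x1 array.
def funcx(p):
    return np.array([[2 * p[0, 0]]])


def funcy(p):
    return np.array([[2 * p[1, 0]]])


minimum = main(rate=0.1)  # converges towards [[0], [0]]
print(minimum)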
Example #2
history = model.fit(train_X,
                    train_y,
                    epochs=50,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2)

# Plot the training and validation loss
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()

# make the prediction
yHat = model.predict(test_X)

inv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1)  # concatenate the arrays column-wise
inv_yHat = inv_yHat[:, 0]

test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_x[:, 1:]), axis=1)
inv_y = inv_y[:, 0]

rmse = sqrt(mean_squared_error(inv_yHat, inv_y))
print('Test RMSE: %.8f' % rmse)
mse = mean_squared_error(inv_yHat, inv_y)
print('Test MSE: %.8f' % mse)

yhat = model.predict(test_X)
test_X_reshaped = test_X.reshape((test_X.shape[0], test_X.shape[2]))

inv_yhat = concatenate((yhat, yhat, test_X_reshaped[:, 1:]), axis=1)
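
The reshape on test_X above is what makes the final concatenation possible: model.predict returns a 2-D (samples, 1) array, while the LSTM input is 3-D. A small shape check, using dummy sizes chosen here only for illustration:

import numpy as np

test_X_demo = np.zeros((100, 1, 8))  # (samples, timesteps, features)
yhat_demo = np.zeros((100, 1))       # shape of model.predict output

flat = test_X_demo.reshape((test_X_demo.shape[0], test_X_demo.shape[2]))
print(flat.shape)                                              # (100, 8)
print(np.concatenate((yhat_demo, flat[:, 1:]), axis=1).shape)  # (100, 8)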
Example #3
# Define a single-layer LSTM regression model
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
history = model.fit(train_X,
                    train_y,
                    epochs=10,
                    batch_size=72,
                    validation_data=(test_X, test_y),
                    verbose=2)
'''
    Plot the training and validation loss
'''
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()

# make the prediction; to compute the loss at the scale of the original data, the values have to be transformed back to their original range first
yHat = model.predict(test_X)

inv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1)  # concatenate the arrays column-wise
inv_yHat = inv_yHat[:, 0]

test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_x[:, 1:]), axis=1)
inv_y = inv_y[:, 0]

rmse = sqrt(mean_squared_error(inv_yHat, inv_y))
print('Test RMSE: %.3f' % rmse)
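
The comment before the prediction step mentions transforming the values back to their original range, but the snippet only rebuilds the full feature matrix and never actually inverts the scaling. A minimal sketch of that missing step, assuming the features were scaled with a scikit-learn MinMaxScaler held in a variable named scaler (not shown in the snippet):

# Assumption: `scaler` is the MinMaxScaler fitted on the full feature matrix.
inv_yHat = concatenate((yHat, test_x[:, 1:]), axis=1)
inv_yHat = scaler.inverse_transform(inv_yHat)  # back to the original units
inv_yHat = inv_yHat[:, 0]                      # first column holds the target

inv_y = concatenate((test_y.reshape((len(test_y), 1)), test_x[:, 1:]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:, 0]

rmse = sqrt(mean_squared_error(inv_y, inv_yHat))
print('Test RMSE: %.3f' % rmse)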
Example #4
'''
Created on Mar 19, 2019

@author: BAO
'''
import numpy as np
import matplotlib.pyplot as plt
from numpy.core._multiarray_umath import concatenate
from scipy.spatial.distance import cdist
from numpy.linalg import norm
# Synthetic data: y = 4 + 3*x plus Gaussian noise
x = np.random.rand(1000, 1)
y = 4 + 3 * x + .2 * np.random.randn(1000, 1)
one = np.ones((x.shape[0], 1))
X = concatenate((one, x), axis=1)  # design matrix with a bias column
w = np.dot(np.linalg.pinv(X), y)   # least-squares solution via the pseudo-inverse
print("library result: ", w)
plt.plot(x, y, 'b.')
plt.plot(x, X.dot(w), 'y')


def grad(w):
    # Gradient of the mean-squared-error cost: X^T (X w - y) / N
    return np.dot(X.T, X.dot(w) - y) / X.shape[0]


def Gradian(x, y):
    # Gradient descent on the same data, starting from w = [[2], [1]]
    result = [np.array([[2], [1]])]
    dem = 0  # iteration counter
    while True:
        wnew = result[-1] - 1 * grad(result[-1])
        if norm(wnew - result[-1]) < 1e-3:
            break
        result.append(wnew)
        dem += 1
    print(dem)
    return result[-1]
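
A short usage sketch (the call below is illustrative, not part of the original snippet): the gradient-descent estimate should land close to the pseudo-inverse result w printed above.

w_gd = Gradian(x, y)
print("gradient descent result: ", w_gd)
plt.plot(x, X.dot(w_gd), 'r')  # gradient-descent fit drawn over the data
plt.show()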
Example #5
from numpy import *
from numpy.core._multiarray_umath import concatenate

# More solutions for creating arrays in Python

# arr = array([1,2,3,4,5])
# arr = arr + 6
# print(arr)

# arr1 = array([1,2,3,4,5])
# arr2 = array([1,3,9,9,5])
# arr3 = arr1 + arr2
# print(arr3)

# How to concatenate one array with another array

arr1 = array([1,2,3,4,5])
arr2 = array([1,3,9,9,5])
print(concatenate([arr1,arr2]))

# This is how to concatenate one array to another array
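
As a small follow-on sketch (array contents chosen here for illustration), concatenate also joins multi-dimensional arrays along a chosen axis:

a = array([[1, 2], [3, 4]])
b = array([[5, 6]])

print(concatenate((a, b), axis=0))    # stack as extra rows -> shape (3, 2)
print(concatenate((a, b.T), axis=1))  # append as a column  -> shape (2, 3)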