def main():
    """Train/evaluate the deep LSTM on full-duplex data and save per-level errors.

    For each interference level ``j`` this converts complex I/Q samples to
    (magnitude, phase) pairs, fits ``regressor_abs`` on a training slice,
    predicts a test slice, and stores the dB error of 100 sequences in
    ``result/<j>DB.mat``.

    Relies on module-level names: SKCompat, learn, lstm_model, generate_data,
    TIMESTEPS, TRAINING_EXAMPLES, TESTING_EXAMPLES, BATCH_SIZE, TRAINING_STEPS.
    """
    #os.makedirs('result')
    # Build the deep recurrent network model (SKCompat wraps the Estimator so
    # the sklearn-style fit(x, y, ...) call below works).
    #regressor_abs = learn.Estimator(model_fn=lstm_model)
    regressor_abs = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))
    # Load the .mat data produced by the MATLAB fitting step.
    matfn = 'result/full-duplex.mat'
    data = sio.loadmat(matfn)
    for j in [0, 5, 10, 15, 20, 25, 30, 34]:
        x = data['x' + str(j) + 'b']
        y = data['y' + str(j) + 'b']
        Error = []
        for seqnum in range(100):
            print("", seqnum)
            # Convert complex samples to [magnitude, phase] feature pairs.
            # BUGFIX: math.atan(imag / real) raised ZeroDivisionError whenever
            # the real part was 0 and lost the quadrant of the angle;
            # math.atan2(imag, real) handles both correctly.
            complex_x = []
            complex_y = []
            for i in range(len(x[seqnum])):
                x_abs = abs(x[seqnum][i])
                x_w = math.atan2(x[seqnum][i].imag, x[seqnum][i].real)
                complex_x.append([x_abs, x_w])
            for i in range(len(y[seqnum])):
                y_abs = abs(y[seqnum][i])
                y_w = math.atan2(y[seqnum][i].imag, y[seqnum][i].real)
                complex_y.append([y_abs, y_w])
            # Build train/test windows for the magnitude/phase model.
            train_X_abs = generate_data(complex_x[0:TRAINING_EXAMPLES])
            #train_y_abs = complex_y[int(TIMESTEPS / 2):TRAINING_EXAMPLES + int(TIMESTEPS / 2) - 1]
            train_y_abs = complex_y[TIMESTEPS - 1:TRAINING_EXAMPLES]
            test_X_abs = generate_data(complex_x[TRAINING_EXAMPLES:TESTING_EXAMPLES + TRAINING_EXAMPLES - 1])
            #test_y_abs = complex_y[TRAINING_EXAMPLES + int(TIMESTEPS / 2):TESTING_EXAMPLES + TRAINING_EXAMPLES + int(TIMESTEPS / 2) - 1]
            test_y_abs = complex_y[TRAINING_EXAMPLES:TESTING_EXAMPLES + TRAINING_EXAMPLES - 1]
            # Train the model (sklearn-style fit).
            regressor_abs.fit(train_X_abs, train_y_abs, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
            # Predict; each prediction is wrapped in its own list.
            predicted_abs = [[pred] for pred in regressor_abs.predict(test_X_abs)]
            # Convert (magnitude, phase) back to rectangular form for the real
            # and predicted signals.
            # NOTE(review): cos(atan(phase)) applies atan to a value that is
            # already an angle — kept exactly as in the original pipeline (both
            # real and predicted sides are treated symmetrically), but worth
            # confirming against the intended math.
            a_real = []
            b_real = []
            a_pre = []
            b_pre = []
            for i in range(len(test_y_abs)):
                a_real.append(test_y_abs[i][0] * math.cos(math.atan(test_y_abs[i][1])))
                b_real.append(test_y_abs[i][0] * math.sin(math.atan(test_y_abs[i][1])))
                a_pre.append(np.array(predicted_abs[i])[0][0] * math.cos(math.atan(np.array(predicted_abs[i])[0][1])))
                b_pre.append(np.array(predicted_abs[i])[0][0] * math.sin(math.atan(np.array(predicted_abs[i])[0][1])))
            # Residuals between real and predicted rectangular components.
            diff_real = []
            diff_imag = []
            for i in range(len(a_real)):
                diff_real.append(a_real[i] - a_pre[i])
                diff_imag.append(b_real[i] - b_pre[i])
            # Signal-to-residual ratio in dB.
            error = 10 * math.log10(np.mean(np.square(predicted_abs))) - 10 * math.log10(np.mean(np.square(diff_real) + np.square(diff_imag)))
            print("db is : %f" % error)
            Error.append(error)
        sio.savemat('result/' + str(j) + 'DB.mat', {'Error': Error})
class dp_LSTM:
    """Deep multi-layer LSTM regressor with a sklearn-like fit/predict API.

    Parameters
    ------------
    HIDDEN_SIZE : hidden units per LSTM cell.
    NUM_LAYERS : number of stacked LSTM layers.
    BATCH_SIZE : mini-batch size used by fit().
    TRAINING_STEPS : number of training steps.
    learning_rate : optimizer learning rate.
    optimizer : optimizer name passed to optimize_loss.

    Attributes
    ------------
    regressor : SKCompat-wrapped Estimator; None until fit() is called.
    """

    def __init__(self, HIDDEN_SIZE=50, NUM_LAYERS=5, BATCH_SIZE=32,
                 TRAINING_STEPS=3000, learning_rate=0.1, optimizer='Adagrad'):
        # Network hyper-parameters.
        self.HIDDEN_SIZE = HIDDEN_SIZE
        self.NUM_LAYERS = NUM_LAYERS
        self.BATCH_SIZE = BATCH_SIZE
        # Training hyper-parameters.
        self.TRAINING_STEPS = TRAINING_STEPS
        self.learning_rate = learning_rate
        self.optimizer = optimizer
        self.regressor = None

    def LstmCell(self):
        """Return one fresh basic LSTM cell."""
        return tf.contrib.rnn.BasicLSTMCell(self.HIDDEN_SIZE)

    def lstm_model(self, X, y):
        """Estimator model_fn: build the stacked-LSTM regression graph."""
        # Create a brand-new cell per layer — reusing a single cell object for
        # every layer would make all layers share identically-named tensors.
        stacked = tf.contrib.rnn.MultiRNNCell(
            [self.LstmCell() for _ in range(self.NUM_LAYERS)])
        # Forward pass through the recurrent stack.
        rnn_out, _ = tf.nn.dynamic_rnn(stacked, X, dtype=tf.float32)
        rnn_out = tf.reshape(rnn_out, [-1, self.HIDDEN_SIZE])
        # Linear-regression head: fully connected layer with no activation.
        preds = tf.contrib.layers.fully_connected(rnn_out, 1, None)
        # Flatten predictions and labels to a common 1-D shape for the loss.
        labels = tf.reshape(y, [-1])
        preds = tf.reshape(preds, [-1])
        # Mean squared error loss.
        loss = tf.losses.mean_squared_error(preds, labels)
        # Optimizer step tied to the global step counter.
        train_op = tf.contrib.layers.optimize_loss(
            loss,
            tf.train.get_global_step(),
            optimizer=self.optimizer,
            learning_rate=self.learning_rate)
        return preds, loss, train_op

    def fit(self, train_X=None, train_y=None):
        """Build the estimator and train it on (train_X, train_y)."""
        self.regressor = SKCompat(tf.contrib.learn.Estimator(model_fn=self.lstm_model))
        self.regressor.fit(train_X, train_y,
                           batch_size=self.BATCH_SIZE,
                           steps=self.TRAINING_STEPS)

    def predict(self, test_X):
        """Return the trained model's predictions for test_X as an array."""
        return array(list(self.regressor.predict(test_X)))
def train():
    """Train the LSTM on a generated sine wave and plot predictions vs truth."""
    # Sample boundaries that split the sine curve into train and test ranges.
    split_point = TRAINING_EXAMPLES * SAMPLE_GAP
    end_point = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
    train_X, train_y = generate_data(
        np.sin(np.linspace(0, split_point, TRAINING_EXAMPLES, dtype=np.float32)))
    test_X, test_y = generate_data(
        np.sin(np.linspace(split_point, end_point, TESTING_EXAMPLES, dtype=np.float32)))
    # SKCompat gives the Estimator a sklearn-style fit/predict interface;
    # checkpoints go under 'model/'.
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir='model/'))
    plt.ion()  # interactive plotting mode
    # Train the model.
    regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
    # Predict on the held-out slice; each value wrapped in its own list so the
    # result is column-shaped.
    predicted = [[p] for p in regressor.predict(test_X)]
    # RMSE as the evaluation metric (message text kept from the original).
    rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0))
    print('Mean Square Error is: %f' % (rmse[0]))
    # Plot prediction vs. ground truth and save the figure.
    line_pred, = plt.plot(predicted, label='predicted')
    line_true, = plt.plot(test_y, label='real_sin')
    plt.legend([line_pred, line_true], ['predicted', 'real_sin'])
    plt.ylim((-1.2, 1.2))
    plt.draw()
    plt.savefig('cos.png')
def main(unused_argv):
    """Train, evaluate, and run predictions for the abalone-age estimator.

    Downloads/locates the three CSV datasets, fits the custom Estimator,
    reports loss and RMSE on the test set, and prints one prediction per row
    of the prediction set.

    Relies on module-level names: maybe_download, FLAGS, tf, Estimator,
    SKCompat, model_fn, LEARNING_RATE.
    """
    # Load datasets.
    abalone_train, abalone_test, abalone_predict = maybe_download(
        FLAGS.train_data, FLAGS.test_data, FLAGS.predict_data)

    # BUGFIX: np.int was a deprecated alias of the builtin int and was removed
    # in NumPy 1.24 — using plain int is equivalent and version-proof.
    # Training examples.
    training_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_train, target_dtype=int, features_dtype=np.float32)

    # Test examples.
    test_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_test, target_dtype=int, features_dtype=np.float32)

    # Set of 7 examples for which to predict abalone ages.
    prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header(
        filename=abalone_predict, target_dtype=int, features_dtype=np.float32)

    # Set model params.
    model_params = {"learning_rate": LEARNING_RATE}

    # Instantiate Estimator; SKCompat provides the x/y-style fit interface.
    est = Estimator(model_fn=model_fn, params=model_params)
    nn = SKCompat(est)

    # Fit.
    nn.fit(x=training_set.data, y=training_set.target, steps=5000)

    # Score accuracy on the test set.
    ev = est.evaluate(x=test_set.data, y=test_set.target, steps=1)
    print("Loss: %s" % ev["loss"])
    print("Root Mean Squared Error: %s" % ev["rmse"])

    # Print out predictions.
    print(prediction_set.data.shape)
    predictions = nn.predict(x=prediction_set.data)
    for i, p in enumerate(predictions):
        print("Prediction %s: %s" % (i + 1, p))
#regressor = learn.Estimator(model_fn=lstm_model, model_dir=MODEL_PATH) # 生成数据 train_X, train_y = generate_data(normalize_data[0:train_length]) test_X, test_y = generate_data(normalize_data[train_length:data_length]) train_X = np.transpose(train_X, [0, 2, 1]) train_y = np.transpose(train_y, [0, 2, 1]) test_X = np.transpose(test_X, [0, 2, 1]) test_y = np.transpose(test_y, [0, 2, 1]) # 拟合数据 # In[] #regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS) # 计算预测值 # In[] #predicted = [[pred] for pred in regressor.predict(test_X)] regressor.score(test_X, test_y) predicted_list = list(regressor.predict(test_X)) # In[] def final_data_for_plot(predicted_list, test_y): test_y_list = test_y.reshape(test_y.shape[0] * test_y.shape[1], 1).tolist() final_predicted_list = [] final_test_y_list = [] for i in range(0, len(predicted_list) - PREDICT_STEPS + 1): if i % (PREDICT_STEPS * PREDICT_STEPS) == 0: final_predicted_list.extend(predicted_list[i:i + PREDICT_STEPS]) final_test_y_list.extend(test_y_list[i:i + PREDICT_STEPS]) final_predicted = np.array(final_predicted_list).reshape( len(final_predicted_list), 1)
# NOTE(review): the first two lines below are the tail of an `lstm_model`
# model_fn whose `def` line lies above this chunk.
        learning_rate=0.1)
    return predictions, loss, train_op

# Build the deep model.
# Estimator has a companion class named SKCompat: when passing data as x/y
# arrays instead of an input_fn, the estimator must be wrapped with it.
# The second argument is the local checkpoint directory.
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model, model_dir="Models/model_2"))
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
# Training / testing sine-wave windows.
train_X, train_y = generate_data(
    np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(
    np.sin(
        np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
predicted = [[pred] for pred in regressor.predict(test_X)]
# Compute the (root) mean squared error.
rmse = np.sqrt(((predicted - test_y)**2).mean(axis=0))
fig = plt.figure()
plot_predicted, = plt.plot(predicted, label='predicted')
plot_test, = plt.plot(test_y, label='real_sin')
plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin'])
fig.savefig('sin.png')
plt.show()
print("Mean Square Error is:%f" % rmse[0])
# NOTE(review): Python 2 chunk (statement-form print); `regressor`,
# `generate_data`, `plt` and the *_EXAMPLES/SAMPLE_GAP constants come from
# elsewhere in the file.
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
train_X, train_y = generate_data(
    np.sin(np.linspace(0, test_start, TRAINING_EXAMPLES, dtype=np.float32)))
test_X, test_y = generate_data(
    np.sin(
        np.linspace(test_start, test_end, TESTING_EXAMPLES, dtype=np.float32)))
# Fit the data (debug prints show the tensor shapes first).
print "--------------------------------------------"
print np.shape(train_X)
print np.shape(train_y)
print np.shape(test_y), test_y
print "--------------------------------------------"
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Compute predictions.
predicted = [[pred] for pred in regressor.predict(test_X)]
print "predicted:", predicted
# Compute the (root) mean squared error.
rmse = np.sqrt(((predicted - test_y)**2).mean(axis=0))
print("Mean Square Error is:%f" % rmse[0])
#print predicted
print np.shape(test_X)
print test_X
# One-step-ahead prediction from the final window.
print np.shape(test_X[-1:])
print regressor.predict(test_X[-1:])
plot_predicted, = plt.plot(predicted, label='predicted')
plot_test, = plt.plot(test_y, label='real_sin')
# + '''对原数据进行尺度缩放''' data = data_processing(data) '''将所有样本来作为训练样本''' train_X, train_y = generate_data(data) '''将所有样本作为测试样本''' test_X, test_y = generate_data(data) '''以仿sklearn的形式训练模型,这里指定了训练批尺寸和训练轮数''' regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS) # + '''利用已训练好的LSTM模型,来生成对应测试集的所有预测值''' predicted = np.array([pred for pred in regressor.predict(test_X)]) '''绘制反标准化之前的真实值与预测值对比图''' plt.plot(predicted, label='预测值') plt.plot(test_y, label='真实值') plt.title('反标准化之前') plt.legend() plt.show() # - # 可以看到,预测值与真实值非常的吻合,但这并不是我们需要的形式,我们需要的是反标准化后的真实数值,下面进行相关操作; # + '''自定义反标准化函数''' def scale_inv(raw_data,scale=True): '''读入原始数据并转为list'''
def test_func(test_X, test_y, model_path="Models/model_sin"):
    """Restore the LSTM estimator from model_path and predict on test_X.

    test_y is consumed only by the score() call; the return value is the
    predictions as a list of single-element lists.
    """
    # Reload the trained estimator from its checkpoint directory.
    estimator = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir=model_path))
    # Score the test data; the returned value itself is not used further.
    _score = estimator.score(x=test_X, y=test_y)
    # Wrap each prediction so the output is column-shaped.
    return [[p] for p in estimator.predict(test_X)]
def main():
    """End-to-end exchange-rate LSTM demo.

    Loads the Excel series, plots it, trains the LSTM, predicts the test
    slice, de-normalizes, reports accuracy, then forecasts 30 further days.
    Relies on module-level names: pd, plt, np, tf, SKCompat, lstm_model,
    data_processing, generate_data, scale_inv, precision_rate, data_str,
    sheet_str, BATCH_SIZE, TRAINING_STEPS, TIMESTEPS.
    The bare triple-quoted strings are the author's cell notes; they are
    runtime expression statements and are kept verbatim.
    """
    ##########################################################################
    # Data preprocessing.
    # NOTE(review): `sheetname` is the pre-0.21 pandas keyword (later renamed
    # `sheet_name`) — kept as-is for the pandas version this was written for.
    data = pd.read_excel(data_str, header=0, sheetname=sheet_str)
    data.head()
    # Extract the timestamps (column 0) and closing prices (column 4).
    time = data.iloc[:, 0].tolist()
    data = data.iloc[:, 4].tolist()
    # Inspect the basic shape of the raw data.
    plt.figure(figsize=(12, 8))
    plt.rcParams['font.sans-serif'] = 'SimHei'  # SimHei font so Chinese labels render
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly
    plt.title('原始数据')
    plt.plot(time, data)
    plt.show()
    #####################################################################
    # Load tf's sklearn-like training module.
    '''载入tf中仿sklearn训练方式的模块'''
    learn = tf.contrib.learn
    # Model persistence: checkpoints under Models/model_1 allow incremental
    # (resumed) training.
    '''初始化LSTM模型,并保存到工作目录下以方便进行增量学习'''
    regressor = SKCompat(
        learn.Estimator(model_fn=lstm_model, model_dir='Models/model_1'))
    # Data processing: rescale the raw series.
    '''对原数据进行尺度缩放'''
    data = data_processing(data)
    # First 4000 points as the training sample ...
    '''将4000个数据来作为训练样本'''
    train_x, train_y = generate_data(data[0:4000])
    # ... and the remainder as the test sample.
    '''将剩余数据作为测试样本'''
    test_x, test_y = generate_data(data[3999:-1])
    #################################################################################
    # Train.
    regressor.fit(train_x, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
    #################################################################################
    # Predict the test samples with the trained model.
    '''利用已训练好的lstm模型,来生成对应测试集的所有预测值'''
    predicted = np.array([pred for pred in regressor.predict(test_x)])
    # Real vs. predicted values, before de-normalization.
    '''绘制反标准化之前的真实值与预测值对比图'''
    plt.figure(figsize=(12, 8))
    plt.plot(predicted, label='预测值')
    plt.plot(test_y, label='真实值')
    plt.title('反标准化之前')
    plt.legend()
    plt.show()
    # Prediction before de-normalization.
    ###################################################################################
    # De-normalize and plot again.
    scale_predicted = scale_inv(predicted)
    scale_test_y = scale_inv(test_y)
    '''绘制反标准化之后的真实值与预测值对比图'''
    plt.figure(figsize=(12, 8))
    plt.plot(scale_predicted, label='预测值')
    plt.plot(scale_test_y, label='真实值')
    plt.title('反标准化之后')
    plt.legend()
    plt.show()
    ######################################################################################
    # Side-by-side comparison figure.
    fig = plt.figure(figsize=(12, 8))
    fig.suptitle("对比图")
    ax1 = fig.add_subplot(1, 2, 1)
    # print(len(scale_predicted))
    ax1.plot(time[4012:-1], scale_predicted, label="测试集")
    ax1.plot(time[0:4000], scale_inv(data[0:4000]), label="训练集")
    plt.legend()
    plt.title('训练集数据+测试集数据')
    ax2 = fig.add_subplot(1, 2, 2)
    ax2.plot(time, scale_inv(data))
    plt.title('反标准化后数据')
    ########################################################################################
    # Accuracy of the de-normalized predictions.
    pre_rate = precision_rate(scale_predicted, scale_test_y)
    print('准确率为:', pre_rate)
    #########################################################################################
    # Forecast the next 30 days, feeding each prediction back into the series.
    day = 30
    length = len(data)
    for i in range(day):
        P = list()
        P.append([data[length - TIMESTEPS - 1 + i:length - 1 + i]])
        P = np.array(P, dtype=np.float32)
        pre = regressor.predict(P)
        data = np.append(data, pre)
    pre = data[len(data) - day:len(data) + 1]
    print("====================================")
    print("以下为进行30天的预测数据")
    print("反标准化之前:\n", pre)  # values before de-normalization
    print("反标准化之后:\n", scale_inv(pre))
    # Forecast plot.
    fig = plt.figure(figsize=(12, 8))
    plt.plot(scale_inv(pre))
    plt.title("未来30天汇率变化预测图")
    plt.show()
#将预测的目标转化成one-hot编码的形式,因为一共有三个类别,所以向量长度为3 #经过转换后,类别分别表示为(1,0,0),(0,1,0),(0,0,1) target = tf.one_hot(target, 3, 1, 0) # 计算预测值及损失函数。 #使用了一个全连接层,参数:输入,输出,激活函数 logits = tf.contrib.layers.fully_connected(features, 3, tf.nn.softmax) loss = tf.losses.softmax_cross_entropy(target, logits) # 创建优化步骤。 train_op = tf.contrib.layers.optimize_loss( loss, #损失函数 tf.contrib.framework.get_global_step(), #获取训练步数并再训练时更新 optimizer='Adam', #定义优化器 learning_rate=0.01) #定义学习率 return tf.arg_max(logits, 1), loss, train_op iris = datasets.load_iris() x_train, x_test, y_train, y_test = model_selection.train_test_split( iris.data, iris.target, test_size=0.2, random_state=0) #封装和训练模型,输出准确率 x_train, x_test = map(np.float32, [x_train, x_test]) classifier = SKCompat( learn.Estimator(model_fn=my_model, model_dir="Model/model_1")) classifier.fit(x_train, y_train, steps=800) y_predicted = [i for i in classifier.predict(x_test)] score = metrics.accuracy_score(y_test, y_predicted) print('Accuracy: %.2f%%' % (score * 100))
import numpy as np
# BUGFIX: `tf` is referenced below (tf.contrib.learn, tf.one_hot, ...) but was
# never imported in this chunk, which raises NameError at import time.
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat

# NOTE(review): `datasets`, `model_selection` and `metrics` (scikit-learn) are
# also used below but not imported here — presumably imported earlier in the
# original file; confirm.
learn = tf.contrib.learn


def my_model(features, target):
    """Estimator model_fn: 3-class softmax classifier for the iris data.

    Returns (predicted_class, loss, train_op).
    """
    # One-hot encode the integer labels into length-3 vectors.
    target = tf.one_hot(target, 3, 1, 0)
    # Single fully connected layer producing class scores.
    logits = tf.contrib.layers.fully_connected(features, 3, tf.nn.softmax)
    loss = tf.losses.softmax_cross_entropy(target, logits)
    # Adam optimizer step tied to the global step counter.
    train_op = tf.contrib.layers.optimize_loss(
        loss,
        tf.contrib.framework.get_global_step(),
        optimizer='Adam',
        learning_rate=0.01)
    return tf.argmax(logits, 1), loss, train_op


iris = datasets.load_iris()
x_train, x_test, y_train, y_test = model_selection.train_test_split(
    iris.data, iris.target, test_size=0.2, random_state=0)
# The contrib estimator expects float32 features.
x_train, x_test = map(np.float32, [x_train, x_test])
classifier = SKCompat(
    learn.Estimator(model_fn=my_model, model_dir='temp/c8/mdl'))
classifier.fit(x_train, y_train, steps=800)
y_pred = classifier.predict(x_test)
acc = metrics.accuracy_score(y_test, y_pred)
print(f'Accuracy: {acc}')
# 4. 进行训练 # 封装之前定义的lstm。 regressor = SKCompat(learn.Estimator(model_fn=lstm_model,model_dir="Models/model_2")) # 生成数据。 test_start = TRAINING_EXAMPLES * SAMPLE_GAP test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP train_X, train_y = generate_data(np.sin(np.linspace( 0, test_start, TRAINING_EXAMPLES, dtype=np.float32))) test_X, test_y = generate_data(np.sin(np.linspace( test_start, test_end, TESTING_EXAMPLES, dtype=np.float32))) # 拟合数据。 regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS) # 计算预测值。 predicted = [[pred] for pred in regressor.predict(test_X)] # 计算MSE。 rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0)) print ("Mean Square Error is: %f" % rmse[0]) ''' Mean Square Error is: 0.007281 ''' fig = plt.figure() plot_predicted = plt.plot(predicted, label = 'predicted', color="blue",marker='|', linestyle=':') plot_test = plt.plot(test_y, label='real_sin', color="red") #plt.legend([plot_predicted, plot_test], ['predicted', 'real_sin']) fig.savefig('sin.png') plt.show();
#regressor.fit(train_X, train_y,batch_size=50,steps=10000, monitors=[validation_monitor]) # 计算预测值 print "----------fit over,to predict------------" #predicted = [[pred] for pred in regressor.predict(test_X)] #print predicted # 计算MS #print "----------predict over,to rmse------------" #rmse = np.sqrt(((predicted - test_y) ** 2).mean(axis=0)) #print type(rmse),np.shape(rmse) #print("Mean Square Error is:%f" % rmse[0]) print "-----------------------test -------------------------" #print np.shape(test_X),test_X print np.shape(test_X[-BATCH_SIZE:]) print "--------------------- predict -----------------------" p = regressor.predict(test_X[-BATCH_SIZE:]) print np.shape(p) print np.sort(p) from datetime import datetime as dt date = clean_and_flat.index[-1] df = pd.DataFrame(p[-1:], index=[date], columns=target_cols) df.index.name = "date" print df conn = sqlite3.connect('History.db', check_same_thread=False) try: df.to_sql("predict", conn, if_exists='append') except Exception, e: print "exception :", e conn.close()
# Build the SKCompat-wrapped estimator with checkpoints under temp/c8/mdl2.
regressor = SKCompat(learn.Estimator(model_fn=lstm_model, model_dir='temp/c8/mdl2'))
test_start = TRAINING_EXAMPLES * SAMPLE_GAP
test_end = (TRAINING_EXAMPLES + TESTING_EXAMPLES) * SAMPLE_GAP
# Sine-wave train/test windows.
train_X, train_y = generate_data(np.sin(np.linspace(
    0, test_start, TRAINING_EXAMPLES, dtype=np.float32
)))
test_X, test_y = generate_data(np.sin(np.linspace(
    test_start, test_end, TESTING_EXAMPLES, dtype=np.float32
)))
regressor.fit(train_X, train_y, batch_size=BATCH_SIZE, steps=TRAINING_STEPS)
# Evaluate the fit with the coefficient of determination (r2_score is
# presumably sklearn's — imported elsewhere in the file).
pred_y = regressor.predict(test_X)
r2 = r2_score(test_y, pred_y)
print(f'R2 score: {r2}')

# import matplotlib as mpl
# mpl.use('Agg')
# import matplotlib.pyplot as plt
#
# fig = plt.figure()
# ax1 = fig.add_subplot(121)
# ax2 = fig.add_subplot(122)
#
# ax1.plot(pred_y, label='pred')
# ax2.plot(test_y, label='real_sin')
def train(InputDF, TargetDF):
    """Train an attention-GRU RNN regressor on the stock frames (Python 2).

    InputDF  : feature DataFrame, one column per feature.
    TargetDF : target DataFrame, one column per stock.
    Returns the estimator's prediction for the final BATCH_SIZE window.

    Relies on module-level names: BATCH_SIZE, NUM_LAYERS, RNN_HIDDEN_SIZE,
    NUM_EPOCHS, lr, learn, SKCompat, tf, np.
    """
    print "*" * 50, "Training a rnn network", "*" * 50
    num_features = len(InputDF.columns)
    num_stocks = len(TargetDF.columns)
    print "num stocks %s,last train data %s,first train data %s" % (num_stocks, TargetDF.index[-1], TargetDF.index[0])
    # Generate the data: keep only a multiple of BATCH_SIZE rows.
    used_size = (len(InputDF) // BATCH_SIZE) * BATCH_SIZE  # must be an integer multiple of BATCH_SIZE
    train_X, train_y = InputDF[-used_size:].values, TargetDF[-used_size:].values
    test_X, test_y = InputDF[-BATCH_SIZE:].values, TargetDF[-BATCH_SIZE:].values
    # TODO
    # The contrib estimator expects float32 tensors.
    train_X = train_X.astype(np.float32)
    train_y = train_y.astype(np.float32)
    test_X = test_X.astype(np.float32)
    test_y = test_y.astype(np.float32)
    print np.shape(train_X), np.shape(train_y)
    print "Train Set <X:y> shape"
    print "Train Data Count:%s , Feather Count:%s , Stock Count:%s" % (
        len(train_X), num_features, num_stocks)
    # 3300 stock-days? There are not that many stocks — 500.
    NUM_TRAIN_BATCHES = int(len(train_X) / BATCH_SIZE)
    ATTN_LENGTH = 10
    dropout_keep_prob = 0.5

    def LstmCell():
        # One basic LSTM cell (currently unused; makeGRUCells builds the stack).
        lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(RNN_HIDDEN_SIZE,
                                                 state_is_tuple=True)
        return lstm_cell

    def makeGRUCells():
        # Stack of GRU cells; only the first layer gets the attention wrapper,
        # every layer gets dropout on its output.
        cells = []
        for i in range(NUM_LAYERS):
            cell = tf.nn.rnn_cell.GRUCell(num_units=RNN_HIDDEN_SIZE)
            if len(cells) == 0:
                # Add attention wrapper to first layer.
                cell = tf.contrib.rnn.AttentionCellWrapper(
                    cell, attn_length=ATTN_LENGTH, state_is_tuple=True)
            cell = tf.nn.rnn_cell.DropoutWrapper(cell,
                                                 output_keep_prob=dropout_keep_prob)
            cells.append(cell)
        attn_cell = tf.nn.rnn_cell.MultiRNNCell(cells,
                                                state_is_tuple=True)  # author's note: GRUCell needs False (True misbehaves); BasicLSTMCell needs True
        return attn_cell

    def lstm_model(X, y):
        # Estimator model_fn: static RNN over the batch, linear head, MSE loss.
        cell = makeGRUCells()
        '''
        #理论dynnamic rnn 首选,但计算速度相比静态慢很多,不知何故
        output, _ = tf.nn.dynamic_rnn(
            cell,
            inputs=tf.expand_dims(X, -1),
            dtype=tf.float32,
            time_major=False
        )
        '''
        # Treat each item in the batch as a time step: reshape then unstack.
        split_inputs = tf.reshape(X, shape=[1, BATCH_SIZE, num_features])
        # Each item in the batch is a time step, iterate through them
        # print split_inputs
        split_inputs = tf.unstack(split_inputs, axis=1, name="unpack_l1")
        output, _ = tf.nn.static_rnn(cell,
                                     inputs=split_inputs,
                                     dtype=tf.float32
                                     )
        output = tf.transpose(output, [1, 0, 2])
        # Keep only the last time step's output.
        output = output[-1]
        # Fully connected layer with no activation — i.e. linear regression —
        # compressing the output to one value per stock.
        predictions = tf.contrib.layers.fully_connected(output, num_stocks, None)
        labels = y
        loss = tf.losses.mean_squared_error(predictions, labels)
        # print "lost:",loss
        train_op = tf.contrib.layers.optimize_loss(loss,
                                                   tf.contrib.framework.get_global_step(),
                                                   optimizer="Adagrad",
                                                   learning_rate=lr)
        return predictions, loss, train_op

    PRINT_STEPS = 100
    # NOTE(review): validation_monitor is built but not passed to fit() below.
    validation_monitor = learn.monitors.ValidationMonitor(test_X, test_y,
                                                          every_n_steps=PRINT_STEPS,
                                                          early_stopping_rounds=1000)
    # Train.
    regressor = SKCompat(learn.Estimator(model_fn=lstm_model,
                                         # model_dir="Models/model_0",
                                         config=tf.contrib.learn.RunConfig(
                                             save_checkpoints_steps=100,
                                             save_checkpoints_secs=None,
                                             save_summary_steps=100,
                                         )))
    print "Total Train Step: ", NUM_TRAIN_BATCHES * NUM_EPOCHS
    print "*" * 50, "Training a rnn regress task now", "*" * 50
    regressor.fit(train_X, train_y, batch_size=BATCH_SIZE,
                  steps=NUM_TRAIN_BATCHES * NUM_EPOCHS)  # steps=train_labels.shape[0]/batch_size * epochs,
    print "*" * 50, "Predict tomorrow stock price now", "*" * 50
    pred = regressor.predict(test_X[-BATCH_SIZE:])  # use the last 21 days to predict the next 5 days of prices
    return pred