def collect_train_data(begin, end):
    conn, cursor = db.db_connect()
    for market in config.MARKETS:
        code_list = stock.get_stock_by_market(market, cursor)
        for code in code_list:
            collect_stock_train_data(begin, end, code, market, cursor)
    conn.commit()
    db.db_close(conn, cursor)
def update_daily():
    global page
    page = 0
    conn, cursor = db.db_connect()
    while page <= total_page:
        print('page : ' + str(page) + ' of ' + str(total_page))
        try:
            updateDZJY(page, cursor)
        except Exception:
            print('traceback.print_exc():')
            traceback.print_exc()
            page = page + 1
            continue
        page = page + 1
        time.sleep(2)
    conn.commit()
    db.db_close(conn, cursor)
def collect_price(begin, end):
    conn, cursor = db.db_connect()
    enddate = datetime.datetime(int(end[0:4]), int(end[5:7]), int(end[8:10]))
    begindate = datetime.datetime(int(begin[0:4]), int(begin[5:7]), int(begin[8:10]))
    begin = begindate.strftime('%Y%m%d')
    end = enddate.strftime('%Y%m%d')
    for market in config.MARKETS:
        code_list = stock.get_stock_by_market(market, cursor)
        for code in code_list:
            if market == 'sh':
                download_price_file(begin, end, code, 0)
            else:
                download_price_file(begin, end, code, 1)
            read_price_file(code + '.csv', market, cursor)
            os.remove(code + '.csv')
    conn.commit()
    db.db_close(conn, cursor)
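# Illustration (not part of the original source; hypothetical helper name):
# collect_price expects begin/end as 'YYYY-MM-DD' strings and converts them to
# the 'YYYYMMDD' form passed to download_price_file.  A minimal, self-contained
# sketch of that conversion:
def _demo_price_date_format(begin='2021-06-01'):
    import datetime
    begindate = datetime.datetime(int(begin[0:4]), int(begin[5:7]), int(begin[8:10]))
    return begindate.strftime('%Y%m%d')  # -> '20210601'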
        code_path = '/home/ayesha/data/models/' + code
        model_path = code_path + '/' + term
        module_file = tf.train.latest_checkpoint(model_path)
        saver.restore(sess, module_file)
        # training
        for i in range(config.DAILY_TRAINING_STEPS + 1):
            for step in range(len(batch_index) - 1):
                final_states, loss_ = sess.run(
                    [train_op, loss],
                    feed_dict={
                        X: train_x[batch_index[step]:batch_index[step + 1]],
                        Y: train_y[batch_index[step]:batch_index[step + 1]]
                    })
        # save the model
        print("save model : ",
              saver.save(sess, model_path + '/stock.model',
                         global_step=global_step))


if __name__ == '__main__':
    conn, cursor = db.db_connect()
    code = sys.argv[1]
    batch_size = int(sys.argv[2])
    time_step = int(sys.argv[3])
    term = sys.argv[4]
    date = sys.argv[5]
    daily_train_lstm(code, batch_size, time_step, term, date, cursor)
    db.db_close(conn, cursor)
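# Usage illustration (not part of the original source): the script reads five
# positional arguments -- stock code, batch size, time step, term and date.
# Assuming the file is named daily_train_lstm.py (the real filename is not
# shown in this excerpt), an invocation might look like:
#
#   python daily_train_lstm.py 600519 60 20 short 2021-06-01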
def predict_lstm(code, time_step, term, date):
    conn, cursor = db.db_connect()
    with tf.variable_scope(code + '_' + term, reuse=None):
        # weights and biases for the input and output layers
        weights = {
            'in': tf.Variable(tf.random_normal([input_size, rnn_unit])),
            'out': tf.Variable(tf.random_normal([rnn_unit, 1]))
        }
        biases = {
            'in': tf.Variable(tf.constant(0.1, shape=[rnn_unit, ])),
            'out': tf.Variable(tf.constant(0.1, shape=[1, ]))
        }
        X = tf.placeholder(tf.float32, shape=[None, time_step, input_size])
        batch_size = tf.shape(X)[0]
        time_step_tensor = tf.shape(X)[1]
        w_in = weights['in']
        b_in = biases['in']
        # reshape the tensor to 2-D for the matmul; the result feeds the hidden layer
        input = tf.reshape(X, [-1, input_size])
        input_rnn = tf.matmul(input, w_in) + b_in
        # reshape back to 3-D as the input to the LSTM cell
        input_rnn = tf.reshape(input_rnn, [-1, time_step_tensor, rnn_unit])
        cell = tf.nn.rnn_cell.BasicLSTMCell(rnn_unit)
        init_state = cell.zero_state(batch_size, dtype=tf.float32)
        # output_rnn holds the output of every LSTM step; final_states is the state of the last cell
        output_rnn, final_states = tf.nn.dynamic_rnn(
            cell, input_rnn, initial_state=init_state, dtype=tf.float32)
        # input to the output layer
        output = tf.reshape(output_rnn, [-1, rnn_unit])
        w_out = weights['out']
        b_out = biases['out']
        pred = tf.matmul(output, w_out) + b_out
        saver = tf.train.Saver()
        mean, std, test_x, future_price = get_test_data(
            code, time_step, term, date, cursor)
        if len(test_x) == 0:
            db.db_close(conn, cursor)
            return
        with tf.Session() as sess:
            # restore the trained parameters
            code_path = '/home/ayesha/data/models/' + code
            model_path = code_path + '/' + term
            module_file = tf.train.latest_checkpoint(model_path)
            saver.restore(sess, module_file)
            test_predict = []
            for step in range(len(test_x)):
                prob = sess.run(pred, feed_dict={X: [test_x[step]]})
                predict = prob.reshape((-1))
                test_predict.extend(predict)
            # de-normalize the predicted values
            test_predict = np.array(test_predict) * float(std) + float(mean)
            #print(test_predict)
            est_price = test_predict[-1]
            print('refreshing stock est data ' + code + ' ' + date)
            params = [
                code, date, est_price, future_price, est_price, future_price
            ]
            stock_est_data.refresh_stock_est_data(params, cursor)
            conn.commit()
            db.db_close(conn, cursor)
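# Illustration (not part of the original source; hypothetical helper and values):
# predict_lstm maps the raw network outputs back to price space with the inverse
# z-score transform x = z * std + mean, using the mean/std returned by
# get_test_data, and takes the last value as the estimate.  A standalone sketch:
def _demo_denormalize():
    import numpy as np
    mean, std = 10.5, 2.3                    # stand-ins for get_test_data() statistics
    raw = np.array([0.12, 0.34, -0.05])      # stand-ins for the network outputs
    prices = raw * float(std) + float(mean)  # same transform as in predict_lstm
    return prices[-1]                        # the last prediction is used as est_price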