def main(_):
    """Build an LstmRNN in a fresh session, then train it or verify a checkpoint exists."""
    pp.pprint(flags.FLAGS.__flags)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    # Grab GPU memory on demand rather than reserving it all up front.
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        rnn_model = LstmRNN(
            sess,
            FLAGS.stock_count,
            lstm_size=FLAGS.lstm_size,
            num_layers=FLAGS.num_layers,
            num_steps=FLAGS.num_steps,
            input_size=FLAGS.input_size,
            embed_size=FLAGS.embed_size,
        )

        show_all_variables()

        stock_data_list = load_sp500(
            FLAGS.input_size,
            FLAGS.num_steps,
            k=FLAGS.stock_count,
            target_symbol=FLAGS.stock_symbol,
        )

        if FLAGS.train:
            rnn_model.train(stock_data_list, FLAGS)
        else:
            # Test mode requires a previously saved checkpoint.
            if not rnn_model.load()[0]:
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    """Entry point: construct the LstmRNN and either train or check for a saved model."""
    pp.pprint(FLAGS.__flags)

    run_config = tf.ConfigProto()
    # Let TensorFlow grow its GPU allocation as needed instead of claiming it all.
    run_config.gpu_options.allow_growth = True

    with tf.Session(config=run_config) as sess:
        rnn_model = LstmRNN(
            sess,
            FLAGS.stock_count,
            lstm_size=FLAGS.lstm_size,
            num_layers=FLAGS.num_layers,
            num_steps=FLAGS.num_steps,
            input_size=FLAGS.input_size,
            keep_prob=FLAGS.keep_prob,
        )

        stock_data_list = load_sp500(
            FLAGS.input_size,
            FLAGS.num_steps,
            k=FLAGS.stock_count,
            target_symbol=FLAGS.stock_symbol,
        )

        if FLAGS.train:
            rnn_model.train(stock_data_list, FLAGS)
        else:
            # Without a trained checkpoint there is nothing to test.
            if not rnn_model.load()[0]:
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    """Build the LstmRNN and run the training and/or prediction paths selected by FLAGS."""
    pp.pprint(flags.FLAGS.__flags)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True  # use powerful gpu

    with tf.Session(config=run_config) as sess:
        rnn_model = LstmRNN(
            sess,
            FLAGS.stock_count,
            lstm_size=FLAGS.lstm_size,
            num_layers=FLAGS.num_layers,
            num_steps=FLAGS.num_steps,
            input_size=FLAGS.input_size,
            embed_size=FLAGS.embed_size,
            stock=FLAGS.stock_symbol,
        )

        show_all_variables()

        if FLAGS.train:
            print('--------------load dataset begin--------------------------')
            # load_stock replaced the earlier load_sp500 helper.
            stock_data_list = load_stock(
                FLAGS.input_size,
                FLAGS.num_steps,
                k=FLAGS.stock_count,
                target_symbol=FLAGS.stock_symbol,
            )
            print('----------------end------------------------')
            rnn_model.train(stock_data_list, FLAGS)

        if FLAGS.prediction:
            print('--------------load dataset begin--------------------------')
            # NOTE(review): TushareFlag presumably switches to live tushare data — confirm.
            stock_data_list = load_stock(
                FLAGS.input_size,
                FLAGS.num_steps,
                k=FLAGS.stock_count,
                target_symbol=FLAGS.stock_symbol,
                TushareFlag=True,
            )
            print('----------------end------------------------')
            # stock_data_list = ts.get_k_data(FLAGS.stock_symbol, autype='hfq', start='2016-01-01')
            # time.strftime('%Y-%m-%d', time.localtime(time.time()))
            rnn_model.prediction(stock_data_list, FLAGS)
        else:
            # Prediction mode was not requested; fail fast if no checkpoint is available.
            if not rnn_model.load()[0]:
                raise Exception("[!] Train a model first, then run test mode")
def main(_):
    """Train the stock LSTM, or load a saved checkpoint and dump its predictions.

    In test mode the predictions are optionally pickled to
    ``api_log/<symbol>.pkl`` when ``FLAGS.write`` is set.
    """
    pp.pprint(flags.FLAGS.__flags)

    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True  # allocate GPU memory on demand

    with tf.Session(config=run_config) as sess:
        rnn_model = LstmRNN(
            sess,
            FLAGS.stock_count,
            lstm_size=FLAGS.lstm_size,
            num_layers=FLAGS.num_layers,
            num_steps=FLAGS.num_steps,
            input_size=FLAGS.input_size,
            keep_prob=FLAGS.keep_prob,
            embed_size=FLAGS.embed_size,
        )

        show_all_variables()

        stock_data_list = load_sp500(
            FLAGS.input_size,
            FLAGS.num_steps,
            k=FLAGS.stock_count,
            target_symbol=FLAGS.stock_symbol,
        )

        if FLAGS.train:
            rnn_model.train(stock_data_list, FLAGS)
        else:
            # BUGFIX: verify a trained checkpoint exists *before* predicting.
            # The original called predict() first, so an untrained model ran
            # inference on uninitialized weights before the guard fired.
            if not rnn_model.load()[0]:
                raise Exception("[!] Train a model first, then run test mode")

            test_prediction, test_loss = rnn_model.predict(
                stock_data_list, 50, FLAGS)

            if FLAGS.write:
                with open('api_log/' + FLAGS.stock_symbol + ".pkl", 'wb') as f:
                    pickle.dump(test_prediction, f)

            # rnn_model.plot_samples(test_prediction, test_prediction, 'check.png', 'GOOG')
            # plt.show(block=True)

            # BUGFIX: was a Python 2 print statement (`print '-' * 33`), which
            # is a syntax error on Python 3; siblings in this file use print().
            print('-' * 33)
def main(_):
    """Run the LSTM and CNN predictors and write the combined price forecast to CSV."""
    pp.pprint(FLAGS.__flags)

    run_config = tf.ConfigProto()  # controls GPU resource usage
    # With allow_growth, TF starts with a small GPU allocation and grows it on
    # demand; memory is never handed back, so fragmentation is possible.
    run_config.gpu_options.allow_growth = True

    tf.reset_default_graph()
    result = []

    with tf.Session(config=run_config) as sess:
        rnn_model = LstmRNN(
            sess,
            FLAGS.stock_count,
            lstm_size=FLAGS.lstm_size,
            num_layers=FLAGS.num_layers,
            num_steps=FLAGS.num_steps,
            input_size=FLAGS.input_size,
            keep_prob=FLAGS.keep_prob,
        )

        stock_data_list = load_sp500(
            FLAGS.input_size,
            FLAGS.num_steps,
            k=FLAGS.stock_count,
            target_symbol=FLAGS.stock_symbol,
        )
        # Seed the running price series with the last known price.
        result.append(stock_data_list[0].final_price)

        if FLAGS.train:
            rnn_model.load()
            # rnn_model.train(stock_data_list, FLAGS)  # LSTM training step
            # rnn_model.test(stock_data_list, FLAGS)   # LSTM evaluation step
            Lstm_predict = rnn_model.test2(stock_data_list)  # LSTM inference step

    # Tear down the LSTM graph before building the CNN one.
    tf.reset_default_graph()

    with tf.Session(config=run_config) as sess:
        cnn_model = CNN(
            sess,
            embedding_dim=FLAGS.embedding_dim,
            num_steps=FLAGS.num_steps2,
            num_filters=FLAGS.num_filters,  # number of convolution kernels
            kernel_size=FLAGS.kernel_size,  # convolution kernel size
            hidden_dim=FLAGS.hidden_dim,    # fully-connected layer units
            keep_prob=FLAGS.keep_prob,
        )

        cnn_data_list = load_szzs2(
            FLAGS.embedding_dim,
            FLAGS.num_steps2,
            target_symbol=FLAGS.stock_symbol2,
        )

        if FLAGS.train:
            cnn_model.load()
            # cnn_model.train(cnn_data_list, FLAGS)  # CNN training step
            CNN_predict = cnn_model.test(cnn_data_list)  # CNN inference step

        # Overwrite the leading LSTM outputs with the CNN outputs.
        for idx, cnn_value in enumerate(CNN_predict):
            Lstm_predict[idx] = cnn_value
        print(Lstm_predict)

        # Turn the predicted relative returns into an absolute price path.
        for idx in range(len(Lstm_predict)):
            result.append(result[idx] * (1 + Lstm_predict[idx]))
        print(result[1:])

        # Persist the forecast to a .CSV file.
        dataframe = pd.DataFrame({'pred': np.array(result[1:]).tolist()})
        dataframe.to_csv(os.path.join("predictions", "pred.csv"), index=False, sep=',')