import pickle

import tushare as ts

# `Time` (date helper) and `RECORD_FILE` (DataStore path) are project-level
# definitions assumed to exist elsewhere in this module.


def dump_record():
    """
    :return: None. Persists the date of this data fetch and the market-open
        dates for the following 15 days into the DataStore (RECORD_FILE).
    """
    record_info_dict = {}
    record_info_dict['now'] = Time.now()
    pro = ts.pro_api()
    record_info_dict['open'] = list(
        pro.trade_cal(exchange='', start_date=Time.now(),
                      end_date=Time.delta(15), is_open='1').cal_date)
    with open(RECORD_FILE, 'wb') as fw:
        pickle.dump(record_info_dict, fw)
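# Hypothetical usage sketch (not part of the original module): read back the
# record written by dump_record() and check whether today is a market-open
# day before fetching fresh data. It only assumes RECORD_FILE and Time as used above.
#
#     with open(RECORD_FILE, 'rb') as fr:
#         record = pickle.load(fr)
#     if Time.now() in record['open']:
#         ...  # today is a trading day; safe to fetch fresh data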
def get_ipo(start_date):
    """
    :param start_date: str, start date
    :return: stocks listed (IPO) within the given date range
        ts_code        stock code
        sub_code       subscription code
        name           name
        ipo_date       online issue date
        issue_date     listing date
        amount         total issue amount (10k shares)
        market_amount  online issue amount (10k shares)
        price          issue price
        pe             price/earnings ratio
        limit_amount   individual subscription limit (10k shares)
        funds          funds raised (100 million CNY)
        ballot         allotment (winning) rate
    """
    pro = ts.pro_api()
    ipo_data = pro.new_share(start_date=start_date)
    # Keep only shares whose listing date is already in the past.
    return ipo_data[ipo_data.issue_date < Time.now()]
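# Hypothetical usage example for get_ipo(); it requires a valid tushare token
# (set via ts.set_token() elsewhere), and the start date below is illustrative.
#
#     recent_ipos = get_ipo('20190101')
#     print(recent_ipos[['ts_code', 'name', 'ipo_date', 'issue_date', 'price']])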
import numpy as np
import tensorflow as tf

# `Logger`, `Time`, `plot_static`, `matrixAccuracy`, `trainRange`, `testRange`,
# `datanum`, `train_rate` and `a` (argument namespace) are defined elsewhere
# in the project.


def runTrain(sess, d, rnn, msg):
    sess.run(tf.global_variables_initializer())
    experiment = '{}_{}_{}'.format(rnn.name, datanum, Time.now())
    model_path = "model/{}".format(experiment)
    log_path = "SAVE_Logs/{}.txt".format(experiment)
    stat_path = "SAVE_Logs/{}.stat".format(experiment)
    logger = Logger(log_path)

    stat = {"tests": 0}
    stat_lowAbs = {"dist": 100}

    # Total number of mini-batches in the training and test sets.
    total_number_of_batch = 0
    for number in trainRange:
        total_number_of_batch += d[number].numberBatch
    total_number_of_batch_test = 0
    for number in testRange:
        total_number_of_batch_test += d[number].numberBatch

    num_epoch = 100
    totalTime = Time()
    for curr_epoch in range(0, num_epoch):
        cost_sum = 0
        test_cost_sum = 0
        trainTime = Time()

        # Training pass (dropout keep probability 0.8).
        for number in trainRange:
            for index in range(d[number].numberBatch):
                cost, _ = rnn.Train(d[number]._MFCC[index], d[number]._LABEL[index], 0.8)
                cost_sum += cost
        avg_cost = cost_sum / total_number_of_batch

        # Per-class accuracy on the training set, averaged over both classes.
        acc1 = 0.0
        acc0 = 0.0
        for number in trainRange:
            for index in range(d[number].numberBatch):
                ac1, ac0 = rnn.Accuracy(d[number]._MFCC[index], d[number]._LABEL[index])
                acc1 += ac1
                acc0 += ac0
        avg_train_accuracy = (acc1 / total_number_of_batch + acc0 / total_number_of_batch) / 2

        # Evaluation on the test set: accuracy, cost and 2x2 result (confusion) matrix.
        acc1 = 0.0
        acc0 = 0.0
        test_cost_sum = 0
        resultMatrix = np.zeros([2, 2], int)
        for number in testRange:
            for index in range(d[number].numberBatch):
                ac1, ac0 = rnn.Accuracy(d[number]._MFCC[index], d[number]._LABEL[index])
                test_cost_sum += rnn.Cost(d[number]._MFCC[index], d[number]._LABEL[index])
                resultMatrix += rnn.return_ResultMatrix(d[number]._MFCC[index], d[number]._LABEL[index])
                acc1 += ac1
                acc0 += ac0
        avg_test_accuracy = (acc1 / total_number_of_batch_test + acc0 / total_number_of_batch_test) / 2
        test_distance = np.abs(acc1 / total_number_of_batch_test - acc0 / total_number_of_batch_test)
        avg_test_cost = test_cost_sum / total_number_of_batch_test

        # Checkpoint the model with the best test accuracy so far.
        if avg_test_accuracy > stat["tests"]:
            stat['tests'] = avg_test_accuracy
            stat['trains'] = avg_train_accuracy
            stat['epoch'] = curr_epoch
            stat['cost'] = avg_cost
            stat['traincost'] = avg_test_cost
            stat['resultMatrix'] = resultMatrix
            stat['dist'] = test_distance
            rnn.Save(model_path)

        # Checkpoint the model with the smallest gap between the two class accuracies.
        if test_distance < stat_lowAbs['dist']:
            stat_lowAbs['tests'] = avg_test_accuracy
            stat_lowAbs['trains'] = avg_train_accuracy
            stat_lowAbs['epoch'] = curr_epoch
            stat_lowAbs['cost'] = avg_cost
            stat_lowAbs['traincost'] = avg_test_cost
            stat_lowAbs['resultMatrix'] = resultMatrix
            stat_lowAbs['dist'] = test_distance
            rnn.Save(model_path + 'lowdist')

        log = ("Epoch {}/{}, l_rate:{:.10f}, cost = {:>7.4f}, train cost = {:>7.4f}, "
               "accuracy(train,test/best):({:.4f}, {:.4f}/{:.4f}), "
               "test_distance = {:.4f}, time = {}/{}\n").format(
            curr_epoch, num_epoch, rnn.learning_rate, avg_cost, avg_test_cost,
            avg_train_accuracy, avg_test_accuracy, stat['tests'], test_distance,
            trainTime.duration(), totalTime.duration())
        logger.write(log)

    summary = """
{}.{}.{}
learning_rate : {}
train_data_ratio : {}
num_epoch : {} batch_size : {} windowsize : {} windowshift : {}
Best evaluation based on test_data :
    Accuracy_train : {} Accuracy_test : {} at epoch : {}
Best evaluation based on test_data at lowest distance :
    Accuracy_train : {} Accuracy_test : {} at epoch : {}

best Result Matrix :
{}{}
best Result Matrix at lowest distance :
{}{}
""".format(
        rnn.name, experiment, msg,
        rnn.learning_rate, train_rate,
        num_epoch, a.batch_size, a.windowsize, a.windowstep,
        stat["trains"], stat["tests"], stat['epoch'],
        stat_lowAbs['trains'], stat_lowAbs['tests'], stat_lowAbs['epoch'],
        stat['resultMatrix'], matrixAccuracy(stat['resultMatrix']),
        stat_lowAbs['resultMatrix'], matrixAccuracy(stat_lowAbs['resultMatrix']))
    print(summary)
    logger.flush()
    logger.close()
    plot_static(log_path)
    with open("SAVE_Logs/log.txt", "a") as f:
        f.write(summary)
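# Minimal, hypothetical driver for runTrain(). The batched MFCC dataset `d`
# and the `rnn` wrapper (with Train/Accuracy/Cost/return_ResultMatrix/Save
# methods) must be built by the project's own loaders, so only the calling
# convention with a TensorFlow 1.x session is sketched here.
#
#     with tf.Session() as sess:
#         runTrain(sess, d, rnn, msg="baseline run")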