def sendM305Pic(win_qq_name, stk_code):
    """
    Plot a stock's close price, its M5-M30 moving-average divergence and
    the relative rank of that divergence, then push the rendered figure
    to the given QQ window.

    :param win_qq_name: name of the QQ window that receives the picture
    :param stk_code: stock code, e.g. '300183'
    :return: None
    """
    # Roughly 400 calendar days of daily bars ending today
    k_data = ts.get_k_data(stk_code,
                           start=add_date_str(get_current_date_str(), -400))

    # Divergence between the 5-day and 30-day moving averages,
    # ranked against its own history
    k_data['m5'] = k_data['close'].rolling(window=5).mean()
    k_data['m30'] = k_data['close'].rolling(window=30).mean()
    k_data['diff_m305'] = k_data.apply(lambda r: r['m5'] - r['m30'], axis=1)
    k_data['rank'] = k_data.apply(
        lambda r: relativeRank(k_data['diff_m305'], r['diff_m305']), axis=1)

    k_data = k_data.dropna(axis=0)
    k_data.plot('date', ['close', 'diff_m305', 'rank'], subplots=True)
    plt.title(stk_code+'M5-M30 分数')

    # Render the figure into memory, re-encode it as BMP and strip the
    # 14-byte BMP file header so the QQ picture helper can consume it.
    png_buf = BytesIO()
    bmp_buf = BytesIO()
    plt.savefig(png_buf)
    png_buf.seek(0)
    Image.open(png_buf).convert("RGB").save(bmp_buf, "BMP")
    pic_bytes = bmp_buf.getvalue()[14:]
    bmp_buf.close()
    png_buf.close()
    plt.close()

    send_pic_qq(win_qq_name, pic_bytes)
def calRSVRank(stk_code, Mdays, history_length=20):
    """
    Compute the relative rank of a smoothed RSV (raw stochastic value)
    for the latest trading day of a stock.

    The low/high/close series are first smoothed with an M-day moving
    average; RSV is then built from the rolling minimum of the smoothed
    low and the rolling maximum of the smoothed high.

    :param stk_code: stock code, passed through to get_k_data_JQ
    :param Mdays: window M used for both the smoothing and the rolling extremes
    :param history_length: number of daily bars to fetch
    :return: rank (as produced by relativeRank) of the latest RSV numerator
    """
    df = get_k_data_JQ(stk_code, count=history_length,
                       end_date=get_current_date_str())

    M = Mdays

    # M-day moving averages of low / high / close
    df['low_M' + str(M)] = df['low'].rolling(window=M).mean()
    df['high_M' + str(M)] = df['high'].rolling(window=M).mean()
    df['close_M' + str(M)] = df['close'].rolling(window=M).mean()

    # Rolling extremes of the smoothed series
    df['low_M' + str(M) + '_min'] = df['low_M' + str(M)].rolling(window=M).min()
    df['high_M' + str(M) + '_max'] = df['high_M' + str(M)].rolling(window=M).max()

    # Classic RSV: (close - lowest low) / (highest high - lowest low)
    df['RSV'] = df.apply(
        lambda x: (x['close_M' + str(M)] - x['low_M' + str(M) + '_min'])
        / (x['high_M' + str(M) + '_max'] - x['low_M' + str(M) + '_min']),
        axis=1)

    # Numerator alone; its historical rank is what this function reports
    df['RSV_abs'] = df.apply(
        lambda x: (x['close_M' + str(M)] - x['low_M' + str(M) + '_min']),
        axis=1)
    df['RSV_Rank'] = df.apply(
        lambda x: relativeRank(df['RSV_abs'], x['RSV_abs']),
        axis=1)

    # Bug fix: the column created above is 'RSV_Rank' — the previous
    # 'RSV_Rand' lookup raised KeyError on every call.
    return df.tail(1)['RSV_Rank'].values[0]
def sendMainIndexPicToPublic():
    """
    For each main index (SH composite, SZ component, ChiNext) plot the
    recent close, the 9-day-divergence rank and the MACD histogram, then
    push each figure to the QQ group window.

    NOTE(review): original author's caveat — "inaccurate, because not
    enough historical data is fetched".
    :return: None
    """
    for x in ['sh', 'sz', 'cyb']:
        # df = ts.get_k_data(x, start=add_date_str(get_current_date_str(), -400))

        # Map the short index code to the JoinQuant security code
        stk_code_normal = {
            'sh': '000001.XSHG',
            'sz': '399001.XSHE',
            'cyb': '399006.XSHE'
        }[x]
        df = jqdatasdk.get_price(stk_code_normal, frequency='daily', count=100,
                                 end_date=get_current_date_str())

        # get_price indexes rows by timestamp; derive a plain date string.
        # NOTE(review): the lambda parameter x shadows the loop variable x
        # inside these apply calls — works, but fragile.
        df['datetime'] = df.index
        df['date'] = df.apply(lambda x: str(x['datetime'])[:10], axis=1)

        # MACD with the standard 12/26/9 parameters
        df['MACD'], df['MACDsignal'], df['MACDhist'] = talib.MACD(
            df.close, fastperiod=12, slowperiod=26, signalperiod=9)

        # Divergence from the 9-day moving average, ranked against history
        df['m9'] = df['close'].rolling(window=9).mean()
        df['diff_m9'] = df.apply(lambda x: x['close'] - x['m9'], axis=1)
        df['rank'] = df.apply(
            lambda x: relativeRank(df['diff_m9'], x['diff_m9']), axis=1)

        # Only the last 50 sessions are drawn
        df_plot = df.tail(50)

        # df.tail(50).plot('date', ['close', 'rank', 'MACD'],
        #                  subplots=True,
        #                  title=['历史收盘价', '历史分数', 'MACD指标'],
        #                  legend=True)

        # Three stacked axes: close, rank, MACD bars.
        # NOTE(review): `subplots` is used unqualified — presumably
        # matplotlib's; confirm the module-level import.
        fig, ax = subplots(ncols=1, nrows=3)
        ax[0].plot(range(0, len(df_plot)), df_plot['close'], 'g--', label='收盘价')
        ax[1].plot(range(0, len(df_plot)), df_plot['rank'], 'r--', label='上涨概率')
        ax[2].bar(range(0, len(df_plot)), df_plot['MACD'])

        # Shared x axis: MM-DD labels, rotated for readability
        for ax_sig in ax:
            ax_sig.set_xticks(range(0, len(df_plot)))
            ax_sig.set_xticklabels(
                [x[-5:] for x in list(df_plot['date'].values)], rotation=90)
            ax_sig.legend(loc='best')
        ax[0].set_title({'sh': "上证", 'sz': '深证', 'cyb': '创业板'}[x])

        # ----------------------- send the figure to QQ -----------------------
        # NOTE(review): here the whole figure object is passed, while other
        # senders in this codebase pass raw BMP bytes — verify send_pic_qq
        # accepts both.
        send_pic_qq('大盘上涨概率公示', fig)
        plt.close()
# NOTE(review): fragment — `df` and the enclosing scope are defined earlier
# in the file, outside this view.
df = df.dropna(how='any', axis=0)

"""
画图语句
df.plot('date',
        ['upper', 'lower', 'high', 'low'],
        style=['-', '-', '*', '*'])
"""

# Short-term (3-day) volatility: range between the rolling 3-day low
# minimum and high maximum
near_days = 3
df['low_near'] = df['low'].rolling(window=near_days).min()
df['high_near'] = df['high'].rolling(window=near_days).max()
df['wave_near'] = df.apply(lambda x: x['high_near'] - x['low_near'], axis=1)

# Inverted rank: a narrow recent range scores high.
# NOTE(review): presumably relativeRank returns a 0-100 score — confirm.
df['wave_near_rank'] = df.apply(
    lambda x: 100 - relativeRank(df['wave_near'], x['wave_near']), axis=1)

"""
df.plot('date',
        ['close', 'wave_near_rank', 'wave_near'],
        style=['--*', '--*'],
        subplots=True)
"""

"""
如何调节买卖比例:
用移动均线-未成熟随机值(RSV)来对买卖比例进行分配,
实现在上涨时,易买难卖,在下跌时,易卖难买,最终能够实现随着趋势线调整仓位重心的目的。
避免在迅速下跌时爆仓,在迅速上涨时空仓的情况。
"""


def updateRSVRecord():
    # NOTE(review): truncated here — the try body (and its except handler)
    # continues beyond this view.
    try:
        # Connection/engine pair for the local 'stk_opt_info' database
        (conn_opt, engine_opt) = genDbConn(localDBInfo, 'stk_opt_info')
# encoding=utf-8
"""
"""
import tushare as ts
from RelativeRank.Sub import relativeRank

# Demo: rank every close of the last 90 daily bars of stock 300183
# against the whole 90-bar close history.
df = ts.get_k_data('300183').tail(90)
df['c_rank'] = df.apply(
    lambda row: relativeRank(df['close'], row['close']), axis=1)

"""
df.plot('date', ['close', 'c_rank'], subplots=True)
"""

# Sentinel for setting a debugger breakpoint at end of script
end = 0
def predict_tomorrow(stk_code, label, N_STEPS=N_STEPS, feature_cols=feature_cols,
                     HIDDEN_SIZE=HIDDEN_SIZE, NUM_LAYERS=NUM_LAYERS):
    """
    Predict tomorrow's value of one price column with a per-stock LSTM.

    :param stk_code: stock code, e.g. '300183'
    :param label: column to predict, e.g. 'high' — selects checkpoint <stk>_<label>
    :param N_STEPS: LSTM sequence length (module-level default)
    :param feature_cols: feature column names fed to the model
    :param HIDDEN_SIZE: LSTM hidden size (module-level default)
    :param NUM_LAYERS: number of LSTM layers (module-level default)
    :return: de-normalized price prediction, or -1 when the checkpoint is missing
    """

    """ ---------------------- 读取json中存储的极值 ---------------------- """
    # Per-stock normalization extremes saved at training time.
    # NOTE(review): backslashes in this path only work on Windows — confirm.
    with open(rootPath + '\LSTM\AboutLSTM\stk_max_min.json', 'r') as f:
        max_min_info = json.load(f)

    """ ---------------------- 获取实时数据 ---------------------- """
    # Latest daily bars; extra 30 rows cover the rolling-window warm-up
    data_now = ts.get_k_data(stk_code)[-(N_STEPS + 30):]

    # Add the M9 rank feature, ranked against the training-time history
    data_now['m9'] = data_now['close'].rolling(window=9).mean()
    data_now['diff_m9'] = data_now.apply(
        lambda x: (x['close'] - x['m9']) / x['close'], axis=1)
    data_now['rank'] = data_now.apply(lambda x: relativeRank(
        max_min_info[stk_code]['m9_history'], x['diff_m9']), axis=1)

    # rootPath = 'C:/Users\paul\Desktop\软件代码\Git-Clone'

    # Normalize prices and volume with the saved training-time extremes
    for c in ['close', 'high', 'low', 'open']:
        data_now[c] = (data_now[c].values - max_min_info[stk_code]['p_min']
                       ) / (max_min_info[stk_code]['p_max'] -
                            max_min_info[stk_code]['p_min'])
    data_now['volume'] = (
        data_now['volume'].values - max_min_info[stk_code]['v_min']) / (
        max_min_info[stk_code]['v_max'] - max_min_info[stk_code]['v_min'])

    # NOTE(review): tail(20) hard-codes the sequence length — presumably
    # N_STEPS is 20; confirm these stay in sync.
    input_normal = data_now.loc[:, feature_cols].tail(20).values

    tf.reset_default_graph()

    """ ---------------------- 创建模型 ---------------------- """
    predictions, loss, train_op, X, y = lstm_model(n_steps=N_STEPS,
                                                   n_inputs=len(feature_cols),
                                                   HIDDEN_SIZE=HIDDEN_SIZE,
                                                   NUM_LAYERS=NUM_LAYERS)

    # Saver used to restore the trained checkpoint
    saver = tf.train.Saver()

    # NOTE(review): the session is never closed — leaks on every call.
    sess = tf.Session()

    model_name = stk_code + '_' + label
    model_dir = rootPath + '\LSTM\AboutLSTM\modelDir/'

    if os.path.exists(model_dir + model_name + '/' + model_name + '.ckpt.meta'):
        # Restore graph and weights from the latest checkpoint
        saver = tf.train.import_meta_graph(model_dir + model_name + '/' +
                                           model_name + '.ckpt.meta')
        saver.restore(sess,
                      tf.train.latest_checkpoint(model_dir + model_name + '/'))

        # graph = tf.get_default_graph()

        # Reset to avoid graph-reuse errors on repeated calls
        tf.reset_default_graph()

        r_rela = sess.run([predictions], feed_dict={X: [input_normal]})[0][0][0]

        # Undo the price normalization to get an absolute prediction
        return max_min_info[stk_code]['p_min'] + (
            max_min_info[stk_code]['p_max'] -
            max_min_info[stk_code]['p_min']) * r_rela
    else:
        print('加载模型' + model_name + '失败!')
        return -1
def genTrainDataHL(stk_code, start_time, N_STEPS, feature_cols, label_col,
                   data_store_dir):
    """
    Build and persist LSTM train/test samples for predicting the next
    day's high or low of a stock.

    :param stk_code: stock code, e.g. '300183'
    :param start_time: first date of the daily bars to download
    :param N_STEPS: LSTM sequence length (each slice is N_STEPS - 1 rows)
    :param feature_cols: columns used as model features
    :param label_col: one-element list naming the label column, e.g. ['high']
    :param data_store_dir: output directory, e.g. './DataPrepare/'
    :return: None — writes <stk>train<label>.pkl and <stk>test<label>.pkl
    """
    df = ts.get_k_data(stk_code, start=start_time)

    # Relative divergence from the 9-day moving average, ranked
    # against its own history
    df['m9'] = df['close'].rolling(window=9).mean()
    df['diff_m9'] = df.apply(
        lambda r: (r['close'] - r['m9']) / r['close'], axis=1)
    df['rank'] = df.apply(
        lambda r: relativeRank(df['diff_m9'], r['diff_m9']), axis=1)

    # The label is tomorrow's value of the chosen column
    label_tomorrow = label_col[0] + '_tomorrow'
    df[label_tomorrow] = df[label_col[0]].shift(-1)

    # Drop rows invalidated by the rolling windows / shift
    df = df.dropna(how='any')

    # One shared min/max for all price columns, a separate pair for volume
    p_min = np.min(df.loc[:, ['open', 'close', 'high', 'low']].values)
    p_max = np.max(df.loc[:, ['open', 'close', 'high', 'low']].values)
    v_min = np.min(df.loc[:, ['volume']].values)
    v_max = np.max(df.loc[:, ['volume']].values)

    # Record the extremes so inference can undo the normalization later
    global json_max_min_info
    json_max_min_info[stk_code] = {
        'p_max': p_max,
        'p_min': p_min,
        'v_max': v_max,
        'v_min': v_min,
        'm9_history': list(df['diff_m9'].values)
    }

    # Min-max normalize prices (label included) and volume
    for col in ['close', 'high', 'low', 'open', label_tomorrow]:
        df[col] = (df[col].values - p_min) / (p_max - p_min)
    df['volume'] = (df['volume'].values - v_min) / (v_max - v_min)

    # Second NaN sweep, in case normalization produced any
    df = df.dropna(how='any')

    # Cut the frame into fixed-length (feature, label) training slices
    samples = sliceDfToTrainData(
        df=df,
        length=N_STEPS - 1,
        feature_cols=feature_cols,
        label_col=[label_tomorrow],
        norm_flag=False)

    # Chronological 80/20 split into train and test sets
    split_idx = math.floor(len(samples) * 0.8)
    train_samples = samples[:split_idx]
    test_samples = samples[split_idx:]

    if not os.path.exists(data_store_dir):
        os.makedirs(data_store_dir)

    # Persist both sets as pickles
    with open(data_store_dir + stk_code + 'train' + label_col[0] + '.pkl',
              'wb') as f:
        pickle.dump(train_samples, f)
    with open(data_store_dir + stk_code + 'test' + label_col[0] + '.pkl',
              'wb') as f:
        pickle.dump(test_samples, f)
# NOTE(review): fragment — `df` and the enclosing scope are defined earlier
# in the file, outside this view.
df = df.dropna(how='any', axis=0)

"""
画图语句
df.plot('date',
        ['upper', 'lower', 'high', 'low'],
        style=['-', '-', '*', '*'])
"""

# Short-term (3-day) volatility: range between the rolling 3-day low
# minimum and high maximum
near_days = 3
df['low_near'] = df['low'].rolling(window=near_days).min()
df['high_near'] = df['high'].rolling(window=near_days).max()
df['wave_near'] = df.apply(lambda x: x['high_near'] - x['low_near'], axis=1)

# Inverted rank: a narrow recent range scores high.
# NOTE(review): presumably relativeRank returns a 0-100 score — confirm.
df['wave_near_rank'] = df.apply(
    lambda x: 100 - relativeRank(df['wave_near'], x['wave_near']), axis=1)

"""
df.plot('date',
        ['close', 'wave_near_rank', 'wave_near'],
        style=['--*', '--*'],
        subplots=True)
"""

"""
如何调节买卖比例:
用移动均线-未成熟随机值(RSV)来对买卖比例进行分配,
实现在上涨时,易买难卖,在下跌时,易卖难买,最终能够实现随着趋势线调整仓位重心的目的。
避免在迅速下跌时爆仓,在迅速上涨时空仓的情况。
"""


def updateRSVRecord():
    # NOTE(review): truncated here — the try body (and its except handler)
    # continues beyond this view.
    try:
        # Connection/engine pair for the local 'stk_opt_info' database
        (conn_opt, engine_opt) = genDbConn(localDBInfo, 'stk_opt_info')
# encoding=utf-8
import tushare as ts
from RelativeRank.Sub import relativeRank, get_k_data_JQ
from SDK.MyTimeOPT import add_date_str, get_current_date_str

"""
测试单个stk的相对排名
"""

# Stock under test and its last 400 daily bars
stk_code = '600487'
df = get_k_data_JQ(stk_code=stk_code, count=400)

# Relative divergence from the 9-day moving average, then rank each
# day's divergence against the whole history.
df['m9'] = df['close'].rolling(window=9).mean()
df['diff_m9'] = df.apply(
    lambda row: (row['close'] - row['m9']) / row['close'], axis=1)
df['rank'] = df.apply(
    lambda row: relativeRank(df['diff_m9'], row['diff_m9']), axis=1)

"""
df.plot('date', ['close', 'diff_m9', 'rank'], subplots=True)
"""

# Sentinel for setting a debugger breakpoint at end of script
end = 0
# NOTE(review): fragment — this `except` belongs to a try block that starts
# earlier in the file; `stk_code` and the dict variable below are defined there.
except:
    # Fall back to the JoinQuant data API when the primary source fails
    if stk_code in ['sh', 'sz', 'cyb']:
        # Map the short index code to the JoinQuant security code
        stk_code_normal = {
            'sh': '000001.XSHG',
            'sz': '399001.XSHE',
            'cyb': '399006.XSHE'
        }[stk_code]
    else:
        stk_code_normal = normalize_code(stk_code)
    # Latest close price from JoinQuant
    current_price = float(jq.get_price(
        stk_code_normal, count=1,
        end_date=get_current_date_str())['close'].values[0])

# Real-time divergence of the latest price from the mean of recent history.
# NOTE(review): the variable `dict` shadows the builtin — rename candidate.
list_history = dict['latest_data']
list_history.append(current_price)
M_diff = (current_price - np.mean(list_history)) / current_price
M_list = dict['history_M_diverge_data']
M_list.append(M_diff)

# NOTE(review): M_list aliases dict['history_M_diverge_data'], so the newly
# appended point is part of the reference population it is ranked against —
# confirm this is intended.
r_list = [relativeRank(dict['history_M_diverge_data'], x) for x in M_list]
plt.plot(r_list)
plt.show()

end=0