def get_single_ave_by_date(df_param, date_param, time_span_param, field_param):
    """
    Compute the mean of ``field_param`` over the ``time_span_param`` days
    strictly before ``date_param``.

    :param df_param: DataFrame with at least 'date' and ``field_param`` columns
    :param date_param: anchor date string (exclusive upper bound)
    :param time_span_param: averaging-window length in days
    :param field_param: column name to average
    :return: dict {"date": date_param, field_param+"_mean"+span: mean or None}
    """
    temp_df = df_param

    # Sanity check: require at least one record in the 15 days before the
    # anchor date, otherwise the mean would be meaningless.
    # (Removed the old dead 'master_field' lookup — its value was never used.)
    latest_span = temp_df[(temp_df.date > add_date_str(date_param, -15))
                          & (temp_df.date < date_param)] \
        .sort_values(by='date', ascending=False).head(1)

    if not len(latest_span):
        print("函数get_average_index: 在日期" + date_param + "计算" + field_param + "值时出错!")
        return {
            "date": date_param,
            field_param + "_mean" + str(time_span_param): None
        }

    # Mean of field_param over (date_param - span, date_param), both exclusive
    span_df = temp_df[
        (temp_df.date > add_date_str(date_param, -1 * time_span_param))
        & (temp_df.date < date_param)]
    span_mean = span_df[field_param].mean()

    return {
        "date": date_param,
        field_param + "_mean" + str(time_span_param): span_mean
    }
def getSigleStkReseau(stk_code):
    """
    Compute the current grid (reseau) value for a single stock: the mean of
    the 3-bar and 6-bar rolling standard deviations on the latest row.

    :param stk_code: stock code
    :return: scalar grid value
    """
    k_df = get_k_data_JQ(stk_code=stk_code,
                         start_date=add_date_str(get_current_date_str(), -10),
                         end_date=get_current_date_str())

    # Too few rows in the 10-day window — widen the download to 30 days
    if len(k_df) < 7:
        k_df = get_k_data_JQ(stk_code=stk_code,
                             start_date=add_date_str(get_current_date_str(), -30),
                             end_date=get_current_date_str())

    k_df = k_df.reset_index()
    k_df = df_win_std(k_df, 3)
    k_df = df_win_std(k_df, 6)
    k_df['std_m'] = k_df.apply(lambda r: np.mean([r['std_3'], r['std_6']]),
                               axis=1)

    return k_df.tail(1)['std_m'].values[0]
def updateSingleMacdHistory(stk_code, history_dict):
    """
    Update the hourly and half-hourly history cache for a single stock.

    :param stk_code: stock code to update
    :param history_dict: cache dict, mutated in place; only populated when
        stk_code is not already a key
    :return: None (result is stored in history_dict)
    """
    if stk_code not in history_dict.keys():
        # ~200 days of 30-minute bars
        df_30 = get_k_data_JQ(stk_code,
                              start_date=add_date_str(get_current_date_str(), -200),
                              freq='30m')
        df_30['MACD'], _, _ = talib.MACD(df_30.close,
                                         fastperiod=12,
                                         slowperiod=26,
                                         signalperiod=9)

        # ~200 days of 60-minute bars
        df_60 = get_k_data_JQ(stk_code,
                              start_date=add_date_str(get_current_date_str(), -200),
                              freq='60m')
        df_60['MACD'], _, _ = talib.MACD(df_60.close,
                                         fastperiod=12,
                                         slowperiod=26,
                                         signalperiod=9)

        # dropna removes the MACD warm-up rows
        df_30 = df_30.dropna()
        df_60 = df_60.dropna()

        # NOTE(review): despite the function name, the 'close' series is
        # stored here, not the 'MACD' column — confirm this is intended.
        history_dict[stk_code] = {
            'min30': df_30['close'],
            'min60': df_60['close']
        }
def get_h_l_pot(stk_list):
    """
    For each stock in stk_list, compute the yearly, half-yearly and monthly
    highs and lows of the close price.

    :param stk_list: iterable of stock codes
    :return: DataFrame with columns stk, year_low, year_high, half_year_low,
             half_year_high, month_low, month_high
    """
    today = get_current_date_str()
    one_year_ago = add_date_str(today, -365)
    half_year_ago = add_date_str(today, -180)
    one_month_ago = add_date_str(today, -30)

    records = []
    for code in stk_list:
        # One year of daily bars is enough for every window below
        hist = ts.get_k_data(code, start=one_year_ago)

        half_year_close = hist[hist['date'] > half_year_ago]['close']
        month_close = hist[hist['date'] > one_month_ago]['close']

        records.append({
            'stk': code,
            'year_low': np.min(hist['close']),
            'year_high': np.max(hist['close']),
            'half_year_low': np.min(half_year_close),
            'half_year_high': np.max(half_year_close),
            'month_low': np.min(month_close),
            'month_high': np.max(month_close)
        })

    # Snapshot of the highs/lows as of today
    return pd.DataFrame(records)
def check_single_stk_middle_level(stk_code, dict):
    """
    Return the relative level of the current price within the cached
    medium-period (60-minute) close history for stk_code.

    :param stk_code: stock code
    :param dict: cache mapping stk_code -> list of hourly closes
                 (mutated and persisted on a cache miss)
    :return: relative rank of the current price
    """
    current_price = get_RT_price(stk_code, source='jq')

    # Cache hit: rank against the stored history
    if stk_code in dict.keys():
        return relativeRank(dict[stk_code], current_price)

    # Cache miss: download 60 days of hourly closes, cache and persist them
    df_hour = get_k_data_JQ(
        stk_code,
        count=None,
        start_date=add_date_str(get_current_date_str(), -60),
        freq='60m')
    dict[stk_code] = list(df_hour['close'].values)
    l = relativeRank(list(df_hour['close'].values), current_price)

    with open(data_dir + 'middlePeriodHourData.json', 'w') as f:
        json.dump(dict, f)

    return l
def printConcernedPredict2Self():
    """Send next-day high/low/close predictions for watched stocks to QQ."""
    win_qq_name = u'影子2'

    for stk in ['000001', '000333', '300183']:
        # Latest close: baseline for the change percentage
        close_today = ts.get_k_data(
            stk,
            start=add_date_str(get_current_date_str(),
                               -5)).tail(1)['close'].values[0]

        predictions = []
        for label in ['high', 'low', 'close']:
            value = '%0.2f' % predict_tomorrow(stk,
                                               label,
                                               N_STEPS=N_STEPS,
                                               feature_cols=feature_cols,
                                               HIDDEN_SIZE=HIDDEN_SIZE,
                                               NUM_LAYERS=NUM_LAYERS)
            predictions.append((label, value))

        # Append the change vs today's close to each prediction
        r_contrast = [
            (label, value,
             '%0.2f' % ((float(value) - close_today) / close_today * 100) + '%')
            for label, value in predictions
        ]

        send_qq(win_qq_name, stk + ':\n' + str(r_contrast))
def check_single_stk_hour_idx_wx(stk_code, source='jq', debug=False):
    """
    Check common half-hour indicators for a single stock and return any
    SAR-reversal findings as human-readable strings.

    :param stk_code: stock code
    :param source: data source tag (currently unused by this function)
    :param debug: when True, dump intermediate data to a debug txt log
    :return: list of analysis-result strings (possibly empty)
    """
    stk_df = get_k_data_JQ(stk_code,
                           count=120,
                           end_date=add_date_str(get_current_date_str(), 1),
                           freq='30m')

    # Sort ascending by time
    stk_df = stk_df.sort_values(by='datetime', ascending=True)
    """
    增加指标
    'RSI5', 'RSI12', 'RSI30'
    'SAR'
    'slowk', 'slowd'
    'upper', 'middle', 'lower'
    'MOM'
    """
    # Drop zero-volume rows (placeholder bars)
    stk_df = stk_df.loc[
        stk_df.apply(lambda x: not (int(x['volume']) == 0), axis=1), :]

    # Compute indicators, keep the latest 60 rows
    stk_df = add_stk_index_to_df(stk_df).tail(60)

    result_analysis = []

    # SAR check: a sign change of (SAR - close) across the last two bars
    # signals a reversal
    sar_tail_origin = stk_df.tail(2)
    sar_tail = sar_tail_origin.copy()
    sar_tail['compare'] = sar_tail_origin.apply(
        lambda x: x['SAR'] - x['close'], axis=1)

    if sar_tail.head(1)['compare'].values[0] * sar_tail.tail(
            1)['compare'].values[0] < 0:
        if sar_tail.tail(1)['SAR'].values[0] < sar_tail.tail(
                1)['close'].values[0]:
            # SAR dropped below price: possible rise in the coming hours
            title_tmp = stk_code + ' ' + code2name(
                stk_code) + ' 注意 SAR 指标翻转,后续数小时可能上涨!'
            result_analysis.append(title_tmp)
            GenPic.set_background_color(bc='b_r')
        else:
            # SAR rose above price: possible fall in the coming hours
            title_tmp = stk_code + ' ' + code2name(
                stk_code) + ' 注意 SAR 指标翻转,后续数小时可能下跌!'
            result_analysis.append(title_tmp)

    # Debug logging of raw data and results
    if debug:
        txt_name = 'hour_index'
        debug_print_txt(txt_name, stk_code, stk_df.to_string() + '\n\n')
        debug_print_txt(txt_name, stk_code,
                        '结果:\n' + str(result_analysis) + '\n\n')

    return result_analysis
def calSingleStkRank(m_days, stk_code, days_length, p_now):
    """
    Score the divergence of a value from the m_days moving average, relative
    to its own recent history.

    :param m_days: moving-average window (divergence is close - MA(m_days))
    :param stk_code: stock code
    :param days_length: ranking window in days
    :param p_now: current divergence value to rank
    :return: rank value, or -1 when history is insufficient
    """
    # 1.8x oversampling allows for non-trading days in the calendar window
    df = ts.get_k_data(stk_code,
                       start=add_date_str(get_current_date_str(),
                                          -1 * days_length * 1.8))
    if len(df) < days_length * 0.8:
        print('函数 calSingleStkRank: 该stk历史数据不足!')
        return -1

    # Divergence of close from its moving average
    df['m9'] = df['close'].rolling(window=m_days).mean()
    df['diff_m9'] = df.apply(lambda x: x['close'] - x['m9'], axis=1)
    """
    df.plot('date', ['close', 'diff_m9', 'rank'], subplots=True)
    """
    # Rank p_now within the divergence history.
    # NOTE(review): 'relativeRand' — sibling code calls 'relativeRank';
    # confirm this is not a typo.
    return relativeRand(df['diff_m9'], p_now)
def sendM305Pic(win_qq_name, stk_code):
    """
    Plot close price, M5-M30 divergence and its rank for a stock, then send
    the chart to QQ as a BMP image.

    :param win_qq_name: QQ window/recipient name
    :param stk_code: stock code
    """
    k_df = ts.get_k_data(stk_code,
                         start=add_date_str(get_current_date_str(), -400))

    # Divergence between the 5-day and 30-day moving averages, plus its rank
    # within its own history
    k_df['m5'] = k_df['close'].rolling(window=5).mean()
    k_df['m30'] = k_df['close'].rolling(window=30).mean()
    k_df['diff_m305'] = k_df.apply(lambda r: r['m5'] - r['m30'], axis=1)
    k_df['rank'] = k_df.apply(
        lambda r: relativeRank(k_df['diff_m305'], r['diff_m305']), axis=1)
    k_df = k_df.dropna(axis=0)

    k_df.plot('date', ['close', 'diff_m305', 'rank'], subplots=True)
    plt.title(stk_code + 'M5-M30 分数')

    # Render the figure into memory, convert to BMP bytes (the 14-byte BMP
    # file header is stripped before sending)
    png_buf = BytesIO()
    bmp_buf = BytesIO()
    plt.savefig(png_buf)
    png_buf.seek(0)
    img = Image.open(png_buf)
    img.convert("RGB").save(bmp_buf, "BMP")
    payload = bmp_buf.getvalue()[14:]
    bmp_buf.close()
    png_buf.close()
    plt.close()

    send_pic_qq(win_qq_name, payload)
def get_single_stk_reseau(self, stk_code):
    """
    Compute the current grid (reseau) value for a single stock.

    :param stk_code: stock code
    :return: result of self.get_single_stk_reseau_sub on the downloaded data
    """
    today = get_current_date_str()
    k_data = get_k_data_JQ(stk=stk_code,
                           start_date=add_date_str(today, -10),
                           end_date=today)

    # Not enough rows in the 10-day window — retry with 30 days
    if len(k_data) < 7:
        k_data = get_k_data_JQ(stk=stk_code,
                               start_date=add_date_str(today, -30),
                               end_date=today)

    return self.get_single_stk_reseau_sub(k_data)
def update_middle_period_hour_data():
    """
    Refresh the cached medium-period (60-minute) close history for every
    watched stock and persist it as JSON.
    """
    stk_list = read_config()['buy_stk'] + read_config()['concerned_stk']

    # Map each code to its list of hourly closes over the last 60 days
    hour_closes = {}
    for code in stk_list:
        df_hour = get_k_data_JQ(
            code,
            count=None,
            start_date=add_date_str(get_current_date_str(), -60),
            freq='60m')
        hour_closes[code] = list(df_hour['close'].values)

    with open(data_dir + 'middlePeriodHourData.json', 'w') as f:
        json.dump(hour_closes, f)
def get_k_data_JQ(stk, count=None, start_date=None, end_date=None, freq='daily'):
    """
    Download historical K data via JQData.

    :param stk: stock code string, index alias ('sh', 'sz', 'cyb', 'hs300',
        'sz50', 'zz500'), or a list of code strings
    :param count: number of bars to fetch (alternative to start_date)
    :param start_date: start date string
    :param end_date: end date string; defaults to today (tomorrow for
        minute-level frequencies, so today's intraday bars are included)
    :param freq: 'daily' or a minute frequency such as '30m'/'60m'
    :return: DataFrame with added 'datetime' and 'date' columns, or an empty
        DataFrame on error
    """
    # For minute data, push end_date to tomorrow so today's bars come back.
    # NOTE(review): this overwrites any caller-supplied end_date for minute
    # frequencies — confirm that is intended.
    if 'm' in freq:
        end_date = add_date_str(get_current_date_str(), 1)

    if pd.isnull(end_date):
        end_date = get_current_date_str()

    try:
        # Also accept a list of codes
        if isinstance(stk, list):
            stk_code = [jqdatasdk.normalize_code(x) for x in stk]
            df = jqdatasdk.get_price(stk_code, frequency=freq, count=count,
                                     end_date=end_date, start_date=start_date)
        elif stk in ['sh', 'sz', 'cyb', 'hs300', 'sz50', 'zz500']:
            # Index aliases are mapped to JQData security codes
            stk_code_normal = JQMethod.get_index_jq_code(stk)
            df = jqdatasdk.get_price(stk_code_normal, frequency=freq,
                                     count=count, start_date=start_date,
                                     end_date=end_date)
        else:
            df = jqdatasdk.get_price(jqdatasdk.normalize_code(stk),
                                     frequency=freq, count=count,
                                     end_date=end_date, start_date=start_date)

        if df.empty:
            return df

        # Convenience columns: full timestamp and date-only string
        df['datetime'] = df.index
        df['date'] = df.apply(lambda x: str(x['datetime'])[:10], axis=1)
        return df
    except Exception as e:
        print('函数get_k_data_JQ:出错!错误详情:\n' + str(e))
        return pd.DataFrame()
def calRelaPLevel(stk_list, period, towho):
    """
    Compute the relative price level for each stock and send the table to QQ.

    :param stk_list: iterable of stock codes
    :param period: negative day offset defining the download window start
    :param towho: QQ recipient
    """
    levels = []
    for code in stk_list:
        closes = np.array(
            get_k_data_JQ(
                stk_code=code,
                start_date=add_date_str(get_current_date_str(),
                                        period))['close'])
        levels.append((code, cal_stk_p_level(closes)['total_last']))

    r_df = pd.DataFrame(data=levels, columns=['code', 'level'])
    sendPLevel2QQ(r_df, towho)
def gen_hour_macd_values(stk_code, source='jq', title=''):
    """
    Download 30-minute and 60-minute K data for a stock, drop zero-volume
    bars, compute MACD, and strip the still-forming future bar.

    :param stk_code: stock code
    :param source: 'jq' (JQData) or 'ts' (tushare)
    :param title: unused; kept for backward compatibility with callers
    :return: (df_30, df_60) DataFrames with a 'MACD' column
    :raises ValueError: if source is neither 'jq' nor 'ts'
    """
    if source == 'jq':
        df_30 = get_k_data_JQ(stk_code, count=120,
                              end_date=add_date_str(get_current_date_str(), 1),
                              freq='30m')
        df_60 = get_k_data_JQ(stk_code, count=120,
                              end_date=add_date_str(get_current_date_str(), 1),
                              freq='60m')
    elif source == 'ts':
        df_30 = my_pro_bar(stk_code,
                           start=add_date_str(get_current_date_str(), -20),
                           freq='30min')
        df_60 = my_pro_bar(stk_code,
                           start=add_date_str(get_current_date_str(), -20),
                           freq='60min')
    else:
        # BUGFIX: previously fell through with df_30/df_60 undefined,
        # raising an obscure NameError below.
        raise ValueError('gen_hour_macd_values: unknown source %r' % source)

    # Drop zero-volume rows (suspended / placeholder bars)
    df_30 = df_30.loc[df_30.apply(lambda x: not (x['volume'] == 0), axis=1), :]
    df_60 = df_60.loc[df_60.apply(lambda x: not (x['volume'] == 0), axis=1), :]

    df_30['MACD'], _, _ = talib.MACD(df_30.close, fastperiod=12,
                                     slowperiod=26, signalperiod=9)
    df_60['MACD'], _, _ = talib.MACD(df_60.close, fastperiod=12,
                                     slowperiod=26, signalperiod=9)

    # Remove the MACD warm-up rows
    df_30 = df_30.dropna()
    df_60 = df_60.dropna()

    # Drop the still-forming bar whose timestamp lies in the future
    if str(df_60.index[-1]) > get_current_datetime_str():
        df_30 = df_30[:-1]
        df_60 = df_60[:-1]

    return df_30, df_60
def download_stk_list_hour_data(stk_list):
    """
    Download 30-minute data for every stock in stk_list.

    :param stk_list: iterable of stock codes
    :return: list of (code, DataFrame) tuples with empty frames filtered out
    """
    jq_login()

    raw_pairs = []
    for code in stk_list:
        df_half_hour = get_k_data_JQ(
            code,
            count=120,
            end_date=add_date_str(get_current_date_str(), 1),
            freq='30m')
        raw_pairs.append((code, df_half_hour))

    # Discard codes whose download came back empty
    stk_list_data = [pair for pair in raw_pairs if not pair[1].empty]

    logout()
    return stk_list_data
def calWeight(code):
    """
    Current volatility weight: the average of the 3-day and 9-day means of
    the daily swing ratio ((high - low) * 100 / previous close).

    :param code: stock code
    :return: scalar weight for the latest day
    """
    df = ts.get_k_data(code, start=add_date_str(get_current_date_str(), -30))

    # Shift yesterday's close down one row to compute each day's swing ratio
    df['yesterday_close'] = df['close'].shift(1)
    df['rolling'] = df.apply(
        lambda r: math.fabs(r['high'] - r['low']) * 100 / r['yesterday_close'],
        axis=1)

    df['r_3'] = df['rolling'].rolling(window=3).mean()
    df['r_9'] = df['rolling'].rolling(window=9).mean()
    df['r_mean'] = df.apply(lambda r: (r['r_3'] + r['r_9']) / 2, axis=1)

    return df.tail(1)['r_mean'].values[0]
def saveStkMRankHistoryData(stk_code, history_days, m_days, save_dir):
    """
    Persist the historical M-line divergence data for a stock, used later to
    compute the real-time divergence rank.

    :param stk_code: stock code
    :param history_days: required history length in days
    :param m_days: moving-average window length
    :param save_dir: directory for the dump, e.g. './M_data/'
    :return: 0 on success, -1 when history is insufficient, 1 on error
    """
    try:
        # 1.8x oversampling allows for non-trading days
        df = ts.get_k_data(stk_code,
                           start=add_date_str(get_current_date_str(),
                                              -1 * history_days * 1.8))
        if len(df) < history_days * 0.8:
            # BUGFIX: message previously named the wrong function
            # ('calSingleStkRank', a copy-paste leftover)
            print('函数 saveStkMRankHistoryData: 该stk历史数据不足!')
            return -1

        # Divergence of close from its m_days moving average
        df['m9'] = df['close'].rolling(window=m_days).mean()
        df['diff_m9'] = df.apply(lambda x: x['close'] - x['m9'], axis=1)
        df = df.dropna()

        dict_restore = {
            'stk_code': stk_code,
            'history_M_diverge_data': list(df['diff_m9'].values),
            # Last m_days-1 closes let the live rank be extended seamlessly
            'latest_data': list(df.tail(m_days - 1)['close'].values),
            'update_date': df.tail(1)['date'].values[0]
        }

        dumpP(data=dict_restore,
              saveLocation=save_dir,
              fileName=stk_code + '_M' + str(m_days))
        return 0
    except Exception:
        # Narrowed from a bare except; still best-effort — caller checks code
        return 1
def down_minute_data(stk_code, freq):
    """
    Download the latest 300 minute-bars for a stock, drop zero-volume rows,
    attach indicators, and strip a still-forming future bar.

    :param stk_code: stock code
    :param freq: minute frequency string, e.g. '30m'
    :return: prepared DataFrame, or an empty DataFrame on failure
    """
    try:
        minute_df = get_k_data_JQ(
            stk_code,
            count=300,
            end_date=add_date_str(get_current_date_str(), 1),
            freq=freq)

        # Remove zero-volume (placeholder) bars
        minute_df = minute_df.loc[
            minute_df.apply(lambda r: not (r['volume'] == 0), axis=1), :]

        # Attach the standard indicator set
        minute_df = add_stk_index_to_df(minute_df)

        # Drop the last bar if its timestamp lies in the future
        if str(minute_df.index[-1]) > get_current_datetime_str():
            minute_df = minute_df[:-1]

        return minute_df
    except Exception as e_:
        print('函数down_minute_data:\n %s\n' % str(e_))
        return pd.DataFrame()
def printPredict2Public():
    """
    Publish next-trading-day high/low/close predictions for the three major
    indexes (sh/sz/cyb) to the public QQ group, with the change vs today's
    close and each model's training error.
    """
    win_qq_name = u'大盘上涨概率公示'

    # Explanatory header message for the group
    send_qq(
        win_qq_name,
        '各位:\n以下是下一个交易日大盘最高价、最低价和收盘价的预测,因为三个价格使用相互独立的模型,所以会出现收盘价低于最低价的情况,以及类似的情形,希望各位注意!'
        + '\n' + '周一~周五 晚上19:30 计算并发送该消息!\n格式及解释:\n' +
        "('high', '2989.57','0.11%')" + '\n' +
        '最高价, 2989.57, 相对于上一个收盘价的涨跌率 0.11, 训练时误差 0.852\n后面以此类推...')

    # Per-label training accuracy info.
    # NOTE(review): Windows path with raw backslashes; '\L', '\A' and '\s'
    # happen not to be escape sequences, so this works, but it is fragile.
    with open(rootPath + '\LSTM\AboutLSTM\stk_max_min.json', 'r') as f:
        max_min_info = json.load(f)

    for stk in ['sh', 'sz', 'cyb']:
        # Latest close: baseline for the change percentage
        close_today = ts.get_k_data(
            stk,
            start=add_date_str(get_current_date_str(),
                               -5)).tail(1)['close'].values[0]

        r = [(label,
              '%0.2f' % predict_tomorrow(stk,
                                         label,
                                         N_STEPS=N_STEPS,
                                         feature_cols=feature_cols,
                                         HIDDEN_SIZE=HIDDEN_SIZE,
                                         NUM_LAYERS=NUM_LAYERS),
              max_min_info[stk][label + '_acc'])
             for label in ['high', 'low', 'close']]

        # Append change vs today's close to each (label, value, acc) triple
        r_contrast = [
            (x[0], x[1],
             '%0.2f' % ((float(x[1]) - close_today) / close_today * 100) + '%',
             x[2]) for x in r
        ]

        stk2name = {'sh': '上证', 'sz': '深证', 'cyb': '创业板'}
        send_qq(win_qq_name, stk2name[stk] + ':\n' + str(r_contrast))
def get_k_data_JQ(stk_code, count=None, start_date=None, end_date=None,
                  freq='daily'):
    """
    Download historical K data for a stock via JQData.

    :param stk_code: stock code, or index alias 'sh'/'sz'/'cyb'
    :param count: number of bars to fetch (alternative to start_date)
    :param start_date: start date string
    :param end_date: end date string; defaults to tomorrow so that today's
        bars are always included
    :param freq: bar frequency, e.g. 'daily', '30m'
    :return: DataFrame with added 'datetime' and 'date' columns
    """
    # BUGFIX: the default was previously evaluated once at import time
    # (end_date=add_date_str(get_current_date_str(), 1) in the signature),
    # freezing "tomorrow" for the life of the process. Compute it per call.
    if end_date is None:
        end_date = add_date_str(get_current_date_str(), 1)

    if stk_code in ['sh', 'sz', 'cyb']:
        # Map index aliases to JQData security codes
        stk_code_normal = {
            'sh': '000001.XSHG',
            'sz': '399001.XSHE',
            'cyb': '399006.XSHE'
        }[stk_code]
        df = jqdatasdk.get_price(stk_code_normal, frequency=freq, count=count,
                                 start_date=start_date, end_date=end_date)
    else:
        df = jqdatasdk.get_price(jqdatasdk.normalize_code(stk_code),
                                 frequency=freq, count=count,
                                 end_date=end_date, start_date=start_date)

    # Convenience columns: full timestamp and date-only string
    df['datetime'] = df.index
    df['date'] = df.apply(lambda x: str(x['datetime'])[:10], axis=1)
    return df
# encoding=utf-8
def stk_sea_select(stk_code, towho, tc, debug_plot=False):
    """
    "Sea-selection" screen for a single stock. All four checks must pass:
    weekly MACD at a local bottom, monthly MACD turning, daily SAR below the
    close, and half-hour SAR below the close.

    :param stk_code: stock code
    :param towho: unused here; kept for interface compatibility
    :param tc: text control exposing AppendText(), receives progress messages
    :param debug_plot: unused debug switch; kept for interface compatibility
    :return: True when every screen passes, otherwise False.
        NOTE: the insufficient-data branch returns (False, empty DataFrame)
        — preserved for backward compatibility with existing callers.
    """
    try:
        """ ---------------- download raw data ---------------- """
        df = get_k_data_JQ(stk_code, count=400,
                           end_date=get_current_date_str()).reset_index()

        if len(df) < 350:
            print('函数week_MACD_stray_judge:' + stk_code + '数据不足!')
            return False, pd.DataFrame()

        # Trim so that the length aligns with the 20-day (monthly) sampling
        df_floor = df.tail(math.floor(len(df) / 20) * 20 - 19)

        """ ---------------- weekly screen ---------------- """
        # Weekday of each row (0=Mon .. 4=Fri); the duplicated computation
        # in the original was removed.
        df_floor['day'] = df_floor.apply(lambda x: calendar.weekday(
            int(x['date'].split('-')[0]),
            int(x['date'].split('-')[1]),
            int(x['date'].split('-')[2])), axis=1)

        # Weekly sampling: all Fridays, plus today when today is not a Friday
        if df_floor.tail(1)['day'].values[0] != 4:
            df_floor_slice_5 = pd.concat(
                [df_floor[df_floor.day == 4], df_floor.tail(1)], axis=0)
        else:
            df_floor_slice_5 = df_floor[df_floor.day == 4]

        df_floor_slice_5['MACD'], df_floor_slice_5['MACDsignal'], \
            df_floor_slice_5['MACDhist'] = talib.MACD(
                df_floor_slice_5.close, fastperiod=6, slowperiod=12,
                signalperiod=9)

        # Weekly MACD must form a local bottom (middle of last 3 is minimum)
        MACD_5 = df_floor_slice_5.tail(3)['MACD'].values
        if not (MACD_5[1] == np.min(MACD_5)):
            tc.AppendText(stk_code + code2name(stk_code) + ':“周线”不符合要求!')
            return False

        """ ---------------- monthly screen ---------------- """
        # Every 20th row approximates a monthly bar
        df_floor_slice_20 = df_floor.loc[::20, :]
        df_floor_slice_20['MACD'], df_floor_slice_20['MACDsignal'], \
            df_floor_slice_20['MACDhist'] = talib.MACD(
                df_floor_slice_20.close, fastperiod=4, slowperiod=8,
                signalperiod=9)

        # Neither of the two middle values may be the maximum of the last 4
        MACD_20 = df_floor_slice_20.tail(4)['MACD'].values
        if not ((MACD_20[1] != np.max(MACD_20)) &
                (MACD_20[2] != np.max(MACD_20))):
            tc.AppendText(stk_code + code2name(stk_code) + ':“月线”不符合要求!')
            return False

        """ ---------------- daily SAR screen ---------------- """
        df_floor['SAR'] = talib.SAR(df_floor.high, df_floor.low,
                                    acceleration=0.05, maximum=0.2)
        # BUGFIX: original compared SAR with itself (always False, so the
        # screen never rejected). A SAR above the close means downtrend.
        if df_floor.tail(1)['SAR'].values[0] > \
                df_floor.tail(1)['close'].values[0]:
            tc.AppendText(stk_code + code2name(stk_code) +
                          ':“日线SAR指标”不符合要求!')
            return False

        """ ---------------- half-hour SAR screen ---------------- """
        df_half_hour = get_k_data_JQ(
            stk_code, count=120,
            end_date=add_date_str(get_current_date_str(), 1), freq='30m')
        df_half_hour['SAR'] = talib.SAR(df_half_hour.high, df_half_hour.low,
                                        acceleration=0.05, maximum=0.2)
        # BUGFIX: same self-comparison defect as the daily screen
        if df_half_hour.tail(1)['SAR'].values[0] > \
                df_half_hour.tail(1)['close'].values[0]:
            tc.AppendText(stk_code + code2name(stk_code) +
                          ':“半小时SAR指标”不符合要求!')
            return False

        # Every screen passed
        tc.AppendText(stk_code + code2name(stk_code) + ':符合要求!')
        return True
    except Exception as e:
        tc.AppendText(stk_code + '出错:\n' + str(e))
        return False
# encoding=utf-8 """ 数据库名字:stk_JQ_money_flow mysql 链接句柄 db_JQ_money_flow """ from Config.GlobalSetting import localDBInfo from SDK.DBOpt import genDbConn, is_table_exist import pandas as pd from SDK.MyTimeOPT import minus_date_str, get_current_date_str, add_date_str """ -------------------------- 定义全局参数 -------------------------------- """ start_date = '2009-01-01' end_date = get_current_date_str() (conn_JQ_MF, engine_JQ_MF) = genDbConn(localDBInfo, 'stk_JQ_money_flow') """ -------------------------- 获取 all stks -------------------------------- """ all_stk_list = get_all_securities(types=['stock'], date=None) for stk in all_stk_list.index: table_name = 's' + stk.split('.')[0] if is_table_exist(conn=conn_JQ_MF, database_name='stk_JQ_money_flow', table_name=table_name): # 读取原表,并获取最后的日期 df = pd.read_sql('select * from ' + table_name, con=conn_JQ_MF) date_last = str( df.sort_values(by='date', ascending=True).tail(1)['date'].values[0])[:10] # 判断是否是最新日期 if minus_date_str(date_last, get_current_date_str()) >= 0: print(stk + ' 表中的日期已经是最新的了!不需要更新!') continue df = get_money_flow(stk,
# encoding=utf-8
def get_sample_flow(con_param, code_param, sample_amount, sample_length):
    """
    Compute the average daily net money inflow per sampling period, working
    backwards from the latest date in the tick table. Daily averages are used
    so that missing days do not distort a period's total.

    :param con_param: database connection
    :param code_param: stock code (table name is "tick" + code_param)
    :param sample_amount: number of periods to sample, e.g. 4
    :param sample_length: period length in days, e.g. 30 (month), 7 (week)
    :return: list of per-period average daily net inflows, oldest first;
        empty list when the table does not exist
    """
    # No table for this code — nothing to compute
    if not is_table_exist(conn=con_param,
                          table_name="tick" + code_param,
                          database_name=stk_tick_data_db_name):
        print("stk" + code_param + ":数据不存在!")
        return []

    df = get_total_table_data(conn=con_param,
                              table_name="tick" + code_param).drop_duplicates()

    result = list()

    # Dates sorted newest-first; endpoints bound the sampling windows
    date_series = sorted(df.date, reverse=True)
    latest_date_in_table = date_series[0]
    early_date_in_table = date_series[-1]

    for i in range(0, sample_amount):
        # Window (start, end]; both relative to the latest table date
        start_date_temp = add_date_str(latest_date_in_table,
                                       -(i + 1) * sample_length + 1)
        end_date_temp = add_date_str(latest_date_in_table,
                                     -i * sample_length)

        # Stop once the window falls entirely before the table's history.
        # BUGFIX: the message announced a break but no break was executed.
        if minus_date_str(end_date_temp, early_date_in_table) < 0:
            print("采样的最早时间超过了表中的最早时间,for循环break!")
            break

        df_span = df[(df.date > start_date_temp)
                     & (df.date <= end_date_temp)].drop_duplicates()
        if df_span.empty:
            continue

        # Average daily net inflow over this window
        diff_series = df_span.total_in - df_span.total_out
        result.append(diff_series.sum() / len(diff_series))

    # Oldest period first
    result.reverse()
    return result
def down_hour_data(self):
    """Download the latest 120 half-hour bars and cache them on the instance."""
    tomorrow = add_date_str(get_current_date_str(), 1)
    self.hour_data = get_k_data_JQ(self.stk_code,
                                   count=120,
                                   end_date=tomorrow,
                                   freq='30m')
# encoding=utf-8
def rpl_stk_hour_page(canvas_para, stk_code):
    """
    Render one PDF page of half-hour data for a stock: full-history close,
    then close+SAR, MACD, KDJ, RSI and Bollinger bands as stacked chart bands.

    :param canvas_para: reportlab canvas to draw onto
    :param stk_code: stock code, or index alias 'sh'/'sz'/'cyb'
    :return: the same canvas, after showPage()
    """
    # Header: stock code and name
    canvas_para.setFont("song", 10)
    if stk_code in ['sh', 'sz', 'cyb']:
        stk_name = stk_code
    else:
        stk_name = code2name(stk_code)

    canvas_para.drawString(20, letter[1] - 10,
                           stk_code + ' ' + stk_name + ' ' + '小时数据')

    # ~200 days of 30-minute bars; replace the date column with a plain
    # integer axis for plotting
    df_stk = get_k_data_JQ(stk_code,
                           start_date=add_date_str(get_current_date_str(), -200),
                           freq='30m')
    df_stk['date'] = df_stk.reset_index().index
    df_stk = df_stk.reset_index(drop=True)

    # Band 1: full-history close chart
    close = extract_point_from_df_date_x(df_stk, 'date', 'close',
                                         timeAxis='year')
    data = [tuple(close)]
    data_name = ['close']
    drawing_ave = gen_lp_drawing(data=data,
                                 data_note=data_name,
                                 height=letter[1] * 0.1,
                                 line_width=0.5,
                                 marker_size=2,
                                 time_axis='year')
    renderPDF.draw(drawing=drawing_ave, canvas=canvas_para, x=10,
                   y=letter[1] * 0.85)

    # Ascending time order so the indicator calculations are correct
    df_stk = df_stk.sort_values(by='date', ascending=True)
    df_stk = add_stk_index_to_df(df_stk)

    # Keep the latest 40 complete rows
    df_stk = df_stk.dropna(axis=0, how='any').tail(40)

    # Band 2: close + SAR
    close = extract_point_from_df_date_x(df_stk, 'date', 'close',
                                         timeAxis='year')
    sar = extract_point_from_df_date_x(df_stk, 'date', 'SAR', timeAxis='year')
    MACD = extract_point_from_df_date_x(df_stk, 'date', 'MACD',
                                        timeAxis='year')
    data = [tuple(close), tuple(sar)]
    data_name = ['close', 'sar']
    drawing_ave = gen_lp_drawing(data=data,
                                 data_note=data_name,
                                 height=letter[1] * 0.1,
                                 line_width=1.5,
                                 marker_size=5,
                                 time_axis='year')
    renderPDF.draw(drawing=drawing_ave, canvas=canvas_para, x=10,
                   y=letter[1] * 0.7)

    # Band 3: MACD bar chart
    drawing_MACD = gen_bar_drawing(data=MACD, data_note=['MACD'],
                                   height=letter[1] * 0.1)
    renderPDF.draw(drawing=drawing_MACD, canvas=canvas_para, x=10,
                   y=letter[1] * 0.5)

    # Band 4: KDJ
    slowk = extract_point_from_df_date_x(df_stk, 'date', 'slowk',
                                         timeAxis='year')
    slowd = extract_point_from_df_date_x(df_stk, 'date', 'slowd',
                                         timeAxis='year')
    data_kdj = [tuple(slowk), tuple(slowd)]
    data_kdj_note = ['k', 'd']
    drawing_kdj = gen_lp_drawing(data=data_kdj,
                                 data_note=data_kdj_note,
                                 height=letter[1] * 0.1,
                                 time_axis='year')
    renderPDF.draw(drawing=drawing_kdj, canvas=canvas_para, x=10,
                   y=letter[1] * 0.4)

    # Band 5: RSI (5/12/30)
    RSI5 = extract_point_from_df_date_x(df_stk, 'date', 'RSI5',
                                        timeAxis='year')
    RSI12 = extract_point_from_df_date_x(df_stk, 'date', 'RSI12',
                                         timeAxis='year')
    RSI30 = extract_point_from_df_date_x(df_stk, 'date', 'RSI30',
                                         timeAxis='year')
    data_RSI = [tuple(RSI5), tuple(RSI12), tuple(RSI30)]
    data_RSI_note = ['RSI5', 'RSI12', 'RSI30']
    drawing_RSI = gen_lp_drawing(data=data_RSI,
                                 data_note=data_RSI_note,
                                 height=letter[1] * 0.1,
                                 time_axis='year')
    renderPDF.draw(drawing=drawing_RSI, canvas=canvas_para, x=10,
                   y=letter[1] * 0.25)

    # Band 6: Bollinger bands
    upper = extract_point_from_df_date_x(df_stk, 'date', 'upper',
                                         timeAxis='year')
    middle = extract_point_from_df_date_x(df_stk, 'date', 'middle',
                                          timeAxis='year')
    lower = extract_point_from_df_date_x(df_stk, 'date', 'lower',
                                         timeAxis='year')
    data_BOLL = [tuple(upper), tuple(middle), tuple(lower)]
    data_BOLL_note = ['上线', '中线', '下线']
    drawing_BOLL = gen_lp_drawing(data=data_BOLL,
                                  data_note=data_BOLL_note,
                                  height=letter[1] * 0.1,
                                  time_axis='year')
    renderPDF.draw(drawing=drawing_BOLL, canvas=canvas_para, x=10,
                   y=letter[1] * 0.1)

    canvas_para.showPage()
    return canvas_para
def checkSingleStkHourMACD(stk_code, source='jq'):
    """
    Check 30- and 60-minute MACD turning points for a single stock and, when
    the detected state changes, send a chart to QQ.

    :param stk_code: stock code
    :param source: 'jq' (JQData) or 'ts' (tushare). NOTE(review): any other
        value leaves df_30/df_60 undefined and raises NameError — confirm
        callers only pass these two values.
    :return: None (side effects: updates global MACD_min_last, may send a
        picture via QQ)
    """
    if source == 'jq':
        df_30 = get_k_data_JQ(stk_code,
                              start_date=add_date_str(get_current_date_str(), -20),
                              freq='30m')
        df_60 = get_k_data_JQ(stk_code,
                              start_date=add_date_str(get_current_date_str(), -20),
                              freq='60m')
    elif source == 'ts':
        df_30 = my_pro_bar(stk_code,
                           start=add_date_str(get_current_date_str(), -20),
                           freq='30min')
        df_60 = my_pro_bar(stk_code,
                           start=add_date_str(get_current_date_str(), -20),
                           freq='60min')

    # Drop zero-volume rows (placeholder bars)
    df_30 = df_30.loc[df_30.apply(lambda x: not (x['volume'] == 0), axis=1), :]
    df_60 = df_60.loc[df_60.apply(lambda x: not (x['volume'] == 0), axis=1), :]

    df_30['MACD'], _, _ = talib.MACD(df_30.close, fastperiod=12,
                                     slowperiod=26, signalperiod=9)
    df_60['MACD'], _, _ = talib.MACD(df_60.close, fastperiod=12,
                                     slowperiod=26, signalperiod=9)

    # The last three MACD values decide whether a turning point just formed
    l_60 = df_60.tail(3)['MACD'].values
    l_30 = df_30.tail(3)['MACD'].values

    print('函数 checkSingleStkHourMACD:' + stk_code + ':\n30min:' + str(l_30) +
          '\n60min:' + str(l_60) + '\n')

    # Classify the turning point; 60-minute signals take priority
    if l_60[1] == np.min(l_60):
        title_str = '60分钟开始上涨'
        sts = 1
    elif l_60[1] == np.max(l_60):
        title_str = '60分钟开始下跌'
        sts = 2
    elif l_30[1] == np.max(l_30):
        title_str = '30分钟开始下跌'
        sts = 3
    elif l_30[1] == np.min(l_30):
        title_str = '30分钟开始上涨'
        sts = 4
    else:
        title_str = '当前无拐点'
        sts = 0

    # Avoid re-sending the same chart: only send when the state changed
    if stk_code in MACD_min_last.keys():
        if MACD_min_last[stk_code] != sts:
            send_pic = True
            MACD_min_last[stk_code] = sts
        else:
            send_pic = False
    else:
        send_pic = True
        MACD_min_last[stk_code] = sts

    print('函数 checkSingleStkHourMACD:' + stk_code + ':\nsend_pic标志位:' +
          str(send_pic) + '\nsts标志位:' + str(sts) + '\n')

    # Build the 4-panel chart (30m close/MACD, 60m close/MACD)
    df_30 = df_30.dropna()
    df_60 = df_60.dropna()

    fig, ax = subplots(ncols=1, nrows=4)
    ax[0].plot(range(0, len(df_30)), df_30['close'], 'g*--',
               label='close_30min')
    ax[1].bar(range(0, len(df_30)), df_30['MACD'], label='macd_30min')
    ax[2].plot(range(0, len(df_60)), df_60['close'], 'g*--',
               label='close_60min')
    ax[3].bar(range(0, len(df_60)), df_60['MACD'], label='macd_60min')
    for ax_sig in ax:
        ax_sig.legend(loc='best')
    plt.title(stk_code + '-' + title_str)

    # '&' is a bitwise AND here — works because both operands are bool
    if send_pic & (sts != 0):
        send_pic_qq('影子', fig)
    plt.close()
# r = getMDataPWD() # lastscale_stk_pool = loadLastScale('stk_pool') # updateConcernStkMData_Sea() # checkDivergeLowLevel_Sea() ts.set_token('7cb80219c0eec2cfee6608247e485025445f21017732a729d6f96345') from JQData_Test.auth_info import * stk_code = '300183' history_dict = MACD_min_History df_30 = my_pro_bar(stk_code, start=add_date_str(get_current_date_str(), -20), freq='30min') df_30['MACD'], _, _ = talib.MACD(df_30.close, fastperiod=12, slowperiod=26, signalperiod=9) df_60 = my_pro_bar(stk_code, start=add_date_str(get_current_date_str(), -20), freq='60min') df_60['MACD'], _, _ = talib.MACD(df_60.close, fastperiod=12, slowperiod=26, signalperiod=9) l_60 = df_60.tail(3)['MACD'].values l_30 = df_30.tail(3)['MACD'].values if l_60[1] == np.min(l_60):