def test_bei_chi():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    ka = KlineAnalyze(kline, name="日线", max_xd_len=10, verbose=False)

    # two down strokes (bi) used for the divergence check
    bi1 = {"start_dt": ka.bi_list[-11]['dt'], "end_dt": ka.bi_list[-10]['dt'], "direction": "down"}
    bi2 = {"start_dt": ka.bi_list[-13]['dt'], "end_dt": ka.bi_list[-12]['dt'], "direction": "down"}
    x1 = ka.is_bei_chi(bi1, bi2, mode="bi", adjust=0.9)

    # two down segments (xd) used for the divergence check
    xd1 = {"start_dt": ka.xd_list[-2]['dt'], "end_dt": ka.xd_list[-1]['dt'], "direction": "down"}
    xd2 = {"start_dt": ka.xd_list[-4]['dt'], "end_dt": ka.xd_list[-3]['dt'], "direction": "down"}
    x2 = ka.is_bei_chi(xd1, xd2, mode='xd', adjust=0.9)
    print('背驰计算结果:{},{}'.format(x1, x2))
def test_update_ta():
    # uses the module-level `kline` DataFrame
    ka = KlineAnalyze(kline, name="日线", max_raw_len=2000, verbose=False)
    ma_x1 = dict(ka.ma[-1])
    macd_x1 = dict(ka.macd[-1])

    # re-feeding the last bar must not change the latest MA/MACD values
    ka.update(kline.iloc[-1].to_dict())
    ma_x2 = dict(ka.ma[-1])
    macd_x2 = dict(ka.macd[-1])

    assert ma_x1['dt'] == ma_x2['dt']
    assert [round(x, 2) for x in ma_x1.values() if isinstance(x, float)] == \
           [round(x, 2) for x in ma_x2.values() if isinstance(x, float)]

    assert macd_x1['dt'] == macd_x2['dt']
    assert [round(x, 2) for x in macd_x1.values() if isinstance(x, float)] == \
           [round(x, 2) for x in macd_x2.values() if isinstance(x, float)]
def test_kline_analyze():
    ka = KlineAnalyze(kline, name="日线", max_raw_len=2000)

    # test plotting
    file_img = "kline.png"
    ka.to_image(file_img, max_k_count=5000)
    assert os.path.exists(file_img)
    os.remove(file_img)

    file_html = "kline.html"
    ka.to_html(file_html)
    assert os.path.exists(file_html)
    os.remove(file_html)

    # test fractal (分型) recognition
    assert ka.fx_list[-1]['fx_mark'] == 'g' and ka.fx_list[-1]['fx'] == 3456.97
    assert ka.fx_list[-5]['fx_mark'] == 'g' and ka.fx_list[-5]['fx'] == 2983.44

    # test stroke (笔) recognition
    assert ka.bi_list[-1]['fx_mark'] == 'g' and ka.bi_list[-1]['bi'] == 3456.97
    assert ka.bi_list[-4]['fx_mark'] == 'd' and ka.bi_list[-4]['bi'] == 2646.8

    # test segment (线段) recognition
    assert ka.xd_list[-2]['fx_mark'] == 'g' and ka.xd_list[-2]['xd'] == 3288.45
    assert ka.xd_list[-3]['fx_mark'] == 'd' and ka.xd_list[-3]['xd'] == 2440.91

    # test incremental update
    ka_raw_len = len(ka.kline_raw)
    for x in [2890, 2910, 2783, 3120]:
        k = dict(ka.kline_raw[-1])
        k['close'] = x
        ka.update(k)
        assert len(ka.kline_raw) == ka_raw_len
        assert ka.kline_raw[-1]['close'] == x
def selector(symbols: List):
    """Given a list of stock symbols, return the ones that meet the buy-point definition."""
    res = []
    for symbol in tqdm(symbols, desc="缠论选股"):
        try:
            kline = get_kline(symbol=symbol, end_date=datetime.now(), freq="30min", count=1000)
            ka = KlineAnalyze(kline, ma_params=(5, 34, 60, 250), bi_mode="new")

            if ka.ma[-1]['ma60'] >= ka.latest_price >= ka.ma[-1]['ma250']:
                # condition 1: ma60 >= close >= ma250
                points = ka.bi_list[-7:]
                if len(points) == 7 and points[-1]['fx_mark'] == 'd':
                    zs_g = min([x['bi'] for x in points[2:6] if x['fx_mark'] == 'g'])
                    zs_d = max([x['bi'] for x in points[2:6] if x['fx_mark'] == 'd'])
                    if zs_g > zs_d:
                        # condition 2: a downward central pivot (中枢) has completed
                        date_span = [points[-5]['dt'], points[-1]['dt']]
                        low = [x['low'] for x in ka.kline_raw if date_span[1] >= x['dt'] >= date_span[0]]
                        ma_ = [x['ma250'] for x in ka.ma if date_span[1] >= x['dt'] >= date_span[0]]
                        num = cross_number(low, ma_)
                        res.append({"symbol": symbol, "cross_num": num})
        except Exception:
            print("{} 分析失败".format(symbol))
            traceback.print_exc()
    return res
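# Minimal usage sketch for the selector above. The watchlist is a made-up example and
# `get_kline` must be backed by a working data source; neither is part of the original script.
if __name__ == '__main__':
    watchlist = ["000001.SH", "600036.SH"]  # hypothetical symbols
    candidates = selector(watchlist)
    # each hit carries the symbol plus `cross_num`, the value returned by cross_number()
    # for the lows against ma250 over the pullback window
    for item in candidates:
        print(item["symbol"], item["cross_num"])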
def test_ka_update():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    kline1 = kline.iloc[:2000]
    kline2 = kline.iloc[2000:]

    # ka1 is built from the full history; ka2 starts from the first 2000 bars and is
    # updated bar by bar -- both should produce structures of the same length
    ka1 = KlineAnalyze(kline, name="日线", max_raw_len=5000, verbose=False)
    ka2 = KlineAnalyze(kline1, name="日线", max_raw_len=5000, verbose=False)
    for _, row in kline2.iterrows():
        ka2.update(row.to_dict())

    assert len(ka1.kline_new) == len(ka2.kline_new)
    assert len(ka1.fx_list) == len(ka2.fx_list)
    assert len(ka1.bi_list) == len(ka2.bi_list)
def test_update_ta():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    ka = KlineAnalyze(kline, name="日线", max_xd_len=10, verbose=False)

    ma_x1 = dict(ka.ma[-1])
    macd_x1 = dict(ka.macd[-1])
    ka.update(kline.iloc[-1].to_dict())
    ma_x2 = dict(ka.ma[-1])
    macd_x2 = dict(ka.macd[-1])

    assert ma_x1['dt'] == ma_x2['dt']
    assert [round(x, 2) for x in ma_x1.values() if isinstance(x, float)] == \
           [round(x, 2) for x in ma_x2.values() if isinstance(x, float)]

    assert macd_x1['dt'] == macd_x2['dt']
    assert [round(x, 2) for x in macd_x1.values() if isinstance(x, float)] == \
           [round(x, 2) for x in macd_x2.values() if isinstance(x, float)]
def test_calculate_power():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    ka = KlineAnalyze(kline, name="日线", max_raw_len=5000, verbose=False)

    # test MACD power
    last_xd_power = ka.calculate_macd_power(
        start_dt=ka.xd_list[-2]['dt'], end_dt=ka.xd_list[-1]['dt'], mode='xd',
        direction="up" if ka.xd_list[-1]['fx_mark'] == 'g' else "down")
    last_bi_power = ka.calculate_macd_power(
        start_dt=ka.bi_list[-2]['dt'], end_dt=ka.bi_list[-1]['dt'], mode='bi')
    assert int(last_xd_power) == 389
    assert int(last_bi_power) == 300

    # test volume power
    last_xd_power = ka.calculate_vol_power(start_dt=ka.xd_list[-2]['dt'], end_dt=ka.xd_list[-1]['dt'])
    last_bi_power = ka.calculate_vol_power(start_dt=ka.bi_list[-2]['dt'], end_dt=ka.bi_list[-1]['dt'])
    assert int(last_xd_power) == 13329239053
    assert int(last_bi_power) == 9291793337
def test_kline_analyze():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    ka = KlineAnalyze(kline, name="日线", max_count=1000, use_xd=True, verbose=False)

    # test plotting
    file_img = "kline.png"
    ka.to_image(file_img, max_k_count=5000)
    assert os.path.exists(file_img)

    # test fractal (分型) recognition
    assert ka.fx_list[-1]['fx_mark'] == 'g'
    assert ka.fx_list[-5]['fx_mark'] == 'g'

    # test stroke (笔) recognition
    assert ka.bi_list[-1]['fx_mark'] == 'g'
    assert ka.bi_list[-4]['fx_mark'] == 'd'

    # test segment (线段) recognition
    assert ka.xd_list[-2]['fx_mark'] == 'g'
    assert ka.xd_list[-3]['fx_mark'] == 'd'

    # test incremental update
    for x in [2890, 2910, 2783, 3120]:
        k = dict(ka.kline_raw[-1])
        k['close'] = x
        ka.update(k)
        assert ka.kline_raw[-1]['close'] == x
def test_ka_update():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    kline1 = kline.iloc[:2000]
    kline2 = kline.iloc[2000:]

    # full-history analysis and incremental updates should end at the same point
    ka1 = KlineAnalyze(kline, name="日线", max_count=1000, use_xd=True, verbose=False)
    ka2 = KlineAnalyze(kline1, name="日线", max_count=1000, use_xd=True, verbose=False)
    for _, row in kline2.iterrows():
        ka2.update(row.to_dict())

    assert ka1.kline_new[-1]['dt'] == ka2.kline_new[-1]['dt']
    assert ka1.fx_list[-1]['dt'] == ka2.fx_list[-1]['dt']
    assert ka1.bi_list[-1]['dt'] == ka2.bi_list[-1]['dt']
    assert ka1.xd_list[-1]['dt'] == ka2.xd_list[-1]['dt']

    # with use_xd=False no segments (xd) are computed
    ka3 = KlineAnalyze(kline, name="日线", max_count=1000, use_xd=False, verbose=False)
    assert ka3.kline_new[-1]['dt'] == ka2.kline_new[-1]['dt']
    assert ka3.fx_list[-1]['dt'] == ka2.fx_list[-1]['dt']
    assert ka3.bi_list[-1]['dt'] == ka2.bi_list[-1]['dt']
    assert not ka3.xd_list
def test_get_sub_section():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
    ka = KlineAnalyze(kline, name="日线", max_xd_len=10, verbose=False)

    sub_kn = ka.get_sub_section(ka.fx_list[-2]['dt'], ka.fx_list[-1]['dt'], mode='kn', is_last=True)
    assert sub_kn[0]['dt'] == ka.fx_list[-2]['dt'] and sub_kn[-1]['dt'] == ka.fx_list[-1]['dt']

    sub_fx = ka.get_sub_section(ka.bi_list[-2]['dt'], ka.bi_list[-1]['dt'], mode='fx', is_last=True)
    assert sub_fx[0]['dt'] == ka.bi_list[-2]['dt'] and sub_fx[-1]['dt'] == ka.bi_list[-1]['dt']

    sub_bi = ka.get_sub_section(ka.xd_list[-2]['dt'], ka.xd_list[-1]['dt'], mode='bi', is_last=True)
    assert sub_bi[0]['dt'] == ka.xd_list[-2]['dt'] and sub_bi[-1]['dt'] == ka.xd_list[-1]['dt']

    sub_xd = ka.get_sub_section(ka.xd_list[-4]['dt'], ka.xd_list[-1]['dt'], mode='xd', is_last=True)
    assert sub_xd[0]['dt'] == ka.xd_list[-4]['dt'] and sub_xd[-1]['dt'] == ka.xd_list[-1]['dt']
def singal_selector(symbols: List):
    """Return symbols whose latest segment shows a downward divergence (背驰)."""
    res = []
    for symbol in symbols:
        try:
            kline = get_kline(symbol=symbol, end_date=datetime.now(), freq="30min", count=1000)
            ka = KlineAnalyze(kline, ma_params=(5, 34, 60, 250), bi_mode="new")
            if ka.ma[-1]['ma60'] >= ka.latest_price >= ka.ma[-1]['ma250']:
                bc_type = check_bei_chi(ka.xd_list[-5], ka.xd_list[-4], ka.xd_list[-3],
                                        ka.xd_list[-2], ka.xd_list[-1]).get("bc")
                if bc_type in ["向下趋势背驰", "向下盘整背驰"]:
                    res.append({"symbol": symbol, "beici": bc_type})
        except Exception:
            print("{} 分析失败".format(symbol))
            traceback.print_exc()
    return res
def test_kline_pro():
    file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
    kline = pd.read_csv(file_kline, encoding="utf-8")
    bars = kline.to_dict("records")
    ka = KlineAnalyze(bars)

    # segment endpoints double as buy/sell markers: bottoms are buys, tops are sells
    bs = []
    for x in ka.xd_list:
        if x['fx_mark'] == 'd':
            mark = "buy"
        else:
            mark = "sell"
        bs.append({"dt": x['dt'], "mark": mark, mark: x['xd']})

    chart = plot.kline_pro(ka.kline_raw, fx=ka.fx_list, bi=ka.bi_list, xd=ka.xd_list, bs=bs)
    chart.render()
def selector(symbols: List):
    """Given a list of stock symbols, return the ones that meet the buy-point definition."""
    res = []
    for symbol in tqdm(symbols, desc="缠论日线笔中枢三买选股"):
        try:
            kline = get_kline(symbol=symbol, end_date=datetime.now(), freq="D", count=1000)
            ka = KlineAnalyze(kline, ma_params=(5, 34, 120, 233), bi_mode="new")
            points = ka.bi_list[-6:]
            if len(points) == 6 and points[-1]['fx_mark'] == "d":
                zs_g = min([x['bi'] for x in points[:4] if x['fx_mark'] == 'g'])
                zs_d = max([x['bi'] for x in points[:4] if x['fx_mark'] == 'd'])
                # third buy: the latest stroke bottom holds above the pivot's upper bound
                if points[-1]['bi'] > zs_g > zs_d:
                    res.append(symbol)
        except Exception:
            print("{} 分析失败".format(symbol))
            traceback.print_exc()
    return res
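# Illustration of the pivot check used above, on made-up numbers (not library data): four
# stroke endpoints form a central pivot (中枢) when the lowest top stays above the highest
# bottom, and the third-buy condition requires the latest bottom to hold above that pivot.
def _third_buy_example():
    points = [  # hypothetical stroke endpoints: fx_mark 'g' = top, 'd' = bottom
        {"fx_mark": "g", "bi": 11.0},
        {"fx_mark": "d", "bi": 9.5},
        {"fx_mark": "g", "bi": 10.8},
        {"fx_mark": "d", "bi": 9.8},
        {"fx_mark": "g", "bi": 12.0},
        {"fx_mark": "d", "bi": 11.2},  # latest bottom
    ]
    zs_g = min(x["bi"] for x in points[:4] if x["fx_mark"] == "g")  # pivot upper bound: 10.8
    zs_d = max(x["bi"] for x in points[:4] if x["fx_mark"] == "d")  # pivot lower bound: 9.8
    assert points[-1]["bi"] > zs_g > zs_d  # 11.2 > 10.8 > 9.8 -> third-buy condition holds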
def use_large_df():
    symbol = "*****@*****.**"
    freq = '5min'
    file_csv = f"{symbol}_kline_{freq}.csv"
    start_dt = datetime(2017, 1, 1, 6, 0, 0)
    end_dt = datetime(2020, 5, 1, 6, 0, 0)

    freq_dur_sec = {"1min": 60, '5min': 300, '30min': 1800, 'D': 3600 * 24}
    freq_delta = {
        "1min": timedelta(days=20),
        '5min': timedelta(days=100),
        '30min': timedelta(days=300),
        'D': timedelta(days=3000),
    }

    # download the raw bars with tqsdk's DataDownloader
    api = TqApi()
    k = DataDownloader(api, symbol_list=symbol, dur_sec=freq_dur_sec[freq],
                       start_dt=start_dt - freq_delta[freq], end_dt=end_dt,
                       csv_file_name=file_csv)
    with closing(api):
        while not k.is_finished():
            api.wait_update()
            print("download progress: %.2f%%" % k.get_progress())

    # normalize the downloaded CSV to the column layout KlineAnalyze expects
    kline = pd.read_csv(file_csv)
    kline.columns = [x.replace(symbol + ".", "") for x in kline.columns]
    kline.rename({"volume": "vol"}, axis=1, inplace=True)
    kline.loc[:, "symbol"] = symbol
    kline.loc[:, "dt"] = kline['datetime'].apply(lambda x: x.split(".")[0])
    kline = kline[['symbol', 'dt', 'open', 'close', 'high', 'low', 'vol']]
    print(kline.shape)

    ka = KlineAnalyze(kline)
    return ka
import sys
sys.path.insert(0, '..')
import os
import warnings

import pandas as pd

import czsc
from czsc.analyze import KlineAnalyze, find_zs

warnings.warn("czsc version is {}".format(czsc.__version__))

cur_path = os.path.split(os.path.realpath(__file__))[0]
# cur_path = "./test"
file_kline = os.path.join(cur_path, "data/000001.SH_D.csv")
kline = pd.read_csv(file_kline, encoding="utf-8")
kline.loc[:, "dt"] = pd.to_datetime(kline.dt)
kline1 = kline.iloc[:2000]
kline2 = kline.iloc[2000:]
ka = KlineAnalyze(kline1, name="日线", max_raw_len=2000, verbose=True)


def test_update():
    for _, row in kline2.iterrows():
        ka.update(row.to_dict())
        assert ka.kline_raw[-1]['dt'] == row['dt']


def test_update_ta():
    ma_x1 = dict(ka.ma[-1])
    macd_x1 = dict(ka.macd[-1])
    ka.update(kline.iloc[-1].to_dict())
    ma_x2 = dict(ka.ma[-1])
    macd_x2 = dict(ka.macd[-1])
    assert ma_x1['dt'] == ma_x2['dt']