コード例 #1
0
 def __init__(self, conf=None):
     """Load the config file named *conf* that sits next to the running script.

     Args:
         conf: config file name, resolved relative to the script directory.
             NOTE(review): the default of None crashes in the path join —
             callers appear to always pass a name; confirm before relying
             on the default.
     """
     wx = lg.get_handle()
     # os.path.join is portable; the original hard-coded the Windows "\\"
     # separator, which breaks on other platforms.
     conf = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), conf)
     # wx.info("[OBJ] Conf_handler : __init__ Read Conf : {}".format(conf))
     self.rd = configparser.ConfigParser()
     self.rd.read(conf)
コード例 #2
0
    def get_json_str(self, id, time_str=None, page_num=0):
        """Fetch tick-by-tick trade pages for *id* from eastmoney.

        Pages are fetched starting at self.rt_page_dict[id] until a page whose
        last trade reaches the end of *time_str*; parsed trades are stored by
        self.json_parse into self.rt_dict_df.

        Args:
            id: stock id; its first two characters select the market codes.
            time_str: "HH:MM-HH:MM" window on the day of self.date_str.
            page_num: when non-zero, overrides the starting page index.

        Returns:
            The recorded page index for the next incremental fetch, or None on
            bad input, failed request, or when the window is already covered.
        """
        wx = lg.get_handle()

        if time_str is None or len(time_str) < 11:
            wx.info("[RT_East][get_json_str] 时间段 不正确,退出")
            return None
        else:
            [begin_time_str, end_time_str] = time_str.split("-")
            # begin_time_stamp is only used to validate the begin time format
            # (strptime raises on a malformed value).
            begin_time_stamp = int(
                time.mktime(
                    time.strptime(self.date_str + begin_time_str,
                                  '%Y%m%d%H:%M')))
            end_time_stamp = int(
                time.mktime(
                    time.strptime(self.date_str + end_time_str,
                                  '%Y%m%d%H:%M')))

        my_timer = wx_timer(date_str='')
        ret_zone = my_timer.tell_time_zone(t_stamp=end_time_stamp)
        # The checkpoint matching end_time_stamp becomes the start time of the
        # next get_rt_data call.
        record_stamp = ret_zone[2]

        # Kept for reference (rebase path): a new id would start at page 0 and
        # the counter is incremented after each loop below.
        # if id not in self.rt_page_dict.keys():
        #     self.rt_page_dict[id] = 0

        # Used by get_rt_data: restore the starting page index read from file.
        if page_num != 0:
            self.rt_page_dict[id] = page_num
        # NOTE(review): if the id was never queried and page_num == 0,
        # self.rt_page_dict[id] below raises KeyError — confirm callers always
        # seed the page index.

        # Skip fetching when the RT object already holds trades up to
        # end_time_stamp.
        if id in self.rt_dict_df.keys(
        ) and self.rt_dict_df[id].time_stamp.max() >= end_time_stamp:
            wx.info(
                "[RT_East][{}] RT 对象已保存 [{}--{}]逐笔交易数据,目标时间段[{}--{}]不需要重新获取".
                format(
                    id,
                    time.strftime(
                        "%H:%M:%S",
                        time.localtime(self.rt_dict_df[id].time_stamp.min())),
                    time.strftime(
                        "%H:%M:%S",
                        time.localtime(self.rt_dict_df[id].time_stamp.max())),
                    begin_time_str, end_time_str))
            return None

        # id prefix -> [id-suffix market code, "market" query argument].
        market_code_dict = {
            '60': ['1', '1'],
            '00': ['2', '0'],
            '30': ['2', '0'],
            '68': ['1', '0']
        }

        while True:
            url = "http://push2ex.eastmoney.com/getStockFenShi?pagesize="+self.item_page+\
                  "&ut=7eea3edcaed734bea9cbfc24409ed989&dpt=wzfscj&" \
                  "cb=jQuery1123021130998143685753_1580471904475&pageindex="+str(self.rt_page_dict[id])+\
                  "&id="+id+ market_code_dict[id[0:2]][0]+"&" \
                  "sort=1&ft=1&code="+id+"&market="+market_code_dict[id[0:2]][1]+"&_=1580471904476"

            # sort=1 ascending; 2 descending.

            header = {
                'Cookie':
                'UM_distinctid=16bf36d52242f3-0693469a5596d3-e323069-1fa400-16bf36d5225362; _ntes_nnid=16b2182ff532e10833492eedde0996df,1563157161323; _ntes_nuid=16b2182ff532e10833492eedde0996df; vjuids=e0fb8aa0.16d4ee83324.0.e074eccb150e; [email protected]|1570190476|0|mail163|00&99|hen&1570190062&mail163#CN&null#10#0#0|&0|mail163|[email protected]; [email protected]:-1:1; mail_psc_fingerprint=8da65e9cc5769a658a69962d94f7c46f; _ntes_usstock_recent_=NTES%7C; _ntes_usstock_recent_=NTES%7C; vjlast=1568986903.1571018378.11; s_n_f_l_n3=e119c348b08890ac1571018378289; NNSSPID=0e35f22546f44023b00d65e2a3ca1f26; ne_analysis_trace_id=1571018721010; _ntes_stock_recent_=1002699%7C0600000%7C1000573; _ntes_stock_recent_=1002699%7C0600000%7C1000573; _ntes_stock_recent_=1002699%7C0600000%7C1000573; pgr_n_f_l_n3=e119c348b08890ac1571018815386610; vinfo_n_f_l_n3=e119c348b08890ac.1.5.1563157161368.1570632456351.1571018833379',
                'User-Agent':
                'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36',
                'Referer':
                'http://quote.eastmoney.com/f1.html?code=' + id + '&market=2'
            }

            # requests.packages.urllib3.disable_warnings()
            http = urllib3.PoolManager()
            try:
                raw_data = http.request('GET', url, headers=header)
            except Exception:
                # Request failed: give up on this window.
                return None
            # BUG FIX: this status check used to live in a `finally` block,
            # where raw_data is unbound when the request raises, turning a
            # network error into an UnboundLocalError.
            if raw_data.status >= 300:
                wx.info("Web response failed : {}".format(url))
                return None

            # Detect the payload encoding and decode it.
            str_type = chardet.detect(raw_data.data)
            # unicode = raw_data.data.decode(str_type['encoding'])
            unicode = lg.str_decode(raw_data.data, str_type['encoding'])

            # Parse the JSON string; trades are saved into the RT object's
            # rt_dict_df by json_parse.
            [page_start_time_str,
             page_end_time_str] = self.json_parse(id=id, json_str=unicode)
            if page_start_time_str is not None:
                page_end_time_stamp = int(
                    time.mktime(
                        time.strptime(self.date_str + page_end_time_str,
                                      '%Y%m%d%H:%M:%S')))
                page_start_time_stamp = int(
                    time.mktime(
                        time.strptime(self.date_str + page_start_time_str,
                                      '%Y%m%d%H:%M:%S')))

                # Remember the page containing record_stamp in
                # self.record_page_dict[id]; get_rt_data persists it as the
                # first page index of the next run.
                if time.strftime("%H:%M",
                                 time.localtime(record_stamp)) != '13:00':
                    if page_end_time_stamp >= record_stamp and page_start_time_stamp <= record_stamp:
                        self.record_page_dict[id] = self.rt_page_dict[id]
                else:
                    # Special-case 13:00: pages before 11:30 never contain it,
                    # so just keep recording up to the last page number seen.
                    self.record_page_dict[id] = self.rt_page_dict[id]

                if page_end_time_stamp >= end_time_stamp:
                    wx.info(
                        "[RT_East][{}] 第{}页 [{}--{}]逐笔交易数据,已获得目标时间段数据".format(
                            id, self.rt_page_dict[id], page_start_time_str,
                            page_end_time_str))
                    # Advance the page counter so a rebase resumes from here.
                    self.rt_page_dict[id] += 1
                    break
                else:
                    wx.info("[RT_East][{}] 第{}页 [{}--{}]逐笔交易数据, 未完成,继续获取下一页数据".
                            format(id, self.rt_page_dict[id],
                                   page_start_time_str, page_end_time_str))
                    # Advance the page counter so a rebase resumes from here.
                    self.rt_page_dict[id] += 1

                time.sleep(0.5)
            else:
                wx.info("[RT_East] [{}] 第{}页 没有数据,退出".format(
                    id, self.rt_page_dict[id]))
                break
        # .get() avoids a KeyError when the very first page had no data and
        # record_page_dict[id] was never set; None matches this method's
        # other failure returns.
        return self.record_page_dict.get(id)
コード例 #3
0
    def rt_cmp_pa_baseline(self, rt=None, pa_bl_df=None):
        """Compare realtime price/amount (PA) vectors against baseline data.

        For each stock id in rt.rt_dict_df: resample its tick data per time
        frame, compute PA vector length and angle, then flag values that fall
        outside the per-frame baseline envelope taken from pa_bl_df.

        Args:
            rt: realtime-data object; its rt_dict_df maps id -> tick DataFrame.
            pa_bl_df: baseline DataFrame keyed by ['id', 't_frame'].

        Returns:
            DataFrame with columns [id, date, time, type, msg] describing the
            detected anomalies, or None when either input is empty.
        """
        wx = lg.get_handle()
        rt_dict_df = rt.rt_dict_df
        date_str = (date.today()).strftime('%Y%m%d')
        if rt_dict_df is None:
            wx.info("[Rt_Ana][RT_CMP_PA_Baseline] 实时数据字典 是空,退出")
            return None
        if pa_bl_df is None:
            wx.info("[Rt_Ana][RT_CMP_PA_Baseline] 基线数据 是空,退出")
            return None
        cmp_pa_result = pd.DataFrame()  # accumulates the final result for DB import
        for id in rt_dict_df.keys():
            if rt_dict_df[id] is None:
                wx.info(
                    "[Rt_Ana][RT_CMP_PA_Baseline] {} 未产生实时交易数据,进入下一支股票".format(
                        id))
                continue

            # Align the first frame boundary to the earliest tick timestamp.
            [frame_begin_stamp,
             frame_begin_time_str] = self.rt_df_find_start_stamp(
                 rt_stamp=rt_dict_df[id]['time_stamp'].min())
            end_stamp = rt_dict_df[id].time_stamp.max()

            # Slice the rt data per time frame, compute PA vector length,
            # angle, pct change and turnover, then compare to the baseline.
            while frame_begin_stamp < end_stamp:
                # Re-initialised once per frame: holds this frame's resampled
                # RT data only.
                rt_pa_df = pd.DataFrame()

                # frame_begin_time_str = time.strftime("%H:%M", time.localtime(frame_begin_stamp))
                frame_end_time_str = self.t_frame_dict.get(
                    frame_begin_time_str)[0]
                if frame_end_time_str is None:
                    wx.info(
                        "[Rt_Ana][RT_CMP_PA_Baseline] {} [{}] 起始时间不属于正常范围!!!!".
                        format(id, frame_begin_time_str))
                    break

                t_frame = frame_begin_time_str + "-" + frame_end_time_str
                frame_end_stamp = int(
                    time.mktime(
                        time.strptime(date_str + frame_end_time_str,
                                      '%Y%m%d%H:%M')))

                #  Realtime PA data can be compared against the baseline's
                #  t_frame data at any time — a complete frame is not needed.
                # if frame_end_stamp > end_stamp:
                #     wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] {} {} 已超出本次获取的实时数据范围,进入下一支股票".format(id, t_frame))
                #     break

                # Extract this id's baseline rows for t_frame.
                id_bl_df = pa_bl_df.loc[(pa_bl_df['id'] == id) &
                                        (pa_bl_df['t_frame'] == t_frame), ]

                # Extract this id's RT rows for the frame.
                if frame_end_time_str == '15:00':  # after the 15:00 close, one last trade can land a few seconds past 15:00
                    rt_df = rt.rt_dict_df[id].loc[(
                        rt.rt_dict_df[id]['time_stamp'] >=
                        frame_begin_stamp)].copy()
                else:
                    rt_df = rt.rt_dict_df[id].loc[
                        (rt.rt_dict_df[id]['time_stamp'] >= frame_begin_stamp)
                        & (rt.rt_dict_df[id]['time_stamp'] < frame_end_stamp
                           )].copy()
                if rt_df is None or rt_df.empty:
                    wx.info(
                        "[Rt_Ana][RT_CMP_PA_Baseline] [{}] 在[{}]期间交易数据为空,开始处理下一支股票"
                        .format(id, t_frame))
                    break

                #
                # Resample the RT data and compute the PA vector and angle.
                #
                rt_df = rt_df.sort_values(by="time_stamp", ascending=True)
                rt_df['amount'] = rt_df['price'] * rt_df['vol']
                rt_df['pd_time'] = date_str + " " + rt_df['time_str']
                rt_df['pd_time'] = pd.to_datetime(rt_df['pd_time'],
                                                  format="%Y%m%d %H:%M:%S")
                rt_df.set_index('pd_time', inplace=True)
                rt_df['price'] = pd.to_numeric(rt_df['price'])

                rt_pa_df['amount'] = rt_df['amount'].resample(
                    self.rt_PA_resample_secs).sum()
                rt_pa_df['min_price'] = rt_df['price'].resample(
                    self.rt_PA_resample_secs).min()
                rt_pa_df['max_price'] = rt_df['price'].resample(
                    self.rt_PA_resample_secs).max()
                rt_pa_df['pct_chg'] = rt_pa_df['max_price'] / rt_pa_df[
                    'min_price'] - 1
                rt_pa_df['pct_chg_enhanced'] = (
                    rt_pa_df['max_price'] / rt_pa_df['min_price'] -
                    1) * 500000000

                # Time(high price) - Time(low price) > 0 means rise, < 0 fall.
                rt_pa_df['pct_up_down'] = rt_df['price'].resample(
                    self.rt_PA_resample_secs).apply(
                        lambda x: x.idxmax() - x.idxmin() if len(x) > 0 else
                        (pd.to_datetime(0) - pd.to_datetime(0)))

                rt_pa_df.fillna(0, inplace=True)

                # Drop slots with zero turnover and zero price change.
                rt_pa_df = rt_pa_df.loc[(rt_pa_df['amount'] > 0) &
                                        (rt_pa_df['pct_chg_enhanced'] > 0), ]
                # Convert the pd.datetime64 delta to float so rise/fall within
                # a slot can be tested numerically.
                rt_pa_df['pct_up_down'] = pd.to_numeric(
                    rt_pa_df['pct_up_down'])

                # PA vector length.
                rt_pa_df['pa_vector'] = pow(
                    pow(rt_pa_df['amount'], 2) +
                    pow(rt_pa_df['pct_chg_enhanced'], 2), 0.5)
                # PA vector direction (sign of the rise/fall delta).
                rt_pa_df['pct_dir'] = rt_pa_df['pct_up_down'].apply(
                    lambda x: x / abs(x) if x != 0 else 0)
                # PA vector angle: X axis = pct change, Y axis = turnover.
                # Smaller value: small turnover, large price swing.
                # Larger value: large turnover, small price swing.
                rt_pa_df['pa_angle'] = rt_pa_df['amount'] / rt_pa_df[
                    'pct_chg_enhanced'] * rt_pa_df['pct_dir']
                rt_pa_df['id'] = id
                rt_pa_df.reset_index(drop=False, inplace=True)
                rt_pa_df['pd_time'] = rt_pa_df['pd_time'].dt.strftime(
                    '%Y%m%d %H:%M:%S')
                rt_pa_df['date'] = rt_pa_df['pd_time'].apply(lambda x: x[0:8])
                rt_pa_df['time'] = rt_pa_df['pd_time'].apply(lambda x: x[9:])
                pa_cmp_df = pd.merge(rt_pa_df, id_bl_df, on=['id'], how='left')

                pa_cmp_df['pct_chg'] *= 100
                pa_cmp_df['b_up_pct_max'] *= 100
                pa_cmp_df['b_up_pct_min'] *= 100
                pa_cmp_df['b_down_pct_max'] *= 100
                pa_cmp_df['b_down_pct_min'] *= 100

                # Classified into rising / falling slots.
                # Compare the rising slots against the baseline.
                # Each value: [max column, min column, high msg, low msg,
                #              high type, low type].
                cmp_up_dict = {
                    'pa_vector': [
                        'b_up_pa_max', 'b_up_pa_min', 'PA_UP长度-[超高]-',
                        'PA_UP长度-[超低]-', '[PA_UP向量超长]', '[PA_UP向量超短'
                    ],  # PA vector length
                    'pa_angle': [
                        'b_up_ang_max', 'b_up_ang_min', 'PA_UP角度-[量大幅小]-',
                        'PA_UP角度-[量小幅大]-', '[PA_UP量大幅小]', '[PA_UP量小幅大]'
                    ],  # PA vector angle
                    # 'amount': ['b_up_amount_max', 'b_up_amount_min', '大买单金额占比-[超高]-', '大买单金额占比-[超低]-','[B多]大买单净比高', '[B空]大买单净比低'],  # 大单买入金额占比
                    # 'pct_chg': ['b_up_pct_max', 'b_up_pct_min', '大卖单金额占比-[超高]-', '大卖单金额占比-[超低]-', '[B空]大卖单净比高', '[B多]大卖单净比低'],  # 大单卖出金额占比
                }
                cmp_up_result_df = pd.DataFrame()
                # Select the rising rows.
                pa_up_cmp_df = pa_cmp_df.loc[pa_cmp_df['pct_dir'] > 0, ]
                if pa_up_cmp_df is not None and len(pa_up_cmp_df) > 0:
                    for key in cmp_up_dict.keys():
                        high_df = pa_up_cmp_df.loc[
                            pa_up_cmp_df[key] > pa_up_cmp_df[cmp_up_dict[key]
                                                             [0]]]
                        high_df = self._cmp_data_process_2_(
                            df=high_df,
                            key=key,
                            val=cmp_up_dict[key][0],
                            msg=cmp_up_dict[key][2],
                            type=cmp_up_dict[key][4])
                        if key == 'pa_vector':  # short PA vectors are not logged as anomalies
                            low_df = None
                        else:
                            low_df = pa_up_cmp_df.loc[
                                pa_up_cmp_df[key] < pa_up_cmp_df[
                                    cmp_up_dict[key][1]]]
                            low_df = self._cmp_data_process_2_(
                                df=low_df,
                                key=key,
                                val=cmp_up_dict[key][1],
                                msg=cmp_up_dict[key][3],
                                type=cmp_up_dict[key][5])
                        if cmp_up_result_df is None or len(
                                cmp_up_result_df) == 0:
                            if high_df is not None:
                                cmp_up_result_df = high_df
                                if low_df is not None:
                                    cmp_up_result_df = cmp_up_result_df.append(
                                        low_df)
                            else:
                                cmp_up_result_df = low_df
                        else:
                            if high_df is not None:
                                cmp_up_result_df = cmp_up_result_df.append(
                                    high_df)
                            if low_df is not None:
                                cmp_up_result_df = cmp_up_result_df.append(
                                    low_df)

                # Same comparison for the falling slots; note the swapped
                # high/low message semantics for pa_angle.
                cmp_down_dict = {
                    'pa_vector': [
                        'b_down_pa_max', 'b_down_pa_min', 'PA_DOWN长度-[超高]-',
                        'PA_DOWN长度-[超低]-', '[PA_DOWN向量超长]', '[PA_DOWN向量超短'
                    ],  # PA vector length
                    'pa_angle': [
                        'b_down_ang_max', 'b_down_ang_min',
                        'PA_DOWN角度-[量小幅大]-', 'PA_DOWN角度-[量大幅小]-',
                        '[PA_DOWN量小幅大]', '[PA_DOWN量大幅小'
                    ],  # PA vector angle
                    # 'amount': ['b_up_amount_max', 'b_up_amount_min', '大买单金额占比-[超高]-', '大买单金额占比-[超低]-','[B多]大买单净比高', '[B空]大买单净比低'],  # 大单买入金额占比
                    # 'pct_chg': ['b_up_pct_max', 'b_up_pct_min', '大卖单金额占比-[超高]-', '大卖单金额占比-[超低]-', '[B空]大卖单净比高', '[B多]大卖单净比低'],  # 大单卖出金额占比
                }
                cmp_down_result_df = pd.DataFrame()
                # Select the falling rows.
                pa_down_cmp_df = pa_cmp_df.loc[pa_cmp_df['pct_dir'] < 0, ]
                if pa_down_cmp_df is not None and len(pa_down_cmp_df) > 0:
                    for key in cmp_down_dict.keys():
                        high_df = pa_down_cmp_df.loc[
                            pa_down_cmp_df[key] > pa_down_cmp_df[
                                cmp_down_dict[key][0]]]
                        high_df = self._cmp_data_process_2_(
                            df=high_df,
                            key=key,
                            val=cmp_down_dict[key][0],
                            msg=cmp_down_dict[key][2],
                            type=cmp_down_dict[key][4])
                        if key == 'pa_vector':
                            low_df = None  # short PA vectors are not logged as anomalies
                        else:
                            low_df = pa_down_cmp_df.loc[
                                pa_down_cmp_df[key] < pa_down_cmp_df[
                                    cmp_down_dict[key][1]]]
                            low_df = self._cmp_data_process_2_(
                                df=low_df,
                                key=key,
                                val=cmp_down_dict[key][1],
                                msg=cmp_down_dict[key][3],
                                type=cmp_down_dict[key][5])
                        if cmp_down_result_df is None or len(
                                cmp_down_result_df) == 0:
                            if high_df is not None:
                                cmp_down_result_df = high_df
                                if low_df is not None:
                                    cmp_down_result_df = cmp_down_result_df.append(
                                        low_df)
                            else:
                                cmp_down_result_df = low_df
                        else:
                            if high_df is not None:
                                cmp_down_result_df = cmp_down_result_df.append(
                                    high_df)
                            if low_df is not None:
                                cmp_down_result_df = cmp_down_result_df.append(
                                    low_df)

                # Anomaly detection for this id done; fold the up/down results
                # into cmp_pa_result.
                if cmp_pa_result is None or len(cmp_pa_result) == 0:
                    if cmp_up_result_df is not None:
                        cmp_pa_result = cmp_up_result_df
                        if cmp_down_result_df is not None:
                            cmp_pa_result = cmp_pa_result.append(
                                cmp_down_result_df)
                    else:
                        cmp_pa_result = cmp_down_result_df
                else:
                    if cmp_up_result_df is not None:
                        cmp_pa_result = cmp_pa_result.append(cmp_up_result_df)
                    if cmp_down_result_df is not None:
                        cmp_pa_result = cmp_pa_result.append(
                            cmp_down_result_df)

                # Advance to the next frame of the while loop.
                frame_begin_time_str = self.t_frame_dict.get(
                    frame_begin_time_str)[1]
                if len(frame_begin_time_str) == 0:
                    wx.info("[Rt_Ana][RT_CMP_PA_Baseline] {} {} 已处理完毕,进入下一支股票".
                            format(id, t_frame))
                    break
                frame_begin_stamp = int(
                    time.mktime(
                        time.strptime(date_str + frame_begin_time_str,
                                      '%Y%m%d%H:%M')))

        cols = ['id', 'date', 'time', 'type', 'msg']
        cmp_pa_result = cmp_pa_result.loc[:, cols]

        return cmp_pa_result
コード例 #4
0
    def calc_arr(self,
                 pre_id='',
                 fresh=False,
                 data_src='cq',
                 bt_start_date='',
                 bt_end_date=''):
        """Compute MA / EMA / MACD / Bolling columns for ids matching *pre_id*.

        Args:
            pre_id: stock-id prefix ('002', '00', '30', '60', '68') selecting
                the source table.
            fresh: when False, return only each id's most recent row; when
                True, return the full history.
            data_src: 'cq', 'qfq' or 'bt_qfq' table family.
            bt_start_date, bt_end_date: date range, used only for 'bt_qfq'.

        Returns:
            DataFrame with the indicator columns, or None on bad arguments or
            an empty query result.
        """
        wx = lg.get_handle()

        if data_src not in ('cq', 'qfq', 'bt_qfq'):
            wx.info("[Class MA_kits: calc] failed to identify the Data Src {}".
                    format(data_src))
            return None

        # Resolve the source table from (data_src, id prefix). '002' must be
        # tested before '00', so the prefix order matters. Table attributes
        # follow the "<data_src>_tname_<prefix>" convention, e.g.
        # self.cq_tname_002 — this replaces three copy-pasted if/elif chains.
        t_name = None
        for prefix in ('002', '00', '30', '60', '68'):
            if pre_id.startswith(prefix):
                t_name = getattr(self, '{}_tname_{}'.format(data_src, prefix))
                break
        if t_name is None:
            wx.info(
                "[Class MA_kits: calc] failed to identify the Stock_id {}".
                format(pre_id))
            return None

        # NOTE(review): SQL is built by string concatenation; inputs are
        # internal, but parameterized queries would be safer.
        if data_src == 'bt_qfq':
            sql = "select id, date, close from " + t_name + " where close > 0 and  date between  " \
                  + bt_start_date + " and "+bt_end_date+" order by id"
        else:
            start_date = (datetime.now() + timedelta(days=-480)).strftime(
                '%Y%m%d')  # look back 480 days from today
            sql = "select id, date, close from " + t_name + " where close > 0 and date >=  " + start_date + " order by id"

        df_ma = self.db._exec_sql(sql)
        if df_ma is None or df_ma.empty:
            return None

        # Per-id rolling moving averages, merged back by row position.
        df_tmp = pd.DataFrame()
        for duration in self.ma_duration:
            df_tmp['MA_' + duration] = df_ma['close'].groupby(
                df_ma['id']).rolling(int(duration)).mean()
        df_tmp.reset_index(drop=True, inplace=True)
        df_ma = pd.merge(df_ma,
                         df_tmp,
                         left_index=True,
                         right_index=True,
                         how='inner')

        # Per-id exponential moving averages (e.g. EMA 12 / 26).
        df_tmp = pd.DataFrame()
        for duration in self.ema_duration:
            df_tmp['EMA_' + duration] = df_ma['close'].groupby(
                df_ma['id']).apply(lambda x: x.ewm(span=int(duration)).mean())
        df_tmp.reset_index(drop=True, inplace=True)
        df_ma = pd.merge(df_ma,
                         df_tmp,
                         left_index=True,
                         right_index=True,
                         how='inner')

        # MACD fast line: difference of the two EMAs.
        df_ma['DIF'] = df_ma['EMA_' + self.ema_duration[0]] - df_ma[
            'EMA_' + self.ema_duration[1]]

        # MACD slow line: 9-span EMA of DIF, per id.
        df_tmp = pd.DataFrame()
        df_tmp['DEA'] = df_ma['DIF'].groupby(
            df_ma['id']).apply(lambda x: x.ewm(span=9).mean())
        df_ma = pd.merge(df_ma,
                         df_tmp,
                         left_index=True,
                         right_index=True,
                         how='inner')

        # Bolling bands: mid = MA(self.bolling), top/bottom = mid +/- 2 std.
        df_tmp = pd.DataFrame()
        df_ma['bolling_mid'] = df_ma['MA_' + self.bolling]
        df_tmp['tmp2'] = df_ma['close'].groupby(df_ma['id']).rolling(
            int(self.bolling)).std()
        df_tmp.reset_index(drop=True, inplace=True)
        df_ma = pd.merge(df_ma,
                         df_tmp,
                         left_index=True,
                         right_index=True,
                         how='inner')
        df_ma['bolling_top'] = df_ma['MA_' + self.bolling] + 2 * df_ma['tmp2']
        df_ma['bolling_bottom'] = df_ma['MA_' +
                                        self.bolling] - 2 * df_ma['tmp2']
        df_ma.drop(columns=['tmp2'], inplace=True)

        df_ma.drop(columns=['close'], inplace=True)
        df_ma.fillna(0, inplace=True)

        if not fresh:
            # Keep only each id's most recent row.
            df_ma[['date']] = df_ma[['date']].apply(pd.to_numeric)
            df_ret = df_ma.iloc[df_ma.groupby(
                ['id']).apply(lambda x: x['date'].idxmax())]
            return df_ret
        return df_ma
コード例 #5
0
 def __del__(self):
     """Close the DB cursor and connection, then log the teardown.

     All errors are swallowed: __del__ may run during interpreter shutdown
     (when module globals like lg can already be gone) or after a failed
     __init__ (when self.db may not exist), and exceptions raised here are
     unrecoverable anyway.
     """
     try:
         wx = lg.get_handle()
         self.db.cursor.close()
         self.db.handle.close()
         wx.info("[OBJ] analyzer __del__() called")
     except Exception:
         pass
コード例 #6
0
    def db_load_into_daily_data(self,
                                dd_df=None,
                                pre_id='',
                                mode='basic',
                                type='cq'):
        """Bulk-load daily-data rows into the table selected by *type*/*pre_id*.

        Args:
            dd_df: DataFrame whose column order matches the REPLACE statement.
            pre_id: id pattern ('00%', '30%', '60%', '002%', '68%') for
                'cq'/'qfq', or a plain id prefix for 'bt_qfq'.
            mode: 'full' loads the extended column set; anything else loads
                the basic set.
            type: 'cq', 'qfq' or 'bt_qfq' table family.

        Returns:
            None on argument errors or after loading; -1 when the input
            DataFrame is empty.
        """
        wx = lg.get_handle()

        if (type == 'cq'):
            # Exact-pattern lookup replaces the original if/elif chain.
            t_name = {
                '00%': self.dd_cq_00,
                '30%': self.dd_cq_30,
                '60%': self.dd_cq_60,
                '002%': self.dd_cq_002,
                '68%': self.dd_cq_68,
            }.get(pre_id)
            if t_name is None:
                wx.info(
                    "[db_load_into_daily_data]: TYPE = cq, pre_id ( {} )is NOT Match"
                    .format(pre_id))
                return None
        elif (type == 'qfq'):
            t_name = {
                '00%': self.dd_qfq_00,
                '30%': self.dd_qfq_30,
                '60%': self.dd_qfq_60,
                '002%': self.dd_qfq_002,
                '68%': self.dd_qfq_68,
            }.get(pre_id)
            if t_name is None:
                wx.info(
                    "[db_load_into_daily_data]: TYPE = qfq, pre_id ( {} )is NOT Match"
                    .format(pre_id))
                return None
        elif (type == 'bt_qfq'):
            # Prefix match; '002' must be tested before '00'.
            t_name = None
            for prefix in ('002', '30', '60', '00', '68'):
                if pre_id.startswith(prefix):
                    t_name = getattr(self, 'bt_dd_qfq_' + prefix)
                    break
            if t_name is None:
                wx.info(
                    "[db_load_into_daily_data]: TYPE = bt_qfq, pre_id ( {} )is NOT Match"
                    .format(pre_id))
                return None
        else:
            # BUG FIX: the original message contained a {} placeholder but
            # never called .format(), so the offending type was not logged.
            wx.info("[db_load_into_daily_data]: TYPE ( {} ) is NOT Match".
                    format(type))
            return None

        if dd_df is None or dd_df.empty or t_name is None:
            wx.info(
                "[db_load_into_daily_data] Err: Daily Data Frame or Table Name is Empty,"
            )
            return -1
        # executemany() needs a sequence of tuples, one per row.
        dd_array = [tuple(row) for row in dd_df.values.tolist()]
        if mode == 'full':
            sql = "REPLACE INTO " + t_name + " SET id=%s, date=%s, open=%s, high=%s, low=%s, " \
                                             "close=%s, pre_close=%s, chg=%s,  pct_chg=%s,vol=%s, amount=%s, " \
                                             "qrr=%s, tor=%s, pct_up_down=%s, pe=%s, pb=%s"
        else:
            # 'basic' and any unknown mode share the basic column set (the
            # original duplicated this SQL in two branches).
            sql = "REPLACE INTO " + t_name + " SET id=%s, date=%s, open=%s, high=%s, low=%s, " \
                                             "close=%s, pre_close=%s, chg=%s,  pct_chg=%s,vol=%s, amount=%s"

        # Load in batches of 1000 rows to bound statement size.
        i_scale = 1000
        for i in range(0, len(dd_array), i_scale):
            tmp_array = dd_array[i:i + i_scale]
            wx.info(
                "[db_load_into_daily_data][{}][{}] Loaded {} ~ {} , total {} ".
                format(type, t_name, i, i + i_scale, len(dd_array)))
            self.db.cursor.executemany(sql, tmp_array)
            self.db.handle.commit()
コード例 #7
0
ファイル: rt_baseline.py プロジェクト: wu7052/real_time_stock
    def get_baseline_big_deal(self, days=3):
        """Build per-stock big-deal baseline bands from recent history.

        Loads ``days`` trailing days of big-deal records via
        ``self.db.get_bl_big_deal`` and, per stock id and time frame, derives
        mean +/- std_times*std envelopes for big-deal counts, big-deal amount
        ratios, and overall buy/sell quantities and amounts.

        :param days: number of trailing days of baseline data to load
        :return: DataFrame with one row per (id, t_frame) and the band columns
        """
        wx = lg.get_handle()
        # Band width multiplier for the mean +/- std envelopes (from config).
        std_times = float(self.h_conf.rd_opt('rt_analysis_rules', 'big_deal_std_times'))

        bl_df = self.db.get_bl_big_deal(days=days)

        def _bands(df, col, hi_name, lo_name):
            # mean +/- std_times*std of `col`, grouped by time frame
            grouped = df[col].groupby(df['t_frame'])
            ave = grouped.mean()
            std = grouped.std().fillna(0)
            hi = ave + std * std_times
            lo = ave - std * std_times
            hi.name = hi_name
            lo.name = lo_name
            return hi, lo

        id_frames = []  # per-id frames, concatenated once at the end
        for stock_id, grp in bl_df.groupby(bl_df['id']):
            each_id_df = grp.fillna(0, inplace=False)

            b_qty_max, b_qty_min = _bands(each_id_df, 'big_qty', 'b_qty_max', 'b_qty_min')
            b_pct_max, b_pct_min = _bands(each_id_df, 'big_abs_pct', 'b_pct_max', 'b_pct_min')
            b_buy_pct_max, b_buy_pct_min = _bands(each_id_df, 'big_buy_pct', 'b_buy_pct_max', 'b_buy_pct_min')
            b_sell_pct_max, b_sell_pct_min = _bands(each_id_df, 'big_sell_pct', 'b_sell_pct_max', 'b_sell_pct_min')

            # NOTE(review): these zero-row filters are cumulative — each one
            # further narrows the rows used by every later band, exactly as the
            # original code did. Confirm the accumulation is intentional.
            each_id_df = each_id_df[~(each_id_df['buy_qty'].isin([0]))]
            all_buy_qty_max, all_buy_qty_min = _bands(each_id_df, 'buy_qty', 'all_buy_qty_max', 'all_buy_qty_min')

            each_id_df = each_id_df[~(each_id_df['buy_amount'].isin([0]))]
            all_buy_amount_max, all_buy_amount_min = _bands(each_id_df, 'buy_amount', 'all_buy_amount_max', 'all_buy_amount_min')

            each_id_df = each_id_df[~(each_id_df['sell_qty'].isin([0]))]
            all_sell_qty_max, all_sell_qty_min = _bands(each_id_df, 'sell_qty', 'all_sell_qty_max', 'all_sell_qty_min')

            each_id_df = each_id_df[~(each_id_df['sell_amount'].isin([0]))]
            all_sell_amount_max, all_sell_amount_min = _bands(each_id_df, 'sell_amount', 'all_sell_amount_max', 'all_sell_amount_min')

            id_df = pd.concat([b_qty_max, b_qty_min, b_pct_max, b_pct_min, b_buy_pct_max, b_buy_pct_min,
                               b_sell_pct_max, b_sell_pct_min, all_buy_qty_max, all_buy_qty_min,
                               all_buy_amount_max, all_buy_amount_min, all_sell_qty_max, all_sell_qty_min,
                               all_sell_amount_max, all_sell_amount_min],
                              axis=1, sort=False)
            id_df['id'] = stock_id
            id_frames.append(id_df)

        # DataFrame.append was removed in pandas 2.x; build with a single concat.
        baseline_bd_df = pd.concat(id_frames) if id_frames else pd.DataFrame()

        baseline_bd_df.reset_index(drop=False, inplace=True)
        baseline_bd_df.rename(columns={'index': 't_frame'}, inplace=True)
        return baseline_bd_df
コード例 #8
0
    def __init__(self,
                 f_conf='',
                 f_start_date='',
                 f_end_date='',
                 data_src='qfq'):
        """Read filter thresholds and table names from config and open the DB.

        :param f_conf: filter config file name (resolved by conf_handler)
        :param f_start_date: backtest start date 'YYYYMMDD' (used for 'bt_qfq')
        :param f_end_date: backtest end date 'YYYYMMDD' (used for 'bt_qfq')
        :param data_src: 'cq' / 'qfq' for live data, 'bt_qfq' for backtesting
        """
        wx = lg.get_handle()
        try:
            self.f_conf = conf_handler(conf=f_conf)
            self.h_conf = conf_handler(conf="stock_analyer.conf")

            # Fixed-filter thresholds.
            self.pe = self.f_conf.rd_opt('filter_fix', 'pe')
            self.total_amount = self.f_conf.rd_opt('filter_fix',
                                                   'total_amount')
            self.high_price = self.f_conf.rd_opt('filter_fix', 'high_price')
            self.days = self.f_conf.rd_opt('filter_fix', 'below_ma55_days')

            # Daily-data tables, 'cq' variant, per board-code prefix.
            self.daily_cq_t_00 = self.h_conf.rd_opt('db', 'daily_table_cq_00')
            self.daily_cq_t_30 = self.h_conf.rd_opt('db', 'daily_table_cq_30')
            self.daily_cq_t_60 = self.h_conf.rd_opt('db', 'daily_table_cq_60')
            self.daily_cq_t_68 = self.h_conf.rd_opt('db', 'daily_table_cq_68')
            self.daily_cq_t_002 = self.h_conf.rd_opt('db',
                                                     'daily_table_cq_002')

            # Daily-data tables, 'qfq' variant.
            self.daily_qfq_t_00 = self.h_conf.rd_opt('db',
                                                     'daily_table_qfq_00')
            self.daily_qfq_t_30 = self.h_conf.rd_opt('db',
                                                     'daily_table_qfq_30')
            self.daily_qfq_t_60 = self.h_conf.rd_opt('db',
                                                     'daily_table_qfq_60')
            self.daily_qfq_t_68 = self.h_conf.rd_opt('db',
                                                     'daily_table_qfq_68')
            self.daily_qfq_t_002 = self.h_conf.rd_opt('db',
                                                      'daily_table_qfq_002')

            # Backtest daily-data tables ('qfq' variant).
            self.bt_daily_qfq_t_00 = self.h_conf.rd_opt(
                'db', 'bt_daily_table_qfq_00')
            self.bt_daily_qfq_t_30 = self.h_conf.rd_opt(
                'db', 'bt_daily_table_qfq_30')
            self.bt_daily_qfq_t_60 = self.h_conf.rd_opt(
                'db', 'bt_daily_table_qfq_60')
            self.bt_daily_qfq_t_68 = self.h_conf.rd_opt(
                'db', 'bt_daily_table_qfq_68')
            self.bt_daily_qfq_t_002 = self.h_conf.rd_opt(
                'db', 'bt_daily_table_qfq_002')

            # Moving-average tables ('cq' variant).
            self.ma_cq_table_60 = self.h_conf.rd_opt('db', 'ma_cq_table_60')
            self.ma_cq_table_30 = self.h_conf.rd_opt('db', 'ma_cq_table_30')
            self.ma_cq_table_00 = self.h_conf.rd_opt('db', 'ma_cq_table_00')
            self.ma_cq_table_002 = self.h_conf.rd_opt('db', 'ma_cq_table_002')
            self.ma_cq_table_68 = self.h_conf.rd_opt('db', 'ma_cq_table_68')

            # Moving-average tables ('qfq' variant).
            self.ma_qfq_table_60 = self.h_conf.rd_opt('db', 'ma_qfq_table_60')
            self.ma_qfq_table_30 = self.h_conf.rd_opt('db', 'ma_qfq_table_30')
            self.ma_qfq_table_00 = self.h_conf.rd_opt('db', 'ma_qfq_table_00')
            self.ma_qfq_table_002 = self.h_conf.rd_opt('db',
                                                       'ma_qfq_table_002')
            self.ma_qfq_table_68 = self.h_conf.rd_opt('db', 'ma_qfq_table_68')

            # Backtest moving-average tables.
            self.ma_bt_qfq_table_00 = self.h_conf.rd_opt(
                'db', 'ma_bt_qfq_table_00')
            self.ma_bt_qfq_table_30 = self.h_conf.rd_opt(
                'db', 'ma_bt_qfq_table_30')
            # BUG FIX: the 60/68/002 attributes previously all read the
            # 'ma_bt_qfq_table_30' key (copy-paste); read their own keys.
            # The config file must define these three keys.
            self.ma_bt_qfq_table_60 = self.h_conf.rd_opt(
                'db', 'ma_bt_qfq_table_60')
            self.ma_bt_qfq_table_68 = self.h_conf.rd_opt(
                'db', 'ma_bt_qfq_table_68')
            self.ma_bt_qfq_table_002 = self.h_conf.rd_opt(
                'db', 'ma_bt_qfq_table_002')

            # Database connection.
            host = self.h_conf.rd_opt('db', 'host')
            database = self.h_conf.rd_opt('db', 'database')
            user = self.h_conf.rd_opt('db', 'user')
            pwd = self.h_conf.rd_opt('db', 'pwd')
            self.db = db_ops(host=host, db=database, user=user, pwd=pwd)
            wx.info("[OBJ] filter_fix : __init__ called")

            # Date range: backtests use the caller-supplied interval; live runs
            # default to the latest trade date and ~240 days back.
            if data_src == 'cq' or data_src == 'qfq':
                # NOTE(review): the latest date is read from the cq table even
                # when data_src == 'qfq' — confirm both tables share dates.
                sql = "SELECT date from " + self.daily_cq_t_00 + " order by date desc limit 1"
                df_date = self.db._exec_sql(sql=sql)
                self.f_end_date = df_date.iloc[0, 0]
                self.f_start_date = (date.today() +
                                     timedelta(days=-240)).strftime('%Y%m%d')
            elif data_src == 'bt_qfq':
                if f_end_date == '' or f_start_date == '':
                    wx.info("[Filter_fix]回测未设置起止日期区间[{}-{}]".format(
                        f_start_date, f_end_date))
                    return
                self.f_end_date = f_end_date
                self.f_start_date = f_start_date

            self.data_src = data_src
        except Exception:
            # Bare `raise` keeps the original traceback (`raise e` reset it).
            raise
コード例 #9
0
ファイル: rt_baseline.py プロジェクト: wu7052/real_time_stock
    def set_baseline_big_deal(self, rt=None, date_str='', time_frame_arr=None, src=''):
        """Build the big-deal baseline row for every stock in ``rt.rt_dict_df``.

        For each stock, ticks inside [time_frame_arr[0], time_frame_arr[1])
        are aggregated into big-deal counts and amount ratios relative to the
        stock's total turnover in that window.

        :param rt: realtime data holder exposing rt_dict_df ({id: tick DataFrame})
        :param date_str: trading date 'YYYYMMDD'; defaults to today when empty
        :param time_frame_arr: ['HH:MM', 'HH:MM'] window begin/end
        :param src: data source tag (unused here; kept for interface parity)
        :return: baseline DataFrame, or None when no stock traded in the window
        """
        wx = lg.get_handle()
        if date_str is None or len(date_str) == 0:
            date_str = datetime.now().strftime("%Y%m%d")

        begin_t_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[0], "%Y%m%d%H:%M")))
        end_t_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[1], "%Y%m%d%H:%M")))

        # Swap begin/end if the window is reversed.
        if begin_t_stamp > end_t_stamp:
            begin_t_stamp, end_t_stamp = end_t_stamp, begin_t_stamp

        rows = []  # one dict per stock; single DataFrame build at the end
        for id in rt.rt_dict_df.keys():

            # Ticks of this stock falling inside the baseline window.
            rt_df = rt.rt_dict_df[id].loc[(rt.rt_dict_df[id]['time_stamp'] >= begin_t_stamp) &
                                          (rt.rt_dict_df[id]['time_stamp'] < end_t_stamp)].copy()

            if rt_df is None or rt_df.empty:
                wx.info("[RT_BL][baseline_big_deal] [{}] 在[{}-{}]期间交易数据为空,开始处理下一支股票".format(id, time_frame_arr[0], time_frame_arr[1]))
                continue

            # Total turnover of this stock in the window.
            rt_df['amount'] = rt_df['vol'] * rt_df['price']
            rt_amount = rt_df['amount'].sum()
            # Signed turnover: type is -1 (sell) / 1 (buy) / 0 (neutral).
            rt_df['io_amount'] = rt_df['amount'] * rt_df['type']

            # Counts and turnover per trade direction.
            rt_sell_qty = rt_df.loc[rt_df["type"] == -1].shape[0]
            rt_buy_qty = rt_df.loc[rt_df["type"] == 1].shape[0]
            rt_air_qty = rt_df.loc[rt_df["type"] == 0].shape[0]
            rt_buy_amount = rt_df.loc[rt_df["type"] == 1].amount.sum()
            rt_sell_amount = rt_df.loc[rt_df["type"] == -1].amount.sum()
            rt_air_amount = rt_df.loc[rt_df["type"] == 0].amount.sum()

            # Big deals: single-tick turnover at or above the configured threshold.
            rt_big_df = rt_df.loc[rt_df['amount'] >= self.rt_big_amount,]
            rt_big_qty = len(rt_big_df)
            # Gross big-deal turnover, and buy/sell-netted turnover.
            rt_big_amount_sum_abs = rt_big_df['amount'].sum()
            rt_big_amount_sum_io = rt_big_df['io_amount'].sum()

            # Big-deal turnover as a share of total turnover.
            big_abs_amount_pct = rt_big_amount_sum_abs / rt_amount
            # Net big-deal buying as a share of total turnover.
            big_io_amount_pct = rt_big_amount_sum_io / rt_amount

            # Sell-side big-deal turnover and its share.
            rt_big_sell_df = rt_big_df.loc[(rt_big_df['type'] < 0),]
            rt_big_sell_amount = rt_big_sell_df['amount'].sum()
            rt_big_sell_amount_pct = rt_big_sell_amount / rt_amount

            # Buy-side big-deal turnover and its share.
            rt_big_buy_df = rt_big_df.loc[(rt_big_df['type'] > 0),]
            rt_big_buy_amount = rt_big_buy_df['amount'].sum()
            rt_big_buy_amount_pct = rt_big_buy_amount / rt_amount

            rows.append({"id": id, "date": date_str, "t_frame": "-".join(time_frame_arr), "big_qty": rt_big_qty,
                         "big_abs_pct": big_abs_amount_pct, "big_io_pct": big_io_amount_pct,
                         "big_buy_pct": rt_big_buy_amount_pct, "big_sell_pct": rt_big_sell_amount_pct,
                         "amount": rt_amount, "sell_qty": rt_sell_qty, "sell_amount": rt_sell_amount,
                         "buy_qty": rt_buy_qty, "buy_amount": rt_buy_amount,
                         "air_qty": rt_air_qty, "air_amount": rt_air_amount})

        if not rows:
            wx.info("[RT_BL][baseline_big_deal] [{}-{}] 大单基线交易数据为空,退出".format(time_frame_arr[0], time_frame_arr[1]))
            return None

        # DataFrame.append was removed in pandas 2.x; build once from the row
        # dicts instead of appending frame-by-frame (also avoids O(n^2) copies).
        baseline_big_deal_df = pd.DataFrame(rows)
        cols = ['id', 'date', 't_frame', 'big_qty', 'big_abs_pct', 'big_io_pct', 'big_buy_pct', 'big_sell_pct',
                'amount', 'sell_qty', 'sell_amount', 'buy_qty', 'buy_amount', 'air_qty', 'air_amount']
        baseline_big_deal_df = baseline_big_deal_df.loc[:, cols]
        baseline_big_deal_df.fillna(0, inplace=True)
        baseline_big_deal_df.reset_index(drop=True, inplace=True)
        wx.info("[RT_BL][baseline_big_deal]大单数据基线[{}-{}]更新完毕".format(time_frame_arr[0], time_frame_arr[1]))
        return baseline_big_deal_df
コード例 #10
0
ファイル: rt_baseline.py プロジェクト: wu7052/real_time_stock
    def set_baseline_PA(self, rt=None, date_str='', time_frame_arr=None, src=''):
        """Build the price/amount (PA) vector baseline for every stock.

        The window [time_frame_arr[0], time_frame_arr[1]) is cut into slices of
        ``self.rt_PA_resample_agg_secs`` seconds; per stock and slice a PA
        vector (length, angle, direction) is derived from turnover and the
        high/low price swing.

        :param rt: realtime data holder exposing rt_dict_df ({id: tick DataFrame})
        :param date_str: trading date 'YYYYMMDD'; defaults to today when empty
        :param time_frame_arr: ['HH:MM', 'HH:MM'] window begin/end
        :param src: data source tag (unused here; kept for interface parity)
        :return: baseline DataFrame, or None when no trades fall in the window
        """
        wx = lg.get_handle()
        if date_str is None or len(date_str) == 0:
            date_str = datetime.now().strftime("%Y%m%d")

        # Window boundaries (typically half an hour, set by rebase_rt_data).
        t_frame_begin_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[0], "%Y%m%d%H:%M")))
        t_frame_end_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[1], "%Y%m%d%H:%M")))

        # Swap begin/end if the window is reversed.
        if t_frame_begin_stamp > t_frame_end_stamp:
            t_frame_begin_stamp, t_frame_end_stamp = t_frame_end_stamp, t_frame_begin_stamp

        rows = []  # one dict per (slice, stock); single DataFrame build at the end

        # Number of rt_PA_resample_agg_secs slices covering the window (ceiling).
        begin_t_stamp = t_frame_begin_stamp
        span = t_frame_end_stamp - t_frame_begin_stamp
        if span % self.rt_PA_resample_agg_secs == 0:
            icount = span / self.rt_PA_resample_agg_secs
        else:
            icount = span // self.rt_PA_resample_agg_secs + 1

        while icount > 0:
            icount = icount - 1
            end_t_stamp = begin_t_stamp + self.rt_PA_resample_agg_secs
            if end_t_stamp > t_frame_end_stamp:
                end_t_stamp = t_frame_end_stamp

            agg_t_frame = [time.strftime("%H:%M", time.localtime(begin_t_stamp)),
                           time.strftime("%H:%M", time.localtime(end_t_stamp))]

            for id in rt.rt_dict_df.keys():

                # Ticks of this stock inside the current slice.
                rt_df = rt.rt_dict_df[id].loc[(rt.rt_dict_df[id]['time_stamp'] >= begin_t_stamp) & (
                            rt.rt_dict_df[id]['time_stamp'] < end_t_stamp)].copy()

                if rt_df is None or len(rt_df) == 0:
                    wx.info(
                        "[RT_BL][baseline_PA][{}] 在[{}-{}] 空交易数据,开始处理下一支股票".format(id, agg_t_frame[0], agg_t_frame[1]))
                    continue
                rt_df = rt_df.sort_values(by="time_stamp", ascending=True)
                rt_df['amount'] = rt_df['price'] * rt_df['vol']
                rt_df['price'] = pd.to_numeric(rt_df['price'])

                id_baseline_PA_amount = rt_df['amount'].sum()
                id_baseline_PA_pct_chg = rt_df['price'].max() / rt_df['price'].min() - 1
                # Scale the swing so it is commensurate with turnover in the vector.
                id_baseline_PA_pct_chg_enhanced = id_baseline_PA_pct_chg * 500000000

                # PA vector length.
                id_baseline_PA = pow(pow(id_baseline_PA_amount, 2) + pow(id_baseline_PA_pct_chg_enhanced, 2), 0.5)

                # PA vector direction: 1 when the high-price row comes after the
                # low-price row, -1 when before (compares index labels of the
                # max/min price rows), 0 when identical.
                if rt_df.price.idxmax() > rt_df.price.idxmin():
                    id_baseline_PA_pct_dir = 1
                elif rt_df.price.idxmax() < rt_df.price.idxmin():
                    id_baseline_PA_pct_dir = -1
                else:
                    id_baseline_PA_pct_dir = 0

                # PA vector angle (amount per unit of scaled swing):
                # small value -> small turnover with a large swing;
                # large value -> large turnover with a small swing.
                if id_baseline_PA_pct_chg_enhanced == 0:
                    id_baseline_PA_angle = 0
                else:
                    id_baseline_PA_angle = id_baseline_PA_amount / id_baseline_PA_pct_chg_enhanced

                rows.append({"id": id, "date": date_str, "t_frame": "-".join(agg_t_frame),
                             "sample_time": self.rt_PA_resample_secs,
                             "bl_pa": id_baseline_PA,
                             "bl_pa_angle": id_baseline_PA_angle,
                             "bl_pct": id_baseline_PA_pct_chg,
                             "bl_amount": id_baseline_PA_amount,
                             "bl_dir": id_baseline_PA_pct_dir})

            # Next slice starts where this one ended.
            begin_t_stamp = end_t_stamp
            if end_t_stamp >= t_frame_end_stamp:
                break

        if not rows:
            wx.info("[RT_BL][baseline_PA] [{}-{}] 量价基线交易数据为空,退出".format(time_frame_arr[0], time_frame_arr[1]))
            return None

        # DataFrame.append was removed in pandas 2.x; build once from row dicts.
        baseline_PA_df = pd.DataFrame(rows)
        cols = ['id', 'date', 't_frame', 'sample_time',
                'bl_pa', 'bl_pa_angle', 'bl_pct', 'bl_amount', 'bl_dir']
        baseline_PA_df = baseline_PA_df.loc[:, cols]
        baseline_PA_df.fillna(0, inplace=True)
        baseline_PA_df.reset_index(drop=True, inplace=True)
        wx.info("[RT_BL][baseline_PA]      PA数据基线[{}-{}]更新完毕".format(agg_t_frame[0], agg_t_frame[1]))
        return baseline_PA_df
コード例 #11
0
ファイル: rt_baseline.py プロジェクト: wu7052/real_time_stock
    def set_baseline_PA_old(self, rt=None, date_str='', time_frame_arr=None, src=''):
        """Legacy price/amount (PA) baseline builder.

        Cuts the window into ``rt_PA_resample_agg_secs`` slices and, per stock
        and slice, derives up/down PA-vector statistics (length, angle, pct,
        amount) after trimming extremes via ``self._pa_df_``. Superseded by
        ``set_baseline_PA``; code kept byte-identical for reference.

        :param rt: realtime data holder exposing rt_dict_df ({id: tick DataFrame})
        :param date_str: trading date 'YYYYMMDD'; defaults to today when empty
        :param time_frame_arr: ['HH:MM', 'HH:MM'] window begin/end; mutated in
            place when src == '163' (start shifted back 5 minutes)
        :param src: data source tag; '163' triggers the 5-minute start shift
        :return: baseline DataFrame, or None when no trades fall in the window
        """
        wx = lg.get_handle()
        if date_str is None or len(date_str) == 0:
            date_str = datetime.now().strftime("%Y%m%d")
        wx.info("[RT_BL][baseline_PA] [{}][{}--{}]量价基线设立".format(date_str, time_frame_arr[0],time_frame_arr[1]))

        # Half-hour window boundaries, supplied by rebase_rt_data.
        t_frame_begin_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[0], "%Y%m%d%H:%M")))
        t_frame_end_stamp = int(time.mktime(time.strptime(date_str + time_frame_arr[1], "%Y%m%d%H:%M")))

        # 163 data source: move the start time back 5 minutes.
        if src == '163':
            t_frame_begin_stamp -= 300
            time_frame_arr[0] = time.strftime("%H:%M", time.localtime(t_frame_begin_stamp))

        # Swap begin/end if the window is reversed.
        if t_frame_begin_stamp > t_frame_end_stamp:
            t_frame_begin_stamp, t_frame_end_stamp = t_frame_end_stamp, t_frame_begin_stamp

        baseline_PA_df = pd.DataFrame()

        # Aggregate resampled data in slices of rt_PA_resample_agg_secs.
        begin_t_stamp = t_frame_begin_stamp
        if (t_frame_end_stamp - t_frame_begin_stamp) % self.rt_PA_resample_agg_secs == 0:
            icount = (t_frame_end_stamp - t_frame_begin_stamp) / self.rt_PA_resample_agg_secs
        else:
            icount = (t_frame_end_stamp - t_frame_begin_stamp) // self.rt_PA_resample_agg_secs +1

        while icount > 0:
            icount = icount -1
            end_t_stamp = begin_t_stamp + self.rt_PA_resample_agg_secs
            if end_t_stamp > t_frame_end_stamp:
                end_t_stamp = t_frame_end_stamp

            agg_t_frame = [time.strftime("%H:%M", time.localtime(begin_t_stamp)),
                           time.strftime("%H:%M", time.localtime(end_t_stamp))]

            for id in rt.rt_dict_df.keys():
                id_baseline_PA_df = pd.DataFrame()

                wx.info("[RT_BL][baseline_PA]开始更新[{}]的PA数据基线[{}-{}]".format(id, agg_t_frame[0], agg_t_frame[1]))

                # Ticks of this stock inside the rt_PA_resample_agg_secs-long
                # slice (10 minutes per config).
                rt_df = rt.rt_dict_df[id].loc[ (rt.rt_dict_df[id]['time_stamp'] >= begin_t_stamp) & ( rt.rt_dict_df[id]['time_stamp'] <= end_t_stamp)].copy()

                # Add a pandas datetime column to serve as the rolling/resample index.
                rt_df['pd_time'] = date_str +" "+ rt_df['time_str']
                rt_df['pd_time'] = pd.to_datetime(rt_df['pd_time'], format="%Y%m%d %H:%M:%S")

                if rt_df is None or len(rt_df) == 0:
                    wx.info("[RT_BL][baseline_PA][{}] 在[{}-{}] 空交易数据,开始处理下一支股票".format(id, agg_t_frame[0], agg_t_frame[1]))
                    continue
                rt_df = rt_df.sort_values(by="time_stamp", ascending=True)
                rt_df['amount'] = rt_df['price']*rt_df['vol']
                rt_df.set_index('pd_time', inplace=True)
                rt_df['price'] = pd.to_numeric(rt_df['price'])

                # NOTE(review): assigning scalars to columns of an empty
                # DataFrame yields a zero-row frame, so the derived stats below
                # stay empty — presumably why this method was replaced by
                # set_baseline_PA. Confirm before reviving this code path.
                id_baseline_PA_df['amount'] = rt_df['amount'].sum()
                id_baseline_PA_df['min_price'] = rt_df['price'].min()
                id_baseline_PA_df['max_price'] = rt_df['price'].max()
                id_baseline_PA_df['pct_chg'] = id_baseline_PA_df['max_price'] / id_baseline_PA_df['min_price'] - 1
                id_baseline_PA_df['pct_chg_enhanced'] = (id_baseline_PA_df['max_price'] / id_baseline_PA_df['min_price'] - 1) * 500000000

                # Time(high price) - Time(low price): > 0 rising, < 0 falling.
                id_baseline_PA_df['pct_up_down'] = rt_df.price.idxmax() - rt_df.price.idxmin()
                # Convert the datetime64 difference to a number so the slice's
                # rise/fall can be compared against zero.
                id_baseline_PA_df['pct_up_down'] = pd.to_numeric(rt_df.price.idxmax() - rt_df.price.idxmin())

                """
                id_baseline_PA_df['amount'] = rt_df['amount'].resample(self.rt_PA_resample_secs, label='left', closed='left' ).sum()
                id_baseline_PA_df['min_price'] = rt_df['price'].resample(self.rt_PA_resample_secs, label='left', closed='left'  ).min()
                id_baseline_PA_df['max_price'] = rt_df['price'].resample(self.rt_PA_resample_secs, label='left', closed='left'  ).max()
                id_baseline_PA_df['pct_chg'] = id_baseline_PA_df['max_price'] / id_baseline_PA_df['min_price'] -1
                id_baseline_PA_df['pct_chg_enhanced'] = (id_baseline_PA_df['max_price'] / id_baseline_PA_df['min_price'] -1 )*500000000
                
                # Time(High Price) - Time (Low Price) > 0 上涨; >0 下跌, (高价时间 - 低价时间)
                id_baseline_PA_df['pct_up_down'] = rt_df['price'].resample(self.rt_PA_resample_secs, label='left', closed='left'  ).apply(lambda x: x.idxmax()- x.idxmin()
                                                        if len(x) > 0
                                                        else (pd.to_datetime(0)-pd.to_datetime(0)))
                """

                id_baseline_PA_df.fillna(0, inplace=True)

                # Drop slice records where amount == 0 and price change == 0.
                id_baseline_PA_df = id_baseline_PA_df.loc[(id_baseline_PA_df['amount'] > 0)&(id_baseline_PA_df['pct_chg_enhanced'] >0),]

                # Length of the price/amount (PA) vector.
                id_baseline_PA_df['pa_vector'] = pow( pow(id_baseline_PA_df['amount'],2) + pow(id_baseline_PA_df['pct_chg_enhanced'],2),0.5)
                # Direction of the PA vector.
                id_baseline_PA_df['pct_dir'] = id_baseline_PA_df['pct_up_down'].apply(lambda x: x/abs(x) if x != 0 else 0)
                # Angle of the PA vector: X axis = price change pct, Y axis = turnover.
                # Smaller value: small turnover, large price swing.
                # Larger value: large turnover, small price swing.
                id_baseline_PA_df['pa_angle'] = id_baseline_PA_df['amount'] / id_baseline_PA_df['pct_chg_enhanced']

                # Up vectors: trim extremes, then take mean / std.
                id_up_baseline_PA_df = id_baseline_PA_df.loc[id_baseline_PA_df['pct_up_down']>0,]
                if id_up_baseline_PA_df is not None and len(id_up_baseline_PA_df) >0:
                    id_up_baseline_PA_df = self._pa_df_(id_up_baseline_PA_df, col='pa_vector') # extreme-value trim (keep within 1 std) before averaging
                    id_up_baseline_PA_df = self._pa_df_(id_up_baseline_PA_df, col='pa_angle') # extreme-value trim (keep within 1 std) before averaging
                    id_up_baseline_PA_df = self._pa_df_(id_up_baseline_PA_df, col='amount') # extreme-value trim (keep within 1 std) before averaging
                    id_up_baseline_PA_df = self._pa_df_(id_up_baseline_PA_df, col='pct_chg') # extreme-value trim (keep within 1 std) before averaging
                    id_up_bl_PA_ave = id_up_baseline_PA_df['pa_vector'].mean() # mean vector length
                    id_up_bl_pct_ave = id_up_baseline_PA_df['pct_chg'].mean()  # mean gain pct
                    id_up_bl_amount_ave = id_up_baseline_PA_df['amount'].mean()  # mean turnover for gains
                    id_up_bl_PA_angle_ave = id_up_baseline_PA_df['pa_angle'].mean() # mean vector angle
                else:
                    id_up_bl_PA_ave = 0
                    id_up_bl_pct_ave = 0
                    id_up_bl_amount_ave = 0
                    id_up_bl_PA_angle_ave = 0

                # Down vectors: trim extremes, then take mean / std.
                id_down_baseline_PA_df = id_baseline_PA_df.loc[id_baseline_PA_df['pct_up_down']<0,]
                if id_down_baseline_PA_df is not None and len(id_down_baseline_PA_df) >0:
                    id_down_baseline_PA_df = self._pa_df_(id_down_baseline_PA_df, col='pa_vector')
                    id_down_baseline_PA_df = self._pa_df_(id_down_baseline_PA_df, col='pa_angle')
                    id_down_baseline_PA_df = self._pa_df_(id_down_baseline_PA_df, col='amount')
                    id_down_baseline_PA_df = self._pa_df_(id_down_baseline_PA_df, col='pct_chg')
                    id_down_bl_PA_ave = id_down_baseline_PA_df['pa_vector'].mean()
                    id_down_bl_pct_ave = -1 * id_down_baseline_PA_df['pct_chg'].mean()  # mean loss pct, converted to negative
                    id_down_bl_amount_ave = id_down_baseline_PA_df['amount'].mean()  # mean turnover for losses
                    id_down_bl_PA_angle_ave = -1 * id_down_baseline_PA_df['pa_angle'].mean() # mean vector angle
                else:
                    id_down_bl_PA_ave = 0
                    id_down_bl_pct_ave = 0
                    id_down_bl_amount_ave = 0
                    id_down_bl_PA_angle_ave = 0

                pa_baseline = {"id":id, "date":date_str,"t_frame":"-".join(agg_t_frame), "sample_time":self.rt_PA_resample_secs,
                               "up_bl_pa_ave":id_up_bl_PA_ave,
                               "up_bl_pct_ave":id_up_bl_pct_ave , "up_bl_amount_ave":id_up_bl_amount_ave ,
                               "down_bl_pa_ave":id_down_bl_PA_ave,
                               "down_bl_pct_ave": id_down_bl_pct_ave, "down_bl_amount_ave": id_down_bl_amount_ave,
                               "up_bl_pa_angle_ave":id_up_bl_PA_angle_ave,
                               "down_bl_pa_angle_ave":id_down_bl_PA_angle_ave}

                if baseline_PA_df is None or baseline_PA_df.empty:
                    baseline_PA_df = pd.DataFrame([pa_baseline])
                else:
                    baseline_PA_df = baseline_PA_df.append(pd.DataFrame([pa_baseline]))

            # While loop: set the start stamp of the next slice.
            begin_t_stamp = end_t_stamp
            if end_t_stamp >= t_frame_end_stamp:
                break
            # Rolling-window sampling was abandoned in favor of fixed slices.
            # id_baseline_PA_df['amount'] = rt_df['amount'].rolling('20s').sum()
            # id_baseline_PA_df['max_price_index']  = pd.rolling_max(rt_df['price'], freq='20s')#.apply(self.__pct_up_down__, raw=False)
            # id_baseline_PA_df['max_price_index']  = rt_df['price'].rolling_max('20s')#.apply(self.__pct_up_down__, raw=False)
            # id_baseline_PA_df['min_price_index']  = rt_df['price'].rolling_min('20s')#.apply(_pct_up_down_, raw=False)
            # id_baseline_PA_df['pct_up_down'] = id_baseline_PA_df.apply(self.__pct_up_down__)


        if baseline_PA_df is None or baseline_PA_df.empty:
            wx.info("[RT_BL][baseline_PA] [{}-{}] 量价基线交易数据为空,退出".format(time_frame_arr[0], time_frame_arr[1]))
            return None
        else:
            cols = ['id', 'date', 't_frame', 'sample_time',
                    'up_bl_pa_ave', 'up_bl_pa_angle_ave', 'up_bl_pct_ave', 'up_bl_amount_ave',
                    'down_bl_pa_ave', 'down_bl_pa_angle_ave', 'down_bl_pct_ave', 'down_bl_amount_ave']

            baseline_PA_df = baseline_PA_df.loc[:, cols]
            baseline_PA_df.fillna(0, inplace=True)
            baseline_PA_df.reset_index(drop=True, inplace=True)

            #
            # wx.info("[RT_BL][baseline_PA] 去除极值前记录数[{}]".format(len(baseline_PA_df)))
            # baseline_PA_df =self._clr_extreme_data(pa_df=baseline_PA_df)
            # wx.info("[RT_BL][baseline_PA] 去除极值后记录数[{}]".format(len(baseline_PA_df)))

            wx.info("[RT_BL][baseline_PA] [{}-{}] 量价数据基线更新完毕".format(time_frame_arr[0], time_frame_arr[1]))
            return baseline_PA_df
コード例 #12
0
 def trans_day(self):
     """Query the trade calendar covering yesterday and today via self.api."""
     wx = lg.get_handle()
     d_today = date.today()
     today = d_today.strftime('%Y%m%d')
     yesterday = (d_today + timedelta(days=-1)).strftime('%Y%m%d')
     wx.info("Yesterday:{} ---- Today date: {}".format(yesterday,today))
     return self.api.trade_cal(exchange='', start_date=yesterday, end_date=today)
コード例 #13
0
 def __del__(self):
     """Log that this db_ops instance is being destroyed."""
     handle = lg.get_handle()
     handle.info("db_ops : {}: __del__ called".format(self))
コード例 #14
0
    def json_parse(self, id=None, json_str=None):
        """Parse a JSONP tick-data reply into ``self.rt_dict_df[id]``.

        Strips the jQuery JSONP wrapper, extracts time/price/vol/direction via
        jsonpath, normalizes units (price / 1000, vol * 100, type mapped to
        -1 sell / 1 buy), and merges the ticks into ``self.rt_dict_df[id]``.

        :param id: stock id used as the key into self.rt_dict_df
        :param json_str: raw JSONP response text
        :return: [min_time_str, max_time_str] of the parsed ticks,
                 [None, None] when the jsonpath query matches nothing,
                 or None on missing input
        """
        wx = lg.get_handle()
        if id is None:
            wx.info("[RT_East][json_parse] 传入参数 股票ID 为空,退出")
            return None
        # BUG FIX: guard before re.sub — the original only checked json_str
        # after passing it to re.sub, which raised TypeError on None input
        # (and left json_obj unbound).
        if json_str is None:
            wx.info("[RT_East][json_parse] JSON 对象为空,退出")
            return None
        # Drop the JSONP wrapper: leading "jQueryXXX(" and trailing ");".
        json_str = re.sub(r'jQuery\w+\(', r'', json_str)[:-2]
        json_obj = json.loads(json_str)
        if json_obj is None:
            wx.info("[RT_East][json_parse] JSON 对象为空,退出")
            return None

        time_str = jsonpath(json_obj, '$..data..data..t')
        # jsonpath returns False (not an empty list) when nothing matches.
        if time_str is False:
            return [None, None]
        price_str = jsonpath(json_obj, '$..data..data..p')
        vol_str = jsonpath(json_obj, '$..data..data..v')
        dir_str = jsonpath(json_obj, '$..data..data..bs')

        df1 = pd.DataFrame([dir_str, price_str, vol_str, time_str]).T
        df1.rename(columns={
            0: 'type',
            1: 'price',
            2: 'vol',
            3: 'time_str'
        },
                   inplace=True)
        # Drop call-auction orders that did not trade (type 4).
        df1 = df1[~(df1['type'].isin([4]))]
        # Map to the 163 convention: -1 sell (inner), 1 buy (outer).
        df1['type'] = df1['type'].map({1: int(-1), 2: int(1)})
        # 'HHMMSS'-style digit string -> 'HH:MM:SS' in two colon insertions.
        df1['time_str'] = df1['time_str'].astype('str')
        df1['time_str'] = df1['time_str'].apply(
            lambda x: ':'.join([x[:-4], x[-4:]]))
        df1['time_str'] = df1['time_str'].apply(
            lambda x: ':'.join([x[:-2], x[-2:]]))
        df1['time_stamp'] = df1['time_str'].apply(lambda x: int(
            time.mktime(time.strptime(self.date_str + x, '%Y%m%d%H:%M:%S'))))
        # Normalize units: price is reported in thousandths, vol in lots of 100.
        df1['price'] = df1['price'] / 1000
        df1['vol'] = df1['vol'] * 100

        ret_time_arr = [
            time.strftime('%H:%M:%S', time.localtime(df1.time_stamp.min())),
            time.strftime('%H:%M:%S', time.localtime(df1.time_stamp.max()))
        ]
        if id in self.rt_dict_df.keys():
            # DataFrame.append was removed in pandas 2.x; use concat instead.
            self.rt_dict_df[id] = pd.concat(
                [self.rt_dict_df[id], df1], sort=False).drop_duplicates()
            self.rt_dict_df[id] = self.rt_dict_df[id].sort_values(
                by="time_str", ascending=False)
        else:
            self.rt_dict_df[id] = df1

        return ret_time_arr
コード例 #15
0
ファイル: functions.py プロジェクト: wu7052/real_time_stock
def rebase_rt_data(rt=None, src='', date_str = ''):
    """Rebuild (backfill) a full day of tick data and baseline statistics.

    Iterates over fixed intraday time slices, pulls per-stock tick data from
    the chosen source into the shared ``rt`` object, derives big-deal and
    price/amount (PA) baseline rows per slice via ``rt_bl``, accumulates the
    per-stock cumulative sums, and finally loads everything into the database.

    :param rt: real-time data object; supplies ``id_arr`` and caches ticks
    :param src: data source, '163' (5-minute stepped queries) or 'east'
                (one query per time slice)
    :param date_str: trading day 'YYYYMMDD'; defaults to today when empty
    :return: None when the stock list is empty; otherwise no explicit value
    """
    wx = lg.get_handle()

    # Stock id list is carried inside the rt object.
    if rt.id_arr is None:
        wx.info("[Rebase_RT_Data] 股票列表为空,退出")
        return None

    # Default to today's date when no rebase date was given.
    if date_str is None or len(date_str) == 0:
        date_str = (datetime.today()).strftime('%Y%m%d')
        wx.info("[Rebase_RT_Data] 未指定回溯的日期,默认使用 {}".format(date_str))

    # Intraday time slices used as query windows for the tick data.
    begin_time_arr= ['09:25','09:30','09:40','09:50','10:00','10:30','11:00','13:00','13:30','14:00','14:30','14:40','14:50']
    end_time_arr  = ['09:30','09:40','09:50','10:00','10:30','11:00','11:30','13:30','14:00','14:30','14:40','14:50','15:00']

    # All per-slice big-deal baseline rows; de-extremed, loaded into the DB in one pass.
    final_bl_big_deal_df = pd.DataFrame()
    # All per-slice price/amount baseline rows; loaded into the DB in one pass.
    final_bl_pa_df = pd.DataFrame()

    bl = rt_bl()
    for index in range(len(begin_time_arr)):
        time_inc = 5
        begin_time_stamp = int(time.mktime(time.strptime(date_str+begin_time_arr[index], "%Y%m%d%H:%M")))
        end_time_stamp = int(time.mktime(time.strptime(date_str+end_time_arr[index], "%Y%m%d%H:%M")))
        while begin_time_stamp  < end_time_stamp:
            # The rt object is created by the caller and passed in; fetched ticks accumulate inside it.
            if src == '163':
                time_str = time.strftime("%H:%M:%S", time.localtime(begin_time_stamp+300))  # 163 uses a 5-minute offset
                wx.info("[Rebase_RT_Data] 从[{}] 获得[{}] 支股票的交易数据 [{}]-[{}] ".format(src, len(rt.id_arr), date_str, time_str ))
            else:
                time_str = time.strftime("%H:%M:%S", time.localtime(begin_time_stamp))

            for icount, id in enumerate(rt.id_arr):
                if src == '163': # query each stock in 5-minute steps until the step passes end_time_arr[index]
                    json_str = rt.get_json_str(id=id, time_str=time_str)
                    time_range = rt.json_parse(id=id, json_str=json_str)
                    if time_range is None:
                        wx.info("[Rebase_RT_Data][{}/{}] [{}] [{}-{}] 交易数据不存在".
                                format(icount + 1, len(rt.id_arr), id,date_str, time_str))
                    else:
                        wx.info("[Rebase_RT_Data][{}/{}] {} [{}--{}]逐笔交易数据[{}-{}]".
                                format(icount + 1, len(rt.id_arr), id, time_range[0], time_range[1], date_str, time_str))
                    time.sleep(0.5)
                elif src == 'east':
                    # 'east': query each stock once for the whole slice
                    # begin_time_arr[index] - end_time_arr[index], store into the RT object,
                    # then move on to the next slice after all stocks are done.
                    wx.info("[Rebase_RT_Data][{}/{}] ==================> {} 开始获取目标时间段[{}---{}-{}]".
                            format(icount + 1, len(rt.id_arr), id, date_str, begin_time_arr[index], end_time_arr[index]))
                    rt.get_json_str(id=id, time_str = begin_time_arr[index] +"-"+end_time_arr[index])

            #  start time of the next iteration
            if src == '163':
                begin_time_stamp += time_inc*60
            elif src == 'east':
                break  # all stocks done for this slice; proceed to the next slice

        # Big-deal baseline: one row per stock per slice [begin_time_arr, end_time_arr].
        baseline_big_deal_df = bl.set_baseline_big_deal(rt=rt, date_str=date_str, time_frame_arr=[begin_time_arr[index], end_time_arr[index]], src= src)
        if final_bl_big_deal_df is None or len(final_bl_big_deal_df) == 0:
            final_bl_big_deal_df = baseline_big_deal_df
        else:
            final_bl_big_deal_df = final_bl_big_deal_df.append(baseline_big_deal_df)

        # Price/amount baseline: one row per stock per slice [begin_time_arr, end_time_arr].
        baseline_PA_df = bl.set_baseline_PA(rt=rt, date_str=date_str, time_frame_arr=[begin_time_arr[index], end_time_arr[index]], src= src)
        if final_bl_pa_df is None or len(final_bl_pa_df) == 0:
            final_bl_pa_df = baseline_PA_df
        else:
            final_bl_pa_df = final_bl_pa_df.append(baseline_PA_df)

        if src == '163':
            # Trim the rt object's cache; keep only the most recent minutes of ticks.
            rt.clr_rt_data(minutes=35)
        elif src == 'east':
            rt.clr_rt_data(stamp = end_time_stamp)
    # end of the per-slice for loop

    # Whole trading day finished: build per-stock cumulative sums over the slices.
    final_cu_bl_big_deal_df = pd.DataFrame()
    for each_id in final_bl_big_deal_df.groupby(final_bl_big_deal_df['id']):
        df_each_id = each_id[1].sort_values(by="t_frame", ascending=True)
        df_each_id['cu_big_qty'] = df_each_id['big_qty'].cumsum()
        df_each_id['cu_amount'] = df_each_id['amount'].cumsum()
        df_each_id['cu_sell_qty'] = df_each_id['sell_qty'].cumsum()
        df_each_id['cu_sell_amount'] = df_each_id['sell_amount'].cumsum()
        df_each_id['cu_buy_qty'] = df_each_id['buy_qty'].cumsum()
        df_each_id['cu_buy_amount'] = df_each_id['buy_amount'].cumsum()
        df_each_id['cu_air_qty'] = df_each_id['air_qty'].cumsum()
        df_each_id['cu_air_amount'] = df_each_id['air_amount'].cumsum()
        if final_cu_bl_big_deal_df is None or len(final_cu_bl_big_deal_df) == 0:
            final_cu_bl_big_deal_df = df_each_id
        else:
            final_cu_bl_big_deal_df = final_cu_bl_big_deal_df.append(df_each_id)

    # Fixed column order expected by the DB loader.
    cols = ['id', 'date', 't_frame', 'big_qty', 'big_abs_pct', 'big_io_pct', 'big_buy_pct', 'big_sell_pct',
            'amount', 'sell_qty', 'sell_amount', 'buy_qty', 'buy_amount', 'air_qty', 'air_amount',
            'cu_big_qty','cu_amount','cu_sell_qty','cu_sell_amount','cu_buy_qty','cu_buy_amount',
            'cu_air_qty','cu_air_amount']

    final_cu_bl_big_deal_df = final_cu_bl_big_deal_df.loc[:, cols]
    final_cu_bl_big_deal_df.fillna(0, inplace=True)
    final_cu_bl_big_deal_df.reset_index(drop=True, inplace=True)

    # Load the baseline data into the database.
    bl.db_load_baseline_big_deal(df=final_cu_bl_big_deal_df)

    # Optionally strip extreme values from the full-day PA data first:
    # final_bl_pa_df = bl._clr_extreme_data(pa_df=final_bl_pa_df)
    bl.db_load_baseline_PA(df=final_bl_pa_df)
コード例 #16
0
 def __del__(self):
     """Log object teardown for debugging purposes."""
     log_handle = lg.get_handle()
     log_handle.info("[OBJ] filter_fix : __del__ called")
コード例 #17
0
ファイル: functions.py プロジェクト: wu7052/real_time_stock
def get_rt_data(rt=None, src='', date_str=''):
    """Fetch today's incremental tick data for every stock in ``rt.id_arr``.

    Resumes from the position stored in the per-day record file, pulls data
    from '163' (stepping in 5-minute increments) or 'east' (one query over
    the whole remaining window), then appends the new progress marker to the
    record file.

    :param rt: real-time data object (stock list, caches, record file handle)
    :param src: data source, '163' or 'east'
    :param date_str: trading day 'YYYYMMDD'; defaults to today when empty
    :return: None when input is bad or today is not a trading day; False when
             the record file shows today was already fully fetched; otherwise
             the start timestamp for the subsequent analysis step
    """
    wx = lg.get_handle()

    # Stock id list is carried inside the rt object.
    if rt.id_arr is None:
        wx.info("[Get_RT_Data]: 股票列表为空,退出")
        return None

    if date_str is None or len(date_str) == 0:
        date_str = (datetime.today()).strftime('%Y%m%d')
        wx.info("[Get_RT_Data] 未指定交易日期,默认使用 {}".format(date_str))

    my_timer = wx_timer(date_str=date_str)

    # Skip non-trading days entirely.
    if my_timer.is_trading_date(date_str=date_str):
        wx.info("{}是交易日,继续运行".format(date_str))
    else:
        wx.info("[Get_RT_Data]:{}不是交易日,退出实时交易数据获取 ".format(date_str))
        return None

    time_inc = 5  # step size in minutes

    # Read the end time of the previous fetch from the record file; it becomes
    # this run's start time.  Two possibilities: 1) a timestamp rounded to the
    # half hour, 2) an empty file, handled as 09:25.
    # Record file name: <date>_<source>.
    begin_time_str = rt._get_last_record_()
    if src == '163':
        if begin_time_str is None :
            begin_time_str = '09:25'
        elif begin_time_str == '15:00':
            wx.info("[Get_RT_Data] 今日文件记录已查询过所有实时交易,退出")
            return False
    elif src == 'east':
        if begin_time_str is None :
            begin_time_str = '09:25'
        else: # parse the JSON record: {'time_str': ..., 'page': {...}}
            record_dict = json.loads(begin_time_str)
            begin_time_str = record_dict['time_str']
            if begin_time_str == '15:00':
                wx.info("[Get_RT_Data] 今日文件记录已查询过所有实时交易,退出")
                return False
            rt.record_page_dict = record_dict['page']

    # Start timestamp of this fetch; clamp into trading hours when outside them.
    begin_time_stamp = int(time.mktime(time.strptime(date_str+begin_time_str[:5], "%Y%m%d%H:%M")))
    # Returned to the caller as the start time for ana_rt_data.
    ret_begin_time_stamp = begin_time_stamp
    ret_zone = my_timer.tell_time_zone(t_stamp = begin_time_stamp)
    if ret_zone[0] < 0:
        begin_time_stamp = ret_zone[1]

    # Current time as the end; pull back into trading hours when outside them.
    end_time_stamp = int(time.time())
    end_time_str = time.strftime('%H:%M', time.localtime(time.time()))
    ret_zone = my_timer.tell_time_zone(t_stamp = end_time_stamp)
    record_stamp = ret_zone[2]
    record_str = ''
    if ret_zone[0] < 0:
        end_time_stamp = ret_zone[1]

    # if begin_time_stamp > end_time_stamp:
    #     wx.info("[Get_RT_Data] 查询间隔不足5分钟,需等待{}秒".format(begin_time_stamp-end_time_stamp))
    #     time.sleep(begin_time_stamp-end_time_stamp)

    while begin_time_stamp < end_time_stamp:
        # The rt object is created by the caller; fetched ticks accumulate inside it.
        if src == '163': # 163 queries use a 5-minute offset
            time_str = time.strftime("%H:%M:%S", time.localtime(begin_time_stamp+300))
        else:
            time_str = time.strftime("%H:%M:%S", time.localtime(begin_time_stamp))

        wx.info("[Get_RT_Data] 从[{}] 查询 [{}]支股票的 交易数据 [{}] ".format(src, len(rt.id_arr), time_str))

        for icount, id in enumerate(rt.id_arr):
             if src == '163':
                json_str = rt.get_json_str(id=id, time_str=time_str)
                time_range = rt.json_parse(id=id, json_str=json_str)
                if time_range is None:
                    wx.info("[Get_RT_Data][{}/{}] {} [{}]逐笔交易数据 为空".format(icount + 1, len(rt.id_arr), id, time_str))
                else:
                    wx.info("[Get_RT_Data][{}/{}] {} [{}--{}]逐笔交易数据[{}]".format(icount + 1, len(rt.id_arr), id, time_range[0],
                                                                                time_range[1], time_str))
             elif src == 'east':
                # 'east': each stock is queried over the whole window
                # begin_time_str - end_time_str in one go, stored into the RT
                # object; all stocks are done before moving on.
                wx.info("[Get_RT_Data][{}/{}] {} 开始获取目标时间段[{}---{}-{}]".
                        format(icount + 1, len(rt.id_arr), id, date_str, begin_time_str, end_time_str))

                # ids added mid-day start at page 0
                if id not in rt.record_page_dict.keys():
                    rt.record_page_dict[id] =0
                rt.get_json_str(id=id, time_str=begin_time_str + "-" + end_time_str, page_num = rt.record_page_dict[id])

        if src == '163':
            # Compute the next loop's start time and the file-record time, then adjust.
            begin_time_stamp += time_inc*60
            ret_zone = my_timer.tell_time_zone(t_stamp=begin_time_stamp)
            record_stamp = ret_zone[2]
            if ret_zone[0] == -3:
                begin_time_stamp = ret_zone[1]
            # begin_time_stamp == end_time_stamp: loop one more time.
            # begin_time_stamp > end_time_stamp by less than time_inc*60
            # seconds: snap begin to end.
            if begin_time_stamp >= end_time_stamp  and begin_time_stamp-end_time_stamp < time_inc*60:
                begin_time_stamp = end_time_stamp
        elif src == 'east':
            record_dict={'time_str':time.strftime('%H:%M', time.localtime(record_stamp)),
                         'page': rt.record_page_dict}
            record_str = json.dumps(record_dict)
            break

    # Persist the latest fetch position to the record file.
    if src == '163':
        time_str = time.strftime("%H:%M", time.localtime(record_stamp))
        rt.f_record.write('\n'+time_str)
    elif src == 'east':
        if len(record_str) != 0:
            rt.f_record.write('\n'+record_str)
    rt.f_record.flush()
    return ret_begin_time_stamp
コード例 #18
0
class db_ops:
    """MySQL persistence layer for the real-time stock analyzer.

    Connection settings are read from ``rt_analyer.conf``.  All reads go
    through :meth:`_exec_sql`; loaders use ``REPLACE INTO`` so reruns are
    idempotent.

    NOTE(review): SQL text is built by concatenating config-sourced table
    names and internal values.  Inputs are not user-controlled here, but
    parameterized queries would be safer if that ever changes.
    """

    # Class-level logger handle, created once at class-definition time.
    wx = lg.get_handle()

    def __init__(self):
        """Read DB settings from rt_analyer.conf and open a pymysql connection.

        Raises when the password is missing or the connection fails; the
        error is logged before being re-raised.
        """
        wx = lg.get_handle()
        try:
            self.h_conf = conf_handler(conf="rt_analyer.conf")
            host = self.h_conf.rd_opt('db', 'host')
            database = self.h_conf.rd_opt('db', 'database')
            user = self.h_conf.rd_opt('db', 'pwd') if False else self.h_conf.rd_opt('db', 'user')
            pwd = self.h_conf.rd_opt('db', 'pwd')

            if pwd is None:
                # Fix: the original formatted this message with the undefined
                # name `db`, raising NameError instead of logging.
                wx.info("[Err DB_OP]===> {0}:{1}:{2} need password ".format(
                    host, database, user))
                raise Exception("Password is Null")
            else:
                self.config = {
                    'host': host,
                    'user': user,
                    'password': pwd,
                    'database': database,
                    'charset': 'utf8',
                    'port': 3306  # port must be an int, not a str
                }
                self.handle = pymysql.connect(**self.config)
                self.cursor = self.handle.cursor()
        except Exception as e:
            wx.info("Err occured in DB_OP __init__{}".format(e))
            raise e

    def __del__(self):
        # Intentionally a no-op; the pymysql handle is released on GC.
        pass

    def get_trade_date(self, back_days=1):
        """Return the oldest of the ``back_days`` most recent trade dates.

        With ``back_days=1`` this is simply the latest trade date recorded
        in the 60-board daily table.
        """
        self.rt_conf = conf_handler(conf="rt_analyer.conf")
        tname = self.rt_conf.rd_opt('db', 'daily_table_cq_60')
        sql = "select distinct date from " + tname + " order by date desc limit " + str(
            back_days)
        df = self._exec_sql(sql=sql)
        df.sort_values(by='date', ascending=False, inplace=True)
        # Last row after the descending sort == earliest date in the window.
        trade_date = df.iloc[-1][0]
        return trade_date

    def select_table(self, t_name=None, where="", order="", limit=100):
        """Run ``select * from t_name <where> <order> limit N``; return a DataFrame or None."""
        wx = lg.get_handle()
        if t_name is None:
            # Fix: the original fell through with `sql` unbound and raised
            # NameError on the log line below.
            wx.info("[select_table] table name is None, skip query")
            return None
        sql = "select * from " + t_name + " " + where + " " + order + " limit " + str(
            limit)
        wx.info("[select_table] {}".format(sql))
        df_ret = self._exec_sql(sql)
        return df_ret

    def _exec_sql(self, sql=None):
        """Execute ``sql`` and return the result set as a DataFrame.

        Returns None when ``sql`` is None or the query yields no rows.
        """
        wx = lg.get_handle()
        if sql is None:
            return None
        iCount = self.cursor.execute(sql)
        self.handle.commit()
        if iCount > 0:
            arr_ret = self.cursor.fetchall()
            if len(arr_ret) == 0:
                wx.info("[_exec_sql] Empty Dataframe Returned : SQL {}".format(
                    sql))
                return None
            columnDes = self.cursor.description  # column metadata from the cursor
            columnNames = [columnDes[i][0] for i in range(len(columnDes))]
            df_ret = pd.DataFrame([list(i) for i in arr_ret],
                                  columns=columnNames)
            return df_ret
        else:
            wx.info(
                "[_exec_sql] Empty Dataframe Returned : SQL {}".format(sql))
            return None

    def db_load_into_RT_BL_Big_Deal(self, df=None):
        """REPLACE big-deal baseline rows of ``df`` into the baseline table.

        :return: -1 when ``df`` is None.
        """
        wx = lg.get_handle()
        t_name = self.h_conf.rd_opt('db', 'rt_baseline_big')
        if df is None:
            wx.info("[db_load_into_RT_BL_Big_Deal]Err: dataframe is Empty,")
            return -1
        # executemany expects a sequence of tuples
        df_array = [tuple(row) for row in df.values.tolist()]

        sql = "REPLACE INTO "+t_name+" SET id=%s, date=%s, t_frame=%s, big_qty=%s, big_abs_pct=%s, big_io_pct=%s, " \
              "big_buy_pct=%s, big_sell_pct=%s, amount=%s, sell_qty=%s, sell_amount=%s, buy_qty=%s, buy_amount=%s," \
              "air_qty=%s, air_amount=%s, cu_big_qty=%s, cu_amount=%s, cu_sell_qty=%s, cu_sell_amount=%s, " \
                                     "cu_buy_qty=%s, cu_buy_amount=%s, cu_air_qty=%s, cu_air_amount=%s"
        self.cursor.executemany(sql, df_array)
        self.handle.commit()

    # Load real-time threshold-alert records into the message table.
    def db_load_RT_MSG(self, df=None):
        """REPLACE real-time alert rows of ``df`` into the rt_message table."""
        wx = lg.get_handle()
        t_name = self.h_conf.rd_opt('db', 'rt_message')

        if df is None or df.empty:
            wx.info("[db_load_RT_MSG] 实时信息 DataFrame 为空,退出")
            return
        df_array = [tuple(row) for row in df.values.tolist()]

        sql = "REPLACE INTO " + t_name + " SET id=%s, date=%s, t_frame=%s, type=%s, msg=%s"
        self.cursor.executemany(sql, df_array)
        self.handle.commit()

    def db_load_into_RT_BL_PA(self, df=None):
        """REPLACE price/amount (PA) baseline rows of ``df`` into the PA table.

        :return: -1 when ``df`` is None.
        """
        wx = lg.get_handle()
        t_name = self.h_conf.rd_opt('db', 'rt_baseline_PA')
        if df is None:
            wx.info("[db_load_into_RT_BL_PA]Err: dataframe is Empty,")
            return -1
        df_array = [tuple(row) for row in df.values.tolist()]

        sql = "REPLACE INTO "+t_name+" SET id=%s, date=%s, t_frame=%s, sample_time=%s, " \
                                    "bl_pa=%s, bl_pa_ang=%s, " \
                                    "bl_pct=%s, bl_amount=%s, bl_dir=%s"
        self.cursor.executemany(sql, df_array)
        self.handle.commit()

    def get_bl_pa(self, days=1):
        """Return PA baseline rows for the ``days`` most recent trade dates."""
        t_name = self.h_conf.rd_opt('db', 'rt_baseline_PA')
        sql = "select distinct date from " + t_name + "  order by date desc limit " + str(
            days)
        date_df = self._exec_sql(sql=sql)
        data_arr = date_df.date.values.tolist()
        date_str = ",".join(data_arr)
        sql = "select * from " + t_name + " where date in (" + date_str + ")"
        ret_df = self._exec_sql(sql=sql)
        return ret_df

    def get_bl_big_deal(self, days=3):
        """Return big-deal baseline rows for the ``days`` most recent trade dates."""
        wx = lg.get_handle()
        t_name = self.h_conf.rd_opt('db', 'rt_baseline_big')
        sql = "select distinct date from " + t_name + "  order by date desc limit " + str(
            days)
        date_df = self._exec_sql(sql=sql)
        data_arr = date_df.date.values.tolist()
        date_str = ",".join(data_arr)

        sql = "select * from " + t_name + " where date in (" + date_str + ")"
        ret_df = self._exec_sql(sql=sql)
        return ret_df

    def get_cu_big_deal_date(self, date_str='', t_frame=''):
        """Return per-stock cumulative big-deal sums for ``date_str`` before ``t_frame``."""
        if date_str is None or len(date_str) == 0:
            date_str = (date.today()).strftime('%Y%m%d')
        t_name = self.h_conf.rd_opt('db', 'rt_baseline_big')
        sql = "select id, sum(big_qty) as cu_big_qty, sum(amount) as cu_amount, " \
              "sum(buy_qty) as cu_buy_qty, sum(buy_amount) as cu_buy_amount , " \
              "sum(sell_qty) as cu_sell_qty, sum(sell_amount) as cu_sell_amount " \
              "from "+ t_name+" where date= "+date_str+" and t_frame < '"+t_frame+"' group by id"
        ret_df = self._exec_sql(sql=sql)
        return ret_df

    def db_load_into_NOTICE(self, df=None):
        """REPLACE announcement rows of ``df`` into the notice table."""
        wx = lg.get_handle()
        t_name = self.h_conf.rd_opt('db', 'notice_table')

        if df is None or df.empty:
            wx.info("[db_load_into_NOTICE] 公告信息为空,退出")
            return
        # DB column limit: keep at most 199 characters of the title.
        df['title'] = df['title'].apply(lambda x: x[:199])
        df_array = [tuple(row) for row in df.values.tolist()]

        sql = "REPLACE INTO " + t_name + " SET ann_time=%s, id=%s, title=%s"
        self.cursor.executemany(sql, df_array)
        self.handle.commit()
コード例 #19
0
ファイル: filter_curve.py プロジェクト: wu7052/stock
    def _filter_strength(self, df_side_left=None, df_side_right=None):
        """Score the strength of both sides of the price high.

        The original implementation duplicated the same config-driven
        weighted-count scoring three times (and shadowed the outer loop
        variable ``key`` in the inner loop); the scoring is now factored
        into :meth:`_power_score`.

        :param df_side_left: trading rows left of the high (newest first)
        :param df_side_right: trading rows right of the high
        :return: ``[high_left_power, high_right_power, cur_left_power]``,
                 or None when either input DataFrame is missing
        """
        wx = lg.get_handle()
        if df_side_left is None:
            wx.info("[Filter Fix] filter_Left_Side DataFrame is Empty")
            return None

        if df_side_right is None:
            wx.info("[Filter Fix] filter_Right_Side DataFrame is Empty")
            return None

        # Sort the right side by date, newest first.
        df_side_right = df_side_right.sort_values('date',
                                                  ascending=False).copy()

        # Bucket the daily pct change into integers so occurrences can be
        # counted; clamp to [-10, 10] to match the weight-table key range.
        df_side_left['pct_chg_int'] = df_side_left['pct_chg'].astype(
            int).copy()
        df_side_left.loc[df_side_left[(df_side_left['pct_chg'] >= 11)].index,
                         ['pct_chg_int']] = 10
        df_side_left.loc[df_side_left[(df_side_left['pct_chg'] <= -11)].index,
                         ['pct_chg_int']] = -10

        df_side_right['pct_chg_int'] = df_side_right['pct_chg'].astype(
            int).copy()
        df_side_right.loc[df_side_right[(
            df_side_right['pct_chg'] >= 11)].index, ['pct_chg_int']] = 10
        df_side_right.loc[df_side_right[(
            df_side_right['pct_chg'] <= -11)].index, ['pct_chg_int']] = -10

        # Score left of the high over the most recent rows.
        high_left_power = self._power_score(
            sec='filter_high_left_power_table', df=df_side_left,
            from_tail=False)
        # Score the latest trading days (left of today) on the right side.
        cur_left_power = self._power_score(
            sec='filter_cur_left_power_table', df=df_side_right,
            from_tail=False)
        # Score right of the high over the rows closest to the high.
        high_right_power = self._power_score(
            sec='filter_high_right_power_table', df=df_side_right,
            from_tail=True)

        return [high_left_power, high_right_power, cur_left_power]

    def _power_score(self, sec='', df=None, from_tail=False):
        """Sum config-weighted counts of integer pct-change buckets.

        Each entry of config section ``sec`` maps a day-window to a string
        ``"b1,b2,...#w1,w2,..."`` (buckets and their weights).  For every
        window, count occurrences of each bucket in the window's rows and
        accumulate count * weight; unlisted buckets in [-10, 10] weigh 0.

        :param sec: config section name holding the weight tables
        :param df: DataFrame with a prepared ``pct_chg_int`` column
        :param from_tail: take the window with ``tail`` instead of ``head``
        :return: total weighted score (int)
        """
        power_conf = dict(self.f_conf.rd_sec(sec=sec))
        zero_power_table = dict(zip(range(-10, 11),
                                    [0] * 21))  # weights default to 0
        total_power = 0
        for day_window in power_conf.keys():
            power_item = power_conf[day_window].split("#")
            buckets = [int(x) for x in power_item[0].split(",")]
            weights = [int(x) for x in power_item[1].split(",")]
            weight_table = zero_power_table.copy()
            weight_table.update(dict(zip(buckets, weights)))  # apply config weights

            # The key is a number of days: the window of rows to score.
            if from_tail:
                df_window = df.tail(int(day_window))
            else:
                df_window = df.head(int(day_window))
            pct_count = dict(df_window['pct_chg_int'].value_counts())

            for pct in pct_count.keys():
                total_power += pct_count[pct] * weight_table[pct]
        return total_power
コード例 #20
0
    def db_load_into_ind_xxx(self,
                             ind_type='ma',
                             ind_df=None,
                             stock_type=None,
                             data_src='cq'):
        """Bulk-load indicator rows (``ma`` or ``psy``) into the per-board table.

        The target table is looked up in config from indicator type, data
        source and the board prefix of ``stock_type`` ('002' is tested before
        '00' so SME-board codes are not swallowed by the generic match).

        :param ind_type: indicator kind, 'ma' or 'psy'
        :param ind_df: DataFrame whose column order matches the REPLACE stmt
        :param stock_type: board prefix string ('002', '00', '30', '60', '68')
        :param data_src: data source tag used in the config key, e.g. 'cq'
        :return: -1 on bad input or unknown stock_type; None for an unknown
                 ``ind_type``; otherwise loads rows in batches of 1000
        """
        wx = lg.get_handle()
        if stock_type is None:
            wx.info(
                "[db_load_into_ind_xxx] Err: {} Stock Type is Empty,".format(
                    ind_type))
            return -1

        if ind_df is None or ind_df.empty:
            wx.info(
                "[db_load_into_ind_xxx] Err: {} {} Data Frame is None or Empty"
                .format(ind_type, stock_type))
            return -1
        # executemany expects a sequence of tuples
        ind_arry = [tuple(row) for row in ind_df.values.tolist()]

        tname_00 = self.h_conf.rd_opt('db',
                                      ind_type + '_' + data_src + '_table_00')
        tname_30 = self.h_conf.rd_opt('db',
                                      ind_type + '_' + data_src + '_table_30')
        tname_60 = self.h_conf.rd_opt('db',
                                      ind_type + '_' + data_src + '_table_60')
        tname_002 = self.h_conf.rd_opt(
            'db', ind_type + '_' + data_src + '_table_002')
        tname_68 = self.h_conf.rd_opt('db',
                                      ind_type + '_' + data_src + '_table_68')

        # '002' must be matched before the broader '00' prefix.
        if re.match('002', stock_type) is not None:
            t_name = tname_002
        elif re.match('00', stock_type) is not None:
            t_name = tname_00
        elif re.match('30', stock_type) is not None:
            t_name = tname_30
        elif re.match('60', stock_type) is not None:
            t_name = tname_60
        elif re.match('68', stock_type) is not None:
            t_name = tname_68
        else:
            wx.info(
                "[db_load_into_ind_xxx] stock_type does NOT match ('002','00','30','60')"
            )
            # Fix: the original fell through with `t_name` unbound and raised
            # NameError when building the SQL below.
            return -1

        if ind_type == 'ma':
            sql = "REPLACE INTO " + t_name + " SET id=%s, date=%s, ma_5=%s, ma_10=%s, ma_20=%s, ma_60=%s, " \
                                             "ma_13=%s, ma_34=%s, ma_55=%s, ema_12=%s, ema_26=%s, DIF=%s, DEA=%s, " \
                                             "bolling_mid=%s, bolling_top=%s, bolling_bottom=%s"
        elif ind_type == 'psy':
            sql = "REPLACE INTO " + t_name + " SET id=%s, date=%s, psy=%s"
        else:
            return None

        # Load in chunks of 1000 rows to keep each statement bounded.
        i_scale = 1000
        for i in range(0, len(ind_arry), i_scale):
            tmp_arry = ind_arry[i:i + i_scale]
            wx.info(
                "[db_load_into_ind_xxx][{}] Loaded {} ~ {} , total {} ".format(
                    t_name, i, i + i_scale, len(ind_arry)))
            self.db.cursor.executemany(sql, tmp_arry)
            self.db.handle.commit()
コード例 #21
0
ファイル: filter_curve.py プロジェクト: wu7052/stock
    def filter_side(self):
        wx = lg.get_handle()

        # if self.data_src == 'bt_qfq':
        #     start_date = self.f_start_date
        # elif self.data_src == 'cq' or self.data_src == 'qfq':
        #     start_date = (date.today() + timedelta(days=-180)).strftime('%Y%m%d')
        # else:
        #     wx.info("[Filter_fix] [filter_side] data src [{}], Err. ".format(self.data_src))
        #     return

        if self.data_src == 'qfq':
            tname_arr = [
                self.daily_qfq_t_002, self.daily_qfq_t_68, self.daily_qfq_t_00,
                self.daily_qfq_t_30, self.daily_qfq_t_60
            ]
        elif self.data_src == 'bt_qfq':
            tname_arr = [
                self.bt_daily_qfq_t_00, self.bt_daily_qfq_t_30,
                self.bt_daily_qfq_t_60, self.bt_daily_qfq_t_002,
                self.bt_daily_qfq_t_68
            ]
        elif self.data_src == 'cq':
            tname_arr = [
                self.daily_cq_t_00, self.daily_cq_t_30, self.daily_cq_t_60,
                self.daily_cq_t_002, self.daily_cq_t_68
            ]
        arr_filter_side = []
        for t_name in tname_arr:
            sql = "select id, date, high, low, close, 100*(close-pre_close)/pre_close as pct_chg from " + t_name + \
                  " where date >  " + self.f_start_date

            df_all_tmp = self.db._exec_sql(sql=sql)
            if df_all_tmp is None or df_all_tmp.empty:
                wx.info("[filter_side] {} 数据表未检出任何记录,继续处理...".format(t_name))
                continue

            df_all = df_all_tmp[-df_all_tmp.high.isin([0])]
            # 按股票ID分组,再按high排序,取每组的第一条记录,即该股票ID的最高价记录
            df_high = df_all.sort_values('high', ascending=False).groupby(
                'id', as_index=False).first()
            wx.info(
                "[Filter_Curve] Completed Located the Highest Price Point in {}"
                .format(t_name))
            # df_left_side = df_all.groupby(['id']).apply(lambda x: self._find_left(x, df_high))

            df_groupby_id = df_all.groupby(['id'])
            for count, df_each_stock in enumerate(df_groupby_id):

                # 获取最高点 左侧 \ 右侧 的数据记录
                df_tmp_left = self._acquire_sides(id=df_each_stock[0],
                                                  x=df_each_stock[1].copy(),
                                                  df_high=df_high,
                                                  dir="left")
                df_tmp_right = self._acquire_sides(id=df_each_stock[0],
                                                   x=df_each_stock[1].copy(),
                                                   df_high=df_high,
                                                   dir="right")

                # 判断 左侧涨幅 是否满足 filter_growth_below_pct 要求
                # 判断 右侧是否 创新低,条件低于左侧 3%
                ret_LR = self._filter_LR(df_side_left=df_tmp_left,
                                         df_side_right=df_tmp_right)
                if ret_LR == 1:
                    wx.info(
                        "[Filter_Curve LR] {} / {} Filter {} LR ... [LR FOUND!!!] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                elif ret_LR == 0:  # 左侧涨幅不达标
                    wx.info(
                        "[Filter_Curve LR] {} / {} Filter {} Left Raise Low... [LR PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue
                elif ret_LR == -1:  # 右侧创新低
                    wx.info(
                        "[Filter_Curve LR] {} / {} Filter {} Right New Lowest ... [LR PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue

                # 计算该股票 收盘价 接近的最小黄金分割比例
                min_golden_pct = self._filter_golden_price(
                    df_right_side=df_tmp_right)
                if min_golden_pct > self.filter_golden_pct_request:
                    wx.info(
                        "[Filter_Curve Golden] {} / {} Filter {}  ........ [Golden PCT FOUND!!!] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                else:
                    wx.info(
                        "[Filter_Curve Golden] {} / {} Filter {}  ........ [Golden PCT PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue

                # 根据规则过滤 高点左侧\右侧 、
                arr_power = self._filter_strength(df_side_left=df_tmp_left,
                                                  df_side_right=df_tmp_right)

                # wx.info("{}:{}".format(df_each_stock[0],arr_power))
                # 配置文件中所有周期的 Power 得分累计
                if arr_power[0] < int(self.filter_high_left_power_request):
                    wx.info(
                        "[Filter_Curve Power] {} / {} Filter {} High Left Side Power... [PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue
                elif arr_power[1] < int(self.filter_high_right_power_request):
                    wx.info(
                        "[Filter_Curve Power] {} / {} Filter {} High Right Side Power... [PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue
                elif arr_power[2] < int(self.filter_cur_left_power_request):
                    wx.info(
                        "[Filter_Curve Power] {} / {} Filter {} Cur Left Side Power... [PASS] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                    continue
                else:
                    wx.info(
                        "[Filter_Curve Power] {} / {} Filter {}  Power........ [FOUND!] "
                        .format(count + 1, len(df_groupby_id),
                                df_each_stock[0]))
                arr_power.insert(0, df_each_stock[0])

                arr_power.append(min_golden_pct)
                arr_filter_side.append(arr_power)

        df_filter_side = pd.DataFrame(
            arr_filter_side,
            columns=['股票代码', '高点左侧得分', '高点右侧得分', '今日左侧得分', '收盘价低于(高低点黄金比例)'])
        df_filter_side['得分合计'] = df_filter_side['高点左侧得分'] + df_filter_side[
            '高点右侧得分'] + df_filter_side['今日左侧得分']
        order = [
            '股票代码', '得分合计', '高点左侧得分', '高点右侧得分', '今日左侧得分', '收盘价低于(高低点黄金比例)'
        ]
        df_filter_side = df_filter_side[order]
        df_filter_side = df_filter_side.sort_values('得分合计', ascending=False)
        df_filter_side.reset_index(drop=True, inplace=True)
        wx.info("[Filter_Curve Side] Completed Filter the Left & Right Side ")

        return df_filter_side
コード例 #22
0
    def rt_cmp_big_baseline(self,
                            date_str='',
                            begin_time_stamp=0,
                            rt=None,
                            big_bl_df=None):
        """Slice each stock's realtime tick data into fixed time frames,
        compute per-frame big-deal / buy-sell statistics, then append
        cumulative columns seeded from the DB's running totals.

        Args:
            date_str: trading day as 'YYYYMMDD'; empty means today.
            begin_time_stamp: epoch seconds to start slicing from; 0 means
                start from each stock's earliest tick.
            rt: realtime-data object exposing ``rt_dict_df`` (dict of
                stock-id -> tick DataFrame with time_stamp/vol/price/type
                columns — type: +1 buy / -1 sell / 0 neutral).
            big_bl_df: baseline DataFrame; currently unused (kept for
                interface compatibility, see disabled check below).

        Returns:
            DataFrame of per-frame and cumulative statistics, or None when
            ``rt`` holds no data.
        """
        wx = lg.get_handle()
        rt_dict_df = rt.rt_dict_df
        # if date_str is None or len(date_str) == 0:
        #     date_str = (date.today()).strftime('%Y%m%d')
        if rt_dict_df is None:
            wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] 实时数据字典 是空,退出")
            return None

        # big_bl_df (big-deal baseline) is not needed for now; no comparison
        # is done here — results are visualized directly in the BI tool.
        # if big_bl_df is None:
        #     wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] 基线数据 是空,退出")
        #     return None

        # Align begin_time_stamp to the start of its containing time frame;
        # cu_t_frame names the frame whose DB row seeds the cumulative totals.
        [cu_begin_stamp, cu_begin_time_str
         ] = self.rt_df_find_start_stamp(rt_stamp=begin_time_stamp)
        cu_end_time_str = self.t_frame_dict.get(cu_begin_time_str)[0]
        cu_t_frame = cu_begin_time_str + '-' + cu_end_time_str

        rt_big_deal_df = pd.DataFrame()
        for id in rt_dict_df.keys():
            if rt_dict_df[id] is None:
                wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] {} 未产生实时交易数据,进入下一支股票".
                        format(id))
                continue
            if date_str is None or len(date_str) == 0:  # process today's data
                date_str = (date.today()).strftime('%Y%m%d')
            # else:  # process historical data, used for debugging only
            # Start-boundary alignment:
            # the previous realtime query's end time (this query's start) is
            # read from a record file; the stamp is either rounded to the
            # half hour or, for an empty file, defaults to 09:25.
            # Record file name: date_source
            # frame_begin_time_str = '09:25'
            # frame_begin_stamp = int(time.mktime(time.strptime(date_str + frame_begin_time_str, '%Y%m%d%H:%M')))

            # Align the start boundary to a frame start.
            if begin_time_stamp == 0:
                [frame_begin_stamp,
                 frame_begin_time_str] = self.rt_df_find_start_stamp(
                     rt_stamp=rt_dict_df[id]['time_stamp'].min())
            else:
                [frame_begin_stamp, frame_begin_time_str
                 ] = self.rt_df_find_start_stamp(rt_stamp=begin_time_stamp)

            end_stamp = rt_dict_df[id].time_stamp.max()
            # Slice rt data frame by frame and compute each slice's big-deal
            # statistics (baseline comparison / DB import happen elsewhere).
            while frame_begin_stamp < end_stamp:
                # frame_begin_time_str = time.strftime("%H:%M", time.localtime(frame_begin_stamp))
                frame_end_time_str = self.t_frame_dict.get(
                    frame_begin_time_str)[0]
                if frame_end_time_str is None:
                    wx.info(
                        "[Rt_Ana][Rt_Cmp_Big_Baseline] {} [{}] 起始时间不属于正常范围!!!!"
                        .format(id, frame_begin_time_str))
                    break
                else:
                    t_frame = frame_begin_time_str + "-" + frame_end_time_str
                    frame_end_stamp = int(
                        time.mktime(
                            time.strptime(date_str + frame_end_time_str,
                                          '%Y%m%d%H:%M')))

                # Even if collection has not reached the frame's end yet we
                # still aggregate what we have, so this check is disabled.
                # if frame_end_stamp > end_stamp:
                #     wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] {} {} 已超出本次获取的实时数据范围,进入下一支股票".format(id, t_frame))
                #     break

                if frame_end_time_str == '15:00':  # after the 15:00 close a final trade record can arrive a few seconds past 15:00
                    rt_df = rt.rt_dict_df[id].loc[(
                        rt.rt_dict_df[id]['time_stamp'] >=
                        frame_begin_stamp)].copy()
                else:
                    rt_df = rt.rt_dict_df[id].loc[
                        (rt.rt_dict_df[id]['time_stamp'] >= frame_begin_stamp)
                        & (rt.rt_dict_df[id]['time_stamp'] < frame_end_stamp
                           )].copy()

                if rt_df is None or rt_df.empty:
                    wx.info(
                        "[Rt_Ana][Rt_Cmp_Big_Baseline] [{}] 在[{}]期间交易数据为空,开始处理下一支股票"
                        .format(id, t_frame))
                    break

                # Total turnover of this id within the frame.
                rt_df['amount'] = rt_df['vol'] * rt_df['price']
                rt_amount = rt_df['amount'].sum()
                # Signed turnover: positive for buys, negative for sells.
                rt_df['io_amount'] = rt_df['amount'] * rt_df['type']

                # Trade counts and turnover for sell / buy / neutral sides.
                rt_sell_qty = rt_df.loc[rt_df["type"] == -1].shape[0]
                rt_buy_qty = rt_df.loc[rt_df["type"] == 1].shape[0]
                rt_air_qty = rt_df.loc[rt_df["type"] == 0].shape[0]
                rt_buy_amount = rt_df.loc[rt_df["type"] == 1].amount.sum()
                rt_sell_amount = rt_df.loc[rt_df["type"] == -1].amount.sum()
                rt_air_amount = rt_df.loc[rt_df["type"] == 0].amount.sum()

                # Big deals: ticks whose turnover reaches the threshold.
                rt_big_df = rt_df.loc[rt_df['amount'] >= self.rt_big_amount, ]
                # Number of big deals.
                rt_big_qty = len(rt_big_df)
                # Total big-deal turnover (buy + sell, unsigned).
                rt_big_amount_sum_abs = rt_big_df['amount'].sum()
                # Net big-deal turnover (buys minus sells).
                rt_big_amount_sum_io = rt_big_df['io_amount'].sum()

                # Share of big-deal turnover in total turnover.
                big_abs_amount_pct = rt_big_amount_sum_abs / rt_amount

                # Share of net big-deal buying in total turnover.
                big_io_amount_pct = rt_big_amount_sum_io / rt_amount

                # Average big-deal turnover per minute (disabled).
                # rt_ave_big_amount_per_min_abs = rt_big_amount_sum_abs/((rt_end_time-rt_begin_time)/60)

                # Big-deal turnover on the sell side.
                rt_big_sell_df = rt_big_df.loc[(rt_big_df['type'] < 0), ]
                rt_big_sell_amount = rt_big_sell_df['amount'].sum()
                rt_big_sell_amount_pct = rt_big_sell_amount / rt_amount

                # Big-deal turnover on the buy side.
                rt_big_buy_df = rt_big_df.loc[(rt_big_df['type'] > 0), ]
                rt_big_buy_amount = rt_big_buy_df['amount'].sum()
                rt_big_buy_amount_pct = rt_big_buy_amount / rt_amount

                rt_data = {
                    "id": id,
                    "date": date_str,
                    "t_frame": t_frame,
                    "big_qty": rt_big_qty,
                    "big_abs_pct": big_abs_amount_pct,
                    "big_io_pct": big_io_amount_pct,
                    "big_buy_pct": rt_big_buy_amount_pct,
                    "big_sell_pct": rt_big_sell_amount_pct,
                    "amount": rt_amount,
                    "sell_qty": rt_sell_qty,
                    "sell_amount": rt_sell_amount,
                    "buy_qty": rt_buy_qty,
                    "buy_amount": rt_buy_amount,
                    "air_qty": rt_air_qty,
                    "air_amount": rt_air_amount
                }

                if rt_big_deal_df is None or rt_big_deal_df.empty:
                    rt_big_deal_df = pd.DataFrame([rt_data])
                else:
                    rt_big_deal_df = rt_big_deal_df.append(
                        pd.DataFrame([rt_data]))

                wx.info(
                    "[Rt_Ana][Rt_Cmp_Big_Baseline] [{}] 时间段[{}] 数据处理完毕".format(
                        id, t_frame))

                # Advance to the next frame; an empty next-start string marks
                # the end of the trading day for this stock.
                frame_begin_time_str = self.t_frame_dict.get(
                    frame_begin_time_str)[1]
                if len(frame_begin_time_str) == 0:
                    wx.info(
                        "[Rt_Ana][Rt_Cmp_Big_Baseline] {} {} 已处理完毕,进入下一支股票".
                        format(id, t_frame))
                    break
                frame_begin_stamp = int(
                    time.mktime(
                        time.strptime(date_str + frame_begin_time_str,
                                      '%Y%m%d%H:%M')))

        # Seed cumulative totals from the DB row for this frame; fall back to
        # all-zero totals when the DB has none.
        cu_big_df = self.db.get_cu_big_deal_date(date_str=date_str,
                                                 t_frame=cu_t_frame)
        if cu_big_df is None:
            id_arr = list(rt_dict_df.keys())
            cu_big_df = pd.DataFrame({
                'id': list(id_arr),
                'cu_big_qty': [float(0)] * len(id_arr),
                'cu_amount': [float(0)] * len(id_arr),
                'cu_sell_qty': [float(0)] * len(id_arr),
                'cu_sell_amount': [float(0)] * len(id_arr),
                'cu_buy_qty': [float(0)] * len(id_arr),
                'cu_buy_amount': [float(0)] * len(id_arr)
            })

        final_cu_big_deal_df = pd.DataFrame()
        # Per stock: sort frames chronologically and build cumulative columns
        # on top of the DB-seeded starting totals.
        for each_id in rt_big_deal_df.groupby(rt_big_deal_df['id']):
            df_each_id = each_id[1].sort_values(by="t_frame", ascending=True)
            df_each_id['cu_big_qty'] = df_each_id['big_qty'].cumsum() + \
                                       cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_big_qty'].values[0]
            df_each_id['cu_amount'] = df_each_id['amount'].cumsum()+ \
                                      cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_amount'].values[0]
            df_each_id['cu_sell_qty'] = df_each_id['sell_qty'].cumsum()+\
                                        cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_sell_qty'].values[0]
            df_each_id['cu_sell_amount'] = df_each_id['sell_amount'].cumsum()+ \
                                           cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_sell_amount'].values[0]
            df_each_id['cu_buy_qty'] = df_each_id['buy_qty'].cumsum()+ \
                                       cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_buy_qty'].values[0]
            df_each_id['cu_buy_amount'] = df_each_id['buy_amount'].cumsum()+ \
                                          cu_big_df.loc[cu_big_df['id']==each_id[0]]['cu_buy_amount'].values[0]
            df_each_id['cu_air_qty'] = df_each_id['air_qty'].cumsum()
            df_each_id['cu_air_amount'] = df_each_id['air_amount'].cumsum()
            if final_cu_big_deal_df is None or len(final_cu_big_deal_df) == 0:
                final_cu_big_deal_df = df_each_id
            else:
                final_cu_big_deal_df = final_cu_big_deal_df.append(df_each_id)

        cols = [
            'id', 'date', 't_frame', 'big_qty', 'big_abs_pct', 'big_io_pct',
            'big_buy_pct', 'big_sell_pct', 'amount', 'sell_qty', 'sell_amount',
            'buy_qty', 'buy_amount', 'air_qty', 'air_amount', 'cu_big_qty',
            'cu_amount', 'cu_sell_qty', 'cu_sell_amount', 'cu_buy_qty',
            'cu_buy_amount', 'cu_air_qty', 'cu_air_amount'
        ]

        final_cu_big_deal_df = final_cu_big_deal_df.loc[:, cols]
        final_cu_big_deal_df.fillna(0, inplace=True)
        final_cu_big_deal_df.reset_index(drop=True, inplace=True)
        wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] [{}] 导入 [{}] 条 大单、内外盘记录".format(
            frame_end_time_str, len(final_cu_big_deal_df)))
        return final_cu_big_deal_df
        """
        if rt_big_deal_df is None or rt_big_deal_df.empty:
            wx.info("[Rt_Ana][Rt_Cmp_Big_Baseline] 大单数据为空,退出")
            return None
        else:
            cols = ['id','date','t_frame','big_qty','big_abs_pct','big_io_pct','big_buy_pct','big_sell_pct',
                    'amount','sell_qty','sell_amount','buy_qty','buy_amount','air_qty','air_amount']
            rt_big_deal_df = rt_big_deal_df.loc[:,cols]
            rt_big_deal_df.fillna(0,inplace=True)
            rt_big_deal_df.reset_index(drop=True, inplace=True)

        return rt_big_deal_df
        """
        # 导入基线数据库
        # self.db.db_load_into_RT_BL_Big_Deal(df=rt_big_deal_df)
        """ 不做统计对比, 直接导入 基线数据库,BI工具图形化展示
コード例 #23
0
 def __del__(self):
     """Log that this Conf_handler instance is being destroyed."""
     lg.get_handle().info("[OBJ] Conf_handler : __del__ called")
コード例 #24
0
# -*- coding: utf-8 -*-
# __author__ = "WUX"
# dev version

import new_logger as lg
lg._init_()
wx = lg.get_handle()
from stock_package import ts_data
from realtime_package import rt_163, rt_bl, rt_ana, rt_east

# import pandas as pd
# from assess_package import back_trader
from functions import *
from conf import conf_handler, xl_handler

# Read runtime parameters from rt_analyer.conf and initialize the handler.
h_conf = conf_handler(conf="rt_analyer.conf")
# Realtime polling delay — presumably seconds; TODO confirm unit in conf.
rt_delay = int(h_conf.rd_opt('general', 'rt_delay'))

# Read accounts.xlsx and collect the configured stock ids.
xl_acc = xl_handler(f_name="accounts.xlsx")
xl_acc.rd_accounts_file()
id_arr = xl_acc.get_stock_id_from_conf()

# Read keywords.xlsx into a keyword list.
xl_keywords = xl_handler(f_name="keywords.xlsx")
keywords_arr = xl_keywords.rd_keywords_file()

# Realtime trading object (163 data source currently disabled).
# rt163 = rt_163(id_arr=id_arr, date_str='')
コード例 #25
0
    def calc(self, stock_id, fresh=True, data_src='qfq'):
        """Compute technical indicators (MA, EMA, MACD, Bollinger) for one stock.

        Args:
            stock_id: stock code string; its prefix ('002', '00', '30',
                '60', '68') selects the source table.
            fresh: when False, only the most recent row is returned.
            data_src: 'cq' or 'qfq' — selects the raw vs forward-adjusted
                price table set.

        Returns:
            DataFrame with id/date and indicator columns, or None when the
            stock id or data source cannot be mapped to a table.
        """
        wx = lg.get_handle()

        t_name = self._resolve_table(stock_id, data_src, wx)
        if t_name is None:
            return None

        # NOTE(review): SQL is built by string concatenation; stock_id should
        # only come from trusted config — parameterize if it can be external.
        sql = ("select id, date, close from " + t_name +
               " where close > 0 and id = " + stock_id + " order by date desc ")
        df_ma = self.db._exec_sql(sql)
        df_ma.sort_values(by='date', ascending=True, inplace=True)

        # Simple moving averages over the configured windows (e.g. 5/10/20/60).
        for duration in self.ma_duration:
            df_ma['MA_' + duration] = df_ma['close'].rolling(
                int(duration)).mean()

        # Exponential moving averages (e.g. 12/26) feeding MACD.
        for duration in self.ema_duration:
            df_ma['EMA_' +
                  duration] = df_ma['close'].ewm(span=int(duration)).mean()

        # MACD fast line: short EMA minus long EMA.
        df_ma['DIF'] = df_ma['EMA_' + self.ema_duration[0]] - df_ma[
            'EMA_' + self.ema_duration[1]]

        # MACD slow (signal) line: 9-period EMA of DIF.
        df_ma['DEA'] = df_ma['DIF'].ewm(span=9).mean()

        # Bollinger bands: mid = MA(bolling), top/bottom = mid ± 2 * rolling std.
        rolling_std = df_ma['close'].rolling(int(self.bolling)).std()
        df_ma['bolling_mid'] = df_ma['MA_' + self.bolling]
        df_ma['bolling_top'] = df_ma['MA_' + self.bolling] + 2 * rolling_std
        df_ma['bolling_bottom'] = df_ma['MA_' + self.bolling] - 2 * rolling_std

        df_ma.drop(columns=['close'], inplace=True)
        df_ma.dropna(axis=0, how="any", inplace=True)
        if not fresh:
            df_ma = df_ma.iloc[-1:]  # keep only the latest row (a DataFrame)
        return df_ma

    def _resolve_table(self, stock_id, data_src, wx):
        """Map (stock_id prefix, data_src) to a DB table name; None if unknown."""
        if data_src == 'cq':
            tables = [('002', self.cq_tname_002), ('00', self.cq_tname_00),
                      ('30', self.cq_tname_30), ('60', self.cq_tname_60),
                      ('68', self.cq_tname_68)]
        elif data_src == 'qfq':
            tables = [('002', self.qfq_tname_002), ('00', self.qfq_tname_00),
                      ('30', self.qfq_tname_30), ('60', self.qfq_tname_60),
                      ('68', self.qfq_tname_68)]
        else:
            wx.info("[Class MA_kits: calc] failed to identify the Data Src {}".
                    format(data_src))
            return None
        # Prefix order matters: '002' must be tested before '00'.
        for prefix, t_name in tables:
            if stock_id.startswith(prefix):
                return t_name
        wx.info(
            "[Class MA_kits: calc] failed to identify the Stock_id {}".
            format(stock_id))
        return None