Esempio n. 1
0
def xsg():
    """
    Aggregate restricted-share unlock (xsg) data.

    Walks every monthly xsg dataset, prices each unlock event at the last
    close on or before the unlock date, sums the unlocked market value per
    date (unit: 1e8 CNY), and stores the per-date totals as the
    HDF5_FUNDAMENTAL_XSG_DETAIL dataset.
    """
    # Open the fundamental and per-share HDF5 files (append mode).
    f = h5py.File(conf.HDF5_FILE_FUNDAMENTAL, 'a')
    f_share = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    console.write_head(conf.HDF5_OPERATE_ARRANGE, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_XSG)
    path = '/' + conf.HDF5_FUNDAMENTAL_XSG
    xsg_sum_dict = dict()
    if f.get(path) is not None:
        for month in f[path]:
            df = tool.df_from_dataset(f[path], month, None)
            # HDF5 stores strings as bytes; decode back to str.
            df["code"] = df["code"].str.decode("utf-8")
            df["count"] = df["count"].str.decode("utf-8")
            df[conf.HDF5_SHARE_DATE_INDEX] = df[
                conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
            for index, row in df.iterrows():
                code = row["code"]
                xsg_date_str = row[conf.HDF5_SHARE_DATE_INDEX]
                code_prefix = code[0:3]
                code_group_path = '/' + code_prefix + '/' + code
                if f_share.get(code_group_path) is None:
                    continue
                # Price the unlock at the close of the last trading day
                # on or before the unlock date.
                share_df = tool.df_from_dataset(f_share[code_group_path], "D",
                                                None)
                share_df[conf.HDF5_SHARE_DATE_INDEX] = share_df[
                    conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
                share_df = share_df.set_index(conf.HDF5_SHARE_DATE_INDEX)
                share_df = share_df[:xsg_date_str]
                if len(share_df) == 0:
                    continue
                close = share_df.tail(1)["close"]
                # count is in units of 10k shares, close in CNY;
                # normalize the product to 1e8 CNY below.
                code_sum = close.values * float(row["count"]) * 10000
                sum_price = round(code_sum[0] / 10000 / 10000, 2)
                # trade_date = tradetime.get_week_of_date(xsg_date_str, "D")
                trade_date = xsg_date_str
                if trade_date in xsg_sum_dict:
                    xsg_sum_dict[trade_date] += sum_price
                else:
                    xsg_sum_dict[trade_date] = sum_price
        sum_df = tool.init_df(list(xsg_sum_dict.items()),
                              [conf.HDF5_SHARE_DATE_INDEX, "sum"])
        if len(sum_df) > 0:
            sum_df = sum_df.sort_values(by=[conf.HDF5_SHARE_DATE_INDEX])
            tool.create_df_dataset(f, conf.HDF5_FUNDAMENTAL_XSG_DETAIL, sum_df)
    console.write_tail()
    f_share.close()
    f.close()
    return
Esempio n. 2
0
def basic_detail():
    """
    Aggregate xsg / ipo / shm / szm fundamentals and push them to influxdb.

    Each section reads its detail dataset (when present) from the
    fundamental HDF5 file, re-indexes it by datetime and pushes it to
    MEASUREMENT_BASIC tagged with its business type.
    """
    f = h5py.File(conf.HDF5_FILE_FUNDAMENTAL, 'a')
    # Restricted-share unlock (xsg) totals.
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_XSG)
    if f.get(conf.HDF5_FUNDAMENTAL_XSG_DETAIL) is not None:
        xsg_df = tool.df_from_dataset(f, conf.HDF5_FUNDAMENTAL_XSG_DETAIL,
                                      None)
        xsg_df = _datetime_index(xsg_df)
        influx.reset_df(xsg_df, conf.MEASUREMENT_BASIC, {"btype": "xsg"})
    console.write_tail()

    # IPO fund-raising totals.
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_IPO)
    if f.get(conf.HDF5_FUNDAMENTAL_IPO) and f[conf.HDF5_FUNDAMENTAL_IPO].get(
            conf.HDF5_FUNDAMENTAL_IPO_DETAIL) is not None:
        ipo_df = tool.df_from_dataset(f[conf.HDF5_FUNDAMENTAL_IPO],
                                      conf.HDF5_FUNDAMENTAL_IPO_DETAIL, None)
        ipo_df = _datetime_index(ipo_df)
        influx.reset_df(ipo_df, conf.MEASUREMENT_BASIC, {"btype": "ipo"})
    console.write_tail()

    # Shanghai margin trading.  BUGFIX: write_tail() used to run right
    # after write_head(), before the push — moved after the block so the
    # console head/tail pairing matches the other sections.
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_SH_MARGINS)
    if f.get(conf.HDF5_FUNDAMENTAL_SH_MARGINS) and f[
            conf.HDF5_FUNDAMENTAL_SH_MARGINS].get(
                conf.HDF5_FUNDAMENTAL_SH_MARGINS_DETAIL) is not None:
        shm_df = tool.df_from_dataset(f[conf.HDF5_FUNDAMENTAL_SH_MARGINS],
                                      conf.HDF5_FUNDAMENTAL_SH_MARGINS_DETAIL,
                                      None)
        shm_df = _datetime_index(shm_df)
        influx.reset_df(shm_df, conf.MEASUREMENT_BASIC, {"btype": "shm"})
    console.write_tail()

    # Shenzhen margin trading.
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_SZ_MARGINS)
    if f.get(conf.HDF5_FUNDAMENTAL_SZ_MARGINS) and f[
            conf.HDF5_FUNDAMENTAL_SZ_MARGINS].get(
                conf.HDF5_FUNDAMENTAL_SZ_MARGINS_DETAIL) is not None:
        shz_df = tool.df_from_dataset(f[conf.HDF5_FUNDAMENTAL_SZ_MARGINS],
                                      conf.HDF5_FUNDAMENTAL_SZ_MARGINS_DETAIL,
                                      None)
        shz_df = _datetime_index(shz_df)
        influx.reset_df(shz_df, conf.MEASUREMENT_BASIC, {"btype": "szm"})
    console.write_tail()
    f.close()
    return
Esempio n. 3
0
def ipo():
    """
    Aggregate IPO listing data.

    Sums the funds raised per listing date (``funds`` is already in 1e8
    CNY) and stores the per-date totals as the IPO detail dataset.
    """
    f = h5py.File(conf.HDF5_FILE_FUNDAMENTAL, 'a')
    console.write_head(conf.HDF5_OPERATE_ARRANGE, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_FUNDAMENTAL_IPO)
    # NOTE: the original assigned `path` twice; the duplicate was removed.
    path = '/' + conf.HDF5_FUNDAMENTAL_IPO
    ipo_sum_dict = dict()
    if f.get(path) is not None:
        df = tool.df_from_dataset(f[path], conf.HDF5_FUNDAMENTAL_IPO, None)
        # HDF5 stores strings as bytes; decode back to str.
        df["issue_date"] = df["issue_date"].str.decode("utf-8")
        df["ipo_date"] = df["ipo_date"].str.decode("utf-8")
        for index, row in df.iterrows():
            trade_date = row["ipo_date"]
            # Unit: 1e8 CNY (rounded to 2 decimals).
            sum_price = round(row["funds"], 2)
            if trade_date in ipo_sum_dict:
                ipo_sum_dict[trade_date] += sum_price
            else:
                ipo_sum_dict[trade_date] = sum_price
        sum_df = tool.init_df(list(ipo_sum_dict.items()),
                              [conf.HDF5_SHARE_DATE_INDEX, "sum"])
        if len(sum_df) > 0:
            sum_df = sum_df.sort_values(by=[conf.HDF5_SHARE_DATE_INDEX])
            tool.create_df_dataset(f[path], conf.HDF5_FUNDAMENTAL_IPO_DETAIL,
                                   sum_df)
    console.write_tail()
    f.close()
    return
Esempio n. 4
0
def test():
    """
    Ad-hoc experiment: estimate the turning ratio of a stock's macd trend.

    For each (code, ktype) pair, arranges the last 100 bars into trend
    segments and prints, at every transition into a shake status, the
    segment length and how far macd moved past the turn.
    """
    # code_list = ["000725", "600519"]
    code_list = ["000725"]
    ktype_list = ["5"]
    # Estimate the stock's trend-turn ratio.
    f = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    for code in code_list:
        for ktype in ktype_list:
            code_prefix = code[0:3]
            df = tool.df_from_dataset(f[code_prefix][code], ktype, None)
            # HDF5 stores strings as bytes; decode back to str.
            df[conf.HDF5_SHARE_DATE_INDEX] = df[
                conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
            trend_df = action.arrange_trend(df.tail(100), 0.1)
            length = len(trend_df)
            # NOTE(review): assumes trend_df has a 0..n-1 integer index so
            # that `index + 1` addresses the next row — confirm in
            # action.arrange_trend.
            for index, row in trend_df.iterrows():
                if index != length - 1:
                    next_row = trend_df.iloc[index + 1]
                    # Report only transitions from a non-shake segment
                    # into a shake segment.
                    if next_row[
                            action.
                            INDEX_STATUS] == action.STATUS_SHAKE and row[
                                action.INDEX_STATUS] != action.STATUS_SHAKE:
                        start_row = trend_df.iloc[
                            index - row[action.INDEX_TREND_COUNT]]
                        detail = "end_date:%s, trend_count:%d, status:%s, macd_range:%f, macd_diff:%f, diff_per:%d"
                        macd_range = row["macd"] - start_row["macd"]
                        macd_diff = next_row["macd"] - row["macd"]
                        print(detail %
                              (next_row['date'], row[action.INDEX_TREND_COUNT],
                               row[action.INDEX_STATUS], macd_range, macd_diff,
                               round(macd_diff * 100 / macd_range, 0)))
    f.close()
    return
Esempio n. 5
0
def operate_quit(action_type):
    """
    Convert delisting (quit) records into attribute tags on each code.

    Parameters
    ----------
    action_type : operate constant passed through to write_head and
        tool.op_attr_by_codelist (add or remove the tag).
    """
    f = h5py.File(conf.HDF5_FILE_BASIC, 'a')
    console.write_head(action_type, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_BASIC_QUIT)
    path = '/' + conf.HDF5_BASIC_QUIT
    if f.get(path) is None:
        console.write_msg("quit的detail不存在")
        # BUGFIX: close the console section and the file on early exit —
        # the original returned here, leaking the handle.
        console.write_tail()
        f.close()
        return

    quit_list = [
        conf.HDF5_BASIC_QUIT_TERMINATE,
        conf.HDF5_BASIC_QUIT_SUSPEND,
    ]
    for qtype in quit_list:
        # Read this quit type's dataset; report and skip on failure.
        quit_df = tool.df_from_dataset(f[path], qtype, None)
        if quit_df is not None and quit_df.empty is not True:
            # HDF5 stores strings as bytes; decode back to str.
            quit_df["code"] = quit_df["code"].str.decode("utf-8")
            # Tag every terminated/suspended code under its group.
            tool.op_attr_by_codelist(action_type, quit_df["code"].values,
                                     conf.HDF5_BASIC_QUIT, True)
        else:
            console.write_msg("quit的detail数据获取失败")
    console.write_tail()
    f.close()
    return
Esempio n. 6
0
def code_classify(code_list, classify_list):
    """
    Arrange, per code, the classifications it belongs to.

    Scans every classify group's code dataset, records (date, code,
    classify) rows for codes in ``code_list``, and rewrites the
    HDF5_OTHER_CODE_CLASSIFY dataset with the result.

    Parameters
    ----------
    code_list : iterable of code strings to keep.
    classify_list : classify type names (top-level groups) to scan.
    """
    f = h5py.File(conf.HDF5_FILE_CLASSIFY, 'a')
    console.write_head(conf.HDF5_OPERATE_ARRANGE, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_OTHER_CODE_CLASSIFY)
    code_classify_df = tool.init_empty_df(["date", "code", "classify"])
    today_str = tradetime.get_today()
    for ctype in classify_list:
        for classify_name in f[ctype]:
            if f[ctype][classify_name].get(conf.HDF5_CLASSIFY_DS_CODE) is None:
                console.write_msg(classify_name + "的code列表不存在")
                # BUGFIX: skip this classify — the original fell through
                # and tried to read the missing dataset anyway.
                continue
            classify_df = tool.df_from_dataset(f[ctype][classify_name],
                                               conf.HDF5_CLASSIFY_DS_CODE,
                                               None)
            for index, row in classify_df.iterrows():
                code = row[0].astype(str)
                if code in code_list:
                    code_dict = dict()
                    code_dict["date"] = today_str
                    code_dict["code"] = code
                    code_dict["classify"] = classify_name
                    # NOTE(review): DataFrame.append is deprecated in
                    # modern pandas; kept for compatibility with the
                    # pandas version this project pins.
                    code_classify_df = code_classify_df.append(
                        code_dict, ignore_index=True)
    console.write_tail()
    f.close()

    # Rewrite the aggregated dataset from scratch.
    f_other = h5py.File(conf.HDF5_FILE_OTHER, 'a')
    tool.delete_dataset(f_other, conf.HDF5_OTHER_CODE_CLASSIFY)
    tool.merge_df_dataset(f_other, conf.HDF5_OTHER_CODE_CLASSIFY,
                          code_classify_df)
    f_other.close()
    return
Esempio n. 7
0
def _wrap_kline(f, measurement, code, reset_flag=False):
    """
    Push Chan-theory (缠论) kline datasets for one code to influxdb.

    Pushes incrementally after the last stored timestamp unless
    ``reset_flag`` is set, in which case the most recent DF_INIT_LIMIT
    rows are resent.
    """
    for ktype in conf.HDF5_SHARE_WRAP_KTYPE:
        tags = {"kcode": code, "ktype": ktype}
        ds_name = conf.HDF5_INDEX_WRAP + "_" + ktype
        if f.get(ds_name) is None:
            console.write_msg(code + "缠论数据不存在")
            continue

        frame = _datetime_index(tool.df_from_dataset(f, ds_name, None))
        since = influx.get_last_datetime(measurement, tags)
        if reset_flag is False and since is not None:
            # Incremental: only rows newer than the last pushed point.
            frame = frame.loc[frame.index > since]
        else:
            frame = frame.tail(DF_INIT_LIMIT)

        if len(frame) == 0:
            console.write_pass()
            continue
        try:
            influx.reset_df(frame, measurement, tags)
            console.write_exec()
        except Exception as er:
            print(str(er))
    return
Esempio n. 8
0
def code_classify(today_str=None):
    """
    Push the filtered codes' classifications to influxdb.

    Parameters
    ----------
    today_str : date string used as the base timestamp; defaults to
        today's trading date.
    """
    f = h5py.File(conf.HDF5_FILE_OTHER, 'a')
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_OTHER_CODE_CLASSIFY)
    if today_str is None:
        today_str = tradetime.get_today()

    if f.get(conf.HDF5_OTHER_CODE_CLASSIFY) is None:
        console.write_msg("code的分类文件不存在")
        # BUGFIX: close the console section and the file on early exit —
        # the original returned here, leaking the handle.
        console.write_tail()
        f.close()
        return
    code_classify_df = tool.df_from_dataset(f, conf.HDF5_OTHER_CODE_CLASSIFY,
                                            None)
    code_classify_df[conf.HDF5_SHARE_DATE_INDEX] = bytes(today_str,
                                                         encoding="utf8")
    code_classify_df = _datetime_index(code_classify_df)
    code_classify_df = code_classify_df.reset_index()
    # Offset each row's timestamp by an increasing number of seconds —
    # presumably so points sharing one date stay distinct in influxdb.
    num = 1
    for index, row in code_classify_df.iterrows():
        code_classify_df.loc[
            index, conf.HDF5_SHARE_DATE_INDEX] = code_classify_df.loc[index][
                conf.HDF5_SHARE_DATE_INDEX] + datetime.timedelta(0, num)
        num += 1
    code_classify_df = code_classify_df.set_index(conf.HDF5_SHARE_DATE_INDEX)
    try:
        influx.write_df(code_classify_df, conf.MEASUREMENT_CODE_CLASSIFY, None)
    except Exception as er:
        print(str(er))
    console.write_tail()
    f.close()
    return
Esempio n. 9
0
def share_filter(today_str=None):
    """
    Push the screened share list to influxdb.

    Parameters
    ----------
    today_str : screening date; defaults to today's trading date.
    """
    f = h5py.File(conf.HDF5_FILE_SCREEN, 'a')
    if today_str is None:
        today_str = tradetime.get_today()
    console.write_head(conf.HDF5_OPERATE_PUSH, conf.HDF5_RESOURCE_TUSHARE,
                       conf.MEASUREMENT_FILTER_SHARE)

    # Guard the group lookup too: the original indexed
    # f[conf.SCREEN_SHARE_FILTER] directly and raised KeyError when the
    # group was absent.
    screen_group = f.get(conf.SCREEN_SHARE_FILTER)
    if screen_group is None or screen_group.get(today_str) is None:
        console.write_msg(today_str + "的筛选数据不存在")
        # BUGFIX: close the console section and the file on early exit —
        # the original returned here, leaking the handle.
        console.write_tail()
        f.close()
        return
    screen_df = tool.df_from_dataset(screen_group, today_str, None)
    screen_df[conf.HDF5_SHARE_DATE_INDEX] = bytes(today_str, encoding="utf8")
    screen_df = _datetime_index(screen_df)
    screen_df = screen_df.reset_index()
    # Offset each row's timestamp by an increasing number of seconds —
    # presumably so points sharing one date stay distinct in influxdb.
    num = 1
    for index, row in screen_df.iterrows():
        screen_df.loc[index,
                      conf.HDF5_SHARE_DATE_INDEX] = screen_df.loc[index][
                          conf.HDF5_SHARE_DATE_INDEX] + datetime.timedelta(
                              0, num)
        num += 1
    screen_df = screen_df.set_index(conf.HDF5_SHARE_DATE_INDEX)
    try:
        influx.write_df(screen_df, conf.MEASUREMENT_FILTER_SHARE,
                        {"filter_date": today_str})
    except Exception as er:
        print(str(er))
    console.write_tail()
    f.close()
    return
Esempio n. 10
0
def all_classify(classify_list, init_flag=True):
    """
    Arrange Chan-theory (缠论) klines for every classification.

    Parameters
    ----------
    classify_list : classify type names (top-level groups) to process.
    init_flag : when True, delete and rebuild each wrap dataset.
    """
    # NOTE: the original also opened HDF5_FILE_SHARE here but never read
    # from it; the unused handle has been removed.
    f_classify = h5py.File(conf.HDF5_FILE_CLASSIFY, 'a')
    for ctype in classify_list:
        for classify_name in f_classify[ctype]:
            console.write_head(conf.HDF5_OPERATE_WRAP,
                               conf.HDF5_RESOURCE_TUSHARE, classify_name)
            for ktype in conf.HDF5_SHARE_WRAP_KTYPE:
                ds_name = conf.HDF5_CLASSIFY_DS_DETAIL + "_" + ktype
                if f_classify[ctype][classify_name].get(ds_name) is None:
                    continue
                share_df = tool.df_from_dataset(
                    f_classify[ctype][classify_name], ds_name, None)
                wrap_df = one_df(share_df)
                wrap_ds_name = conf.HDF5_INDEX_WRAP + "_" + ktype
                if init_flag is True:
                    tool.delete_dataset(f_classify[ctype][classify_name],
                                        wrap_ds_name)
                if wrap_df is not None:
                    tool.merge_df_dataset(f_classify[ctype][classify_name],
                                          wrap_ds_name, wrap_df)
            console.write_tail()
    f_classify.close()
    return
Esempio n. 11
0
def all_classify(classify_list, init_flag=True):
    """
    Compute macd and moving-average indexes for every classification
    (depends on the classify aggregation having run first).

    Parameters
    ----------
    classify_list : classify type names (top-level groups) to process.
    init_flag : when True, delete and rebuild each index dataset.
    """
    f = h5py.File(conf.HDF5_FILE_CLASSIFY, 'a')
    # Iterate every classify group under each requested type.
    for ctype in classify_list:
        for classify_name in f[ctype]:
            console.write_head(conf.HDF5_OPERATE_INDEX,
                               conf.HDF5_RESOURCE_TUSHARE, classify_name)
            for ktype in conf.HDF5_SHARE_KTYPE:
                ds_name = ktype
                if f[ctype][classify_name].get(ds_name) is None:
                    console.write_msg(classify_name + "分类聚合detail不存在")
                    continue

                df = tool.df_from_dataset(f[ctype][classify_name], ds_name,
                                          None)
                # Aggregated closes carry float noise; round to 2 decimals.
                df["close"] = df["close"].apply(lambda x: round(x, 2))
                try:
                    index_df = one_df(df, init_flag, True)
                except Exception as er:
                    # Report per-classify failures and keep going.
                    console.write_msg("[" + classify_name + "]" + str(er))
                    continue
                index_ds_name = conf.HDF5_INDEX_DETAIL + "_" + ktype
                if init_flag is True:
                    tool.delete_dataset(f[ctype][classify_name], index_ds_name)
                tool.merge_df_dataset(f[ctype][classify_name], index_ds_name,
                                      index_df.reset_index())
            console.write_tail()
    f.close()
    return
Esempio n. 12
0
def code_exec(f, code):
    """
    Screen a single stock across multiple k-line periods.

    For each period, merges the raw kline with its macd/MA index dataset,
    checks the macd trend on D/W/M and macd divergence on D/30min, and
    returns a dict of screening results — or None when data is missing,
    the daily trend filter rejects the code, or no period shows enough
    divergence.

    Parameters
    ----------
    f : open HDF5 share file handle.
    code : stock code string; its first three characters select the group.
    """
    # Minimum divergence count required on at least one of D / 30min.
    MACD_DIVERSE_LIMIT = 5
    code_prefix = code[0:3]
    code_dict = dict()
    code_dict["code"] = code
    omit_flag = False
    for ktype in ["D", "W", "M", "30", "5"]:
        if f[code_prefix][code].get(ktype) is None:
            console.write_blank()
            console.write_msg(code + "阶段" + ktype + "的detail数据不存在")
            return None
        share_df = tool.df_from_dataset(f[code_prefix][code], ktype, None)

        index_ds_name = conf.HDF5_INDEX_DETAIL + "_" + ktype
        if f[code_prefix][code].get(index_ds_name) is None:
            console.write_blank()
            console.write_msg(code + "阶段" + ktype + "的macd与均线数据不存在")
            return None
        index_df = tool.df_from_dataset(f[code_prefix][code], index_ds_name,
                                        None)
        # Outer-join index columns onto the raw kline rows by date.
        share_df = share_df.merge(index_df,
                                  left_on=conf.HDF5_SHARE_DATE_INDEX,
                                  right_on=conf.HDF5_SHARE_DATE_INDEX,
                                  how='outer')
        share_df[conf.HDF5_SHARE_DATE_INDEX] = share_df[
            conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
        # Check the macd trend on daily/weekly/monthly bars.
        if ktype in ["D", "M", "W"]:
            code_dict = _filter_trend(share_df.tail(50), code_dict, ktype)
            if code_dict is None:
                omit_flag = True
                break
        # Check macd divergence on daily/30-minute bars.
        if ktype in ["D", "30"]:
            code_dict = _filter_diverse(share_df.tail(100), code_dict, ktype)
    # Drop the stock when the daily macd shows no rising trend.
    if omit_flag is True:
        return None
    # Drop the stock when neither D nor 30min has enough divergence.
    # NOTE(review): keys here are "30"+... and "d"+... — presumably
    # _filter_diverse lower-cases the daily ktype when storing; confirm.
    if code_dict["30" +
                 INDEX_MACD_DIVERSE_COUNT] <= MACD_DIVERSE_LIMIT and code_dict[
                     "d" + INDEX_MACD_DIVERSE_COUNT] <= MACD_DIVERSE_LIMIT:
        return None
    return code_dict
Esempio n. 13
0
def get_file():
    """
    Read the error dataset named by the module-level ``index`` from the
    error HDF5 file.

    Returns
    -------
    DataFrame with the mapped columns, or None when the dataset is absent.
    """
    global index
    # BUGFIX: pass an explicit mode (h5py's historical default 'a' is no
    # longer implied) and always close the handle, which the original
    # leaked.
    f = h5py.File(conf.HDF5_FILE_ERROR, 'a')
    try:
        if f.get(index) is not None:
            columns = conf.HDF5_ERROR_COLUMN_MAP.get(index, ["error"])
            ret = tool.df_from_dataset(f, index, columns)
        else:
            ret = None
    finally:
        f.close()
    return ret
Esempio n. 14
0
def code_macd_trend(f, ktype):
    """
    Build a macd trend DataFrame for one code group and ktype.

    Returns None when either the raw kline dataset or its index dataset
    is missing; otherwise merges them by date and evaluates the trend
    over the most recent 60 bars.
    """
    index_name = conf.HDF5_INDEX_DETAIL + "_" + ktype
    if f.get(ktype) is None:
        return None
    if f.get(index_name) is None:
        return None

    kline_df = tool.df_from_dataset(f, ktype, None)
    macd_df = tool.df_from_dataset(f, index_name, None)
    # Left-join the index columns onto the raw kline rows by date.
    merged = kline_df.merge(macd_df,
                            left_on=conf.HDF5_SHARE_DATE_INDEX,
                            right_on=conf.HDF5_SHARE_DATE_INDEX,
                            how='left')
    merged[conf.HDF5_SHARE_DATE_INDEX] = merged[
        conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
    return macd.trend(merged.tail(60))
Esempio n. 15
0
def get_from_file(ktype,
                  stype,
                  code,
                  factor_macd_range,
                  df_file_num=48,
                  direct_turn=False):
    """
    Load historical kline data from file and compute the macd trend.

    Parameters
    ----------
    ktype : k-line period dataset name.
    stype : data source (conf.STYPE_BITMEX or conf.STYPE_ASHARE).
    code : contract / stock / index code; all-digit codes are A-share
        stocks, others are looked up in the index file.
    factor_macd_range : factor forwarded to macd.value_and_trend.
    df_file_num : number of rows wanted on top of the macd warm-up window.
    direct_turn : forwarded to macd.value_and_trend.

    Raises
    ------
    Exception when the source is unknown or the dataset is missing.
    """
    df = None
    # Keep extra rows so macd has enough history to warm up.
    df_file_num = df_file_num + DF_MACD_MIN_NUM
    if stype == conf.STYPE_BITMEX:
        f = h5py.File(conf.HDF5_FILE_BITMEX, 'a')
        path = '/' + code
        if f.get(path) is not None:
            df = tool.df_from_dataset(f[path], ktype, None)
            df[conf.HDF5_SHARE_DATE_INDEX] = df[
                conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
            df = df.tail(df_file_num)
        else:
            raise Exception(code + "的" + ktype + "文件数据不存在")
        f.close()
    elif stype == conf.STYPE_ASHARE:
        if code.isdigit():
            f = h5py.File(conf.HDF5_FILE_SHARE, 'a')
            code_prefix = code[0:3]
            path = '/' + code_prefix + '/' + code
        else:
            f = h5py.File(conf.HDF5_FILE_INDEX, 'a')
            path = '/' + code

        if f.get(path) is not None:
            df = tool.df_from_dataset(f[path], ktype, None)
            df[conf.HDF5_SHARE_DATE_INDEX] = df[
                conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
            df = df.tail(df_file_num)
        else:
            raise Exception(code + "的" + ktype + "文件数据不存在")
        # BUGFIX: was `f.close` (attribute access, no call) — the A-share
        # branch never actually closed the file.
        f.close()
    else:
        raise Exception("数据源不存在或未配置")
    return macd.value_and_trend(df, factor_macd_range, direct_turn)
Esempio n. 16
0
def code_detail(code_list, start_date):
    """
    Arrange each code's basic-data rows into its group in the share file.

    Walks the per-date basic detail datasets, collects the rows belonging
    to codes in ``code_list``, and merges each code's history into its
    HDF5_BASIC_DETAIL dataset (rebuilding it when ``start_date`` is None).

    Parameters
    ----------
    code_list : codes to keep.
    start_date : only dates >= this are collected; None means all dates
        and a full rebuild of each code's dataset.
    """
    # Read the per-date basic details and regroup them per code.
    f = h5py.File(conf.HDF5_FILE_BASIC, 'a')
    f_share = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    console.write_head(conf.HDF5_OPERATE_ARRANGE, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_BASIC_DETAIL)
    path = '/' + conf.HDF5_BASIC_DETAIL
    if f.get(path) is None:
        # BUGFIX: close the console section and both files on early exit —
        # the original returned here, leaking both handles.
        console.write_tail()
        f_share.close()
        f.close()
        return

    code_basic_dict = dict()
    for date in f[path]:
        if start_date is not None and date < start_date:
            console.write_msg(start_date + "起始日期大于基本数据的最大日期")
            continue
        df = tool.df_from_dataset(f[path], date, None)
        # HDF5 stores strings as bytes; decode back to str.
        df["code"] = df["code"].str.decode("utf-8")
        df = df.set_index("code")
        for code in df.index:
            if code not in code_list:
                continue

            if code not in code_basic_dict:
                code_basic_dict[code] = tool.init_empty_df(df.columns)
            code_basic_dict[code].loc[date] = df.loc[code, :]

    for code, code_df in code_basic_dict.items():
        code_df.index.name = conf.HDF5_SHARE_DATE_INDEX
        code_df = code_df.reset_index().sort_values(
            by=[conf.HDF5_SHARE_DATE_INDEX])

        code_prefix = code[0:3]
        code_group_path = '/' + code_prefix + '/' + code
        if f_share.get(code_group_path) is None:
            console.write_msg(code + "的detail文件不存在")
            continue

        # Full run: rebuild the dataset from scratch.
        if start_date is None:
            tool.delete_dataset(f_share[code_group_path],
                                conf.HDF5_BASIC_DETAIL)
        tool.merge_df_dataset(f_share[code_group_path], conf.HDF5_BASIC_DETAIL,
                              code_df)
        console.write_exec()
    console.write_blank()
    console.write_tail()
    f_share.close()
    f.close()
    return
Esempio n. 17
0
def _raw_kline(f, measurement, code, reset_flag=False):
    """
    Push raw kline rows (merged with their macd/MA index columns) for one
    code to influxdb, one tagged series per ktype.

    Parameters
    ----------
    f : open HDF5 group for the code.
    measurement : target influxdb measurement name.
    code : stock code used in the series tags.
    reset_flag : when True, ignore the last stored timestamp and resend
        the most recent DF_INIT_LIMIT rows.
    """
    for ktype in conf.HDF5_SHARE_KTYPE:
        ctags = {"kcode": code, "ktype": ktype}
        detail_ds_name = ktype
        index_ds_name = conf.HDF5_INDEX_DETAIL + "_" + ktype

        if f.get(detail_ds_name) is None:
            console.write_msg(code + "的detail数据不存在")
            continue
        if f.get(index_ds_name) is None:
            console.write_msg(code + "的index数据不存在")
            continue
        detail_df = tool.df_from_dataset(f, detail_ds_name, None)
        index_df = tool.df_from_dataset(f, index_ds_name, None)
        # Left-join the index columns onto the raw kline rows by date.
        detail_df = detail_df.merge(index_df,
                                    left_on=conf.HDF5_SHARE_DATE_INDEX,
                                    right_on=conf.HDF5_SHARE_DATE_INDEX,
                                    how='left')
        detail_df = _datetime_index(detail_df)
        last_datetime = influx.get_last_datetime(measurement, ctags)
        # Incremental push by default; full tail resend on reset.
        if last_datetime is not None and reset_flag is False:
            detail_df = detail_df.loc[detail_df.index > last_datetime]
        else:
            detail_df = detail_df.tail(DF_INIT_LIMIT)
        # NOTE(review): assumes the merged frame always carries an
        # "ma_border" column; drop() raises KeyError otherwise — confirm.
        detail_df = detail_df.drop("ma_border", axis=1)
        if len(detail_df) > 0:
            try:
                influx.reset_df(detail_df, measurement, ctags)
                console.write_exec()
            except Exception as er:
                print(str(er))
        else:
            console.write_pass()
    return
Esempio n. 18
0
def one_classify_detail(f, code_list, omit_list, ktype, start_date):
    """
    Aggregate one classification: average the klines of all its codes.

    Sums every member code's kline columns per date (counting members in
    a "num" column), divides by the per-date count to get the mean, and
    returns a date-sorted DataFrame — or None when no data was found.

    Parameters
    ----------
    f : open HDF5 share file handle.
    code_list : rows whose first element is the member code.
    omit_list : code prefixes to skip (e.g. the ChiNext board).
    ktype : k-line period dataset name.
    start_date : when not None, keep only rows from this date onward.
    """
    # Running per-date sum; "num" counts contributing codes per date.
    init_df = tool.init_empty_df(conf.HDF5_SHARE_COLUMN)

    # Add each member code's kline into the running sum.
    for row in code_list:
        code = row[0].astype(str)
        code_prefix = code[0:3]
        # Optionally skip boards by prefix.
        if code_prefix in omit_list:
            continue

        code_group_path = '/' + code_prefix + '/' + code
        if f.get(code_group_path) is None:
            console.write_msg(code + "的detail文件不存在")
            continue
        else:
            # Skip suspended / delisted / unavailable codes.
            if f[code_group_path].attrs.get(conf.HDF5_BASIC_QUIT) is not None:
                continue

            if f[code_group_path].attrs.get(conf.HDF5_BASIC_ST) is not None:
                continue

        if f[code_group_path].get(ktype) is None:
            console.write_msg(code + "的" + ktype + "文件不存在")
            continue
        add_df = tool.df_from_dataset(f[code_group_path], ktype, None)
        add_df[conf.HDF5_SHARE_DATE_INDEX] = add_df[
            conf.HDF5_SHARE_DATE_INDEX].str.decode("utf-8")
        add_df["num"] = 1
        add_df = add_df.set_index(conf.HDF5_SHARE_DATE_INDEX)
        # Missing dates in either frame count as 0, so "num" ends up
        # holding the number of codes present on each date.
        init_df = init_df.add(add_df, fill_value=0)
    # Divide the sums by the per-date member count to get the mean.
    if len(init_df) > 0:
        init_df = init_df.div(init_df.num, axis=0)
        init_df = init_df.drop("num", axis=1)
        if start_date is not None:
            # BUGFIX: `.ix` was removed from pandas; `.loc` gives the same
            # label-based slice on the string date index.
            init_df = init_df.loc[start_date:]
        init_df = init_df.reset_index().sort_values(
            by=[conf.HDF5_SHARE_DATE_INDEX])
        init_df["volume"] = init_df["volume"].astype('float64')
        return init_df
    else:
        return None
Esempio n. 19
0
def all_index(init_flag=True):
    """
    Arrange Chan-theory (缠论) klines for every index code.

    When ``init_flag`` is True each wrap dataset is deleted first, so the
    result is a full rebuild; otherwise new rows are merged in.
    """
    f = h5py.File(conf.HDF5_FILE_INDEX, 'a')
    for code in f:
        console.write_head(conf.HDF5_OPERATE_WRAP, conf.HDF5_RESOURCE_TUSHARE,
                           code)
        group = f[code]
        for ktype in conf.HDF5_SHARE_WRAP_KTYPE:
            if group.get(ktype) is None:
                continue
            kline_df = tool.df_from_dataset(group, ktype, None)
            result = one_df(kline_df)
            target = conf.HDF5_INDEX_WRAP + "_" + ktype
            if init_flag is True:
                tool.delete_dataset(group, target)
            if result is not None:
                tool.merge_df_dataset(group, target, result)
        console.write_tail()
    f.close()
    return
Esempio n. 20
0
def all_share(omit_list, init_flag=True):
    """
    Compute macd and moving-average indexes for every stock.

    Parameters
    ----------
    omit_list : code prefixes (top-level groups) to skip entirely.
    init_flag : when True, delete and rebuild each index dataset.
    """
    f = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    for code_prefix in f:
        if code_prefix in omit_list:
            continue
        console.write_head(conf.HDF5_OPERATE_INDEX, conf.HDF5_RESOURCE_TUSHARE,
                           code_prefix)
        for code in f[code_prefix]:
            # Skip suspended / delisted / unavailable codes.
            if f[code_prefix][code].attrs.get(
                    conf.HDF5_BASIC_QUIT) is not None:
                continue
            if f[code_prefix][code].attrs.get(conf.HDF5_BASIC_ST) is not None:
                continue

            code_group_path = '/' + code_prefix + '/' + code
            for ktype in conf.HDF5_SHARE_KTYPE:
                # Per-ktype try/except: one bad dataset must not abort
                # the whole run.
                try:
                    if f.get(code_group_path) is None or f[code_prefix][
                            code].get(ktype) is None:
                        console.write_msg(code + "-" + ktype + "的detail不存在")
                        continue
                    df = tool.df_from_dataset(f[code_prefix][code], ktype,
                                              None)
                    index_df = one_df(df, init_flag)
                    ds_name = conf.HDF5_INDEX_DETAIL + "_" + ktype
                    if init_flag is True:
                        tool.delete_dataset(f[code_prefix][code], ds_name)
                    tool.merge_df_dataset(f[code_prefix][code], ds_name,
                                          index_df.reset_index())
                except Exception as er:
                    print(str(er))
            console.write_exec()
        console.write_blank()
        console.write_tail()
    f.close()
    return
Esempio n. 21
0
def all_index(init_flag=True):
    """
    Compute macd and moving-average indexes for every index code.

    When ``init_flag`` is True each index dataset is deleted first, so
    the result is a full rebuild; otherwise new rows are merged in.
    """
    f = h5py.File(conf.HDF5_FILE_INDEX, 'a')
    for code in f:
        console.write_head(conf.HDF5_OPERATE_INDEX, conf.HDF5_RESOURCE_TUSHARE,
                           code)
        group = f[code]
        for ktype in conf.HDF5_SHARE_KTYPE:
            if group.get(ktype) is None:
                console.write_msg(code + "-" + ktype + "的detail不存在")
                continue
            raw_df = tool.df_from_dataset(group, ktype, None)
            computed = one_df(raw_df, init_flag)
            target = conf.HDF5_INDEX_DETAIL + "_" + ktype
            if init_flag is True:
                tool.delete_dataset(group, target)
            tool.merge_df_dataset(group, target, computed.reset_index())
        console.write_tail()
    f.close()
    return
Esempio n. 22
0
def margins(mtype):
    """
    Aggregate margin-trading (融资融券) data for one exchange.

    Sums net margin buying (rzmre - rqmcl, converted to 1e8 CNY) per
    trading date and stores the totals as the exchange's detail dataset.

    Parameters
    ----------
    mtype : "sh" for Shanghai or "sz" for Shenzhen; anything else is
        reported and ignored.
    """
    if mtype == "sh":
        mtype_index = conf.HDF5_FUNDAMENTAL_SH_MARGINS
        mtype_index_detail = conf.HDF5_FUNDAMENTAL_SH_MARGINS_DETAIL
    elif mtype == "sz":
        mtype_index = conf.HDF5_FUNDAMENTAL_SZ_MARGINS
        mtype_index_detail = conf.HDF5_FUNDAMENTAL_SZ_MARGINS_DETAIL
    else:
        print("mtype " + mtype + " error\r\n")
        return

    f = h5py.File(conf.HDF5_FILE_FUNDAMENTAL, 'a')
    path = '/' + mtype_index
    console.write_head(conf.HDF5_OPERATE_ARRANGE, conf.HDF5_RESOURCE_TUSHARE,
                       mtype_index)
    margin_sum_dict = dict()
    if f.get(path) is not None:
        df = tool.df_from_dataset(f[path], mtype_index, None)
        # HDF5 stores strings as bytes; decode back to str.
        df["opDate"] = df["opDate"].str.decode("utf-8")
        for index, row in df.iterrows():
            # trade_date = tradetime.get_week_of_date(row["opDate"], "D")
            trade_date = row["opDate"]
            # Net margin buying, converted to 1e8 CNY.
            sum_price = round((row["rzmre"] - row["rqmcl"]) / 10000 / 10000, 2)
            if trade_date in margin_sum_dict:
                margin_sum_dict[trade_date] += sum_price
            else:
                margin_sum_dict[trade_date] = sum_price
        sum_df = tool.init_df(list(margin_sum_dict.items()),
                              [conf.HDF5_SHARE_DATE_INDEX, "sum"])
        if len(sum_df) > 0:
            sum_df = sum_df.sort_values(by=[conf.HDF5_SHARE_DATE_INDEX])
            tool.create_df_dataset(f[mtype_index], mtype_index_detail, sum_df)
    # BUGFIX: write_tail() used to run immediately after write_head(),
    # before any processing — moved here to close the section properly.
    console.write_tail()
    f.close()
    return
Esempio n. 23
0
def filter_share(code_list, start_date):
    """
    Arrange the chan-lun (wrap) k-lines for a filtered list of shares.

    code_list: iterable of share codes to process.
    start_date: not read by this routine; kept for caller compatibility.

    For each code, every ktype in conf.HDF5_SHARE_WRAP_KTYPE is wrapped
    via one_df and stored as a "<wrap>_<ktype>" dataset, replacing any
    previous result.
    """
    f = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    console.write_head(conf.HDF5_OPERATE_WRAP, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_INDEX_WRAP)
    for code in code_list:
        prefix = code[0:3]
        if f.get('/' + prefix + '/' + code) is None:
            continue
        code_group = f[prefix][code]
        # skip shares tagged as delisted or ST — no usable data
        quit_attr = code_group.attrs.get(conf.HDF5_BASIC_QUIT)
        st_attr = code_group.attrs.get(conf.HDF5_BASIC_ST)
        if quit_attr is not None or st_attr is not None:
            continue

        for ktype in conf.HDF5_SHARE_WRAP_KTYPE:
            if code_group.get(ktype) is None:
                continue
            share_df = tool.df_from_dataset(code_group, ktype, None)
            wrap_df = one_df(share_df)
            if wrap_df is None:
                console.write_pass()
                continue
            wrap_ds_name = conf.HDF5_INDEX_WRAP + "_" + ktype
            # replace any previously stored wrap result
            if code_group.get(wrap_ds_name) is not None:
                tool.delete_dataset(code_group, wrap_ds_name)
            tool.create_df_dataset(code_group, wrap_ds_name, wrap_df)
            console.write_exec()
    console.write_blank()
    console.write_tail()
    f.close()
    return
Esempio n. 24
0
def _basic_info(f, measurement, code, reset_flag):
    """
    Push a share's fundamental-detail rows into influxdb.

    f: HDF5 group that holds the code's datasets.
    measurement: influx measurement to write into.
    code: share code, recorded as the "kcode" tag.
    reset_flag: when True, rewrite the latest DF_INIT_LIMIT rows instead
    of only the rows newer than influx's last stored datetime.
    """
    ctags = {"kcode": code}
    if f.get(conf.HDF5_BASIC_DETAIL) is None:
        console.write_msg(code + "基本面数据不存在")
        return

    basic_df = _datetime_index(
        tool.df_from_dataset(f, conf.HDF5_BASIC_DETAIL, None))
    last_datetime = influx.get_last_datetime(measurement, ctags)
    if reset_flag is False and last_datetime is not None:
        # incremental update: keep only rows newer than influx's last point
        basic_df = basic_df.loc[basic_df.index > last_datetime]
    else:
        # initial load or forced reset: cap the volume to DF_INIT_LIMIT rows
        basic_df = basic_df.tail(DF_INIT_LIMIT)

    if len(basic_df) == 0:
        console.write_pass()
        return
    try:
        influx.reset_df(basic_df, measurement, ctags)
        console.write_exec()
    except Exception as er:
        # best-effort write: report the influx failure and carry on
        print(str(er))
    return
Esempio n. 25
0
def operate_st(action_type):
    """
    Convert the ST share list into attribute tags on each code's group.

    action_type: HDF5 operate constant (add/remove) forwarded to
    tool.op_attr_by_codelist, which applies the ST tag to every code
    found in the ST detail dataset.
    """
    f = h5py.File(conf.HDF5_FILE_BASIC, 'a')
    console.write_head(action_type, conf.HDF5_RESOURCE_TUSHARE,
                       conf.HDF5_BASIC_ST)
    path = '/' + conf.HDF5_BASIC_ST
    # bail out if the ST detail group was never fetched
    if f.get(path) is None:
        console.write_msg("st的detail文件不存在")
        # bug fix: the early return used to leak the open HDF5 handle and
        # never balanced write_head with write_tail
        console.write_tail()
        f.close()
        return

    st_df = tool.df_from_dataset(f[path], conf.HDF5_BASIC_ST, None)
    if st_df is not None and st_df.empty is not True:
        # h5py stores strings as bytes; decode before tagging
        st_df["code"] = st_df["code"].str.decode("utf-8")
        # tag every ST code so downstream screens can skip them
        tool.op_attr_by_codelist(action_type, st_df["code"].values,
                                 conf.HDF5_BASIC_ST, True)
    else:
        console.write_msg("st的detail数据获取失败")
    console.write_tail()
    f.close()
    return
Esempio n. 26
0
def mark_grade(today_str=None):
    """
    Score (grade) the shares produced by today's screen filter.

    today_str: trade date to grade; defaults to today when None.

    For each screened code the macd status of the day/week/month cycles
    is folded into a numeric "status" score, and for the D and 30-minute
    cycles the distance (absolute and percentage) between the divergence
    start price and the latest close is recorded together with the
    latest macd value. Results replace any previous grade dataset for
    the same date in the screen HDF5 file.
    """
    console.write_head(conf.HDF5_OPERATE_SCREEN, conf.HDF5_RESOURCE_TUSHARE,
                       conf.SCREEN_SHARE_GRADE)
    f = h5py.File(conf.HDF5_FILE_SCREEN, 'a')
    f_share = h5py.File(conf.HDF5_FILE_SHARE, 'a')
    if today_str is None:
        today_str = tradetime.get_today()
    if f[conf.STRATEGY_TREND_AND_REVERSE][conf.SCREEN_SHARE_FILTER].get(
            today_str) is None:
        console.write_msg(today_str + "个股筛选结果不存在")
        # bug fix: the early return used to leak both open HDF5 handles
        # and never balanced write_head with write_tail
        console.write_tail()
        f_share.close()
        f.close()
        return
    grade_df = tool.init_empty_df([
        "code", "status", "d_price_space", "d_price_per", "30_price_space",
        "30_price_per", "d_macd", "30_macd"
    ])
    screen_df = tool.df_from_dataset(
        f[conf.STRATEGY_TREND_AND_REVERSE][conf.SCREEN_SHARE_FILTER],
        today_str, None)
    # h5py stores strings as bytes; decode the columns we read below
    screen_df["d_m_status"] = screen_df["d_m_status"].str.decode("utf-8")
    screen_df["w_m_status"] = screen_df["w_m_status"].str.decode("utf-8")
    screen_df["m_m_status"] = screen_df["m_m_status"].str.decode("utf-8")
    screen_df["code"] = screen_df["code"].str.decode("utf-8")
    for index, row in screen_df.iterrows():
        code = row["code"]
        grade_dict = dict()
        grade_dict["code"] = code
        # combined status score across the day/week/month macd statuses
        grade_dict["status"] = 0
        grade_dict["status"] += _status_grade(row["d_m_status"])
        grade_dict["status"] += _status_grade(row["w_m_status"])
        grade_dict["status"] += _status_grade(row["m_m_status"])
        code_prefix = code[0:3]
        code_group_path = '/' + code_prefix + '/' + code
        # robustness fix: guard against codes missing from the share file
        # (same guard the sibling functions use) instead of raising KeyError
        if f_share.get(code_group_path) is None:
            console.write_msg(code + "的detail数据不存在")
            continue
        for ktype in ["D", "30"]:
            detail_ds_name = ktype
            index_ds_name = conf.HDF5_INDEX_DETAIL + "_" + ktype
            if f_share[code_group_path].get(detail_ds_name) is None:
                console.write_msg(code + "的detail数据不存在")
                continue
            if f_share[code_group_path].get(index_ds_name) is None:
                console.write_msg(code + "的index数据不存在")
                continue
            detail_df = tool.df_from_dataset(f_share[code_group_path],
                                             detail_ds_name, None)
            index_df = tool.df_from_dataset(f_share[code_group_path],
                                            index_ds_name, None)
            latest_price = detail_df.tail(1)["close"].values[0]
            latest_macd = index_df.tail(1)["macd"].values[0]
            diverse_price_start = row[str.lower(ktype) +
                                      INDEX_MACD_DIVERSE_PRICE_START]
            if diverse_price_start == 0:
                # no divergence start recorded: zero out the distance fields
                grade_dict[str.lower(ktype) + "_price_space"] = 0
                grade_dict[str.lower(ktype) + "_price_per"] = 0
            else:
                grade_dict[str.lower(ktype) + "_price_space"] = round(
                    diverse_price_start - latest_price, 2)
                grade_dict[str.lower(ktype) + "_price_per"] = round(
                    grade_dict[str.lower(ktype) + "_price_space"] * 100 /
                    diverse_price_start, 2)
            grade_dict[str.lower(ktype) + "_macd"] = latest_macd
        grade_df = grade_df.append(grade_dict, ignore_index=True)
    if f[conf.STRATEGY_TREND_AND_REVERSE].get(conf.SCREEN_SHARE_GRADE) is None:
        f[conf.STRATEGY_TREND_AND_REVERSE].create_group(
            conf.SCREEN_SHARE_GRADE)
    # replace any previous grade dataset for this date
    tool.delete_dataset(
        f[conf.STRATEGY_TREND_AND_REVERSE][conf.SCREEN_SHARE_GRADE], today_str)
    tool.merge_df_dataset(
        f[conf.STRATEGY_TREND_AND_REVERSE][conf.SCREEN_SHARE_GRADE], today_str,
        grade_df)
    f_share.close()
    f.close()
    console.write_tail()
    return