def get_trading_signals_figure(
    order_reader: OrderReader,
    entity_id: str,
    start_timestamp=None,
    end_timestamp=None,
    adjust_type=None,
):
    """Draw a kline chart for *entity_id* annotated with the orders from *order_reader*.

    :param order_reader: reader holding the order records (provides level, range and data_df)
    :param entity_id: the single entity to chart
    :param start_timestamp: kdata window start; defaults to the order reader's start
    :param end_timestamp: kdata window end; defaults to the order reader's end
    :param adjust_type: price adjustment type passed through to the kdata schema
    :return: the plotly figure produced by Drawer.draw_kline (not shown)
    """
    entity_type, _, _ = decode_entity_id(entity_id)
    data_schema = get_kdata_schema(entity_type=entity_type, level=order_reader.level, adjust_type=adjust_type)

    # Default the kdata window to the order reader's own range.
    if not start_timestamp:
        start_timestamp = order_reader.start_timestamp
    if not end_timestamp:
        end_timestamp = order_reader.end_timestamp

    kdata_reader = DataReader(
        entity_ids=[entity_id],
        data_schema=data_schema,
        entity_schema=zvt_context.tradable_schema_map.get(entity_type),
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        level=order_reader.level,
    )

    # generate the annotation df: refresh orders, keep only this entity's rows
    order_reader.move_on(timeout=0)
    df = order_reader.data_df.copy()
    df = df[df.entity_id == entity_id].copy()
    if pd_is_not_null(df):
        df["value"] = df["order_price"]
        # fix: pass the functions directly instead of wrapping them in lambdas
        df["flag"] = df["order_type"].apply(order_type_flag)
        df["color"] = df["order_type"].apply(order_type_color)
    # fix: removed leftover debug print(df.tail())

    drawer = Drawer(main_df=kdata_reader.data_df, annotation_df=df)
    return drawer.draw_kline(show=False, height=800)
def __init__(self, force_update=True, sleeping_time=10, exchanges=None, entity_ids=None, code=None, codes=None,
             day_data=False, entity_filters=None, ignore_failed=True, real_time=False, fix_duplicate_way='ignore',
             start_timestamp=None, end_timestamp=None, level=IntervalLevel.LEVEL_1DAY, kdata_use_begin_time=False,
             one_day_trading_minutes=24 * 60, adjust_type=AdjustType.qfq) -> None:
    """Initialize a kdata recorder bound to the schema for this entity type, level and adjust type.

    Coerces ``level`` and ``adjust_type`` to their enum types so string values are accepted,
    then forwards everything else positionally to the base recorder.
    """
    # Accept plain strings for level/adjust_type by normalizing to the enums.
    level = IntervalLevel(level)
    self.adjust_type = AdjustType(adjust_type)
    # Entity type is derived from the class-level entity_schema (declared on subclasses).
    self.entity_type = self.entity_schema.__name__.lower()
    self.data_schema = get_kdata_schema(entity_type=self.entity_type, level=level, adjust_type=self.adjust_type)
    # NOTE(review): positional pass-through — argument order must match the base __init__; confirm on upgrade.
    super().__init__(force_update, sleeping_time, exchanges, entity_ids, code, codes, day_data, entity_filters,
                     ignore_failed, real_time, fix_duplicate_way, start_timestamp, end_timestamp, level,
                     kdata_use_begin_time, one_day_trading_minutes)
def get_performance_stats(
    entity_type="stock",
    start_timestamp=None,
    end_timestamp=None,
    adjust_type: Union[AdjustType, str] = None,
    data_provider=None,
    changes=((-1, -0.5), (-0.5, -0.2), (-0.2, 0), (0, 0.2), (0.2, 0.5), (0.5, 1), (1, 1000)),
):
    """Bucket entities by close-price change over the window and count each bucket.

    :param changes: (low, high) half-open ranges; each produces a ``pct_{low}_{high}`` key
    :return: dict mapping range key -> entity count, or None when no score data exists
    """
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    score_df, _ = get_top_entities(
        data_schema=schema,
        column="close",
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        pct=1,
        method=WindowMethod.change,
        return_type=TopType.positive,
        data_provider=data_provider,
    )
    if not pd_is_not_null(score_df):
        return None

    scores = score_df["score"]
    # Each bucket is [low, high): count rows whose score falls inside it.
    return {
        f"pct_{low}_{high}": len(score_df[(scores >= low) & (scores < high)])
        for low, high in changes
    }
def get_top_volume_entities(
    entity_type="stock",
    entity_ids=None,
    start_timestamp=None,
    end_timestamp=None,
    pct=0.1,
    return_type=TopType.positive,
    adjust_type: Union[AdjustType, str] = None,
    method=WindowMethod.avg,
    data_provider=None,
):
    """Rank entities by turnover over the window and return the top *pct* slice.

    :param entity_ids: optional whitelist restricting the candidate entities
    :param method: aggregation applied to turnover over the window (default: average)
    """
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    # Restrict to the requested entities when a whitelist was given.
    id_filters = [schema.entity_id.in_(entity_ids)] if entity_ids else None

    top_df, _ = get_top_entities(
        data_schema=schema,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        column="turnover",
        pct=pct,
        method=method,
        return_type=return_type,
        filters=id_filters,
        data_provider=data_provider,
    )
    return top_df
def get_performance(
    entity_ids,
    start_timestamp=None,
    end_timestamp=None,
    adjust_type: Union[AdjustType, str] = None,
    data_provider=None,
):
    """Compute close-price change scores for the given entities over the window.

    The entity type is inferred from the first id; all ids are assumed to share it.
    """
    entity_type = decode_entity_id(entity_ids[0])[0]
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    score_df, _ = get_top_entities(
        data_schema=schema,
        column="close",
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        pct=1,
        method=WindowMethod.change,
        return_type=TopType.positive,
        kdata_filters=[schema.entity_id.in_(entity_ids)],
        data_provider=data_provider,
    )
    return score_df
def __init__(self, entity_schema: Type[TradableEntity] = Stock, provider: str = None, entity_provider: str = None,
             entity_ids: List[str] = None, exchanges: List[str] = None, codes: List[str] = None,
             start_timestamp: Union[str, pd.Timestamp] = None, end_timestamp: Union[str, pd.Timestamp] = None,
             columns: List = None, filters: List = None, order: object = None, limit: int = None,
             level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY, category_field: str = 'entity_id',
             time_field: str = 'timestamp', computing_window: int = None, keep_all_timestamp: bool = False,
             fill_method: str = 'ffill', effective_number: int = None, transformer: Transformer = None,
             accumulator: Accumulator = None, need_persist: bool = False, only_compute_factor: bool = False,
             factor_name: str = None, clear_state: bool = False, only_load_factor: bool = False,
             adjust_type: Union[AdjustType, str] = None) -> None:
    """Initialize a technical factor bound to the kdata schema for the entity/level/adjust type.

    Default columns cover the standard OHLCV fields; stocks default to hfq
    (post-adjusted) prices when no adjust type is given.
    """
    if columns is None:
        columns = [
            'id', 'entity_id', 'timestamp', 'level', 'open', 'close', 'high', 'low', 'volume'
        ]

    # Stocks default to post-adjusted (hfq) prices.
    if entity_schema == Stock and not adjust_type:
        adjust_type = AdjustType.hfq
    self.adjust_type = adjust_type
    self.data_schema = get_kdata_schema(entity_schema.__name__, level=level, adjust_type=adjust_type)

    if not factor_name:
        # fix: isinstance instead of type(...) == str
        if isinstance(level, str):
            factor_name = f'{type(self).__name__.lower()}_{level}'
        else:
            factor_name = f'{type(self).__name__.lower()}_{level.value}'

    super().__init__(self.data_schema, entity_schema, provider, entity_provider, entity_ids, exchanges, codes,
                     start_timestamp, end_timestamp, columns, filters, order, limit, level, category_field,
                     time_field, computing_window, keep_all_timestamp, fill_method, effective_number, transformer,
                     accumulator, need_persist, only_compute_factor, factor_name, clear_state, only_load_factor)
def get_performance(entity_ids, start_timestamp=None, end_timestamp=None,
                    adjust_type: Union[AdjustType, str] = None):
    """Compute close-price change scores for the given entities over the window.

    Stocks default to hfq (post-adjusted) prices when no adjust type is given.
    """
    entity_type = decode_entity_id(entity_ids[0])[0]
    if entity_type == 'stock' and not adjust_type:
        adjust_type = AdjustType.hfq
    schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    score_df, _ = get_top_entities(
        data_schema=schema,
        column='close',
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        pct=1,
        method=WindowMethod.change,
        return_type=TopType.positive,
        filters=[schema.entity_id.in_(entity_ids)],
    )
    return score_df
def __init__(
    self,
    force_update=True,
    sleeping_time=10,
    exchanges=None,
    entity_id=None,
    entity_ids=None,
    code=None,
    codes=None,
    day_data=False,
    entity_filters=None,
    ignore_failed=True,
    real_time=False,
    fix_duplicate_way="ignore",
    start_timestamp=None,
    end_timestamp=None,
    level=IntervalLevel.LEVEL_1DAY,
    kdata_use_begin_time=False,
    one_day_trading_minutes=24 * 60,
    adjust_type=AdjustType.qfq,
) -> None:
    """Initialize a joinquant stock kdata recorder and authenticate with jqdatasdk.

    Normalizes ``level``/``adjust_type`` to enums, binds the stock kdata schema,
    and logs in with credentials from zvt_config as a side effect.
    """
    # Accept plain strings by normalizing to the enums.
    level = IntervalLevel(level)
    adjust_type = AdjustType(adjust_type)
    self.data_schema = get_kdata_schema(entity_type="stock", level=level, adjust_type=adjust_type)
    # Map zvt's interval level onto joinquant's trading-level string.
    self.jq_trading_level = to_jq_trading_level(level)
    # NOTE(review): positional pass-through — order must match the base __init__; confirm on upgrade.
    super().__init__(
        force_update,
        sleeping_time,
        exchanges,
        entity_id,
        entity_ids,
        code,
        codes,
        day_data,
        entity_filters,
        ignore_failed,
        real_time,
        fix_duplicate_way,
        start_timestamp,
        end_timestamp,
        level,
        kdata_use_begin_time,
        one_day_trading_minutes,
    )
    self.adjust_type = adjust_type
    # Side effect: force a fresh joinquant login with configured credentials.
    get_token(zvt_config["jq_username"], zvt_config["jq_password"], force=True)
def get_top_performance_entities(
    entity_type="stock",
    start_timestamp=None,
    end_timestamp=None,
    pct=0.1,
    return_type=None,
    adjust_type: Union[AdjustType, str] = None,
    entity_filters=None,
    kdata_filters=None,
    show_name=False,
    list_days=None,
    entity_provider=None,
    data_provider=None,
):
    """Return the top/bottom performers by close-price change over the window.

    :param entity_filters: extra filters applied when selecting candidate entities
    :param kdata_filters: extra filters applied to the kdata query
    :param list_days: when set, only entities listed at least this many days
        before ``start_timestamp`` are considered
    :return: (positive_df, negative_df) from get_top_entities, or (None, None)
        when no entity passes the filters
    """
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    data_schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    if not entity_filters:
        entity_filters = []
    if list_days:
        entity_schema = get_entity_schema(entity_type=entity_type)
        list_date = next_date(start_timestamp, -list_days)
        # fix: build a new list instead of += so a caller-supplied list is not mutated in place
        entity_filters = entity_filters + [entity_schema.list_date <= list_date]

    filter_entities = get_entity_ids(
        provider=entity_provider,
        entity_type=entity_type,
        filters=entity_filters,
    )
    if not filter_entities:
        # fix: plain string — the f-prefix had no placeholders
        logger.warning("no entities selected")
        return None, None

    if not kdata_filters:
        kdata_filters = []
    kdata_filters = kdata_filters + [data_schema.entity_id.in_(filter_entities)]

    return get_top_entities(
        data_schema=data_schema,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        column="close",
        pct=pct,
        method=WindowMethod.change,
        return_type=return_type,
        kdata_filters=kdata_filters,
        show_name=show_name,
        data_provider=data_provider,
    )
def get_top_volume_entities(entity_type='stock', entity_ids=None, start_timestamp=None, end_timestamp=None,
                            pct=0.1, return_type=TopType.positive, adjust_type: Union[AdjustType, str] = None,
                            method=WindowMethod.avg):
    """Rank entities by turnover over the window and return the top *pct* slice.

    Stocks default to hfq (post-adjusted) prices when no adjust type is given.
    """
    if entity_type == 'stock' and not adjust_type:
        adjust_type = AdjustType.hfq
    schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    # Restrict to the requested entities when a whitelist was given.
    id_filters = [schema.entity_id.in_(entity_ids)] if entity_ids else None

    top_df, _ = get_top_entities(
        data_schema=schema,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        column='turnover',
        pct=pct,
        method=method,
        return_type=return_type,
        filters=id_filters,
    )
    return top_df
def get_top_performance_entities(
    entity_type="stock",
    start_timestamp=None,
    end_timestamp=None,
    pct=0.1,
    return_type=None,
    adjust_type: Union[AdjustType, str] = None,
    filters=None,
    show_name=False,
    list_days=None,
    entity_provider=None,
    data_provider=None,
):
    """Return top/bottom performers by close-price change over the window.

    :param list_days: when set, entities listed fewer than this many days
        before ``start_timestamp`` are excluded from the ranking
    """
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    data_schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    if list_days:
        # Exclude recently-listed entities: anything whose list date falls
        # inside the look-back window is dropped via a NOT IN filter.
        entity_schema = get_entity_schema(entity_type=entity_type)
        list_date = next_date(start_timestamp, -list_days)
        ignore_entities = get_entity_ids(
            provider=entity_provider,
            entity_type=entity_type,
            filters=[entity_schema.list_date >= list_date],
        )
        if ignore_entities:
            logger.info(f"ignore size: {len(ignore_entities)}")
            logger.info(f"ignore entities: {ignore_entities}")
            exclusion = [data_schema.entity_id.notin_(ignore_entities)]
            filters = (filters + exclusion) if filters else exclusion

    return get_top_entities(
        data_schema=data_schema,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        column="close",
        pct=pct,
        method=WindowMethod.change,
        return_type=return_type,
        filters=filters,
        show_name=show_name,
        data_provider=data_provider,
    )
def compare(entity_ids, schema_map_columns: dict = None, chart_type: ChartType = ChartType.line):
    """Compare entities either by chosen schema columns (line/area/scatter chart)
    or, by default, by their kdata as a kline chart with a turnover sub-plot.

    :param schema_map_columns: mapping of schema -> list of column names to plot
    """
    grouped = _group_entity_ids(entity_ids=entity_ids)
    frames = []

    if schema_map_columns:
        # Query each requested schema per entity type, keeping only the chosen columns.
        for entity_type in grouped:
            ids = grouped.get(entity_type)
            for schema in schema_map_columns:
                query_columns = ["entity_id", "timestamp"] + schema_map_columns.get(schema)
                frames.append(schema.query_data(entity_ids=ids, columns=query_columns))
        merged = pd.concat(frames)
        Drawer(main_df=merged).draw(main_chart=chart_type, show=True)
    else:
        # Default: full kdata per entity type, drawn as kline with turnover below.
        for entity_type in grouped:
            kdata_schema = get_kdata_schema(entity_type=entity_type)
            frames.append(kdata_schema.query_data(entity_ids=grouped.get(entity_type)))
        merged = pd.concat(frames)
        turnover_df = merged[["entity_id", "timestamp", "turnover"]].copy()
        Drawer(main_df=merged, sub_df_list=[turnover_df]).draw_kline(show=True)
def get_entity_list_by_cap(timestamp, cap_start, cap_end, entity_type="stock", provider=None,
                           adjust_type=None, retry_times=20):
    """Return entity ids whose market cap on *timestamp* lies in [cap_start, cap_end].

    Cap is derived as turnover / turnover_rate. When no kdata exists for the
    day (e.g. non-trading day), retries the next calendar day up to
    *retry_times* times before giving up with an empty list.
    """
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    kdata_schema = get_kdata_schema(entity_type, level=IntervalLevel.LEVEL_1DAY, adjust_type=adjust_type)

    df = kdata_schema.query_data(
        provider=provider,
        filters=[kdata_schema.timestamp == to_pd_timestamp(timestamp)],
        index="entity_id",
    )

    # No data for this day: advance one day and retry, bounded by retry_times.
    if not pd_is_not_null(df):
        if retry_times == 0:
            return []
        return get_entity_list_by_cap(
            timestamp=next_date(timestamp, 1),
            cap_start=cap_start,
            cap_end=cap_end,
            entity_type=entity_type,
            provider=provider,
            adjust_type=adjust_type,
            retry_times=retry_times - 1,
        )

    # Market cap from daily turnover and turnover rate.
    df["cap"] = df["turnover"] / df["turnover_rate"]
    selected = df.copy()
    if cap_start:
        selected = selected.loc[(df["cap"] >= cap_start)]
    if cap_end:
        selected = selected.loc[(df["cap"] <= cap_end)]
    return selected.index.tolist()
def get_top_performance_entities(entity_type='stock', start_timestamp=None, end_timestamp=None, pct=0.1,
                                 return_type=None, adjust_type: Union[AdjustType, str] = None, filters=None,
                                 show_name=False, list_days=None):
    """Return top/bottom performers by close-price change over the window.

    Stocks default to hfq prices; with *list_days* set, entities listed fewer
    than that many days before the window start are excluded.
    """
    if entity_type == 'stock' and not adjust_type:
        adjust_type = AdjustType.hfq
    data_schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)

    if list_days:
        # Drop recently-listed entities via a NOT IN filter on entity_id.
        entity_schema = get_entity_schema(entity_type=entity_type)
        list_date = next_date(start_timestamp, -list_days)
        ignore_entities = get_entity_ids(entity_type=entity_type,
                                         filters=[entity_schema.list_date >= list_date])
        if ignore_entities:
            logger.info(f'ignore size: {len(ignore_entities)}')
            logger.info(f'ignore entities: {ignore_entities}')
            exclusion = [data_schema.entity_id.notin_(ignore_entities)]
            filters = (filters + exclusion) if filters else exclusion

    return get_top_entities(
        data_schema=data_schema,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        column='close',
        pct=pct,
        method=WindowMethod.change,
        return_type=return_type,
        filters=filters,
        show_name=show_name,
    )
def compare(
    entity_ids=None,
    codes=None,
    schema=None,
    columns=None,
    schema_map_columns: dict = None,
    chart_type: ChartType = ChartType.line,
    start_timestamp=None,
    scale_value: int = None,
):
    """
    compare indicators(columns) of entities

    :param entity_ids: entity ids to compare
    :param codes: entity codes to compare
    :param schema: single schema to query when schema_map_columns is not given
    :param columns: columns of ``schema`` to plot (used with ``schema``)
    :param schema_map_columns: key represents schema, value represents columns
    :param chart_type: "line", "area", "scatter", default "line"
    :param start_timestamp: start timestamp for the query
    :param scale_value: compare with same value which scaled to scale_value
    """
    dfs = []
    # default compare kdata
    if schema_map_columns is None and schema is None:
        # Group ids by entity type so each type queries its own kdata schema.
        entity_type_map_ids = _group_entity_ids(entity_ids=entity_ids)
        for entity_type in entity_type_map_ids:
            schema = get_kdata_schema(entity_type=entity_type)
            df = schema.query_data(entity_ids=entity_type_map_ids.get(entity_type),
                                   start_timestamp=start_timestamp)
            dfs.append(df)
        all_df = pd.concat(dfs)
        # Kline chart with a turnover sub-plot underneath.
        drawer = Drawer(main_df=all_df,
                        sub_df_list=[all_df[["entity_id", "timestamp", "turnover"]].copy()])
        drawer.draw_kline(show=True, scale_value=scale_value)
    else:
        if schema_map_columns:
            # One query per schema, keeping only the requested columns.
            for schema in schema_map_columns:
                columns = ["entity_id", "timestamp"] + schema_map_columns.get(schema)
                df = schema.query_data(entity_ids=entity_ids, codes=codes, columns=columns,
                                       start_timestamp=start_timestamp)
                dfs.append(df)
        elif schema:
            # Single schema with caller-provided columns.
            columns = ["entity_id", "timestamp"] + columns
            df = schema.query_data(entity_ids=entity_ids, codes=codes, columns=columns,
                                   start_timestamp=start_timestamp)
            dfs.append(df)
        all_df = pd.concat(dfs)
        drawer = Drawer(main_df=all_df)
        drawer.draw(main_chart=chart_type, show=True, scale_value=scale_value)
def report_targets(
    factor_cls: Type[Factor],
    entity_provider,
    data_provider,
    title,
    entity_type="stock",
    informer: EmailInformer = None,
    em_group=None,
    em_group_over_write=True,
    filter_by_volume=True,
    adjust_type=None,
    start_timestamp="2019-01-01",
    **factor_kv,
):
    """Run a factor-based selection for the latest kdata date and report the long targets.

    Builds a candidate pool (optionally filtered by top volume and by turnover
    thresholds passed in ``factor_kv``), runs ``factor_cls`` through a
    TargetSelector, and informs the selected long targets. Retries the whole
    pipeline up to 10 times on error, sleeping 3 minutes between attempts.

    :param factor_kv: extra kwargs for the factor; special keys
        ``turnover_threshold``/``turnover_rate_threshold`` filter candidates and
        ``entity_ids`` (popped) intersects the candidate pool.
    """
    logger.info(
        f"entity_provider: {entity_provider}, data_provider: {data_provider}, entity_type: {entity_type}, start_timestamp: {start_timestamp}"
    )
    error_count = 0

    while error_count <= 10:
        try:
            if not adjust_type:
                adjust_type = default_adjust_type(entity_type=entity_type)
            target_date = get_latest_kdata_date(provider=data_provider, entity_type=entity_type,
                                                adjust_type=adjust_type)
            logger.info(f"target_date :{target_date}")

            current_entity_pool = None
            if filter_by_volume:
                # Volume filter: keep the top 40% by average turnover over the last 30 days.
                vol_df = get_top_volume_entities(
                    entity_type=entity_type,
                    start_timestamp=next_date(target_date, -30),
                    end_timestamp=target_date,
                    adjust_type=adjust_type,
                    pct=0.4,
                    data_provider=data_provider,
                )
                current_entity_pool = vol_df.index.tolist()
                logger.info(
                    f"current_entity_pool({len(current_entity_pool)}): {current_entity_pool}"
                )

            kdata_schema = get_kdata_schema(entity_type, level=IntervalLevel.LEVEL_1DAY,
                                            adjust_type=adjust_type)
            filters = []
            if "turnover_threshold" in factor_kv:
                filters = filters + [
                    kdata_schema.turnover >= factor_kv.get("turnover_threshold")
                ]
            if "turnover_rate_threshold" in factor_kv:
                filters = filters + [
                    kdata_schema.turnover_rate >= factor_kv.get("turnover_rate_threshold")
                ]
            if filters:
                # Apply the turnover filters on the target date and intersect with the pool.
                filters = filters + [kdata_schema.timestamp == target_date]
                kdata_df = kdata_schema.query_data(
                    provider=data_provider, filters=filters,
                    columns=["entity_id", "timestamp"], index="entity_id")
                if current_entity_pool:
                    current_entity_pool = set(current_entity_pool) & set(
                        kdata_df.index.tolist())
                else:
                    current_entity_pool = kdata_df.index.tolist()

            if "entity_ids" in factor_kv:
                # Explicit entity whitelist (popped so it is not forwarded to the factor).
                if current_entity_pool:
                    current_entity_pool = set(current_entity_pool) & set(
                        factor_kv.pop("entity_ids"))
                else:
                    current_entity_pool = set(factor_kv.pop("entity_ids"))

            # add the factor
            my_selector = TargetSelector(start_timestamp=start_timestamp, end_timestamp=target_date,
                                         select_mode=SelectMode.condition_or)
            entity_schema = get_entity_schema(entity_type=entity_type)
            tech_factor = factor_cls(
                entity_schema=entity_schema,
                entity_provider=entity_provider,
                provider=data_provider,
                entity_ids=current_entity_pool,
                start_timestamp=start_timestamp,
                end_timestamp=target_date,
                adjust_type=adjust_type,
                **factor_kv,
            )
            my_selector.add_factor(tech_factor)
            my_selector.run()
            long_stocks = my_selector.get_open_long_targets(timestamp=target_date)

            inform(
                informer,
                entity_ids=long_stocks,
                target_date=target_date,
                title=title,
                entity_provider=entity_provider,
                entity_type=entity_type,
                em_group=em_group,
                em_group_over_write=em_group_over_write,
            )
            break
        except Exception as e:
            logger.exception("report error:{}".format(e))
            time.sleep(60 * 3)
            error_count = error_count + 1
            if error_count == 10:
                # NOTE(review): raises AttributeError here if informer is None — confirm callers always pass one.
                informer.send_message(
                    zvt_config["email_username"],
                    f"report {entity_type}{factor_cls.__name__} error",
                    f"report {entity_type}{factor_cls.__name__} error: {e}",
                )
def report_top_entities(
    entity_provider,
    data_provider,
    periods=None,
    ignore_new_stock=True,
    ignore_st=True,
    entity_ids=None,
    entity_type="stock",
    adjust_type=None,
    top_count=30,
    turnover_threshold=100000000,
    turnover_rate_threshold=0.02,
    informer: EmailInformer = None,
    em_group=None,
    em_group_over_write=True,
    return_type=TopType.positive,
):
    """Report the top (or bottom) performers over several look-back periods.

    For each period, ranks the filtered entities by close-price change ending at
    the latest kdata date, takes the first ``top_count`` of the requested side,
    de-duplicates across periods, and informs the result. Retries the whole
    pipeline up to 10 times on error with a 30s sleep between attempts.

    :param periods: look-back lengths in days; defaults to [7, 30, 365]
    :param turnover_threshold: minimum turnover on the target date
    :param turnover_rate_threshold: minimum turnover rate on the target date
    """
    error_count = 0

    while error_count <= 10:
        try:
            if periods is None:
                periods = [7, 30, 365]
            if not adjust_type:
                adjust_type = default_adjust_type(entity_type=entity_type)
            kdata_schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)
            entity_schema = get_entity_schema(entity_type=entity_type)
            target_date = get_latest_kdata_date(provider=data_provider, entity_type=entity_type,
                                                adjust_type=adjust_type)

            # Base entity universe: optionally drop ST names and recent listings.
            filter_entity_ids = get_entity_ids_by_filter(
                provider=entity_provider,
                ignore_st=ignore_st,
                ignore_new_stock=ignore_new_stock,
                entity_schema=entity_schema,
                target_date=target_date,
                entity_ids=entity_ids,
            )
            if not filter_entity_ids:
                msg = f"{entity_type} no entity_ids selected"
                logger.error(msg)
                informer.send_message(zvt_config["email_username"], "report_top_stats error", msg)
                return

            # Liquidity filter on the target date: turnover and turnover rate thresholds.
            filter_turnover_df = kdata_schema.query_data(
                filters=[
                    kdata_schema.turnover >= turnover_threshold,
                    kdata_schema.turnover_rate >= turnover_rate_threshold,
                ],
                provider=data_provider,
                start_timestamp=target_date,
                index="entity_id",
                columns=["entity_id", "code"],
            )
            if filter_entity_ids:
                filter_entity_ids = set(filter_entity_ids) & set(
                    filter_turnover_df.index.tolist())
            else:
                filter_entity_ids = filter_turnover_df.index.tolist()

            if not filter_entity_ids:
                msg = f"{entity_type} no entity_ids selected"
                logger.error(msg)
                informer.send_message(zvt_config["email_username"], "report_top_stats error", msg)
                return

            logger.info(
                f"{entity_type} filter_entity_ids size: {len(filter_entity_ids)}"
            )
            filters = [kdata_schema.entity_id.in_(filter_entity_ids)]
            selected = []
            for i, period in enumerate(periods):
                interval = period
                # Stretch short intervals over the weekend so the window has trading days.
                if target_date.weekday() + 1 < interval:
                    interval = interval + 2
                start = next_date(target_date, -interval)
                positive_df, negative_df = get_top_performance_entities(
                    entity_type=entity_type,
                    start_timestamp=start,
                    kdata_filters=filters,
                    pct=1,
                    show_name=True,
                    entity_provider=entity_provider,
                    data_provider=data_provider,
                    return_type=return_type,
                )

                if return_type == TopType.positive:
                    df = positive_df
                else:
                    df = negative_df
                selected = selected + df.index[:top_count].tolist()

            # De-duplicate across periods while preserving first-seen order.
            selected = list(dict.fromkeys(selected))

            inform(
                informer,
                entity_ids=selected,
                target_date=target_date,
                title=f"{entity_type} {em_group}({len(selected)})",
                entity_provider=entity_provider,
                entity_type=entity_type,
                em_group=em_group,
                em_group_over_write=em_group_over_write,
            )
            break
        except Exception as e:
            logger.exception("report error:{}".format(e))
            time.sleep(30)
            error_count = error_count + 1
def __init__(
    self,
    entity_schema: Type[TradableEntity] = Stock,
    provider: str = None,
    entity_provider: str = None,
    entity_ids: List[str] = None,
    exchanges: List[str] = None,
    codes: List[str] = None,
    start_timestamp: Union[str, pd.Timestamp] = None,
    end_timestamp: Union[str, pd.Timestamp] = None,
    columns: List = None,
    filters: List = None,
    order: object = None,
    limit: int = None,
    level: Union[str, IntervalLevel] = IntervalLevel.LEVEL_1DAY,
    category_field: str = "entity_id",
    time_field: str = "timestamp",
    computing_window: int = None,
    keep_all_timestamp: bool = False,
    fill_method: str = "ffill",
    effective_number: int = None,
    transformer: Transformer = None,
    accumulator: Accumulator = None,
    need_persist: bool = False,
    only_compute_factor: bool = False,
    factor_name: str = None,
    clear_state: bool = False,
    only_load_factor: bool = False,
    adjust_type: Union[AdjustType, str] = None,
) -> None:
    """Initialize a technical factor bound to the kdata schema for the entity/level/adjust type.

    Default columns cover OHLCV plus turnover fields; the adjust type defaults
    per entity type via default_adjust_type. The factor name defaults to
    ``<classname>_<level>`` when not given.
    """
    if columns is None:
        columns = [
            "id",
            "entity_id",
            "timestamp",
            "level",
            "open",
            "close",
            "high",
            "low",
            "volume",
            "turnover",
            "turnover_rate",
        ]

    # Stocks default to post-adjusted (hfq) prices.
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_schema.__name__)
    self.adjust_type = adjust_type
    self.data_schema = get_kdata_schema(entity_schema.__name__, level=level, adjust_type=adjust_type)

    if not factor_name:
        # fix: isinstance instead of type(...) == str
        if isinstance(level, str):
            factor_name = f"{type(self).__name__.lower()}_{level}"
        else:
            factor_name = f"{type(self).__name__.lower()}_{level.value}"

    super().__init__(
        self.data_schema,
        entity_schema,
        provider,
        entity_provider,
        entity_ids,
        exchanges,
        codes,
        start_timestamp,
        end_timestamp,
        columns,
        filters,
        order,
        limit,
        level,
        category_field,
        time_field,
        computing_window,
        keep_all_timestamp,
        fill_method,
        effective_number,
        transformer,
        accumulator,
        need_persist,
        only_compute_factor,
        factor_name,
        clear_state,
        only_load_factor,
    )
def report_top_stats(
    entity_provider,
    data_provider,
    periods=None,
    ignore_new_stock=True,
    entity_type="stock",
    adjust_type=None,
    top_count=30,
    turnover_threshold=100000000,
    turnover_rate_threshold=0.02,
    em_group_over_write=True,
):
    """Email performance statistics (describe/top gainers/top losers) for several periods.

    For each look-back period, ranks the filtered entities by close-price change,
    tabulates the best and worst ``top_count`` plus summary stats, pushes the
    7/30-day winners and 365-day losers to eastmoney groups, and emails three
    reports (stats, gainers, losers).

    :param periods: look-back lengths in days; defaults to [7, 30, 180, 365]
    :param ignore_new_stock: when True, only entities listed at least one year
    :param turnover_threshold: minimum turnover on the latest day
    :param turnover_rate_threshold: minimum turnover rate on the latest day
    """
    # fix: mutable default argument replaced with None sentinel
    if periods is None:
        periods = [7, 30, 180, 365]
    if not adjust_type:
        adjust_type = default_adjust_type(entity_type=entity_type)
    kdata_schema = get_kdata_schema(entity_type=entity_type, adjust_type=adjust_type)
    entity_schema = get_entity_schema(entity_type=entity_type)

    # Anchor all windows at the latest recorded kdata day.
    latest_day = kdata_schema.query_data(provider=data_provider, order=kdata_schema.timestamp.desc(),
                                         limit=1, return_type="domain")
    current_timestamp = latest_day[0].timestamp
    email_action = EmailInformer()

    # Require at least one year since listing.
    filter_entity_ids = []
    if ignore_new_stock:
        pre_year = next_date(current_timestamp, -365)
        entity_ids = get_entity_ids(
            provider=entity_provider,
            entity_schema=entity_schema,
            filters=[entity_schema.timestamp <= pre_year])
        if not entity_ids:
            msg = f"{entity_type} no entity_ids listed one year"
            logger.error(msg)
            email_action.send_message(zvt_config["email_username"], "report_top_stats error", msg)
            return
        filter_entity_ids = entity_ids

    # Liquidity filter on the latest day.
    filter_turnover_df = kdata_schema.query_data(
        filters=[
            kdata_schema.turnover >= turnover_threshold,
            kdata_schema.turnover_rate >= turnover_rate_threshold,
        ],
        provider=data_provider,
        start_timestamp=current_timestamp,
        index="entity_id",
        columns=["entity_id", "code"],
    )
    if filter_entity_ids:
        filter_entity_ids = set(filter_entity_ids) & set(filter_turnover_df.index.tolist())
    else:
        filter_entity_ids = filter_turnover_df.index.tolist()

    if not filter_entity_ids:
        msg = f"{entity_type} no entity_ids selected"
        logger.error(msg)
        email_action.send_message(zvt_config["email_username"], "report_top_stats error", msg)
        return

    logger.info(
        f"{entity_type} filter_entity_ids size: {len(filter_entity_ids)}")
    filters = [kdata_schema.entity_id.in_(filter_entity_ids)]

    stats = []
    ups = []
    downs = []
    for period in periods:
        start = next_date(current_timestamp, -period)
        df, _ = get_top_performance_entities(
            entity_type=entity_type,
            start_timestamp=start,
            filters=filters,
            pct=1,
            show_name=True,
            entity_provider=entity_provider,
            data_provider=data_provider,
        )
        df.rename(columns={"score": f"score_{period}"}, inplace=True)
        ups.append(tabulate(df.iloc[:top_count], headers="keys"))
        downs.append(tabulate(df.iloc[-top_count:], headers="keys"))
        stats.append(tabulate(df.describe(), headers="keys"))

        # Push the last week's/month's best performers to the eastmoney group.
        if period == 7 or period == 30:
            try:
                codes = [
                    decode_entity_id(entity_id)[2]
                    for entity_id in df.index[:top_count]
                ]
                add_to_eastmoney(codes=codes, entity_type=entity_type, group="最靓仔",
                                 over_write=em_group_over_write)
            except Exception as e:
                logger.exception(e)
                # fix: plain strings — the f-prefixes had no placeholders
                email_action.send_message(
                    zvt_config["email_username"], "report_top_stats error",
                    "report_top_stats error:{}".format(e))

        # Push the year's biggest losers to the eastmoney group.
        if period == 365:
            try:
                codes = [
                    decode_entity_id(entity_id)[2]
                    for entity_id in df.index[-top_count:]
                ]
                add_to_eastmoney(codes=codes, entity_type=entity_type, group="谁有我惨",
                                 over_write=em_group_over_write)
            except Exception as e:
                logger.exception(e)
                email_action.send_message(
                    zvt_config["email_username"], "report_top_stats error",
                    "report_top_stats error:{}".format(e))

    # Email the three reports: summary stats, gainers, losers.
    msg = "\n"
    for s in stats:
        msg = msg + s + "\n"
    email_action.send_message(zvt_config["email_username"],
                              f"{current_timestamp} {entity_type}统计报告", msg)

    msg = "\n"
    for up in ups:
        msg = msg + up + "\n"
    email_action.send_message(zvt_config["email_username"],
                              f"{current_timestamp} {entity_type}涨幅统计报告", msg)

    msg = "\n"
    for down in downs:
        msg = msg + down + "\n"
    email_action.send_message(zvt_config["email_username"],
                              f"{current_timestamp} {entity_type}跌幅统计报告", msg)