def get_stock_valuation_data(provider: Provider, sleep, desc, pc, lock, region, batch):
    # per-stock valuation data
    StockValuation.record_data(provider=provider,
                               share_para=(desc, pc, lock, True, region),
                               sleeping_time=sleep,
                               batch_size=batch)
def record_valuation():
    while True:
        email_action = EmailInformer()

        try:
            StockValuation.record_data(provider='joinquant', sleeping_time=0, day_data=True)
            email_action.send_message(zvt_config['email_username'], 'joinquant record valuation finished', '')
            break
        except Exception as e:
            msg = f'joinquant record valuation error:{e}'
            logger.exception(msg)
            email_action.send_message(zvt_config['email_username'], 'joinquant record valuation error', msg)
            time.sleep(60)
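# Hedged usage sketch (not from the original source): record_valuation already retries
# internally, so it only needs to be kicked off periodically. APScheduler is one common
# choice; the trigger times below are illustrative assumptions, not the project's setup.
from apscheduler.schedulers.blocking import BlockingScheduler

sched = BlockingScheduler()
# run once per weekday after the A-share market close (illustrative schedule)
sched.add_job(record_valuation, trigger='cron', day_of_week='mon-fri', hour=17, minute=0)
sched.start()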
def get_top_fund_holding_stocks(timestamp=None, pct=0.3, by=None):
    if not timestamp:
        timestamp = now_pd_timestamp()
    # Quarterly reports are usually published within 1 month after report_date,
    # semi-annual reports within 2 months, and annual reports within 4 months,
    # so take the most recent report date before the timestamp to make sure data exists.
    # The result therefore lags reality and only gives a rough picture:
    # being vaguely right beats being precisely wrong.
    report_date = get_recent_report_date(timestamp, 1)
    fund_cap_df = FundStock.query_data(
        filters=[
            FundStock.report_date >= report_date,
            FundStock.timestamp <= timestamp,
        ],
        columns=["stock_id", "market_cap"],
    )
    fund_cap_df = fund_cap_df.groupby("stock_id")["market_cap"].sum().sort_values(ascending=False)

    # rank directly by the market cap held by funds
    if not by:
        s = fund_cap_df.iloc[:int(len(fund_cap_df) * pct)]
        return s.to_frame()

    # as a proportion of the circulating market cap
    if by == "trading":
        columns = ["entity_id", "circulating_market_cap"]
    # as a proportion of the total market cap
    elif by == "all":
        columns = ["entity_id", "market_cap"]

    entity_ids = fund_cap_df.index.tolist()
    start_timestamp = next_date(timestamp, -30)
    cap_df = StockValuation.query_data(
        entity_ids=entity_ids,
        filters=[
            StockValuation.timestamp >= start_timestamp,
            StockValuation.timestamp <= timestamp,
        ],
        columns=columns,
    )
    if by == "trading":
        cap_df = cap_df.rename(columns={"circulating_market_cap": "cap"})
    elif by == "all":
        cap_df = cap_df.rename(columns={"market_cap": "cap"})
    cap_df = cap_df.groupby("entity_id").mean()

    result_df = pd.concat([cap_df, fund_cap_df], axis=1, join="inner")
    result_df["pct"] = result_df["market_cap"] / result_df["cap"]

    pct_df = result_df["pct"].sort_values(ascending=False)
    s = pct_df.iloc[:int(len(pct_df) * pct)]
    return s.to_frame()
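# Minimal usage sketch for get_top_fund_holding_stocks; the arguments are the ones
# defined in the signature above, the numbers are illustrative and assume fund and
# valuation data have already been recorded.
top_by_cap = get_top_fund_holding_stocks(pct=0.3)            # top 30% by market cap held by funds
top_by_float = get_top_fund_holding_stocks(pct=0.1, by="trading")  # top 10% by share of circulating cap
print(top_by_cap.head())
print(top_by_float.head())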
def record(self, entity, start, end, size, timestamps):
    if not end:
        end = to_time_str(now_pd_timestamp())
    start = to_time_str(start)
    em_code = to_em_entity_id(entity)
    columns_list = list(self.data_schema.get_data_map(self))
    df = pd.DataFrame()
    i = 0

    # planned holder decreases (减持)
    underweight_trade_day = c.css(
        em_code, "HOLDDECREASEANNCDATEPLAN",
        f"StartDate={start},EndDate={end},ispandas=1")
    if underweight_trade_day.HOLDDECREASEANNCDATEPLAN[0]:
        underweight_trade_day_list = underweight_trade_day.HOLDDECREASEANNCDATEPLAN[0].split(',')
        for end_date in underweight_trade_day_list:
            Underweight_data = c.css(
                em_code,
                "HOLDDECREASEANNCDATENEWPLAN,HOLDDECREASENAMENEWPLAN,DECRENEWPROGRESS,"
                "DECRENEWMAXSHANUM,DECRENEWMINSHANUM",
                f"EndDate={end_date},ispandas=1")
            values_data = StockValuation.query_data(
                entity_id=entity.id,
                limit=1,
                order=StockValuation.timestamp.desc(),
                end_timestamp=end_date,
                columns=["capitalization"])
            name_list = Underweight_data.HOLDDECREASENAMENEWPLAN.tolist()[0].split(',')
            max_shanum = Underweight_data.DECRENEWMAXSHANUM.tolist()[0].split(',') if \
                Underweight_data.DECRENEWMAXSHANUM.tolist()[0] else None
            min_shanum = Underweight_data.DECRENEWMINSHANUM.tolist()[0].split(',') if \
                Underweight_data.DECRENEWMINSHANUM.tolist()[0] else None
            progress_data = Underweight_data.DECRENEWPROGRESS.tolist()[0] if \
                Underweight_data.DECRENEWPROGRESS.tolist()[0] else None
            try:
                data_end = pd.DataFrame(
                    {
                        "holder_name": name_list,
                        "volume_plan_max": max_shanum,
                        "volume_plan_mix": min_shanum
                    },
                    index=[i for i in range(len(name_list))])
            except Exception:
                continue
            data_end['capitalization'] = values_data.capitalization.values[0]
            data_end = data_end.astype({
                'volume_plan_max': 'float64',
                'volume_plan_mix': 'float64'
            })
            data_end['change_pct'] = round(
                (data_end['volume_plan_max'] / data_end['capitalization']) * 100, 2)
            data_end['report_date'] = pd.to_datetime(end_date)
            data_end['plan_progress'] = progress_data if progress_data else None
            data_end['holder_direction'] = '减持'
            df = df.append(data_end)

    # planned holder increases (增持)
    overweight_trade_day = c.css(
        em_code, "HOLDINCREASEANNCDATEPLAN",
        f"StartDate={start},EndDate={end},ispandas=1")
    if overweight_trade_day.HOLDINCREASEANNCDATEPLAN[0]:
        overweight_trade_day_list = overweight_trade_day.HOLDINCREASEANNCDATEPLAN[0].split(',')
        for end_date in overweight_trade_day_list:
            Overweight_data = c.css(
                em_code,
                "HOLDDECREASEANNCDATENEWPLAN,HOLDINCREASENAMENEWPLAN,HOLDPLANSCHEDULE,"
                "HOLDMAXSHARENEWPLAN,HOLDMINSHARENEWPLAN",
                f"EndDate={end_date},ispandas=1")
            values_data = StockValuation.query_data(
                entity_id=entity.id,
                limit=1,
                order=StockValuation.timestamp.desc(),
                end_timestamp=end_date,
                columns=["capitalization"])
            name_list = Overweight_data.HOLDINCREASENAMENEWPLAN.tolist()[0].split(',')
            max_shanum = Overweight_data.HOLDMAXSHARENEWPLAN.tolist()[0].split(',') if \
                Overweight_data.HOLDMAXSHARENEWPLAN.tolist()[0] else None
            min_shanum = Overweight_data.HOLDMINSHARENEWPLAN.tolist()[0].split(',') if \
                Overweight_data.HOLDMINSHARENEWPLAN.tolist()[0] else None
            progress_data = Overweight_data.HOLDPLANSCHEDULE.tolist()[0] if \
                Overweight_data.HOLDPLANSCHEDULE.tolist()[0] else None
            try:
                data_end = pd.DataFrame(
                    {
                        "holder_name": name_list,
                        "volume_plan_max": max_shanum,
                        "volume_plan_mix": min_shanum
                    },
                    index=[i for i in range(len(name_list))])
            except Exception:
                continue
            data_end['capitalization'] = values_data.capitalization.values[0]
            data_end = data_end.astype({
                'volume_plan_max': 'float64',
                'volume_plan_mix': 'float64'
            })
            data_end['change_pct'] = round(
                (data_end['volume_plan_max'] / data_end['capitalization']) * 100, 2)
            data_end['report_date'] = pd.to_datetime(end_date)
            data_end['plan_progress'] = progress_data if progress_data else None
            data_end['holder_direction'] = '增持'
            df = df.append(data_end)

    if pd_is_not_null(df):
        df.reset_index(drop=True, inplace=True)
        df.rename(columns=self.data_schema.get_data_map(self), inplace=True)
        df['entity_id'] = entity.id
        df['timestamp'] = pd.to_datetime(df.report_date)
        df['provider'] = 'emquantapi'
        df['code'] = entity.code

        def generate_id(se):
            holdname = se['holder_name']
            if len(holdname) > 20:
                holdname = str(holdname).split('、')[0]
            return "{}_{}_{}".format(
                se['entity_id'],
                to_time_str(se['timestamp'], fmt=TIME_FORMAT_DAY),
                holdname)

        df['id'] = df[['entity_id', 'timestamp', 'holder_name']].apply(generate_id, axis=1)
        df_to_db(df=df, data_schema=self.data_schema, provider=self.provider,
                 force_update=self.force_update)
    return None
def record(self, entity, start, end, size, timestamps):
    if not end:
        end = now_pd_timestamp()

    date_range = pd.date_range(start=start, end=end, freq='1D').tolist()
    for date in date_range:
        # constituent stocks of the etf and their proportions
        etf_stock_df = get_etf_stocks(code=entity.code, timestamp=date, provider=self.provider)

        if pd_is_not_null(etf_stock_df):
            all_pct = etf_stock_df['proportion'].sum()

            if all_pct >= 1.2 or all_pct <= 0.8:
                self.logger.error(
                    f'ignore etf:{entity.id} date:{date} proportion sum:{all_pct}')
                break

            etf_stock_df.set_index('stock_id', inplace=True)

            # valuation data of the constituent stocks
            stock_valuation_df = StockValuation.query_data(
                entity_ids=etf_stock_df.index.to_list(),
                filters=[StockValuation.timestamp == date],
                index='entity_id')

            if pd_is_not_null(stock_valuation_df):
                stock_count = len(etf_stock_df)
                valuation_count = len(stock_valuation_df)

                self.logger.info(
                    f'etf:{entity.id} date:{date} stock count: {stock_count},'
                    f'valuation count:{valuation_count}')

                pct = abs(stock_count - valuation_count) / stock_count
                if pct >= 0.2:
                    self.logger.error(f'ignore etf:{entity.id} date:{date} pct:{pct}')
                    break

                se = pd.Series({
                    'id': "{}_{}".format(entity.id, date),
                    'entity_id': entity.id,
                    'timestamp': date,
                    'code': entity.code,
                    'name': entity.name
                })
                for col in ['pe', 'pe_ttm', 'pb', 'ps', 'pcf']:
                    # PE = P / E
                    # The idea: treat each stock's price as its PE, so its earning is 1
                    # (-1 when losing money); the result is total price (PE) / total earning.
                    value = 0
                    price = 0

                    # proportion-weighted valuation
                    positive_df = stock_valuation_df[[col]][stock_valuation_df[col] > 0]
                    positive_df['count'] = 1
                    positive_df = positive_df.multiply(etf_stock_df["proportion"], axis="index")
                    if pd_is_not_null(positive_df):
                        value = positive_df['count'].sum()
                        price = positive_df[col].sum()

                    negative_df = stock_valuation_df[[col]][stock_valuation_df[col] < 0]
                    if pd_is_not_null(negative_df):
                        negative_df['count'] = 1
                        negative_df = negative_df.multiply(etf_stock_df["proportion"], axis="index")
                        value = value - negative_df['count'].sum()
                        price = price + negative_df[col].sum()

                    se[f'{col}1'] = price / value

                    # simple arithmetic-average valuation
                    positive_df = stock_valuation_df[col][stock_valuation_df[col] > 0]
                    positive_count = len(positive_df)

                    negative_df = stock_valuation_df[col][stock_valuation_df[col] < 0]
                    negative_count = len(negative_df)

                    value = positive_count - negative_count
                    price = positive_df.sum() + abs(negative_df.sum())

                    se[col] = price / value

                df = se.to_frame().T
                self.logger.info(df)
                df_to_db(df=df, data_schema=self.data_schema, provider=self.provider,
                         force_update=self.force_update)

    return None
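# Self-contained illustration (made-up numbers) of the two aggregation schemes used in
# the loop above: proportion-weighted and simple arithmetic average. It only shows how
# positive and negative PEs are combined; it is not part of the recorder.
import pandas as pd

demo = pd.DataFrame({
    'pe': [10.0, 20.0, -5.0],          # -5 marks a loss-making constituent
    'proportion': [0.5, 0.3, 0.2],
})

# proportion-weighted: price = sum(proportion * pe),
# earning = +proportion for profitable stocks and -proportion for loss-makers
price = (demo['pe'] * demo['proportion']).sum()
earning = demo['proportion'].where(demo['pe'] > 0, -demo['proportion']).sum()
weighted_pe = price / earning          # (5 + 6 - 1) / (0.5 + 0.3 - 0.2) ≈ 16.7

# simple arithmetic average: every stock contributes price = |pe| and earning = ±1
simple_pe = demo['pe'].abs().sum() / demo['pe'].gt(0).map({True: 1, False: -1}).sum()
# (10 + 20 + 5) / (1 + 1 - 1) = 35.0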
def report_tm():
    while True:
        error_count = 0
        discord_informer = DiscordInformer()

        try:
            # fetch k-line data
            # StockTradeDay.record_data(provider='baostock', sleeping_time=2)
            # Stock1dKdata.record_data(provider='baostock', sleeping_time=1.5)

            latest_day: StockTradeDay = StockTradeDay.query_data(order=StockTradeDay.timestamp.desc(), limit=1,
                                                                 provider='joinquant', return_type='domain')
            if latest_day:
                target_date = latest_day[0].timestamp
            else:
                target_date = now_pd_timestamp()
            start_date = target_date - timedelta(60)

            # compute
            my_selector = TargetSelector(entity_schema=Stock, provider='joinquant',
                                         start_timestamp=start_date, end_timestamp=target_date)
            # add the factors
            tm_factor = TMFactor(entity_schema=Stock, provider='joinquant',
                                 start_timestamp=start_date, end_timestamp=target_date)

            my_selector.add_filter_factor(tm_factor)

            my_selector.run()

            long_targets = my_selector.get_open_long_targets(timestamp=target_date)

            logger.info(long_targets)

            msg = 'no targets'

            # filter out loss-making stocks
            # check StockValuation data
            pe_date = target_date - timedelta(10)
            if StockValuation.query_data(start_timestamp=pe_date, limit=1, return_type='domain'):
                positive_df = StockValuation.query_data(provider='joinquant', entity_ids=long_targets,
                                                        start_timestamp=pe_date,
                                                        filters=[StockValuation.pe > 0],
                                                        columns=['entity_id'])
                bad_stocks = set(long_targets) - set(positive_df['entity_id'].tolist())
                if bad_stocks:
                    stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=bad_stocks,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = '亏损股:' + ' '.join(info) + '\n'

                long_stocks = set(positive_df['entity_id'].tolist())

            if long_stocks:
                # use block to filter
                block_selector = BlockSelector(start_timestamp='2020-01-01', long_threshold=0.8)
                block_selector.run()
                long_blocks = block_selector.get_open_long_targets(timestamp=target_date)
                if long_blocks:
                    blocks = Block.query_data(provider='sina', entity_ids=long_blocks, return_type='domain')

                    info = [f'{block.name}({block.code})' for block in blocks]
                    msg = ' '.join(info) + '\n'

                    block_stocks = BlockStock.query_data(provider='sina',
                                                         filters=[BlockStock.stock_id.in_(long_stocks)],
                                                         entity_ids=long_blocks, return_type='domain')

                    block_map_stocks = {}
                    for block_stock in block_stocks:
                        stocks = block_map_stocks.get(block_stock.name)
                        if not stocks:
                            stocks = []
                            block_map_stocks[block_stock.name] = stocks
                        stocks.append(f'{block_stock.stock_name}({block_stock.stock_code})')

                    for block in block_map_stocks:
                        stocks = block_map_stocks[block]
                        stock_msg = ' '.join(stocks)
                        msg = msg + f'{block}:\n' + stock_msg + '\n'

            discord_informer.send_message(f'{target_date} TM选股结果 {msg}')
            break
        except Exception as e:
            logger.exception('report_tm error:{}'.format(e))
            time.sleep(60 * 3)
            error_count = error_count + 1

            if error_count == 10:
                discord_informer.send_message('report_tm error',
                                              'report_tm error:{}'.format(e))
def report_state():
    while True:
        error_count = 0
        email_action = EmailInformer(ssl=True)

        try:
            latest_day: Stock1dKdata = Stock1dKdata.query_data(order=Stock1dKdata.timestamp.desc(), limit=1,
                                                               return_type='domain')
            target_date = latest_day[0].timestamp
            # target_date = to_pd_timestamp('2020-01-02')

            # compute the moving averages
            my_selector = TargetSelector(start_timestamp='2018-01-01', end_timestamp=target_date)
            # add the factors
            factor1 = VolumeUpMa250Factor(start_timestamp='2018-01-01', end_timestamp=target_date)
            my_selector.add_filter_factor(factor1)

            my_selector.run()

            long_stocks = my_selector.get_open_long_targets(timestamp=target_date)

            msg = 'no targets'

            # filter out loss-making stocks
            # check StockValuation data
            pe_date = target_date - datetime.timedelta(10)
            if StockValuation.query_data(start_timestamp=pe_date, limit=1, return_type='domain'):
                positive_df = StockValuation.query_data(provider='joinquant', entity_ids=long_stocks,
                                                        start_timestamp=pe_date,
                                                        filters=[StockValuation.pe > 0], columns=['entity_id'])
                bad_stocks = set(long_stocks) - set(positive_df['entity_id'].tolist())
                if bad_stocks:
                    stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=bad_stocks,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = '亏损股:' + ' '.join(info) + '\n'

                long_stocks = set(positive_df['entity_id'].tolist())

            if long_stocks:
                pre_date = target_date - datetime.timedelta(3 * 365)
                ma_state = MaStateStatsFactor(entity_ids=long_stocks, start_timestamp=pre_date,
                                              end_timestamp=target_date, persist_factor=False)
                bad_stocks = []
                for entity_id, df in ma_state.factor_df.groupby(level=0):
                    if df['current_pct'].max() >= 0.35:
                        bad_stocks.append(entity_id)
                        long_stocks.remove(entity_id)

                if bad_stocks:
                    stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=bad_stocks,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = msg + '3年内高潮过:' + ' '.join(info) + '\n'

            # filter out risky stocks
            if long_stocks:
                risky_codes = risky_company(the_date=target_date, entity_ids=long_stocks)
                if risky_codes:
                    long_stocks = [entity_id for entity_id in long_stocks if
                                   get_entity_code(entity_id) not in risky_codes]

                    stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=risky_codes,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = msg + '风险股:' + ' '.join(info) + '\n'

            if long_stocks:
                stocks = get_entities(provider='joinquant', entity_schema=Stock, entity_ids=long_stocks,
                                      return_type='domain')
                # add them to eastmoney
                try:
                    try:
                        eastmoneypy.del_group('real')
                    except:
                        pass
                    eastmoneypy.create_group('real')
                    for stock in stocks:
                        eastmoneypy.add_to_group(stock.code, group_name='real')
                except Exception as e:
                    email_action.send_message("*****@*****.**", 'report state error',
                                              'report state error:{}'.format(e))

                info = [f'{stock.name}({stock.code})' for stock in stocks]
                msg = msg + '盈利股:' + ' '.join(info) + '\n'

            logger.info(msg)

            email_action.send_message('*****@*****.**', f'{target_date} 放量突破年线state选股结果', msg)

            break
        except Exception as e:
            logger.exception('report state error:{}'.format(e))
            time.sleep(60 * 3)
            error_count = error_count + 1

            if error_count == 10:
                email_action.send_message("*****@*****.**", 'report state error',
                                          'report state error:{}'.format(e))
def report_vol_up_250():
    while True:
        error_count = 0
        email_action = EmailInformer()

        try:
            # fetch k-line data
            # StockTradeDay.record_data(provider='joinquant')
            # Stock1dKdata.record_data(provider='joinquant')

            latest_day: Stock1dHfqKdata = Stock1dHfqKdata.query_data(
                order=Stock1dHfqKdata.timestamp.desc(), limit=1, return_type='domain')
            target_date = latest_day[0].timestamp

            # compute the moving averages
            my_selector = TargetSelector(start_timestamp='2018-10-01', end_timestamp=target_date)
            # add the factors
            factor1 = VolumeUpMaFactor(start_timestamp='2018-10-01', end_timestamp=target_date)

            my_selector.add_filter_factor(factor1)

            my_selector.run()

            long_stocks = my_selector.get_open_long_targets(timestamp=target_date)

            msg = 'no targets'

            # filter out loss-making stocks
            # check StockValuation data
            pe_date = target_date - datetime.timedelta(10)
            if StockValuation.query_data(start_timestamp=pe_date, limit=1, return_type='domain'):
                positive_df = StockValuation.query_data(
                    provider='joinquant',
                    entity_ids=long_stocks,
                    start_timestamp=pe_date,
                    filters=[StockValuation.pe > 0],
                    columns=['entity_id'])
                bad_stocks = set(long_stocks) - set(positive_df['entity_id'].tolist())
                if bad_stocks:
                    stocks = get_entities(provider='joinquant',
                                          entity_schema=Stock,
                                          entity_ids=bad_stocks,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = '亏损股:' + ' '.join(info) + '\n'

                long_stocks = set(positive_df['entity_id'].tolist())

            if long_stocks:
                stocks = get_entities(provider='joinquant',
                                      entity_schema=Stock,
                                      entity_ids=long_stocks,
                                      return_type='domain')
                # add them to eastmoney
                try:
                    try:
                        eastmoneypy.del_group('tech')
                    except:
                        pass
                    eastmoneypy.create_group('tech')
                    for stock in stocks:
                        eastmoneypy.add_to_group(stock.code, group_name='tech')
                except Exception as e:
                    email_action.send_message(
                        "*****@*****.**", 'report_vol_up_250 error',
                        'report_vol_up_250 error:{}'.format(e))

                info = [f'{stock.name}({stock.code})' for stock in stocks]
                msg = msg + '盈利股:' + ' '.join(info) + '\n'

            logger.info(msg)

            email_action.send_message(get_subscriber_emails(),
                                      f'{target_date} 改进版放量突破年线选股结果', msg)

            break
        except Exception as e:
            logger.exception('report_vol_up_250 error:{}'.format(e))
            time.sleep(60 * 3)
            error_count = error_count + 1

            if error_count == 10:
                email_action.send_message(
                    "*****@*****.**", 'report_vol_up_250 error',
                    'report_vol_up_250 error:{}'.format(e))
def record(self, entity, start, end, size, timestamps):
    if not end:
        end = now_pd_timestamp()

    date_range = pd.date_range(start=start, end=end, freq='1D').tolist()
    for date in date_range:
        # constituent stocks of the etf and their proportions
        etf_stock_df = get_etf_stocks(code=entity.code, timestamp=date, provider=self.provider)

        all_pct = etf_stock_df['proportion'].sum()

        if all_pct >= 1.1 or all_pct <= 0.9:
            self.logger.info(f'etf:{entity.id} date:{date} proportion sum:{all_pct}')

        if pd_is_not_null(etf_stock_df):
            etf_stock_df.set_index('stock_id', inplace=True)

            # valuation data of the constituent stocks
            stock_valuation_df = StockValuation.query_data(
                entity_ids=etf_stock_df.index.to_list(),
                filters=[StockValuation.timestamp == date],
                index='entity_id')

            if pd_is_not_null(stock_valuation_df):
                # For now only the simple arithmetic-average valuation is supported.
                # Rationale: being vaguely right is more useful than being precisely wrong.
                # Market caps of A-share stocks differ hugely, so a cap-weighted valuation
                # would hardly reflect the whole.
                self.logger.info(
                    f'etf:{entity.id} date:{date} stock count: {len(etf_stock_df)},valuation count:{len(stock_valuation_df)}'
                )
                # # static pe
                # pe = Column(Float)
                # # trailing (ttm) pe
                # pe_ttm = Column(Float)
                # # price-to-book
                # pb = Column(Float)
                # # price-to-sales
                # ps = Column(Float)
                # # price-to-cash-flow
                # pcf = Column(Float)

                se = pd.Series({
                    'id': "{}_{}".format(entity.id, date),
                    'entity_id': entity.id,
                    'timestamp': date,
                    'code': entity.code,
                    'name': entity.name
                })
                for col in ['pe', 'pe_ttm', 'pb', 'ps', 'pcf']:
                    # PE = P / E
                    # Here every stock's price is set to 1, the total earning is computed,
                    # and the two are divided.
                    positive_df = stock_valuation_df[col][stock_valuation_df[col] > 0]
                    positive_count = len(positive_df)

                    negative_df = stock_valuation_df[col][stock_valuation_df[col] < 0]
                    negative_count = len(negative_df)

                    result = (positive_count + negative_count) / (
                        positive_count / positive_df.mean() + negative_count / negative_df.mean())

                    se[col] = result

                df = se.to_frame().T
                self.logger.info(df)
                df_to_db(df=df, data_schema=self.data_schema, provider=self.provider,
                         force_update=self.force_update)

    return None
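# Worked example (made-up numbers) of the averaging formula used above: each stock's
# price is 1, and each group's total earning is approximated as count / mean(PE of the
# group). Not part of the recorder, only an illustration of the arithmetic.
import pandas as pd

pes = pd.Series([10.0, 20.0, -40.0])

positive = pes[pes > 0]    # profitable constituents
negative = pes[pes < 0]    # loss-making constituents

total_price = len(positive) + len(negative)
total_earning = len(positive) / positive.mean() + len(negative) / negative.mean()

etf_pe = total_price / total_earning   # 3 / (2/15 - 1/40) ≈ 27.7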
def report_real(region):
    while True:
        error_count = 0
        email_action = EmailInformer(ssl=True)

        try:
            latest_day: Stock1dKdata = Stock1dKdata.query_data(
                region=region,
                order=Stock1dKdata.timestamp.desc(),
                limit=1,
                return_type='domain')
            target_date = latest_day[0].timestamp
            # target_date = '2020-02-04'

            # compute the moving averages
            my_selector = TargetSelector(region=region,
                                         start_timestamp='2018-01-01',
                                         end_timestamp=target_date)
            # add the factors
            factor1 = VolumeUpMa250Factor(region=region,
                                          start_timestamp='2018-01-01',
                                          end_timestamp=target_date)

            my_selector.add_filter_factor(factor1)

            my_selector.run()

            long_stocks = my_selector.get_open_long_targets(timestamp=target_date)

            msg = 'no targets'

            # filter out loss-making stocks
            # check StockValuation data
            pe_date = target_date - datetime.timedelta(10)
            if StockValuation.query_data(region=region,
                                         start_timestamp=pe_date,
                                         limit=1,
                                         return_type='domain'):
                positive_df = StockValuation.query_data(
                    region=region,
                    provider=Provider.JoinQuant,
                    entity_ids=long_stocks,
                    start_timestamp=pe_date,
                    filters=[StockValuation.pe > 0],
                    columns=['entity_id'])
                bad_stocks = set(long_stocks) - set(positive_df['entity_id'].tolist())
                if bad_stocks:
                    stocks = get_entities(region=region,
                                          provider=Provider.JoinQuant,
                                          entity_schema=Stock,
                                          entity_ids=bad_stocks,
                                          return_type='domain')
                    info = [f'{stock.name}({stock.code})' for stock in stocks]
                    msg = '亏损股:' + ' '.join(info) + '\n'

                long_stocks = set(positive_df['entity_id'].tolist())

            if long_stocks:
                # use block to filter
                block_selector = BlockSelector(region=region,
                                               start_timestamp='2020-01-01',
                                               long_threshold=0.8)
                block_selector.run()
                long_blocks = block_selector.get_open_long_targets(timestamp=target_date)
                if long_blocks:
                    blocks: List[Block] = Block.query_data(region=region,
                                                           provider=Provider.Sina,
                                                           entity_ids=long_blocks,
                                                           return_type='domain')

                    info = [f'{block.name}({block.code})' for block in blocks]
                    msg = ' '.join(info) + '\n'

                    block_stocks: List[BlockStock] = BlockStock.query_data(
                        region=region,
                        provider=Provider.Sina,
                        filters=[BlockStock.stock_id.in_(long_stocks)],
                        entity_ids=long_blocks,
                        return_type='domain')

                    if block_stocks:
                        # add them to eastmoney
                        try:
                            try:
                                eastmoneypy.del_group('real')
                            except:
                                pass
                            eastmoneypy.create_group('real')
                            for block_stock in block_stocks:
                                eastmoneypy.add_to_group(block_stock.stock_code, group_name='real')
                        except Exception as e:
                            email_action.send_message(
                                "*****@*****.**", 'report_real error',
                                'report_real error:{}'.format(e))

                        block_map_stocks = {}
                        for block_stock in block_stocks:
                            stocks = block_map_stocks.get(block_stock.name)
                            if not stocks:
                                stocks = []
                                block_map_stocks[block_stock.name] = stocks
                            stocks.append(f'{block_stock.stock_name}({block_stock.stock_code})')

                        for block in block_map_stocks:
                            stocks = block_map_stocks[block]
                            stock_msg = ' '.join(stocks)
                            msg = msg + f'{block}:\n' + stock_msg + '\n'

            logger.info(msg)

            email_action.send_message('*****@*****.**',
                                      f'{target_date} 放量突破年线real选股结果', msg)

            break
        except Exception as e:
            logger.exception('report_real error:{}'.format(e))
            time.sleep(60 * 3)
            error_count = error_count + 1

            if error_count == 10:
                email_action.send_message("*****@*****.**", 'report_real error',
                                          'report_real error:{}'.format(e))