def get_appropriate_date_momentum(
    company_df: pd.DataFrame,
    company,
    duration: Tuple[int, int] = (0, 1),
    verbosity: int = 1,
) -> Tuple[datetime.datetime, float]:
    """
    Return appropriate date which is present in data record.

    Parameters
    ----------
    company_df : pd.DataFrame
        Company dataframe with 'Date' and 'Close' columns
    company : str
        Company name, used only for logging
    duration : Tuple[year,month], optional
        Desired duration to go back to retrive record, by default (0,1)
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    Tuple[datetime.datetime,float]
        Date,Close value on date retrived

    Raises
    ------
    ValueError
        If desired old is older than first record
    """
    current_date = company_df.iloc[-1].Date
    desired_date = current_date - dateutil.relativedelta.relativedelta(
        years=duration[0], months=duration[1]
    )
    if desired_date < company_df.iloc[0].Date:
        logger.error(
            f"Given desired date {desired_date.strftime('%d-%m-%Y')} is older than first recorded date {company_df.iloc[0].Date.strftime('%d-%m-%Y')}"
        )
        raise ValueError
    dd_copy = desired_date
    if verbosity > 0:
        logger.debug(
            f"Your desired date for monthly return for {company} is {desired_date.strftime('%d-%m-%Y')}"
        )

    desired_close = company_df.loc[company_df["Date"] == desired_date]
    if len(desired_close) == 0:
        # Desired date is a weekend/market holiday: walk back ONE calendar day
        # at a time from the original desired date until a recorded date is
        # found.  (The previous implementation subtracted a growing offset
        # from an already-shifted date — skipping days — and could break out
        # with an empty frame, crashing at .iloc[-1] below.)
        for i in range(1, 100):
            candidate = dd_copy - dateutil.relativedelta.relativedelta(days=i)
            desired_close = company_df.loc[company_df["Date"] == candidate]
            if len(desired_close) != 0:
                desired_date = candidate
                if verbosity > 0:
                    logger.warning(
                        f"Desired date: {dd_copy.strftime('%d-%m-%Y')} not found going for next possible date: {desired_date.strftime('%d-%m-%Y')}"
                    )
                break
    return desired_date, desired_close.iloc[-1].Close
def get_appropriate_date_ema(
    company_df: pd.DataFrame,
    desired_date: datetime.datetime,
    verbosity: int = 1,
) -> datetime.datetime:
    """
    Return appropriate date which is present in data record.

    Parameters
    ----------
    company_df : pd.DataFrame
        Company dataframe, indexed by date
    desired_date : datetime.datetime
        Desired date cut-off to calculate ema
    verbosity : int, optional
        Level of detail logging, by default 1

    Returns
    -------
    datetime.datetime
        Date retrived from the record (the previous annotation claimed a
        (date, close) tuple, but only the date is returned)

    Raises
    ------
    ValueError
        If desired old is older than first record
    """
    if desired_date < company_df.index[0]:
        logger.error(
            f"Given desired date {desired_date.strftime('%d-%m-%Y')} is older than first recorded date {company_df.index[0].strftime('%d-%m-%Y')}"
        )
        # The docstring always promised a ValueError here, but the previous
        # implementation only logged and kept searching; raise as documented.
        raise ValueError
    if verbosity > 0:
        logger.debug(
            f"Your desired EMA cut-off date is {desired_date.strftime('%d-%m-%Y')}"
        )
    # Walk back one calendar day at a time until a recorded date is found.
    for day_idx in range(1, 100):
        if desired_date not in company_df.index:
            date = desired_date - dateutil.relativedelta.relativedelta(
                days=day_idx
            )
        else:
            date = desired_date
        if date in company_df.index:
            break
    if verbosity > 0 and desired_date != date:
        logger.warning(
            f"Desired date: {desired_date.strftime('%d-%m-%Y')} not found going for next possible date: {date.strftime('%d-%m-%Y')}"
        )
    return date
def volume_n_days_indicator(
    self,
    duration: int = 90,
    save: bool = True,
    export_path: str = ".",
    verbosity: int = 1,
) -> Optional[pd.DataFrame]:
    """Mean Volume Indicator based on desired days

    Args:
        duration (int, optional): Total days from current date to retrive data. Defaults to 90.
        save (bool, optional): Save to hard disk. Defaults to True.
        export_path (str, optional): Path to save, to be used only if 'save' is true. Defaults to ".".
        verbosity (int, optional): Level of detail logging,1=< Deatil, 0=Less detail. Defaults to 1.

    Returns:
        All Volume based indicator, or None when saved to disk

    Example:
    ```python
    from stock_analysis.indicator import Indicator
    ind = Indicator('./data/company_list.yaml')
    vol = ind.volume_n_days_indicator(150)
    ```
    """
    # Fan the per-company volume computation out across processes.
    with parallel_backend(n_jobs=-1, backend="multiprocessing"):
        rows = Parallel()(
            delayed(self.unit_vol_indicator_n_days)(name, duration)
            for name in self.data["company"]
        )
    vol_ind_df = pd.DataFrame(rows)
    if verbosity > 0:
        logger.debug(f"Here are sample 5 company\n{vol_ind_df.head()}")
    if save is not True:
        return vol_ind_df
    new_folder(export_path)
    out_file = f"{export_path}/VolumeIndicator90Days_detailed_{now_strting}.csv"
    vol_ind_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Save at {out_file}")
def _ema_indicator_n3(self,
                      ema_canditate: Tuple[int, int, int] = (5, 13, 26),
                      cutoff_date: Union[str, datetime.datetime] = 'today',
                      verbosity: int = 1) -> pd.DataFrame:
    """Compute the triple-EMA indicator for every company in parallel.

    Parameters
    ----------
    ema_canditate : Tuple[int, int, int], optional
        Three periods (days) to calculate EMA, by default (5, 13, 26).
        (Annotation fixed: the default is a 3-tuple, matching the sibling
        joblib implementation.)
    cutoff_date : Union[str, datetime.datetime], optional
        Date till which to calculate ema, by default 'today'
    verbosity : int, optional
        Level of detail logging, by default 1

    Returns
    -------
    pd.DataFrame
        Per-company EMA rows with any all-NA results dropped
    """
    # One task per company; starmap unpacks each tuple into the worker.
    with multiprocessing.Pool(multiprocessing.cpu_count() - 1) as pool:
        result = pool.starmap(
            self._parallel_ema_indicator_n3,
            [(company, ema_canditate, cutoff_date, verbosity)
             for company in self.data['company']])
    ema_indicator_df = pd.DataFrame(result)
    ema_indicator_df.dropna(inplace=True)
    if verbosity > 0:
        logger.debug(
            f"Here are sample 5 company\n{ema_indicator_df.head()}")
    return ema_indicator_df
def _ema_indicator_n3(
    self,
    ema_canditate: Tuple[int, int, int] = (5, 13, 26),
    cutoff_date: Union[str, datetime.datetime] = "today",
    verbosity: int = 1,
) -> pd.DataFrame:
    """Run the triple-EMA computation for every company in parallel.

    Returns a DataFrame of per-company EMA rows with NA rows dropped.
    """
    with parallel_backend(n_jobs=-1, backend="multiprocessing"):
        tasks = (
            delayed(self.unit_ema_indicator_n3)(
                name, ema_canditate, cutoff_date, verbosity
            )
            for name in self.data["company"]
        )
        rows = Parallel()(tasks)
    ema_indicator_df = pd.DataFrame(rows)
    ema_indicator_df.dropna(inplace=True)
    if verbosity > 0:
        logger.debug(
            f"Here are sample 5 company\n{ema_indicator_df.head()}")
    return ema_indicator_df
def unit_momentum(self, company: str, start, end, verbosity: int = 1):
    """Compute yearly and monthly annualized-return metrics for one company.

    Parameters
    ----------
    company : str
        Company symbol (".NS" suffix is appended for retrieval)
    start, end
        Date range passed to the data retriever
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail, by default 1

    Returns
    -------
    dict
        Yearly/monthly start/end dates, closes and annualized returns;
        NA-filled when data retrieval or date resolution fails.
    """
    logger.info(f"Retriving data for {company}")
    try:
        company_df = DataRetrive.single_company_specific(
            company_name=f"{company}.NS", start_date=start, end_date=end
        )
        company_df.reset_index(inplace=True)
        ar_yearly = annualized_rate_of_return(
            end_date=company_df.iloc[-1].Close,
            start_date=company_df.iloc[0].Close,
            duration=1,
        )  # (company_df.iloc[-30,0] - company_df.iloc[0,0]).days/365)
        # Resolve the month-ago trading date ONCE; the previous version
        # called the helper twice with identical arguments (the second call
        # merely suppressed logging), doing the lookup work twice.
        monthly_date, monthly_close = get_appropriate_date_momentum(
            company_df, company, verbosity=verbosity
        )
        ar_monthly = annualized_rate_of_return(
            end_date=company_df.iloc[-1].Close,
            start_date=monthly_close,
            duration=(company_df.iloc[-1, 0] - company_df.iloc[-30, 0]).days
            / 30,
        )
        monthly_start_date = monthly_date.strftime("%d-%m-%Y")
    except (IndexError, KeyError, ValueError):
        # Best-effort: return an NA-filled record instead of failing the batch.
        if verbosity > 0:
            logger.debug(f"Data is not available for: {company}")
        company_df = pd.DataFrame(
            {"Date": [datetime.datetime(1000, 1, 1)] * 30, "Close": [pd.NA] * 30}
        )
        ar_yearly, ar_monthly, monthly_start_date = pd.NA, pd.NA, pd.NA
    return {
        "company": company,
        "yearly_start_date": company_df.iloc[0].Date.strftime("%d-%m-%Y"),
        "yearly_start_date_close": company_df.iloc[0].Close,
        "yearly_end_date": company_df.iloc[-1].Date.strftime("%d-%m-%Y"),
        "yearly_end_date_close": company_df.iloc[-1].Close,
        "return_yearly": ar_yearly,
        "monthly_start_date": monthly_start_date,
        "monthly_start_date_close": company_df.iloc[-30].Close,
        "monthly_end_date": company_df.iloc[-1].Date.strftime("%d-%m-%Y"),
        "monthly_end_date_close": company_df.iloc[-1].Close,
        "return_monthly": ar_monthly,
    }
def volume_indicator_n_days(self,
                            duration: int = 90,
                            save: bool = True,
                            export_path: str = '.',
                            verbosity: int = 1) -> pd.DataFrame:
    """Mean Volume Indicator based on desired days

    Parameters
    ----------
    duration : int, optional
        Total days from current date to retrive data, by default 90
    save : bool, optional
        Save to hard disk, by default True
    export_path : str, optional
        Path to save, to be used only if 'save' is true, by default '.'
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    pd.DataFrame
        All Volume based indicator (None when saved to disk)
    """
    # Build one (company, duration) task per company and fan out.
    jobs = [(company, duration) for company in self.data['company']]
    with multiprocessing.Pool(multiprocessing.cpu_count() - 1) as pool:
        rows = pool.starmap(self._parallel_vol_indicator_n_days, jobs)
    vol_ind_df = pd.DataFrame(rows)
    if verbosity > 0:
        logger.debug(f"Here are sample 5 company\n{vol_ind_df.head()}")
    if save is not True:
        return vol_ind_df
    out_file = f"{export_path}/VolumeIndicator90Days_detailed_{now_strting}.csv"
    vol_ind_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Save at {out_file}")
def ema_indicator(
    self,
    ema_canditate: Tuple[int, int] = (50, 200),
    cutoff_date: Union[str, datetime.datetime] = "today",
    save: bool = True,
    export_path: str = ".",
    verbosity: int = 1,
) -> Optional[pd.DataFrame]:
    """Exponential moving average based on desired two period (or no of days)

    Args:
        ema_canditate (Tuple[int, int], optional): Two number used two calculate EMA. Defaults to (50, 200).
        cutoff_date (Union[str, datetime.datetime], optional): Desired date till which to calculate ema. Defaults to "today".
        save (bool, optional): Save to hard disk. Defaults to True.
        export_path (str, optional): Path to save, to be used only if 'save' is true. Defaults to ".".
        verbosity (int, optional): Level of detail logging,1=< Detail, 0=Less detail. Defaults to 1.

    Returns:
        EMA and indicators based on it, or None when saved to disk

    Example:
    ```python
    from stock_analysis.indicator import Indicator
    ind = Indicator('./data/company_list.yaml')
    ema = ind.ema_indicator((50,200), '01/06/2020')
    ```
    """
    with parallel_backend(n_jobs=-1, backend="multiprocessing"):
        rows = Parallel()(
            delayed(self.unit_ema_indicator)(
                name, ema_canditate, cutoff_date, verbosity
            )
            for name in self.data["company"]
        )
    ema_indicator_df = pd.DataFrame(rows)
    ema_indicator_df.dropna(inplace=True)
    # Column names for the short/long EMA candidates.
    short_col = f"ema{str(ema_canditate[0])}"
    long_col = f"ema{str(ema_canditate[1])}"
    ema_indicator_df["percentage_diff"] = ema_indicator_df.apply(
        lambda row: percentage_diff(
            row[short_col], row[long_col], return_absolute=True
        ),
        axis=1,
    )
    ema_indicator_df["outcome"] = ema_indicator_df.apply(
        lambda row: outcome_analysis(row["percentage_diff"]), axis=1
    )
    ema_indicator_df = ema_indicator_df[
        [
            "company",
            "ema_date",
            short_col,
            long_col,
            "percentage_diff",
            "outcome",
            "action",
        ]
    ]
    if verbosity > 0:
        logger.debug(
            f"Here are sample 5 company\n{ema_indicator_df.head()}")
    if save is not True:
        return ema_indicator_df
    new_folder(export_path)
    out_file = (
        f"{export_path}/ema_indicator{str(ema_canditate[0])}-"
        f"{str(ema_canditate[1])}_{len(self.data['company'])}"
        f"company_{now_strting}.csv"
    )
    ema_indicator_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Exported at {out_file}")
def ema_crossover_detail_indicator(
    self,
    ema_canditate: Tuple[int, int, int] = (5, 13, 26),
    save: bool = True,
    export_path: str = ".",
    verbosity: int = 1,
) -> Optional[pd.DataFrame]:
    """Exponential moving average for crossover triple period technique

    Args:
        ema_canditate (Tuple[int, int, int], optional): Three Period (or days) to calculate EMA. Defaults to (5, 13, 26).
        save (bool, optional): Save to hard disk. Defaults to True.
        export_path (str, optional): Path to save, to be used only if 'save' is true. Defaults to ".".
        verbosity (int, optional): Level of detail logging,1=< Deatil, 0=Less detail. Defaults to 1.

    Returns:
        Results is based on crossover ema and detailed metrics

    Example:
    ```python
    from stock_analysis.indicator import Indicator
    ind = Indicator('./data/company_list.yaml')
    ema = ind.ema_crossover_detail_indicator((5,13,26))
    ```
    """
    logger.info("Performing EMA Indicator Task")
    ema_short = self._ema_indicator_n3(ema_canditate=ema_canditate,
                                       verbosity=verbosity)
    logger.info("Extarcting detail company quote data")
    with parallel_backend(n_jobs=-1, backend="multiprocessing"):
        company_quote = Parallel()(
            delayed(self.unit_quote_retrive)(company)
            for company in ema_short["company"])
    # DataFrame.append was deprecated and removed in pandas >= 2.0;
    # collect the valid frames and concatenate them in one pass.
    quote_frames = [
        quote for quote in company_quote if isinstance(quote, pd.DataFrame)
    ]
    batch_company_quote = (
        pd.concat(quote_frames) if quote_frames else pd.DataFrame()
    )
    batch_company_quote = batch_company_quote.reset_index().rename(
        columns={"index": "company"})
    batch_company_quote = batch_company_quote[[
        "company",
        "longName",
        "price",
        "regularMarketVolume",
        "marketCap",
        "bookValue",
        "priceToBook",
        "averageDailyVolume3Month",
        "averageDailyVolume10Day",
        "fiftyTwoWeekLowChange",
        "fiftyTwoWeekLowChangePercent",
        "fiftyTwoWeekRange",
        "fiftyTwoWeekHighChange",
        "fiftyTwoWeekHighChangePercent",
        "fiftyTwoWeekLow",
        "fiftyTwoWeekHigh",
    ]]
    # regex=False: the default regex interpretation would let '.' match
    # ANY character, so e.g. 'XNS' endings could be stripped too.
    batch_company_quote["company"] = batch_company_quote[
        "company"].str.replace(".NS", "", regex=False)
    ema_quote = ema_short.merge(batch_company_quote, on="company",
                                validate="1:1")
    if verbosity > 0:
        logger.debug(f"Here are sample 5 company\n{ema_quote.head()}")
    if save is not False:
        # Ensure the export directory exists, consistent with the other savers.
        new_folder(export_path)
        out_file = (
            f"{export_path}/ema_crossover_detail_indicator"
            f"{str(ema_canditate[0])}-{str(ema_canditate[1])}-"
            f"{str(ema_canditate[2])}_{len(self.data['company'])}"
            f"company_{now_strting}.csv"
        )
        ema_quote.to_csv(out_file, index=False)
        if verbosity > 0:
            logger.debug(f"Exported at {out_file}")
    else:
        return ema_quote
def ema_indicator(self,
                  ema_canditate: Tuple[int, int] = (50, 200),
                  cutoff_date: Union[str, datetime.datetime] = 'today',
                  save: bool = True,
                  export_path: str = '.',
                  verbosity: int = 1) -> pd.DataFrame:
    """Exponential moving average based on desired two period (or no of days)

    Parameters
    ----------
    ema_canditate : Tuple[int, int], optional
        Two periods (days) used to calculate EMA, by default (50, 200)
    cutoff_date : Union[str,datetime.datetime], optional
        Desired date till which to calculate ema. 'today' for current day,
        eg 01/01/2020 for any other date, by default 'today'
    save : bool, optional
        Save to hard disk, by default True
    export_path : str, optional
        Path to save, to be used only if 'save' is true, by default '.'
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    pd.DataFrame
        EMA and indicators based on it (None when saved to disk)
    """
    jobs = [(company, ema_canditate, cutoff_date, verbosity)
            for company in self.data['company']]
    with multiprocessing.Pool(multiprocessing.cpu_count() - 1) as pool:
        rows = pool.starmap(self._parallel_ema_indicator, jobs)
    ema_indicator_df = pd.DataFrame(rows)
    ema_indicator_df.dropna(inplace=True)
    # Column names for the short/long EMA candidates.
    short_col = f'ema{str(ema_canditate[0])}'
    long_col = f'ema{str(ema_canditate[1])}'
    ema_indicator_df['percentage_diff'] = ema_indicator_df.apply(
        lambda row: percentage_diff_analysis(row[short_col], row[long_col]),
        axis=1)
    ema_indicator_df['outcome'] = ema_indicator_df.apply(
        lambda row: outcome_analysis(row['percentage_diff']), axis=1)
    ema_indicator_df = ema_indicator_df[[
        'company', 'ema_date', short_col, long_col, 'percentage_diff',
        'outcome', 'action'
    ]]
    if verbosity > 0:
        logger.debug(
            f"Here are sample 5 company\n{ema_indicator_df.head()}")
    if save is not True:
        return ema_indicator_df
    out_file = (f"{export_path}/ema_indicator{str(ema_canditate[0])}-"
                f"{str(ema_canditate[1])}_{len(self.data['company'])}"
                f"company_{now_strting}.csv")
    ema_indicator_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Exported at {out_file}")
def ema_crossover_indicator_detail(self,
                                   ema_canditate: Tuple[int, int, int] = (5, 13, 26),
                                   save: bool = True,
                                   export_path: str = '.',
                                   verbosity: int = 1) -> pd.DataFrame:
    """Exponential moving average for crossover triple period technique

    Parameters
    ----------
    ema_canditate : Tuple[int, int, int], optional
        Three Period (or days) to calculate EMA, by default (5,13,26)
    save : bool, optional
        Save to hard disk, by default True
    export_path : str, optional
        Path to save, to be used only if 'save' is true, by default '.'
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    pd.DataFrame
        Crossover EMA joined with detailed company quote metrics
        (None when saved to disk)
    """
    logger.info("Performing EMA Indicator Task")
    ema_short = self._ema_indicator_n3(ema_canditate=ema_canditate,
                                       verbosity=verbosity)
    logger.info("Extarcting detail company quote data")
    with multiprocessing.Pool(multiprocessing.cpu_count() - 1) as pool:
        company_quote = pool.map(self._parallel_quote_retrive,
                                 ema_short['company'])
    # DataFrame.append was deprecated and removed in pandas >= 2.0;
    # concatenate all quote frames in a single pass instead.
    quote_frames = list(company_quote)
    batch_company_quote = (pd.concat(quote_frames)
                           if quote_frames else pd.DataFrame())
    batch_company_quote = batch_company_quote.reset_index().rename(
        columns={'index': 'company'})
    batch_company_quote = batch_company_quote[[
        'company', 'longName', 'price', 'regularMarketVolume', 'marketCap',
        'bookValue', 'priceToBook', 'averageDailyVolume3Month',
        'averageDailyVolume10Day', 'fiftyTwoWeekLowChange',
        'fiftyTwoWeekLowChangePercent', 'fiftyTwoWeekRange',
        'fiftyTwoWeekHighChange', 'fiftyTwoWeekHighChangePercent',
        'fiftyTwoWeekLow', 'fiftyTwoWeekHigh'
    ]]
    # regex=False: the default regex interpretation would let '.' match
    # ANY character, so e.g. 'XNS' endings could be stripped too.
    batch_company_quote['company'] = batch_company_quote[
        'company'].str.replace('.NS', '', regex=False)
    ema_quote = ema_short.merge(batch_company_quote,
                                on='company',
                                validate='1:1')
    if verbosity > 0:
        logger.debug(f"Here are sample 5 company\n{ema_quote.head()}")
    if save is not False:
        out_file = (f"{export_path}/ema_indicator_detail"
                    f"{str(ema_canditate[0])}-{str(ema_canditate[1])}_"
                    f"{len(self.data['company'])}company_{now_strting}.csv")
        ema_quote.to_csv(out_file, index=False)
        if verbosity > 0:
            logger.debug(f"Exported at {out_file}")
    else:
        return ema_quote
def relative_momentum(
    self,
    end_date: str = "today",
    top_company_count: int = 20,
    save: bool = True,
    export_path: str = ".",
    verbosity: int = 1,
) -> Optional[pd.DataFrame]:
    """The strategy is used to identity stocks which had 'good performance'
    based on desired 'return' duration

    Args:
        end_date (str, optional): End date of of stock record to retrive. Must be in format: dd/mm/yyyy. Defaults to 'today'.
        top_company_count (int, optional): No of top company to retrieve based on Annualized return. Defaults to 20.
        save (bool, optional): Wether to export to disk. Defaults to True.
        export_path (str, optional): Path to export csv.To be used only if 'save' is True. Defaults to '.'.
        verbosity (int, optional): Level of detail logging,1=< Deatil, 0=Less detail. Defaults to 1.

    Returns:
        Record based on monthly and yearly calculation

    Example:
    ```python
    from stock_analysis import MomentumStrategy
    sa = MomentumStrategy('./data/company_list.yaml')
    ms = sa.relative_momentum(end_date='01/06/2020')
    ```
    """
    # Resolve the date window: one year back from the requested end date.
    if end_date == "today":
        end = datetime.datetime.now()
    else:
        end = datetime.datetime.strptime(end_date, "%d/%m/%Y").date()
    start = end - dateutil.relativedelta.relativedelta(years=1)
    with parallel_backend(n_jobs=-1, backend="multiprocessing"):
        rows = Parallel()(
            delayed(self.unit_momentum)(name, start, end, verbosity)
            for name in self.data["company"])
    momentum_df = pd.DataFrame(rows)
    momentum_df.dropna(inplace=True)
    momentum_df.sort_values(by=["return_yearly"],
                            ascending=False,
                            inplace=True)
    momentum_df.reset_index(inplace=True, drop=True)
    top_df = momentum_df.head(top_company_count)
    if verbosity > 0:
        logger.debug(f"Sample output:\n{top_df}")
    if save is not True:
        return top_df
    new_folder(export_path)
    out_file = (f"{export_path}/momentum_result_{end.strftime('%d-%m-%Y')}"
                f"_top_{top_company_count}.csv")
    top_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Saved at {out_file}")
def relative_momentum_with_ema(
    self,
    end_date: str = "today",
    top_company_count: int = 20,
    ema_canditate: Tuple[int, int] = (50, 200),
    save: bool = True,
    export_path: str = ".",
    verbosity: int = 1,
) -> Optional[pd.DataFrame]:
    """The strategy is used to identity stocks with 'good performance'
    based on desired 'return' duration and 'exponential moving avg'.

    Args:
        end_date (str, optional): End date of of stock record to retrive. Must be in format: dd/mm/yyyy. Defaults to 'today'.
        top_company_count (int, optional): No of top company to retrieve based on Annualized return. Defaults to 20.
        ema_canditate (Tuple[int, int], optional): Period (or days) to calculate EMA. Defaults to (50, 200).
        save (bool, optional): Wether to export to disk. Defaults to True.
        export_path (str, optional): Path to export csv.To be used only if 'save' is True. Defaults to '.'.
        verbosity (int, optional): Level of detail logging,1=< Deatil, 0=Less detail. Defaults to 1.

    Returns:
        Record based on monthly and yearly calculation and EMA calculation

    Example:
    ```python
    from stock_analysis import MomentumStrategy
    sa = MomentumStrategy('./data/company_list.yaml')
    mes = sa.relative_momentum_with_ema('01/06/2020', 30)
    ```
    """
    logger.info("Performing Momentum Strategy task")
    momentum_df = self.relative_momentum(
        end_date=end_date,
        top_company_count=top_company_count,
        save=False,
        verbosity=verbosity,
    )
    momentum_df.reset_index(drop=True, inplace=True)
    # Restrict the EMA pass to the top companies found by momentum.
    ind = Indicator(company_name=momentum_df.loc[:, "company"])
    logger.info(
        f"Performing EMA task on top {top_company_count} company till {end_date}"
    )
    if end_date == "today":
        cutoff_date = end_date
        save_date = datetime.datetime.now().strftime("%d-%m-%Y")
    else:
        save_date = end_date.replace("/", "-")
        cutoff_date = datetime.datetime.strptime(end_date, "%d/%m/%Y")
        assert isinstance(cutoff_date,
                          datetime.datetime), "Incorrect date type"
    ema_df = ind.ema_indicator(
        ema_canditate=ema_canditate,
        cutoff_date=cutoff_date,
        save=False,
        verbosity=verbosity,
    )
    momentum_ema_df = momentum_df.merge(ema_df, on="company", validate="1:1")
    if save is True:
        new_folder(export_path)
        momentum_ema_df.reset_index(drop=True, inplace=True)
        momentum_ema_df.to_csv(
            f"{export_path}/momentum_ema{ema_canditate[0]}-{ema_canditate[1]}_{save_date}_top_{top_company_count}.csv",
            index=False,
        )
        # Fix: guard with verbosity like every sibling method; this debug
        # line was previously emitted unconditionally.
        if verbosity > 0:
            logger.debug(
                f"Saved at {export_path}/momentum_ema{ema_canditate[0]}-{ema_canditate[1]}_{save_date}_top_{top_company_count}.csv"
            )
        if verbosity > 0:
            logger.debug(f"Sample output:\n{momentum_ema_df.head()}")
    else:
        return momentum_ema_df
def momentum_strategy(self,
                      end_date: str = 'today',
                      top_company_count: int = 20,
                      save: bool = True,
                      export_path: str = '.',
                      verbosity: int = 1) -> pd.DataFrame:
    """
    The strategy is used to identity stocks which had 'good performance'
    based on desired 'return' duration

    eg
    >>>from stock_analysis import UnitStrategy
    >>>sa = UnitStrategy('./data/company_list.yaml')
    >>>sa.momentum_strategy(end_date='01/06/2020')

    Parameters
    ----------
    end_date : str, optional
        End date of of stock record to retrive. Must be in format:
        dd/mm/yyyy, by default 'today' for current date
    top_company_count : int, optional
        No of top company to retrieve based on Annualized return, by default 20
    save : int, optional
        Wether to export to disk, by default True
    export_path : str, optional
        Path to export csv.To be used only if 'save' is True,by default'.'
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    pd.DataFrame
        Record based on monthly and yearly calculation
    """
    # Resolve the date window: one year back from the requested end date.
    if end_date == 'today':
        end = datetime.datetime.now()
    else:
        end = datetime.datetime.strptime(end_date, '%d/%m/%Y').date()
    start = end - dateutil.relativedelta.relativedelta(years=1)
    jobs = [(company, start, end, verbosity)
            for company in self.data['company']]
    with multiprocessing.Pool(multiprocessing.cpu_count() - 1) as pool:
        rows = pool.starmap(self._parallel_momentum, jobs)
    momentum_df = pd.DataFrame(rows)
    momentum_df.dropna(inplace=True)
    momentum_df.sort_values(by=['return_yearly'],
                            ascending=False,
                            inplace=True)
    top_df = momentum_df.head(top_company_count)
    if verbosity > 0:
        logger.debug(f"Sample output:\n{top_df}")
    if save is not True:
        return top_df
    out_file = (f"{export_path}/momentum_result_{end.strftime('%d-%m-%Y')}"
                f"_top_{top_company_count}.csv")
    top_df.to_csv(out_file, index=False)
    if verbosity > 0:
        logger.debug(f"Saved at {out_file}")
def momentum_with_ema_strategy(self,
                               end_date: str = 'today',
                               top_company_count: int = 20,
                               ema_canditate: Tuple[int, int] = (50, 200),
                               save: bool = True,
                               export_path: str = '.',
                               verbosity: int = 1) -> pd.DataFrame:
    """The strategy is used to identity stocks with 'good performance' based
    on desired 'return' duration and 'exponential moving avg'.

    Parameters
    ----------
    end_date : str, optional
        End date of of stock record to retrive. Must be in format:
        dd/mm/yyyy, by default 'today' for current date
    top_company_count : int, optional
        No of top company to retrieve based on Annualized return, by default 20
    ema_canditate : Tuple[int, int], optional
        Period (or days) to calculate EMA, by default (50,200)
    save : int, optional
        Wether to export to disk, by default True
    export_path : str, optional
        Path to export csv.To be used only if 'save' is True,by default'.'
    verbosity : int, optional
        Level of detail logging,1=< Deatil, 0=Less detail , by default 1

    Returns
    -------
    pd.DataFrame
        Record based on monthly and yearly calculation and EMA calculation
    """
    logger.info("Performing Momentum Strategy task")
    momentum_df = self.momentum_strategy(
        end_date=end_date,
        top_company_count=top_company_count,
        save=False,
        verbosity=verbosity)
    momentum_df.reset_index(drop=True, inplace=True)
    # Restrict the EMA pass to the top companies found by momentum.
    ind = Indicator(company_name=momentum_df.loc[:, 'company'])
    logger.info(
        f"Performing EMA task on top {top_company_count} company till {end_date}"
    )
    if end_date == 'today':
        cutoff_date = end_date
        save_date = datetime.datetime.now().strftime('%d-%m-%Y')
    else:
        save_date = end_date.replace('/', '-')
        cutoff_date = datetime.datetime.strptime(end_date, '%d/%m/%Y')
        assert isinstance(cutoff_date,
                          datetime.datetime), 'Incorrect date type'
    ema_df = ind.ema_indicator(ema_canditate=ema_canditate,
                               cutoff_date=cutoff_date,
                               save=False,
                               verbosity=verbosity)
    momentum_ema_df = momentum_df.merge(ema_df,
                                        on='company',
                                        validate='1:1')
    if save is not True:
        return momentum_ema_df
    momentum_ema_df.reset_index(drop=True, inplace=True)
    out_file = (f"{export_path}/momentum_ema{ema_canditate[0]}-"
                f"{ema_canditate[1]}_{save_date}_top_{top_company_count}.csv")
    momentum_ema_df.to_csv(out_file, index=False)
    logger.debug(f"Saved at {out_file}")
    if verbosity > 0:
        logger.debug(f"Sample output:\n{momentum_ema_df.head()}")