def monthly_returns_table(df: pd.Series, wts):
    """
    Print a table of monthly returns plus the YTD return for every year
    covered by the series.

    Parameters
    ----------
    df : pd.Series
        Price/return series handed to ``stockanalytics.cum_returns``.
    wts : weights passed through to ``stockanalytics.cum_returns``.
    """
    cumulative = stockanalytics.cum_returns(df, wts)
    perf = ffn.PerformanceStats(cumulative)
    print('Table displaying monthly and YTD returns:')
    perf.display_monthly_returns()
def calculate_benchmark(self, default, v1_name, v1_values, v2_name=None, v2_values=None, train_rng=None):
    """
    Grid-benchmark the strategy over one or two swept parameters.

    Parameters
    ----------
    default : dict
        Baseline parameter values; each is held fixed except the swept ones.
    v1_name : str
        Name of the first parameter to sweep.
    v1_values : iterable
        Candidate values for ``v1_name``.
    v2_name, v2_values : str, iterable, optional
        Optional second parameter to sweep.
    train_rng : list, optional
        Training range forwarded to ``process`` (defaults to [0, 0.8]).

    Returns
    -------
    pd.DataFrame
        One row per parameter combination; the 'perf' column holds the
        ffn.PerformanceStats built from that run's cumulative returns.
    """
    # Avoid a mutable default argument shared across calls.
    if train_rng is None:
        train_rng = [0, 0.8]

    # Each parameter becomes a single-value list, except the swept ones.
    param_dict = {key: [value] for key, value in default.items()}
    param_dict[v1_name] = v1_values
    if v2_values is not None:
        param_dict[v2_name] = v2_values

    cols = list(param_dict.keys()) + ['perf']

    # Accumulate rows and build the frame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    rows = []
    for combo in tqdm(list(product(*param_dict.values()))):
        params = dict(zip(param_dict.keys(), combo))
        # deepcopy so process() cannot mutate the caller's range.
        self.process(train_rng=deepcopy(train_rng), **params)
        trade_record = self.record
        params['perf'] = ffn.PerformanceStats(trade_record['cum rets'], rf=0.0016)
        rows.append(params)

    return pd.DataFrame(rows, columns=cols)
def test_monthly_returns():
    """PerformanceStats.monthly_returns should equal the month-end
    resampled percentage change of the price series."""
    dates = [
        '31/12/2017', '5/1/2018', '9/1/2018', '13/1/2018', '17/1/2018',
        '21/1/2018', '25/1/2018', '29/1/2018', '2/2/2018', '6/2/2018',
        '10/2/2018', '14/2/2018', '18/2/2018', '22/2/2018', '26/2/2018',
        '1/5/2018', '5/5/2018', '9/5/2018', '13/5/2018', '17/5/2018',
        '21/5/2018', '25/5/2018', '29/5/2018', '2/6/2018', '6/6/2018',
        '10/6/2018', '14/6/2018', '18/6/2018', '22/6/2018', '26/6/2018'
    ]
    prices = [
        100, 98, 100, 103, 106, 106, 107, 111, 115, 115, 118, 122, 120, 119,
        118, 119, 118, 120, 122, 126, 130, 131, 131, 134, 138, 139, 139, 138,
        140, 140
    ]
    df1 = pd.DataFrame(prices,
                       index=pd.to_datetime(dates, format="%d/%m/%Y"),
                       columns=['Price'])
    obj1 = ffn.PerformanceStats(df1['Price'])
    # The original comparison discarded its result, so the test could never
    # fail. Assert it; NaN-safe because the first pct_change is NaN.
    assert np.allclose(obj1.monthly_returns,
                       df1['Price'].resample('M').last().pct_change(),
                       equal_nan=True)
def get_summary(self, fromDate=None, toDate=None, trim=False, verbose=True):
    """
    Summarise trading performance from the recorded history.

    Parameters
    ----------
    fromDate, toDate : optional
        Slice bounds applied to ``self.record`` when ``trim`` is True.
    trim : bool
        If True, restrict the summary to ``record[fromDate:toDate]``.
    verbose : bool
        If True, print a results table and display/plot the ffn stats.

    Returns
    -------
    pd.DataFrame
        Single-row frame with sharpe/return/drawdown figures, per-leg
        returns, cost drags, transaction count and mean holding time in
        minutes. An all-zero row is returned when no transactions exist.
    """
    d = self.record[fromDate:toDate] if trim else self.record

    # Boolean masks marking actual entry/exit events for each leg.
    longentry = d.long_entry & d.transaction & (d.numUnits == 1)
    longexit = d.long_exit & d.transaction & (d.numUnits.shift(1) == 1)
    shortentry = d.short_entry & d.transaction & (d.numUnits == -1)
    shortexit = d.short_exit & d.transaction & (d.numUnits.shift(1) == -1)

    # Balance unmatched entries/exits at the window edges. Use .iloc:
    # integer-key Series.__setitem__ (e.g. longexit[-1] = True) is
    # label-based on modern pandas and would silently ADD a new element
    # under a non-integer index instead of setting the last one.
    if sum(longentry) > sum(longexit):
        longexit.iloc[-1] = True
    if sum(shortentry) > sum(shortexit):
        shortexit.iloc[-1] = True
    if sum(longentry) < sum(longexit):
        longentry.iloc[0] = True
    if sum(shortentry) < sum(shortexit):
        shortentry.iloc[0] = True

    if (sum(longentry) + sum(shortentry)) == 0:
        print("No transactions made!")
        return pd.DataFrame.from_records([{
            'sharpe_ratio': 0,
            'total_return': 0,
            'max_drawdown': 0,
            'long_ret': 0,
            'short_ret': 0,
            'spread_ret': 0,
            'trans_ret': 0,
            'n_transactions': 0,
            'mean_hold_time': 0
        }])

    # Pair up entry/exit timestamps per position side.
    long_holdings = pd.DataFrame({'entry': d.index[longentry],
                                  'exit': d.index[longexit]})
    short_holdings = pd.DataFrame({'entry': d.index[shortentry],
                                   'exit': d.index[shortexit]})
    long_holdings['position'] = 'long'
    short_holdings['position'] = 'short'
    holdings = pd.concat([long_holdings, short_holdings])
    holdings['time'] = holdings['exit'] - holdings['entry']

    perf = ffn.PerformanceStats(d['cum rets'], rf=0.0016)

    # Mean holding time expressed in whole minutes.
    timing = np.mean(holdings['time']).components
    minutes_hold = timing.days * 24 * 60 + timing.hours * 60 + timing.minutes

    out = pd.DataFrame.from_records([{
        'sharpe_ratio': round(perf.stats['daily_sharpe'], 3),
        'total_return': round(perf.stats['total_return'], 3),
        'max_drawdown': round(perf.stats['max_drawdown'], 3),
        'long_ret': round((1 + d.long_holding_ret).prod(), 3),
        'short_ret': round((1 + d.short_holding_ret).prod(), 3),
        'spread_ret': round((1 - d.spread_cost_ret).prod(), 3),
        'trans_ret': round((1 - d.transaction_cost_ret).prod(), 3),
        'n_transactions': len(holdings),
        'mean_hold_time': minutes_hold
    }])

    if verbose:
        print(
            tabulate(out[[
                'sharpe_ratio', 'long_ret', 'short_ret', 'spread_ret',
                'trans_ret', 'n_transactions', 'mean_hold_time'
            ]], headers='keys', tablefmt='psql'))
        perf.display()
        perf.plot()
    return out
def test_performance_stats():
    """The computed stats index must contain no duplicate keys."""
    ps = ffn.PerformanceStats(df['AAPL'])
    keys = ps.stats.keys()
    assert len(keys) == len(keys.drop_duplicates())
def test_set_riskfree_rate():
    """Sharpe ratios must reflect the risk-free rate — both as a constant
    annual number and as a price series — and PerformanceStats must agree
    with GroupStats at daily/monthly/yearly frequencies."""
    r = df.to_returns()  # NOTE(review): unused — presumably leftover setup
    performanceStats = ffn.PerformanceStats(df['MSFT'])
    groupStats = ffn.GroupStats(df)

    # --- default rf = 0: sharpe == mean/std * sqrt(periods per year) ---
    daily_returns = df['MSFT'].resample('D').last().dropna().pct_change()
    aae(
        performanceStats.daily_sharpe,
        daily_returns.dropna().mean() / (daily_returns.dropna().std()) *
        (np.sqrt(252)), 3)
    aae(performanceStats.daily_sharpe, groupStats['MSFT'].daily_sharpe, 3)
    monthly_returns = df['MSFT'].resample('M').last().pct_change()
    aae(
        performanceStats.monthly_sharpe,
        monthly_returns.dropna().mean() / (monthly_returns.dropna().std()) *
        (np.sqrt(12)), 3)
    aae(performanceStats.monthly_sharpe, groupStats['MSFT'].monthly_sharpe, 3)
    yearly_returns = df['MSFT'].resample('A').last().pct_change()
    aae(
        performanceStats.yearly_sharpe,
        yearly_returns.dropna().mean() / (yearly_returns.dropna().std()) *
        (np.sqrt(1)), 3)
    aae(performanceStats.yearly_sharpe, groupStats['MSFT'].yearly_sharpe, 3)

    # --- constant 2% annual rf, de-annualised per frequency (252/12/1) ---
    performanceStats.set_riskfree_rate(0.02)
    groupStats.set_riskfree_rate(0.02)
    daily_returns = df['MSFT'].pct_change()
    aae(
        performanceStats.daily_sharpe,
        np.mean(daily_returns.dropna() - 0.02 / 252) /
        (daily_returns.dropna().std()) * (np.sqrt(252)), 3)
    aae(performanceStats.daily_sharpe, groupStats['MSFT'].daily_sharpe, 3)
    monthly_returns = df['MSFT'].resample('M').last().pct_change()
    aae(
        performanceStats.monthly_sharpe,
        np.mean(monthly_returns.dropna() - 0.02 / 12) /
        (monthly_returns.dropna().std()) * (np.sqrt(12)), 3)
    aae(performanceStats.monthly_sharpe, groupStats['MSFT'].monthly_sharpe, 3)
    yearly_returns = df['MSFT'].resample('A').last().pct_change()
    aae(
        performanceStats.yearly_sharpe,
        np.mean(yearly_returns.dropna() - 0.02 / 1) /
        (yearly_returns.dropna().std()) * (np.sqrt(1)), 3)
    aae(performanceStats.yearly_sharpe, groupStats['MSFT'].yearly_sharpe, 3)

    # --- rf supplied as a price series ---
    rf = np.zeros(df.shape[0])
    #annual rf is 2%, accrued as a constant daily rate
    rf[1:] = 0.02 / 252
    rf[0] = 0.
    #convert the daily rate into a compounded price series starting at 100
    rf = 100 * np.cumprod(1 + pd.Series(data=rf, index=df.index, name='rf'))
    performanceStats.set_riskfree_rate(rf)
    groupStats.set_riskfree_rate(rf)
    daily_returns = df['MSFT'].pct_change()
    rf_daily_returns = rf.pct_change()
    aae(
        performanceStats.daily_sharpe,
        np.mean(daily_returns - rf_daily_returns) /
        (daily_returns.dropna().std()) * (np.sqrt(252)), 3)
    aae(performanceStats.daily_sharpe, groupStats['MSFT'].daily_sharpe, 3)
    monthly_returns = df['MSFT'].resample('M').last().pct_change()
    rf_monthly_returns = rf.resample('M').last().pct_change()
    aae(
        performanceStats.monthly_sharpe,
        np.mean(monthly_returns - rf_monthly_returns) /
        (monthly_returns.dropna().std()) * (np.sqrt(12)), 3)
    aae(performanceStats.monthly_sharpe, groupStats['MSFT'].monthly_sharpe, 3)
    yearly_returns = df['MSFT'].resample('A').last().pct_change()
    rf_yearly_returns = rf.resample('A').last().pct_change()
    aae(
        performanceStats.yearly_sharpe,
        np.mean(yearly_returns - rf_yearly_returns) /
        (yearly_returns.dropna().std()) * (np.sqrt(1)), 3)
    aae(performanceStats.yearly_sharpe, groupStats['MSFT'].yearly_sharpe, 3)
def test_performance_stats():
    """Smoke test: PerformanceStats can be built from a price series."""
    ps = ffn.PerformanceStats(df['AAPL'])
    # Previously this test asserted nothing; at minimum verify that the
    # stats container was populated.
    assert len(ps.stats) > 0
def test_monthly_returns():
    """PerformanceStats.monthly_returns should equal the month-end
    resampled percentage change of the price series."""
    dates = [
        "31/12/2017", "5/1/2018", "9/1/2018", "13/1/2018", "17/1/2018",
        "21/1/2018", "25/1/2018", "29/1/2018", "2/2/2018", "6/2/2018",
        "10/2/2018", "14/2/2018", "18/2/2018", "22/2/2018", "26/2/2018",
        "1/5/2018", "5/5/2018", "9/5/2018", "13/5/2018", "17/5/2018",
        "21/5/2018", "25/5/2018", "29/5/2018", "2/6/2018", "6/6/2018",
        "10/6/2018", "14/6/2018", "18/6/2018", "22/6/2018", "26/6/2018",
    ]
    prices = [
        100, 98, 100, 103, 106, 106, 107, 111, 115, 115, 118, 122, 120, 119,
        118, 119, 118, 120, 122, 126, 130, 131, 131, 134, 138, 139, 139, 138,
        140, 140,
    ]
    df1 = pd.DataFrame(prices,
                       index=pd.to_datetime(dates, format="%d/%m/%Y"),
                       columns=["Price"])
    obj1 = ffn.PerformanceStats(df1["Price"])
    # The original comparison discarded its result, so the test could never
    # fail. Assert it; NaN-safe because the first pct_change is NaN.
    assert np.allclose(obj1.monthly_returns,
                       df1["Price"].resample("M").last().pct_change(),
                       equal_nan=True)