def test_get_aggregate_returns_with_simple_returns(self):
    """Aggregates simple returns to yearly and monthly frequency, with and without multi-index output."""
    test_returns = [1, 1, 1, 1]
    dates = DatetimeIndex(
        ['2015-12-01', '2016-05-05', '2016-10-01', '2017-01-05'])
    simple_returns_series = SimpleReturnsSeries(data=test_returns, index=dates)

    # Yearly aggregation: the two 2016 returns compound to (1+1)*(1+1)-1 = 3.0,
    # and results are stamped at year end
    expected_cumulative_returns = [1.0, 3.0, 1.0]
    expected_result = SimpleReturnsSeries(
        data=expected_cumulative_returns,
        index=DatetimeIndex(['2015-12-31', '2016-12-31', '2017-12-31']))
    actual_result = get_aggregate_returns(simple_returns_series, convert_to=Frequency.YEARLY)
    assert_series_equal(expected_result, actual_result)

    # Monthly aggregation: one return per month, stamped at month end
    expected_result = SimpleReturnsSeries(data=[1, 1, 1, 1],
                                          index=DatetimeIndex([
                                              '2015-12-31', '2016-05-31', '2016-10-31', '2017-01-31'
                                          ]))
    actual_result = get_aggregate_returns(simple_returns_series, convert_to=Frequency.MONTHLY)
    assert_series_equal(expected_result, actual_result)

    # multi_index=True: result indexed by (year, month); unstack into a year x month frame
    # and check individual cells (columns are month numbers)
    actual_result = get_aggregate_returns(simple_returns_series, convert_to=Frequency.MONTHLY,
                                          multi_index=True)
    actual_result = actual_result.unstack()
    self.assertEqual(actual_result[1].values[2], 1.0)
    self.assertEqual(actual_result[5].values[1], 1.0)
    self.assertEqual(actual_result[10].values[1], 1.0)
    self.assertEqual(actual_result[12].values[0], 1.0)
def setUp(self):
    """Prepares a fund series, a fit series shifted by a constant deviation and three collinear regressors."""
    span = 100
    names = ['a', 'b', 'c']
    index = pd.date_range(start='2015-01-01', periods=span, freq='D')
    offset = 0.005

    # Fund returns: 0.01, 0.02, ..., 1.00
    fund_tms = SimpleReturnsSeries(data=[i / 100 for i in range(1, span + 1)], index=index)
    fit_tms = SimpleReturnsSeries(data=(fund_tms.values + offset), index=index)
    regressors = SimpleReturnsDataFrame(
        data=np.array([fund_tms, fund_tms + offset, fund_tms - offset]).T,
        index=index, columns=names)

    self.fund_returns_tms = fund_tms
    self.fit_returns_tms = fit_tms
    self.regressors_returns_df = regressors
    self.coefficients = QFSeries(index=names, data=[1.0, 1.0, 1.0])
    self.alpha = 0.005
def __init__(self, settings: Settings, pdf_exporter: PDFExporter, trades_df: QFDataFrame,
             start_date: datetime, end_date: datetime, nr_of_assets_traded: int = 1, title: str = "Trades"):
    """
    Builds a PDF document analysing a series of trades.

    Parameters
    ----------
    settings
        application settings used by the exporter
    pdf_exporter
        exporter used to save the generated document
    trades_df
        indexed by consecutive numbers starting at 0; columns are indexed using TradeField values
    start_date, end_date
        period covered by the analysis
    nr_of_assets_traded
        the model can be used to trade on many instruments at the same time and all aggregated
        trades will be in trades_df; this parameter informs on how many instruments at the same
        time the model was traded
    title
        title of the document, will be a part of the filename. Do not use special characters
    """
    # Sort trades chronologically (end date, then start date) and renumber them from 0
    self.trades_df = trades_df.sort_values([TradeField.EndDate, TradeField.StartDate]).reset_index(drop=True)
    self.start_date = start_date
    self.end_date = end_date
    self.nr_of_assets_traded = nr_of_assets_traded
    self.returns_of_trades = SimpleReturnsSeries(self.trades_df[TradeField.Return])
    self.returns_of_trades.name = "Returns of Trades"
    self.title = title
    self.document = Document(title)

    # position is linked to the position of axis in tearsheet.mplstyle
    self.half_image_size = (4, 2.2)
    self.dpi = 400
    self.settings = settings
    self.pdf_exporter = pdf_exporter
def test_beta_and_alpha(self):
    """Checks beta/alpha for a perfectly linear relation and for a slightly perturbed one."""
    dates = date_range(start='2015-01-01', periods=10, freq='d')
    strategy_rets = list(range(1, 21, 2))
    benchmark_rets = list(range(0, 10))
    epsilon = 0.000000001

    # Strategy converted to prices; returns are exactly 2 * benchmark + 1
    prices_tms = SimpleReturnsSeries(data=strategy_rets, index=dates).to_prices()
    benchmark_tms = SimpleReturnsSeries(data=benchmark_rets, index=dates)
    beta, alpha = beta_and_alpha(prices_tms, benchmark_tms)
    self.assertAlmostEqual(2.0, beta, delta=epsilon)
    self.assertAlmostEqual(1.0, alpha, delta=epsilon)

    # Perturb the first two strategy returns and recompute on the raw returns series
    strategy_rets[0] += 1
    strategy_rets[1] -= 1
    returns_tms = SimpleReturnsSeries(data=strategy_rets, index=dates)
    benchmark_tms = SimpleReturnsSeries(data=benchmark_rets, index=dates)
    beta, alpha = beta_and_alpha(returns_tms, benchmark_tms)
    self.assertAlmostEqual(1.9878787878787878, beta, delta=epsilon)
    self.assertAlmostEqual(1.0545454545454569, alpha, delta=epsilon)
def setUp(self):
    """Creates a 20-day daily returns series and a 6-month monthly returns series."""
    self.return_dates = date_range('2015-01-01', periods=20, freq='D')
    daily_rets = [0.01, 0.02, 0.03, 0.02, 0.01, 0.00, -0.01, -0.02, 0.01, 0.03,
                  0.05, 0.04, 0.03, 0.02, 0.01, 0.00, 0.01, 0.03, 0.02, 0.04]
    self.test_returns = daily_rets
    self.test_returns_tms = SimpleReturnsSeries(data=daily_rets, index=self.return_dates, dtype=float)

    month_ends = date_range('2015-01-01', periods=6, freq='M')
    self.monthly_ret_series = SimpleReturnsSeries(data=[0.05, 0.03, -0.1, 0.2, -0.01, -0.01],
                                                  index=month_ends, dtype=float)
def test_tail_events(self):
    """tail_events at the 25th percentile should select the four worst benchmark days."""
    tail_dates = DatetimeIndex(['2015-01-06', '2015-01-07', '2015-01-08', '2015-01-16'], freq=None)
    expected_benchmark = SimpleReturnsSeries(index=tail_dates, data=[0, -0.01, -0.02, 0])
    expected_examined = SimpleReturnsSeries(index=tail_dates, data=[0.02, 0.01, 0.00, 0.02])

    # The examined series is the benchmark shifted up by a constant 2%
    benchmark_tail, examined_tail = tail_events(
        self.test_simple_returns_tms, self.test_simple_returns_tms + 0.02, 25.0)

    assert_series_equal(expected_benchmark, benchmark_tail)
    assert_series_equal(expected_examined, examined_tail)
def trade_based_max_drawdown(trades: QFDataFrame):
    """ Calculates the max drawdown on the series of returns of trades """
    if trades.shape[0] == 0:
        return None

    # Build a returns series indexed by trade end dates, convert to prices and measure the drawdown
    returns_tms = SimpleReturnsSeries(index=trades[TradeField.EndDate],
                                      data=trades[TradeField.Return].values)
    prices_tms = returns_tms.to_prices(frequency=Frequency.DAILY)
    return -max_drawdown(prices_tms)
def setUp(self):
    """Prepares price, drawdown and simple-returns fixtures shared by the tests."""
    self.return_dates = date_range('2015-01-01', periods=20, freq='D')

    # Price path obtained by compounding the test returns below from an initial level of 100
    prices_values = [100, 101, 103.02, 106.1106, 108.232812, 109.31514012, 109.31514012,
                     108.2219887188, 106.057548944424, 107.118124433868, 110.331668166884,
                     115.848251575229, 120.482181638238, 124.096647087385, 126.578580029132,
                     127.844365829424, 127.844365829424, 129.1228094877180, 132.9964937723500,
                     135.656423647797, 141.082680593708]
    # One extra leading date (2014-12-31) carries the initial price level
    prices_dates = date_range('2014-12-31', periods=1, freq='D').append(self.return_dates)
    self.test_prices_tms = PricesSeries(data=prices_values, index=prices_dates)

    # Monthly price series with pronounced peaks and troughs, used for drawdown tests
    self.test_dd_prices_tms = PricesSeries(data=[100, 90, 80, 70, 95, 100, 100, 200, 100, 50, 100, 200, 150],
                                           index=date_range('2015-01-01', periods=13, freq='M'))

    self.test_returns = [0.01, 0.02, 0.03, 0.02, 0.01, 0, -0.01, -0.02, 0.01, 0.03,
                         0.05, 0.04, 0.03, 0.02, 0.01, 0, 0.01, 0.03, 0.02, 0.04]
    self.test_simple_returns_tms = SimpleReturnsSeries(data=self.test_returns, index=self.return_dates,
                                                       dtype=float)
def test_rolling_window(self):
    """Rolling window with a benchmark: only dates common to both series enter the window."""
    strategy_dates = pd.date_range('2015-01-01', periods=20, freq='D')
    benchmark_dates = pd.date_range('2015-01-10', periods=20, freq='D')
    data = [
        0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01,
        0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01
    ]
    strategy = SimpleReturnsSeries(data=data, index=strategy_dates)
    benchmark = SimpleReturnsSeries(data=data, index=benchmark_dates)

    # Window of 1: func receives one strategy and one benchmark value per step
    rolling = strategy.rolling_window_with_benchmark(
        benchmark, 1, lambda x, y: x.mean() + y.mean())
    self.assertEqual(rolling.iloc[0], 0.02)
    # Results start at the second common date and cover the 10-day overlap (Jan 10-19 of the strategy)
    self.assertEqual(rolling.index[0], benchmark_dates[1])
    self.assertEqual(rolling.index[9], benchmark_dates[10])
    self.assertEqual(len(rolling), 10)

    # Test with missing values in the middle.
    strategy_dates = pd.date_range('2015-01-02', periods=3, freq='D')
    benchmark_dates = pd.DatetimeIndex(
        ['2015-01-01', '2015-01-02', '2015-01-04'])
    strategy = SimpleReturnsSeries(data=[0.01, 0.50, 0.01], index=strategy_dates)
    benchmark = SimpleReturnsSeries(data=[0.50, 0.01, 0.01], index=benchmark_dates)
    rolling = strategy.rolling_window_with_benchmark(
        benchmark, 1, lambda x, y: x.mean() + y.mean())
    # Only 2015-01-02 and 2015-01-04 are shared; first window pairs 0.01 with 0.01
    self.assertEqual(rolling.iloc[0], 0.02)
def _add_returns_distribution(self):
    """Adds a histogram of trade returns (expressed as R multiples when an initial risk is set)."""
    if self.initial_risk is not None:
        values = [t.percentage_pnl / self.initial_risk for t in self.trades]
        title = "Distribution of R multiples, Initial risk = {:.2%}".format(self.initial_risk)
    else:
        values = [t.percentage_pnl for t in self.trades]
        title = "Distribution of returns [%]"

    returns = SimpleReturnsSeries(data=values)
    returns_histogram = self._get_distribution_plot(returns, title)

    # Format the x-axis so that its labels are shown as a percentage in case of percentage returns
    axes_formatter_decorator = AxesFormatterDecorator(x_major=PercentageFormatter(), key="axes_formatter")
    returns_histogram.add_decorator(axes_formatter_decorator)

    self.document.add_element(ChartElement(returns_histogram, figsize=self.full_image_size, dpi=self.dpi))
def _create_test_benchmark(cls):
    """Returns a six-point benchmark series (1.0 .. 6.0) starting on 2015-01-02."""
    index = pd.date_range(start='2015-01-02', periods=6)
    return SimpleReturnsSeries(data=[float(v) for v in range(1, 7)], index=index, name='Test prices')
def test_volatility_manager(self):
    """Volatility targeting should scale an alternating-sign series to a constant absolute return."""
    periods = 20
    dates = date_range('2015-01-01', periods=periods, freq='D')
    # +1%, -1%, ... for the first ten days, then +2%, -2%, ... afterwards
    returns = [(0.01 if i < 10 else 0.02) * (-1) ** i for i in range(periods)]
    series = SimpleReturnsSeries(data=returns, index=dates, name='Series 1')

    window_size = 5
    managed_series, weights_series = VolatilityManager(series).get_managed_series(
        vol_level=0.1, window_size=window_size, lag=1)

    expected_abs_ret = 0.005750
    self.assertAlmostEqual(abs(managed_series[window_size]), expected_abs_ret, places=5)
    self.assertAlmostEqual(abs(managed_series[window_size + 1]), expected_abs_ret, places=5)
    self.assertAlmostEqual(abs(managed_series[-2]), expected_abs_ret, places=5)
    self.assertAlmostEqual(abs(managed_series[-1]), expected_abs_ret, places=5)
def _get_distribution_plot(self, data_series: SimpleReturnsSeries, title: str,
                           bins: Union[int, str] = 50, crop: bool = False):
    """
    Builds a histogram chart of the given series with a mean line, legend and title.

    Parameters
    ----------
    data_series
        values to plot
    title
        chart title
    bins
        number of bins (or a binning strategy accepted by HistogramChart)
    crop
        if True, limits the x-axis to the 1st-99th percentile range of the data
    """
    colors = Chart.get_axes_colors()
    if crop:
        start_x = np.quantile(data_series, 0.01)
        end_x = np.quantile(data_series, 0.99)
        chart = HistogramChart(data_series, bins=bins, start_x=start_x, end_x=end_x)
    else:
        chart = HistogramChart(data_series, bins=bins)

    # Only show whole numbers on the y-axis.
    y_axis_locator = MaxNLocator(integer=True)
    axes_locator_decorator = AxesLocatorDecorator(y_major=y_axis_locator, key="axes_locator")
    chart.add_decorator(axes_locator_decorator)

    # Add an average line.
    avg_line = VerticalLineDecorator(data_series.mean(), color=colors[1], key="average_line_decorator",
                                     linestyle="--", alpha=0.8)
    chart.add_decorator(avg_line)

    # Add a legend.
    legend = LegendDecorator(key="legend_decorator")
    legend.add_entry(avg_line, "Mean")
    chart.add_decorator(legend)

    # Add a title.
    title_decorator = TitleDecorator(title, key="title")
    chart.add_decorator(title_decorator)
    # NOTE(review): the title is reused here as the first axes label -- confirm this is
    # intended rather than a dedicated x-axis label.
    chart.add_decorator(AxesLabelDecorator(title, "Occurrences"))

    position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
    chart.add_decorator(position_decorator)

    return chart
def omega_ratio(returns_tms: SimpleReturnsSeries, threshold: float = 0) -> float:
    """
    Omega Ratio - The Omega Ratio is a measure of performance that doesn't assume a normal distribution of returns.
    The Omega ratio is a relative measure of the likelihood of achieving a given return, such as a minimum
    acceptable return (MAR) or a target return. The higher the omega value, the greater the probability that a given
    return will be met or exceeded. Omega represents a ratio of the cumulative probability of an investment's
    outcome above an investor's defined return level (a threshold level), to the cumulative probability
    of an investment's outcome below an investor's threshold level. The omega concept divides expected returns into
    two parts - gains and losses, or returns above the expected rate (the upside) and those below it (the downside).
    Therefore, in simple terms, consider omega as the ratio of upside returns (good) relative to downside
    returns (bad).

    Parameters
    ----------
    returns_tms
        time series of price returns
    threshold
        threshold (e.g. benchmark return or target return) for the portfolio

    Returns
    -------
    omega_ratio
        Omega Ratio calculated for threshold; float('inf') when no return falls
        below the threshold (previously this case raised ZeroDivisionError)
    """
    returns_tms = returns_tms.to_simple_returns()

    # Vectorized split of excess returns into the upside and downside parts
    excess_returns = returns_tms.values - threshold
    upside = excess_returns[excess_returns >= 0].sum()
    downside = -excess_returns[excess_returns < 0].sum()

    if downside == 0:
        # All returns meet or exceed the threshold -> the ratio diverges
        return float('inf')
    return upside / downside
def get_analysed_tms_and_regressors(dates_span: int = 1000, num_of_regressors: int = 7,
                                    start_date: datetime.datetime = str_to_date('2016-01-01'),
                                    mean_return: float = 0.001, std_of_returns: float = 0.02,
                                    a_coeff: float = -0.25, b_coeff: float = 1.25, intercept: float = 0.004)\
        -> Tuple[SimpleReturnsSeries, SimpleReturnsDataFrame]:
    """
    Creates a dataframe with simple returns of sample timeseries (regressors). Then creates
    a series which linearly depends on regressors 'a' and 'b'.
    """
    dates = pd.bdate_range(start=start_date, periods=dates_span)
    regressors_names = generate_sample_column_names(num_of_columns=num_of_regressors)

    # Fixed seed keeps the generated sample reproducible between runs
    np.random.seed(5)
    regressors_data = np.random.normal(mean_return, std_of_returns, (dates_span, num_of_regressors))
    regressors_df = SimpleReturnsDataFrame(data=regressors_data, index=dates, columns=regressors_names)

    # Analysed series = linear combination of the first two regressors plus noise and an intercept
    noise = np.random.normal(0, 0.02, dates_span)
    analyzed_data = a_coeff * regressors_data[:, 0] + b_coeff * regressors_data[:, 1] + noise + intercept
    analysed_tms = SimpleReturnsSeries(data=analyzed_data, index=dates, name='Fund')

    return analysed_tms, regressors_df
def test_make_parity_boxes(self):
    """Checks the returns series produced for each growth/inflation regime combination."""
    abs_tolerance = 0.0005

    actual_boxes = self.risk_parity_boxes_factory.make_parity_boxes(self.start_date, self.end_date)

    # 22 business days spanning 2017-10-03 .. 2017-11-01, shared by all four boxes
    datetime_index = pd.DatetimeIndex([
        '2017-10-03', '2017-10-04', '2017-10-05', '2017-10-06', '2017-10-09',
        '2017-10-10', '2017-10-11', '2017-10-12', '2017-10-13', '2017-10-16',
        '2017-10-17', '2017-10-18', '2017-10-19', '2017-10-20', '2017-10-23',
        '2017-10-24', '2017-10-25', '2017-10-26', '2017-10-27', '2017-10-30',
        '2017-10-31', '2017-11-01'
    ])

    # Rising growth, rising inflation
    expected_series = SimpleReturnsSeries(index=datetime_index, data=[
        0.000668214, 0.000835684, 0.000837076, -0.001577371, 0.000934,
        0.002332372, 0.000723187, 0.000714223, 0.002511958, -0.00039049,
        -0.000812991, -0.000116197, 0.0011223, -0.001970612, -0.000243163,
        -0.000622247, 0.000292873, -0.001195635, 0.002011089, 0.002190187,
        7.02049E-05, 0.000546751
    ])
    actual_series = actual_boxes.get_series(growth=ChangeDirection.RISING,
                                            inflation=ChangeDirection.RISING)
    assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)

    # Rising growth, falling inflation
    expected_series = SimpleReturnsSeries(index=datetime_index, data=[
        0.00214062368, 0.00011823259, 0.00133745897, -0.00093319962, .0,
        0.00126311759, 0.00040289465, -0.00051413454, 0.00268699427, -0.00018594003,
        0.00017342217, -0.00062892417, 0.00109962909, 0.00034010165, -0.00100080029,
        -0.0008813078, -0.00345021469, 0.00057545608, 0.0049085509, 0.00068356544,
        -0.00038338606, -0.00010546472
    ])
    actual_series = actual_boxes.get_series(growth=ChangeDirection.RISING,
                                            inflation=ChangeDirection.FALLING)
    assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)

    # Falling growth, rising inflation
    expected_series = SimpleReturnsSeries(index=datetime_index, data=[
        0.00075094743, 0.00102506202, -0.00116334637, 0.00086457088, 0.001030,
        0.00202367433, 0.00069925268, 0.00124515552, 0.00178121325, -0.00337692323,
        -0.00195857117, -0.00094960523, 0.00162098426, -0.00199096335, 0.00042216467,
        -0.00137335971, -0.0010796149, -0.00136642671, 0.00247283233,
        0.00223942762, -0.00063914336, 0.00046975
    ])
    actual_series = actual_boxes.get_series(growth=ChangeDirection.FALLING,
                                            inflation=ChangeDirection.RISING)
    assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)

    # Falling growth, falling inflation
    expected_series = SimpleReturnsSeries(index=datetime_index, data=[
        0.00112748921, 0.0005160599, -0.00222551401, 0.00103350015, 0.002442,
        0.00115561595, 0.00162539269, 0.00111385182, 0.00464585854, -0.00296358368,
        -0.00262220629, -0.00270699558, 0.00275822114, -0.00509161628, 0.00107539045,
        -0.00342160737, -0.00093475391, -0.00330513788, 0.003736121, 0.00322371024,
        -0.00155485155, 0.00004935567
    ])
    actual_series = actual_boxes.get_series(growth=ChangeDirection.FALLING,
                                            inflation=ChangeDirection.FALLING)
    assert_series_equal(expected_series, actual_series, absolute_tolerance=abs_tolerance)
def test_min_max_normalized(self):
    """Min-max normalization should map the returns linearly onto [0, 1]."""
    normalized_tms = self.test_returns_tms.min_max_normalized()
    # The returns span 7 equal steps between min and max; each value maps to step_index / 7
    steps = array([3, 4, 5, 4, 3, 2, 1, 0, 3, 5, 7, 6, 5, 4, 3, 2, 3, 5, 4, 6])
    expected_tms = SimpleReturnsSeries(data=steps / 7, index=self.test_returns_tms.index)
    assert_series_equal(expected_tms, normalized_tms)
def setUp(self):
    """Six daily portfolio returns plus a two-factor returns dataframe on the same dates."""
    dates = pd.date_range(start='2015-02-01', periods=6)
    self.portfolio_tms = SimpleReturnsSeries([0.01, 0.02, -0.03, 0.04, -0.05, 0.06], dates)

    factor_a = [0.011, 0.035, -0.028, 0.039, -0.044, 0.061]
    factor_b = [0.02, 0.04, -0.06, 0.08, -0.1, 0.12]
    self.factors_df = SimpleReturnsDataFrame(data=np.array([factor_a, factor_b]).T,
                                             index=dates, columns=['a', 'b'])
def to_simple_returns(self) -> "SimpleReturnsSeries":
    """
    Converts this series of log returns into a series of simple returns.
    """
    from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries

    # simple_ret = e^log_ret - 1, applied element-wise
    converted_values = []
    for log_ret in self.values:
        converted_values.append(exp(log_ret) - 1)

    result = SimpleReturnsSeries(index=self.index.copy(), data=converted_values)
    return result.__finalize__(self)
def to_simple_returns(self) -> "SimpleReturnsSeries":
    """
    Converts this price series into a series of simple returns.
    """
    from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries

    previous_prices = self.copy().shift(1)
    returns_with_nan = self / previous_prices - 1  # type: PricesSeries
    # Drop the first element: there is no previous price to compare against
    result = SimpleReturnsSeries(index=self.index[1:].copy(), data=returns_with_nan.iloc[1:])
    return result.__finalize__(self)
def trade_based_cagr(trades: QFDataFrame, start_date: datetime, end_date: datetime):
    """
    Calculates the compound annual growth rate (CAGR) of the returns of the given trades
    over the period [start_date, end_date].
    """
    returns = trades[TradeField.Return]
    dates = trades[TradeField.EndDate]

    # Insert a zero return at start_date and another at end_date so the series spans
    # the whole analysed period. Trade end dates are shifted by +1 day and the to_prices
    # frequency is set to daily so that the resulting prices series starts exactly
    # at start_date.
    returns = pd.concat([pd.Series([0]), returns, pd.Series([0])])
    dates = pd.concat([
        pd.Series([start_date]), dates + timedelta(days=1),
        pd.Series([end_date])
    ])

    returns_tms = SimpleReturnsSeries(index=dates, data=returns.values)
    prices_tms = returns_tms.to_prices(frequency=Frequency.DAILY)
    return cagr(prices_tms)
def setUp(self):
    """Prepares equivalent simple-return, price and log-return fixtures for the tests."""
    return_dates = pd.date_range('2015-01-01', periods=20, freq='D')
    test_returns = [0.01, 0.02, 0.03, 0.02, 0.01, 0, -0.01, -0.02, 0.01, 0.03,
                    0.05, 0.04, 0.03, 0.02, 0.01, 0, 0.01, 0.03, 0.02, 0.04]
    self.test_simple_returns_tms = SimpleReturnsSeries(data=test_returns, index=return_dates,
                                                       dtype=float, name='Test Name')

    # Price path obtained by compounding the returns above from an initial level of 100;
    # one extra leading date (2014-12-31) carries the initial price
    prices_values = [100, 101, 103.02, 106.1106, 108.232812, 109.31514012, 109.31514012,
                     108.2219887188, 106.057548944424, 107.118124433868, 110.331668166884,
                     115.848251575229, 120.482181638238, 124.096647087385, 126.578580029132,
                     127.844365829424, 127.844365829424, 129.1228094877180, 132.9964937723500,
                     135.656423647797, 141.082680593708]
    prices_dates = pd.date_range('2014-12-31', periods=1, freq='D').append(return_dates)
    self.test_prices_tms = PricesSeries(data=prices_values, index=prices_dates, name='Test Name')

    # Log returns equivalent to the simple returns above: log_ret = ln(1 + simple_ret)
    test_log_returns = [0.009950331, 0.019802627, 0.029558802, 0.019802627, 0.009950331, 0,
                        -0.010050336, -0.020202707, 0.009950331, 0.029558802, 0.048790164,
                        0.039220713, 0.029558802, 0.019802627, 0.009950331, 0, 0.009950331,
                        0.029558802, 0.019802627, 0.039220713]
    self.test_log_returns_tms = LogReturnsSeries(data=test_log_returns, index=return_dates,
                                                 dtype=float, name='Test Name')
def main():
    """
    Computes the market stress indicator, plots it, and compares the SPX index
    against an SPX series leverage-managed by the indicator.
    """
    data_provider = container.resolve(GeneralPriceProvider)  # type: GeneralPriceProvider
    msi = MarketStressIndicator(tickers, weights, data_provider)
    stress_indicator_tms = msi.get_indicator(years_rolling, start_date, end_date, step)
    # stress_indicator_tms = cached_value(_get_indicator, indicator_cache_path)  # type: QFSeries

    fig_size = (10, 5)
    title = "Stress Indicator US {}Y rolling".format(years_rolling)
    chart = create_line_chart([stress_indicator_tms], ['Stress Indicator'], title)
    chart.plot(figsize=fig_size)

    no_none_indicator_tms = stress_indicator_tms.dropna()
    histogram = HistogramChart(no_none_indicator_tms, best_fit=False, bins=100)
    histogram.plot(figsize=fig_size)

    # Get SPX Index
    spx = BloombergTicker('SPX Index')
    spx_index_tms = data_provider.get_price(spx, PriceField.Close,
                                            no_none_indicator_tms.first_valid_index(), end_date)
    spx_returns = spx_index_tms.to_simple_returns()

    # Calculate managed series: de-leverage SPX when the (2-day-lagged) stress indicator is elevated
    managed_series = SimpleReturnsSeries()
    # NOTE: `items()` replaces `iteritems()`, which was removed in pandas 2.0
    for date, ret in spx_returns.items():
        risk_value = no_none_indicator_tms.asof(date - timedelta(days=2))

        leverage = 1
        if risk_value > 0.35:
            leverage = 0.66
        if risk_value > 1.5:
            leverage = 0.33

        managed_ret = ret * leverage
        managed_series[date] = managed_ret

    # Plot managed and pure SPX series
    chart = create_line_chart(
        [spx_returns.to_prices(), managed_series.to_prices()],
        ['SPX Index', "SPX with Stress Indicator"])
    chart.plot(figsize=fig_size)

    plt.show(block=True)
def to_simple_returns(self) -> "SimpleReturnsSeries":
    """
    Converts this price series into a series of simple returns.
    """
    from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries

    # ret_t = p_t / p_{t-1} - 1 for each consecutive pair of prices
    rets = [self[i] / self[i - 1] - 1 for i in range(1, len(self))]
    result = SimpleReturnsSeries(index=self.index[1:].copy(), data=rets)
    return result.__finalize__(self)
def _get_monte_carlos_simulator_outputs(self, scenarios_df: PricesDataFrame, total_returns: SimpleReturnsSeries) \
        -> DFTable:
    """
    Builds a summary table (measure / value pairs) of the Monte Carlo simulation results.

    Parameters
    ----------
    scenarios_df
        simulated price paths, one column per scenario
    total_returns
        total return of each scenario
    """
    _, all_scenarios_number = scenarios_df.shape
    rows = []

    # Add the Median Return value
    median_return = np.median(total_returns)
    rows.append(("Median Return", "{:.2%}".format(median_return)))

    # Add the Mean Return value
    mean_return = total_returns.mean()
    rows.append(("Mean Return", "{:.2%}".format(mean_return)))

    # Volatility-adjusted expectation: compound (mean - 0.5 * std^2) over the
    # average number of trades per year
    trade_returns = QFSeries(data=[trade.percentage_pnl for trade in self.trades])
    sample_len = int(self._average_number_of_trades_per_year())
    std = trade_returns.std()
    expectation_adj_series = np.ones(sample_len) * (trade_returns.mean() - 0.5 * std * std)
    expectation_adj_series = SimpleReturnsSeries(data=expectation_adj_series)
    expectation_adj_series = expectation_adj_series.to_prices(suggested_initial_date=0)
    mean_volatility_adjusted_return = expectation_adj_series.iloc[-1] / expectation_adj_series.iloc[0] - 1.0
    rows.append(("Mean Volatility Adjusted Return", "{:.2%}".format(mean_volatility_adjusted_return)))

    # Add the Median Drawdown
    max_drawdowns = max_drawdown(scenarios_df)
    median_drawdown = np.median(max_drawdowns)
    rows.append(("Median Maximum Drawdown", "{:.2%}".format(median_drawdown)))

    # Add the Median Return / Median Drawdown
    rows.append(("Return / Drawdown", "{:.2f}".format(median_return / median_drawdown)))

    # Probability, that the return will be > 0
    scenarios_with_positive_result = total_returns[total_returns > 0.0].count()
    probability = scenarios_with_positive_result / all_scenarios_number
    rows.append(("Probability of positive return", "{:.2%}".format(probability)))

    table = DFTable(data=QFDataFrame.from_records(rows, columns=["Measure", "Value"]),
                    css_classes=['table', 'left-align'])
    table.add_columns_classes(["Measure"], 'wide-column')
    return table
def _get_simulation_plot(self, scenarios_df: PricesDataFrame) -> Chart:
    """
    Plots all simulated price paths on a log-scale line chart, together with the
    ensemble average and a volatility-adjusted expectation line.
    """
    chart = LineChart(log_scale=True)

    # One thin line per simulated scenario
    for _, scenario in scenarios_df.items():
        data_element = DataElementDecorator(scenario, linewidth=0.5)
        chart.add_decorator(data_element)

    # Add a legend
    legend = LegendDecorator(key="legend_decorator")

    # Add Ensemble average
    ensemble_avg = scenarios_df.mean(axis=1)
    ensemble_avg_data_element = DataElementDecorator(ensemble_avg, color="#e1e5f4", linewidth=3)
    chart.add_decorator(ensemble_avg_data_element)
    legend.add_entry(ensemble_avg_data_element, "Ensemble average")

    # Add Expectation (vol adjusted): constant per-step return of (mean - 0.5 * std^2)
    # compounded over the same index as the ensemble average
    trade_returns = QFSeries(data=[trade.percentage_pnl for trade in self.trades])
    std = trade_returns.std()
    expectation_adj_series = np.ones(len(ensemble_avg)) * (trade_returns.mean() - 0.5 * std * std)
    expectation_adj_series = SimpleReturnsSeries(data=expectation_adj_series, index=ensemble_avg.index)
    expectation_adj_series = expectation_adj_series.to_prices()

    data_element = DataElementDecorator(expectation_adj_series, color="#46474b", linewidth=2)
    chart.add_decorator(data_element)
    legend.add_entry(data_element, "Expectation (vol adjusted)")

    # Add title
    title_decorator = TitleDecorator("Monte Carlo Simulations (log scale)", key="title")
    chart.add_decorator(title_decorator)

    position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
    chart.add_decorator(position_decorator)

    chart.add_decorator(legend)
    return chart
def test_exponential_average(self):
    """Exponential smoothing with the default lambda, and the identity case lambda = 1."""
    actual_smoothed_series = self.test_returns_tms.exponential_average()

    expected_smoothed_values = [0.01, 0.0194, 0.029364, 0.02056184, 0.0106337104,
                                0.000638022624, -0.00936171864256, -0.019361703118554,
                                0.0082382978128868, 0.028694297868773, 0.048721657872126,
                                0.040523299472328, 0.03063139796834, 0.0206378838781,
                                0.010638273032686, 0.00063829638196116, 0.0094382977829177,
                                0.028766297866975, 0.020525977872019, 0.038831558672321]
    expected_smoothed_series = SimpleReturnsSeries(data=expected_smoothed_values,
                                                   index=self.return_dates.copy())
    assert_series_equal(expected_smoothed_series, actual_smoothed_series)

    # lambda_coeff=1 puts full weight on the newest observation, so smoothing is a no-op
    actual_smoothed_series = self.test_returns_tms.exponential_average(lambda_coeff=1)
    expected_smoothed_series = self.test_returns_tms
    assert_series_equal(expected_smoothed_series, actual_smoothed_series)
def constant_weights(cls, assets_rets_df: SimpleReturnsDataFrame, weights: pd.Series) \
        -> Tuple[SimpleReturnsSeries, QFDataFrame]:
    """
    Calculates the time series of portfolio returns (given the weights of portfolio's assets).
    Weights of assets are assumed to be the same all the time (there is a rebalancing on each time tick,
    e.g. every day if the series has a daily frequency). The method also calculates the allocation matrix.
    However since the weights are constant, so are the allocations.

    Parameters
    ----------
    assets_rets_df
        simple returns of assets which create the portfolio
    weights
        weights of assets creating the portfolio

    Returns
    -------
    portfolio_rets_tms
        timeseries of portfolio's returns
    allocation_df
        dataframe indexed with dates and showing allocations in time (one column per asset)
    """
    assert len(weights) == assets_rets_df.num_of_columns

    weights_sum = weights.sum()
    # BUGFIX: previously `abs(weights_sum) - 1.0 > EPSILON`, which never warned when the
    # weights summed to less than 1.0 (e.g. sum == 0.5 gave abs(0.5) - 1.0 = -0.5)
    if abs(weights_sum - 1.0) > cls.EPSILON:
        cls.logger().warning(
            "Sum of all weights is not equal to 1.0: sum(weights) = {:f}".
            format(weights_sum))

    portfolio_rets = assets_rets_df.values.dot(weights)
    portfolio_rets_tms = SimpleReturnsSeries(
        data=portfolio_rets, index=assets_rets_df.index.copy())

    # Constant weights: replicate the weights vector once per date (row) in the index
    num_of_dates = assets_rets_df.num_of_rows
    allocation_matrix = np.tile(weights, (num_of_dates, 1))
    allocation_df = QFDataFrame(data=allocation_matrix,
                                index=assets_rets_df.index.copy(),
                                columns=assets_rets_df.columns.copy())

    return portfolio_rets_tms, allocation_df
def performance_attribution_chart(self) -> BarChart:
    """
    Builds a horizontal bar chart decomposing the fund's annualised return into
    per-factor contributions plus an unexplained residual.
    """
    colors_palette = Chart.get_axes_colors()

    unexplained_ret = self.model.unexplained_performance_attribution_ret
    factors_ret = self.model.factors_performance_attribution_ret
    fund_ret = self.model.fund_tms_analysis.cagr

    unexplained_name = "Unexplained"
    factors_names = [
        self._get_security_name(ticker)
        for ticker in self.model.coefficients.index.values
    ]
    fund_name = self._get_security_name(
        self.model.input_data.analysed_tms.name)

    # Bar order: unexplained residual, then each factor, then the fund's total return
    all_values = [unexplained_ret] + list(factors_ret) + [fund_ret]
    all_names = [unexplained_name] + list(factors_names) + [fund_name]
    all_returns = SimpleReturnsSeries(data=all_values, index=pd.Index(all_names))

    # Distinct color for the residual and the fund; one shared color for all factors
    colors = [
        colors_palette[0]
    ] + [colors_palette[1]] * len(factors_names) + [colors_palette[2]]

    index_translator = self._get_index_translator(labels=all_names)
    bar_chart = BarChart(orientation=Orientation.Horizontal,
                         index_translator=index_translator,
                         thickness=self._bars_width,
                         align='center')
    bar_chart.add_decorator(DataElementDecorator(all_returns, color=colors))
    bar_chart.add_decorator(
        TitleDecorator("Attribution of Fund Annualised Return"))
    bar_chart.add_decorator(
        AxesLabelDecorator(x_label="annualised return [%]"))
    bar_chart.add_decorator(
        AxesFormatterDecorator(x_major=PercentageFormatter()))
    # Label each bar with its return expressed in percent
    labels = ('{:.2f}'.format(value * 100) for value in all_returns)
    self._add_labels_for_bars(bar_chart, all_returns, labels)

    return bar_chart
def different_allocations_tms(cls, assets_rets_df: SimpleReturnsDataFrame, allocations_df: QFDataFrame) \
        -> SimpleReturnsSeries:
    """
    Calculates the time series of portfolio returns given the allocations on each date.
    The portfolio returns are calculated by multiplying returns of assets by corresponding
    allocations' values.

    Parameters
    ----------
    assets_rets_df
        simple returns of assets which create the portfolio
    allocations_df
        dataframe indexed with dates, showing allocations in time (one column per asset)

    Returns
    -------
    portfolio_rets_tms
        timeseries of portfolio's returns
    """
    assert np.all(assets_rets_df.columns.values == allocations_df.columns.values), \
        "Different column values for assets and allocation matrix"
    assert np.all(assets_rets_df.index.values == allocations_df.index.values), \
        "Different dates for assets and allocation matrix"

    # get indices of rows for which: sum of weights is greater than 1. The result of where is a tuple (for a vector
    # it's a 1-element tuple, for a matrix -- a 2-element tuple and so on). Thus it's necessary to unwrap the result
    # from a tuple, to get the array of indices (instead of 1-elem. tuple consisted of an array).
    incorrect_weights_rows = np.abs(allocations_df.sum(axis=1) - 1.0) > cls.EPSILON  # type: np.ndarray
    if np.any(incorrect_weights_rows):
        dates = allocations_df.index.values[incorrect_weights_rows]
        dates_str = ", ".join([date_to_str(date) for date in dates])
        cls.logger().warning(
            "Weights don't sum up to 1 for the following dates: " + dates_str)

    # Element-wise product, then the per-date sum across assets gives the portfolio return
    scaled_returns = assets_rets_df * allocations_df  # type: np.ndarray
    portfolio_rets = scaled_returns.sum(axis=1)
    portfolio_rets_tms = SimpleReturnsSeries(
        data=portfolio_rets, index=allocations_df.index.copy())

    return portfolio_rets_tms