    def __init__(self, factors: typing.List, securities_universe,
                 start_date: datetime, end_date: datetime,
                 rebalancing_frequency: config.RebalancingFrequency):
        # super().__init__()
        self.factors = factors
        self.asset_returns = Portfolio(assets=securities_universe).df_returns
        self.securities_universe = list(self.asset_returns.columns)
        self.start_date = start_date
        self.end_date = end_date
        self.rebalancing_frequency = rebalancing_frequency
    def security_market_line(self, portfolio: Portfolio, date: datetime = None, regression_window: int = 36,
                             benchmark: pd.Series = None):
        """

        :param portfolio:
        :param date:
        :param regression_window:
        :param benchmark:
        :return:
        """
        # '''
        #     The Security Market Line (SML) graphically represents the relationship between the asset's return (on y-axis) and systematic risk (or beta, on x-axis).
        #     With E(R_i) = R_f + B_i * (E(R_m) - R_f), the y-intercept of the SML is equal to the risk-free interest rate, while the slope is equal to the market risk premium
        #     Plotting the SML for a market index (i.e. DJIA), individual assets that are correctly priced are plotted on the SML (in the ideal 'Efficient Market Hypothesis' world).
        #     In real market scenarios, we are able to use the SML graph to determine if an asset being considered for a portfolio offers a reasonable expected return for the risk.
        #     - If an asset is priced at a point above the SML, it is undervalued, since for a given amount of risk, it yields a higher return.
        #     - Conversely, an asset priced below the SML is overvalued, since for a given amount of risk, it yields a lower return.
        # '''
        frequency = self.factors_timedf.df_frequency
        portfolio_copy = portfolio.set_frequency(frequency, inplace=False) \
            .slice_dataframe(to_date=date, from_date=regression_window, inplace=False)

        betas = [
            self.regress_factor_loadings(portfolio=portfolio.df_returns[ticker], benchmark_returns=benchmark, date=date,
                                         regression_window=regression_window).params[1]
            for ticker in portfolio_copy.df_returns]

        mean_asset_returns = portfolio_copy.get_mean_returns()
        date = portfolio_copy.df_returns.index[-1] if date is None else date

        risk_free_rate = macro.risk_free_rates(lookback=regression_window, to_date=date, frequency=frequency).mean() \
                         * portfolio.freq_to_yearly[frequency[0]]

        risk_premium = macro.market_premiums(lookback=regression_window, to_date=date, frequency=frequency).mean() \
                       * portfolio.freq_to_yearly[frequency[0]]

        x = np.linspace(0, max(betas) + 0.1, 100)
        y = float(risk_free_rate) + x * float(risk_premium)
        fig, ax = plt.subplots(figsize=(10, 10))
        plt.plot(x, y)
        ax.set_xlabel('Betas', fontsize=14)
        ax.set_ylabel('Expected Returns', fontsize=14)
        ax.set_title('Security Market Line', fontsize=18)

        for i, txt in enumerate(portfolio_copy.df_returns):
            ax.annotate(txt, (betas[i], mean_asset_returns[i]), xytext=(10, 10), textcoords='offset points')
            plt.scatter(betas[i], mean_asset_returns[i], marker='x', color='red')

        plt.show()
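    # A minimal standalone sketch (not part of the original class) of the SML classification
    # described in the docstring above: compute the CAPM-implied return
    # E(R_i) = R_f + B_i * (E(R_m) - R_f) and compare it with the asset's realized mean return.
    # All numbers below are illustrative assumptions.
    @staticmethod
    def _sml_classification_sketch():
        risk_free_rate = 0.02        # assumed annualized risk-free rate
        market_risk_premium = 0.06   # assumed annualized E(R_m) - R_f
        beta = 1.2                   # assumed regression beta of the asset
        realized_mean_return = 0.12  # assumed annualized mean return of the asset

        capm_expected_return = risk_free_rate + beta * market_risk_premium  # 0.092
        if realized_mean_return > capm_expected_return:
            return 'undervalued (plots above the SML)'
        return 'overvalued or fairly priced (plots on or below the SML)'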
Example #3
    def broker_deployment(self, broker):
        """
        Of course, need to schedule that function depending on the `is_time_to_reschedule`. Potentially use AWS Lambda

        :param broker:
        :return:
        """
        if not issubclass(broker.__class__, Broker):
            raise Exception('Ensure that the broker object inherits from the `Broker` class.')

        assets_to_long, assets_to_short = self.generate_assets_to_trade(datetime.now())
        # TODO long only for now; improve the optimization to include different constraints
        weights = self.portfolio_allocation(portfolio=Portfolio(assets_to_long)).solve_weights()

        # Place an order for each asset on the long side; sizing from `weights` depends on the broker implementation
        for asset in assets_to_long:
            broker.place_order(symbol=asset, side='buy')

        current_positions = broker.list_positions()
        for position in current_positions:
            print(position)
    def markowitz_efficient_frontier(self, market_portfolio: Portfolio = None, plot_assets=True, plot_cal=False):
        """
        One can plot every possible combination of risky assets on the *risk-return space*, a graph with the horizontal axis
        representing the *variance* (a.k.a risk), and the vertical axis representing the *expected returns*.
        The left boundary of this region is parabolic, and the upper part of the parabolic boundary is the **efficient frontier**
        in the absence of a risk-free asset (sometimes called *the Markowitz bullet*). Combinations along this upper edge
        represent portfolios (including no holdings of the risk-free asset) for which there is the lowest risk for a given
        level of expected return. Equivalently, a portfolio lying on the efficient frontier represents the combination
        offering the best possible expected return for a given risk level.

        :param market_portfolio:
        :param plot_assets: Plot risk/return profile of each asset in the portfolio
        :param plot_cal: Plot the Capital Allocation Line
        :return: Pandas DataFrame representing optimal weights and minimum volatilities for each level of required return.
        """
        covariance_matrix = self.portfolio.get_covariance_matrix(to_freq='Y')
        mean_returns = self.portfolio.get_mean_returns(to_freq='Y')
        target_returns = np.linspace(mean_returns.min(), mean_returns.max(), 50)
        minimal_volatilities, weights = [], []

        for target_return in target_returns:
            optimal = minimize(
                # objective function for portfolio volatility. We're optimizing for the weights
                fun=lambda w: np.sqrt(np.dot(w, np.dot(w, covariance_matrix))),
                # initial guess for weights (all equal)
                x0=np.ones((len(self.portfolio.stocks))) / (len(self.portfolio.stocks)),
                method='SLSQP',
                # weighted sum should equal target return, and weights should sum to one
                constraints=({'type': 'eq', 'fun': lambda x: np.sum(mean_returns * x) - target_return},
                             {'type': 'eq', 'fun': lambda x: np.sum(x) - 1}),
                # all weights should be between zero and one
                bounds=Bounds(0, 1))

            minimal_volatilities.append(optimal['fun'])
            weights.append(optimal.x)

        sharpe_arr = target_returns / minimal_volatilities

        fig, ax = plt.subplots(figsize=(10, 10))
        plt.scatter(minimal_volatilities, target_returns, c=sharpe_arr, cmap='viridis')
        plt.colorbar(label='Sharpe Ratio')
        plt.xlabel('Standard Deviation')
        plt.ylabel('Expected Returns')

        max_sharpe_idx = sharpe_arr.argmax()
        plt.plot(minimal_volatilities[max_sharpe_idx], target_returns[max_sharpe_idx], 'r*', markersize=15.0)
        ax.annotate(text='Max Sharpe', xy=(minimal_volatilities[max_sharpe_idx], target_returns[max_sharpe_idx]),
                    xytext=(10, 10), textcoords='offset points')

        min_volatility_idx = np.asarray(minimal_volatilities).argmin()
        plt.plot(minimal_volatilities[min_volatility_idx], target_returns[min_volatility_idx], 'y*', markersize=15.0)
        ax.annotate(text='Min Vol', xy=(minimal_volatilities[min_volatility_idx], target_returns[min_volatility_idx]),
                    xytext=(10, 10), textcoords='offset points')

        if market_portfolio is not None and len(market_portfolio.df_returns.columns) == 1:
            # Get yearly mean and std of market portfolio returns
            mkt_mean, mkt_std = market_portfolio.get_mean_returns(to_freq='Y'), market_portfolio.get_volatility_returns(to_freq='Y')
            plt.plot(mkt_std, mkt_mean, 'bo', markersize=15.0)
            ax.annotate(text=market_portfolio.df_returns.columns[0], xy=(mkt_std, mkt_mean),
                        xytext=(10, 10), textcoords='offset points')
        if plot_assets:
            # Use annualized volatilities to match the annualized mean returns computed above
            volatilities = self.portfolio.get_volatility_returns(to_freq='Y')
            for i, txt in enumerate(self.portfolio.stocks):
                ax.annotate(txt, (volatilities[i], mean_returns[i]), xytext=(10, 10), textcoords='offset points')
                plt.scatter(volatilities[i], mean_returns[i], marker='x', color='red')

        if plot_cal:
            # self.capital_allocation_line()
            pass

        plt.show()

        return pd.DataFrame.from_dict({'Target Return': target_returns, 'Minimum Volatility': minimal_volatilities,
                                       'Sharpe Ratio': sharpe_arr, 'Optimal Weights': weights})
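    # A minimal sketch (not part of the original model) of the volatility objective minimized above,
    # sqrt(w' * Sigma * w), evaluated on a toy two-asset covariance matrix. The covariance numbers
    # are made up for illustration only.
    @staticmethod
    def _portfolio_volatility_sketch():
        toy_covariance = np.array([[0.04, 0.006],
                                   [0.006, 0.09]])  # assumed annualized covariances
        w = np.array([0.5, 0.5])                    # equal weights, matching the x0 initial guess above
        return np.sqrt(w @ toy_covariance @ w)      # ~0.188, i.e. about 18.8% annualized volatility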
    def solve_weights(self, risk_metric=None, objective=None, leverage=0, long_short_exposure=0):
        pass


class NestedClusteredOptimization(PortfolioAllocationModel):
    def __init__(self, portfolio: Portfolio):
        super().__init__(portfolio)

    def solve_weights(self, risk_metric=None, objective=None, leverage=0, long_short_exposure=0):
        pass


if __name__ == '__main__':
    assets = macro.companies_in_index(MarketIndices.DOW_JONES)
    portfolio = Portfolio(assets=assets)
    portfolio.set_frequency(frequency='M', inplace=True)
    portfolio.slice_dataframe(from_date=datetime(2016, 1, 1), to_date=datetime(2020, 1, 1), inplace=True)
    print(portfolio.df_returns.tail(10))

    MPT = ModernPortfolioTheory(portfolio)
    weights = MPT.solve_weights(use_sharpe=True)
    print(weights)

    market_portfolio = Portfolio(assets='^DJI')
    market_portfolio.set_frequency(frequency='M', inplace=True)
    market_portfolio.slice_dataframe(from_date=datetime(2016, 1, 1), to_date=datetime(2020, 1, 1), inplace=True)

    stats = MPT.markowitz_efficient_frontier(market_portfolio=market_portfolio, plot_assets=True, plot_cal=True)
    pd.set_option('display.max_columns', None)
    print(stats.head())
Example #6
    performance, per unit of downside risk.

    :param portfolio_returns: Pandas series or dataframe representing percentage changes of the security (or portfolio) returns over time. It should be the same time range and frequency as the risk-free rates.
    :param target: minimum acceptable return, below which the returns are less desirable.
    :return: Ratio of the first-order higher partial moment to the square root of the second-order lower partial moment.
    """
    return hpm(portfolio_returns, target, 1) / math.sqrt(lpm(portfolio_returns, target, 2))
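
# The upside-potential ratio above relies on `hpm` and `lpm` helpers defined elsewhere in this
# module. Below is a minimal sketch of the usual higher/lower partial-moment definitions; it is
# an assumption about how those helpers behave, not this module's actual implementation.
def hpm_sketch(returns, target, order):
    # Higher partial moment: mean of (r - target)^order over the part of returns above the target.
    excess = (returns - target).clip(lower=0)
    return float((excess ** order).mean())


def lpm_sketch(returns, target, order):
    # Lower partial moment: mean of (target - r)^order over the part of returns below the target.
    shortfall = (target - returns).clip(lower=0)
    return float((shortfall ** order).mean())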


def roys_safety_first_criterion(portfolio_returns: pd.Series, minimum_threshold=0.02, period=252):
    """

    :param portfolio_returns: Pandas series or dataframe representing percentage changes of the security (or portfolio) returns over time. It should be the same time range and frequency as the risk-free rates.
    :param minimum_threshold: minimum acceptable return, below which the returns are less desirable.
    :param period: number of periods over which to compute the return statistics. For instance, input 252 to compute yearly statistics from daily returns, or 21 to compute monthly statistics.
    :return: Roy's safety-first ratio: (annualized mean return - minimum_threshold) / annualized volatility.
    """
    return (portfolio_returns.mean() * period - minimum_threshold) / (portfolio_returns.std() * math.sqrt(period))


if __name__ == '__main__':
    from portfolio_management.Portfolio import Portfolio
    from datetime import datetime

    assets = ['AAPL', 'V', 'KO', 'CAT']
    portfolio = Portfolio(assets=assets)
    portfolio.slice_dataframe(to_date=datetime(2021, 1, 1), from_date=datetime(2016, 1, 1))
    # portfolio_returns = portfolio.get_weighted_sum_returns(weights=np.ones(len(assets)) / len(assets))
    # print(portfolio_returns.head())
    print(roys_safety_first_criterion(portfolio_returns=portfolio.df_returns, minimum_threshold=0.02, period=252))
        def select_stocks(group):
            returns_df = pd.DataFrame()
            from_date = group.index.values[0][0]
            from_date_idx = group.index.levels[0].to_list().index(from_date)
            try:
                # TODO: think about whether the rebalancing day itself should be included
                to_date = group.index.levels[0][from_date_idx + 1] - timedelta(days=1)
            except IndexError:
                to_date = self.end_date
            if 'SMB' in group.columns:
                # Do the Fama French / AQR Way
                small_size_stocks, large_size_stocks = split_stocks_quantile(
                    df=group, factor='SMB')

                for factor in group.columns:
                    if factor != 'SMB':
                        bottom_quantile_stocks, top_quantile_stocks = split_stocks_quantile(
                            df=group, factor=factor)

                        small_top_stocks = set.intersection(
                            set(small_size_stocks), set(top_quantile_stocks))
                        big_top_stocks = set.intersection(
                            set(large_size_stocks), set(top_quantile_stocks))

                        small_bottom_stocks = set.intersection(
                            set(small_size_stocks),
                            set(bottom_quantile_stocks))
                        big_bottom_stocks = set.intersection(
                            set(large_size_stocks),
                            set(bottom_quantile_stocks))

                        cross_section_returns = {
                            name: {}
                            for name in [
                                'Small Top', 'Big Top', 'Small Bottom',
                                'Big Bottom'
                            ]
                        }

                        for name, stocks in zip([
                                'Small Top', 'Big Top', 'Small Bottom',
                                'Big Bottom'
                        ], [
                                small_top_stocks, big_top_stocks,
                                small_bottom_stocks, big_bottom_stocks
                        ]):
                            if len(stocks) > 0:
                                portfolio = self.asset_returns[stocks]
                                # To allocate weight, need history of returns up to now
                                weights = allocation_method(
                                    Portfolio(portfolio.loc[:from_date])
                                ).solve_weights()
                                cross_section_returns[name]['Weight Allocation'] \
                                    = [(stock, weight) for stock, weight in zip(portfolio.columns, weights)]

                                returns = np.sum(
                                    weights * portfolio.loc[from_date:to_date],
                                    axis=1)
                                cross_section_returns[name][
                                    'Returns'] = returns
                            else:
                                dates = pd.date_range(
                                    start=from_date + timedelta(days=1) -
                                    timedelta(seconds=1),
                                    end=to_date + timedelta(days=1) -
                                    timedelta(seconds=1)).to_list()
                                cross_section_returns[name][
                                    'Returns'] = pd.Series(
                                        np.zeros((to_date - from_date).days +
                                                 1),
                                        index=dates)
                                cross_section_returns[name][
                                    'Weight Allocation'] = [('', 0)]
                        # HML = 1/2 (Small Value + Big Value) - 1/2 (Small Growth + Big Growth).
                        long_stocks = small_top_stocks | big_top_stocks
                        short_stocks = small_bottom_stocks | big_bottom_stocks

                        for factor_ in self.factors:
                            if factor_.__class__.__name__ == factor:
                                # TODO: record the long and short legs held over this rebalancing period
                                factor_.holdings.append({'Long Stocks': list(long_stocks),
                                                         'Short Stocks': list(short_stocks)})

                        returns = 0.5 * (cross_section_returns['Small Top']['Returns'].add(
                            cross_section_returns['Big Top']['Returns'], fill_value=0)) \
                                  - 0.5 * (cross_section_returns['Small Bottom']['Returns'].add(
                            cross_section_returns['Big Bottom']['Returns'], fill_value=0))
                        returns.name = factor
                        returns_df = returns_df.join(
                            [returns], how='inner'
                        ) if not returns_df.empty else returns.to_frame()
            for factor, returns in returns_df.items():  # iteritems() was removed in pandas 2.0
                factor_obj = None
                for f_ in self.factors:
                    if f_.__class__.__name__ == factor:
                        factor_obj = f_
                factor_obj.returns = returns
                # factor_obj.holdings =
            return returns_df

        factor_returns = pipeline_df.groupby(level=0,
                                             axis=0).apply(select_stocks)
        factor_returns.index = factor_returns.index.droplevel(0)
        return factor_returns
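
# Toy check (with made-up numbers) of the long/short combination used in `select_stocks` above:
# HML = 1/2 * (Small Value + Big Value) - 1/2 * (Small Growth + Big Growth).
def hml_combination_sketch():
    small_top, big_top = 0.03, 0.02          # assumed returns of the top-quantile (value) legs
    small_bottom, big_bottom = 0.01, 0.015   # assumed returns of the bottom-quantile (growth) legs
    return 0.5 * (small_top + big_top) - 0.5 * (small_bottom + big_bottom)  # 0.0125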


if __name__ == '__main__':
    # sp_500_market = Portfolio(assets=['AAPL'])
    # capm = CapitalAssetPricingModel(frequency='Monthly', to_date=datetime.today(), from_date=80)
    # reg = capm.regress_factor_loadings(portfolio=Portfolio(assets=['MSFT']))
    # print(reg.params)

    ff3 = FamaFrench_ThreeFactorModel(frequency='Monthly',
                                      to_date=datetime.today())
    reg = ff3.regress_factor_loadings(portfolio=Portfolio(assets=['MSFT']))
    print(reg.params)
Example #9
    def historical_simulation(self):
        results = []
        portfolio = Portfolio(assets=[], balance=self.starting_capital, trades=[], date=self.starting_date)

        # First, populate stock returns universe
        securities_universe_prices_df = pd.DataFrame()
        for stock in os.listdir(path=config.STOCK_PRICES_DIR_PATH):
            # str.strip('.pkl') strips a set of characters, not the suffix; split off the extension instead
            ticker = os.path.splitext(stock)[0]
            series = pd.read_pickle(os.path.join(config.STOCK_PRICES_DIR_PATH, stock))['Adj Close']

            # Align each price series to a full daily calendar, then forward-fill missing prices
            dummy_dates = pd.date_range(start=series.index[0], end=series.index[-1])
            nan_dummy = pd.Series(data=np.full(len(dummy_dates), np.nan),
                                  index=dummy_dates, name='Dummy', dtype='float64')

            series = pd.concat([series, nan_dummy], axis=1).iloc[:, 0]
            securities_universe_prices_df[ticker] = series.ffill()

        securities_universe_returns_df = securities_universe_prices_df.pct_change()

        for date in pd.date_range(start=self.starting_date, end=self.ending_date):
            portfolio.date = datetime(year=date.year, month=date.month, day=date.day)

            for trade in portfolio.trades:  # update portfolio float
                date_loc = trade.stock.index.get_loc(date - timedelta(seconds=1))

                daily_pct_return = (trade.stock.iloc[date_loc] - trade.stock.iloc[date_loc - 1]) \
                                   / trade.stock.iloc[date_loc - 1]

                daily_doll_return = daily_pct_return * trade.stock.loc[date - timedelta(seconds=1)] * trade.shares

                portfolio.float = portfolio.float + daily_doll_return if trade.direction \
                    else portfolio.float - daily_doll_return

            if not (self.is_time_to_reschedule(current_date=date,
                                               last_rebalancing_day=portfolio.last_rebalancing_day)
                    or date == self.starting_date):
                continue

            portfolio.last_rebalancing_day = date  # rebalancing day, now can go on:
            stocks_to_trade = self.generate_assets_to_trade(portfolio.date)
            long_stocks, short_stocks = stocks_to_trade

            for trade in portfolio.trades:  # close portfolio trades that no longer meet condition
                if trade.stock.name not in long_stocks + short_stocks:
                    portfolio.make_position(trade, entry=False)

            # Get portfolio returns of selected stocks up to current date, and optimize portfolio allocation
            portfolio.df_returns = securities_universe_returns_df[long_stocks]
            sliced_portfolio = portfolio.slice_dataframe(to_date=date, inplace=False)
            weights = self.allocation_regime(portfolio=sliced_portfolio)

            portfolio.rebalance_portfolio(long_stocks=securities_universe_prices_df[long_stocks],
                                          short_stocks=securities_universe_prices_df[short_stocks],
                                          weights=weights, commission=self.commission,
                                          fractional_shares=self.fractional_shares)

            # Aggregate trades for better formatting in the dataframe
            dictionary = dict()
            for trade in portfolio.trades:
                dictionary[trade.stock.name] = dictionary.get(trade.stock.name, 0) + trade.shares

            aggregated_trades = [(key, val) for (key, val) in dictionary.items()]
            results.append([date.strftime("%Y-%m-%d"), aggregated_trades,
                            round(portfolio.balance, 2), round(portfolio.float, 2)])

        evolution_df = pd.DataFrame(results, columns=['Date', 'Holdings', 'Balance', 'Float'])
        evolution_df.set_index('Date', inplace=True)
        evolution_df['Cumulative (%) Return'] = (evolution_df['Float'].pct_change() + 1).cumprod()
        evolution_df['Float'].plot(grid=True, figsize=(10, 6))
        plt.show()
        with pd.option_context('display.max_rows', None, 'display.max_columns', None):
            print(evolution_df.to_string())
        return evolution_df
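
    # Toy check (with made-up float values) of the cumulative-return bookkeeping used above on the
    # 'Float' column: compounding the day-over-day percentage changes via (1 + pct_change).cumprod().
    @staticmethod
    def _cumulative_return_sketch():
        floats = pd.Series([10_000.0, 10_100.0, 10_050.0, 10_251.0])
        return (floats.pct_change() + 1).cumprod()  # last value ~1.0251, i.e. +2.51% overall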