def test_custom_lower_bound():
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(0.02, 1)
    )
    ef.max_sharpe()
    assert ef.weights.min() >= 0.02
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
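These tests rely on a setup_efficient_frontier() helper from pypfopt's test suite, which is not reproduced on this page. Below is a minimal sketch of such a fixture, assuming the same tests/stock_prices.csv file read in the final example on this page; the real helper may differ.

import pandas as pd

from pypfopt import expected_returns, risk_models
from pypfopt.efficient_frontier import EfficientFrontier


def setup_efficient_frontier(data_only=False):
    # Hypothetical reconstruction of the test fixture used throughout these tests
    df = pd.read_csv("tests/stock_prices.csv", parse_dates=True, index_col="date")
    mu = expected_returns.mean_historical_return(df)
    S = risk_models.sample_cov(df)
    if data_only:
        # unpacked as EfficientFrontier(*setup_efficient_frontier(data_only=True), ...)
        return mu, S
    return EfficientFrontier(mu, S)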
Example No. 2
def test_custom_bounds_same():
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(0.03, 0.13))
    ef.max_sharpe()
    assert ef.weights.min() >= 0.03
    assert ef.weights.max() <= 0.13
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
Example No. 4
def test_custom_upper_bound():
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(0, 0.10))
    ef.max_sharpe()
    ef.portfolio_performance()
    assert ef.weights.max() <= 0.1
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
Example No. 6
def test_clean_weights_short():
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    ef.max_sharpe()
    # In practice we would never use such a high cutoff
    number_tiny_weights = sum(np.abs(ef.weights) < 0.05)
    cleaned = ef.clean_weights(cutoff=0.05)
    cleaned_weights = cleaned.values()
    clean_number_tiny_weights = sum(abs(i) < 0.05 for i in cleaned_weights)
    assert clean_number_tiny_weights == number_tiny_weights
Example No. 8
def test_custom_bounds_different():
    bounds = [(0.01, 0.13), (0.02, 0.11)] * 10
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=bounds)
    ef.max_sharpe()
    assert (0.01 <= ef.weights[::2]).all() and (ef.weights[::2] <= 0.13).all()
    assert (0.02 <= ef.weights[1::2]).all() and (ef.weights[1::2] <=
                                                 0.11).all()
    np.testing.assert_almost_equal(ef.weights.sum(), 1)

    bounds = ((0.01, 0.13), (0.02, 0.11)) * 10
    assert EfficientFrontier(*setup_efficient_frontier(data_only=True),
                             weight_bounds=bounds)
Example No. 9
def test_max_sharpe_input_errors():
    with pytest.raises(ValueError):
        ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                               gamma="2")

    with warnings.catch_warnings(record=True) as w:
        ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                               gamma=-1)
        assert len(w) == 1
        assert issubclass(w[0].category, UserWarning)
        assert (str(w[0].message) == "in most cases, gamma should be positive")

    with pytest.raises(ValueError):
        ef.max_sharpe(risk_free_rate="0.2")
    def __init__(self, table):
        mu = expected_returns.mean_historical_return(table)
        S = risk_models.sample_cov(table)

        # Optimise for maximal Sharpe ratio
        ef = EfficientFrontier(mu, S)
        ef.max_sharpe()  # Raw weights
        self.cleaned_weights = ef.clean_weights()
        print(self.cleaned_weights)
        ef.portfolio_performance(verbose=True)

        latest_prices = discrete_allocation.get_latest_prices(table)
        self.allocation, self.leftover = discrete_allocation.portfolio(
            self.cleaned_weights, latest_prices, total_portfolio_value=10000
        )
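The constructor above uses the older function-based allocation API (discrete_allocation.portfolio). A rough equivalent with the class-based DiscreteAllocation API that appears in later examples on this page might look like the following sketch, assuming the same table price DataFrame and cleaned weights dict:

from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

latest_prices = get_latest_prices(table)
da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=10000)
allocation, leftover = da.lp_portfolio()  # integer share counts and leftover cash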
Example No. 11
def max_sharpe_multi(ticker_list, time='1d', cash=10000000):
    data = yf.download(ticker_list, period='10y', interval=time)
    if len(ticker_list) > 1:
        data = yf.download(ticker_list,
                           period='10y',
                           interval=time,
                           group_by='ticker')
    new_data = []
    df = pd.DataFrame()
    weight = 1 / len(ticker_list)
    for i in ticker_list:
        stock_normal_ret = data['Close'] / data.iloc[0]['Close']
        df[i] = data['Close']
        if len(ticker_list) > 1:
            stock_normal_ret = data[i]['Close'] / data[i].iloc[0]['Close']
            df[i] = data[i]['Close']
        alloc = stock_normal_ret * weight
        balance = alloc * cash
        new_data.append(balance)

    mu = expected_returns.mean_historical_return(df)
    s = risk_models.sample_cov(df)
    ef = EfficientFrontier(mu, s)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()
    x = ef.portfolio_performance(verbose=True)
    return cleaned_weights, round(2.5 * x[2] / 15, 3)  # sharpe adjusted weight
def get_sorted_weights(prices):
    '''
    prices: dataframe of the following shape. Column as symbols, Date as indices
                      AAL       AAPL    GOOGL
      Date
      2014-02-14  32.439728   17.411098  0.00
      2014-02-15  32.439728   17.411098  0.00
    '''
    # calculate parameters
    mu = expected_returns.mean_historical_return(prices)
    S = risk_models.sample_cov(prices)

    # remove infinite values
    symbols = prices.columns
    for symbol in symbols:
        # skip symbols whose mean return is non-finite or zero
        mu_value = mu[symbol]
        if not np.isfinite(mu_value) or mu_value == 0:
            # delete from means
            del mu[symbol]

            # delete from sample covariance
            S.drop(symbol, axis=1, inplace=True)
            S.drop(symbol, axis=0, inplace=True)

    # calculate efficient frontier
    ef = EfficientFrontier(mu, S)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()
    sorted_weights = sorted(cleaned_weights.items(), key=lambda x: -x[1])
    return sorted_weights
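A minimal usage sketch for get_sorted_weights(). The tickers and synthetic upward-trending prices below are purely illustrative; the constant GOOGL column demonstrates how symbols with non-finite or zero mean returns are filtered out before optimisation.

import numpy as np
import pandas as pd

dates = pd.date_range("2020-01-01", periods=252, freq="B")
t = np.arange(252)
prices = pd.DataFrame(
    {
        "AAL": 32 + 0.05 * t + np.sin(t / 5.0),   # drifts upwards with mild oscillation
        "AAPL": 17 + 0.03 * t + np.cos(t / 7.0),  # drifts upwards with mild oscillation
        "GOOGL": 0.0,                             # constant: removed inside get_sorted_weights
    },
    index=dates,
)
print(get_sorted_weights(prices))  # remaining symbols with their weights, largest first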
Example No. 13
def get_mean_variance_share_allocation():
    dates, ticker_to_closing_prices = get_ticker_to_closing_prices(
        START_DATE, TEST_START_DATE - timedelta(days=1))

    tickers = ticker_to_closing_prices.keys()
    prices = list(
        zip(*[ticker_to_closing_prices[ticker] for ticker in tickers]))
    df = pd.DataFrame(
        prices,
        index=dates,
        columns=tickers,
    )

    mu = mean_historical_return(df)
    S = CovarianceShrinkage(df).ledoit_wolf()

    ef = EfficientFrontier(mu, S)
    weights = ef.max_sharpe()

    _, ticker_to_closing_prices_at_test_start = get_ticker_to_closing_prices(
        TEST_START_DATE, TEST_END_DATE)
    prices_at_test_start = pd.Series([
        float(ticker_to_closing_prices_at_test_start[ticker][0])
        for ticker in tickers
    ],
                                     index=tickers)
    da = DiscreteAllocation(weights,
                            prices_at_test_start,
                            total_portfolio_value=INITIAL_PORTFOLIO_VAL)
    allocation, leftover = da.lp_portfolio()
    for ticker in tickers:
        if ticker not in allocation:
            allocation[ticker] = 0

    return allocation
Example No. 14
def mypfopt(codes):
    from pypfopt.efficient_frontier import EfficientFrontier
    from pypfopt import risk_models
    from pypfopt import expected_returns
    start_date = '20180101'
    end_date = time.strftime('%Y%m%d', time.localtime())
    _, wsd_data = w.wsd(codes,
                        "close",
                        start_date,
                        end_date,
                        "PriceAdj=F",
                        usedf=True)

    # Calculate expected returns and sample covariance
    mu = expected_returns.mean_historical_return(wsd_data)
    S = risk_models.sample_cov(wsd_data)

    # Note: the covariance matrix could also be computed with np.cov or sklearn.covariance.GraphicalLasso (covariance_ == get_precision)
    # Optimise for maximal Sharpe ratio
    ef = EfficientFrontier(mu, S)
    raw_weights = ef.max_sharpe()
    print('\n max_sharpe:')
    for k, v in raw_weights.items():
        if round(v, 2) > 0:
            print(k, round(v, 2))
    cleaned_weights = ef.clean_weights()
    print('\n clean_weights:')
    for k, v in cleaned_weights.items():
        if round(v, 2) > 0:
            print(k, round(v, 2))
    print('\n portfolio_performance:')
    ef.portfolio_performance(verbose=True)
Example No. 15
    def expected_r(tickers, start_date):
        today = pd.Timestamp.today()
        # map the lookback label to a number of years ('max' is treated as 30 years)
        years = {'1y': 1, '3y': 3, '5y': 5, '10y': 10, 'max': 30}[start_date]
        delta = (today - pd.DateOffset(years=years)).strftime('%Y-%m-%d')

        prices = ffn.get(tickers, start=delta)
        mu = expected_returns.mean_historical_return(prices)
        S = risk_models.sample_cov(prices)
        ef = EfficientFrontier(mu, S)
        raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()
        st.write(cleaned_weights)
        metrics = ef.portfolio_performance(verbose=True)
        st.write('Expected Return: {:.2f}'.format(metrics[0]))
        st.write('Annual Volatility: {:.2f}'.format(metrics[1]))
        st.write('Sharpe Ratio {:.2f}'.format(metrics[2]))
    def asset_allocation(tickers, start_date):
        today = pd.Timestamp.today()
        # map the lookback label to a number of years ('max' is treated as 30 years)
        years = {'1y': 1, '3y': 3, '5y': 5, '10y': 10, 'max': 30}[start_date]
        delta = (today - pd.DateOffset(years=years)).strftime('%Y-%m-%d')

        prices = ffn.get(tickers, start=delta)
        mu = expected_returns.mean_historical_return(prices)
        S = risk_models.sample_cov(prices)
        ef = EfficientFrontier(mu, S)
        raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()
        latest_prices = discrete_allocation.get_latest_prices(prices)
        # `amount` is expected to be defined in the enclosing scope
        da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=amount)
        allocation, leftover = da.lp_portfolio()
        st.subheader('Asset Allocation breakdown: ')
        st.write(allocation)
        st.write("Funds remaining: ${:.2f}".format(leftover))
Example No. 17
def optimizePortEfficient(port, weights, start, plot = False, short = False, printBasicStats=True, how = 'Sharpe'):
	# Get the price data
	df = getData(port)
	#Plotting the portfolio
	if plot: 
		plotPort(df, port)
		
	if printBasicStats:
		basicStats(df, weights)

	#Optimization for Sharpe using Efficient Frontier
	if short: 
		bounds = (-1,1)
	else:
		bounds = (0,1)
	mu = df.pct_change().mean() * 252
	S = risk_models.sample_cov(df)

	if how == 'Sharpe':
		# Maximized on Sharpe Ratio
		ef = EfficientFrontier(mu, S, weight_bounds=bounds) #Here the weight bounds are being used to allow short positions as well
		weights = ef.max_sharpe()
		cleaned_weights = dict(ef.clean_weights())
		print("Weights of an optimal portfolio maximised on Sharpe Ratio:")
		print(cleaned_weights)
		ef.portfolio_performance(verbose = True)
		getDiscreteAllocations(df, weights)
	if how == "Vol":
		# Minimized on Volatility
		efi = EfficientFrontier(mu, S, weight_bounds=(-1,1))
		w = dict(efi.min_volatility())
		print("\nWeights of an optimal portfolio minimized on Volatility (Risk):")
		print(w)
		efi.portfolio_performance(verbose = True)
		getDiscreteAllocations(df, w)
Example No. 18
def getMaxSharpePortfolio(data):
    mu, Sigma = getMuSigma(data)
    ef = EfficientFrontier(mu, Sigma)
    raw_weights = ef.max_sharpe()
    weights = ef.clean_weights()
    performance = ef.portfolio_performance()
    return weights, performance
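getMuSigma() is not shown in this example; presumably it produces the expected-return vector and covariance matrix from a price DataFrame. A minimal sketch under that assumption:

from pypfopt import expected_returns, risk_models


def getMuSigma(data):
    # Hypothetical helper: annualised historical mean returns and sample covariance
    mu = expected_returns.mean_historical_return(data)
    Sigma = risk_models.sample_cov(data)
    return mu, Sigma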
Example No. 19
def optimize(tickers, cash=1000, longshort=False):
    print(f'Cash: ${cash}')
    date_start = 20 * 6  # look back roughly six months of daily bars

    df = pd.DataFrame()
    for t in tickers:
        path = os.path.join(os.path.dirname(__file__),
                            f'../data/price/{t}.csv')
        price = pd.read_csv(path, parse_dates=True,
                            index_col='Date')['Adj Close'].rename(t)
        df[t] = price[-date_start:]

    mu = expected_returns.mean_historical_return(df)
    S = risk_models.sample_cov(df)

    # Optimise for maximal Sharpe ratio
    ef = EfficientFrontier(mu,
                           S,
                           weight_bounds=((-1, 1) if longshort else (0, 1)))
    raw_weights = ef.max_sharpe()
    clean_weights = ef.clean_weights()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(raw_weights,
                            latest_prices,
                            total_portfolio_value=cash)
    allocation, leftover = da.lp_portfolio()

    print('\nWeights:', clean_weights)
    print('\nShares:', allocation)
    print(f'\n${leftover:.2f} leftover')

    ef.portfolio_performance(verbose=True)
Example No. 20
def opt_data(ticker, start, end, amt):
    weight = np.array([1 / len(ticker)] * len(ticker))

    hist_data = get_data(ticker, start, end)

    daily_returns = hist_data.pct_change()
    cov_annual_mat = daily_returns.cov() * 255

    port_variance = np.dot(weight.T, np.dot(cov_annual_mat, weight))
    port_volatility = np.sqrt(port_variance).round(4)
    port_simple_annual_return = np.sum(daily_returns.mean() * weight) * 255

    mu = expected_returns.mean_historical_return(hist_data)
    S = risk_models.sample_cov(hist_data)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()
    cw = ef.clean_weights()

    e = ef.portfolio_performance(verbose=False)
    latest_prices = get_latest_prices(hist_data)
    da = DiscreteAllocation(cw, latest_prices, total_portfolio_value=amt)

    allocation, leftover = da.lp_portfolio()

    return {
        "orignal_port_volatility": str(round(port_volatility, 3) * 100) + "%",
        "orignal_annual_return":
        str(round(port_simple_annual_return, 3) * 100) + "%",
        "new_port_volatility": str(round(e[1], 3) * 100) + "%",
        "new_annual_return": str(round(e[0], 3) * 100) + "%",
        "Allocation": cw,
        "AllocationNo": allocation,
        "Left_Amount": str(round(leftover, 2))
    }
Example No. 21
def ef_sharpe_strategy(returns=None, cov_matrix=None):
    assert returns is not None
    ef = EfficientFrontier(returns, cov_matrix)
    ef.add_objective(objective_functions.L2_reg,
                     gamma=0.1)  # eliminate minor weights
    weights = ef.max_sharpe()
    return weights, portfolio_performance(ef), ef
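The module-level portfolio_performance() helper used above is not shown, and add_objective()/objective_functions.L2_reg require pypfopt >= 1.0. Below is a minimal stand-in for that helper, plus the imports ef_sharpe_strategy itself needs, under those assumptions:

from pypfopt import expected_returns, objective_functions, risk_models
from pypfopt.efficient_frontier import EfficientFrontier


def portfolio_performance(ef):
    # assumed to be a thin wrapper around pypfopt's own reporting method
    return ef.portfolio_performance()


# illustrative call, given a price DataFrame `prices`:
# returns = expected_returns.mean_historical_return(prices)
# cov_matrix = risk_models.sample_cov(prices)
# weights, perf, ef = ef_sharpe_strategy(returns=returns, cov_matrix=cov_matrix)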
Example No. 22
def min_variance(ticker_list, period='1y', interval='1d', cash=10000000):
    x = Ticker(ticker_list,
               retry=20,
               status_forcelist=[404, 429, 500, 502, 503, 504])
    data = x.history(period=period, interval=interval)
    if len(ticker_list) > 1:
        data = yf.download(ticker_list,
                           period='10y',
                           interval=interval,
                           group_by='ticker')
    new_data = []
    df = pd.DataFrame()
    weight = 1 / len(ticker_list)
    for i in ticker_list:
        stock_normal_ret = data['close'] / data.iloc[0]['close']
        df[i] = data['close']
        if len(ticker_list) > 1:
            stock_normal_ret = data[i]['close'] / data[i].iloc[0]['close']
            df[i] = data[i]['close']
        alloc = stock_normal_ret * weight
        balance = alloc * cash
        new_data.append(balance)

    mu = expected_returns.mean_historical_return(df)
    s = risk_models.sample_cov(df)
    ef = EfficientFrontier(mu, s)
    weights = ef.min_volatility()
    sharpe = ef.max_sharpe()  # note: re-solving overwrites the min-volatility weights above
    cleaned_weights = ef.clean_weights()
    x = ef.portfolio_performance(verbose=False)
    return cleaned_weights, round(2.5 * x[2] / 15, 3)  # sharpe adjusted weight
Example No. 23
def test_lp_portfolio_allocation_different_params():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(w,
                            latest_prices,
                            total_portfolio_value=80000,
                            short_ratio=0.4)
    allocation, leftover = da.lp_portfolio()

    # assert allocation == {
    #     "GOOG": 3,
    #     "AAPL": 32,
    #     "FB": 99,
    #     "BABA": 34,
    #     "AMZN": 2,
    #     "BBY": 15,
    #     "MA": 164,
    #     "PFE": 438,
    #     "SBUX": 15,
    # }

    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 80000, decimal=4)
Example No. 24
def get_portfolio(expected_returns, risk_array):
    cv = covariance_matrix(risk_array)
    ef = EfficientFrontier(expected_returns, cv)
    raw_weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()
    print(cleaned_weights)
    ef.portfolio_performance(verbose=True)

def get_momentum_stocks(df, date, portfolio_size, cash):
    # Filter the df to get the top momentum stocks for the latest day
    df_top_m = df.loc[df['date'] == pd.to_datetime(date)]
    df_top_m = df_top_m.sort_values(by='momentum', ascending=False).head(portfolio_size)

    # Set the universe to the top momentum stocks for the period
    universe = df_top_m['symbol'].tolist()

    # Create a df with just the stocks from the universe
    df_u = df.loc[df['symbol'].isin(universe)]

    # Create the portfolio
    # Pivot to format for the optimization library
    df_u = df_u.pivot_table(index='date',
                            columns='symbol',
                            values='close',
                            aggfunc='sum')

    # Calculate expected returns and sample covariance
    mu = expected_returns.mean_historical_return(df_u)
    S = risk_models.sample_cov(df_u)

    # Optimise the portfolio for maximal Sharpe ratio
    ef = EfficientFrontier(mu, S, gamma=1)  # Use regularization (gamma=1)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()

    # Allocate
    latest_prices = get_latest_prices(df_u)

    da = DiscreteAllocation(cleaned_weights,
                            latest_prices,
                            total_portfolio_value=cash)

    allocation = da.lp_portfolio()[0]

    # Put the stocks and the number of shares from the portfolio into a df
    symbol_list = []
    num_shares_list = []

    for symbol, num_shares in allocation.items():
        symbol_list.append(symbol)
        num_shares_list.append(num_shares)

    # Now that we have the stocks we want to buy we filter the df for those ones
    df_buy = df.loc[df['symbol'].isin(symbol_list)]

    # Filter for the period to get the closing price
    df_buy = df_buy.loc[df_buy['date'] == date].sort_values(by='symbol')

    # Add in the qty that was allocated to each stock
    df_buy['qty'] = num_shares_list

    # Calculate the amount we own for each stock
    df_buy['amount_held'] = df_buy['close'] * df_buy['qty']
    df_buy = df_buy.loc[df_buy['qty'] != 0]
    return df_buy
def test_lp_portfolio_allocation_different_params():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(
        w,
        latest_prices,
        min_allocation=0.002,
        total_portfolio_value=80000,
        short_ratio=0.4,
    )
    allocation, leftover = da.lp_portfolio()

    assert da.allocation == {
        "GOOG": 1,
        "AAPL": 43,
        "FB": 95,
        "BABA": 44,
        "AMZN": 4,
        "BBY": 69,
        "MA": 114,
        "PFE": 412,
        "SBUX": 51,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 80000)
def test_weight_bounds_minus_one_to_one():
    ef = EfficientFrontier(*setup_efficient_frontier(data_only=True),
                           weight_bounds=(-1, 1))
    assert ef.max_sharpe()
    assert ef.min_volatility()
    assert ef.efficient_return(0.05)
    assert ef.efficient_risk(0.05)
def test_allocation_errors():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()
    latest_prices = get_latest_prices(df)

    assert DiscreteAllocation(w, latest_prices)
    with pytest.raises(TypeError):
        DiscreteAllocation(ef.weights, latest_prices)
    with pytest.raises(TypeError):
        DiscreteAllocation(w, latest_prices.values.tolist())
    with pytest.raises(ValueError):
        DiscreteAllocation(w, latest_prices, total_portfolio_value=0)
    with pytest.raises(ValueError):
        DiscreteAllocation(w, latest_prices, short_ratio=-0.4)
    with pytest.raises(NameError):
        da = DiscreteAllocation(w, latest_prices)
        da.lp_portfolio(solver="ABCDEF")
    w2 = w.copy()
    w2["AAPL"] = np.nan
    with pytest.raises(ValueError):
        DiscreteAllocation(w2, latest_prices)
    latest_prices.iloc[0] = np.nan
    with pytest.raises(TypeError):
        DiscreteAllocation(w, latest_prices)
def test_greedy_portfolio_allocation():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(w, latest_prices)
    allocation, leftover = da.greedy_portfolio()

    assert da.allocation == {
        "MA": 14,
        "FB": 12,
        "PFE": 51,
        "BABA": 5,
        "AAPL": 5,
        "AMZN": 0,
        "BBY": 9,
        "SBUX": 6,
        "GOOG": 1,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000)
def test_greedy_portfolio_allocation():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(w, latest_prices, short_ratio=0.3)
    allocation, leftover = da.greedy_portfolio()

    assert allocation == {
        "MA": 20,
        "FB": 12,
        "PFE": 54,
        "BABA": 4,
        "AAPL": 4,
        "BBY": 2,
        "SBUX": 1,
        "GOOG": 1,
    }

    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000, decimal=4)

    # Cover the verbose parameter,
    allocation_verbose, leftover_verbose = da.greedy_portfolio(verbose=True)
    assert allocation_verbose == allocation
    assert leftover_verbose == leftover
def run_allocation(date, duration):
    df = pd.DataFrame(columns=symbols)
    for symbol in symbols:
        try:
            data = get_all_binance(symbol, date, duration, save=False)
            df[symbol] = data["close"]

        except BinanceAPIException:
            print("Symbol was : " + symbol)

    df = df.apply(pd.to_numeric)
    df = df.fillna(0)
    print(df)
    mu = mean_historical_return(df)
    S = CovarianceShrinkage(df).ledoit_wolf()

    ef = EfficientFrontier(mu, S)

    weights = ef.max_sharpe()

    # TODO: add to the AWS / GraphQL table

    cleaned_weights = ef.clean_weights()
    print(ef.portfolio_performance(verbose=True))
    lists = sorted(weights.items())  # sorted by key, return a list of tuples
    lists = {x: y for x, y in lists if y > 0}

    f = open("Coin" + duration + ".txt", "w")
    f.write(str(lists))
    f.close()
    print(weights)
    s3 = boto3.client('s3')
    with open("Coin" + duration + ".txt", "rb") as f:
        s3.upload_fileobj(f, "model-predictions", "Coin" + duration + ".txt")
Example No. 32
def get_momentum_stocks(df, date, portfolio_size, cash):
    # Convert dates and filter all momentum scores to include only top `portfolio_size` movers
    df_top_movers = df.loc[df['date'] == pd.to_datetime(date)]
    df_top_movers = df_top_movers.sort_values(by='momentum', ascending=False).head(portfolio_size)

    # Create a universe of top momentum stocks
    universe = df_top_movers['symbol'].tolist()

    # Create universe as DF for these stocks
    df_universe_top_movers = df.loc[df['symbol'].isin(universe)]

    # Create pre-optimized portfolio
    df_universe_top_movers = df_universe_top_movers.pivot_table(
        index='date', 
        columns='symbol',
        values='close',
        aggfunc='sum')

    # Calculate expected returns and sample covariance
    mu = expected_returns.mean_historical_return(df_universe_top_movers)
    S = risk_models.sample_cov(df_universe_top_movers)

    # Optimize by Sharpe Ratio
    ef = EfficientFrontier(mu, S, gamma=1) # Use regularization (gamma=1)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()

    # Allocate
    latest_prices = get_latest_prices(df_universe_top_movers)

    allocated = DiscreteAllocation(
        cleaned_weights,
        latest_prices,
        total_portfolio_value=cash)

    allocation = allocated.lp_portfolio()[0]

    # Put the stocks and the number of shares from the portfolio into a df
    symbols = []
    num_shares = []

    for sym, shares in allocation.items():
        symbols.append(sym)
        num_shares.append(shares)

    # Create the to-buy dataframe
    df_buy = df.loc[df['symbol'].isin(symbols)]

    # Filter out irrelevant dates
    df_buy = df_buy.loc[df_buy['date'] == date].sort_values(by='symbol')

    # Add quantity allocations into dataframe
    df_buy['qty'] = num_shares  # note: has raised ValueError when lengths do not match

    # Calculate the new/desired equity for each stock
    df_buy['equity'] = df_buy['close'] * df_buy['qty']
    df_buy = df_buy.loc[df_buy['qty'] != 0]

    return df_buy
Example No. 33
    def weight(
        self,
        market_state: pd.DataFrame,
        agent_portfolio: Dict[str, float],
        is_recommendation: bool = False,
    ) -> Dict[str, float]:
        # make default weight map

        weights = {}
        stocks = set(market_state.symbol.unique())
        for stock in stocks:
            weights[stock] = 0.0

        current_date = market_state.date.max()

        if current_date.year == market_state.date.min().year:
            # there is no previous year
            return weights
        if not is_recommendation:
            if current_date != market_state.loc[market_state.date.dt.year ==
                                                current_date.year].date.min():

                # it's not the first trading day of the current year
                if current_date == self.all_dates[
                    (self.all_dates.date.dt.year == current_date.year)
                        & (self.all_dates.date.dt.month == 12)].date.max():
                    for stock in agent_portfolio.keys():
                        weights[stock] = -1
                    return weights
                return weights

        prices = market_state.pivot(values="adjusted_close",
                                    index="date",
                                    columns="symbol")

        # Get Mu and sigma for efficient frontier
        mu = expected_returns.mean_historical_return(prices)
        sigma = risk_models.CovarianceShrinkage(prices).ledoit_wolf()

        # Calculate efficient portfolio, objective: maximize sharpe ratio
        ef = EfficientFrontier(mu, sigma, weight_bounds=(0, 1))
        ef.max_sharpe()
        cleaned_weights = ef.clean_weights()

        print(cleaned_weights)
        return dict(cleaned_weights)
def test_portfolio_allocation_errors():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()
    latest_prices = discrete_allocation.get_latest_prices(df)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(ef.weights, latest_prices)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(w, latest_prices.values.tolist())

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, min_allocation=0.5)

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, total_portfolio_value=0)
def test_max_sharpe_short():
    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.40723757138191374, 0.24823079451957306, 1.5524922427959371),
    )
    sharpe = ef.portfolio_performance()[2]

    ef_long_only = setup_efficient_frontier()
    ef_long_only.max_sharpe()
    long_only_sharpe = ef_long_only.portfolio_performance()[2]

    assert sharpe > long_only_sharpe
def test_max_sharpe_L2_reg_with_shorts():
    ef_no_reg = setup_efficient_frontier()
    ef_no_reg.max_sharpe()
    initial_number = sum(ef_no_reg.weights > 0.01)

    ef = EfficientFrontier(
        *setup_efficient_frontier(data_only=True), weight_bounds=(None, None)
    )
    ef.gamma = 1
    w = ef.max_sharpe()
    assert isinstance(w, dict)
    assert set(w.keys()) == set(ef.tickers)
    assert set(w.keys()) == set(ef.expected_returns.index)
    np.testing.assert_almost_equal(ef.weights.sum(), 1)
    np.testing.assert_allclose(
        ef.portfolio_performance(),
        (0.3236047844566581, 0.20241509723550233, 1.4969817524033966),
    )
    new_number = sum(ef.weights > 0.01)
    assert new_number >= initial_number
def test_portfolio_allocation():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()

    latest_prices = discrete_allocation.get_latest_prices(df)
    allocation, leftover = discrete_allocation.portfolio(w, latest_prices)
    assert allocation == {
        "MA": 14,
        "FB": 12,
        "PFE": 51,
        "BABA": 5,
        "AAPL": 5,
        "AMZN": 0,
        "BBY": 9,
        "SBUX": 6,
        "GOOG": 1,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000)
Example No. 39
import pandas as pd

from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.hierarchical_risk_parity import hrp_portfolio
from pypfopt.value_at_risk import CVAROpt
from pypfopt import discrete_allocation


# Reading in the data; preparing expected returns and a risk model
df = pd.read_csv("tests/stock_prices.csv", parse_dates=True, index_col="date")
returns = df.pct_change().dropna(how="all")
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# Long-only Maximum Sharpe portfolio, with discretised weights
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe()
ef.portfolio_performance(verbose=True)
latest_prices = discrete_allocation.get_latest_prices(df)
allocation, leftover = discrete_allocation.portfolio(weights, latest_prices)
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))

"""
Expected annual return: 33.0%
Annual volatility: 21.7%
Sharpe Ratio: 1.43

Discrete allocation: {'MA': 14, 'FB': 12, 'PFE': 51, 'BABA': 5, 'AAPL': 5,
                      'AMZN': 0, 'BBY': 9, 'SBUX': 6, 'GOOG': 1}
Funds remaining: $12.15
"""