def test_sample_cov_type_warning():
    df = get_data()
    cov_from_df = risk_models.sample_cov(df)

    returns_as_array = np.array(df)
    with warnings.catch_warnings(record=True) as w:
        cov_from_array = risk_models.sample_cov(returns_as_array)

        assert len(w) == 1
        assert issubclass(w[0].category, RuntimeWarning)
        assert str(w[0].message) == "prices are not in a dataframe"

    np.testing.assert_array_almost_equal(
        cov_from_df.values, cov_from_array.values, decimal=6
    )
def setup_cla(data_only=False):
    df = get_data()
    mean_return = expected_returns.mean_historical_return(df)
    sample_cov_matrix = risk_models.sample_cov(df)
    if data_only:
        return mean_return, sample_cov_matrix
    return CLA(mean_return, sample_cov_matrix)
def test_sample_cov_real_data():
    df = get_data()
    S = risk_models.sample_cov(df)
    assert S.shape == (20, 20)
    assert S.index.equals(df.columns)
    assert S.index.equals(S.columns)
    assert S.notnull().all().all()
def setup_efficient_frontier(data_only=False):
    df = get_data()
    mean_return = expected_returns.mean_historical_return(df)
    sample_cov_matrix = risk_models.sample_cov(df)
    if data_only:
        return mean_return, sample_cov_matrix
    return EfficientFrontier(mean_return, sample_cov_matrix)
def test_shrunk_covariance_frequency():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df, frequency=52)
    # if delta = 0, no shrinkage occurs
    shrunk_cov = cs.shrunk_covariance(0)

    S = risk_models.sample_cov(df, frequency=52)
    np.testing.assert_array_almost_equal(shrunk_cov.values, S)
def test_exp_cov_limits():
    df = get_data()
    sample_cov = risk_models.sample_cov(df)
    S = risk_models.exp_cov(df)
    assert not np.allclose(sample_cov, S)

    # As span gets larger, it should tend towards sample covariance
    S2 = risk_models.exp_cov(df, span=1e20)
    assert np.abs(S2 - sample_cov).max().max() < 1e-3
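# A minimal sketch (an assumption, not pypfopt's exact source) of what exp_cov
# computes: pairwise exponentially weighted covariances of daily returns, taking
# the latest value and annualizing. The ewm weights decay geometrically with span,
# so a huge span makes them nearly uniform and the estimate converges to the plain
# sample covariance, which is what the test above checks. Assumes numpy as np and
# pandas as pd are imported.
def exp_cov_sketch(prices, span=180, frequency=252):
    returns = prices.pct_change().dropna(how="all")
    n = returns.shape[1]
    S = np.zeros((n, n))
    for i in range(n):
        for j in range(i, n):
            # last value of the exponentially weighted covariance series
            S[i, j] = S[j, i] = (
                returns.iloc[:, i].ewm(span=span).cov(returns.iloc[:, j]).iloc[-1]
            )
    return pd.DataFrame(S * frequency, index=returns.columns, columns=returns.columns)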
def test_shrunk_covariance_extreme_delta():
    df = get_data()
    cs = risk_models.CovarianceShrinkage(df)
    # if delta = 0, no shrinkage occurs
    shrunk_cov = cs.shrunk_covariance(0)
    np.testing.assert_array_almost_equal(
        shrunk_cov.values, risk_models.sample_cov(df))
    # if delta = 1, sample cov does not contribute to shrunk cov
    shrunk_cov = cs.shrunk_covariance(1)
    N = df.shape[1]
    F = np.identity(N) * np.trace(cs.S) / N
    np.testing.assert_array_almost_equal(shrunk_cov.values, F * 252)
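# For reference, the identity both assertions exercise (a sketch, assuming the
# scaled-identity shrinkage target used above):
#     shrunk = delta * F + (1 - delta) * S
# so delta=0 returns the sample covariance S unchanged and delta=1 returns the
# target F alone.
def shrunk_covariance_sketch(S, delta):
    N = S.shape[0]
    F = np.identity(N) * np.trace(S) / N  # identity target with the average variance
    return delta * F + (1 - delta) * S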
def test_negative_sharpe():
    df = get_data()
    e_rets = mean_historical_return(df)
    S = sample_cov(df)
    w = np.array([1 / len(e_rets)] * len(e_rets))

    sharpe = objective_functions.negative_sharpe(w, e_rets, S)
    assert isinstance(sharpe, float)
    assert sharpe < 0

    sigma = np.sqrt(np.dot(w, np.dot(S, w.T)))
    negative_mu = objective_functions.negative_mean_return(w, e_rets)
    np.testing.assert_almost_equal(sharpe * sigma - 0.02, negative_mu)

    # Risk free rate increasing should lead to negative Sharpe increasing.
    assert sharpe < objective_functions.negative_sharpe(
        w, e_rets, S, risk_free_rate=0.1
    )
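# The identity checked above follows directly from the Sharpe definition
# (a sketch; the real objective_functions.negative_sharpe may differ in detail):
#     negative_sharpe = -(w @ mu - rf) / sigma
# so  negative_sharpe * sigma - rf = -(w @ mu) = negative_mean_return,
# with rf = 0.02 as the default risk-free rate assumed by the test.
def negative_sharpe_sketch(w, mu, S, risk_free_rate=0.02):
    sigma = np.sqrt(w @ S @ w)
    return -(w @ mu - risk_free_rate) / sigma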
def test_sample_cov_dummy():
    data = pd.DataFrame(
        [
            [4.0, 2.0, 0.6],
            [4.2, 2.1, 0.59],
            [3.9, 2.0, 0.58],
            [4.3, 2.1, 0.62],
            [4.1, 2.2, 0.63],
        ]
    )
    test_answer = pd.DataFrame(
        [
            [0.006661687937656102, 0.00264970955585574, 0.0020849735375206195],
            [0.00264970955585574, 0.0023450491307634215, 0.00096770864287974],
            [0.0020849735375206195, 0.00096770864287974, 0.0016396416271856837],
        ]
    )
    S = risk_models.sample_cov(data) / 252
    pd.testing.assert_frame_equal(S, test_answer)
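# sample_cov is essentially the covariance of percentage returns scaled by the
# frequency (a sketch of the idea, assuming a DataFrame of prices); the /252
# above simply undoes the default annualization to compare daily covariances.
def sample_cov_sketch(prices, frequency=252):
    returns = prices.pct_change().dropna(how="all")
    return returns.cov() * frequency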
def test_portfolio_allocation_errors():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()
    latest_prices = discrete_allocation.get_latest_prices(df)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(ef.weights, latest_prices)

    with pytest.raises(TypeError):
        discrete_allocation.portfolio(w, latest_prices.values.tolist())

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, min_allocation=0.5)

    with pytest.raises(ValueError):
        discrete_allocation.portfolio(w, latest_prices, total_portfolio_value=0)
def test_portfolio_allocation():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()

    latest_prices = discrete_allocation.get_latest_prices(df)
    allocation, leftover = discrete_allocation.portfolio(w, latest_prices)
    assert allocation == {
        "MA": 14,
        "FB": 12,
        "PFE": 51,
        "BABA": 5,
        "AAPL": 5,
        "AMZN": 0,
        "BBY": 9,
        "SBUX": 6,
        "GOOG": 1,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000)
Example #12
# import pandas as pd
# import numpy as np
# import cvxpy as cp
from pypfopt import risk_models
# from pypfopt import expected_returns
# from pypfopt import EfficientFrontier
# from pypfopt import objective_functions
# from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
# from pypfopt import HRPOpt
# from pypfopt import CLA
# from pypfopt import black_litterman
# from pypfopt import BlackLittermanModel
# from pypfopt import plotting


import pandas_datareader.data as web
from datetime import datetime
import matplotlib.pylab as plt
import pandas as pd
dfs = {}
for ticker in ['AAPL', 'MSFT', 'KO']:
    df = web.DataReader(ticker, 'stooq', '2020-01-01', '2020-09-30', api_key='6JUN1FV7A3MTWJ1Y')
    dfs[ticker] = df["Close"]
stock_data = pd.DataFrame(dfs)
stock_data.sort_index(inplace=True)
risk_models.sample_cov(stock_data)
print(stock_data)
Example #13
print("Before Optimization")
print("Expected annual return: "+ percent_ret)
print("Annual volatility / risk: "+ percent_vols)
print("Annual variance: "+ percent_var)
# There is a python package that can analyze the portfolio and give a higher return with a lower risk.

# pip install PyPortfolioOpt   (shell command: run in a terminal, or prefix with "!" in a notebook)

from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns

# Optimization of the portfolio
# Calculate the expected returns and the annualized sample covariance matrix of asset returns
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# Optimize for max sharpe ratio
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print("After Optimization")
print(cleaned_weights)
ef.portfolio_performance(verbose = True)

# Sanity check: the cleaned weights sum to 1.0
# 0.04904 + 0.17874 + 0.06929 + 0.25513 + 0.0 + 0.19851 + 0.24929 == 1.0

# Get the discrete allocation of each share per stock
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

latest_prices = get_latest_prices(df)
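# The snippet is truncated here. A plausible continuation using the
# DiscreteAllocation API shown in later examples on this page (the portfolio
# value below is an assumed placeholder, not from the original):
da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=10000)
allocation, leftover = da.lp_portfolio()
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))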
Example #14
def load_portfolio_dash(RANDOM_NUMBER_SELECTED_IN, PORTFOLIO_VALUE_IN):
    START_DATE_FOR_STOCK_PRICES = '2015-1-1'  # How far back to fetch stock prices
    STOCK_TICKERS_CSV = 'stock_ticker_list.csv'
    PERCENTAGE_TO_USE = 10  # Minimum % of rows a column must have data for, otherwise it is dropped

    # MAX 400 Tickers at this moment in time
    RANDOM_NUMBER_SELECTED = RANDOM_NUMBER_SELECTED_IN  # How many tickers to randomly sample from the full list
    PORTFOLIO_VALUE = PORTFOLIO_VALUE_IN  # The amount in $ you wish to invest

    # Load the tickers list from the csv file of tickers names
    with open(STOCK_TICKERS_CSV, newline='') as f:
        reader = csv.reader(f)
        data = list(reader)

    tickers_list_imported = []
    for d in data:
        ticker = str(d).replace("'", "").replace("[", "").replace("]", "").replace(" ", "")
        tickers_list_imported.append(ticker)

    # 2 tickers just to start the dataframe in the correct format
    tickers_list_start = ['AAPL', 'TSLA']

    # Fetching the data from the list of tickers
    data = yf.download(tickers_list_start,
                       START_DATE_FOR_STOCK_PRICES)['Adj Close']
    df = pd.DataFrame(data)

    ticker_count = 0

    LIST_TO_USE = random.sample(tickers_list_imported, RANDOM_NUMBER_SELECTED)
    #LIST_TO_USE = tickers_list_imported

    progress_count = 0
    my_progress_bar = st.progress(progress_count)
    val_to_progress_up = (100 / len(LIST_TO_USE)) / 100

    for i in range(len(LIST_TO_USE)):
        if ticker_count > 0 and ticker_count % 300 == 0:
            #df.to_csv ('stock_dataframe.csv', index=True, header=True)
            st.write('\nSleeping for 2 Minutes\n')
            time.sleep(120)
        ticker_count = ticker_count + 1
        data_new_col_name = str(LIST_TO_USE[i])
        #st.write('\n' + str(ticker_count) + '/' + str(len(LIST_TO_USE)) + ' - Downloading: ' + data_new_col_name)
        try:
            data_new = yf.download(LIST_TO_USE[i],
                                   START_DATE_FOR_STOCK_PRICES)['Adj Close']
            df[data_new_col_name] = data_new
        except:
            continue
        progress_count = progress_count + val_to_progress_up
        if progress_count <= 1:
            my_progress_bar.progress(progress_count)
        else:
            my_progress_bar.progress(100)

    # Dropping tickers with no price data
    no_data_for_ticker_list = []
    row_count_based_on_percentage = int(len(df) * (PERCENTAGE_TO_USE / 100))
    for (columnName, columnData) in df.iteritems():
        if df[columnName].isnull().all():
            #st.write('Dropping : ' + str(columnName) + ' - it is all NAN')
            no_data_for_ticker_list.append(df.columns.get_loc(columnName))
        #elif df[columnName].count() < row_count_based_on_percentage:
        #    st.write('Dropping : ' + str(columnName) + ' - < than ' + str(PERCENTAGE_TO_USE) + '% of then number of rows')
        #    no_data_for_ticker_list.append(df.columns.get_loc(columnName))

    df = df.drop(df.columns[no_data_for_ticker_list], axis=1)

    # Creating a csv file
    #df.to_csv ('stock_dataframe.csv', index=True, header=True)

    # Calculate the expected annualised returns and the annualised sample covariance matrix of the daily asset returns
    mu = expected_returns.mean_historical_return(df)
    S = risk_models.sample_cov(df)

    # Optimise for the maximal Sharpe ratio - describes how much excess return you receive for the extra volatility you endure by holding a riskier asset
    ef = EfficientFrontier(mu, S)  # Create the Efficient Frontier object
    raw_weights = ef.max_sharpe()  # maximise the Sharpe ratio and get the raw weights

    # Helper method to clean the raw weights, setting any weights whose absolute values are below the cutoff to zero and rounding the rest
    # can cause some rounding errors - should not be off by much, but good to know
    cleaned_weights = ef.clean_weights()

    # Get the discrete allocation of each share per stock and the leftover money from the investment
    latest_price = get_latest_prices(df)
    weights = cleaned_weights
    da = DiscreteAllocation(weights,
                            latest_price,
                            total_portfolio_value=PORTFOLIO_VALUE)
    allocation, leftover = da.lp_portfolio()

    # Store the company names in a list & get the discrete allocation values
    company_name = []
    discrete_allocation_list = []
    for symbol in allocation:
        company_name.append(get_company_name(symbol))
        discrete_allocation_list.append(allocation.get(symbol))

    # Create the portfolio
    # Create DF for portfolio
    portfolio_df = pd.DataFrame(columns=[
        'Company_Name', 'Company_Ticker', 'Discrete_val_' +
        str(PORTFOLIO_VALUE)
    ])

    # Add data to portfolio df
    portfolio_df['Company_Name'] = company_name
    portfolio_df['Company_Ticker'] = allocation
    portfolio_df['Discrete_val_' +
                 str(PORTFOLIO_VALUE)] = discrete_allocation_list

    # How the portfolio would be expected to perform
    perf = ef.portfolio_performance(verbose=True)
    st.write()
    st.write(
        'Any Sharpe ratio over 1.0 is generally considered an acceptable to good investment:'
    )
    st.write('Expected annual return:', round(perf[0] * 100, 2), '%')
    st.write('Annual volatility:', round(perf[1] * 100, 2), '%')
    st.write('Sharpe Ratio:', round(perf[2], 2))
    st.write()
    st.write('Funds Remaining: $', round(leftover, 2))

    # Show the portfolio
    st.write(portfolio_df)
Example #15
def performance(request):
    current_user = request.user
    user = Profile.objects.get(user = current_user)
    all_portfolios_of_current_users = Portfolio.objects.filter(user = user)
    stock_and_prices = {}
    stocks_held = {}
    all_stocks = []
    total_assets_worth = 0
    for indv in all_portfolios_of_current_users:
        all_stocks.append(indv.stock)

    for stock in all_stocks:
        price = si.get_live_price(stock)
                
        stock_and_prices[stock] = {'price': price}
    print(stock_and_prices)
    

    for indv in all_portfolios_of_current_users:
        total_assets_worth += stock_and_prices[indv.stock]['price']  * indv.number

    for indv in all_portfolios_of_current_users:
        stock = indv.stock
        number = indv.number
        worth = stock_and_prices[indv.stock]['price']  * indv.number
        # total_assets_worth += worth
        percentage = stock_and_prices[indv.stock]['price']  * indv.number/ total_assets_worth

        stocks_held[stock] = percentage

    assets = []
    weights = []
    for stock, percentage in stocks_held.items():
        assets.append(stock)
        weights.append(percentage)

    if request.method == "POST":
        weights = np.array(weights)
        #Get the stock starting date
        stockStartDate = '2017-01-01'
        # Get the stocks' ending date (today) formatted as YYYY-MM-DD
        today = datetime.today().strftime('%Y-%m-%d')

        #Create a dataframe to store the adjusted close price of the stocks
        df = pd.DataFrame()
        #Store the adjusted close price of stock into the data frame
        for stock in assets:
            df[stock] = web.DataReader(stock,data_source='yahoo',start=stockStartDate , end=today)['Adj Close']

        # Create the chart title
        title = 'Portfolio Adj. Close Price History    '
        #Get the stocks
        my_stocks = df
        #Create and plot the graph
        plt.figure(figsize=(12.2,4.5)) #width = 12.2in, height = 4.5
        # Loop through each stock and plot the Adj Close for each day
        for c in my_stocks.columns.values:
            plt.plot( my_stocks[c],  label=c)#plt.plot( X-Axis , Y-Axis, line_width, alpha_for_blending,  label)
        plt.title(title)
        plt.xlabel('Date',fontsize=18)
        plt.ylabel('Adj. Price USD ($)',fontsize=18)
        plt.legend(my_stocks.columns.values, loc='upper left')

        returns = df.pct_change()

        cov_matrix_annual = returns.cov() * 252

        port_variance = np.dot(weights.T, np.dot(cov_matrix_annual, weights))

        port_volatility = np.sqrt(port_variance)

        portfolioSimpleAnnualReturn = np.sum(returns.mean()*weights) * 252

        percent_var = str(round(port_variance * 100, 2)) + '%'
        percent_vols = str(round(port_volatility * 100, 2)) + '%'
        percent_ret = str(round(portfolioSimpleAnnualReturn * 100, 2)) + '%'

        mu = expected_returns.mean_historical_return(df)#returns.mean() * 252
        S = risk_models.sample_cov(df) #Get the sample covariance matrix

        ef = EfficientFrontier(mu, S)
        weights = ef.max_sharpe() #Maximize the Sharpe ratio, and get the raw weights
        cleaned_weights = ef.clean_weights()
        weight_list = list(cleaned_weights.values())
        #return the dictionary
        #print(cleaned_weights) #Note the weights may have some rounding error, meaning they may not add up exactly to 1 but should be close

        # print out the portfolio performance
        #ef.portfolio_performance(verbose=True)
        tuple_elements = ef.portfolio_performance(verbose=False)
        ef_return = tuple_elements[0]
        ef_volatility = tuple_elements[1]
        ef_ratio = tuple_elements[2]

        stock_dict = {}
        for i in range(len(weight_list)):
            percentage = "{:.2%}".format(weight_list[i])
            stock_dict[assets[i]] = percentage

        name_list = ['return','volatility','ratio']
        optimized_dict = {}
        for i in range(len(name_list)):
            percentage = "{:.2%}".format(tuple_elements[i])
            optimized_dict[name_list[i]] = percentage

        return render(request, 'performance.html',{'weight_list':weight_list,'assets':assets, 'ef_return':ef_return,'ef_volatility':ef_volatility,'ef_ratio':ef_ratio,'percent_var':percent_var, 'percent_vols':percent_vols, 'percent_ret':percent_ret,'stock_dict':stock_dict,'optimized_dict':optimized_dict})

    else:
        weights = np.array(weights)
        #Get the stock starting date
        stockStartDate = '2017-01-01'
        # Get the stocks' ending date (today) formatted as YYYY-MM-DD
        today = datetime.today().strftime('%Y-%m-%d')

        #Create a dataframe to store the adjusted close price of the stocks
        df = pd.DataFrame()
        #Store the adjusted close price of stock into the data frame
        for stock in assets:
            df[stock] = web.DataReader(stock,data_source='yahoo',start=stockStartDate , end=today)['Adj Close']

        # Create the chart title
        title = 'Portfolio Adj. Close Price History    '
        #Get the stocks
        my_stocks = df
        #Create and plot the graph
        plt.figure(figsize=(12.2,4.5)) #width = 12.2in, height = 4.5
        # Loop through each stock and plot the Adj Close for each day
        for c in my_stocks.columns.values:
            plt.plot( my_stocks[c],  label=c)#plt.plot( X-Axis , Y-Axis, line_width, alpha_for_blending,  label)
        plt.title(title)
        plt.xlabel('Date',fontsize=18)
        plt.ylabel('Adj. Price USD ($)',fontsize=18)
        plt.legend(my_stocks.columns.values, loc='upper left')

        returns = df.pct_change()

        cov_matrix_annual = returns.cov() * 252

        port_variance = np.dot(weights.T, np.dot(cov_matrix_annual, weights))

        port_volatility = np.sqrt(port_variance)

        portfolioSimpleAnnualReturn = np.sum(returns.mean()*weights) * 252

        percent_var = str(round(port_variance * 100, 2)) + '%'
        percent_vols = str(round(port_volatility * 100, 2)) + '%'
        percent_ret = str(round(portfolioSimpleAnnualReturn * 100, 2)) + '%'

        return render(request, 'performance.html',{'percent_var':percent_var, 'percent_vols':percent_vols, 'percent_ret':percent_ret,'assets':assets})
Example #16
for ticker in range(thelen):
    prices = web.DataReader(tickers[ticker],
                            start='2019-01-01',
                            end='2020-06-06',
                            data_source='yahoo')
    price_data.append(prices.assign(ticker=ticker)[['Adj Close']])

# concatenate once, after the loop, rather than on every iteration
df_stocks = pd.concat(price_data, axis=1)
df_stocks.columns = tickers

# computing risk and returns
mu = expected_returns.mean_historical_return(
    df_stocks)  # annualized mean historical returns

Sigma = risk_models.sample_cov(df_stocks)

# computing portfolio


# optimisation functions
def MinimiseRisk(mu, Sigma):
    n = len(mu)
    x = cp.Variable(n)
    B = np.ones(n)

    # minimise portfolio variance subject to long-only, fully-invested constraints
    prob = cp.Problem(cp.Minimize(cp.quad_form(x, Sigma)),
                      [x >= np.zeros(n), B @ x == 1])

    prob.solve()
    return x.value
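# Hypothetical usage of the sketch above (assumes mu and Sigma from the earlier
# pypfopt calls, and that pandas as pd is available): long-only minimum-variance
# weights for the loaded tickers.
w_min_risk = MinimiseRisk(mu.values, Sigma.values)
print(pd.Series(w_min_risk, index=mu.index))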
Example #17
 def calculateSampleCovarianceMatrix(self):
     S = risk_models.sample_cov(self.df)  #Get the sample covariance matrix
     return S
Example #18
def BLmain():

    # Excel call
    sht = xw.Book.caller().sheets['Optim']
    shtdata = xw.Book.caller().sheets['Data']
    sht.range('J17').value = 'Optimizing...'

    #Clear Values
    sht.range('L23').expand().clear_contents()
    shtdata.range('A1').expand().clear_contents()
    shtdata.range('J1').expand().clear_contents()

    #Set variables from excel
    rf = sht.range('J10').value
    MinWeight = sht.range('J11').value
    MaxWeight = sht.range('J12').value
    Delta = sht.range('J13').value
    Tau = sht.range('J14').value
    Output = sht.range('J15').value
    ModelOptim = sht.range('J8').value
    RiskModel = sht.range('J9').value
    listticker = xw.Range('B3').expand().value
    indexname = sht.range('J7').value
    startdate = sht.range('J3').value
    enddate = sht.range('J6').value
    EFBool = sht.range('J16').value
    traintestdate = sht.range('J4').value  # dataset is split in two: train (optimization) and test (backtest)

    #Initializing
    train, test = initialize(startdate, enddate, traintestdate, listticker)
    trainindex, testindex = initializeIndex(startdate, enddate, traintestdate,
                                            indexname)  #for risk aversion

    #Black Litterman
    if RiskModel == 'historicalcov':
        S = risk_models.sample_cov(train)
    elif RiskModel == 'exphistoricalcov':
        S = risk_models.exp_cov(train)

    if Delta is not None:
        delta = Delta
    else:
        delta = black_litterman.market_implied_risk_aversion(trainindex,
                                                             risk_free_rate=rf)

    s = data.get_quote_yahoo(listticker)['marketCap']
    mcaps = {tick: mcap
             for tick, mcap in zip(listticker, s)
             }  # dictionary of market cap for each stock

    #Expected returns implied from the market
    prior = black_litterman.market_implied_prior_returns(mcaps,
                                                         delta,
                                                         S,
                                                         risk_free_rate=rf)
    views, picking = createviews(listticker)
    bl = BlackLittermanModel(S, Q=views, P=picking, pi=prior, tau=Tau)
    rets = bl.bl_returns()
    cov = bl.bl_cov()

    # Two ways of displaying outputs: either run the optimizer, or return the implied weights
    if Output == 'Optimization':
        ef = EfficientFrontier(rets, S, weight_bounds=(MinWeight, MaxWeight))
        #RiskModel
        if ModelOptim == 'min_volatility':
            raw_weights = ef.min_volatility()
        elif ModelOptim == 'max_sharpe':
            raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()
        finalw = [cleaned_weights.get(i, 1) for i in listticker]
        perf = ef.portfolio_performance(verbose=True, risk_free_rate=rf)
        sht.range('H21').value = perf

    elif Output == 'Return-Implied-Weight':
        bl.bl_weights(delta)
        weights = bl.clean_weights()
        finalw = [weights.get(i, 1) for i in listticker]
    finalr = [rets.get(i, 1) for i in listticker]  #E(R) from BL

    #Display results
    sht.range('L23').options(transpose=True).value = listticker
    sht.range('M23').options(transpose=True).value = finalw
    sht.range('N23').options(transpose=True).value = finalr

    #Copy Data in Data Range
    shtdata.range((1, 1)).value = train
    shtdata.range((1, len(listticker) + 3)).value = test
    #numshares, left = getoptimprices(test, cleanW, InitialAmountInPortfolio)

    #Visualisation
    sht.charts['BLweights'].set_source_data(
        sht.range((23, 12), (22 + len(listticker), 13)))
    CorrMap(sht, 'CorrMatPrior', S, 'coolwarm')
    CorrMap(sht, 'CorrMatBL', cov, 'YlGn')
    if EFBool == "YES":
        effrontier(rets, S, sht, 'EFBL')

    #Done
    sht.range('J17').value = 'Optimization Done'
Example #19
def test_volatility():
    df = get_data()
    S = sample_cov(df)
    w = np.array([1 / df.shape[1]] * df.shape[1])
    vol = objective_functions.volatility(w, S)
    np.testing.assert_almost_equal(vol, 0.21209018103844543)
Example #20
##
def dict_print(df_dict):
    for key, value in df_dict.items():
        print('{key}:{value}'.format(key=key, value=value))
    return


##
"""
portdf.to_csv("files/date.csv")
"""

##

mu = expected_returns.mean_historical_return(portdf)
S = risk_models.sample_cov(portdf)
print(mu)
print(S)

##
"""strategy-1"""
"""max sharpe method optimal portfolio"""
ef_maxsp = EfficientFrontier(mu, S)
raw_weights = ef_maxsp.max_sharpe()
cleaned_weights = ef_maxsp.clean_weights()
"""ef.save_weights_to_file("/files/weights.csv")  # saves to file"""

print("the weights of max sharpe portfolio")
dict_print(cleaned_weights)
ef_maxsp.portfolio_performance(verbose=True)
Example #21
SPY_returns = SPY.pct_change().dropna()
Factors = ETF_Prices.iloc[:,1:]
Factor_returns = Factors.pct_change().dropna()

# Loop to solve for portfolio weights each period 
backtest_wgts = []

return_lb = 6*21  # return lookback: six months of 21 trading days
covar_lb = 2*252  # covariance lookback: two years of 252 trading days
rows = len(ETF_Prices.index) - covar_lb + 1 # length of loop
dates = ETF_Prices.index[covar_lb-1:] # datetime index

for i in range(rows):
    mu1 = expected_returns.mean_historical_return(Factors.iloc[covar_lb-return_lb+i:covar_lb+i])
    mu = np.clip(mu1, 0.01, 0.20)
    S = risk_models.sample_cov(Factors.iloc[i:covar_lb+i,:])
    ef = EfficientFrontier(mu, S, weight_bounds=(0, 0.30))
    try:
        raw_weights = ef.max_sharpe()
        wgts = list(ef.clean_weights().values())
    except Exception:
        wgts = [0.2] * 5  # fall back to equal weights across the five factors
    backtest_wgts.append(wgts)

wgts_df = pd.DataFrame(backtest_wgts, index=dates, columns=factor_names)
wgts_arr = np.array(backtest_wgts[:-1])
returns_arr = (Factor_returns.iloc[-len(wgts_df)+1:]).to_numpy()

model_returns = (wgts_arr * returns_arr).sum(axis=1)
SPY_returns = SPY_returns[-len(model_returns):]
dts = SPY_returns.index[-len(model_returns):]
Example #22
import time
import pypfopt
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import expected_returns
from pypfopt import risk_models
import settings

start_time = time.time()
start_date = datetime.datetime(settings.start_date[0], settings.start_date[1],
                               settings.start_date[2])
end_date = datetime.datetime(settings.end_date[0], settings.end_date[1],
                             settings.end_date[2])

price_data = web.DataReader(settings.portfolio, 'yahoo', start_date,
                            end_date)['Adj Close']
price_data.index.rename('date', inplace=True)

returns = expected_returns.mean_historical_return(price_data)
cov = risk_models.sample_cov(price_data)

ef = EfficientFrontier(returns, cov, weight_bounds=(.1, .4))
sharpe = ef.max_sharpe()
min_vol = ef.min_volatility()
performance = ef.portfolio_performance()
# returns expected returns, volatility, sharpe ratio

elapsed = round(time.time() - start_time, 2)

print("Portfolio optimized..")
print("Completed in {} seconds..".format(elapsed))
Example #23
def test_market_implied_prior():
    df = get_data()
    S = risk_models.sample_cov(df)

    prices = pd.read_csv(
        "tests/spy_prices.csv", parse_dates=True, index_col=0, squeeze=True
    )
    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    pi = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    assert isinstance(pi, pd.Series)
    assert list(pi.index) == list(df.columns)
    assert pi.notnull().all()
    assert pi.dtype == "float64"
    np.testing.assert_array_almost_equal(
        pi.values,
        np.array(
            [
                0.14933293,
                0.2168623,
                0.11219185,
                0.10362374,
                0.28416295,
                0.12196098,
                0.19036819,
                0.08860159,
                0.17724273,
                0.08779627,
                0.0791797,
                0.16460474,
                0.12854665,
                0.08657863,
                0.11230036,
                0.13875465,
                0.15017163,
                0.09066484,
                0.1696369,
                0.13270213,
            ]
        ),
    )

    mcaps = pd.Series(mcaps)
    pi2 = black_litterman.market_implied_prior_returns(mcaps, delta, S)
    pd.testing.assert_series_equal(pi, pi2, check_exact=False)
Example #24
 def _getEfficientFrontier(self, portfolio_data) -> EfficientFrontier:
     mu: Series = expected_returns.mean_historical_return(
         portfolio_data)  # returns.mean() * 252
     S: DataFrame = risk_models.sample_cov(
         portfolio_data)  # Get the sample covariance matrix
     return EfficientFrontier(mu, S)
Example #25
def update_graph1(n_clicks, stock_ticker, lookforward_period, risk):
    exp_ret = pd.Series()
    content1 = list(stock_ticker)
    data3 = pd.DataFrame()
    for contents in content1:
        data3 = pd.concat(
            [data3,
             yf.download(f"{contents}", start="2015-01-01", end="2020-01-01").iloc[:, 4]],
            axis=1,
            sort=False)
    data3.columns = content1

    data4 = data3.dropna(how="all")
    data4 = data4.dropna(axis='columns', how="any")

    cumulative_ret_data = pd.DataFrame()
    for contents in content1:
        cumulative_ret_data[f"{contents}"] = (
            1 + (data4[f"{contents}"]).pct_change()).cumprod()
    cumulative_ret_data = cumulative_ret_data.fillna(1)
    S = risk_models.sample_cov(data4)

    if lookforward_period == 2:
        exp_ret = pypfopt.expected_returns.mean_historical_return(
            data4, frequency=500)
        if (risk == 2):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_sharpe(risk_free_rate=0.02)
        elif (risk == 3):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_quadratic_utility(risk_aversion=0.00001,
                                                  market_neutral=False)
        else:
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.min_volatility()
    elif (lookforward_period == 3):
        exp_ret = pypfopt.expected_returns.mean_historical_return(
            data4, frequency=750)
        if (risk == 2):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_sharpe(risk_free_rate=0.02)
        elif (risk == 3):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_quadratic_utility(risk_aversion=0.00001,
                                                  market_neutral=False)
        else:
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.min_volatility()
    else:
        exp_ret = pypfopt.expected_returns.mean_historical_return(
            data4, frequency=250)
        if (risk == 2):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_sharpe(risk_free_rate=0.02)
        elif (risk == 3):
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.max_quadratic_utility(risk_aversion=0.00001,
                                                  market_neutral=False)
        else:
            ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
            weights_ef = ef.min_volatility()
        #exp_ret=pypfopt.expected_returns.mean_historical_return(data4, frequency=250)

    #exp_ret=pypfopt.expected_returns.mean_historical_return(data4, frequency=252)

    if (risk == 2):
        ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
        weights_ef = ef.max_sharpe(risk_free_rate=0.02)
    elif (risk == 3):
        ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
        weights_ef = ef.max_quadratic_utility(risk_aversion=0.00001,
                                              market_neutral=False)
    else:
        ef = EfficientFrontier(exp_ret, S, weight_bounds=(-1, 1), gamma=1)
        weights_ef = ef.min_volatility()

    dictlist = []
    for key, value in weights_ef.items():
        temp = [key, value]
        dictlist.append(temp)

    weights = pd.DataFrame(dictlist, columns=['ticker',
                                              'weight']).set_index('ticker')
    weights = weights.iloc[:, 0]
    weights = weights.sort_index()
    weight_list = weights.tolist()

    HRP_cumu = cumulative_ret_data.reindex(
        sorted(cumulative_ret_data.columns),
        axis=1)  # sort columns by company name
    HRP_df = HRP_cumu.mul(weight_list, axis=1)
    HRP_df['final_returns'] = HRP_df.sum(axis=1)

    fig2 = {
        # set data equal to traces
        'data': [{
            'x': HRP_df.index,
            'y': HRP_df["final_returns"],
            'name': ', '.join(stock_ticker)
        }],
        # use string formatting to include all symbols in the chart title
        'layout': {
            'title': ', '.join(stock_ticker) + ' portfolio'
        }
    }
    return fig2
Example #26
def mo_portfolio(budget, yesterday, end_period):

    # debug
    print(f'Hello from mo_portfolio')
    print(f'- budget:\t\t {budget}')
    print(f'- yesterday:\t {yesterday}')
    print(f'- end_period:\t {end_period}')

    if end_period is None:
        return 'False'

    global g_budget
    g_budget = budget
    global g_date
    g_date = [yesterday, end_period]
    # yesterday: day before the creation of the portfolio
    # end_period: end of investment

    chart = pd.DataFrame()
    # chart: adjusted closes up to yesterday
    for s in stocks:
        chart = pd.concat(
            [chart, dataframes[s]['Adj Close'].loc[:yesterday, ]], axis=1)
    chart.columns = stocks
    # compute yearly (Grouper freq="Y") CC returns
    chart_rt = {}
    for s in chart:
        tmp = chart[s].groupby(pd.Grouper(freq="Y"))
        tmp2 = tmp.mean()
        chart_rt[s] = np.log(tmp2 / tmp2.shift(1))
    chart_rt = pd.DataFrame.from_dict(chart_rt)
    chart_rt = chart_rt.dropna()
    chart_rt.columns = [
        "AAPL CC returns", "NVDA CC returns", "KO CC returns", "UL CC returns",
        "BAC CC returns", "AXP CC returns"
    ]

    # add transaction costs (1.5% fee per share)
    chart = chart.apply(lambda x: x + (x * 0.015))

    # Optimal portfolio

    # computes annualized average historical return
    avg_returns = expected_returns.mean_historical_return(chart)
    # sample covariance matrix
    S = risk_models.sample_cov(chart)
    ef = EfficientFrontier(avg_returns, S)

    # Minimize the volatility of the portfolio (Markowitz)
    weights = ef.min_volatility()
    # round the weight values; they may not add up exactly to 1 but should be close
    weights = ef.clean_weights()

    Mop_pw = weights

    opt_return, opt_risk, _ = ef.portfolio_performance(verbose=False)
    global g_expected_return_volat
    g_expected_return_volat = [opt_return, opt_risk]

    recap = {}
    for s in weights:
        # print(f'{s} budget {budget}, {type(budget)}')     # debug
        # print(f'{s} weights[s]/chart[s].iloc[-1] {weights[s]/chart[s].iloc[-1]}, {type(weights[s]/chart[s].iloc[-1])}')   # debug
        recap[s] = [int(np.floor(budget * weights[s] / chart[s].iloc[-1]))]  # number of shares
        price_no_fee = np.round(chart[s].iloc[-1] -
                                (chart[s].iloc[-1] * 1.5 / 101.5),
                                decimals=2)
        recap[s].append(price_no_fee)  # price for each shares
        recap[s].append(np.round(price_no_fee * 0.015, 2))  # transaction costs 1.5%
        tot_cost = np.around(recap[s][0] * (recap[s][1] + recap[s][2]),
                             decimals=2)
        recap[s].append(tot_cost)  # total cost of the investment in s: shares * (price per share + transaction costs)

    recap = pd.DataFrame.from_dict(recap, orient='index')
    recap.columns = [
        'Num of shares', 'Price for each share $', 'Transaction costs $',
        'Purchase cost $'
    ]

    global g_recap
    g_recap = recap

    total = 0
    for _, row in recap.iterrows():
        total += row['Purchase cost $']

    total = np.around(total, decimals=2)

    global g_spent
    g_spent = total
    global g_left
    g_left = str(np.around(budget - total, decimals=2))

    price_end = {}
    tot_port = 0
    for s in dataframes:
        price_end[s] = dataframes[s]['Adj Close'].loc[end_period]

    act_return = 0
    for index, row in recap.iterrows():
        tot_port += np.around(row['Num of shares'] *
                              (price_end[index] + row['Transaction costs $']),
                              decimals=2)
        rtn = (price_end[index] + row['Transaction costs $']
               ) / recap.loc[index, 'Price for each share $'] - 1
        act_return += weights[index] * rtn

    global g_returns
    g_returns = str(np.around(tot_port, decimals=2)) + ' [' + str(
        np.round(100 * act_return, decimals=2)) + '%]'
    print(g_returns)

    return "True"
Example #27
def test_bl_tau():
    df = get_data()
    S = risk_models.sample_cov(df)

    prices = pd.read_csv("tests/spy_prices.csv",
                         parse_dates=True,
                         index_col=0,
                         squeeze=True)
    delta = black_litterman.market_implied_risk_aversion(prices)

    mcaps = {
        "GOOG": 927e9,
        "AAPL": 1.19e12,
        "FB": 574e9,
        "BABA": 533e9,
        "AMZN": 867e9,
        "GE": 96e9,
        "AMD": 43e9,
        "WMT": 339e9,
        "BAC": 301e9,
        "GM": 51e9,
        "T": 61e9,
        "UAA": 78e9,
        "SHLD": 0,
        "XOM": 295e9,
        "RRC": 1e9,
        "BBY": 22e9,
        "MA": 288e9,
        "PFE": 212e9,
        "JPM": 422e9,
        "SBUX": 102e9,
    }
    prior = black_litterman.market_implied_prior_returns(mcaps, delta, S)

    viewdict = {"GOOG": 0.40, "AAPL": -0.30, "FB": 0.30, "BABA": 0}

    # Need to change omega for this test to work
    omega = np.diag([0.01, 0.01, 0.01, 0.01])

    bl0 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=1e-10,
                              omega=omega)
    bl1 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=0.01,
                              omega=omega)
    bl2 = BlackLittermanModel(S,
                              pi=prior,
                              absolute_views=viewdict,
                              tau=0.1,
                              omega=omega)

    # For tiny tau, posterior should roughly equal prior
    np.testing.assert_allclose(bl0.bl_returns(), bl0.pi.flatten(), rtol=1e-5)

    # For bigger tau, GOOG should be given more weight
    assert bl1.bl_returns()["GOOG"] > bl0.bl_returns()["GOOG"]
    assert bl2.bl_returns()["GOOG"] > bl1.bl_returns()["GOOG"]
Example #28
 def _get_sample_cov(self):
     return risk_models.sample_cov(self.price_data_df)
Example #29
display_tear_sheet()

#
# Modern Portfolio Theory
#

# Import the packages
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.efficient_frontier import EfficientFrontier

# Calculate expected returns mu
mu = expected_returns.mean_historical_return(stock_prices)

# Calculate the covariance matrix S
Sigma = risk_models.sample_cov(stock_prices)

# Obtain the efficient frontier
ef = EfficientFrontier(mu, Sigma)
print(mu, Sigma)

# Get the returns from the stock price data
returns = stock_prices.pct_change()

# Calculate the annualized covariance matrix
covMatrix = returns.cov() * 252

# Calculate the covariance matrix Sigma with a PyPortfolioOpt function
Sigma = risk_models.sample_cov(stock_prices)

# Print both covariance matrices
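# The snippet ends before the actual prints; a minimal completion:
print(covMatrix)
print(Sigma)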
Example #30
def show_ef(stocks: List[str], other_args: List[str]):
    """Display efficient frontier

    Parameters
    ----------
    stocks : List[str]
        List of the stocks to be included in the weights
    other_args : List[str]
        argparse other args
    """

    parser = argparse.ArgumentParser(
        add_help=False,
        prog="ef",
        description="""This function plots random portfolios based
                                     on their risk and returns and shows the efficient frontier.""",
    )

    parser.add_argument(
        "-p",
        "--period",
        default="3mo",
        dest="period",
        help="period to get yfinance data from",
        choices=period_choices,
    )
    parser.add_argument(
        "-n",
        "--number-portfolios",
        default=300,
        type=check_non_negative,
        dest="n_port",
        help="number of portfolios to simulate",
    )

    try:
        if other_args:
            if "-" not in other_args[0]:
                other_args.insert(0, "-n")
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return
        if len(stocks) < 2:
            print("Please have at least 2 loaded tickers to calculate weights.\n")
            return

        stock_prices = process_stocks(stocks, ns_parser.period)
        mu = expected_returns.mean_historical_return(stock_prices)
        S = risk_models.sample_cov(stock_prices)
        ef = EfficientFrontier(mu, S)
        _, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)

        # Generate random portfolios
        n_samples = ns_parser.n_port
        w = np.random.dirichlet(np.ones(len(mu)), n_samples)
        rets = w.dot(mu)
        stds = np.sqrt(np.diag(w @ S @ w.T))
        sharpes = rets / stds
        ax.scatter(stds, rets, marker=".", c=sharpes, cmap="viridis_r")

        plotting.plot_efficient_frontier(ef, ax=ax, show_assets=True)
        # Find the tangency portfolio
        ef.max_sharpe()
        ret_sharpe, std_sharpe, _ = ef.portfolio_performance()
        ax.scatter(std_sharpe, ret_sharpe, marker="*", s=100, c="r", label="Max Sharpe")

        ax.set_title(f"Efficient Frontier simulating {ns_parser.n_port} portfolios")
        ax.legend()
        plt.tight_layout()
        plt.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.minorticks_on()
        plt.grid(b=True, which="minor", color="#999999", linestyle="-", alpha=0.2)

        if gtff.USE_ION:
            plt.ion()

        plt.show()
        print("")

    except Exception as e:
        print(e)
        print("")
Example #31
    print('no file!')

df = pd.DataFrame()
for tick in tickers:
    try:
        df[tick[0]] = web.DataReader(tick[0],
                                     data_source='yahoo',
                                     start="2009-01-01",
                                     end=today)['Adj Close']
        print(tick[0], "added")
    except Exception:
        print(tick[0], "not added")
        pass

mu = expected_returns.mean_historical_return(df)  #returns.mean() * 252
S = risk_models.sample_cov(df)  #Get the sample covariance matrix
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe()  #Maximize the Sharpe ratio, and get the raw weights
cleaned_weights = ef.clean_weights()
print(cleaned_weights)  # Note: the weights may have some rounding error, so they may not add up exactly to 1
ef.portfolio_performance(verbose=True)

latest_prices = get_latest_prices(df)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=2000)
allocation, leftover = da.lp_portfolio()
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))
Example #32
def test_fix_npd_different_method():
    df = get_data()
    S = risk_models.sample_cov(df)
    assert risk_models._is_positive_semidefinite(S)
    S = risk_models.sample_cov(df, fix_method="diag")
    assert risk_models._is_positive_semidefinite(S)
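# The PSD property asserted above can be checked directly via eigenvalues
# (a sketch; pypfopt's private _is_positive_semidefinite may be implemented
# differently, e.g. via a Cholesky attempt):
def is_positive_semidefinite_sketch(matrix, tol=1e-10):
    # a symmetric matrix is PSD iff its smallest eigenvalue is (numerically) non-negative
    return np.linalg.eigvalsh(matrix)[0] >= -tol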
Example #33
from pypfopt.expected_returns import mean_historical_return
from pypfopt.risk_models import sample_cov

from mcos.covariance_transformer import DeNoiserCovarianceTransformer
from mcos.error_estimator import ExpectedOutcomeErrorEstimator, SharpeRatioErrorEstimator, VarianceErrorEstimator
from mcos.mcos import simulate_optimizations, simulate_optimizations_from_price_history
from mcos.observation_simulator import MuCovObservationSimulator, MuCovLedoitWolfObservationSimulator,\
    MuCovJackknifeObservationSimulator

from mcos.optimizer import HRPOptimizer, MarkowitzOptimizer, NCOOptimizer, RiskParityOptimizer

prices_df = pd.read_csv('tests/stock_prices.csv',
                        parse_dates=True,
                        index_col='date')
mu = mean_historical_return(prices_df).values
cov = sample_cov(prices_df).values


@pytest.mark.parametrize(
    'simulator, estimator, transformers, expected_mean, expected_stdev',
    [(MuCovObservationSimulator(mu, cov, n_observations=5),
      ExpectedOutcomeErrorEstimator(), [DeNoiserCovarianceTransformer()],
      np.array([0.07580845, 0.05966212, -0.02893896, 0.0085226]),
      np.array([0.03445259, 0.03214469, 0.01724587, 0.01244282])),
     (MuCovObservationSimulator(
         mu, cov, n_observations=5), ExpectedOutcomeErrorEstimator(), [],
      np.array([0.05043029, -0.0761952, -0.03200537, -0.00413669
                ]), np.array([0.05422127, 0.25850676, 0.0196157, 0.01376204])),
     (MuCovObservationSimulator(mu, cov, n_observations=5),
      SharpeRatioErrorEstimator(), [DeNoiserCovarianceTransformer()],
      np.array([0.44088768, 0.32030003, -0.26876011, 0.15122857
Example #34
def test_sample_cov_frequency():
    df = get_data()
    S = risk_models.sample_cov(df)
    S2 = risk_models.sample_cov(df, frequency=2)
    pd.testing.assert_frame_equal(S / 126, S2)
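# Why S / 126 == S2: sample_cov scales the daily return covariance linearly by
# `frequency`, so S(frequency=252) / S(frequency=2) == 252 / 2 == 126.
# An equivalent check in plain pandas (a sketch, assuming a price DataFrame):
def frequency_scaling_check(prices):
    returns = prices.pct_change().dropna(how="all")
    pd.testing.assert_frame_equal(returns.cov() * 2,
                                  risk_models.sample_cov(prices, frequency=2))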
Example #35
def test_bl_weights():
    df = get_data()
    S = risk_models.sample_cov(df)

    viewdict = {
        "AAPL": 0.20,
        "BBY": -0.30,
        "BAC": 0,
        "SBUX": -0.2,
        "T": 0.131321
    }
    bl = BlackLittermanModel(S, absolute_views=viewdict)

    prices = pd.read_csv(resource("spy_prices.csv"),
                         parse_dates=True,
                         index_col=0,
                         squeeze=True)

    delta = black_litterman.market_implied_risk_aversion(prices)
    bl.bl_weights(delta)
    w = bl.clean_weights()
    assert abs(sum(w.values()) - 1) < 1e-5

    # check weights are allocated in same direction as views
    # (in absence of priors)
    assert all(viewdict[t] * w[t] >= 0 for t in viewdict)

    # numerical check
    test_weights = {
        "GOOG": 0.0,
        "AAPL": 1.40675,
        "FB": 0.0,
        "BABA": 0.0,
        "AMZN": 0.0,
        "GE": 0.0,
        "AMD": 0.0,
        "WMT": 0.0,
        "BAC": 0.02651,
        "GM": 0.0,
        "T": 2.81117,
        "UAA": 0.0,
        "SHLD": 0.0,
        "XOM": 0.0,
        "RRC": 0.0,
        "BBY": -1.44667,
        "MA": 0.0,
        "PFE": 0.0,
        "JPM": 0.0,
        "SBUX": -1.79776,
    }
    assert w == test_weights

    bl = BlackLittermanModel(S, absolute_views=viewdict)
    bl.optimize(delta)
    w2 = bl.clean_weights()
    assert w2 == w

    bl = BlackLittermanModel(S, absolute_views=pd.Series(viewdict))
    bl.optimize(delta)
    w2 = bl.clean_weights()
    assert w2 == w
Example #36
def test_volatility():
    df = get_data()
    S = sample_cov(df)
    w = np.array([1 / df.shape[1]] * df.shape[1])
    var = objective_functions.volatility(w, S)
    np.testing.assert_almost_equal(var, 0.04498224489292057)
Example #37
def test_sample_cov_frequency():
    df = get_data()
    S = risk_models.sample_cov(df)
    S2 = risk_models.sample_cov(df, frequency=2)
    pd.testing.assert_frame_equal(S / 126, S2)
Example #38
def test_volatility():
    df = get_data()
    S = sample_cov(df)
    w = np.array([1 / df.shape[1]] * df.shape[1])
    var = objective_functions.portfolio_variance(w, S)
    np.testing.assert_almost_equal(var, 0.04498224489292057)
Example #39
local_df = local_df.replace(0, np.nan).dropna(axis=1)
columns = list(local_df.columns)
drop = list(set(cryptos) - set(columns))

local_df = returns[start_date_i:start_portfolio_i]
local_df = local_df.iloc[1:]  # remove first row
local_df = local_df.drop(drop, axis=1)
columns = list(local_df.columns)

mu = expected_returns.mean_historical_return(local_df,
                                             returns_data=True,
                                             frequency=365)
rf = 0.00  # risk-free rate (e.g. 0.02; set to 0 here)
mu_excess = mu - rf

Sigma = risk_models.sample_cov(local_df, returns_data=True, frequency=365)
Sigma = rmt.clipped(Sigma, return_covariance=True)
Sigma = pd.DataFrame(data=Sigma, index=columns, columns=columns)

ones = [1] * (len(columns))
arr_ones = np.array(ones)
arr_ones = pd.DataFrame([arr_ones], columns=columns)

Sigma_inv = pd.DataFrame(np.linalg.pinv(Sigma.values), Sigma.columns,
                         Sigma.index)

x = arr_ones.dot(Sigma_inv)
y = x.dot(mu_excess)
y = y**-1
weights = Sigma_inv.multiply(float(y))
weights = weights.dot(mu_excess)
Example #40
stock_weights = []
sharpe_ratio = []

num_assets = len(codes)
num_portfolios = NUM_PORTFOLIOS

returns_daily_all = table.pct_change().dropna(how="all")
returns_daily = returns_daily_all.mean()
# returns_annual = returns_daily.mean()
print(returns_daily_all.std())
cov_daily = returns_daily_all.cov()
# cov_annual = cov_daily
# print(returns_annual)

avg_returns = expected_returns.mean_historical_return(table)
cov_mat = risk_models.sample_cov(table)
ef = EfficientFrontier(avg_returns, cov_mat)

# CLA
# cla = CriticalLineAlgorithm()
# cla.allocate(expected_asset_returns=returns_daily,covariance_matrix=cov_daily, solution='efficient_frontier', asset_names=codes)
# cla_weights = cla.weights
# means, sigma = cla.efficient_frontier_means, cla.efficient_frontier_sigma
# plt.plot(sigma, means)
# plt.show()

cla = CriticalLineAlgorithm()
cla.allocate(asset_prices=table, asset_names=codes, solution='min_volatility')
cla_weights = cla.weights.sort_values(by=0, ascending=False, axis=1)
weights = cla_weights.loc[0]
avg_returns = expected_returns.mean_historical_return(table)
Example #41
import pandas as pd
import numpy as np
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.hierarchical_risk_parity import hrp_portfolio
from pypfopt.value_at_risk import CVAROpt
from pypfopt import discrete_allocation


# Reading in the data; preparing expected returns and a risk model
df = pd.read_csv("tests/stock_prices.csv", parse_dates=True, index_col="date")
returns = df.pct_change().dropna(how="all")
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# Long-only Maximum Sharpe portfolio, with discretised weights
ef = EfficientFrontier(mu, S)
weights = ef.max_sharpe()
ef.portfolio_performance(verbose=True)
latest_prices = discrete_allocation.get_latest_prices(df)
allocation, leftover = discrete_allocation.portfolio(weights, latest_prices)
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))

"""
Expected annual return: 33.0%
Annual volatility: 21.7%
Sharpe Ratio: 1.43

Discrete allocation: {'MA': 14, 'FB': 12, 'PFE': 51, 'BABA': 5, 'AAPL': 5,
Example #42
def get_momentum_stocks(df, date, portfolio_size, cash):
    # Filter the df to get the top momentum stocks for the latest day
    df_top_m = df.loc[df['date'] == pd.to_datetime(date)]
    df_top_m = df_top_m.sort_values(by='momentum', ascending=False).head(portfolio_size)

    # Set the universe to the top momentum stocks for the period
    universe = df_top_m['symbol'].tolist()

    # Create a df with just the stocks from the universe
    df_u = df.loc[df['symbol'].isin(universe)]

    # Create the portfolio
    # Pivot to format for the optimization library
    df_u = df_u.pivot_table(
        index='date',
        columns='symbol',
        values='close',
        aggfunc='sum'
        )

    # Calculate expected returns and sample covariance
    mu = expected_returns.mean_historical_return(df_u)
    S = risk_models.sample_cov(df_u)

    # Optimise the portfolio for maximal Sharpe ratio
    ef = EfficientFrontier(mu, S, gamma=1)  # Use regularization (gamma=1)
    weights = ef.max_sharpe()
    cleaned_weights = ef.clean_weights()

    # Allocate
    latest_prices = get_latest_prices(df_u)

    da = DiscreteAllocation(
        cleaned_weights,
        latest_prices,
        total_portfolio_value=cash
        )

    allocation = da.lp_portfolio()[0]

    # Put the stocks and the number of shares from the portfolio into a df
    symbol_list = []
    num_shares_list = []

    for symbol, num_shares in allocation.items():
        symbol_list.append(symbol)
        num_shares_list.append(num_shares)

    # Now that we have the stocks we want to buy we filter the df for those ones
    df_buy = df.loc[df['symbol'].isin(symbol_list)]

    # Filter for the period to get the closing price
    df_buy = df_buy.loc[df_buy['date'] == date].sort_values(by='symbol')

    # Add in the qty that was allocated to each stock
    df_buy['qty'] = num_shares_list

    # Calculate the amount we own for each stock
    df_buy['amount_held'] = df_buy['close'] * df_buy['qty']
    df_buy = df_buy.loc[df_buy['qty'] != 0]
    return df_buy
Example #43
    value=True)
allocmo = st.sidebar.checkbox(
    'If checked, the greedy_portfolio model is computed; otherwise lp_portfolio is used.',
    value=True)
cutoff = st.sidebar.slider(
    'Minimum participation percentage of a stock in the portfolio.', 0.01, 0.30,
    0.10)

c1, c2, c3, c4 = st.beta_columns((1, 1, 1, 1))
# ----- Portfolio No. 1 (general)
# Calculate portfolio mu and S
mu = expected_returns.mean_historical_return(df_t)
if riskmo:
    S = CovarianceShrinkage(df_t).ledoit_wolf()
else:
    S = risk_models.sample_cov(df_t)
# Optimise the portfolio
ef = EfficientFrontier(mu, S, gamma=2)  # Use regularization (gamma=2)
if weightsmo:
    weights = ef.max_sharpe()
else:
    weights = ef.min_volatility()
cleaned_weights = ef.clean_weights(cutoff=cutoff, rounding=3)
ef.portfolio_performance()

c1.subheader('Portfolio No. 1')
c1.write(
    'The portfolio recommended from the historical prices of the selected stocks has the following characteristics'
)
c1.write('Initial portfolio value: ' + str(port_value) + '€')
c1.write('Sharpe Ratio: ' + str(round(ef.portfolio_performance()[2], 2)))
Example #44
import pandas as pd
import matplotlib.pyplot as plt
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt.black_litterman import BlackLittermanModel

prbr = pd.read_excel(r'C:\Users\Jhona\OneDrive\Área de Trabalho\PRBR11.xlsx',
                     index_col='Data',
                     parse_dates=['Data'])

### Expected returns mu
mu = expected_returns.mean_historical_return(prbr)

### Covariance matrix
sigma = risk_models.sample_cov(prbr)

### Efficient frontier: max Sharpe
ef = EfficientFrontier(mu, sigma)
weights = ef.max_sharpe()
ef.efficient_risk(2.0)
ef.efficient_return(1.5)
cleaned_weights = ef.clean_weights()
print(weights, cleaned_weights)
ef.portfolio_performance(verbose=True, risk_free_rate=0.0225)

### Efficient frontier: min volatility
ef = EfficientFrontier(mu, sigma)
raw_weights = ef.min_volatility()
cleaned_weights = ef.clean_weights()
print(cleaned_weights)