Code example #1
def test_lp_portfolio_allocation():
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()

    latest_prices = get_latest_prices(df)
    da = DiscreteAllocation(w, latest_prices, short_ratio=0.3)
    allocation, leftover = da.lp_portfolio()

    #  Weirdly, this gives different answers for py3.8+ vs py3.6-3.7.
    assert allocation == {
        "AMD": 1,
        "GOOG": 1,
        "AAPL": 4,
        "FB": 12,
        "BABA": 4,
        "BBY": 2,
        "MA": 20,
        "PFE": 54,
        "SBUX": 1,
    } or allocation == {
        "GOOG": 1,
        "AAPL": 4,
        "FB": 12,
        "BABA": 4,
        "AMD": 1,
        "BBY": 2,
        "MA": 20,
        "PFE": 54,
        "SBUX": 1,
    }

    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000, decimal=4)
Code example #2
File: coefficient.py  Project: miladnazar/Project_3
def generate_portfolio(starting_investment, stock_price_history):

    # Set the Date column as the index
    stock_price_history = stock_price_history.set_index(
        pd.DatetimeIndex(stock_price_history['Date'].values))
    # Remove the Date column
    stock_price_history.drop(columns=['Date'], axis=1, inplace=True)
    stock_price_history.dropna(axis=1, inplace=True)

    # Optimize the portfolio
    from pypfopt.efficient_frontier import EfficientFrontier
    from pypfopt import risk_models
    from pypfopt import expected_returns

    # Calculate the expected annualized returns and the annualized sample covariance matrix of the daily asset returns
    mu = expected_returns.mean_historical_return(stock_price_history)
    S = risk_models.sample_cov(stock_price_history)

    # Optimize for the maximal Sharpe ratio
    ef = EfficientFrontier(mu, S)  # Creates the Efficient Frontier Object
    weights = ef.max_sharpe()

    cleaned_weights = ef.clean_weights()
    #print(cleaned_weights)
    ef.portfolio_performance(verbose=True)

    # Get the discrete allocation of each share per stock
    from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

    latest_prices = get_latest_prices(stock_price_history)
    weights = cleaned_weights
    da = DiscreteAllocation(weights,
                            latest_prices,
                            total_portfolio_value=starting_investment)
    allocation, leftover = da.lp_portfolio()
    print("Discrete allocation:", allocation)
    print("Funds Remaining: $", leftover)
    return (allocation, leftover)
Code example #3
def calculateInvestment(limit=10,
                        count=10,
                        write_to_file=True,
                        show_cla=False,
                        tpv=20000):
    symbols = getSymbolsFromDatabase()
    prices = createDataFrame(symbols[:limit], count)
    mu = expected_returns.mean_historical_return(prices)
    S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))
    ef.add_objective(objective_functions.L2_reg)
    ef.min_volatility()
    c_weights = ef.clean_weights()
    if write_to_file:
        ef.save_weights_to_file("weights.txt")
    if show_cla:
        cla = CLA(mu, S)
        ef_plot(cla)
    ef.portfolio_performance(verbose=True)
    latest_prices = disc_alloc.get_latest_prices(prices)
    allocation_minv, leftover = disc_alloc.DiscreteAllocation(
        c_weights, latest_prices, total_portfolio_value=tpv).lp_portfolio()
    return allocation_minv, leftover
Code example #4
def min_volatility(stocks_in_portfolio):
     stock_data = web.DataReader(stocks_in_portfolio,data_source='yahoo',start=start_date,end=end_date)['Adj Close']
     stock_data.sort_index(inplace=True)
     
     mu = expected_returns.mean_historical_return(stock_data)
     S = risk_models.sample_cov(stock_data)
     lower_bound=0.30/len(stocks_in_portfolio)

     # Optimise for minimum volatility, with a lower bound on each weight
     ef = EfficientFrontier(mu,S,weight_bounds=(lower_bound,1))
     #Need to change the risk free rate 
     raw_weights = ef.min_volatility()
     cleaned_weights = ef.clean_weights()
     cleaned_weights_df=pd.DataFrame.from_dict(cleaned_weights, orient='index')
     #remove weights with 0% 
     cleaned_weights_df=cleaned_weights_df.loc[(cleaned_weights_df!=0).any(1)]
     #print("Portfolio having maximal sharpie ratio and with no contraints\n" )
    # print(cleaned_weights)
     final_return= ef.portfolio_performance(verbose=True)
     index=['Expected Annual Return','Expected Annual Volatility','Sharpe Ratio']
     final_return_df = pd.DataFrame(final_return,index=index)
     final_df=pd.concat([cleaned_weights_df,final_return_df])
     return final_df
Code example #5
    def __init__(self, ret_freq, long_short):
        self.ret_freq = ret_freq
        self.compare_true_ret = pd.read_pickle(
            f"compare_true_ret_{self.ret_freq}.pkl")

        ## need to shift backward as it's a 1 step forward prediction
        self.compare_var_ret = pd.read_pickle(
            f"compare_var_ret_{self.ret_freq}.pkl").shift(-1)
        self.pred_LSTM_ret = pd.read_pickle(
            f"pred_LSTM_ret_{self.ret_freq}.pkl").shift(-1)
        self.pred_LSTM_X_ret = pd.read_pickle(
            f"pred_LSTM_X_ret_{self.ret_freq}.pkl").shift(-1)
        self.pred_VAR_LSTM_ret = pd.read_pickle(
            f"pred_VAR_LSTM_ret_{self.ret_freq}.pkl").shift(-1)

        train_ret = pd.read_pickle(f"train_ret_{self.ret_freq}.pkl")
        val_ret = pd.read_pickle(f"val_ret_{self.ret_freq}.pkl")
        train_ret = train_ret.append(val_ret).sort_index()

        df = pd.read_excel("dataset.xlsx", index_col='Date').dropna(axis=0)
        self.mkts = df[[
            'US Equity', 'UK Equity', 'Japan Equity', 'Germany Equity',
            'Canada Equity', 'US Bond', 'UK Bond', 'Japan Bond',
            'Germany Bond', 'Canada Bond', 'EM Equity'
        ]]
        self.mkts_ret = self.mkts.pct_change(1)
        # self.rebalancing_dates = self.compare_true_ret.resample('1m').first().index

        w_bounds = (-1, 1) if long_short else (0, 1)
        vcv = train_ret.cov()
        mu = train_ret.mean()
        eff = EfficientFrontier(expected_returns=mu,
                                cov_matrix=vcv,
                                weight_bounds=w_bounds)
        self.init_weights = eff.max_sharpe(risk_free_rate=0)
        self.train_ret = train_ret
        self.w_bounds = w_bounds
Code example #6
def Resampling_EF(mu, cov, weight_bounds, rf=0.02):
    ww = []
    mm = []
    ss = []
    # Generate a distribution of returns
    return_Dis = stat.multivariate_normal(mu, cov)
    # Resample repeatedly (10 iterations here)
    for i in range(10):
        # Draw 200 random samples; each sample is one vector of n asset returns
        samples = return_Dis.rvs(200)
        # Estimate μ and Σ according to samples
        mu_est = samples.mean(0).reshape(
            len(mu), 1)  #take mean along col, reshape it to N rows in 1 col
        cov_est = np.cov(samples.T)  #T means transpose

        ef2 = EfficientFrontier(mu_est, cov_est, weight_bounds)
        #calculates weights for minimize volatility, max return, and some middle points for graphing
        #weights=list(ef.min_volatility().values())
        #w_all.append(weights)

        #Draw EfficientFrontier using old mu and cov
        (w, m, s) = All_frontier(mu, cov, ef2)
        #w2 = list(ef2.efficient_risk(risk_free_rate=rf,target_risk=0.250).values())
        #(m2, s2, sharpe2)=ef2.portfolio_performance(verbose=False,risk_free_rate=rf)

        ww.append(w)
        mm.append(m)
        ss.append(s)
    # Average the weights over the resampling iterations
    #w_average1 = np.mean(w_all, axis = 0)
    #w_average2 = np.mean(w_all2, axis = 0)
    w_average = np.mean(ww, axis=0)
    mu_a = np.array(np.mean(mm, axis=0)).flatten()
    cov_a = np.mean(ss, axis=0)
    #result = [w_average1, w_average2]#2 is max return, 1 is min vol
    return mu_a, cov_a
Code example #7
    def __get_ef_allocation(self, portfolio_size, portfolio_value):
        """Method that returns optimal allocation based on Markowitz Portfolio theory."""
        df_hist = query_quotes_history()
        df = self.__get_momentum(df_hist)
        date = df.date.max()
        df_top = df.loc[df['date'] == date]
        df_top = df_top.sort_values(by='momentum',
                                    ascending=False).head(portfolio_size)

        universe = df_top['tickr'].tolist()
        df_u = df.loc[df['tickr'].isin(universe)]

        df_u = df_u.pivot_table(index='date',
                                columns='tickr',
                                values='close',
                                aggfunc='sum')

        # Calculate expected returns and covariance
        mu = expected_returns.mean_historical_return(df_u)
        S = risk_models.sample_cov(df_u)

        # Optimise the portfolio for maximal Sharpe ratio
        # with regularization
        ef = EfficientFrontier(mu, S, gamma=1)
        ef.max_sharpe()
        cleaned_weights = ef.clean_weights()

        latest_prices = get_latest_prices(df_u)

        # Generate allocation
        da = DiscreteAllocation(cleaned_weights,
                                latest_prices,
                                total_portfolio_value=portfolio_value)

        allocations = pd.Series(da.lp_portfolio()[0], name='allocation')
        return allocations
Code example #8
 def asset_allocation(tickers, start_date):
     today = pd.datetime.today()
     if start_date == '1y':
         delta = today - pd.DateOffset(years=1)
         delta = delta.date()
         delta = delta.strftime('%Y-%m-%d')
     elif start_date == '3y':
         delta = today - pd.DateOffset(years=3)
         delta = delta.date()
         delta = delta.strftime('%Y-%m-%d')
     elif start_date == '5y':
         delta = today - pd.DateOffset(years=5)
         delta = delta.date()
         delta = delta.strftime('%Y-%m-%d')
     elif start_date == '10y':
         delta = today - pd.DateOffset(years=10)
         delta = delta.date()
         delta = delta.strftime('%Y-%m-%d')
     elif start_date == 'max':
         delta = today - pd.DateOffset(years=30)
         delta = delta.date()
         delta = delta.strftime('%Y-%m-%d')
     prices = ffn.get(tickers, start=delta)
     mu = expected_returns.mean_historical_return(prices)
     S = risk_models.sample_cov(prices)
     ef = EfficientFrontier(mu, S)
     raw_weights = ef.max_sharpe()
     cleaned_weights = ef.clean_weights()
     latest_prices = discrete_allocation.get_latest_prices(prices)
     da = DiscreteAllocation(cleaned_weights,
                             latest_prices,
                             total_portfolio_value=amount)
     allocation, leftover = da.lp_portfolio()
     st.subheader('Asset Allocation breakdown: ')
     st.write(allocation)
     st.write("Funds remaining: ${:.2f}".format(leftover))
Code example #9
    def get_max_sharpe_recent_weights(self, exp_span, target_return=2.0):
        mu = expected_returns.ema_historical_return(self.pf,
                                                    span=exp_span,
                                                    frequency=252)
        sigma = risk_models.exp_cov(self.pf, span=exp_span, frequency=252)
        ef = EfficientFrontier(mu, sigma)
        try:
            # ef.efficient_return(target_return)
            ef.max_sharpe()
            clean_weights_maxSR = ef.clean_weights()
            print('the optimal weights for recent max_SR portfolio is \n{}'.
                  format(clean_weights_maxSR))
            ef.portfolio_performance(verbose=True)
            out = []
            for weight in list(clean_weights_maxSR.values()):
                if weight == 0:
                    out.append(0)
                else:
                    out.append(weight)

            return out

        except:
            return [0] * len(self.stock_list)
Code example #10
def test_portfolio_allocation():
    df = get_data()
    e_ret = mean_historical_return(df)
    cov = sample_cov(df)
    ef = EfficientFrontier(e_ret, cov)
    w = ef.max_sharpe()

    latest_prices = discrete_allocation.get_latest_prices(df)
    allocation, leftover = discrete_allocation.portfolio(w, latest_prices)
    assert allocation == {
        "MA": 14,
        "FB": 12,
        "PFE": 51,
        "BABA": 5,
        "AAPL": 5,
        "AMZN": 0,
        "BBY": 9,
        "SBUX": 6,
        "GOOG": 1,
    }
    total = 0
    for ticker, num in allocation.items():
        total += num * latest_prices[ticker]
    np.testing.assert_almost_equal(total + leftover, 10000)
Code example #11
def test_rmse_decreases_with_value():
    # As total_portfolio_value increases, rmse should decrease.
    df = get_data()
    mu = mean_historical_return(df)
    S = sample_cov(df)
    ef = EfficientFrontier(mu, S)
    w = ef.max_sharpe()
    latest_prices = get_latest_prices(df)

    da1 = DiscreteAllocation(w, latest_prices, total_portfolio_value=10000)
    da1.greedy_portfolio()
    rmse1 = da1._allocation_rmse_error(verbose=False)
    da2 = DiscreteAllocation(w, latest_prices, total_portfolio_value=100000)
    da2.greedy_portfolio()
    rmse2 = da2._allocation_rmse_error(verbose=False)
    assert rmse2 < rmse1

    da3 = DiscreteAllocation(w, latest_prices, total_portfolio_value=10000)
    da3.lp_portfolio()
    rmse3 = da3._allocation_rmse_error(verbose=False)
    da4 = DiscreteAllocation(w, latest_prices, total_portfolio_value=100000)
    da4.lp_portfolio()
    rmse4 = da4._allocation_rmse_error(verbose=False)
    assert rmse4 < rmse3
Code example #12
def trading_bot(event, context):
    # get historical NASDAQ data
    # sort by index to ensure data is in correct order
    historical_query = """
        SELECT *
        FROM `oval-bot-232220.algorithmic_trader.daily_stock_data`
    """
    stocks = pd.read_gbq(historical_query).sort_values(by='index').reset_index(drop=True)

    # obtain symbols and most recent prices
    latest_prices = stocks.iloc[0]
    symbols = stocks.columns[1:].to_list()

    # get portfolio and sort by date to ensure data is in correct order
    portfolio_query = """
        SELECT *
        FROM `oval-bot-232220.algorithmic_trader.portfolio`
    """
    pf_history = pd.read_gbq(portfolio_query).sort_values(by='Date').reset_index(drop=True)

    # number of stocks to purchase
    pf_size = 10

    # obtain most recent portfolio and calculate its current value
    prev_pf = pf_history.iloc[-1]
    prev_symb = [prev_pf['stock{}'.format(stock_ind)] for stock_ind in range(pf_size)]
    prev_bought = [prev_pf['stock{}Bought'.format(stock_ind)] for stock_ind in range(pf_size)]
    pf_value = sum([latest_prices[prev_symb[i]]*prev_bought[i] for i in range(pf_size)]) + float(prev_pf['Unallocated'])

    # momentum score
    def momentum(closes):
        returns = np.log(closes)
        x = np.arange(len(returns))
        slope, _, rvalue, _, _ = linregress(x, returns)
        return ((1 + slope) ** 252) * (rvalue ** 2)  # annualize slope and multiply by R^2

    # get momentum scores
    momentums = pd.DataFrame()
    for symb in symbols:
        momentums[symb] = stocks[symb].rolling(90).apply(momentum, raw=False)

    # select best momentum scores
    bests = momentums.max().sort_values(ascending=False).index[:pf_size]

    # get price history, expected returns and covariance for stocks with best momentum scores
    best_prices = stocks[bests]
    best_latest = latest_prices[bests]
    mu = expected_returns.mean_historical_return(best_prices)
    S = risk_models.sample_cov(best_prices)

    # use sharpe ratio to obtain portfolio allocation weights
    ef = EfficientFrontier(mu, S, gamma=1) # Use regularization (gamma=1)
    ef.max_sharpe()
    cleaned_weights = ef.clean_weights()

    # allocate money in portfolio using weights 
    da = DiscreteAllocation(cleaned_weights, best_latest, total_portfolio_value=pf_value)
    allocation, unallocated = da.lp_portfolio()

    # create df to store info about the purchased stocks
    pf = {'Date': date.today().strftime("%Y-%m-%d"), 'Value': pf_value, 'Unallocated': unallocated}
    for stock_ind in range(pf_size):
        pf['stock{}'.format(stock_ind)] = bests[stock_ind]
        pf['stock{}Bought'.format(stock_ind)] = allocation.get(bests[stock_ind], 0)
    pf = pd.DataFrame(pf, index=[0])

    # add new portfolio to current portfolio data and upload to gbq
    pf_data = pd.concat([pf_history, pf], ignore_index=True)

    pf_data.to_gbq(destination_table='algorithmic_trader.portfolio', 
            project_id="oval-bot-232220",
            if_exists='replace')
Code example #13
tickers = ['GOOGL','FB','AAPL','NFLX','AMZN']
thelen = len(tickers)
price_data = []
for ticker in range(thelen):   
    prices = web.DataReader(tickers[ticker], start='2018-06-20', end = '2020-06-20', data_source='yahoo')   
    price_data.append(prices[['Adj Close']])
df_stocks = pd.concat(price_data, axis=1)
df_stocks.columns=tickers
df_stocks.tail()

#Annualized Return
mu = expected_returns.mean_historical_return(df_stocks)
#Sample covariance matrix of the portfolio
Sigma = risk_models.sample_cov(df_stocks)

#Max Sharpe Ratio - tangent to the efficient frontier
from pypfopt import objective_functions, base_optimizer
ef = EfficientFrontier(mu, Sigma, weight_bounds=(0,1))  # negative weight bounds would allow shorting of stocks
sharpe_pfolio=ef.max_sharpe()  # may use add_objective to enforce a minimum weighting for individual stocks
sharpe_pwt=ef.clean_weights()
print(sharpe_pwt)

#VaR Calculation
#Convert the weights dictionary from the max Sharpe ratio portfolio to an array
ticker_rx2 = []
sh_wt = np.array(list(sharpe_pwt.values()))

for a in range(thelen):  
    ticker_rx = df_stocks[[tickers[a]]].pct_change()
    ticker_rx = (ticker_rx+1).cumprod()
    ticker_rx2.append(ticker_rx[[tickers[a]]])
    ticker_final = pd.concat(ticker_rx2,axis=1)
ticker_final
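The excerpt stops after building the cumulative-return frame, before the VaR calculation announced above. A minimal sketch of that step, assuming a simple historical-simulation VaR on the weighted daily portfolio returns (the 95% confidence level and the variable names below are illustrative assumptions, not part of the original):

# Hypothetical continuation: 1-day historical-simulation VaR at an assumed 95% confidence level
daily_rx = df_stocks.pct_change().dropna()   # daily returns of each ticker
port_rx = daily_rx.dot(sh_wt)                # weighted daily returns of the max-Sharpe portfolio
var_95 = -np.percentile(port_rx, 5)          # loss at the 5th percentile of the return distribution
print("1-day 95% historical VaR: {:.2%}".format(var_95))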
Code example #14
    def next(self):
        # Pass counter_period days to compute return
        if self.counter < self.counter_period:
            self.counter += 1
        else:
            # Get data to dataframe in order to feed Pyopt
            appended_data = []
            for i, d in enumerate(self.datas):
                dt, dn = self.datetime.date(), d._name
                get = lambda mydata: mydata.get(0, self.counter_period)
                time = [d.num2date(x) for x in get(d.datetime)]
                df = pd.DataFrame({dn: get(d.close)}, index=time)
                appended_data.append(df)
            df = pd.concat(appended_data,
                           axis=1)  # df is dataframe of n assets

            for i, d in enumerate(self.datas):
                dt, dn = self.datetime.date(), d._name
                if d.close[0] > self.inds[d]['sma_50'][0] and self.inds[d][
                        'sma_50'][0] > self.inds[d]['sma_200'][0]:
                    if dn in self.selected_assets:
                        pass
                    else:
                        self.selected_assets.append(dn)
                else:
                    if dn in self.selected_assets:
                        self.selected_assets.remove(dn)

            # Create dataframe of selected_assets portfolio
            portfolio_today = df[self.selected_assets]

            # Because there are some days having no assets in portfolio may cause error
            if strategy == 'Risk_parity':
                x_t = [0.25, 0.25, 0.25, 0.25]

                cons = ({
                    'type': 'eq',
                    'fun': total_weight_constraint
                }, {
                    'type': 'ineq',
                    'fun': long_only_constraint
                })

                res = minimize(risk_budget_objective,
                               w0,
                               args=[V, x_t],
                               method='SLSQP',
                               constraints=cons,
                               options={'disp': True})

                w_rb = np.asmatrix(res.x)
            elif strategy == 'M_Max_sharpe':
                try:
                    mu = mean_historical_return(portfolio_today)
                    S = CovarianceShrinkage(portfolio_today).ledoit_wolf()
                    ef = EfficientFrontier(mu, S)
                    weights = ef.max_sharpe()
                    cleaned_weights = ef.clean_weights()

                    # Rebalance monthly
                    if self.day_counter % 24 == 0:
                        for key, value in cleaned_weights.items():
                            self.order_target_percent(key, target=value)
                            print(
                                'on {} asset of portfolio is {} with value {}'.
                                format(self.datetime.date(), key, value))
                    self.day_counter += 1
                except:
                    pass
Code example #15
def call_portfolios(trading_days, start_time, end_time, test_start_time = "", test_end_time = "", add_index=False):
    
    total_portfolio = []
    portfolio_weights = []
    portfolio_invests = []
    performances = []
    
    # Getting the S&P500 (benchmark)
    sp500 = pdr.DataReader('^GSPC', 'yahoo', start_time, end_time)['Close']
    sp500 = sp500.rename('SP500')
    sp500 = sp500.to_frame()
    
    for i in range(0, len(stock_list.columns)): 
    #for i in range(0, 1):
        
        stock = []
        stock = stock_list.iloc[:,i].tolist() ### !!! Important: change the number to get the portfolio of interest (first one is 50% percentile, etc.)
        stock = [x for x in stock if str(x) != 'nan']
        
        portfolio_name = stock_list.columns[i]
        
        # Getting stock data (maybe redo as a for loop if there are memory problems)
        temp = pdr.DataReader(stock, 'yahoo', start_time, end_time)['Close']
        data = sp500.join(temp)
        del temp
        
        # Main dataset with all tickers and without S&P
        stocks = data.drop('SP500', 1)
        
        # Drop stocks where are less than 50% of data points available, if applicable
        if filter_recent_stocks[i]:
            stocks = stocks.loc[:, (stocks.count() >= stocks.count().max()/2)]
        
        risk_free_rate = 0.0085 # !!! Risk-free rate, 10Y US treasury bond, could be adjusted
        weight_bounds = weight_bounds_tuple[i]  # taken from the tuple each iteration, defined at the beginning
        
        # !!! Different approaches could be taken from here
        mu = mean_historical_return(stocks) # Getting returns
        S = CovarianceShrinkage(stocks).ledoit_wolf() # Getting cov matrix
        
        current_weights = [0] * len(stocks.columns)
        
        # Main function to find optimal portfolio, determining optimal weights
        ef = EfficientFrontier(mu, S, weight_bounds=weight_bounds)
        ef.add_objective(objective_functions.transaction_cost, w_prev=current_weights, k=0.005)
        ef.add_objective(objective_functions.L2_reg, gamma=gamma) 
        weights = ef.max_sharpe(risk_free_rate=risk_free_rate) # using max sharpe optimization
        cleaned_weights = ef.clean_weights() # weights with pretty formatting
        print(cleaned_weights)
        
        # Printing info on returns, variance & sharpe
        temp_tuple = ef.portfolio_performance(verbose=True)
        
        temp_dict = {}
        temp_dict['Portfolio'] = portfolio_name
        temp_dict['Return'] = "{:.4f}".format(temp_tuple[0])
        temp_dict['Risk'] = "{:.4f}".format(temp_tuple[1])
        temp_dict['Sharpe'] = "{:.4f}".format(temp_tuple[2])
        performances.append(temp_dict)
        
        # Putting weights into pandas df
        max_sharpe_allocation = pd.DataFrame.from_dict(cleaned_weights, orient='index')
        max_sharpe_allocation.columns =['allocation_weight']    
        
        ### This function would change the weights to a number of shares based on the last price in the observable interval
        latest_prices = get_latest_prices(stocks)
        if add_index == False:
            da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=total_portfolio_value)    
            if discrete_algo_lp[i] == True:
                allocation, leftover = da.lp_portfolio()
            else: allocation, leftover = da.greedy_portfolio()
            
            print(allocation)
            print("Money left: " + str(leftover))
            print(da._allocation_rmse_error())
            
            # Adding discrete allocation to portfolio dataframe
            allocation = pd.DataFrame.from_dict(allocation, orient='index')
            allocation.columns =['allocation_discrete']   
            max_sharpe_allocation = max_sharpe_allocation.join(allocation)
                
            
        # Add some plots
        plot_covariance(S)
        plot_weights(weights)
        
        
        start_of_investment = str(latest_prices.name)
        if add_index == True:
            if np.any(start_of_investment in trading_days.values) ==  True:
                start_of_investment = trading_days[trading_days.index[trading_days == start_of_investment].tolist()[0] + 1]
            
            ### Function to create a portfolio and test it on the new data
            portfolio, data_new = load_data(max_sharpe_allocation, test_start_time, test_end_time)
            tmp = create_index(test_start_time, test_end_time, start_of_investment, portfolio_name, portfolio, data_new)
            
            # Add all results to list
            total_portfolio.append(tmp[0])
            portfolio_weights.append(tmp[1])
            
            with open(p+'/portfolios/performance_index.txt', 'w') as outFile:
                for d in performances:
                    line =  str(i) + ": " + " ".join([str(key)+' : '+str(value) for key,value in d.items()]) + '\n'
                    outFile.write(line)
            
            
        else:
            if np.any(start_of_investment in trading_days.values) ==  False:
                start_of_investment = trading_days[trading_days.index[trading_days == start_of_investment].tolist()[0] + 1]
        
            portfolio, data_new = load_data(max_sharpe_allocation, test_end_time, test_end_time)
            tmp2 = create_portfolio(start_of_investment, portfolio_name, portfolio, data_new)
            
            # Add all results to list
            portfolio_invests.append(tmp2)
            
            with open(p+'/portfolios/performance_investment.txt', 'w') as outFile:
                for d in performances:
                    line =  str(i) + ": " + " ".join([str(key)+' : '+str(value) for key,value in d.items()]) + '\n'
                    outFile.write(line)
    
        
    return total_portfolio, portfolio_weights, portfolio_invests 
Code example #16
File: portfolio_creator.py  Project: mingsc/Finance
    ax.plot([p['fun'] for p in efficient_portfolios], target, linestyle='-.', color='black', label='efficient frontier')
    ax.set_title('Portfolio Optimization with Individual Stocks')
    ax.set_xlabel('annualised volatility')
    ax.set_ylabel('annualised returns')
    ax.legend(labelspacing=0.8)

display_ef_with_selected(mean_returns, cov_matrix, risk_free_rate)

stocks = data
v = 100000 # total port. value

df = DataReader(stocks, 'yahoo', start, end)['Close']

# Calculate expected returns and sample covariance
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# Optimise for maximal Sharpe ratio
ef = EfficientFrontier(mu, S, gamma=0)
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
ef.portfolio_performance(verbose=True)

latest_prices = get_latest_prices(df)

da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=v)
allocation, leftover = da.lp_portfolio()
print ('-'*80)
print ('To maximize sharpe and diversification:')
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))
Code example #17
    def cv(self,
           back_test_months,
           data,
           optimizer,
           test_months,
           annual_risk_free_rate=0.02):
        """Cross-Validation backtesting method

        Args:
            back_test_months (int): number of backtesting months
            data (pandas.Series): data that includes both training and testing data
            optimizer (str): portfolio optimizer for PyPortfolioOpt
            test_months (int): number of testing months
            annual_risk_free_rate (float, optional): annual risk free rate used in calculating Sharpe ratio. Defaults to 0.02.

        Returns:
            [pandas.Series, float]: expected and realised asset performance
        """

        embargo = np.round_(0.01 * len(data), decimals=0)
        all_weights = np.zeros((back_test_months, np.shape(data)[1]))
        all_realised_annual_return = np.zeros(back_test_months)
        all_realised_annual_volatility = np.zeros(back_test_months)
        all_realised_sharpe_ratio = np.zeros(back_test_months)

        for i in range(back_test_months):

            test_start = i * len(data) / back_test_months
            test_end = test_start + len(data) / back_test_months - 1
            test = data.iloc[int(test_start):int(test_end), :]
            train = data.iloc[np.r_[0:int(test_start),
                                    int(test_end) + int(embargo):len(data)], :]

            if optimizer == "hrp":
                train_returns = train.pct_change().dropna()
                hrp = HRPOpt(train_returns)
                weights = hrp.optimize()
                weights = pd.Series(weights)
                all_weights[i] = weights
                performance = hrp.portfolio_performance(verbose=True)
            else:
                mu = mean_historical_return(train)
                S = CovarianceShrinkage(train).ledoit_wolf()
                ef = EfficientFrontier(mu, S)
                weights = ef.max_sharpe(
                ) if optimizer == "msr" else ef.min_volatility()
                weights = pd.Series(weights)
                all_weights[i] = weights
                performance = ef.portfolio_performance()

            all_realised_annual_return[i] = sum(all_weights[i] * ((test.iloc[
                (len(test) - 1)] / test.iloc[0])**(12 / test_months) - 1))
            all_realised_annual_volatility[i] = sum(
                all_weights[i] * np.std(test.pct_change().dropna()) *
                np.sqrt(251))
            all_realised_sharpe_ratio[i] = (
                all_realised_annual_return[i] -
                annual_risk_free_rate) / all_realised_annual_volatility[i]

        weights = np.mean(all_weights, axis=0)  # average weight per asset across folds
        realised_annual_return = np.mean(all_realised_annual_return)
        realised_annual_volatility = np.mean(all_realised_annual_volatility)
        realised_sharpe_ratio = np.mean(all_realised_sharpe_ratio)

        return weights, performance, realised_annual_return, realised_annual_volatility, realised_sharpe_ratio
Code example #18
File: tradebot.py  Project: ThiagoMartinsThome/TFM
    def get_top_stocks(cash, df_pf, pf_size=20):
        # BQ credentials
        client = bigquery.Client(project='tradebot-tfm')

        ### df
        # Load the historical stock data from BQ
        sql_hist = """
               SELECT *
               FROM `tradebot-tfm.tradingbot_query.dataset_hist`
               WHERE
                    date >= DATE_SUB(CURRENT_DATE(), INTERVAL 22 DAY)
               ORDER BY
                    date DESC,
                    ticker
               """

        df = client.query(sql_hist).to_dataframe()

        # Convert the date column to datetime
        df['date'] = pd.to_datetime(df['date'])

        # Get the latest date for the data we have
        current_data_date = df['date'].max()

        ## df_pred
        # Load the historical stock data from BQ
        sql_pred = """
               SELECT *
               FROM `tradebot-tfm.tradingbot_query.predictions`
               WHERE
                    date >= DATE_SUB(CURRENT_DATE(), INTERVAL 4 DAY)
               ORDER BY
                    date DESC,
                    recommendations
               """

        df_pred = client.query(sql_pred).to_dataframe()

        # Convert the date column to datetime
        df_pred['date'] = pd.to_datetime(df_pred['date'])
        df_pred = df_pred[df_pred['date'] == current_data_date]

        # Filter the df to get the top n stocks for the latest day
        df_top_stocks = df_pred[(df_pred['recommendations'] == 'Strong Buy') |
                                (df_pred['recommendations'] == 'Buy')]
        df_top_stocks = df_top_stocks.loc[df_top_stocks['date'] == pd.to_datetime(
            current_data_date)]
        df_top_stocks = df_top_stocks.sort_values(
            by='predictions', ascending=False)  #.head(portfolio_size)

        # Set the universe to the top momentum stocks for the period
        universe = df_top_stocks['ticker'].tolist()

        # Create a df with just the stocks from the universe
        df_u = df.loc[df['ticker'].isin(universe)]

        # Create the portfolio
        # Pivot to format for the optimization library
        df_u = df_u.pivot_table(index='date',
                                columns='ticker',
                                values='close',
                                aggfunc='sum')

        # Calculate expected returns and sample covariance
        mu = expected_returns.mean_historical_return(df_u)
        S = risk_models.sample_cov(df_u)

        # Optimise the portfolio for maximal Sharpe ratio
        ef = EfficientFrontier(mu, S, gamma=1)  # Use regularization (gamma=1)
        weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()

        # Allocate
        latest_prices = get_latest_prices(df_u)

        da = DiscreteAllocation(cleaned_weights,
                                latest_prices,
                                total_portfolio_value=cash)

        allocation = da.lp_portfolio()[0]
        # Put the stocks and the number of shares from the portfolio into a df
        symbol_list = []
        num_shares_list = []

        for symbol, num_shares in allocation.items():
            symbol_list.append(symbol)
            num_shares_list.append(num_shares)

        # Now that we have the stocks we want to buy we filter the df for those ones
        df_buy = df.loc[df['ticker'].isin(symbol_list)]
        # Filter for the period to get the closing price
        df_buy = df_buy.loc[df_buy['date'] == current_data_date].sort_values(
            by='ticker')
        # Add in the qty that was allocated to each stock
        df_buy['quantity'] = num_shares_list

        # Calculate the amount we own for each stock
        df_buy['amount_held'] = df_buy['close'] * df_buy['quantity']
        df_buy = df_buy.loc[df_buy['quantity'] != 0]

        # Create a list of stocks to sell based on what is currently in our pf
        sell_list = list(
            set(df_pf['ticker'].tolist()) - set(df_buy['ticker'].tolist()))

        return round(df_buy[['ticker', 'close', 'quantity', 'amount_held']],
                     2), sell_list, df, df_pred, current_data_date
Code example #19
So what is considered a good Sharpe ratio that indicates a high degree of expected return 
for a relatively low amount of risk?

Usually, any Sharpe ratio greater than 1.0 is considered acceptable to good by investors.
A ratio higher than 2.0 is rated as very good.
A ratio of 3.0 or higher is considered excellent.
A ratio under 1.0 is considered sub-optimal.
- Investopedia (https://www.investopedia.com/ask/answers/010815/what-good-sharpe-ratio.asp)
'''

# Calculate expected returns and the annualised sample covariance matrix of (daily) asset returns.
mu = expected_returns.mean_historical_return(df) # returns.mean() * 252, NOTE: Mu is mean
S = risk_models.sample_cov(df) # Get the sample covariance matrix

# Optimize for maximal Sharpe ratio
ef = EfficientFrontier(mu, S) # Create EfficientFrontier Object
weights = ef.max_sharpe() #Maximize the Sharpe ratio, and get the raw weights
cleaned_weights = ef.clean_weights() #Helper method to clean the raw weights, setting any weights whose absolute values are below the cutoff to zero, and rounding the rest.
#ef.save_weights_to_file("weights.csv")  # save weights to file
print(cleaned_weights) #Note the weights may have some rounding error, meaning they may not add up exactly to 1 but should be close
ef.portfolio_performance(verbose=True) #Returns and shows the (Expected return, volatility, Sharpe ratio)

# pip install pulp   (needed for DiscreteAllocation's lp_portfolio solver)

#Get the discrete allocation of each share per stock
'''
PyPortfolioOpt also provides a method that will allow you to convert the allocated
weights to an actual number of shares that you can buy.
Simply get the latest prices and the desired portfolio amount ($15000 in this example).
'''
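The snippet breaks off before the allocation code itself. A minimal sketch of that step, assuming the df and cleaned_weights defined above and the $15000 figure mentioned in the comment:

from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

latest_prices = get_latest_prices(df)  # most recent price of each asset in df
da = DiscreteAllocation(cleaned_weights, latest_prices, total_portfolio_value=15000)
allocation, leftover = da.lp_portfolio()  # integer share counts via linear programming
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))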
Code example #20
def create_portfolio() -> None:
    """
	Create portfolio based on risk assessment and 
	efficient portfolio optimization
	"""
    st.subheader("Portfolio Parameters")
    st.write("Portfolio Name")
    portfolio_name = st.text_input(
        'Portfolio Name',
        'Portfolio_' + datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
    st.write("Portfolio Description")
    portfolio_description = st.text_input('Portfolio Description',
                                          'General Investment')
    st.write("Investment Amount (USD)")
    portfolio_investment = st.number_input(label='', min_value=0, value=100000)
    st.markdown("---")
    st.subheader('Risk Assessment Form')
    st.write('Portfolio Risk Preference')
    risk_level = st.selectbox('Portfolio Risk Preference',
                              ('Low', 'Moderate', 'High'))
    if st.button('Build Portfolio'):
        if risk_level == 'High':
            s_p_weight = 0.2
            equity_investment = 0.8 * portfolio_investment
        elif risk_level == 'Moderate':
            s_p_weight = 0.4
        else:
            s_p_weight = 0.6
        st.markdown("---")

        # Separate investments into S&P 500 and Equities based on risk assessment
        s_p_investment = portfolio_investment * s_p_weight
        equity_investment = portfolio_investment - s_p_investment

        # Create stock portfolio
        price_data_path = os.path.join(
            str(Path(__file__).resolve().parents[1]), 'data/price_data.csv')
        price_df = pd.read_csv(price_data_path)
        price_df['Date'] = pd.to_datetime(price_df['Date'])
        price_df.set_index('Date', inplace=True)

        # Drop the S&P from price data, portfolio is optimized only for stocks
        s_p_data = pd.DataFrame(index=price_df.index)
        s_p_data['Close'] = price_df['^GSPC']
        equity_df = price_df.drop(columns=['^GSPC'], axis=1)

        # Optimize portfolio
        # Calculate the expected annualized returns and annualized covariance matrix of daily returns
        mu = expected_returns.mean_historical_return(equity_df)
        S = risk_models.sample_cov(equity_df)

        # Optimize the Sharpe ratio
        ef = EfficientFrontier(mu, S)
        raw_weights = ef.max_sharpe()
        cleaned_weights = ef.clean_weights()

        latest_prices = get_latest_prices(equity_df)
        da = DiscreteAllocation(cleaned_weights,
                                latest_prices,
                                total_portfolio_value=equity_investment)
        allocation, leftover = da.lp_portfolio()
        print('Discrete Allocation: ', allocation)
        print('Funds Remaining: ', leftover)

        # Store company name into a list
        company_name = []
        discrete_allocation = []
        symbols = list(allocation.keys())
        investment_amount = []
        for symbol in allocation:
            company_name.append(get_company_name(symbol))
            discrete_allocation.append(allocation.get(symbol))
            print(symbol, price_df[symbol].values[-1], allocation.get(symbol))
            investment_amount.append(price_df[symbol].values[-1] *
                                     allocation.get(symbol))

        # Append the S&P Investment to the portfolio
        s_p_allocation = int(s_p_investment / s_p_data.tail(1)['Close'])
        company_name.append("S&P 500")
        symbols.append('^GSPC')
        discrete_allocation.append(s_p_allocation)
        investment_amount.append(s_p_investment)

        # Round off investments to 2 decimal places
        investment_amount = ['%.2f' % elem for elem in investment_amount]

        # Create the stock portfolio dataframe
        portfolio_df = pd.DataFrame(columns=[
            'Company_Name', 'Ticker', 'Purchase_Units', 'Investment_Amount'
        ])
        portfolio_df['Company_Name'] = company_name
        portfolio_df['Ticker'] = symbols
        portfolio_df['Purchase_Units'] = discrete_allocation
        portfolio_df['Investment_Amount'] = investment_amount

        st.subheader('Recommended Portfolio')
        st.write(
            'The recommended portfolio is designed using Markowitz Portfolio Optimization, based on historical returns and risks for stocks and their associated covariances'
        )
        st.write(portfolio_df)

        # Convert weights to numpy array
        weights = np.array([(1 - s_p_weight) * x[1]
                            for x in cleaned_weights.items()],
                           dtype=float)

        # Add the S_P weight to weights
        weights = np.append(weights, s_p_weight)

        price_matrix = price_df.values
        initial_portfolio_holding = ((portfolio_investment * weights).T /
                                     price_matrix[0, :])

        # # For comparison let's get historical S&P 500 values and get share holding
        s_p_prices = s_p_data['Close'].values.reshape(-1, 1)
        initial_s_p_holding = portfolio_investment / s_p_prices[0, 0]

        # # Compute portfolio value and s_p_holding value
        portfolio_value = np.sum(initial_portfolio_holding * price_matrix,
                                 axis=1)
        s_p_value = np.sum(initial_s_p_holding * s_p_prices, axis=1)

        performance_df = pd.DataFrame(index=s_p_data.index)
        performance_df['Portfolio'] = portfolio_value
        performance_df['SP500'] = s_p_value
        performance_df = performance_df[~performance_df.index.to_period('m').
                                        duplicated()]
        performance_df.to_csv('pf.csv')
        # df1 = performance_df.melt(id_vars=['Date']+list(performance_df.keys()[5:]), var_name='Value')
        line_chart = px.line(performance_df,
                             x=performance_df.index,
                             y=['SP500', 'Portfolio'])
        st.subheader('Backtest Performance over Analysis Period')
        st.plotly_chart(line_chart)

        # Create New Portfolio
        new_portfolio = Portfolio.objects.create(
            user=User.objects.get(pk=1),
            name=portfolio_name,
            description=portfolio_description)
        for index, row in portfolio_df.iterrows():
            new_transaction = Transaction.objects.create(
                portfolio=new_portfolio,
                asset=Asset.objects.get(asset_symbol=row['Ticker']),
                transaction_date=timezone.now(),
                transaction_type='Buy',
                amount=row['Investment_Amount'])
        st.write("Added New Portfolio - {}".format(new_portfolio.name))
Code example #21
def optimize_efficient_risk(expected_returns_df, cov_matrix, target_risk):

    EFOptimizer = EfficientFrontier(expected_returns_df, cov_matrix)
    weights_dict = EFOptimizer.efficient_risk(target_risk)

    return weights_dict
Code example #22
def portfolio_optimizer():

  etfs_meta = { 'SPY': 'SPDR S&P 500 ETF Trust'
        , 'XLF': 'Financial Select Sector SPDR Fund'
        , 'QQQ': 'Invesco QQQ Trust' 
        , 'XLE': 'Energy Select Sector SPDR Fund'
        , 'IAU': 'iShares Gold Trust'
        , 'KRE': 'SPDR S&P Regional Banking ETF'
        , 'XLI': 'Industrial Select Sector SPDR Fund'
        , 'IYR': 'iShares U.S. Real Estate ETF'
        , 'IEFA': 'iShares Core MSCI EAFE ETF'
        , 'XLP': 'Consumer Staples Select Sector SPDR Fund'}


  etfs_options = list(etfs_meta.keys())

  start_date = "2015-01-01"
  # t-1
  yesterday = (date.today() - timedelta(days=1)).strftime("%Y-%m-%d") 
  end_date = yesterday

  @st.cache
  def load_data(etfs, start, end):
      df = yf.download(" ".join(etfs), start=start, end=end)['Adj Close']
      return df

  df = load_data(etfs_options, start_date, end_date)
  returns = df.pct_change().dropna()
  
  st.markdown("---")
  st.subheader("ETF list")

  # Show ETF metadata
  etfs_metadata_df = pd.DataFrame.from_dict(etfs_meta, orient='index').reset_index().rename(columns={"index": "Ticker", 0: "Short description"})
  st.table(etfs_metadata_df)

  st.markdown("---")
  st.subheader("Historical prices")

  # Visualize historical prices
  # title = 'Historical Adj. Close Price of available ETFs'

  df_temp = df.copy()
  df_temp['Date'] = df_temp.index
  df_plot = pd.melt(df_temp, id_vars='Date', value_vars=df_temp.columns[:-1]).rename(columns={'variable': 'Asset', 'value': 'Price ($)'})
  fig = px.line(df_plot, x='Date', y='Price ($)', color='Asset')
  st.plotly_chart(fig)


  st.subheader("Parameters")

  etfs_chosen = st.multiselect(
  'Which ETFs would you like to potentially add into your portfolio? (recommended to include all)',
  etfs_options, default=etfs_options)

  investment_amount = st.number_input('Investment amount (between 1000 and 10000)', min_value=1000, max_value=10000)
  st.write('Your chosen amount is ', investment_amount)


  if st.checkbox("All set, let's run the optimization model!"):

      df = df[etfs_chosen]

      # calculate expected returns
      mu = expected_returns.mean_historical_return(df)

      mu_df = pd.DataFrame(mu).reset_index().rename(columns={"index": "Ticker", 0: "Expected Return (%)"}).sort_values(by="Expected Return (%)", ascending=False)
      mu_df['Expected Return (%)'] = round(mu_df['Expected Return (%)']*100,2)

      st.subheader("Expected returns")
      st.markdown("Showing returns that we could expect when taking into account historical data.")
      fig = px.bar(mu_df, x='Ticker', y='Expected Return (%)', width=300, height=200)
      st.plotly_chart(fig)

      # calculate estimated covariance matrix (risk model) using Ledoit-Wolf shrinkage
      # reduces the extreme values in the covariance matrix 
      S = risk_models.CovarianceShrinkage(df).ledoit_wolf()

      st.subheader("Covariance matrix")
      st.markdown("Showing relationship in price movement between different ETFs.")
      sns.heatmap(S.corr())
      st.pyplot()

      # Optimize the portfolio performance
      # Sharpe ratio: portfolio's return less risk-free rate, per unit of risk (volatility)

      ef = EfficientFrontier(mu, S, weight_bounds=(0.01, 1))

      weights = ef.max_sharpe()
      cleaned_weights = ef.clean_weights()

      portfolio_performance = ef.portfolio_performance()
      st.markdown("---")
      st.subheader("Portfolio performance")
      st.markdown('Summary metrics:')
      st.markdown('Expected annual return: {:.2f}%'.format(portfolio_performance[0]*100))
      st.markdown('Annual volatility: {:.2f}%'.format(portfolio_performance[1]*100))
      st.markdown('Sharpe Ratio: {:.2f}'.format(portfolio_performance[2]))

      latest_prices = get_latest_prices(df)
      da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=investment_amount)

      latest_prices_column = pd.DataFrame(latest_prices).columns[0]
      latest_prices_df = pd.DataFrame(latest_prices).reset_index().rename(columns={"index": "Ticker", latest_prices_column: "Latest Price"}).sort_values(by='Ticker')

      allocation, leftover = da.lp_portfolio()

      st.subheader("Portfolio allocation")

      allocation_df = pd.DataFrame.from_dict(allocation, orient='index').reset_index().rename(columns={"index": "Ticker", 0: "Shares"}).sort_values(by='Ticker')
      allocation_df = pd.merge(allocation_df, latest_prices_df, on='Ticker', how='left')
      allocation_df['Amount'] = allocation_df['Shares'] * allocation_df['Latest Price']
      allocation_df.sort_values(by='Amount', inplace=True, ascending=False)

      allocation_df['Allocation percentage'] = ((allocation_df['Amount'] / allocation_df['Amount'].sum())*100).round(2)
      allocation_df['Amount'] = ['$' + str(round(item,2)) for item in allocation_df['Amount']]
      allocation_df['Latest Price'] = ['$' + str(round(item,2)) for item in allocation_df['Latest Price']]
      
      allocation_df.reset_index(inplace=True, drop=True)

      st.table(allocation_df)

      title = "Allocation visualization (# of shares)"
      fig = px.bar(allocation_df, x='Ticker', y='Shares', width=600, height=400,title=title)
      st.plotly_chart(fig)

      title = "Allocation visualization (% invested)"
      fig = px.bar(allocation_df, x='Ticker', y='Allocation percentage', width=600, height=400,title=title)
      st.plotly_chart(fig)

      invested_amount = investment_amount - leftover
      st.markdown('Funds invested: ${:.2f}'.format(invested_amount))
      st.markdown('Funds remaining: ${:.2f}'.format(leftover))
Code example #23
correlation = np.array([
        [1, 0.488, 0.478, 0.515, 0.439, 0.512, 0.491],
        [0.488, 1, 0.664, 0.655, 0.310, 0.608, 0.779],
        [0.478, 0.664, 1, 0.861, 0.355, 0.783, 0.668],
        [0.515, 0.655, 0.861, 1, 0.354, 0.777, 0.653],
        [0.439, 0.310, 0.355, 0.354, 1, 0.405, 0.306],
        [0.512, 0.608, 0.783, 0.777, 0.405, 1, 0.652],
        [0.491, 0.779, 0.668, 0.653, 0.306, 0.652, 1]])
# Standard deviations
std = np.array([[0.16, 0.203, 0.248, 0.271, 0.21, 0.2, 0.187]])
# Compute the covariance matrix from the correlation matrix and standard deviations
Sigma = correlation * np.dot(std.T, std)
Sigma = pd.DataFrame(Sigma, index = assetName, columns=assetName)
# Build the risk-based portfolio
# (minimum-variance optimization)
min_risk_weight = EfficientFrontier(None, Sigma).min_volatility()
# Apply Black-Litterman using the risk-based portfolio as the prior
# Parameters delta and tau; the values follow He & Litterman (1999)
delta = 2.5
tau = 0.05
# Compute the equilibrium returns (reverse optimization),
# i.e. infer the expected returns from the covariance matrix and the portfolio weights
r_eq = np.asmatrix(delta * np.dot(Sigma, np.array(list(min_risk_weight.values()), dtype="float"))).T
# Construct the views
P = np.array([
        [0,0,-0.295,1,0,-0.705,0],
        [0,1,0,0,0,0,-1]]) # 2x7 matrix (2: number of views, 7: number of assets)
Q = np.array([[0.05],[0.03]]) # 2-vector
Omega = np.array([
        [0.001065383332,0],
        [0,0.0008517381]])
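The snippet builds the equilibrium returns r_eq, the view matrices P and Q, and the view-uncertainty matrix Omega, but stops before combining them. A minimal sketch of how these inputs could be passed to PyPortfolioOpt's BlackLittermanModel, assuming that class is available in the installed version (the final max_sharpe step is an added illustration, not part of the original):

from pypfopt.black_litterman import BlackLittermanModel

# Combine the equilibrium prior with the views, using the He & Litterman parameters above
bl = BlackLittermanModel(Sigma, pi=np.asarray(r_eq).flatten(), P=P, Q=Q, omega=Omega, tau=tau)
bl_mu = bl.bl_returns()  # posterior expected returns
bl_S = bl.bl_cov()       # posterior covariance matrix

# Optimise on the posterior estimates, e.g. for the maximum Sharpe ratio
bl_weights = EfficientFrontier(bl_mu, bl_S).max_sharpe()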
Code example #24
def main():
    print("Starting...")
    # Get the stock symbols / tickers in the portfolio
    # FAANG (Facebook, Amazon, Apple, Netflix, Google)

    assets = ['FB', 'AMZN', 'AAPL', 'NFLX', 'GOOG']

    # Assign weights to the stocks.
    weights = np.array([0.2, 0.2, 0.2, 0.2, 0.2])

    # Get the stock/ portfolio starting date
    stock_start_date = '2013-01-01'

    # Get the stocks ending date (today)
    today = datetime.today().strftime('%Y-%m-%d')

    # Create a data frame to store the adjusted close price of the stocks
    df = pd.DataFrame()

    # Store the adjusted close price of the stock into the df
    for stock in assets:
        df[stock] = web.DataReader(stock,
                                   data_source='yahoo',
                                   start=stock_start_date,
                                   end=today)['Adj Close']

    # Visually show the stock / portfolio
    title = 'Portfolio adj. close price history'
    # Get the stocks
    my_stocks = df
    # Create and plot the graph
    for c in my_stocks.columns.values:
        plt.plot(my_stocks[c], label=c)
    plt.title(title)
    plt.xlabel('Date', fontsize=18)
    plt.ylabel('Adj. Price USD', fontsize=18)
    plt.legend(my_stocks.columns.values, loc='upper left')
    plt.show()

    # Show the daily simple return
    returns = df.pct_change()

    # Create the annualized covariance matrix
    cov_matrix_annual = returns.cov() * 252

    # Calculate the portfolio variance
    port_variance = np.dot(weights.T, np.dot(cov_matrix_annual, weights))

    # Calculate the portfolio volatility aka standard deviation
    port_volatility = np.sqrt(port_variance)

    # Calculate annual portfolio return
    portfolio_simple_annual_return = np.sum(returns.mean() * weights) * 252

    # Show the expected annual return, volatility (risk) and variance
    percent_variance = str(round(port_variance, 2) * 100) + '%'
    percent_volatility = str(round(port_volatility, 2) * 100) + '%'
    percent_return = str(round(portfolio_simple_annual_return, 2) * 100) + '%'
    print('Expected annual return: ' + percent_return)
    print('Annual volatility / risk: ' + percent_volatility)
    print('Annual variance: ' + percent_variance)

    # Portfolio optimization
    # Calculate the expected returns and the annualised sample covariance matrix of asset returns

    mu = expected_returns.mean_historical_return(df)
    s = risk_models.sample_cov(df)

    # Optimize for maximum sharpe (William Sharpe) ratio
    ef = EfficientFrontier(mu, s)
    ef.max_sharpe()  # compute the raw max-Sharpe weights before cleaning them
    cleaned_weights = ef.clean_weights()
    print(cleaned_weights)
    print(ef.portfolio_performance(verbose=True))

    # Get the discrete allocation of each share per stock
    latest_prices = get_latest_prices(df)
    weights = cleaned_weights
    da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=500)

    allocation, leftover = da.lp_portfolio()
    print('Discrete allocation: ' + str(allocation))
    print('Funds remaining: ${:.2f}'.format(leftover))
Code example #25
assets = data.columns

# In[ ]:

from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns

# In[ ]:

mean = expected_returns.mean_historical_return(data)
S = risk_models.sample_cov(data)

# In[ ]:

ef = EfficientFrontier(mean, S)
weights = ef.max_sharpe()
clean_weights = ef.clean_weights()
print(clean_weights)
ef.portfolio_performance(verbose=True)

# In[ ]:

Investment_fund = float(input("Enter how much you want to invest: $"))

# In[ ]:

from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices

portfolio_value = Investment_fund
latest_prices = get_latest_prices(data)
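The cell ends right after fetching the latest prices; a minimal sketch of the allocation step that the imported DiscreteAllocation presumably feeds into (the choice of lp_portfolio is an assumption):

da = DiscreteAllocation(clean_weights, latest_prices, total_portfolio_value=portfolio_value)
allocation, leftover = da.lp_portfolio()
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))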
Code example #26
df = pd.read_csv('smport.csv', index_col='Date', parse_dates=True)
print(df.head(10))
df.info()
print(df.describe())

from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.efficient_frontier import EfficientFrontier
# Calculating expected returns mu
mu = expected_returns.mean_historical_return(df)

# Calculating the covariance matrix S
Sigma = risk_models.sample_cov(df)

# Obtaining the efficient frontier
ef = EfficientFrontier(mu, Sigma)
print(mu, Sigma)
returns = df.pct_change()
covMatrix = returns.cov() * 251
print(covMatrix)
# Getting the minimum risk portfolio for a target return
weights = ef.efficient_return(0.2)
print(weights)
l = list(df.columns)
print(l)
size = list(weights.values())
print(size)
print(type(size))
plt.pie(size, labels=l, autopct='%1.1f%%')
plt.title('Return=20%')
plt.show()
Code example #27
# Remove the date column from the df
# columns = axis 1
df.drop(columns=['Date'], axis=1, inplace=True)
#print(df)
#df
#exit(1)

# Calculate the expected annualized returns and the annualized
# sample covariance matrix of the daily asset returns.
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# SR describes the excess return you get for the volatility you endure holding
# a riskier asset.

ef = EfficientFrontier(mu, S)  # create the EF object
weights = ef.max_sharpe()  # Get the raw weights

# this will set weights below cutoff to zero, rounding the rest.
cleaned_weights = ef.clean_weights()
print(cleaned_weights)

# Show the expected return, volatility, and Sharpe ratio.
# In a Jupyter notebook this displays as an ordered dict;
# the weights should sum to 1.
ef.portfolio_performance(verbose=True)

#Figure out the allocations for each stock.
# pip install pulp

#Get the discrete allocation of each share per stock
Code example #28
# Import the packages
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.efficient_frontier import EfficientFrontier

# Calculate expected returns mu
mu = expected_returns.mean_historical_return(stock_prices)

# Calculate the covariance matrix S
Sigma = risk_models.sample_cov(stock_prices)

# Obtain the efficient frontier
ef = EfficientFrontier(mu, Sigma)
print(mu, Sigma)

# Get the returns from the stock price data
returns = stock_prices.pct_change()

# Calculate the annualized covariance matrix
covMatrix = returns.cov() * 252

# Calculate the covariance matrix Sigma using a `PyPortfolioOpt` function
Sigma = risk_models.sample_cov(stock_prices)

# Print both covariance matrices
print(covMatrix, Sigma)

# Get the minimum risk portfolio for a target return
weights = ef.efficient_return(0.2)
print(weights)
Code example #29
            label='efficient frontier')
    ax.set_title('Portfolio Optimization with Individual Stocks')
    ax.set_xlabel('annualised volatility')
    ax.set_ylabel('annualised returns')
    ax.legend(labelspacing=0.8)
    plt.show()


display_ef_with_selected(mean_returns, cov_matrix, risk_free_rate)

stocks = df
n = 1000  # total port. value

# Calculate expected returns and sample covariance
mu = expected_returns.mean_historical_return(df)
S = risk_models.sample_cov(df)

# Optimise for maximal Sharpe ratio
ef = EfficientFrontier(mu, S)
raw_weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
ef.portfolio_performance(verbose=True)

latest_prices = get_latest_prices(df)

da = DiscreteAllocation(cleaned_weights,
                        latest_prices,
                        total_portfolio_value=n)
allocation, leftover = da.lp_portfolio()
print("Discrete allocation:", allocation)
print("Funds remaining: ${:.2f}".format(leftover))
Code example #30
def optimize_min_volatility(expected_returns_df, cov_matrix):

    EFOptimizer = EfficientFrontier(expected_returns_df, cov_matrix)
    weights_dict = EFOptimizer.min_volatility()

    return weights_dict