def ef_risk_strategy(returns=None, cov_matrix=None, target_volatility=5.0):
    """Optimise a portfolio for a target volatility via the efficient frontier.

    Args:
        returns: expected returns per asset (required despite the default).
        cov_matrix: covariance matrix of asset returns (required despite the default).
        target_volatility: volatility target forwarded to
            ``EfficientFrontier.efficient_risk``.

    Returns:
        Tuple of ``(weights, portfolio_performance(ef), ef)``.

    Raises:
        ValueError: if ``returns`` or ``cov_matrix`` is None.
    """
    # Explicit validation instead of ``assert``: asserts are stripped when
    # Python runs with -O, which would silently disable these checks.
    if returns is None:
        raise ValueError("returns must not be None")
    if cov_matrix is None:
        raise ValueError("cov_matrix must not be None")
    ef = EfficientFrontier(returns, cov_matrix)
    # L2 regularisation discourages many negligible weights.
    ef.add_objective(objective_functions.L2_reg, gamma=0.1)
    weights = ef.efficient_risk(target_volatility=target_volatility)
    return weights, portfolio_performance(ef), ef
def ef_sharpe_strategy(returns=None, cov_matrix=None):
    """Maximise the Sharpe ratio of the portfolio.

    Args:
        returns: expected returns per asset (required despite the default).
        cov_matrix: covariance matrix of asset returns (required despite the default).

    Returns:
        Tuple of ``(weights, portfolio_performance(ef), ef)``.

    Raises:
        ValueError: if ``returns`` or ``cov_matrix`` is None.
    """
    # BUG FIX: the original asserted only ``returns``; a missing cov_matrix
    # would fail opaquely inside EfficientFrontier. Also replaced ``assert``
    # (stripped under -O) with explicit validation.
    if returns is None:
        raise ValueError("returns must not be None")
    if cov_matrix is None:
        raise ValueError("cov_matrix must not be None")
    ef = EfficientFrontier(returns, cov_matrix)
    ef.add_objective(objective_functions.L2_reg, gamma=0.1)  # eliminate minor weights
    weights = ef.max_sharpe()
    return weights, portfolio_performance(ef), ef
def ef_sharpe_strategy(ld: LazyDictionary, **kwargs) -> None:
    """Register a max-Sharpe optimiser on the lazy dictionary.

    Stores the ``EfficientFrontier`` instance under ``ld["optimizer"]`` and a
    deferred weight computation under ``ld["raw_weights"]``; the optimisation
    only runs when "raw_weights" is first accessed.
    """
    optimizer = EfficientFrontier(
        expected_returns=kwargs.get("returns"),
        cov_matrix=kwargs.get("cov_matrix"),
    )
    # Penalise tiny allocations so the optimiser prefers fewer, larger weights.
    optimizer.add_objective(objective_functions.L2_reg, gamma=0.1)
    ld["optimizer"] = optimizer
    ld["raw_weights"] = lambda d: d["optimizer"].max_sharpe()
def ef_risk_strategy(ld: LazyDictionary, returns=None, cov_matrix=None, target_volatility=5.0) -> None:
    """Register an efficient-risk optimiser on the lazy dictionary.

    Stores the ``EfficientFrontier`` instance under ``ld["optimizer"]`` and a
    deferred ``efficient_risk`` computation under ``ld["raw_weights"]``.

    Args:
        ld: LazyDictionary receiving the "optimizer" and "raw_weights" entries.
        returns: expected returns per asset (required despite the default).
        cov_matrix: covariance matrix of asset returns (required despite the default).
        target_volatility: volatility target for ``efficient_risk``.

    Raises:
        ValueError: if ``returns`` or ``cov_matrix`` is None.
    """
    # Explicit validation instead of ``assert``: asserts are stripped when
    # Python runs with -O, which would silently disable these checks.
    if returns is None:
        raise ValueError("returns must not be None")
    if cov_matrix is None:
        raise ValueError("cov_matrix must not be None")
    ef = EfficientFrontier(returns, cov_matrix)
    ef.add_objective(objective_functions.L2_reg, gamma=0.1)
    ld["optimizer"] = ef
    # target_volatility is captured by the closure; evaluation is deferred
    # until "raw_weights" is first accessed.
    ld["raw_weights"] = lambda ld: ld["optimizer"].efficient_risk(
        target_volatility=target_volatility)
def calculateInvestment(limit=10, count=10, write_to_file=True, show_cla=False, tpv=20000):
    """Build a minimum-volatility portfolio and convert it to share counts.

    Args:
        limit: number of symbols (from the database list) to include.
        count: forwarded to ``createDataFrame`` (presumably a row/period count
            -- TODO confirm against its definition).
        write_to_file: when truthy, save cleaned weights to ``weights.txt``.
        show_cla: when truthy, plot the critical line algorithm frontier.
        tpv: total portfolio value for the discrete allocation.

    Returns:
        Tuple of ``(allocation_minv, leftover)`` from the LP discrete allocation.
    """
    symbols = getSymbolsFromDatabase()
    prices = createDataFrame(symbols[:limit], count)
    mu = expected_returns.mean_historical_return(prices)
    # Ledoit-Wolf shrinkage gives a better-conditioned covariance estimate.
    S = risk_models.CovarianceShrinkage(prices).ledoit_wolf()
    # weight_bounds=(-1, 1) allows short positions.
    ef = EfficientFrontier(mu, S, weight_bounds=(-1, 1))
    ef.add_objective(objective_functions.L2_reg)
    ef.min_volatility()
    c_weights = ef.clean_weights()
    # Idiom fix: truthiness instead of ``== True`` comparisons.
    if write_to_file:
        ef.save_weights_to_file("weights.txt")
    if show_cla:
        cla = CLA(mu, S)
        ef_plot(cla)
    ef.portfolio_performance(verbose=True)
    latest_prices = disc_alloc.get_latest_prices(prices)
    allocation_minv, leftover = disc_alloc.DiscreteAllocation(
        c_weights, latest_prices, total_portfolio_value=tpv).lp_portfolio()
    return allocation_minv, leftover
def call_portfolios(trading_days, start_time, end_time, test_start_time="", test_end_time="", add_index=False):
    """For each portfolio column in the module-level ``stock_list``, download
    prices, optimise a max-Sharpe portfolio, discretise it to share counts,
    and back-test it over the test interval.

    NOTE(review): this function reads several module-level names defined
    elsewhere in the file (``stock_list``, ``filter_recent_stocks``,
    ``weight_bounds_tuple``, ``gamma``, ``total_portfolio_value``,
    ``discrete_algo_lp``, ``p``, ``load_data``, ``create_index``,
    ``create_portfolio``). Block nesting was reconstructed from mangled
    formatting -- verify branch boundaries against the original file.

    Returns:
        Tuple ``(total_portfolio, portfolio_weights, portfolio_invests)``.
    """
    total_portfolio = []
    portfolio_weights = []
    portfolio_invests = []
    performances = []
    # Getting the S&P500 (benchmark)
    sp500 = pdr.DataReader('^GSPC', 'yahoo', start_time, end_time)['Close']
    sp500 = sp500.rename('SP500')
    sp500 = sp500.to_frame()
    for i in range(0, len(stock_list.columns)):
        # for i in range(0, 1):
        stock = []
        stock = stock_list.iloc[:, i].tolist()
        # !!! Important: change the number to get the portfolio of interest
        # (first one is 50% percentile, etc.)
        stock = [x for x in stock if str(x) != 'nan']  # drop NaN padding cells
        portfolio_name = stock_list.columns[i]
        # Getting stock data (maybe re-do to for loop, if there be problems with memory)
        temp = pdr.DataReader(stock, 'yahoo', start_time, end_time)['Close']
        data = sp500.join(temp)
        del temp
        # Main dataset with all tickers and without S&P
        stocks = data.drop('SP500', 1)
        # Drop stocks where are less than 50% of data points available, if applicable
        if filter_recent_stocks[i]:
            stocks = stocks.loc[:, (stocks.count() >= stocks.count().max() / 2)]
        risk_free_rate = 0.0085  # !!! Risk-free rate, 10Y US treasury bond, could be adjusted
        weight_bounds = weight_bounds_tuple[i]  # taken from the tuple each iteration, defined at the beginning
        # !!! Different approaches could be taken from here
        mu = mean_historical_return(stocks)  # Getting returns
        S = CovarianceShrinkage(stocks).ledoit_wolf()  # Getting cov matrix
        # All-zero previous weights: transaction cost is charged from a cash start.
        current_weights = [0] * len(stocks.columns)
        # Main function to find optimal portfolio, determining optimal weights
        ef = EfficientFrontier(mu, S, weight_bounds=weight_bounds)
        ef.add_objective(objective_functions.transaction_cost,
                         w_prev=current_weights, k=0.005)
        ef.add_objective(objective_functions.L2_reg, gamma=gamma)
        weights = ef.max_sharpe(risk_free_rate=risk_free_rate)  # using max sharpe optimization
        cleaned_weights = ef.clean_weights()  # weights with pretty formatting
        print(cleaned_weights)
        # Printing info on returns, variance & sharpe
        temp_tuple = ef.portfolio_performance(verbose=True)
        temp_dict = {}
        temp_dict['Portfolio'] = portfolio_name
        temp_dict['Return'] = "{:.4f}".format(temp_tuple[0])
        temp_dict['Risk'] = "{:.4f}".format(temp_tuple[1])
        temp_dict['Sharpe'] = "{:.4f}".format(temp_tuple[2])
        performances.append(temp_dict)
        # Putting weights into pandas df
        max_sharpe_allocation = pd.DataFrame.from_dict(cleaned_weights, orient='index')
        max_sharpe_allocation.columns = ['allocation_weight']
        # This function would change the weights to a number of shares
        # based on the last price in the observable interval
        latest_prices = get_latest_prices(stocks)
        if add_index == False:
            da = DiscreteAllocation(weights, latest_prices,
                                    total_portfolio_value=total_portfolio_value)
            # Per-portfolio choice between LP and greedy discretisation.
            if discrete_algo_lp[i] == True:
                allocation, leftover = da.lp_portfolio()
            else:
                allocation, leftover = da.greedy_portfolio()
            print(allocation)
            print("Money left: " + str(leftover))
            print(da._allocation_rmse_error())  # NOTE(review): private PyPortfolioOpt API
            # Adding discrete allocation to portfolio dataframe
            allocation = pd.DataFrame.from_dict(allocation, orient='index')
            allocation.columns = ['allocation_discrete']
            max_sharpe_allocation = max_sharpe_allocation.join(allocation)
        # Add some plots
        plot_covariance(S)
        plot_weights(weights)
        # Date label of the last observed price row; used as the back-test start.
        start_of_investment = str(latest_prices.name)
        if add_index == True:
            # Shift to the next trading day when the date is already a trading day.
            if np.any(start_of_investment in trading_days.values) == True:
                start_of_investment = trading_days[
                    trading_days.index[trading_days == start_of_investment].tolist()[0] + 1]
            # Function to crete a portfolio and test it on the new data
            portfolio, data_new = load_data(max_sharpe_allocation, test_start_time, test_end_time)
            tmp = create_index(test_start_time, test_end_time, start_of_investment,
                               portfolio_name, portfolio, data_new)
            # Add all results to list
            total_portfolio.append(tmp[0])
            portfolio_weights.append(tmp[1])
            # Rewritten every iteration: file reflects performances so far.
            with open(p + '/portfolios/performance_index.txt', 'w') as outFile:
                for d in performances:
                    line = str(i) + ": " + " ".join(
                        [str(key) + ' : ' + str(value) for key, value in d.items()]) + '\n'
                    outFile.write(line)
        else:
            # NOTE(review): opposite condition to the branch above (== False)
            # -- shifts only when the date is NOT a trading day; confirm intent.
            if np.any(start_of_investment in trading_days.values) == False:
                start_of_investment = trading_days[
                    trading_days.index[trading_days == start_of_investment].tolist()[0] + 1]
            # NOTE(review): test_end_time passed twice (start and end) -- confirm intent.
            portfolio, data_new = load_data(max_sharpe_allocation, test_end_time, test_end_time)
            tmp2 = create_portfolio(start_of_investment, portfolio_name, portfolio, data_new)
            # Add all results to list
            portfolio_invests.append(tmp2)
            with open(p + '/portfolios/performance_investment.txt', 'w') as outFile:
                for d in performances:
                    line = str(i) + ": " + " ".join(
                        [str(key) + ' : ' + str(value) for key, value in d.items()]) + '\n'
                    outFile.write(line)
    return total_portfolio, portfolio_weights, portfolio_invests
print(efficient_portfolio_during.min_volatility()) # Compute the efficient frontier (ret, vol, weights) = efficient_portfolio_during.efficient_frontier() # Add the frontier to the plot showing the 'before' and 'after' frontiers plt.scatter(vol, ret, s=4, c='g', marker='.', label='During') plt.legend() plt.show() # plotting using PyPortfolioOpt pplot.plot_covariance(cs, plot_correlation=False, show_tickers=True) pplot.plot_efficient_frontier(efficient_portfolio_during, points=100, show_assets=True) pplot.plot_weights(cw) # Dealing with many negligible weights # efficient portfolio allocation ef = EfficientFrontier(mu, cs) ef.add_objective(objective_functions.L2_reg, gamma=0.1) w = ef.max_sharpe() print(ef.clean_weights()) # Post-processing weights # These are the quantities of shares that should be bought to have a $20,000 portfolio latest_prices = get_latest_prices(assets) da = DiscreteAllocation(w, latest_prices, total_portfolio_value=20000) allocation, leftover = da.lp_portfolio() print2(allocation)
def ef_minvol_strategy(ld: LazyDictionary, returns=None, cov_matrix=None):
    """Register a minimum-volatility optimiser on the lazy dictionary.

    Stores the ``EfficientFrontier`` instance under ``ld["optimizer"]`` and a
    deferred ``min_volatility`` computation under ``ld["raw_weights"]``.
    """
    frontier = EfficientFrontier(returns, cov_matrix)
    # Penalise tiny allocations so the optimiser prefers fewer, larger weights.
    frontier.add_objective(objective_functions.L2_reg, gamma=0.1)
    ld["optimizer"] = frontier
    ld["raw_weights"] = lambda d: d["optimizer"].min_volatility()
mu = mean_historical_return( df) # pandas series of estimated expected returns for each asset S = CovarianceShrinkage(df).ledoit_wolf() #estimated covariance matrix print(mu) print(S) # define a loop to iterate through all the different objective functions from pypfopt import objective_functions as objfunc print() print('max sharpe') from pypfopt.efficient_frontier import EfficientFrontier ef = EfficientFrontier(mu, S) ef.add_objective( objfunc.L2_reg, gamma=0.1) # incentivize optimizer to choose non zero weights weights = ef.max_sharpe() x = ef.portfolio_performance(verbose=True) cleaned_weights = ef.clean_weights() ef.save_weights_to_file("weights.txt") # saves to file print(cleaned_weights) x = ef.portfolio_performance(verbose=True) from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices latest_prices = get_latest_prices(df) da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=20000) allocation, leftover = da.lp_portfolio() print(allocation) print(leftover)
def rebalnce_portfolios():
    """Re-optimise each saved portfolio CSV and write updated ticker files.

    Reads ``portfolio_investment*`` CSVs from the results folder, downloads
    fresh prices, re-runs the max-Sharpe optimisation (with transaction costs
    against the previously saved weights), discretises to share counts, and
    saves ``portfolio_ticker_<n>`` CSVs next to the inputs.

    NOTE(review): function name has a typo ("rebalnce") -- kept as-is so
    existing callers keep working.
    """
    # Resolve the directory containing this source file and chdir into it.
    p = abspath(getsourcefile(lambda: 0))
    p = p.rsplit('/', 1)[0]
    os.chdir(p)
    print('Working Directory is: %s' % os.getcwd())
    file_path = p + '/results_2015-2019/'  # !!! Editable Folder with files with weights
    start_time = '2015-01-01'  # !!! start date, editable
    end_time = (datetime.today() - timedelta(days=1)).strftime(
        '%Y-%m-%d')  # last day = day before today
    # Lists of files with portfolio weights
    files = [f for f in listdir(file_path) if isfile(join(file_path, f))]
    files = [f for f in files if f.startswith('portfolio_investment')]
    files = [file_path + f for f in files]
    files = sorted(files)
    # One bounds pair per portfolio file, positionally matched after sorting.
    weight_bounds_tuple = (
        (0.0, 0.1), (0.0, 0.1), (0.0, 0.15), (0.0, 0.05), (0.0, 0.05), (0.01, 0.1)
    )  # !!! Weights to be adjusted for every iteration by arbitrary value
    gamma = 0.05
    total_portfolio_value = 20000
    for i in range(0, len(files)):
        portfolio = pd.read_csv(files[i], index_col=0).iloc[:, 0:6]
        tickers = portfolio.iloc[:, 2].tolist(
        )  # tickers inside portfolio (they will be updated)
        # Getting stock data (maybe re-do to for loop, if there be problems with memory)
        temp = pdr.DataReader(tickers, 'yahoo', start_time, end_time)['Close']
        # Previous allocation; used as w_prev so transaction costs are charged
        # only on the change in weights.
        current_weights = portfolio['allocation_weight'].to_list()
        risk_free_rate = 0.0085  # !!! Risk-free rate, 10Y US treasury bond, could be adjusted
        weight_bounds = weight_bounds_tuple[
            i]  # taken from the tuple each iteration, defined at the beginning
        # !!! Different approaches could be taken from here
        mu = mean_historical_return(temp)  # Getting returns
        S = CovarianceShrinkage(temp).ledoit_wolf()  # Getting cov matrix
        # Main function to find optimal portfolio, determining optimal weights
        ef = EfficientFrontier(mu, S, weight_bounds=weight_bounds)
        ef.add_objective(objective_functions.transaction_cost,
                         w_prev=current_weights, k=0.005)
        ef.add_objective(objective_functions.L2_reg, gamma=gamma)
        weights = ef.max_sharpe(
            risk_free_rate=risk_free_rate)  # using max sharpe optimization
        cleaned_weights = ef.clean_weights()  # weights with pretty formatting
        print(cleaned_weights)
        # Printing info on returns, variance & sharpe
        ef.portfolio_performance(verbose=True)
        # This function would change the weights to a number of shares
        # based on the last price in the observable interval
        latest_prices = get_latest_prices(temp)
        da = DiscreteAllocation(weights, latest_prices,
                                total_portfolio_value=total_portfolio_value)
        allocation, leftover = da.lp_portfolio()
        print(allocation)
        print("Money left: " + str(leftover))
        print(da._allocation_rmse_error())  # NOTE(review): private PyPortfolioOpt API
        # Assemble a result table: weight, discrete shares, ticker, buy price.
        allocation = pd.DataFrame.from_dict(allocation, orient='index')
        allocation.columns = ['allocation_discrete']
        weights = pd.DataFrame.from_dict(weights, orient='index')
        weights.columns = ['allocation_weight']
        latest_prices = pd.DataFrame(latest_prices)
        latest_prices.columns = ['buy_price']
        tickers = pd.DataFrame(tickers)
        tickers.columns = ['ticker']
        tickers.index = tickers['ticker']
        result = pd.concat([weights, allocation, tickers, latest_prices], axis=1)
        result['buy_investment'] = result['allocation_discrete'] * result[
            'buy_price']
        result['actual_allocation'] = result['buy_investment'] / result[
            'buy_investment'].sum()
        # Get paths & filenames for saving with replacing old csv files
        n = files[i].split('_')[-1]
        s = file_path + 'portfolio_ticker_' + n
        result.to_csv(s)
def ef_minvol_strategy(returns=None, cov_matrix=None):
    """Minimise portfolio volatility on the efficient frontier.

    Args:
        returns: expected returns per asset (required despite the default).
        cov_matrix: covariance matrix of asset returns (required despite the default).

    Returns:
        Tuple of ``(weights, portfolio_performance(ef), ef)``.

    Raises:
        ValueError: if ``returns`` or ``cov_matrix`` is None.
    """
    # CONSISTENCY FIX: the sibling strategy functions validate their inputs;
    # this one previously let None flow into EfficientFrontier and fail opaquely.
    if returns is None:
        raise ValueError("returns must not be None")
    if cov_matrix is None:
        raise ValueError("cov_matrix must not be None")
    ef = EfficientFrontier(returns, cov_matrix)
    # L2 regularisation discourages many negligible weights.
    ef.add_objective(objective_functions.L2_reg, gamma=0.1)
    weights = ef.min_volatility()
    return weights, portfolio_performance(ef), ef