Example #1
# Compute ERIs
df_eri = compute_eri(total_return_index=df, funding_return=df_cdi['CDI'])
df_returns = df_eri.pct_change(1).dropna()

# Correlation
emp_cov = empirical_covariance(df_returns)
emp_corr, _ = cov2corr(emp_cov)
# print(emp_corr, '\n')

# Shrinkage
shrunk_cov = shrink_cov(df_returns, alpha=0.5)
shrunk_corr, _ = cov2corr(shrunk_cov)
# print(shrunk_corr, '\n')

# Marchenko-Pastur
mp_cov, _, _ = marchenko_pastur(df_returns)
mp_corr, _ = cov2corr(mp_cov)
# print(mp_corr, '\n')

# Targeted Shrinkage
ts_cov, _, _ = targeted_shirinkage(df_returns, ts_alpha=0.5)  # TODO: should this reproduce the Marchenko-Pastur result?
ts_corr, _ = cov2corr(ts_cov)
# print(ts_corr, '\n')

# Ledoit-Wolf
lw_cov = ledoitwolf_cov(df_returns)
lw_corr, _ = cov2corr(lw_cov)
# print(lw_corr, '\n')

# Detoning
detoned_corr = detone_corr(emp_corr, n=1)
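
For reference, a minimal sketch of what a detoning step like detone_corr typically does, following the usual recipe (drop the n largest "market" eigenvalues and rescale the diagonal back to 1); the name detone_sketch and its signature are illustrative, not the library's actual API:

import numpy as np

def detone_sketch(corr, n=1):
    # eigh handles the symmetric correlation matrix and returns
    # eigenvalues in ascending order
    eigval, eigvec = np.linalg.eigh(corr)
    # drop the n largest ("market") components
    eigval, eigvec = eigval[:-n], eigvec[:, :-n]
    # rebuild the matrix without the market mode...
    corr_ = eigvec @ np.diag(eigval) @ eigvec.T
    # ...and rescale so the diagonal is exactly 1 again
    d = np.sqrt(np.diag(corr_))
    return corr_ / np.outer(d, d)

For the Ledoit-Wolf step, scikit-learn's sklearn.covariance.LedoitWolf (LedoitWolf().fit(X).covariance_) is a well-known reference implementation to compare ledoitwolf_cov against.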
Example #2
import numpy as np
import pandas as pd
from matplotlib import rcParams

# Read Bloomberg tickers for renaming
df_tickers = pd.read_excel(file_path + r'/Data - BBG Data Values.xlsx',
                           index_col=0, sheet_name='Tickers')

tr_dict = df_tickers['Total Return Index (UBS)'].to_dict()
tr_dict = {v: k for k, v in tr_dict.items()}

# Read Total Return Index
df_tr = pd.read_excel(file_path + r'/Data - BBG Data Values.xlsx',
                      index_col=0, sheet_name='Total Return')
data = df_tr.rename(tr_dict, axis=1)
data = data.pct_change(1).dropna()

# ===== compute different correlations matrices =====
corr = data.corr().values  # empirical correlation
corr_denoised, _, _ = marchenko_pastur(corr_matrix=corr, T=data.shape[0], N=data.shape[1])  # denoised correlation
corr_ts, _, _ = targeted_shirinkage(corr_matrix=corr, T=data.shape[0], N=data.shape[1], ts_alpha=1)  # targeted shrinkage

# ===== sorted eigenvalues of each method =====
# eigvalsh: real eigenvalues of a symmetric matrix, in ascending order
eig_empirical = np.linalg.eigvalsh(corr)[::-1]
eig_denoised = np.linalg.eigvalsh(corr_denoised)[::-1]
eig_ts = np.linalg.eigvalsh(corr_ts)[::-1]
df_eig = pd.DataFrame(data={'Empirical Eigenvalues': eig_empirical,
                            'Denoised Eigenvalues': eig_denoised,
                            'Targeted Shrinkage': eig_ts},
                      index=[i + 1 for i in range(data.shape[1])])

# ===== chart =====
MyFont = {'fontname': 'Century Gothic'}
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Century Gothic']
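
Since this example compares eigenvalue spectra, a minimal sketch of the constant-residual Marchenko-Pastur recipe being benchmarked may help; it assumes unit noise variance (the repo's marchenko_pastur also fits that variance and returns it, which the sketch omits), and the names are illustrative:

import numpy as np

def mp_denoise_sketch(corr, T, N):
    # Marchenko-Pastur upper edge for a pure-noise correlation matrix
    # with aspect ratio q = T / N (assumes T > N and noise variance 1)
    q = T / N
    lambda_max = (1 + 1 / np.sqrt(q)) ** 2
    eigval, eigvec = np.linalg.eigh(corr)  # ascending order
    noise = eigval < lambda_max
    if noise.any():
        # replace the noise eigenvalues by their average (preserves the trace)
        eigval = eigval.copy()
        eigval[noise] = eigval[noise].mean()
    corr_ = eigvec @ np.diag(eigval) @ eigvec.T
    d = np.sqrt(np.diag(corr_))
    return corr_ / np.outer(d, d)  # renormalise the diagonal back to 1

Eigenvalues above the cutoff are kept as signal and everything below is averaged, which is why the denoised spectrum in df_eig is flat in its tail.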
Example #3
        # add value views
        value_countries = df_value.loc[date].dropna().index
        for ccy in value_countries:
            try:
                v.loc[f'{ccy} value'] = df_value.loc[date, ccy]
                P.loc[f'{ccy} value', ccy] = 1
            except KeyError:  # if a currency has no value signal, skip this view
                continue

        P = P.fillna(0)
        v = v.to_frame('Views')

        # denoise the covariance matrix
        mp_corr, _, _ = marchenko_pastur(df_corr.loc[date],
                                         T=21 * 3,
                                         N=vols.shape[0])

        mp_cov = pd.DataFrame(data=np.diag(vols) @ mp_corr @ np.diag(vols),
                              index=vols.index,
                              columns=vols.index)

        # optimization
        bl = BlackLitterman(
            sigma=mp_cov,
            estimation_error=1 / (21 * 3),
            views_p=P,
            views_v=v,
            w_equilibrium=weights_iv.loc[date].to_frame(),
            avg_risk_aversion=1.2,
            mu_historical=df_mom.loc[date].to_frame('Historical'),
            mu_shrink=0.99,  # needs to be tuned
            overall_confidence=100)  # needs to be tuned
Example #4
# add value views
for ccy in df_tr.columns:
    try:
        v.loc[f'{ccy} value'] = df_value.loc[last_date, ccy]
        P.loc[f'{ccy} value', ccy] = 1
    except KeyError:  # if a currency has no value signal, skip this view
        continue


P = P.fillna(0)
v = v.to_frame('Views')

# denoise the covariance
mp_corr, _, _ = marchenko_pastur(df_corr.loc[last_date],
                                 T=21 * 3, N=df_tr.shape[1])

mp_cov = pd.DataFrame(data=np.diag(vols) @ mp_corr @ np.diag(vols),
                      index=vols.index, columns=vols.index)

bl = BlackLitterman(sigma=mp_cov,
                    estimation_error=1 / (21 * 3),
                    views_p=P,
                    views_v=v,
                    w_equilibrium=df_weights['Inverse Volatility'].to_frame(),
                    avg_risk_aversion=1.2,
                    mu_historical=df_mom.loc[last_date].to_frame('Historical'),
                    mu_shrink=0.99,  # needs to be tuned
                    overall_confidence=100)  # needs to be tuned

vol_bl = pd.Series(data=np.sqrt(np.diag(bl.sigma_bl)), index=bl.sigma_bl.index)
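
BlackLitterman here is the repo's own class, so its internals are not shown; as a rough guide to what bl.sigma_bl represents, below is a minimal sketch of the standard Black-Litterman posterior in plain numpy. The mapping is an assumption: tau plays the role of estimation_error, omega is the view-uncertainty covariance, and the function name is illustrative.

import numpy as np

def bl_posterior_sketch(sigma, pi, P, v, tau, omega):
    # sigma: prior covariance, pi: equilibrium returns, P: pick matrix,
    # v: view values, omega: view-uncertainty covariance (numpy arrays)
    ts = tau * sigma
    A = np.linalg.inv(P @ ts @ P.T + omega)
    # posterior mean: equilibrium returns tilted towards the views
    mu_bl = pi + ts @ P.T @ A @ (v - P @ pi)
    # uncertainty around the posterior mean...
    sigma_mu = ts - ts @ P.T @ A @ P @ ts
    # ...added back to the prior covariance for the final allocation
    sigma_bl = sigma + sigma_mu
    return mu_bl, sigma_bl

The last line of the example then just reads the posterior volatilities off the diagonal of sigma_bl.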