Example #1
0
def testNCO():
    # Chapter 7 - apply the Nested Clustered Optimization (NCO) algorithm
    # on a small synthetic price history, and compare the resulting weights
    # against a plain Markowitz minimum-variance allocation and against the
    # mlfinlab reference implementation.
    N = 5
    T = 5
    # 6 observations (rows) of 5 instrument prices (columns)
    S_value = np.array([[1., 2, 3, 4, 5], [1.1, 3, 2, 3, 5],
                        [1.2, 4, 1.3, 4, 5], [1.3, 5, 1, 3, 5],
                        [1.4, 6, 1, 4, 5.5], [1.5, 7, 1, 3, 5.5]])
    # make instrument 1 flat except for a bump in the last observation
    S_value[:, 1] = 1
    S_value[5, 1] = 1.1

    S, instrument_returns = calculate_returns(S_value)
    _, instrument_returns = calculate_returns(S_value,
                                              percentageAsProduct=True)

    mu1 = None  # no expected-returns vector -> minimum-variance objective
    cov1_d = np.cov(S, rowvar=0, ddof=1)

    # test baseClustering
    # BUG FIX: was mp.cov2corr(cov) — `cov` is not defined in this scope;
    # the correlation matrix must be derived from cov1_d (as the sibling
    # copy of this test does).
    corr1 = mp.cov2corr(cov1_d)
    a, b, c = nco.NCO()._cluster_kmeans_base(pd.DataFrame(corr1))
    d, e, f = clusterKMeansBase(pd.DataFrame(corr1))
    #b={0: [2, 0], 1: [1], 2: [3, 4]}
    #e={0: [1, 2], 1: [3, 4], 2: [0]}

    min_var_markowitz = mc.optPort(cov1_d, mu1).flatten()
    min_var_NCO = pc.optPort_nco(cov1_d, mu1, max(int(cov1_d.shape[0] / 2),
                                                  2)).flatten()
    mlfinlab_NCO = nco.NCO().allocate_nco(cov1_d, mu1,
                                          max(int(cov1_d.shape[0] / 2),
                                              2)).flatten()

    cov1_d = np.cov(S, rowvar=0, ddof=1)
    mlfinlab_NCO = nco.NCO().allocate_nco(cov1_d, mu1,
                                          int(cov1_d.shape[0] / 2)).flatten()

    # expected return per instrument = weight * realized instrument return
    expected_return_markowitz = [
        min_var_markowitz[i] * instrument_returns[i]
        for i in range(0, cov1_d.shape[0])
    ]
    e_m = sum(expected_return_markowitz)
    expected_return_NCO = [
        min_var_NCO[i] * instrument_returns[i]
        for i in range(0, cov1_d.shape[0])
    ]
    # BUG FIX: was sum(expected_return_markowitz) — copy-paste error that
    # left expected_return_NCO unused and made e_NCO == e_m by construction.
    e_NCO = sum(expected_return_NCO)
    vol = getVolatility(S_value)
    # weighted volatility contribution per instrument, for each allocator
    m_minVol = [
        min_var_markowitz[i] * vol[i] for i in range(0, cov1_d.shape[0])
    ]
    NCO_minVol = [mlfinlab_NCO[i] * vol[i] for i in range(0, cov1_d.shape[0])]
# NOTE(review): this re-definition shadows an earlier testNCO in the same
# scrape; kept as-is apart from the bug fix below.
def testNCO():
    # Chapter 7 - apply the Nested Clustered Optimization (NCO) algorithm
    # on a small synthetic price history, and compare the resulting weights
    # against a plain Markowitz minimum-variance allocation and against the
    # mlfinlab reference implementation.
    N = 5
    T = 5
    # 6 observations (rows) of 5 instrument prices (columns)
    S_value = np.array([[1., 2,3,  4,5],
                        [1.1,3,2,  3,5],
                        [1.2,4,1.3,4,5],
                        [1.3,5,1,  3,5],
                        [1.4,6,1,  4,5.5],
                        [1.5,7,1,  3,5.5]])
    # make instrument 1 flat except for a bump in the last observation
    S_value[:,1] =1
    S_value[5,1] =1.1

    S, instrument_returns = calculate_returns(S_value)
    _, instrument_returns = calculate_returns(S_value, percentageAsProduct=True)

    # sanity check: simple returns must agree with pandas pct_change
    np.testing.assert_almost_equal(S, pd.DataFrame(S_value).pct_change().dropna(how="all"))

    mu1 = None  # no expected-returns vector -> minimum-variance objective
    cov1_d = np.cov(S ,rowvar=0, ddof=1)

    #test baseClustering
    corr1 = mp.cov2corr(cov1_d)
    a,b,c = nco.NCO()._cluster_kmeans_base(pd.DataFrame(corr1))
    d,e,f = clusterKMeansBase(pd.DataFrame(corr1))
    #b={0: [2, 0], 1: [1], 2: [3, 4]}
    #e={0: [1, 2], 1: [3, 4], 2: [0]}


    min_var_markowitz = mc.optPort(cov1_d, mu1).flatten()

    #compare min_var_markowitz with mlfinlab impl
    #ml.


    min_var_NCO = pc.optPort_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()
    mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, max(int(cov1_d.shape[0]/2), 2)).flatten()

    cov1_d = np.cov(S,rowvar=0, ddof=1)
    mlfinlab_NCO= nco.NCO().allocate_nco(cov1_d, mu1, int(cov1_d.shape[0]/2)).flatten()

    # expected return per instrument = weight * realized instrument return
    expected_return_markowitz = [min_var_markowitz[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
    e_m = sum(expected_return_markowitz)
    expected_return_NCO = [min_var_NCO[i]*instrument_returns[i] for i in range(0,cov1_d.shape[0])]
    # BUG FIX: was sum(expected_return_markowitz) — copy-paste error that
    # left expected_return_NCO unused and made e_NCO == e_m by construction.
    e_NCO = sum(expected_return_NCO)
    vol = getVolatility(S_value)
    # weighted volatility contribution per instrument, for each allocator
    m_minVol = [min_var_markowitz[i]*vol[i] for i in range(0, cov1_d.shape[0])]
    NCO_minVol = [mlfinlab_NCO[i]*vol[i] for i in range(0, cov1_d.shape[0])]
Example #3
0
    # Plot denoised vs. original eigenvalues on a log scale to visualize the
    # effect of denoising (chapter 2 of the source text).
    plt.plot(range(0, len(denoised_eigenvalue)),
             np.log(denoised_eigenvalue),
             color='r',
             label="Denoised eigen-function")
    plt.plot(range(0, len(eigenvalue_prior)),
             np.log(eigenvalue_prior),
             color='g',
             label="Original eigen-function")
    plt.xlabel("Eigenvalue number")
    plt.ylabel("Eigenvalue (log-scale)")
    plt.legend(loc="upper right")
    plt.show()

    #from code snippet 2.10
    detoned_cov = mc.corr2cov(detoned_corr, var0)
    w = mc.optPort(detoned_cov)
    print(w)
    #min_var_port = 1./nTrials*(np.sum(w, axis=0))
    #print(min_var_port)

    #expected portfolio variance: W^T.(Cov).W
    #https://blog.quantinsti.com/calculating-covariance-matrix-portfolio-variance/
    # BUG FIX: was `detnoed_corr`, an undefined name (NameError). Per the
    # formula above, portfolio variance uses the detoned *covariance* matrix
    # built two statements earlier, not a correlation matrix.
    minVarPortfolio_var = np.dot(np.dot(w.T, detoned_cov), w)

    #Expected return: w.T . mu
    # https://www.mn.uio.no/math/english/research/projects/focustat/publications_2/shatthik_barua_master2017.pdf p8
    # or I.T.cov^-1.mu / I.T.cov^-1.I
    # NOTE(review): `cov`, `ones` and `mu` are defined outside this excerpt —
    # confirm `cov` here is meant to be the detoned covariance as well.
    inv = np.linalg.inv(cov)
    e_r = np.dot(np.dot(ones.T, inv), mu) / np.dot(ones.T, np.dot(ones.T, inv))
    #Chapter 4 optimal clustering
def minVarPort(cov):
    """Return minimum-variance portfolio weights for covariance matrix *cov*.

    Thin wrapper over mc.optPort: passing mu=None selects the global
    minimum-variance solution instead of a mean-variance optimum.
    """
    weights = mc.optPort(cov, mu=None)
    return weights
 
 # code snippet 7.7 - data-generating process
 # Build the "true" moments: a block covariance matrix of nBlocks blocks of
 # bSize instruments with intra-block correlation bCorr, plus a mean vector.
 nBlocks, bSize, bCorr = 10, 50, .5
 np.random.seed(0)
 mu0, cov0 = mc.formTrueMatrix(nBlocks, bSize, bCorr)
    
 # code snippet 7.8 - drawing an empirical vector of means and covariance matrix
 # Monte-Carlo loop: each simulation draws empirical (mu1, cov1) from the true
 # process, then allocates with plain Markowitz (w1) and with NCO (w1_d).
 nObs, nSims, shrink, minVarPortf = 1000, 1000, False, True
 np.random.seed(0)
 # one row of N = nBlocks*bSize weights per simulation, for each method
 w1 = pd.DataFrame(0, index=range(0, nSims), columns=range(0, nBlocks*bSize))	
 w1_d = pd.DataFrame(0, index=range(0, nSims), columns=range(0, nBlocks*bSize))
 for i in range(0, nSims):
     mu1, cov1 = mc.simCovMu(mu0, cov0, nObs, shrink=shrink)
     if minVarPortf:
         mu1 = None  # dropping the means selects the minimum-variance objective
     w1.loc[i] = mc.optPort(cov1, mu1).flatten() #markowitc
     w1_d.loc[i] = optPort_nco(cov1, mu1, int(cov1.shape[0]/2)).flatten() #nco
     
 # code snippet 7.9 - Estimation of allocation errors
 # Compare every simulated allocation against the allocation derived from the
 # true moments (w0), reporting root-mean-square deviation per method.
 w0 = mc.optPort(cov0, None if minVarPortf else mu0)
 w0 = np.repeat(w0.T, w1.shape[0], axis=0) #true allocation
 rmsd = np.mean((w1-w0).values.flatten()**2)**.5 #RMSE
 rmsd_d = np.mean((w1_d-w0).values.flatten()**2)**.5 #RMSE
 '''
 >>> rmsd
 0.020737753489610305 #markowitc
 >>> rmsd_d
 0.015918559234396952 #nco
 '''