Example 1
    def UpdateAlpha():
        # Conjugate Gaussian update for the regression coefficients Alpha,
        # given design matrix Z_O, response Log_H, and noise variance Sigma_Sq.
        OldMean = Parameters.Alpha['Mean']
        OldMean = OldMean.reshape((OldMean.shape[0], 1))
        OldCov = Parameters.Alpha['Cov']
        Sigma_Sq = float(Parameters.Sigma_Sq['Value'])  # ensure float division

        # Posterior precision = data precision + prior precision.
        NewCov = invert(Z_O.T.dot(Z_O) / Sigma_Sq + invert(OldCov))
        NewMean = NewCov.dot(Z_O.T.dot(Log_H) / Sigma_Sq + invert(OldCov).dot(OldMean))
        NewMean = NewMean.reshape((NewMean.shape[0],))

        # Draw a new coefficient sample from the Gaussian posterior.
        NewValue = rand.multivariate_normal(mean=NewMean, cov=NewCov)
        NewAlpha = {'Value': NewValue, 'Mean': NewMean, 'Cov': NewCov}
        return NewAlpha
Example 2
    def UpdateBeta():
        # Same conjugate Gaussian update as UpdateAlpha, here with unit noise
        # variance: design matrix covariates_O and response r_O.
        OldMean = Parameters.Beta['Mean']
        OldMean = OldMean.reshape((OldMean.shape[0], 1))
        OldCov = Parameters.Beta['Cov']

        NewCov = invert(covariates_O.T.dot(covariates_O) + invert(OldCov))
        NewMean = NewCov.dot(covariates_O.T.dot(r_O) + invert(OldCov).dot(OldMean))
        NewMean = NewMean.reshape((NewMean.shape[0],))

        NewValue = rand.multivariate_normal(mean=NewMean, cov=NewCov)

        NewBeta = {'Value': NewValue, 'Mean': NewMean, 'Cov': NewCov}
        return NewBeta
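Both updates above are instances of the standard conjugate update for Bayesian linear regression: with a N(m0, S0) prior on the coefficients and Gaussian noise of variance sigma_sq, the posterior is N(m, S) with S = (X'X/sigma_sq + S0^-1)^-1 and m = S(X'y/sigma_sq + S0^-1 m0); UpdateBeta is the sigma_sq = 1 case. A minimal self-contained sketch of that update; the names X, y, m0, S0 are placeholders, not taken from the original code:

    import numpy as np
    from numpy.linalg import inv

    def conjugate_update(X, y, m0, S0, sigma_sq, rng=np.random.default_rng()):
        """One Gibbs step for regression coefficients under a Gaussian prior."""
        S = inv(X.T @ X / sigma_sq + inv(S0))        # posterior covariance
        m = S @ (X.T @ y / sigma_sq + inv(S0) @ m0)  # posterior mean
        return rng.multivariate_normal(m, S), m, S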
Example 3
def _test_find_vector_zx_rotation_transform():
    from random import uniform
    # Draw three random Euler angles in [0, 2*pi).
    a2 = uniform(0, 2 * pi)
    b2 = uniform(0, 2 * pi)
    g2 = uniform(0, 2 * pi)
    eulerangles = [a2, b2, g2]
    # print("secret euler angles:", [e * 180 / pi for e in eulerangles])
    rotmat_body_to_sensor = find_rotation_transform(eulerangles)

    vz = Vector((0, 0, 1), vtype="CARTESIAN")
    vx = Vector((1, 0, 0), vtype="CARTESIAN")

    # Rotate the body-frame z and x axes into the sensor frame.
    vz_measured = vz.rotate(rotmat_body_to_sensor)
    vx_measured = vx.rotate(rotmat_body_to_sensor)

    # Recover the Euler angles from the two rotated vectors.
    eulerangles_found = list(find_vector_zx_rotation_abg(vz_measured, vx_measured))
    # print("found euler angles:", [e * 180 / pi for e in eulerangles_found])

    # Invert the recovered rotation and check that it maps the vectors back.
    rotmat_sensor_to_body = find_rotation_transform(eulerangles_found)
    rotmat_sensor_to_body = invert(array(rotmat_sensor_to_body))

    vz_compensated = vz_measured.rotate(rotmat_sensor_to_body)
    vx_compensated = vx_measured.rotate(rotmat_sensor_to_body)
    # print("vz measured:", vz_measured)
    print("vz compensated:", vz_compensated)

    # print("vx measured:", vx_measured)
    print("vx compensated:", vx_compensated)
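The round trip works because rotation matrices are orthogonal: the inverse equals the transpose. A quick numpy check of that property; the ZXZ composition below is an assumed convention, since the original's find_rotation_transform is not shown:

    import numpy as np

    def rot_z(t):
        c, s = np.cos(t), np.sin(t)
        return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

    def rot_x(t):
        c, s = np.cos(t), np.sin(t)
        return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])

    # ZXZ Euler composition (convention assumed, not taken from the original).
    a, b, g = 0.3, 1.1, 2.0
    R = rot_z(a) @ rot_x(b) @ rot_z(g)

    v = np.array([0.0, 0.0, 1.0])
    v_rot = R @ v
    # For a rotation, the transpose undoes it and equals the matrix inverse.
    assert np.allclose(R.T @ v_rot, v)
    assert np.allclose(np.linalg.inv(R), R.T)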
Example 4
def main():
    FileLocs = vanillaPCA.getFileLocs()
    Gamma = 50
    FactorCollection = dict()
    LoadingCollection = dict()

    for idx, fileLoc in enumerate(FileLocs):
        dataset = np.genfromtxt(fileLoc, delimiter=',')
        T, N = dataset.shape

        X_Bar = np.mean(dataset, axis=0).reshape((N, 1))
        # second-moment matrix tilted by Gamma times the outer product of means
        CovarianceMat = (dataset.T).dot(dataset) / T + Gamma * X_Bar.dot(X_Bar.T)
        EigenValues, EigenVectors = np.linalg.eig(CovarianceMat)

        plt.figure()
        plt.plot(EigenValues)
        plt.title('Fig {}.1 Eigenvalues for Dataset {}'.format(
            idx + 4, idx + 1))
        plt.savefig('Fig_{}_1.png'.format(idx + 4))

        # np.linalg.eig returns eigenvectors as the *columns* of EigenVectors,
        # in no guaranteed order, so sort by eigenvalue and take the top three
        order = np.argsort(EigenValues)[::-1]
        Loadings = EigenVectors[:, order[:3]].T
        # rescale each loading vector so that its entries sum to 1
        for idy, loading in enumerate(Loadings):
            rescaleFactor = np.sum(loading)
            Loadings[idy] = loading / rescaleFactor
        # get factors by the OLS matrix expression: F = X*L*(L^T*L)^(-1)
        Loadings = Loadings.T
        Factors = dataset.dot(Loadings).dot(invert(Loadings.T.dot(Loadings)))

        FactorsForPlot = Factors.T
        trueFactor = np.genfromtxt(
            r"C:\Users\Jiacheng Z\Dropbox\Courses\17Spring\MS&E349\MS&E349_Shared\HW2\code\pca\Simulation_Factor_{}.csv"
            .format(idx + 1),
            delimiter=',').T

        # plt.subplots creates its own figure, so no separate plt.figure() call
        f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
        for idy, axis in enumerate([ax1, ax2, ax3]):
            factor = FactorsForPlot[idy]
            # eigenvectors are determined only up to sign; flip for plotting
            if np.mean(factor) < 0:
                factor = -factor
            axis.plot(factor,
                      label='Fitted ' + r'$\hat{F}$' + '{}'.format(idy + 1))
            axis.plot(trueFactor[idy],
                      label='True ' + r'$F_{}$'.format(idy + 1))
            axis.legend()
        ax1.set_title('Fig {}.2 Factors Dataset {}'.format(idx + 4, idx + 1))
        plt.savefig('Fig_{}_2.png'.format(idx + 4))

        CollectionKey = 'dataset' + str(idx + 1)
        FactorCollection[CollectionKey] = Factors
        LoadingCollection[CollectionKey] = Loadings
    return FactorCollection, LoadingCollection
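The OLS step F = X*L*(L^T*L)^(-1) recovers the factors by regressing each cross-section of X on the loadings. A toy check on synthetic, noiseless data (all names below are illustrative, not from the original):

    import numpy as np
    from numpy.linalg import inv

    rng = np.random.default_rng(0)
    T, N, K = 200, 10, 3
    F_true = rng.standard_normal((T, K))   # factors
    L_true = rng.standard_normal((N, K))   # loadings
    X = F_true @ L_true.T                  # noiseless panel
    # OLS recovery: F = X L (L'L)^{-1} reproduces the factors exactly here.
    F_hat = X @ L_true @ inv(L_true.T @ L_true)
    assert np.allclose(F_hat, F_true)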
Example 5
def polyFit(data, degree):
    # Error code 1: fewer points than the polynomial degree.
    if len(data) < degree:
        return 1, None, None
    # Error code 2: fewer than twice as many points as the degree.
    if len(data) < 2 * degree:
        return 2, None, None
    xOnly = [point[0] for point in data]

    # Solve for the polynomial coefficients via the inverted design matrix.
    poly = Polynomial(
        matmul(regressVector(data, degree),
               invert(regressMatrix(xOnly, degree))))

    # All paths now return the same arity: (error_code, poly, r_squared).
    return None, poly, rSquared(data, poly)
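regressMatrix and regressVector are not shown; under the usual least-squares reading they would hold the Vandermonde Gram matrix and the moment vector of the normal equations. A plain-numpy sketch of that fit, purely illustrative and not a reconstruction of the original helpers:

    import numpy as np

    def poly_fit_normal_equations(data, degree):
        """Least-squares polynomial fit via the normal equations."""
        x = np.array([p[0] for p in data], dtype=float)
        y = np.array([p[1] for p in data], dtype=float)
        V = np.vander(x, degree + 1, increasing=True)  # columns 1, x, x^2, ...
        # Solve (V'V) c = V'y for the coefficient vector c.
        coeffs = np.linalg.solve(V.T @ V, V.T @ y)
        return coeffs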
Example 6
def _get_efficient_frontier(expected_values,
                            standard_deviations,
                            covariances,
                            returns=[x * 0.25 / 42 for x in range(42)]):
    # Closed-form mean-variance frontier: with A = 1'S^-1 1, B = 1'S^-1 mu,
    # C = mu'S^-1 mu and D = A*C - B^2, the minimum variance at target return
    # r is (A*r^2 - 2*B*r + C) / D.  (standard_deviations is accepted but
    # unused here.)
    cov_inv = invert(covariances)
    A = sum(sum(cov_inv))
    B = sum(np.dot(cov_inv, expected_values))
    C = np.dot(expected_values.transpose(), np.dot(cov_inv, expected_values))
    D = A * C - B**2
    risks = [
        math.sqrt((A / D) * r**2 - (2 * B / D) * r + C / D) for r in returns
    ]
    return pd.DataFrame({'returns': list(returns), 'risks': risks})
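A quick usage sketch with a toy two-asset market; the numbers are illustrative, and invert is assumed to be numpy.linalg.inv as elsewhere on this page:

    import math

    import numpy as np
    import pandas as pd
    from numpy.linalg import inv as invert

    mu = np.array([0.08, 0.12])
    cov = np.array([[0.04, 0.01],
                    [0.01, 0.09]])
    frontier = _get_efficient_frontier(mu, np.sqrt(np.diag(cov)), cov)
    print(frontier.head())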
Example 7
def main():
    FileLocs = getFileLocs()
    FactorCollection = dict()
    LoadingCollection = dict()

    for idx, fileLoc in enumerate(FileLocs):
        dataset = np.genfromtxt(fileLoc, delimiter=',')
        T, N = dataset.shape
        CovarianceMat = np.cov(dataset, rowvar=False)
        EigenValues, EigenVectors = np.linalg.eig(CovarianceMat)

        plt.figure()
        plt.plot(EigenValues)
        plt.title('Fig {}.1 Eigenvalues for Dataset {}'.format(
            idx + 1, idx + 1))
        plt.savefig('Fig_{}_1.png'.format(idx + 1))

        # np.linalg.eig returns eigenvectors as the *columns* of EigenVectors,
        # in no guaranteed order, so sort by eigenvalue and take the top three
        order = np.argsort(EigenValues)[::-1]
        Loadings = EigenVectors[:, order[:3]].T
        # rescale each loading vector so that its entries sum to 1
        for idy, loading in enumerate(Loadings):
            rescaleFactor = np.sum(loading)
            Loadings[idy] = loading / rescaleFactor
        Loadings = Loadings.T
        Factors = dataset.dot(Loadings).dot(invert(Loadings.T.dot(Loadings)))
        FactorsForPlot = Factors.T
        trueFactor = np.genfromtxt(
            r"C:\Users\Jiacheng Z\Dropbox\Courses\17Spring\MS&E349\MS&E349_Shared\HW2\code\pca\Simulation_Factor_{}.csv"
            .format(idx + 1),
            delimiter=',').T

        # plt.subplots creates its own figure, so no separate plt.figure() call
        f, (ax1, ax2, ax3) = plt.subplots(3, sharex=True, sharey=True)
        for idy, axis in enumerate([ax1, ax2, ax3]):
            factor = FactorsForPlot[idy]
            # eigenvectors are determined only up to sign; flip for plotting
            if np.mean(factor) < 0:
                factor = -factor
            axis.plot(factor,
                      label='Fitted ' + r'$\hat{F}$' + '{}'.format(idy + 1))
            axis.plot(trueFactor[idy],
                      label='True ' + r'$F_{}$'.format(idy + 1))
            axis.legend()
        ax1.set_title('Fig {}.2 Factors Dataset {}'.format(idx + 1, idx + 1))
        plt.savefig('Fig_{}_2.png'.format(idx + 1))

        CollectionKey = 'dataset' + str(idx + 1)
        FactorCollection[CollectionKey] = Factors
        LoadingCollection[CollectionKey] = Loadings
    return FactorCollection, LoadingCollection
Example 8
def get_portfolio(df, returns, lb=0, ub=1):
    """Get the best portfolio given the specified returns.

    lb, ub are scalars or a vector with the same length as the number
    of possible stocks in the portfolio.

    lb = 0 means you cannot short a stock
    ub = 1 means you have 100% of your portfolio in one stock

    usually people fix the lb = 0 or lb = - 0.05 (no more than 5% shorted
    per stock) and set ub for treasury bills to be like 0.03 or something.
    """
    ncol = df.shape[1]  # DataFrame.shape is a property, not a method
    expected_values = df.mean()
    covariances = df.cov()
    # NOTE: the quadratic objective below must use the covariance matrix C
    # itself (portfolio variance is x' C x); the original inverted it here,
    # which does not measure variance. The inverse belongs only to the
    # closed-form frontier (see _get_efficient_frontier above).
    C = np.asarray(covariances)
    lower = lb if hasattr(lb, '__iter__') else [lb] * ncol
    upper = ub if hasattr(ub, '__iter__') else [ub] * ncol
    returns = returns if hasattr(returns, '__iter__') else [returns]
    bounds = list(zip(lower, upper))  # a list survives reuse across iterations

    def objective(x):
        """Minimize the portfolio variance 1/2 * x' C x; the 1/2 is dropped."""
        return x.dot(C.dot(x))

    def gradient(x):
        # Gradient of x' C x is (C + C') x, i.e. 2 C x for symmetric C.
        return (C + C.T).dot(x)

    initial_guess = np.ones(ncol) / ncol

    allocations = []
    risks = []
    for r in returns:
        # Equality constraints A x = b, i.e. A x - b = 0:
        #
        # Row 1 makes the allocations total 100%; row 2 hits the target return:
        #
        #      x1 +    x2 +    x3 = 1               (portfolio is 100%)
        #   e1 x1 + e2 x2 + e3 x3 = desired_return  (expected return as desired)
        #
        #   A            x         b
        #   [1  1  1     [x1   -   [1                 =  [0
        #    e1 e2 e3]    x2        desired_return]       0]
        #                 x3]
        #
        # xi = the percent allocation of each stock in the portfolio
        # ei = the expected return for a given stock
        A = np.ones((2, ncol))
        A[1, :] = expected_values  # second row holds the expected returns
        b = np.array([1., r])      # keep b 1-D so A.dot(x) - b is also 1-D
        constraints = dict(type='eq', fun=lambda x: A.dot(x) - b)

        result = minimize(objective,
                          initial_guess,
                          method='SLSQP',
                          jac=gradient,
                          bounds=bounds,
                          constraints=constraints)

        allocations.append(
            list(result.x) if result.success else [np.nan] * ncol)
        # Report risk as the standard deviation, consistent with the frontier.
        risks.append(np.sqrt(result.x.dot(C.dot(result.x)))
                     if result.success else np.nan)

    out = np.zeros((len(returns), ncol + 2))  # avoid shadowing the loop result
    out[:, 0] = returns
    out[:, 1] = risks
    out[:, 2:] = np.array(allocations)
    return pd.DataFrame(out, columns=["returns", "risks"] + list(df.columns))
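A usage sketch on random return data; the ticker names, seed, and targets are made up, and numpy as np, pandas as pd, and scipy.optimize.minimize are assumed imported as the function requires:

    import numpy as np
    import pandas as pd

    rng = np.random.default_rng(1)
    daily_returns = pd.DataFrame(
        rng.normal(0.001, 0.02, size=(250, 3)),
        columns=['AAA', 'BBB', 'CCC'])
    portfolios = get_portfolio(daily_returns, returns=[0.0005, 0.0010])
    print(portfolios)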
Example 9
def maxSharpe(Factors):
    assert Factors.shape == (150, 3), '[Error] Factor dimensionality error!\n'
    MuVec = np.mean(Factors, axis=0).reshape((3, 1))
    Covariance = np.cov(Factors, rowvar=False)
    # Maximum attainable Sharpe ratio: sqrt(mu' Sigma^{-1} mu)
    MaxSharpe = np.sqrt(MuVec.T.dot(invert(Covariance)).dot(MuVec))
    return MaxSharpe.item()  # unwrap the 1x1 result to a plain float
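This is the Sharpe ratio of the tangency portfolio with weights proportional to Sigma^{-1} mu. A quick numerical check on synthetic data (names and seed are illustrative):

    import numpy as np
    from numpy.linalg import inv

    rng = np.random.default_rng(7)
    F = rng.normal(0.01, 0.05, size=(150, 3))
    mu = F.mean(axis=0)
    Sigma = np.cov(F, rowvar=False)

    w = inv(Sigma) @ mu  # tangency weights (up to scale)
    sharpe_w = (w @ mu) / np.sqrt(w @ Sigma @ w)
    # The tangency portfolio attains the maximum Sharpe ratio from the formula.
    assert np.isclose(sharpe_w, np.sqrt(mu @ inv(Sigma) @ mu))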