Example No. 1
    def build_test_problem(self,
                           m,
                           n,
                           is_complex=True,
                           is_non_negative_only=False,
                           data_type='gaussian'):
        """ Creates and outputs random generated data and measurements.

        Inputs:
        isComplex(boolean, default=true): whether the signal and measurement
        matrix is complex. is_non_negative_only(boolean, default=false):
            whether the signal is real and non-negative.
        data_type(string, default='gaussian'): it currently supports
        ['gaussian', 'fourier'].

        Outputs:
        A: m x n measurement matrix/function handle.
        xt: n x 1 vector, true signal.
        b0: m x 1 vector, measurements.
        At: A n x m matrix/function handle that is the transpose of A.
        """
        if data_type.lower() == 'gaussian':
            # Measurement matrix: real and (optionally) imaginary parts
            # are drawn i.i.d. from N(0, I/2).
            A = (mvnrnd(np.zeros(n), np.eye(n) / 2, m)
                 + is_complex * 1j * mvnrnd(np.zeros(n), np.eye(n) / 2, m))
            self.A = ConvolutionMatrix(A)
            x = (mvnrnd(np.zeros(n), np.eye(n) / 2)
                 + is_complex * 1j * mvnrnd(np.zeros(n), np.eye(n) / 2))
            xt = x.reshape((-1, 1))
            self.b0 = np.abs(A @ xt)  # phaseless measurements

        elif data_type.lower() == 'fourier':
            # Define the Fourier measurement operator: 'A' zero-pads an
            # n-vector to length m and computes its FFT, producing m
            # measurements; 'At' is the corresponding transpose.
            def A(x):
                return np.fft.fft(np.vstack([x, np.zeros((m - n, 1))]),
                                  axis=0)

            def At(x):
                return (m * np.fft.ifft(x, axis=0))[:n]  # transpose of A

            self.A = A
            x = (mvnrnd(np.zeros(n), np.eye(n) / 2)
                 + is_complex * 1j * mvnrnd(np.zeros(n), np.eye(n) / 2))
            xt = x.reshape((-1, 1))
            self.b0 = np.abs(A(xt))  # compute the phaseless measurements

        else:
            raise TypeError('Invalid data_type: %s' % data_type)

        return self.A, xt, self.b0
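A minimal usage sketch; `solver` is a hypothetical instance of the containing class, and `mvnrnd` is assumed to be np.random.multivariate_normal:

# Hypothetical usage sketch: `solver` stands for an instance of the class
# above; mvnrnd is assumed to be np.random.multivariate_normal.
m, n = 128, 32  # number of measurements, signal dimension
A, xt, b0 = solver.build_test_problem(m, n, is_complex=True)
assert b0.shape == (m, 1)  # b0 holds the m phaseless measurements |A @ xt|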
Example No. 2
    def samples(self, data_len=1):
        '''
        mvn.samples(data_len=1)

        Args
        data_len: number of samples

        Returns
        dst: (data_len, data_dim, n_states) when mu is 2-d, or
             (data_len, aug_dim, data_dim, n_states) when mu is 3-d
             (the leading axis is dropped when data_len == 1)
        '''
        dst = None
        if self.mu.ndim == 2:
            dst = zeros((data_len, self.mu.shape[0], self.mu.shape[1]))
            if self.cov.ndim == 2:
                for k in range(self.mu.shape[-1]):
                    dst[:, :, k] = mvnrnd(self.mu[:, k],
                                          self.cov,
                                          size=data_len)
            elif self.cov.ndim == 3:
                for k in range(self.mu.shape[-1]):
                    dst[:, :, k] = mvnrnd(self.mu[:, k],
                                          self.cov[:, :, k],
                                          size=data_len)
            else:
                self._log_error_not_supported()
        elif self.mu.ndim == 3:
            aug_dim, data_dim, n_states = self.mu.shape
            dst = zeros((data_len, aug_dim, data_dim, n_states))
            if self.cov.ndim == 3:
                # covariance shared across the data dimension
                for d in range(data_dim):
                    for k in range(n_states):
                        dst[:, :, d, k] = mvnrnd(self.mu[:, d, k],
                                                 self.cov[:, :, k],
                                                 size=data_len)
            elif self.cov.ndim == 4:
                for d in range(data_dim):
                    for k in range(n_states):
                        dst[:, :, d, k] = mvnrnd(self.mu[:, d, k],
                                                 self.cov[:, :, d, k],
                                                 size=data_len)
            else:
                self._log_error_not_supported()
        elif self.mu.ndim == 1:
            raise ValueError('self.mu.ndim must be >= 2')
        else:
            self._log_error_not_supported()
        if data_len == 1:
            dst = dst[0]
        return dst
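A short usage sketch, assuming `mvn` is an instance of the class above whose mu and cov attributes can be set directly (an assumption; the real constructor is not shown):

import numpy as np
data_dim, n_states = 2, 3
mvn.mu = np.random.randn(data_dim, n_states)   # one mean vector per state
mvn.cov = np.eye(data_dim)                     # covariance shared by states
dst = mvn.samples(data_len=10)                 # shape (10, data_dim, n_states)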
Example No. 3
def NormalScenarios(mu_, sigma2_, j_, method='Riccati', d=None):
    # This function generates antithetic normal simulations whose moments
    # match the theoretical moments
    #  INPUTS
    #   mu_     : [vector] (n_ x 1) vector of means
    #   sigma2_ : [matrix] (n_ x n_) dispersion matrix
    #   j_      : [scalar] (even) number of simulations
    #   method  : [string] Riccati (default), CPCA, PCA, Cholesky-LDL, Gram-Schmidt
    #   d       : [matrix] (k_ x n_) full-rank constraints matrix for CPCA
    #  OUTPUTS
    #   X_      : [matrix] (n_ x j_) normal matrix of simulations
    #   p       : [vector] (1 x j_) vector of Flexible Probabilities
    # NOTE: Always use a large number of simulations j_ >> n_ to ensure that
    # NormalScenarios works properly

    # For details on the exercise, see here .
    ## Code

    n_ = max(mu_.shape)  # number of variables

    # Step 1. normal MC scenarios
    n_scenarios = int(j_ / 2)
    X_tilde = mvnrnd(zeros(n_), eye(n_), n_scenarios)
    X_tilde = X_tilde.T

    # Step 2. Antithetic (mean = 0)
    X = r_['-1', X_tilde, -X_tilde]
    p = ones((1, j_)) / j_  # flat probabilities

    # Step 3. Twisted MC scenarios (moment matching)
    X_ = TwistScenMomMatch(X, p, mu_, sigma2_, method, d)
    return X_, p
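A minimal, self-contained check of Step 2's antithetic construction (using numpy directly rather than the module's mvnrnd alias): pairing every scenario with its negation makes the sample mean vanish.

import numpy as np
n_, j_ = 4, 1000
X_tilde = np.random.multivariate_normal(np.zeros(n_), np.eye(n_), j_ // 2).T
X = np.concatenate([X_tilde, -X_tilde], axis=1)  # same as r_['-1', ...]
assert np.allclose(X.mean(axis=1), 0.0)          # antithetic pairs cancel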
Example No. 4
 def samples(self, data_len, pi=None, by_posterior=True):
     '''
     zs.samples(data_len, pi=None, by_posterior=True)
     @args
     data_len: data length
     pi: array(n_states), probabilities; must sum to 1
     by_posterior: default True
     @returns
     z: array(aug_dim, n_states, data_len)
     s: array(data_len)
     '''
     # --- sample S
     s = self.s.samples(data_len, pi)
     # --- sample Z
     if by_posterior:
         z = ones((self.aug_dim, self.n_states, data_len)) * nan
         z[:-1, :, :] = 0
         z[-1, :, :] = 1
         mu_len = 0 if self.z_mu is None else self.z_mu.shape[-1]
         t_max = data_len if data_len < mu_len else mu_len
         for t in range(t_max):
             k = s[t]
             z[:, k, t] = mvnrnd(self.z_mu[:, k, t], self.z_cov[:, :, k])
         t_rest = data_len - t_max
         if t_rest > 0:
             z[:, :, t_max:] = self.prior.samples(t_rest).transpose(1, 2, 0)
     else:
         z = self.prior.samples(data_len)[:, :, 0].T
     return z, s
Example No. 5
 def samples(self, data_len, by_posterior=True):
     lamb, r, pi = self.theta.samples(by_posterior=by_posterior)
     z, s = self.zs.samples(data_len, pi, by_posterior)
     y = ones((self.data_dim, data_len)) * nan
     inv_r = inv(r.transpose(2, 0, 1)).transpose(1, 2, 0)
     for t in range(data_len):
         k = s[t]
         mu = einsum('ld,l->d', lamb[:, :, k], z[:, k, t])
         cov = inv_r[:, :, k]
         y[:, t] = mvnrnd(mu, cov)
     return y, z, s, [lamb, r, inv_r, pi]
Example No. 6
 def init_expt(self, data_len, obs=None):
     '''
     zs.init_expt(data_len, obs=None)
     '''
     if self.expt_s is not None:
         return
     self.s.init_expt(data_len, obs)
     m, c = self.prior.mu[:, 0], self.prior.cov[:, :, 0]
     n_states = self.n_states
     self.z_mu = mvnrnd(m, c, size=(data_len, n_states)).transpose(2, 1, 0)
     self.z_cov = tile(self.prior.cov, (n_states))
     self.set_expt(self.s.expt, self.z_mu, self.z_cov)
Example No. 7
def RandNormalInverseWishart(mu_0, t_0, sigma2_0, nu_0, j_):
    # Generates a multivariate i.i.d. sample of length j_ from the
    # normal-inverse-Wishart distribution:
    #  INPUTS
    #   mu_0      : [vector]
    #   t_0       : [scalar]
    #   sigma2_0  : [matrix]
    #   nu_0      : [scalar]
    #   j_        : [scalar]
    #  OUTPUTS
    #   Mu        : [vector]
    #   Sigma2    : [matrix]
    #   InvSigma2 : [matrix]
    #  NOTE
    #   Mu|sigma2   ~ N(mu_0,sigma2/t_0)
    #   inv(Sigma2) ~ W(nu_0,inv(sigma2_0)/nu_0)

    # For details on the exercise, see here .

    ## Code
    if isinstance(mu_0, float):
        n_ = 1
        mu_0 = array([mu_0])
    else:
        n_ = len(mu_0)

    if sigma2_0.ndim == 1:
        sigma2_0 = sigma2_0.reshape(1, -1)

    invsigma2_0 = solve(sigma2_0,
                        eye(sigma2_0.shape[0]))  # inverse of sigma2_0
    phi = invsigma2_0 / nu_0  # Wishart scale matrix

    Mu = zeros((n_, j_))
    Sigma2 = zeros((n_, n_, j_))
    InvSigma2 = zeros((n_, n_, j_))

    for j in range(j_):
        # simulate inv(sigma2)
        InvSigma2[:, :, j] = wishart.rvs(nu_0, phi)
        # compute sigma2
        Sigma2[:, :, j] = solve(InvSigma2[:, :, j], eye(n_))
        # simulate mu
        Mu[:, j] = mvnrnd(mu_0, Sigma2[:, :, j] / t_0)
    return Mu, Sigma2, InvSigma2
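A hedged usage sketch for the univariate case, assuming scipy's wishart and a numpy-based mvnrnd are the imports used above:

import numpy as np
# Draw 1000 normal-inverse-Wishart samples around mu_0 = 0.1, sigma2_0 = 0.04
Mu, Sigma2, InvSigma2 = RandNormalInverseWishart(
    mu_0=0.1, t_0=10, sigma2_0=np.array([[0.04]]), nu_0=20, j_=1000)
print(Mu.mean(), Sigma2.mean())  # should land near mu_0 and sigma2_0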
Example No. 8
 def samples(self, data_len, by_posterior=True):
      '''
      z.samples(data_len)
      data_len: number of samples (e.g. 100)
      @return
      sample_z: (aug_dim, data_len)
      '''
     if by_posterior:
         sample_z = ones((self.aug_dim, data_len)) * nan
         mu_len = 0 if self.mu is None else self.mu.shape[-1]
         t_max = data_len if data_len < mu_len else mu_len
         for t in range(t_max):
             sample_z[:, t] = mvnrnd(self.mu[:, t], self.cov)
         t_rest = data_len - t_max
         if t_rest > 0:
             sample_z[:, t_max:] = self.prior.samples(t_rest)[:, :, 0].T
     else:
         sample_z = self.prior.samples(data_len)[:, :, 0].T
     return sample_z
Example No. 9
 def sample_mu(self, data_len=1, R=None):
     '''
     mu: (data_len, data_dim, n_states)
     '''
     if R is None:
         R = self.sample_R()[0]
     mu = zeros((data_len, self.data_dim, self.n_states))
     for k in range(self.n_states):
         # cov = inv(self.beta[k] * R[:, :, k])
         cov = inv(R[:, :, k])
         try:
             mu[:, :, k] = mvnrnd(self.mu[:, k], cov, size=data_len)
         except RuntimeWarning as e:
              logger.warning('%s %d. not sampled' % (e, mu.shape[0]))
             mu[:, :, k] = self.mu[newaxis, :, k]
         except Exception as e:
             logger.error('%s %d. not sampled.' % (e, mu.shape[0]))
             mu[:, :, k] = self.mu[newaxis, :, k]
     return mu
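A hedged sketch of calling sample_mu with an explicit precision stack; `model` is a hypothetical fitted instance exposing data_dim and n_states:

import numpy as np
# R stacks one precision matrix per state: (data_dim, data_dim, n_states)
R = np.stack([np.eye(model.data_dim)] * model.n_states, axis=-1)
mu = model.sample_mu(data_len=5, R=R)  # shape (5, data_dim, n_states)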
Example No. 10
def SimQuadNFP(a, b, c, mu, sigma2, j_):
    # This function generates QuadN simulations with Flexible Probabilities
    # whose first moment matches the theoretical moment
    #  INPUTS
    #   a      : [scalar] parameter
    #   b      : [vector] (n_ x 1) parameter
    #   c      : [matrix] (n_ x n_) symmetric matrix parameter
    #   mu     : [vector] (n_ x 1) normal expectations
    #   sigma2 : [matrix] (n_ x n_) normal covariances
    #   j_     : [scalar] number of simulations
    #  OUTPUTS
    #   Y      : [matrix] (1 x j_) MC scenarios
    #   p_     : [vector] (1 x j_) twisted flexible probabilities

    # For details on the exercise, see here .

    ## Code

    n_ = mu.shape[0]  # number of variables

    # Step 1. Cholesky
    l = cholesky(sigma2)

    # Step 2. Eigen-decomposition
    Diag_lamda, _ = eig(l.T @ c @ l)

    # Step 3. Change of variables
    gamma = l.T @ (b + 2 * c @ mu)

    # Step 4. MC scenarios with Flexible Probabilities
    Z = mvnrnd(zeros(n_), eye(n_), j_)
    Z = Z.T
    p = ones((1, j_)) / j_
    Y = a + b.T @ mu + mu.T @ c @ mu + gamma.T @ Z + Diag_lamda.reshape(
        1, -1) @ (Z**2)

    # Step 5. Moment-matching
    mu_ = a + b.T @ mu + mu.T @ c @ mu + trace(c @ sigma2)
    p_ = TwistFPMomMatch(Y, p, mu_)
    return Y, p_
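A minimal check, assuming TwistFPMomMatch is importable alongside the function: the twisted probabilities p_ should make the p_-weighted sample mean of Y match the theoretical first moment mu_ from Step 5.

import numpy as np
n_, j_ = 3, 10000
a, b, c = 0.5, np.ones((n_, 1)), np.eye(n_)
mu, sigma2 = np.zeros((n_, 1)), np.eye(n_)
Y, p_ = SimQuadNFP(a, b, c, mu, sigma2, j_)
mu_theory = a + b.T @ mu + mu.T @ c @ mu + np.trace(c @ sigma2)
print((Y @ p_.T).item(), mu_theory.item())  # the two should nearly coincide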
Example No. 11
    def sample(self, S):
        """Sample the upper-level policy given the context features.

        Sample distribution \pi(w | s) = N(w | a + As, sigma)

        Parameters
        ----------

        S: numpy.ndarray, shape (n_samples, n_context_features)
            Context features

        Returns
        -------

        W: numpy.ndarray, shape (n_samples, n_lower_policy_weights)
           Sampled lower-policy parameters.
        """
        W = np.zeros((S.shape[0], self.n_lower_policy_weights))
        mus = self.mean(S)
        for sample in range(S.shape[0]):
            W[sample, :] = mvnrnd(mus[sample, :], self.sigma)
        return W
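A hedged usage sketch; `policy` is a hypothetical instance whose mean() maps contexts to means as documented above:

import numpy as np
S = np.random.randn(10, 4)  # 10 contexts with 4 features each
W = policy.sample(S)        # one weight vector per context
print(W.shape)              # (10, policy.n_lower_policy_weights)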
Example No. 12
    def samples(self, data_len, by_posterior=True):
        '''
        y, z, [lamb, prec, cov] = fa.samples(data_len, by_posterior=True)

        Returns
        y: np.array(data_dim, data_len).
        z: np.array(aug_dim, data_len).
        lamb: np.array(aug_dim, data_dim, n_states).
        prec: np.array(data_dim, data_dim, n_states).
        cov: np.array(data_dim, data_dim, n_states).
        * aug_dim = fa_dim + 1
        * n_states = 1
        '''
        z = self.z.samples(data_len, by_posterior)
        # lamb, prec = self.theta.samples(1, by_posterior)
        lamb, prec = self.theta.expectations(1, by_posterior)
        y = zeros((self.data_dim, data_len))
        mu = einsum('ld,lt->dt', lamb[:, :, 0], z)
        cov = inv(prec[:, :, 0])
        for t in range(data_len):
            y[:, t] = mvnrnd(mu[:, t], cov)
        cov = cov[:, :, newaxis]
        return y, z, [lamb, prec, cov]
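A hedged sketch, assuming `fa` is a fitted factor-analysis model wired with the z and theta sub-models used above:

y, z, (lamb, prec, cov) = fa.samples(data_len=100)
print(y.shape, z.shape)  # (data_dim, 100), (aug_dim, 100)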
Example No. 13
import matplotlib.pyplot as plt

plt.style.use('seaborn')

# initialize variables

i_ = 5
t_ = 30
mu = rand(i_, 1)
l = rand(i_, i_) - 0.5
sigma2 = l @ l.T
# -

# ## Generate normal sample

Epsi = mvnrnd(mu.flatten(), sigma2, t_).T

# ## Estimate sample covariance

# mu_hat = mean(Epsi,2)
sigma2_hat = cov(Epsi, ddof=0)

# ## Perform shrinkage of dispersion parameter

# +
# target
sigma_target = trace(sigma2_hat) / i_ * eye(i_)

# compute optimal weight
num = 0
for t in range(t_):
Example No. 14
import matplotlib.pyplot as plt
from matplotlib.pyplot import scatter, ylabel, xlabel

plt.style.use('seaborn')

from ARPM_utils import save_plot

# parameters
mu = array([[0.8], [0.8]])
sigma2 = array([[1.2, 0], [0, 1]])
j_ = 40000  # number of simulations
# -

# ## Generate the bivariate lognormal simulations

X = mvnrnd(mu.flatten(), sigma2, j_)
Y = exp(X)

# ## Select an equispaced grid and compute the lognormal pdf

# +
x1 = arange(0.01, 7, 0.1)
x2 = arange(0.01, 7, 0.1)
X1, X2 = np.meshgrid(x1, x2)
lX1 = log(X1)
lX2 = log(X2)
z = r_[lX2.flatten()[np.newaxis, ...], lX1.flatten()[np.newaxis, ...]]
s = len(x1) * len(x2)
f = zeros(s)
for i in range(s):
    f[i] = exp(-1 / 2 *
Example No. 15
i_ = 50  # dimension of the covariance matrix
t_vector = i_ * arange(1, 11)  # different lengths of the time series
j_ = 50  # simulations for each time series
mu = zeros(i_)
sigma2 = eye(i_)
# -

# ## Compute sample eigenvalues from time series of different lengths

lambda2_hat = zeros((len(t_vector), i_))
for k in range(len(t_vector)):
    t_ = t_vector[k]
    lambda2_tmp = 0
    for j in range(j_):
        # simulate the time series
        Epsi = mvnrnd(mu, sigma2, t_).T
        # compute sample covariance
        sigma2_hat = cov(Epsi, ddof=1)
        # compute eigenvalues
        l, _ = eig(sigma2_hat)
        l, Index = sort(l)[::-1], argsort(l)[::-1]
        lambda2_tmp = lambda2_tmp + l

    # average of eigenvalues across different scenarios
    lambda2_tmp = lambda2_tmp / j_
    # store the resulting average eigenvalues
    lambda2_hat[k, :] = lambda2_tmp

# ## Create figure

# Display surface
Example No. 16
r = 0.02  # risk-free rate

stepsize = len(nstep)
R2 = zeros((stepsize, 1))
for n in range(stepsize):

    # ## Generate a sample from the joint distribution of the factor and the residuals

    mu_Z_U = zeros((k_ + nstep[n], 1))  # expectation
    sig_Z_U = zeros((k_, nstep[n]))  # systematic condition
    d = rand(nstep[n], 1)  # residuals standard deviations
    sig2_U = np.diagflat(d * d)  # idiosyncratic condition
    sig2_Z_U = r_[r_['-1', array([[sig2_Z_]]), sig_Z_U],
                  r_['-1', sig_Z_U.T, sig2_U]]  # covariance

    Z_U = mvnrnd(mu_Z_U.flatten(), sig2_Z_U, j_)
    Z_U = Z_U.T  # ensure Z_U is n_ x nsim

    Z_ = Z_U[0]  # factor sample

    # ## Compute the P&L's: P = alpha + beta@Z_ + U

    alpha = rand(nstep[n], 1)  # shift parameter (P&L's expectation)
    beta = rand(nstep[n], k_)  # loadings
    i_n = eye(nstep[n])
    P = tile(alpha, (1, j_)) + r_['-1', beta, i_n] @ Z_U  # sample
    sig2_P = beta @ array(
        [[sig2_Z_]]) @ beta.T + sig2_U  # (low-rank diagonal) covariance

    # ## Compute the sample of the factor-replicating portfolio
Example No. 17
# ## Compute the corresponding mean and covariance

x = log(Data.Prices[indexes, :])
dx = diff(x, 1, 1)
mu = mean(dx, 1)
sigma2 = cov(dx)

# ## Simulate j_=10000 Monte Carlo scenarios for the risk drivers at the horizon (20 days ahead)
# ## by using the fact that the risk drivers at the horizon are normally distributed

j_ = 10000
x_tnow = log(Data.Prices[indexes, -1])
mu_tau = tau * mu
sigma2_tau = tau * sigma2
X_thor = tile(x_tnow[..., np.newaxis],
              (1, j_)) + mvnrnd(mu_tau, sigma2_tau, j_).T

# ## Compute the j_ Monte Carlo scenarios for the stocks' values at the horizon
# ## and the corresponding P&L's scenarios

v_tnow = Data.Prices[indexes, -1]
V_thor = exp(
    tile(log(v_tnow[..., np.newaxis]), (1, j_)) + X_thor -
    tile(x_tnow[..., np.newaxis], (1, j_)))
PL = V_thor - tile(v_tnow[..., np.newaxis], (1, j_))

# ## Save the data in db_StocksNormal

vars_to_save = {
    varname: var
    for varname, var in locals().items()
Example No. 18
# ## Generate scenarios for the estimators and their losses

# +
rho = 0.999  # correlation
i_ = 15  # number of invariants
mu = randn(i_, 1)  # true mean
sigma2 = 5 * (rho * ones((i_, i_)) + (1 - rho) * eye(i_))  # true covariance
t_ = 20  # len of time series
j_ = 10**4  # number of simulations

M = zeros((i_, j_))
L_M = zeros((1, j_))
Sigma2 = zeros((i_, i_, j_))
L_Sigma2 = zeros((1, j_))
for j in range(j_):
    I = mvnrnd(mu.flatten(), sigma2, t_).T  # i_ x t_
    # compute the loss of sample mean
    M[:, j] = mean(I, 1)
    L_M[0, j] = npsum((mu - M[:, [j]])**2)
    # compute the loss of sample covariance
    Sigma2[:, :, j] = cov(I, ddof=1)
    L_Sigma2[0, j] = linalgnorm(sigma2 - Sigma2[:, :, j], ord='fro')**2
# -

# ## Compute error, bias and inefficiency of both estimators

# sample mean
E_M = mean(M, 1)
er_M = mean(L_M)
ineff2_M = mean(npsum((M - tile(E_M[..., np.newaxis], (1, j_)))**2, axis=0))
bias2_M = er_M - ineff2_M
Example No. 19
# initialize variables
Y = zeros((j_, t_sim + 1))
T = zeros((j_, t_sim + 1))
dT = zeros((j_, t_sim))
dX = zeros((j_, t_sim))
Y[:, [0]] = tile(y[[0],-1], (j_, 1))

# initialize inputs for stoch. time function
inp = namedtuple('inp','kappa s2_ eta S2_t z')
inp.kappa = kappa
inp.s2_ = y_
inp.eta = eta

# Euler scheme
for t in range(t_sim):
    W = mvnrnd([0, 0], array([[1, rho], [rho, 1]]), j_)
    inp.S2_t = Y[:, t]
    inp.z = W[:, 0]
    dT[:, t] = StochTime(dt, 'Heston', inp)  # stochastic time
    Y[:, t + 1] = dT[:, t] / dt  # variance process
    T[:, t + 1] = T[:, t] + dT[:, t]  # time-change process
    dX[:, t] = mu * dt + sqrt(dT[:, t]) * W[:, 1]

X = x[:, -1] + r_['-1', zeros((j_, 1)), cumsum(dX, 1)]
# -

# ## Generate the figure

# +
s_ = 1
Example No. 20
def BGCP(dense_tensor, sparse_tensor, init, rank, maxiter1, maxiter2):
    """Bayesian Gaussian CP (BGCP) decomposition."""
    dim1, dim2, dim3 = sparse_tensor.shape
    binary_tensor = np.zeros((dim1, dim2, dim3))
    dim = np.array([dim1, dim2, dim3])
    pos = np.where((dense_tensor != 0) & (sparse_tensor == 0))
    position = np.where(sparse_tensor != 0)
    binary_tensor[position] = 1
    U = init["U"]
    V = init["V"]
    X = init["X"]
    beta0 = 1
    nu0 = rank
    mu0 = np.zeros((rank))
    W0 = np.eye(rank)
    tau = 1
    alpha = 1e-5
    beta = 1e-5
    U_plus = np.zeros((dim1, rank))
    V_plus = np.zeros((dim2, rank))
    X_plus = np.zeros((dim3, rank))
    tensor_hat_plus = np.zeros((dim1, dim2, dim3))
    for iters in range(maxiter1):
        for order in range(dim.shape[0]):
            if order == 0:
                mat = U.copy()
            elif order == 1:
                mat = V.copy()
            else:
                mat = X.copy()
            mat_bar = np.mean(mat, axis=0)
            var_mu_hyper = (dim[order] * mat_bar + beta0 * mu0) / (dim[order] +
                                                                   beta0)
            var_W_hyper = inv(
                inv(W0) + cov_mat(mat) + dim[order] * beta0 /
                (dim[order] + beta0) * np.outer(mat_bar - mu0, mat_bar - mu0))
            var_Lambda_hyper = wishart(df=dim[order] + nu0,
                                       scale=var_W_hyper,
                                       seed=None).rvs()
            var_mu_hyper = mvnrnd(var_mu_hyper,
                                  inv((dim[order] + beta0) * var_Lambda_hyper))
            if order == 0:
                var1 = kr_prod(X, V).T
            elif order == 1:
                var1 = kr_prod(X, U).T
            else:
                var1 = kr_prod(V, U).T
            var2 = kr_prod(var1, var1)
            var3 = (tau * np.matmul(var2,
                                    ten2mat(binary_tensor, order).T).reshape(
                                        [rank, rank, dim[order]]) +
                    np.dstack([var_Lambda_hyper] * dim[order]))
            var4 = (tau * np.matmul(var1,
                                    ten2mat(sparse_tensor, order).T) +
                    np.dstack([np.matmul(var_Lambda_hyper, var_mu_hyper)] *
                              dim[order])[0, :, :])
            for i in range(dim[order]):
                var_Lambda = var3[:, :, i]
                inv_var_Lambda = inv((var_Lambda + var_Lambda.T) / 2)
                vec = mvnrnd(np.matmul(inv_var_Lambda, var4[:, i]),
                             inv_var_Lambda)
                if order == 0:
                    U[i, :] = vec.copy()
                elif order == 1:
                    V[i, :] = vec.copy()
                else:
                    X[i, :] = vec.copy()
        if iters + 1 > maxiter1 - maxiter2:
            U_plus += U
            V_plus += V
            X_plus += X

        tensor_hat = cp_combine(U, V, X)
        if iters + 1 > maxiter1 - maxiter2:
            tensor_hat_plus += tensor_hat
        rmse = np.sqrt(
            np.sum((dense_tensor[pos] - tensor_hat[pos])**2) /
            dense_tensor[pos].shape[0])

        var_alpha = alpha + 0.5 * sparse_tensor[position].shape[0]
        error = sparse_tensor - tensor_hat
        var_beta = beta + 0.5 * np.sum(error[position]**2)
        tau = np.random.gamma(var_alpha, 1 / var_beta)

        if (iters + 1) % 200 == 0 and iters < maxiter1 - maxiter2:
            print('Iter: {}'.format(iters + 1))
            print('RMSE: {:.6}'.format(rmse))
            print()
    U = U_plus / maxiter2
    V = V_plus / maxiter2
    X = X_plus / maxiter2
    tensor_hat = tensor_hat_plus / maxiter2
    final_mae = np.sum(np.abs(dense_tensor[pos] * 5 - tensor_hat[pos] *
                              5)) / dense_tensor[pos].shape[0]
    final_rmse = np.sqrt(
        np.sum((dense_tensor[pos] * 5 - tensor_hat[pos] * 5)**2) /
        dense_tensor[pos].shape[0])
    print('RMSE: {:.6}'.format(final_rmse))
    print('MAE: {:.6}'.format(final_mae))
    print()
    return tensor_hat  # average reconstruction over the last maxiter2 draws
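A hedged usage sketch: impute a synthetic tensor with roughly 30% missing entries. The helpers cp_combine, kr_prod, ten2mat and cov_mat from the same module are assumed to be available.

import numpy as np
dim1, dim2, dim3, rank = 30, 20, 10, 5
dense_tensor = np.random.rand(dim1, dim2, dim3) + 0.1  # strictly positive
mask = np.random.rand(dim1, dim2, dim3) > 0.3          # keep ~70% of entries
sparse_tensor = dense_tensor * mask
init = {"U": 0.1 * np.random.randn(dim1, rank),
        "V": 0.1 * np.random.randn(dim2, rank),
        "X": 0.1 * np.random.randn(dim3, rank)}
BGCP(dense_tensor, sparse_tensor, init, rank, maxiter1=1000, maxiter2=500)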
Example No. 21
# -

# ## Generate simulations

# +
W_11 = zeros((1, j_))
W_22 = zeros((1, j_))
W_12 = zeros((1, j_))
vec_W = zeros((4, j_))
dets = zeros((1, j_))
traces = zeros((1, j_))

sig2 = np.diagflat(sigvec) @ array([[1, rho], [rho, 1]]) @ np.diagflat(sigvec)

for j in range(j_):
    X = mvnrnd(zeros(2), sig2, nu).T
    W = X @ X.T

    dets[0, j] = det(W)
    traces[0, j] = trace(W)

    W_11[0, j] = W[0, 0]
    W_22[0, j] = W[1, 1]
    W_12[0, j] = W[0, 1]

    vec_W[:, [j]] = reshape(W, (4, 1))

# expected values of W_11 and W_12
E_11 = nu * sig2[0, 0]
E_12 = nu * sig2[0, 1]
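A quick sanity check that can follow the loop: for W = X @ X.T built from nu independent N(0, sig2) draws, E[W_ij] = nu * sig2[i, j], so the simulation averages should sit close to E_11 and E_12.

print(W_11.mean(), E_11)  # sample average of W_11 vs nu * sig2[0, 0]
print(W_12.mean(), E_12)  # sample average of W_12 vs nu * sig2[0, 1]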
Example No. 22
n_ = 250
j_ = 500
r_rf = 0.05
a_p = 0.7
b_p = 1
a_SDF = 0
b_SDF = 0.9
rho = 0.7
# -

# ## Generate the payoff matrix

# +
# Generate the normal vector
c2 = rho * ones((n_, n_)) + (1 - rho) * eye(n_)  # correlation matrix
X = mvnrnd(zeros(n_), c2, j_).T

# Generate the payoffs
V_payoff = ones((n_, j_))
V_payoff[1] = exp(X[1]) / (sqrt(exp(1) - 1) * exp(0.5))
V_payoff[2::2, :] = (exp(X[2::2, :]) - exp(0.5) /
                     (sqrt(exp(1) - 1)) * exp(0.5))
V_payoff[3::2, :] = (-exp(-X[3::2, :]) + exp(0.5) /
                     (sqrt(exp(1) - 1)) * exp(0.5))
V_payoff[2:, :] = diagflat(uniform.rvs(
    loc=0.8, scale=0.2, size=(n_ - 2, 1))) @ V_payoff[2:, :]  # rescaling
V_payoff[2:, :] = V_payoff[2:, :] + tile(
    uniform.rvs(loc=-0.3, scale=1, size=(n_ - 2, 1)), (1, j_))  # shift
# -

# ## Compute the probabilities
Example No. 23
from OrdLeastSquareFPNReg import OrdLeastSquareFPNReg

# input parameters
n_ = 6  # target dimension
k_ = 3  # number of factors
t_ = 1000  # time series length
p = ones((1, t_)) / t_  # Flexible Probabilities
w = rand(1, t_)  # weights

mu = 5 * ones(n_ + k_)

c = rand(n_ + k_, n_ + k_)
sig2 = c @ c.T

XZ = mvnrnd(mu, sig2, size=t_)
XZ = XZ.T  # observations of target variables and factors
# -

# ## Compute MLSFP estimators, for given weights w

X = XZ[:n_, :]
Z = XZ[n_:n_ + k_, :]
pw = p * w
alpha, beta, *_ = OrdLeastSquareFPNReg(X, Z, pw / npsum(pw))

# ## Compute alternative compact formulation

Z_ = r_[ones((1, t_)), Z]
XZ_ = r_[X, Z_]
s2_XZ_ = XZ_ @ diagflat(pw) @ XZ_.T