Example #1
    def test_from_abc(self):
        """Test init from abc."""

        nstocks = 2
        amat = np.eye(nstocks)
        bmat = np.eye(nstocks)
        cmat = np.eye(nstocks)
        param = ParamStandard.from_abc(amat=amat, bmat=bmat, cmat=cmat)
        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        nstocks = 2
        alpha, beta = .09, .81
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * alpha**.5
        bmat = np.eye(nstocks) * beta**.5
        target = np.eye(nstocks)
        # Choose intercept to normalize unconditional variance to one
        cmat = ParamStandard.find_cmat(amat=amat, bmat=bmat, target=target)

        param = ParamStandard.from_abc(amat=amat, bmat=bmat, cmat=cmat)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)
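The comment about normalizing the unconditional variance refers to the BEKK(1,1) fixed point H = C*C' + A*H*A' + B*H*B'. Setting H equal to the target and solving for the intercept gives C as the lower Cholesky factor of H - A*H*A' - B*H*B', which is what find_cmat computes (see test_find_cmat further down this page). A minimal self-contained sketch of that calculation (the helper name is made up):

import numpy as np
import scipy.linalg as scl

def find_cmat_sketch(amat, bmat, target):
    # Intercept implied by variance targeting: C C' = H - A H A' - B H B'
    ccmat = target - amat.dot(target).dot(amat.T) - bmat.dot(target).dot(bmat.T)
    return scl.cholesky(ccmat, lower=True)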
Example #2
    def test_weights(self):
        """Test weighting function.

        """
        nstocks = 6
        nobs = 10
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        target = np.eye(nstocks)

        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
        innov, hvar = simulate_bekk(param, nobs=nobs)

        res = BEKKResults(innov=innov, hvar=hvar)

        weights = res.weights()

        npt.assert_array_equal(weights, np.ones_like(innov) / nstocks)

        weights = res.weights(kind='equal')

        npt.assert_array_equal(weights, np.ones_like(innov) / nstocks)

        weights = res.weights(kind='minvar')

        for hvari, wi in zip(hvar, weights):
            hinv = np.linalg.solve(hvari, np.ones(nstocks))
            npt.assert_array_almost_equal(wi, hinv / hinv.sum())
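The 'minvar' branch checks the textbook minimum-variance weights w = H^{-1}*1 / (1'*H^{-1}*1), which is exactly what the solve-and-normalize loop verifies. A tiny standalone illustration with a made-up covariance matrix:

import numpy as np

hvar = np.array([[1.0, 0.3],
                 [0.3, 2.0]])        # hypothetical covariance matrix
ones = np.ones(hvar.shape[0])
hinv = np.linalg.solve(hvar, ones)   # H^{-1} times a vector of ones
weights = hinv / hinv.sum()          # normalize so the weights sum to one
print(weights, weights.sum())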
Example #3
def try_standard():
    """Try simulating and estimating standard BEKK.

    """
    use_target = False
    restriction = 'full'
    nstocks = 6
    nobs = 2000
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

#    plot_data(innov, hvar_true)

    bekk = BEKK(innov)
    result = bekk.estimate(param_start=param_true, use_target=use_target,
                           model='standard', method='SLSQP',
                           restriction=restriction)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)
Example #4
def try_bekk():
    """Simulate and estimate BEKK model.

    """
    nstocks = 2
    use_target = True
    nobs = 2000
    restriction = 'full'
    simulate = True

    # A, B, C - n x n matrices
    A = np.eye(nstocks) * .09**.5
    B = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)

    param_true = ParamStandard.from_target(amat=A, bmat=B, target=target)
    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    print('True parameter:\n', theta_true)
    # Data file
    innov_file = '../data/innovations.npy'

    if simulate:
        # Simulate data
        innov, hvar_true = simulate_bekk(param_true,
                                         nobs=nobs,
                                         distr='skewt',
                                         degf=30)
        np.savetxt(innov_file[:-4] + '.csv', innov, delimiter=",")

    else:
        # Download real data
        download_data(innov_file=innov_file, nstocks=nstocks, nobs=nobs)
        # Load data from the drive
        innov = np.load(innov_file)

    # Estimate parameters
    params = []
    for cython in [True, False]:
        time_start = time.time()
        # Initialize the object
        bekk = BEKK(innov)
        bekk.estimate(param_start=param_true,
                      restriction=restriction,
                      use_target=use_target,
                      method='SLSQP',
                      cython=cython)

        print('Cython: ', cython)
        theta_final = bekk.param_final.get_theta(restriction=restriction,
                                                 use_target=use_target)
        print(theta_final)
        params.append(theta_final)
        print('Time elapsed %.2f seconds\n' % (time.time() - time_start))

    print('\nNorm difference between the estimates: %.4f' %
          np.linalg.norm(params[0] - params[1]))
    return bekk
Example #5
    def test_init(self):
        """Test init."""

        nstocks = 2
        param = ParamStandard(nstocks)
        self.assertIsInstance(param.amat, np.ndarray)
        self.assertIsInstance(param.bmat, np.ndarray)
        self.assertIsInstance(param.cmat, np.ndarray)
        self.assertEqual(param.amat.shape, (nstocks, nstocks))
        self.assertEqual(param.bmat.shape, (nstocks, nstocks))
        self.assertEqual(param.cmat.shape, (nstocks, nstocks))
Example #6
    def test_find_stationary_var(self):
        """Test find stationary variance matrix."""

        nstocks = 2
        alpha, beta = .09, .5
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * alpha**.5
        bmat = np.eye(nstocks) * beta**.5
        target = np.eye(nstocks)
        # Choose intercept to normalize unconditional variance to one
        cmat = ParamStandard.find_cmat(amat=amat, bmat=bmat, target=target)
        param = ParamStandard.from_abc(amat=amat, bmat=bmat, cmat=cmat)
        hvar = param.get_uvar()

        npt.assert_array_almost_equal(hvar, target)

        hvar = ParamStandard.find_stationary_var(amat=amat, bmat=bmat,
                                                 cmat=cmat)

        npt.assert_array_almost_equal(hvar, target)
        npt.assert_array_equal(hvar, hvar.transpose())
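Presumably find_stationary_var solves the same fixed point H = C*C' + A*H*A' + B*H*B' directly from (A, B, C). One standard way to do that, sketched here with a made-up helper name, is to vectorize the equation and solve the resulting linear system:

import numpy as np

def stationary_var_sketch(amat, bmat, cmat):
    # vec(A H A') = kron(A, A) vec(H), so vec(H) solves a linear system.
    nstocks = amat.shape[0]
    ccmat = cmat.dot(cmat.T)
    kron = np.kron(amat, amat) + np.kron(bmat, bmat)
    vec_h = np.linalg.solve(np.eye(nstocks**2) - kron, ccmat.ravel())
    return vec_h.reshape(nstocks, nstocks)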
Example #7
def try_bekk():
    """Simulate and estimate BEKK model.

    """
    nstocks = 2
    use_target = True
    nobs = 2000
    restriction = 'full'
    simulate = True

    # A, B, C - n x n matrices
    A = np.eye(nstocks) * .09**.5
    B = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)

    param_true = ParamStandard.from_target(amat=A, bmat=B, target=target)
    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    print('True parameter:\n', theta_true)
    # Data file
    innov_file = '../data/innovations.npy'

    if simulate:
        # Simulate data
        innov, hvar_true = simulate_bekk(param_true, nobs=nobs,
                                         distr='skewt', degf=30)
        np.savetxt(innov_file[:-4] + '.csv', innov, delimiter=",")

    else:
        # Download real data
        download_data(innov_file=innov_file, nstocks=nstocks, nobs=nobs)
        # Load data from the drive
        innov = np.load(innov_file)

    # Estimate parameters
    params = []
    for cython in [True, False]:
        time_start = time.time()
        # Initialize the object
        bekk = BEKK(innov)
        bekk.estimate(param_start=param_true, restriction=restriction,
                      use_target=use_target, method='SLSQP', cython=cython)

        print('Cython: ', cython)
        theta_final = bekk.param_final.get_theta(restriction=restriction,
                                                 use_target=use_target)
        print(theta_final)
        params.append(theta_final)
        print('Time elapsed %.2f seconds\n' % (time.time() - time_start))

    print('\nNorm difference between the estimates: %.4f'
          % np.linalg.norm(params[0] - params[1]))
    return bekk
Example #8
    def test_from_target(self):
        """Test init from abc."""

        nstocks = 2
        target = np.eye(nstocks)*.5

        param = ParamStandard.from_target(target=target)
        param_default = ParamStandard(nstocks)
        cmat = ParamStandard.find_cmat(amat=param_default.amat,
                                    bmat=param_default.bmat, target=target)
        param_default = ParamStandard.from_abc(amat=param_default.amat,
                                            bmat=param_default.bmat, cmat=cmat)

        npt.assert_array_equal(param.amat, param_default.amat)
        npt.assert_array_equal(param.bmat, param_default.bmat)
        npt.assert_array_equal(param.cmat, cmat)

        amat = np.eye(nstocks)*.1
        bmat = np.eye(nstocks)*.5

        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
        cmat = ParamStandard.find_cmat(amat=amat, bmat=bmat, target=target)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)
Example #9
    def init_param_standard(self,
                            restriction='scalar',
                            use_target=False,
                            method='SLSQP',
                            use_penalty=False):
        """Estimate scalar BEKK with variance targeting.

        Parameters
        ----------
        restriction : str
            Restriction on parameters.

            Must be
                - 'full'
                - 'diagonal'
                - 'scalar'

        method : str
            Optimization method. See scipy.optimize.minimize

        Returns
        -------
        ParamStandard instance
            Parameter object

        """
        param = ParamStandard(nstocks=self.innov.shape[1],
                              target=estimate_uvar(self.innov),
                              abstart=(.2, .6))

        if restriction == 'scalar':
            return param

        kwargs = {
            'model': 'standard',
            'use_penalty': use_penalty,
            'use_target': use_target,
            'method': method
        }
        est_partial = partial(self.estimate, **kwargs)

        if restriction in ('diagonal', 'full'):
            result = est_partial(param_start=param, restriction='scalar')
            param = result.param_final

        if restriction == 'full':
            result = est_partial(param_start=param, restriction='diagonal')
            param = result.param_final

        return param
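A hedged usage sketch of the warm-start logic above (class and method names as used elsewhere on this page; innov is an (nobs, nstocks) array of innovations as in the other examples):

bekk = BEKK(innov)
# Fit the scalar and diagonal models first and use the result as the
# starting point for the full model.
param_start = bekk.init_param_standard(restriction='full', use_target=True)
result = bekk.estimate(param_start=param_start, restriction='full',
                       model='standard', use_target=True)
print(result)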
Example #10
    def test_find_cmat(self):
        """Test find C matrix."""

        nstocks = 2
        alpha, beta = .09, .81
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * alpha**.5
        bmat = np.eye(nstocks) * beta**.5
        target = np.eye(nstocks)
        # Choose intercept to normalize unconditional variance to one
        cmat1 = ParamStandard.find_cmat(amat=amat, bmat=bmat, target=target)
        ccmat = target - amat.dot(target).dot(amat.T) \
            - bmat.dot(target).dot(bmat.T)
        cmat2 = scl.cholesky(ccmat, lower=True)

        npt.assert_array_equal(cmat1, cmat2)
Example #11
    def test_simulation(self):
        """Test simulation."""

        nstocks = 6
        nobs = 10
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        target = np.eye(nstocks)

        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)

        for distr in ['normal', 'student', 'skewt']:
            innov, hvar = simulate_bekk(param, nobs=nobs, distr=distr)

            self.assertEqual(innov.shape, (nobs, nstocks))
            self.assertEqual(hvar.shape, (nobs, nstocks, nstocks))
Example #12
def try_iterative_estimation_standard():
    """Try estimating parameters from simple to more complicated model.

    """
    restriction = 'scalar'
    nstocks = 3
    nobs = 2000
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    bekk = BEKK(innov)
    result = bekk.estimate(use_target=False, restriction=restriction)

    print(result)
Example #13
def try_standard_loss():
    """Try forecast evaluation of BEKK model.

    """
    model = 'standard'
    use_target = True
    nstocks = 2
    nobs = 1000
    window = 990
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    kwargs = {
        'param_start': param_true,
        'innov_all': innov,
        'window': window,
        'model': model,
        'use_target': use_target,
        'alpha': .25,
        'kind': 'equal'
    }
    evaluate = partial(BEKK.collect_losses, **kwargs)
    losses = []
    restrcs = ['scalar', 'diagonal', 'full']
    for restr in restrcs:
        losses.append(evaluate(restriction=restr))

    losses = pd.concat(losses)

    print(losses)

    df = losses['qlike'].unstack('restriction')
    mcs = MCS(df, size=.1)
    mcs.compute()
    print(mcs.pvalues)

    return losses
Example #14
    def test_likelihood(self):
        """Test likelihood."""

        nstocks = 2
        nobs = 2000
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        target = np.eye(nstocks)
        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)

        innov, hvar = simulate_bekk(param, nobs=nobs, distr='normal')

        out1 = likelihood_python(hvar, innov)
        out2 = likelihood_gauss(hvar, innov)

        self.assertIsInstance(out1, float)
        self.assertIsInstance(out2, float)

        self.assertAlmostEqual(out1, out2)
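Both implementations presumably compute the same Gaussian criterion, the sum over observations of log|H_t| + e_t' H_t^{-1} e_t (possibly up to additive constants). A small sketch of that quantity using a Cholesky factorization (helper name made up):

import numpy as np

def neg_loglik_sketch(hvar, innov):
    out = 0.0
    for hvari, innovi in zip(hvar, innov):
        lower = np.linalg.cholesky(hvari)      # H_t = L L'
        zvec = np.linalg.solve(lower, innovi)  # L^{-1} e_t
        out += 2 * np.log(np.diag(lower)).sum() + zvec.dot(zvec)
    return out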
Example #15
    def test_forecast(self):
        """Test forecast."""

        nstocks = 2
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        cmat = np.eye(nstocks)
        param = ParamStandard.from_abc(amat=amat, bmat=bmat, cmat=cmat)

        innov = np.ones(nstocks)
        hvar = np.ones((nstocks, nstocks))

        forecast = BEKK.forecast_one(hvar=hvar, innov=innov, param=param)
        exp = cmat.dot(cmat.T)
        exp += amat.dot(innov * innov[:, np.newaxis]).dot(amat.T)
        exp += bmat.dot(hvar).dot(bmat.T)

        self.assertEqual(forecast.shape, (nstocks, nstocks))
        npt.assert_array_equal(forecast, exp)
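The expected value built above is the one-step BEKK recursion H_{t+1} = C*C' + A*e_t*e_t'*A' + B*H_t*B', and the broadcasting expression innov * innov[:, np.newaxis] is just the outer product e_t*e_t'. A quick check of that identity:

import numpy as np

innov = np.array([1.0, 2.0, 3.0])
assert np.allclose(innov * innov[:, np.newaxis], np.outer(innov, innov))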
Example #16
def time_likelihood():
    """Compare speeds of recrsions and likelihoods.

    """
    nstocks = 2
    nobs = 2000
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
    cmat = param_true.cmat

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
    hvar[0] = param_true.get_uvar().copy()

    with take_time('Python recursion'):
        filter_var_python(hvar, innov, amat, bmat, cmat)
        hvar1 = hvar.copy()

    hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
    hvar[0] = param_true.get_uvar().copy()

    with take_time('Cython recursion'):
        filter_var(hvar, innov, amat, bmat, cmat)
        hvar2 = hvar.copy()
        idxl = np.tril_indices(nstocks)
        idxu = np.triu_indices(nstocks)
        hvar2[:, idxu[0], idxu[1]] = hvar2[:, idxl[0], idxl[1]]

    print(np.allclose(hvar_true, hvar1))
    print(np.allclose(hvar_true, hvar2))

    with take_time('Python likelihood'):
        out1 = likelihood_python(hvar, innov)
    with take_time('Cython likelihood'):
        out2 = likelihood_gauss(hvar, innov)

    print(np.allclose(out1, out2))
Example #17
    def test_filter_var(self):
        """Test recursions."""

        nstocks = 2
        nobs = 2000
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        target = np.eye(nstocks)
        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
        cmat = param.cmat

        innov, hvar_true = simulate_bekk(param, nobs=nobs, distr='normal')

        hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
        hvar[0] = param.get_uvar()

        out1 = filter_var_python(hvar, innov, amat, bmat, cmat)

        hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
        hvar[0] = param.get_uvar()

        out2 = filter_var(hvar, innov, amat, bmat, cmat)

        idxl = np.tril_indices(nstocks)
        idxu = np.triu_indices(nstocks)

        npt.assert_array_almost_equal(hvar_true,
                                      np.transpose(hvar_true, axes=(0, 2, 1)))

        # npt.assert_array_almost_equal(out2, np.transpose(out2, axes=(0, 2, 1)))
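        # The Cython filter appears to fill only the lower triangle of each
        # H_t, so mirror it onto the upper triangle before checking symmetry.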
        out2[:, idxu[0], idxu[1]] = out2[:, idxl[0], idxl[1]]

        npt.assert_array_almost_equal(out1, np.transpose(out1, axes=(0, 2, 1)))
        npt.assert_array_almost_equal(out2, np.transpose(out2, axes=(0, 2, 1)))

        npt.assert_array_almost_equal(hvar_true, out1)
        npt.assert_array_almost_equal(hvar_true, out2)
Example #18
def try_standard_loss():
    """Try forecast evaluation of BEKK model.

    """
    model = 'standard'
    use_target = True
    nstocks = 2
    nobs = 1000
    window = 990
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    kwargs = {'param_start': param_true, 'innov_all': innov,
              'window': window, 'model': model, 'use_target': use_target,
              'alpha': .25, 'kind': 'equal'}
    evaluate = partial(BEKK.collect_losses, **kwargs)
    losses = []
    restrcs = ['scalar', 'diagonal', 'full']
    for restr in restrcs:
        losses.append(evaluate(restriction=restr))

    losses = pd.concat(losses)

    print(losses)

    df = losses['qlike'].unstack('restriction')
    mcs = MCS(df, size=.1)
    mcs.compute()
    print(mcs.pvalues)

    return losses
Example #19
def try_standard():
    """Try simulating and estimating standard BEKK.

    """
    use_target = False
    restriction = 'full'
    nstocks = 6
    nobs = 2000
    # A, B, C - n x n matrices
    amat = np.eye(nstocks) * .09**.5
    bmat = np.eye(nstocks) * .9**.5
    target = np.eye(nstocks)
    param_true = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    #    plot_data(innov, hvar_true)

    bekk = BEKK(innov)
    result = bekk.estimate(param_start=param_true,
                           use_target=use_target,
                           model='standard',
                           method='SLSQP',
                           restriction=restriction)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)
Example #20
    def test_var_ratio(self):
        """Test variance ratio."""

        nstocks = 6
        nobs = 10
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        target = np.eye(nstocks)

        param = ParamStandard.from_target(amat=amat, bmat=bmat, target=target)
        innov, hvar = simulate_bekk(param, nobs=nobs)

        res = BEKKResults(innov=innov, hvar=hvar)

        evar = res.portf_evar()
        rvar = res.portf_rvar()
        vratio = res.loss_var_ratio()
        mvar = res.portf_mvar()

        self.assertEqual(evar.shape, (nobs, ))
        self.assertEqual(rvar.shape, (nobs, ))
        self.assertEqual(vratio.shape, (nobs, ))
        self.assertIsInstance(mvar, float)
Example #21
    def test_loss(self):
        """Test loss function."""

        nstocks = 2
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * .09**.5
        bmat = np.eye(nstocks) * .9**.5
        cmat = np.eye(nstocks)
        param = ParamStandard.from_abc(amat=amat, bmat=bmat, cmat=cmat)

        innov = np.ones(nstocks)
        hvar = np.ones((nstocks, nstocks))

        pret = BEKK.pret(innov)
        pvar = BEKK.pvar(hvar)
        self.assertIsInstance(pret, float)
        self.assertIsInstance(pvar, float)

        weights = [1, 3]
        pret = BEKK.pret(innov, weights=weights)
        pvar = BEKK.pvar(hvar, weights=weights)

        self.assertIsInstance(pret, float)
        self.assertIsInstance(pvar, float)
        self.assertEqual(pret, 1)
        self.assertEqual(pvar, 1)

        forecast = BEKK.forecast_one(hvar=hvar, innov=innov, param=param)
        proxy = BEKK.sqinnov(innov)

        self.assertEqual(proxy.shape, (nstocks, nstocks))
        self.assertEqual(forecast.shape, (nstocks, nstocks))

        for kind in ['equal', 'minvar']:
            var = BEKK.portf_var(forecast=forecast, alpha=.05, weights=weights)
            self.assertIsInstance(var, float)

            loss_var = BEKK.loss_var(error=innov[-1])
            self.assertIsInstance(loss_var, float)

        loss_eucl = BEKK.loss_eucl(forecast=forecast, proxy=proxy)
        loss_frob = BEKK.loss_frob(forecast=forecast, proxy=proxy)
        loss_stein = BEKK.loss_stein(forecast=forecast, proxy=proxy)
        loss_stein2 = BEKK.loss_stein2(forecast=forecast, innov=innov)

        self.assertIsInstance(loss_eucl, float)
        self.assertIsInstance(loss_frob, float)
        self.assertIsInstance(loss_stein, float)
        self.assertIsInstance(loss_stein2, float)

        portf_lscore = BEKK.portf_lscore(forecast=hvar, innov=innov)
        portf_mse = BEKK.portf_mse(forecast=hvar, proxy=proxy)
        portf_qlike = BEKK.portf_qlike(forecast=hvar, proxy=proxy)

        self.assertIsInstance(portf_lscore, float)
        self.assertIsInstance(portf_mse, float)
        self.assertIsInstance(portf_qlike, float)
        self.assertEqual(portf_lscore, .5)
        self.assertEqual(portf_mse, 0)
        self.assertEqual(portf_qlike, 1)

        all_losses = BEKK.all_losses(forecast=forecast,
                                     proxy=proxy,
                                     innov=innov)

        self.assertIsInstance(all_losses, dict)
Example #22
    def estimate(self, param_start=None, restriction='scalar', cfree=False,
                 use_target=False, model='standard', groups=None,
                 method='SLSQP', cython=True, use_penalty=False):
        """Estimate parameters of the BEKK model.

        Parameters
        ----------
        param_start : ParamStandard or ParamSpatial instance
            Starting parameters. See Notes for more details.
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full'
                - 'diagonal'
                - 'group' (only applicable with 'spatial' model)
                - 'scalar'

        use_target : bool
            Whether to use variance targeting (True) or not (False)
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        groups : list of lists of tuples
            Encoded groups of items
        method : str
            Optimization method. See scipy.optimize.minimize
        cython : bool
            Whether to use Cython optimizations (True) or not (False)
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        BEKKResults instance
            Estimation results object

        Notes
        -----

        If no param_start is given, the program will estimate parameters in
        the order 'from simple to more complicated' (from scalar to diagonal
        to full) while always using variance targeting.

        """
        # Start timer for the whole optimization
        time_start = time.time()

        # Check for incompatible inputs
        if use_target and cfree:
            raise ValueError('use_target and cfree are incompatible!')
#        if (groups is not None) and (model != 'spatial'):
#            raise ValueError('The model is incompatible with weights!')
        # Update default settings
        nobs, nstocks = self.innov.shape
        var_target = estimate_uvar(self.innov)
        self.hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
        self.hvar[0] = var_target.copy()

        # Check for existence of initial guess among arguments.
        # Otherwise, initialize.
        if param_start is None:
            common = {'restriction': restriction, 'method': method,
                      'use_penalty': use_penalty, 'use_target': use_target}
            if model == 'standard':
                param_start = self.init_param_standard(**common)
            elif model == 'spatial':
                param_start = self.init_param_spatial(groups=groups,
                                                      cfree=cfree, **common)
            else:
                raise NotImplementedError('The model is not implemented!')

        # Get vector of parameters to start optimization
        theta_start = param_start.get_theta(restriction=restriction,
                                            use_target=use_target, cfree=cfree)
        if use_target:
            target = var_target
        else:
            target = None

        # Optimization options
        options = {'disp': False, 'maxiter': int(1e6)}
        if method == 'Nelder-Mead':
            options['maxfev'] = 3000
        # Likelihood arguments
        kwargs = {'model': model, 'target': target, 'cfree': cfree,
                  'restriction': restriction, 'groups': groups,
                  'cython': cython, 'use_penalty': use_penalty}
        # Likelihood function
        likelihood = partial(self.likelihood, **kwargs)

        # Run optimization
        if method == 'basin':
            opt_out = basinhopping(likelihood, theta_start, niter=100,
                                   disp=options['disp'],
                                   minimizer_kwargs={'method': 'Nelder-Mead'})
        else:
            opt_out = minimize(likelihood, theta_start,
                               method=method, options=options)
        # How much time did it take, in seconds?
        time_delta = time.time() - time_start

        # Store optimal parameters in the corresponding class
        if model == 'standard':
            param_final = ParamStandard.from_theta(theta=opt_out.x,
                                                   restriction=restriction,
                                                   target=target,
                                                   nstocks=nstocks)
        elif model == 'spatial':
            param_final = ParamSpatial.from_theta(theta=opt_out.x,
                                                  restriction=restriction,
                                                  target=target, cfree=cfree,
                                                  groups=groups)
        else:
            raise NotImplementedError('The model is not implemented!')

        return BEKKResults(innov=self.innov, hvar=self.hvar, cython=cython,
                           var_target=var_target, model=model, method=method,
                           use_target=use_target, cfree=cfree,
                           restriction=restriction,
                           param_start=param_start, param_final=param_final,
                           time_delta=time_delta, opt_out=opt_out)
Example #23
    def estimate_loop(self,
                      model='standard',
                      use_target=True,
                      groups=None,
                      restriction='scalar',
                      cfree=False,
                      method='SLSQP',
                      ngrid=2,
                      use_penalty=False):
        """Estimate parameters starting from a grid of a and b.

        Parameters
        ----------
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full' =  'diagonal'
                - 'group'
                - 'scalar'

        groups : list of lists of tuples
            Encoded groups of items
        use_target : bool
            Whether to use variance targeting (True) or not (False)
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        method : str
            Optimization method. See scipy.optimize.minimize
        ngrid : int
            Number of starting values in one dimension
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        BEKKResults instance
            Estimation results object

        """
        target = estimate_uvar(self.innov)
        nstocks = self.innov.shape[1]
        achoice = np.linspace(.01, .5, ngrid)
        bchoice = np.linspace(.1, .9, ngrid)
        out = dict()
        for abstart in itertools.product(achoice, bchoice):
            if model == 'spatial':
                param = ParamSpatial.from_groups(groups=groups,
                                                 target=target,
                                                 abstart=abstart)
            if model == 'standard':
                param = ParamStandard(nstocks=nstocks,
                                      target=target,
                                      abstart=abstart)
            if param.constraint() >= 1:
                continue
            result = self.estimate(param_start=param,
                                   method=method,
                                   use_target=use_target,
                                   cfree=cfree,
                                   model=model,
                                   restriction=restriction,
                                   groups=groups,
                                   use_penalty=use_penalty)
            out[abstart] = (result.opt_out.fun, result)

        df = pd.DataFrame.from_dict(out, orient='index')
        return df.sort_values(by=0).iloc[0, 1]
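A hedged usage sketch of the grid search above (innov is an (nobs, nstocks) array of innovations as in the other examples on this page):

bekk = BEKK(innov)
# Try ngrid**2 combinations of (a, b) starting values and keep the fit
# with the lowest objective value.
result = bekk.estimate_loop(model='standard', restriction='scalar',
                            use_target=True, ngrid=3)
print(result)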
Example #24
def try_spatial_combinations():
    """Try simulating spatial BEKK
    and estimating it with both spatial and standard.

    """
    use_target = False
    cfree = True
    restriction = 'full'
    nstocks = 3
    nobs = 2000
    groups = [(0, 1)]
    weights = ParamSpatial.get_weight(groups=groups, nitems=nstocks)
    ncat = weights.shape[0]
    alpha = np.array([.1, .01])
    beta = np.array([.5, .01])
    gamma = .0
    # A, B, C - n x n matrices
    avecs = np.ones((ncat+1, nstocks)) * alpha[:, np.newaxis]**.5
    bvecs = np.ones((ncat+1, nstocks)) * beta[:, np.newaxis]**.5
    dvecs = np.ones((ncat, nstocks)) * gamma**.5
    vvec = np.ones(nstocks)

    param_true = ParamSpatial.from_spatial(avecs=avecs, bvecs=bvecs,
                                           dvecs=dvecs, vvec=vvec,
                                           weights=weights)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    bekk = BEKK(innov)

    # -------------------------------------------------------------------------
    # Estimate spatial

    result = bekk.estimate(param_start=param_true, use_target=use_target,
                           restriction=restriction, cfree=cfree,
                           model='spatial', weights=weights, method='SLSQP',
                           cython=True)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target, cfree=cfree,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               cfree=cfree,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)

    # -------------------------------------------------------------------------
    # Estimate standard

    param_true = ParamStandard.from_abc(amat=param_true.amat,
                                        bmat=param_true.bmat,
                                        cmat=param_true.cmat)

    result = bekk.estimate(param_start=param_true, use_target=use_target,
                           restriction=restriction, cfree=cfree,
                           model='standard', weights=weights, method='SLSQP',
                           cython=True)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)
Example #25
    def test_theta(self):
        """Test theta."""

        nstocks = 2
        alpha, beta = .09, .81
        # A, B, C - n x n matrices
        amat = np.eye(nstocks) * alpha**.5
        bmat = np.eye(nstocks) * beta**.5
        target = np.eye(nstocks)
        cmat = ParamStandard.find_cmat(amat=amat, bmat=bmat, target=target)

        restriction = 'scalar'
        theta = [[alpha**.5], [beta**.5]]
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      target=target, restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        restriction = 'scalar'
        theta = [[alpha**.5], [beta**.5]]
        theta.append(cmat[np.tril_indices(cmat.shape[0])])
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        restriction = 'diagonal'
        theta = [np.diag(amat), np.diag(bmat)]
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      target=target, restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        restriction = 'diagonal'
        theta = [np.diag(amat), np.diag(bmat)]
        theta.append(cmat[np.tril_indices(cmat.shape[0])])
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        restriction = 'full'
        theta = [amat.flatten(), bmat.flatten()]
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      target=target, restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)

        restriction = 'full'
        theta = [amat.flatten(), bmat.flatten()]
        theta.append(cmat[np.tril_indices(cmat.shape[0])])
        theta = np.concatenate(theta)

        param = ParamStandard.from_theta(theta=theta, nstocks=nstocks,
                                      restriction=restriction)

        npt.assert_array_equal(amat, param.amat)
        npt.assert_array_equal(bmat, param.bmat)
        npt.assert_array_equal(cmat, param.cmat)
Example #26
    def estimate(self,
                 param_start=None,
                 restriction='scalar',
                 cfree=False,
                 use_target=False,
                 model='standard',
                 groups=None,
                 method='SLSQP',
                 cython=True,
                 use_penalty=False):
        """Estimate parameters of the BEKK model.

        Parameters
        ----------
        param_start : ParamStandard or ParamSpatial instance
            Starting parameters. See Notes for more details.
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full'
                - 'diagonal'
                - 'group' (only applicable with 'spatial' model)
                - 'scalar'

        use_target : bool
            Whether to use variance targeting (True) or not (False)
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        groups : list of lists of tuples
            Encoded groups of items
        method : str
            Optimization method. See scipy.optimize.minimize
        cython : bool
            Whether to use Cython optimizations (True) or not (False)
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        BEKKResults instance
            Estimation results object

        Notes
        -----

        If no param_start is given, the program will estimate parameters in
        the order 'from simple to more complicated' (from scalar to diagonal
        to full) while always using variance targeting.

        """
        # Start timer for the whole optimization
        time_start = time.time()

        # Check for incompatible inputs
        if use_target and cfree:
            raise ValueError('use_target and cfree are incompatible!')
#        if (groups is not None) and (model != 'spatial'):
#            raise ValueError('The model is incompatible with weights!')
        # Update default settings
        nobs, nstocks = self.innov.shape
        var_target = estimate_uvar(self.innov)
        self.hvar = np.zeros((nobs, nstocks, nstocks), dtype=float)
        self.hvar[0] = var_target.copy()

        # Check for existence of initial guess among arguments.
        # Otherwise, initialize.
        if param_start is None:
            common = {
                'restriction': restriction,
                'method': method,
                'use_penalty': use_penalty,
                'use_target': use_target
            }
            if model == 'standard':
                param_start = self.init_param_standard(**common)
            elif model == 'spatial':
                param_start = self.init_param_spatial(groups=groups,
                                                      cfree=cfree,
                                                      **common)
            else:
                raise NotImplementedError('The model is not implemented!')

        # Get vector of parameters to start optimization
        theta_start = param_start.get_theta(restriction=restriction,
                                            use_target=use_target,
                                            cfree=cfree)
        if use_target:
            target = var_target
        else:
            target = None

        # Optimization options
        options = {'disp': False, 'maxiter': int(1e6)}
        if method == 'Nelder-Mead':
            options['maxfev'] = 3000
        # Likelihood arguments
        kwargs = {
            'model': model,
            'target': target,
            'cfree': cfree,
            'restriction': restriction,
            'groups': groups,
            'cython': cython,
            'use_penalty': use_penalty
        }
        # Likelihood function
        likelihood = partial(self.likelihood, **kwargs)

        # Run optimization
        if method == 'basin':
            opt_out = basinhopping(likelihood,
                                   theta_start,
                                   niter=100,
                                   disp=options['disp'],
                                   minimizer_kwargs={'method': 'Nelder-Mead'})
        else:
            opt_out = minimize(likelihood,
                               theta_start,
                               method=method,
                               options=options)
        # How much time did it take, in seconds?
        time_delta = time.time() - time_start

        # Store optimal parameters in the corresponding class
        if model == 'standard':
            param_final = ParamStandard.from_theta(theta=opt_out.x,
                                                   restriction=restriction,
                                                   target=target,
                                                   nstocks=nstocks)
        elif model == 'spatial':
            param_final = ParamSpatial.from_theta(theta=opt_out.x,
                                                  restriction=restriction,
                                                  target=target,
                                                  cfree=cfree,
                                                  groups=groups)
        else:
            raise NotImplementedError('The model is not implemented!')

        return BEKKResults(innov=self.innov,
                           hvar=self.hvar,
                           cython=cython,
                           var_target=var_target,
                           model=model,
                           method=method,
                           use_target=use_target,
                           cfree=cfree,
                           restriction=restriction,
                           param_start=param_start,
                           param_final=param_final,
                           time_delta=time_delta,
                           opt_out=opt_out)
Example #27
    def estimate_loop(self, model='standard', use_target=True, groups=None,
                      restriction='scalar', cfree=False,
                      method='SLSQP', ngrid=2, use_penalty=False):
        """Estimate parameters starting from a grid of a and b.

        Parameters
        ----------
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full' =  'diagonal'
                - 'group'
                - 'scalar'

        groups : list of lists of tuples
            Encoded groups of items
        use_target : bool
            Whether to use variance targeting (True) or not (False)
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        method : str
            Optimization method. See scipy.optimize.minimize
        ngrid : int
            Number of starting values in one dimension
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        BEKKResults instance
            Estimation results object

        """
        target = estimate_uvar(self.innov)
        nstocks = self.innov.shape[1]
        achoice = np.linspace(.01, .5, ngrid)
        bchoice = np.linspace(.1, .9, ngrid)
        out = dict()
        for abstart in itertools.product(achoice, bchoice):
            if model == 'spatial':
                param = ParamSpatial.from_groups(groups=groups,
                                                 target=target,
                                                 abstart=abstart)
            if model == 'standard':
                param = ParamStandard(nstocks=nstocks, target=target,
                                      abstart=abstart)
            if param.constraint() >= 1:
                continue
            result = self.estimate(param_start=param, method=method,
                                   use_target=use_target, cfree=cfree,
                                   model=model, restriction=restriction,
                                   groups=groups, use_penalty=use_penalty)
            out[abstart] = (result.opt_out.fun, result)

        df = pd.DataFrame.from_dict(out, orient='index')
        return df.sort_values(by=0).iloc[0, 1]
Example #28
def try_spatial_combinations():
    """Try simulating spatial BEKK
    and estimating it with both spatial and standard.

    """
    use_target = False
    cfree = True
    restriction = 'full'
    nstocks = 3
    nobs = 2000
    groups = [(0, 1)]
    weights = ParamSpatial.get_weight(groups=groups, nitems=nstocks)
    ncat = weights.shape[0]
    alpha = np.array([.1, .01])
    beta = np.array([.5, .01])
    gamma = .0
    # A, B, C - n x n matrices
    avecs = np.ones((ncat + 1, nstocks)) * alpha[:, np.newaxis]**.5
    bvecs = np.ones((ncat + 1, nstocks)) * beta[:, np.newaxis]**.5
    dvecs = np.ones((ncat, nstocks)) * gamma**.5
    vvec = np.ones(nstocks)

    param_true = ParamSpatial.from_spatial(avecs=avecs,
                                           bvecs=bvecs,
                                           dvecs=dvecs,
                                           vvec=vvec,
                                           weights=weights)
    print(param_true)

    innov, hvar_true = simulate_bekk(param_true, nobs=nobs, distr='normal')

    bekk = BEKK(innov)

    # -------------------------------------------------------------------------
    # Estimate spatial

    result = bekk.estimate(param_start=param_true,
                           use_target=use_target,
                           restriction=restriction,
                           cfree=cfree,
                           model='spatial',
                           weights=weights,
                           method='SLSQP',
                           cython=True)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target,
                                      cfree=cfree,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               cfree=cfree,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)

    # -------------------------------------------------------------------------
    # Estimate standard

    param_true = ParamStandard.from_abc(amat=param_true.amat,
                                        bmat=param_true.bmat,
                                        cmat=param_true.cmat)

    result = bekk.estimate(param_start=param_true,
                           use_target=use_target,
                           restriction=restriction,
                           cfree=cfree,
                           model='standard',
                           weights=weights,
                           method='SLSQP',
                           cython=True)

    print(result)

    theta_true = param_true.get_theta(use_target=use_target,
                                      restriction=restriction)
    theta_final = result.param_final.get_theta(use_target=use_target,
                                               restriction=restriction)
    norm = np.linalg.norm(theta_true - theta_final)

    print('\nParameters (true and estimated):\n',
          np.vstack([theta_true, theta_final]).T)
    print('\nEuclidean norm of the difference = %.4f' % norm)
Example #29
    def likelihood(self,
                   theta,
                   model='standard',
                   restriction='full',
                   target=None,
                   cfree=False,
                   groups=None,
                   cython=True,
                   use_penalty=False):
        """Compute the conditional log-likelihood function.

        Parameters
        ----------
        theta : 1dim array
            Dimension depends on the model restriction
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full'
                - 'diagonal'
                - 'scalar'

        target : (nstocks, nstocks) array
            Estimate of unconditional variance matrix
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        groups : list of lists of tuples
            Encoded groups of items
        cython : bool
            Whether to use Cython optimizations (True) or not (False)
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        float
            The value of the minus log-likelihood function.
            If some regularity conditions are violated, then it returns
            some obscene number.

        """
        try:
            if model == 'standard':
                param = ParamStandard.from_theta(theta=theta,
                                                 target=target,
                                                 nstocks=self.innov.shape[1],
                                                 restriction=restriction)
            elif model == 'spatial':
                param = ParamSpatial.from_theta(theta=theta,
                                                target=target,
                                                cfree=cfree,
                                                restriction=restriction,
                                                groups=groups)
            else:
                raise NotImplementedError('The model is not implemented!')

            # TODO: Temporary hack to exclude errors in optimization
            if isinstance(param, np.ndarray):
                return 1e10
            if param.constraint() >= 1:
                return 1e10
            # if param.uvar_bad():
            #     return 1e10

            args = [self.hvar, self.innov, param.amat, param.bmat, param.cmat]

            penalty = param.penalty() if use_penalty else 0

            if cython:
                filter_var(*args)
                nstocks = self.innov.shape[1]
                idxl = np.tril_indices(nstocks)
                idxu = np.triu_indices(nstocks)
                self.hvar[:, idxu[0], idxu[1]] = self.hvar[:, idxl[0], idxl[1]]
                return likelihood_gauss(self.hvar, self.innov) + penalty
            else:
                filter_var_python(*args)
                return likelihood_python(self.hvar, self.innov) + penalty
        except:
            return 1e10
Example #30
    def likelihood(self, theta, model='standard', restriction='full',
                   target=None, cfree=False, groups=None, cython=True,
                   use_penalty=False):
        """Compute the conditional log-likelihood function.

        Parameters
        ----------
        theta : 1dim array
            Dimension depends on the model restriction
        model : str
            Specific model to estimate.

            Must be
                - 'standard'
                - 'spatial'

        restriction : str
            Restriction on parameters.

            Must be
                - 'full'
                - 'diagonal'
                - 'scalar'

        target : (nstocks, nstocks) array
            Estimate of unconditional variance matrix
        cfree : bool
            Whether to leave C matrix free (True) or not (False)
        groups : list of lists of tuples
            Encoded groups of items
        cython : bool
            Whether to use Cython optimizations (True) or not (False)
        use_penalty : bool
            Whether to include penalty term in the likelihood

        Returns
        -------
        float
            The value of the minus log-likelihood function.
            If some regularity conditions are violated, then it returns
            some obscene number.

        """
        try:
            if model == 'standard':
                param = ParamStandard.from_theta(theta=theta, target=target,
                                                 nstocks=self.innov.shape[1],
                                                 restriction=restriction)
            elif model == 'spatial':
                param = ParamSpatial.from_theta(theta=theta, target=target,
                                                cfree=cfree,
                                                restriction=restriction,
                                                groups=groups)
            else:
                raise NotImplementedError('The model is not implemented!')

            # TODO: Temporary hack to exclude errors in optimization
            if isinstance(param, np.ndarray):
                return 1e10
            if param.constraint() >= 1:
                return 1e10
            # if param.uvar_bad():
            #     return 1e10

            args = [self.hvar, self.innov, param.amat, param.bmat, param.cmat]

            penalty = param.penalty() if use_penalty else 0

            if cython:
                filter_var(*args)
                nstocks = self.innov.shape[1]
                idxl = np.tril_indices(nstocks)
                idxu = np.triu_indices(nstocks)
                self.hvar[:, idxu[0], idxu[1]] = self.hvar[:, idxl[0], idxl[1]]
                return likelihood_gauss(self.hvar, self.innov) + penalty
            else:
                filter_var_python(*args)
                return likelihood_python(self.hvar, self.innov) + penalty
        except:
            return 1e10