Example #1
import pytest

# Import path assumed to be the linearmodels asset-pricing covariance module
from linearmodels.asset_pricing.covariance import KernelCovariance, KernelWeight


def test_kernel_errors(data):
    with pytest.raises(ValueError):
        KernelWeight(data.moments, kernel="unknown")
    with pytest.raises(ValueError):
        KernelWeight(data.moments, bandwidth=-0.5)
    with pytest.raises(ValueError):
        KernelCovariance(data.moments, jacobian=data.jacobian, kernel="unknown")
    with pytest.raises(ValueError):
        KernelCovariance(data.moments, jacobian=data.jacobian, bandwidth=-4)
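Each constructor call above should raise because of the flagged argument (an unknown kernel name or a negative bandwidth). For contrast, a minimal sketch of a valid construction using the same signatures the test exercises; the random array is a stand-in for the fixture's data.moments:

import numpy as np

from linearmodels.asset_pricing.covariance import KernelWeight

moments = np.random.standard_normal((500, 8))  # stand-in for data.moments
kw = KernelWeight(moments, kernel="bartlett", bandwidth=12.0)
w = kw.w(moments)  # moment-by-moment weighting matrix estimate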
Example #2
import numpy as np

# Import path assumed, as in Example #1
from linearmodels.asset_pricing.covariance import HeteroskedasticWeight, KernelWeight


def test_center(data):
    kw = KernelWeight(data.moments, center=True)
    kw2 = KernelWeight(data.moments, center=False)

    assert kw.bandwidth == kw2.bandwidth
    assert np.any(kw.w(data.moments) != kw2.w(data.moments))

    hw = HeteroskedasticWeight(data.moments, center=True)
    hw2 = HeteroskedasticWeight(data.moments, center=False)

    assert np.any(hw.w(data.moments) != hw2.w(data.moments))
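Conceptually, centering changes only how the moment covariance is formed, not the bandwidth, which is why the test expects equal bandwidths but different weights. A rough NumPy illustration of the idea; this mirrors the s = g'g / nobs computation in Example #3 below, not the library's exact implementation:

import numpy as np

g = np.random.standard_normal((500, 8))  # stand-in moment conditions
s_uncentered = g.T @ g / g.shape[0]
gc = g - g.mean(0)
s_centered = gc.T @ gc / g.shape[0]
# The weighting matrix is the inverse of this covariance, so centering
# changes w(...) while leaving bandwidth selection untouched.
w_centered = np.linalg.inv(s_centered)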
Example #3
    def fit(
        self,
        center: bool = True,
        use_cue: bool = False,
        steps: int = 2,
        disp: int = 10,
        max_iter: int = 1000,
        cov_type: str = "robust",
        debiased: bool = True,
        **cov_config: Union[bool, int, str],
    ) -> GMMFactorModelResults:
        """
        Estimate model parameters

        Parameters
        ----------
        center : bool, optional
            Flag indicating whether to center the moment conditions before
            computing the weighting matrix.
        use_cue : bool, optional
            Flag indicating whether to use the continuously updated
            estimator (CUE).
        steps : int, optional
            Number of steps to use when estimating parameters. 2 corresponds
            to the standard efficient GMM estimator. Higher values iterate
            until convergence or until the given number of steps is reached.
        disp : int, optional
            Number of iterations between printed updates. Values of 0 or
            below suppress output.
        max_iter : int, positive, optional
            Maximum number of iterations when minimizing objective
        cov_type : str, optional
            Name of covariance estimator
        debiased : bool, optional
            Flag indicating whether to debias the covariance estimator using
            a degree of freedom adjustment
        **cov_config
            Additional covariance-specific options.  See Notes.

        Returns
        -------
        GMMFactorModelResults
            Results class with parameter estimates, covariance and test statistics

        Notes
        -----
        The kernel covariance estimator takes the optional arguments
        ``kernel``, one of 'bartlett', 'parzen' or 'qs' (quadratic spectral),
        and ``bandwidth`` (a non-negative float).
        """

        nobs, n = self.portfolios.shape
        k = self.factors.shape[1]
        excess_returns = not self._risk_free
        nrf = int(not excess_returns)
        # 1. Starting Values - use 2 pass
        mod = LinearFactorModel(self.portfolios,
                                self.factors,
                                risk_free=self._risk_free)
        res = mod.fit()
        betas = np.asarray(res.betas).ravel()
        lam = np.asarray(res.risk_premia)
        mu = self.factors.ndarray.mean(0)
        sv = np.r_[betas, lam, mu][:, None]
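        # Parameter vector stacks vec(betas) (n*k), the risk premia and the
        # factor means, matching the param_names built below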
        g = self._moments(sv, excess_returns)
        if center:
            g = g - g.mean(0)[None, :]
        kernel: Optional[str] = None
        bandwidth: Optional[float] = None
        if cov_type not in ("robust", "heteroskedastic", "kernel"):
            raise ValueError("Unknown weight: {0}".format(cov_type))
        if cov_type in ("robust", "heteroskedastic"):
            weight_est_instance = HeteroskedasticWeight(g, center=center)
            cov_est = HeteroskedasticCovariance
        else:  # 'kernel':
            kernel = get_string(cov_config, "kernel")
            bandwidth = get_float(cov_config, "bandwidth")
            weight_est_instance = KernelWeight(g,
                                               center=center,
                                               kernel=kernel,
                                               bandwidth=bandwidth)
            cov_est = KernelCovariance

        w = weight_est_instance.w(g)

        args = (excess_returns, w)

        # 2. Step 1 using w = inv(s) from SV
        callback = callback_factory(self._j, args, disp=disp)
        opt_res = minimize(
            self._j,
            sv,
            args=args,
            callback=callback,
            options={
                "disp": bool(disp),
                "maxiter": max_iter
            },
        )
        params = opt_res.x
        last_obj = opt_res.fun
        iters = 1
        # 3. Steps 2, 3, ... using the previous step's estimates
        if not use_cue:
            while iters < steps:
                iters += 1
                g = self._moments(params, excess_returns)
                w = weight_est_instance.w(g)
                args = (excess_returns, w)

                # Re-run the minimization with the updated weight matrix
                callback = callback_factory(self._j, args, disp=disp)
                opt_res = minimize(
                    self._j,
                    params,
                    args=args,
                    callback=callback,
                    options={
                        "disp": bool(disp),
                        "maxiter": max_iter
                    },
                )
                params = opt_res.x
                obj = opt_res.fun
                if np.abs(obj - last_obj) < 1e-6:
                    break
                last_obj = obj

        else:
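            # CUE recomputes the weight matrix at every objective evaluation,
            # so the weight estimator itself is passed into the objective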
            cue_args = (excess_returns, weight_est_instance)
            callback = callback_factory(self._j_cue, cue_args, disp=disp)
            opt_res = minimize(
                self._j_cue,
                params,
                args=cue_args,
                callback=callback,
                options={
                    "disp": bool(disp),
                    "maxiter": max_iter
                },
            )
            params = opt_res.x

        # 4. Compute final S and G for inference
        g = self._moments(params, excess_returns)
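        # s estimates the covariance of the moment conditions and feeds the
        # alpha covariance below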
        s = g.T @ g / nobs
        jac = self._jacobian(params, excess_returns)
        if cov_est is HeteroskedasticCovariance:
            cov_est_inst = HeteroskedasticCovariance(
                g,
                jacobian=jac,
                center=center,
                debiased=debiased,
                df=self.factors.shape[1],
            )
        else:
            cov_est_inst = KernelCovariance(
                g,
                jacobian=jac,
                center=center,
                debiased=debiased,
                df=self.factors.shape[1],
                kernel=kernel,
                bandwidth=bandwidth,
            )

        full_vcv = cov_est_inst.cov
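        # Risk premia follow the n*k betas in the parameter vector, including
        # the risk-free premium when one is estimated (nrf)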
        sel = slice((n * k), (n * k + k + nrf))
        rp = params[sel]
        rp_cov = full_vcv[sel, sel]
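        # Each portfolio contributes k+1 moments with the pricing error
        # first, so a stride of k+1 selects the alphas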
        sel = slice(0, (n * (k + 1)), (k + 1))
        alphas = g.mean(0)[sel, None]
        alpha_vcv = s[sel, sel] / nobs
        stat = self._j(params, excess_returns, w)
        jstat = WaldTestStatistic(stat,
                                  "All alphas are 0",
                                  n - k - nrf,
                                  name="J-statistic")

        # R2 calculation
        betas = np.reshape(params[:(n * k)], (n, k))
        resids = self.portfolios.ndarray - self.factors.ndarray @ betas.T
        resids -= resids.mean(0)[None, :]
        residual_ss = (resids**2).sum()
        total = self.portfolios.ndarray
        total = total - total.mean(0)[None, :]
        total_ss = (total**2).sum()
        r2 = 1.0 - residual_ss / total_ss
        param_names = []
        for portfolio in self.portfolios.cols:
            for factor in self.factors.cols:
                param_names.append("beta-{0}-{1}".format(portfolio, factor))
        if not excess_returns:
            param_names.append("lambda-risk_free")
        param_names.extend(["lambda-{0}".format(f) for f in self.factors.cols])
        param_names.extend(["mu-{0}".format(f) for f in self.factors.cols])
        rp_names = list(self.factors.cols)
        if not excess_returns:
            rp_names.insert(0, "risk_free")
        params = np.c_[alphas, betas]
        # 5. Return values
        res_dict = AttrDict(
            params=params,
            cov=full_vcv,
            betas=betas,
            rp=rp,
            rp_cov=rp_cov,
            alphas=alphas,
            alpha_vcv=alpha_vcv,
            jstat=jstat,
            rsquared=r2,
            total_ss=total_ss,
            residual_ss=residual_ss,
            param_names=param_names,
            portfolio_names=self.portfolios.cols,
            factor_names=self.factors.cols,
            name=self._name,
            cov_type=cov_type,
            model=self,
            nobs=nobs,
            rp_names=rp_names,
            iter=iters,
            cov_est=cov_est_inst,
        )

        return GMMFactorModelResults(res_dict)
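A minimal usage sketch for this method, assuming it is linearmodels' LinearFactorModelGMM.fit; the data below are placeholders, and kernel and bandwidth are forwarded through **cov_config as described in the Notes:

import numpy as np
import pandas as pd

from linearmodels.asset_pricing import LinearFactorModelGMM

rng = np.random.default_rng(0)
portfolios = pd.DataFrame(rng.standard_normal((500, 10)))  # placeholder returns
factors = pd.DataFrame(rng.standard_normal((500, 3)))      # placeholder factors

mod = LinearFactorModelGMM(portfolios, factors)
res = mod.fit(cov_type="kernel", kernel="bartlett", bandwidth=12.0, disp=0)
print(res.risk_premia)  # the rp values stored in the results dict above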