Example #1
    def setUpClass(self):

        ## Non-linear least squares
        np.random.seed(100)

        # initialize yield curve and VAR observed factors
        yc_data_test = pa.DataFrame(
            np.random.random((test_size - k_ar, nyields)))
        var_data_test = self.var_data_test = \
            pa.DataFrame(np.random.random((test_size, neqs)))
        self.mats = mats = list(range(1, nyields + 1))

        # initialize masked arrays
        self.dim_nolat = dim = k_ar * neqs
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs_nolat = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma
        }

        guess_params_nolat = np.random.random((neqs**2 + neqs)).tolist()
        affine_obj_nolat = Affine(**self.mod_kwargs_nolat)

        self.results = affine_obj_nolat.solve(guess_params_nolat,
                                              method='nls',
                                              xtol=0.1,
                                              ftol=0.1)
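
These test snippets rely on module-level fixtures defined elsewhere in the test module: the sizes test_size, k_ar, neqs, nyields and latent, and a make_nomask helper that returns a fully unmasked masked array. A minimal sketch of those fixtures is given below; the numeric values are illustrative assumptions, and the helper simply follows the ma.zeros / ma.nomask pattern used in Examples #4 and #10.

import numpy as np
import numpy.ma as ma
import pandas as pa  # the snippets build their inputs with pa.DataFrame

# illustrative values only; the original test module defines its own
test_size = 100   # length of the fake sample
k_ar = 4          # number of VAR lags
neqs = 5          # number of observed factors
nyields = 5       # number of yields on the curve
latent = 1        # number of latent factors


def make_nomask(shape):
    # start from a masked array of zeros with nothing masked, so that
    # individual entries can later be flagged for estimation with ma.masked
    arr = ma.zeros(shape)
    arr[:, :] = ma.nomask
    return arr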
Example #2
    def setUpClass(self):

        ## Non-linear least squares
        np.random.seed(100)

        # initialize yield curve and VAR observed factors
        yc_data_test = pa.DataFrame(np.random.random((test_size - k_ar,
                                                      nyields)))
        var_data_test = self.var_data_test = \
            pa.DataFrame(np.random.random((test_size, neqs)))
        self.mats = mats = list(range(1, nyields + 1))

        # initialize masked arrays
        self.dim_nolat = dim = k_ar * neqs
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs_nolat = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma
        }

        guess_params_nolat = np.random.random((neqs**2 + neqs)).tolist()
        affine_obj_nolat = Affine(**self.mod_kwargs_nolat)

        self.results = affine_obj_nolat.solve(guess_params_nolat, method='nls',
                                              xtol=0.1, ftol=0.1)
Example #3
                                   sigma_e=sigma_e,
                                   mths=mths)

                guess_length = bsr_model.guess_length
                guess_params = [0.0000] * guess_length

                print(source)
                print(model)
                print("xtol " + str(xtol))
                print("ftol " + str(ftol))
                print("Begin " + str(yc_dates[0]))
                print("End " + str(yc_dates[-1]))
                print("variables " + str(list(bsr_model.names)))
                out_bsr = bsr_model.solve(guess_params=guess_params,
                                          method='nls',
                                          ftol=ftol,
                                          xtol=xtol,
                                          maxfev=10000000,
                                          full_output=False)

                lam_0, lam_1, delta_0, delta_1, mu, phi, sigma, a_solve, \
                                b_solve, solv_cov = out_bsr

                a_rsk, b_rsk = bsr_model.gen_pred_coef(lam_0=lam_0,
                                                       lam_1=lam_1,
                                                       delta_0=delta_0,
                                                       delta_1=delta_1,
                                                       mu=mu,
                                                       phi=phi,
                                                       sigma=sigma)

                #generate no risk results
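
The fragment stops at the comment above. Judging from the nearly identical Example #5 further down, the block presumably continues by zeroing out the price-of-risk matrices and recomputing the prediction coefficients. A hedged sketch of that continuation follows: the keyword arguments after lam_0 are cut off in Example #5, so the call below simply mirrors the risky gen_pred_coef call above (Example #5 also builds a zeroed sigma, which the original may pass instead of sigma):

                # sketch of the no-risk comparison; the continuation is an assumption
                lam_0_nr = np.zeros([neqs * k_ar, 1])
                lam_1_nr = np.zeros([neqs * k_ar, neqs * k_ar])
                a_nrsk, b_nrsk = bsr_model.gen_pred_coef(lam_0=lam_0_nr,
                                                         lam_1=lam_1_nr,
                                                         delta_0=delta_0,
                                                         delta_1=delta_1,
                                                         mu=mu,
                                                         phi=phi,
                                                         sigma=sigma)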
Example #4
def robust(mod_data, mod_yc_data, method=None):
    """
    Run the model with randomized starting guesses and return the estimated
    lambda (price of risk) parameters.

    mod_data : pandas DataFrame
        model data
    mod_yc_data : pandas DataFrame
        model yield curve data
    method : string
        method to pass to Affine.solve()
    """
    # subset to pre 2005
    mod_data = mod_data[:217]
    mod_yc_data = mod_yc_data[:214]

    k_ar = 4
    neqs = 5
    lat = 0

    lam_0_e = ma.zeros((k_ar * neqs, 1))
    lam_0_e[:neqs] = ma.masked

    lam_1_e = ma.zeros((k_ar * neqs, k_ar * neqs))
    lam_1_e[:neqs, :neqs] = ma.masked

    delta_0_e = ma.zeros([1, 1])
    delta_0_e[:, :] = ma.masked
    delta_0_e[:, :] = ma.nomask

    delta_1_e = ma.zeros([k_ar * neqs, 1])
    delta_1_e[:, :] = ma.masked
    delta_1_e[:, :] = ma.nomask
    delta_1_e[np.argmax(mod_data.columns == 'fed_funds')] = 1

    var_fit = VAR(mod_data, freq="M").fit(maxlags=k_ar)

    coefs = var_fit.params.values
    sigma_u = var_fit.sigma_u
    obs_var = neqs * k_ar

    mu_e = ma.zeros([k_ar * neqs, 1])
    mu_e[:, :] = ma.masked
    mu_e[:, :] = ma.nomask
    mu_e[:neqs] = coefs[0, None].T

    phi_e = ma.zeros([k_ar * neqs, k_ar * neqs])
    phi_e[:, :] = ma.masked
    phi_e[:, :] = ma.nomask
    phi_e[:neqs] = coefs[1:].T
    phi_e[neqs:obs_var, :(k_ar - 1) * neqs] = np.identity((k_ar - 1) * neqs)

    sigma_e = ma.zeros([k_ar * neqs, k_ar * neqs])
    sigma_e[:, :] = ma.masked
    sigma_e[:, :] = ma.nomask
    sigma_e[:neqs, :neqs] = sigma_u
    sigma_e[neqs:obs_var, neqs:obs_var] = np.identity((k_ar - 1) * neqs)

    #anl_mths, mth_only_data = proc_to_mth(mod_yc_data)
    bsr = Affine(yc_data=mod_yc_data,
                 var_data=mod_data,
                 lam_0_e=lam_0_e,
                 lam_1_e=lam_1_e,
                 delta_0_e=delta_0_e,
                 delta_1_e=delta_1_e,
                 mu_e=mu_e,
                 phi_e=phi_e,
                 sigma_e=sigma_e)
    neqs = bsr.neqs

    guess_length = bsr.guess_length

    guess_params = [0.0000] * guess_length

    # perturb the first 30 starting guesses with small values centered at zero
    for numb in range(min(30, len(guess_params))):
        guess_params[numb] = 0.0001 * (np.random.random() - 0.5)

    out_bsr = bsr.solve(guess_params=guess_params,
                        method=method,
                        ftol=1e-950,
                        xtol=1e-950,
                        maxfev=1000000000,
                        full_output=False)

    if method == "ls":
        lam_0, lam_1, delta_1, mu, phi, sig, a_solve, b_solve, output = out_bsr
        return lam_0, lam_1, output

    else:
        lam_0, lam_1, delta_1, mu, phi, sig, a_solve, b_solve, lam_cov = out_bsr
        return lam_0, lam_1, lam_cov
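
A hypothetical call to robust, assuming mod_data and mod_yc_data have already been loaded as monthly pandas DataFrames and that mod_data contains a 'fed_funds' column (delta_1_e is keyed on it); loading the data is not part of the original example:

# illustrative usage only; mod_data and mod_yc_data are assumed to exist
lam_0, lam_1, lam_cov = robust(mod_data, mod_yc_data, method="nls")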
Example #5
                                   lam_1_e=lam_1_e, delta_0_e=delta_0_e, delta_1_e=delta_1_e,
                                   mu_e=mu_e, phi_e=phi_e, sigma_e=sigma_e, mths=mths)


                guess_length = bsr_model.guess_length
                guess_params = [0.0000] * guess_length

                print(source)
                print(model)
                print("xtol " + str(xtol))
                print("ftol " + str(ftol))
                print("Begin " + str(yc_dates[0]))
                print("End " + str(yc_dates[-1]))
                print("variables " + str(list(bsr_model.names)))
                out_bsr = bsr_model.solve(guess_params=guess_params, method='nls',
                                          ftol=ftol, xtol=xtol, maxfev=10000000,
                                          full_output=False)

                lam_0, lam_1, delta_0, delta_1, mu, phi, sigma, a_solve, \
                                b_solve, solv_cov = out_bsr

                a_rsk, b_rsk = bsr_model.gen_pred_coef(lam_0=lam_0, lam_1=lam_1,
                                                       delta_0=delta_0,
                                                       delta_1=delta_1, mu=mu,
                                                       phi=phi, sigma=sigma)

                #generate no risk results
                lam_0_nr = np.zeros([neqs*k_ar, 1])
                lam_1_nr = np.zeros([neqs*k_ar, neqs*k_ar])
                sigma_zeros = np.zeros_like(sigma)
                a_nrsk, b_nrsk = bsr_model.gen_pred_coef(lam_0=lam_0_nr,
Example #6
class TestEstimationMethods(TestCase):
    """
    Tests for solution methods
    """
    def setUp(self):

        ## Non-linear least squares
        np.random.seed(101)

        # initialize yield curve and VAR observed factors
        yc_data_test = pa.DataFrame(np.random.random((test_size - k_ar,
                                                      nyields)))
        var_data_test = pa.DataFrame(np.random.random((test_size, neqs)))
        mats = list(range(1, nyields + 1))

        # initialize masked arrays
        self.dim_nolat = dim = k_ar * neqs
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs_nolat = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma
        }

        self.guess_params_nolat = np.random.random((neqs**2 + neqs)).tolist()
        self.affine_obj_nolat = Affine(**self.mod_kwargs_nolat)

        ## Maximum likelihood build

        # initialize masked arrays
        self.dim_lat = dim = k_ar * neqs + latent
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_0[-latent:] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        lam_1[-latent:, -latent:] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma,
            'latent': latent,
            'no_err': [1]
        }

        self.guess_params_lat = np.random.random((neqs**2 + neqs +
                                                 (2 * latent),)).tolist()
        self.affine_obj_lat = Affine(**self.mod_kwargs)


    def test_solve_nls(self):
        """
        Tests that basic estimation runs for the non-linear least squares case
        without any latent factors. If the numerical approximation method
        converges, this test passes; otherwise it fails.
        """
        guess_params = self.guess_params_nolat
        method = 'nls'
        self.affine_obj_nolat.solve(guess_params, method=method, alg='newton',
                                    xtol=0.1, ftol=0.1)

    def test_solve_ml(self):
        """
        Tests that model estimation converges under direct maximum likelihood
        with a single latent factor. If the numerical approximation method
        converges, this test passes; otherwise it fails.
        """
        guess_params = self.guess_params_lat
        method = 'ml'
        self.affine_obj_lat.solve(guess_params, method=method, alg='bfgs',
                                  xtol=0.1, ftol=0.1)
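
The class above follows the standard unittest layout, so it can be run on its own. A minimal invocation sketch, assuming the snippet lives in a module named test_affine (the module name is an assumption, not given in the example):

import unittest

# 'test_affine' is an assumed module name for the test class shown above
suite = unittest.defaultTestLoader.loadTestsFromName(
    "test_affine.TestEstimationMethods")
unittest.TextTestRunner(verbosity=2).run(suite)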
Example #7
mod_init = Affine(yc_data=mod_yc_data,
                  var_data=None,
                  latent=latent,
                  lam_0_e=lam_0_e,
                  lam_1_e=lam_1_e,
                  delta_0_e=delta_0_e,
                  delta_1_e=delta_1_e,
                  mu_e=mu_e,
                  phi_e=phi_e,
                  sigma_e=sigma_e,
                  mats=mats,
                  use_C_extension=False)

guess_length = mod_init.guess_length

guess_params = [0] * guess_length

np.random.seed(100)

# replace each zero starting guess with a small positive random value
for numb in range(len(guess_params)):
    guess_params[numb] = np.abs(0.001 * np.random.random())

bsr_solve = mod_init.solve(guess_params=guess_params,
                           method="kalman",
                           alg="bfgs",
                           maxfev=10000000,
                           maxiter=10000000,
                           ftol=0.1,
                           xtol=0.1)
Example #8

mod_init = Affine(
    yc_data=yc_data_use,
    var_data=macro_data_use,
    latent=latent,
    lam_0_e=lam_0_e,
    lam_1_e=lam_1_e,
    delta_0_e=delta_0_e,
    delta_1_e=delta_1_e,
    mu_e=mu_e,
    phi_e=phi_e,
    sigma_e=sigma_e,
    mats=mats,
    k_ar=k_ar,
    neqs=neqs,
    use_C_extension=False,
)

guess_length = mod_init.guess_length

guess_params = [0.0000] * guess_length

np.random.seed(100)

# replace each zero starting guess with a tiny positive random value
for numb in range(len(guess_params)):
    guess_params[numb] = np.abs(1e-10 * np.random.random())

bsr_solve = mod_init.solve(guess_params=guess_params,
                           method="kalman",
                           alg="bfgs",
                           maxfev=10000000,
                           maxiter=10000000)
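
As a quick sanity check on the inputs, the shapes can be compared against the conventions the test snippets above follow: one yield column per maturity in mats, one macro column per equation, and VAR data that is k_ar observations longer than the yield data. These checks mirror the test fixtures, not a documented requirement of Affine:

# illustrative checks based on the shapes used in the test examples above
assert yc_data_use.shape[1] == len(mats), "one yield column per maturity"
assert macro_data_use.shape[1] == neqs, "one macro column per VAR equation"
assert macro_data_use.shape[0] == yc_data_use.shape[0] + k_ar, \
    "VAR sample is k_ar observations longer than the yield sample"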
Example #9
class TestEstimationMethods(TestCase):
    """
    Tests for solution methods
    """
    def setUp(self):

        ## Non-linear least squares
        np.random.seed(101)

        # initialize yield curve and VAR observed factors
        yc_data_test = pa.DataFrame(
            np.random.random((test_size - k_ar, nyields)))
        var_data_test = pa.DataFrame(np.random.random((test_size, neqs)))
        mats = list(range(1, nyields + 1))

        # initialize masked arrays
        self.dim_nolat = dim = k_ar * neqs
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs_nolat = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma
        }

        self.guess_params_nolat = np.random.random((neqs**2 + neqs)).tolist()
        self.affine_obj_nolat = Affine(**self.mod_kwargs_nolat)

        ## Maximum likelihood build

        # initialize masked arrays
        self.dim_lat = dim = k_ar * neqs + latent
        lam_0 = make_nomask([dim, 1])
        lam_1 = make_nomask([dim, dim])
        delta_0 = make_nomask([1, 1])
        delta_1 = make_nomask([dim, 1])
        mu = make_nomask([dim, 1])
        phi = make_nomask([dim, dim])
        sigma = make_nomask([dim, dim])

        # Setup some of the elements as non-zero
        # This sets up a fake model where only lambda_0 and lambda_1 are
        # estimated
        lam_0[:neqs] = ma.masked
        lam_0[-latent:] = ma.masked
        lam_1[:neqs, :neqs] = ma.masked
        lam_1[-latent:, -latent:] = ma.masked
        delta_0[:, :] = np.random.random(1)
        delta_1[:neqs] = np.random.random((neqs, 1))
        mu[:neqs] = np.random.random((neqs, 1))
        phi[:neqs, :] = np.random.random((neqs, dim))
        sigma[:, :] = np.identity(dim)

        self.mod_kwargs = {
            'yc_data': yc_data_test,
            'var_data': var_data_test,
            'k_ar': k_ar,
            'neqs': neqs,
            'mats': mats,
            'lam_0_e': lam_0,
            'lam_1_e': lam_1,
            'delta_0_e': delta_0,
            'delta_1_e': delta_1,
            'mu_e': mu,
            'phi_e': phi,
            'sigma_e': sigma,
            'latent': latent,
            'no_err': [1]
        }

        self.guess_params_lat = np.random.random(
            (neqs**2 + neqs + (2 * latent), )).tolist()
        self.affine_obj_lat = Affine(**self.mod_kwargs)

    def test_solve_nls(self):
        """
        Tests that basic estimation runs for the non-linear least squares case
        without any latent factors. If the numerical approximation method
        converges, this test passes; otherwise it fails.
        """
        guess_params = self.guess_params_nolat
        method = 'nls'
        self.affine_obj_nolat.solve(guess_params,
                                    method=method,
                                    alg='newton',
                                    xtol=0.1,
                                    ftol=0.1)

    def test_solve_ml(self):
        """
        Tests that model estimation converges under direct maximum likelihood
        with a single latent factor. If the numerical approximation method
        converges, this test passes; otherwise it fails.
        """
        guess_params = self.guess_params_lat
        method = 'ml'
        self.affine_obj_lat.solve(guess_params,
                                  method=method,
                                  alg='bfgs',
                                  xtol=0.1,
                                  ftol=0.1)
Example #10
def robust(mod_data, mod_yc_data, method=None):
    """
    Run the model with randomized starting guesses and return the estimated
    lambda (price of risk) parameters.

    mod_data : pandas DataFrame
        model data
    mod_yc_data : pandas DataFrame
        model yield curve data
    method : string
        method to pass to Affine.solve()
    """
    # subset to pre 2005
    mod_data = mod_data[:217]
    mod_yc_data = mod_yc_data[:214]

    k_ar = 4
    neqs = 5
    lat = 0

    lam_0_e = ma.zeros((k_ar * neqs, 1))
    lam_0_e[:neqs] = ma.masked

    lam_1_e = ma.zeros((k_ar * neqs, k_ar * neqs))
    lam_1_e[:neqs, :neqs] = ma.masked

    delta_0_e = ma.zeros([1, 1])
    delta_0_e[:, :] = ma.masked
    delta_0_e[:, :] = ma.nomask

    delta_1_e = ma.zeros([k_ar * neqs, 1])
    delta_1_e[:, :] = ma.masked
    delta_1_e[:, :] = ma.nomask
    delta_1_e[np.argmax(mod_data.columns == 'fed_funds')] = 1

    var_fit = VAR(mod_data, freq="M").fit(maxlags=k_ar)

    coefs = var_fit.params.values
    sigma_u = var_fit.sigma_u
    obs_var = neqs * k_ar

    mu_e = ma.zeros([k_ar*neqs, 1])
    mu_e[:, :] = ma.masked
    mu_e[:, :] = ma.nomask
    mu_e[:neqs] = coefs[0, None].T

    phi_e = ma.zeros([k_ar * neqs, k_ar * neqs])
    phi_e[:, :] = ma.masked
    phi_e[:, :] = ma.nomask
    phi_e[:neqs] = coefs[1:].T
    phi_e[neqs:obs_var, :(k_ar - 1) * neqs] = np.identity((k_ar - 1) * neqs)

    sigma_e = ma.zeros([k_ar * neqs, k_ar * neqs])
    sigma_e[:, :] = ma.masked
    sigma_e[:, :] = ma.nomask
    sigma_e[:neqs, :neqs] = sigma_u
    sigma_e[neqs:obs_var, neqs:obs_var] = np.identity((k_ar - 1) * neqs)

    #anl_mths, mth_only_data = proc_to_mth(mod_yc_data)
    bsr = Affine(yc_data=mod_yc_data, var_data=mod_data, lam_0_e=lam_0_e,
                 lam_1_e=lam_1_e, delta_0_e=delta_0_e, delta_1_e=delta_1_e,
                 mu_e=mu_e, phi_e=phi_e, sigma_e=sigma_e)
    neqs = bsr.neqs

    guess_length = bsr.guess_length

    guess_params = [0.0000] * guess_length

    # perturb the first 30 starting guesses with small values centered at zero
    for numb in range(min(30, len(guess_params))):
        guess_params[numb] = 0.0001 * (np.random.random() - 0.5)

    out_bsr = bsr.solve(guess_params=guess_params, method=method, ftol=1e-950,
                        xtol=1e-950, maxfev=1000000000, full_output=False)

    if method == "ls":
        lam_0, lam_1, delta_1, mu, phi, sig, a_solve, b_solve, output = out_bsr
        return lam_0, lam_1, output

    else:
        lam_0, lam_1, delta_1, mu, phi, sig, a_solve, b_solve, lam_cov = out_bsr
        return lam_0, lam_1, lam_cov