Example #1
0
 def stat_function(x):
     # Evaluate the empirical CDF of the 1-D sample ``x`` on the
     # enclosing scope's ``ygrid`` points.
     assert x.ndim == 1
     return EmpiricalCDF(x)(ygrid)
Example #2
0
def plot_truncated_lognormal_example_exact_quantities(num_samples=int(1e5),
                                                      plot=False,
                                                      mu=0,
                                                      sigma=1):
    """
    Compare Monte-Carlo estimates of risk statistics (CDF, PDF, VaR, CVaR,
    second-order stochastic dominance) of a truncated lognormal model
    against their exact counterparts.

    Parameters
    ----------
    num_samples : int
        Number of samples drawn from the truncated normal variable.
    plot : bool
        If True draw the six comparison panels and call ``plt.show()``;
        if False assert the empirical statistics match the exact ones.
    mu, sigma : float
        Mean and standard deviation of the underlying (untruncated) normal.
    """
    if plot:
        # Plotting very large sample sets is slow; cap the sample size.
        assert num_samples <= 1e5
    lb, ub = -1, 3

    f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
        get_truncated_lognormal_example_exact_quantities(lb, ub, mu, sigma)
    # lb, ub passed to truncnorm_rv are defined for the standard normal;
    # adjust for mu and sigma.
    alpha, beta = (lb - mu) / sigma, (ub - mu) / sigma
    samples = truncnorm_rv.rvs(alpha, beta, mu, sigma,
                               size=num_samples)[np.newaxis, :]
    values = f(samples)[:, 0]

    from pyapprox.density import EmpiricalCDF

    # Only allocate the figure when it will actually be shown; the original
    # created it unconditionally and leaked it when plot=False.
    if plot:
        fig, axs = plt.subplots(1, 6, sharey=False, figsize=(16, 6))

    ygrid = np.linspace(np.exp(lb) - 1, np.exp(ub) * 1.1, 100)
    ecdf = EmpiricalCDF(values)
    if plot:
        # Panel 0 -- CDF: empirical vs exact
        axs[0].plot(ygrid, ecdf(ygrid), '-')
        axs[0].plot(ygrid, f_cdf(ygrid), '--')
        axs[0].set_xlim(ygrid.min(), ygrid.max())
        axs[0].set_title('CDF')

        # Panel 1 -- PDF: histogram vs exact (same ygrid as the CDF panel)
        axs[1].hist(values, bins='auto', density=True)
        axs[1].plot(ygrid, f_pdf(ygrid), '--')
        axs[1].set_xlim(ygrid.min(), ygrid.max())
        axs[1].set_title('PDF')

    # Panel 2 -- value at risk over a range of quantile levels
    pgrid = np.linspace(0.01, 1 - 1e-2, 10)
    evar = np.array([value_at_risk(values, p)[0] for p in pgrid]).squeeze()
    if plot:
        axs[2].plot(pgrid, evar, '-')
        axs[2].plot(pgrid, VaR(pgrid), '--')
        axs[2].set_title('VaR')
    else:
        assert np.allclose(evar, VaR(pgrid), rtol=2e-2)

    # Panel 3 -- conditional value at risk
    pgrid = np.linspace(0, 1 - 1e-2, 100)
    ecvar = np.array([conditional_value_at_risk(values, p) for p in pgrid])
    # CVaR for alpha=0 should be the mean
    assert np.allclose(ecvar[0], values.mean())
    if plot:
        axs[3].plot(pgrid, ecvar, '-')
        axs[3].plot(pgrid, CVaR(pgrid), '--')
        axs[3].set_xlim(pgrid.min(), pgrid.max())
        axs[3].set_title('CVaR')
    else:
        assert np.allclose(ecvar.squeeze(), CVaR(pgrid).squeeze(), rtol=1e-2)

    # Panel 4 -- standard second-order stochastic dominance statistic
    ygrid = np.linspace(np.exp(lb) - 10, np.exp(ub) + 1, 100)
    essd = compute_conditional_expectations(ygrid, values, False)
    if plot:
        axs[4].plot(ygrid, essd, '-')
        axs[4].plot(ygrid, ssd(ygrid), '--')
        axs[4].set_xlim(ygrid.min(), ygrid.max())
        axs[4].set_title(r'$E[(\eta-Y)^+]$')
        # Fixed: originally labelled axs[5], but this is the axs[4] panel
        # (axs[5] gets its own xlabel in the disutility block below).
        axs[4].set_xlabel(r'$\eta$')
    else:
        assert np.allclose(essd.squeeze(), ssd(ygrid), rtol=2e-2)

    # Panel 5 -- disutility SSD statistic
    # zoom into ygrid over high probability region of -Y
    ygrid = -ygrid[::-1]
    disutil_essd = compute_conditional_expectations(ygrid, values, True)
    assert np.allclose(disutil_essd,
                       compute_conditional_expectations(ygrid, -values, False))
    if plot:
        axs[5].plot(ygrid, disutil_essd, '-', label='Empirical')
        axs[5].plot(ygrid, ssd_disutil(ygrid), '--', label='Exact')
        axs[5].set_xlim(ygrid.min(), ygrid.max())
        axs[5].set_title(r'$E[(\eta-(-Y))^+]$')
        axs[5].set_xlabel(r'$\eta$')
        axs[5].legend()

        plt.show()
Example #3
0
    def help_test_stochastic_dominance(self,
                                       solver,
                                       nsamples,
                                       degree,
                                       disutility=None,
                                       plot=False):
        """
        Check that the PCE approximation returned by ``solver`` satisfies
        the requested stochastic-dominance relation with the training data.

        Parameters
        ----------
        solver : callable
            Called as ``solver(samples, values, basis_matrix,
            eta_indices=...)``; returns ``(coef, sd_opt_problem)``.
        nsamples : int
            Number of standard-normal training samples.
        degree : int
            Total degree of the Hermite polynomial chaos expansion.
        disutility : bool or None
            None  : check first-order dominance via the empirical CDF.
            True  : check disutility second-order stochastic dominance.
            False : check standard second-order stochastic dominance.
        plot : bool
            If True, also plot the approximations and the dominance
            statistic via ``plot_1d_functions_and_statistics``.
        """
        from pyapprox.multivariate_polynomials import PolynomialChaosExpansion
        from pyapprox.variable_transformations import \
            define_iid_random_variable_transformation
        from pyapprox.indexing import compute_hyperbolic_indices
        num_vars = 1
        mu, sigma = 0, 1
        f, f_cdf, f_pdf, VaR, CVaR, ssd, ssd_disutil = \
            get_lognormal_example_exact_quantities(mu, sigma)

        # Sorted training samples of the underlying standard normal variable
        samples = np.random.normal(0, 1, (1, nsamples))
        samples = np.sort(samples)
        values = f(samples[0, :])[:, np.newaxis]

        pce = PolynomialChaosExpansion()
        var_trans = define_iid_random_variable_transformation(
            normal_rv(mu, sigma), num_vars)
        pce.configure({'poly_type': 'hermite', 'var_trans': var_trans})
        indices = compute_hyperbolic_indices(1, degree, 1.)
        pce.set_indices(indices)

        # None enforces the dominance constraints at every training sample
        eta_indices = None
        coef, sd_opt_problem = solver(samples,
                                      values,
                                      pce.basis_matrix,
                                      eta_indices=eta_indices)

        pce.set_coefficients(coef[:, np.newaxis])
        pce_values = pce(samples)[:, 0]

        ygrid = pce_values.copy()
        if disutility is not None:
            if disutility:
                # Disutility formulation works on -Y; mirror the grid
                ygrid = -ygrid[::-1]
            stat_function = partial(compute_conditional_expectations,
                                    ygrid,
                                    disutility_formulation=disutility)
            if disutility:
                # Disutility SSD: approximation must dominate the data
                eps = 1e-14  # tolerance for floating-point ties
                assert np.all(
                    stat_function(values[:, 0]) <= stat_function(pce_values) +
                    eps)
            else:
                # Standard SSD: data must dominate the approximation
                assert np.all(
                    stat_function(pce_values) <= stat_function(values[:, 0]))
        else:
            # FSD: compare empirical CDFs evaluated on the grid
            from pyapprox.density import EmpiricalCDF

            def stat_function(x):
                return EmpiricalCDF(x)(ygrid)

            assert np.all(
                stat_function(pce_values) <= stat_function(values[:, 0]))

        if plot:
            # Least-squares PCE of the same degree for visual comparison
            lstsq_pce = PolynomialChaosExpansion()
            lstsq_pce.configure({
                'poly_type': 'hermite',
                'var_trans': var_trans
            })
            lstsq_pce.set_indices(indices)

            lstsq_coef = solve_least_squares_regression(
                samples, values, lstsq_pce.basis_matrix)
            lstsq_pce.set_coefficients(lstsq_coef)

            # Evaluate the statistic on a denser grid that also contains the
            # PCE values, padded 10% beyond the data range.
            ylb, yub = values.min()-abs(values.max())*.1,\
                       values.max()+abs(values.max())*.1

            ygrid = np.linspace(ylb, yub, 101)
            ygrid = np.sort(np.concatenate([ygrid, pce_values]))
            if disutility is not None:
                if disutility:
                    ygrid = -ygrid[::-1]
                stat_function = partial(compute_conditional_expectations,
                                        ygrid,
                                        disutility_formulation=disutility)
            else:
                def stat_function(x):
                    # Empirical CDF of x evaluated on the plotting grid
                    assert x.ndim == 1
                    return EmpiricalCDF(x)(ygrid)

            fig, axs = plot_1d_functions_and_statistics(
                [f, pce, lstsq_pce], ['Exact', 'SSD', 'Lstsq'], samples,
                values, stat_function, ygrid)

            plt.show()