    def test_cdf(self):
        sim_model = EconDensity()
        x = np.ones(shape=(2000, 1))
        y = np.random.uniform(0.01, 5, size=(2000, 1))
        p_sim = sim_model.cdf(x, y)
        # analytic CDF for x = 1: the conditional is N(1, 2^2) (cf. test_value_at_risk below)
        p_true = stats.norm.cdf(y, loc=1, scale=2)
        diff = np.sum(np.abs(p_sim - p_true))
        self.assertAlmostEqual(diff, 0.0, places=2)
def generate_report():
  econ_density = EconDensity()

  # draw a training set from the simulation model and fit the estimator via cross-validation
  X, Y = econ_density.simulate(n_samples=1000)
  nke = NeighborKernelDensityEstimation()
  nke.fit_by_cv(X, Y)

  # evaluate the fitted conditional density p(y | x=1) on a grid of y-values
  n_samples = 500
  X_test = np.asarray([1 for _ in range(n_samples)])
  Y_test = np.linspace(0, 8, num=n_samples)
  Z = nke.pdf(X_test, Y_test)
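  # Sketch of a possible continuation (not in the original): plot the estimated
  # conditional density p(y | x=1) over the evaluation grid, assuming matplotlib
  # is available in the environment.
  import matplotlib.pyplot as plt
  plt.plot(Y_test, Z)
  plt.xlabel("y")
  plt.ylabel("estimated p(y | x=1)")
  plt.show()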
    def test_cdf_sample_consistency(self):
        from statsmodels.distributions.empirical_distribution import ECDF
        model = EconDensity()

        x_cond = np.asarray([0.1 for _ in range(200000)])
        _, y_sample = model.simulate_conditional(x_cond)

        emp_cdf = ECDF(y_sample.flatten())
        cdf = lambda y: model.cdf(x_cond, y)

        mean_cdf_diff = np.mean(
            np.abs(emp_cdf(y_sample).flatten() - cdf(y_sample).flatten()))
        self.assertLessEqual(mean_cdf_diff, 0.01)
    def test_conditional_value_at_risk(self):
        sim_model = EconDensity()
        x_cond = np.array([[0], [1]])
        CVaR = sim_model.conditional_value_at_risk(x_cond, alpha=0.03)

        CVaR_mc = super(EconDensity,
                        sim_model).conditional_value_at_risk(x_cond,
                                                             alpha=0.03,
                                                             n_samples=10**7)

        print("CVaR Analytic:", CVaR)
        print("CVaR MC:", CVaR_mc)
        print("VaR", sim_model.value_at_risk(x_cond, alpha=0.03))

        diff = np.mean(np.abs(CVaR_mc - CVaR))

        self.assertAlmostEqual(diff, 0, places=2)
    def test_value_at_risk(self):
        sim_model = EconDensity()
        x_cond = np.array([[0], [1]])
        VaR = sim_model.value_at_risk(x_cond, alpha=0.05)

        VaR_cdf = super(EconDensity, sim_model).value_at_risk(x_cond,
                                                              alpha=0.05)

        diff = np.sum(np.abs(VaR_cdf - VaR))

        self.assertAlmostEqual(VaR[0],
                               stats.norm.ppf(0.05, loc=0, scale=1),
                               places=4)
        self.assertAlmostEqual(VaR[1],
                               stats.norm.ppf(0.05, loc=1, scale=2),
                               places=4)
        self.assertAlmostEqual(diff, 0, places=4)
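    def test_conditional_value_at_risk_closed_form(self):
        # Hedged cross-check (not part of the original tests): for a Gaussian
        # conditional N(mu, sigma^2), the lower-tail CVaR (expected shortfall) at
        # level alpha has the closed form mu - sigma * pdf(ppf(alpha)) / alpha.
        # This assumes conditional_value_at_risk returns the lower-tail expected
        # shortfall and that the conditionals are N(0, 1) at x=0 and N(1, 2^2) at
        # x=1, as verified in test_value_at_risk above.
        sim_model = EconDensity()
        alpha = 0.03
        x_cond = np.array([[0], [1]])
        CVaR = np.asarray(
            sim_model.conditional_value_at_risk(x_cond, alpha=alpha)).flatten()

        z = stats.norm.ppf(alpha)
        for cvar, (mu, sigma) in zip(CVaR, [(0.0, 1.0), (1.0, 2.0)]):
            cvar_closed_form = mu - sigma * stats.norm.pdf(z) / alpha
            self.assertAlmostEqual(cvar, cvar_closed_form, places=2)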
    def test_random_seed(self):
        sim_model1 = EconDensity(random_seed=22)
        X1, Y1 = sim_model1.simulate(n_samples=100)

        sim_model2 = EconDensity(random_seed=22)
        X2, Y2 = sim_model2.simulate(n_samples=100)

        # identical seeds must yield identical simulated samples
        diff_x = np.sum(np.abs(X1 - X2))
        diff_y = np.sum(np.abs(Y1 - Y2))
        self.assertAlmostEqual(diff_x, 0, places=2)
        self.assertAlmostEqual(diff_y, 0, places=2)
def eval_econ_data():
  gmm = GaussianMixture(ndim_x=1, ndim_y=1)
  econ_density = EconDensity()

  # print("ECON DATA --------------")
  # print("KMN")
  # for n_centers in [50, 100, 200]:
  #   kmn = KernelMixtureNetwork(n_centers=n_centers)
  #   gof = GoodnessOfFit(kmn, econ_density, n_observations=2000, print_fit_result=False, repeat_kolmogorov=1)
  #   gof_results = gof.compute_results()
  #   print("N_Centers:", n_centers)
  #   print(gof_results)

  print("LAZY-Learner:")
  nkde = KernelMixtureNetwork(n_training_epochs=10)
  gof = GoodnessOfFit(nkde, gmm, n_observations=100, print_fit_result=False)
  gof_results = gof.compute_results()
  print(gof_results)
  print(gof_results.report_dict())
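# Usage sketch (assumption, not in the original script): run the evaluation when
# this module is executed directly.
if __name__ == "__main__":
  eval_econ_data()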