Ejemplo n.º 1
0
    def test_convergence_with_map (self):
        """ Tests if we can still achieve convergence when using 
            parallel_map. """
        # Fix: the original body was copy-pasted twice verbatim; the
        # second run added no coverage, so a single run is equivalent.
        mu = [2, 1]
        s = [[.01, 0], [0, .01]]
        X = MultivariateLognormal (mu, s)
        N = 2000
        # Draw N samples in 5 parallel workers; the lambda ignores the
        # dummy index argument supplied by parallel_map.
        rvs_caller = lambda _: X.rvs ()
        rand_vals = parallel_map (rvs_caller, range (N), 5)
        sample_mean = np.array ([.0, .0])
        for val in rand_vals:
            sample_mean += val
        sample_mean /= N
        # Relative error of the Monte Carlo mean should be below 10%.
        assert all (abs (X.mean () - sample_mean) / X.mean () < 1e-1)
Ejemplo n.º 2
0
 def __init__ (self, theta):
     """ Builds the object with a fixed lognormal target: mean vector
         of ones and covariance I / 10 in log space. """
     super ().__init__ (theta, verbose=False)
     dim = theta.get_size ()
     self.target_distribution = MultivariateLognormal (
             np.ones (dim), np.eye (dim) / 10)
Ejemplo n.º 3
0
 def test_get_random_value (self):
     """ Tests if one can get a random value from the random
         variable. """
     X = MultivariateLognormal ([2, 1], [[1, 0], [0, 1]])
     sample = X.rvs ()
     # The lognormal support is strictly positive.
     assert all (v > 0 for v in sample)
Ejemplo n.º 4
0
 def test_get_mean (self):
     """ Tests if one can get the mean of the random variable. """
     mu = [1, 2, 3, 4]
     S = np.array ([[2, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 4, 1],
                    [0, 0, 1, 2]])
     X = MultivariateLognormal (mu, S)
     # Analytic lognormal mean: exp(mu + diag(S) / 2).
     expected = np.exp (np.array (mu) + S.diagonal () / 2)
     assert all (abs (X.mean () - expected) < 1e-4)
Ejemplo n.º 5
0
 def create_covar_matrix(self):
     """ Estimates a covariance matrix from an artificial sample of 50
         draws of a lognormal distribution with a fixed mean. """
     sample_mean = [0.05, 0.1, 0.2, .3]
     S = np.eye(4) / 5
     # Choose mu so that the lognormal mean equals sample_mean.
     mu = np.log(np.array(sample_mean)) - S.diagonal() / 2
     sample_dist = MultivariateLognormal(mu, S)
     artificial_sample = [sample_dist.rvs() for _ in range(50)]
     return calc_covariance(artificial_sample)
Ejemplo n.º 6
0
 def test_get_pdf_of_small_prob_point (self):
     """ Tests if we can get the pdf of a point with small pdf value.  
     """
     identity = [[1, 0, 0],
                 [0, 1, 0],
                 [0, 0, 1]]
     X = MultivariateLognormal ([0, 0, 0], identity)
     # Extremely small coordinates: the density should be ~0 instead
     # of raising numerical errors.
     point = [1e-230, 1e-120, 1e-130]
     assert abs (X.pdf (point) - 0) < 1e-4
Ejemplo n.º 7
0
 def test_get_pdf_of_zero_prob_point (self):
     """ Tests if we can get the pdf of a point with pdf equal to
         zero. """
     identity = [[1, 0, 0],
                 [0, 1, 0],
                 [0, 0, 1]]
     X = MultivariateLognormal ([0, 0, 0], identity)
     # A zero coordinate lies outside the lognormal support, so the
     # density there must be zero.
     assert abs (X.pdf ([1, 0, 1]) - 0) < 1e-4
Ejemplo n.º 8
0
 def test_get_pdf (self):
     """ Tests if we can get a point of the probability density
         function of this random variable. """
     identity = [[1, 0, 0],
                 [0, 1, 0],
                 [0, 0, 1]]
     X = MultivariateLognormal ([0, 0, 0], identity)
     # At x = (1, 1, 1), log(x) = 0, so the density reduces to the
     # normalization constant 1 / sqrt((2 pi)^3).
     expected = 1 / np.sqrt ((2 * np.pi) ** 3)
     assert abs (X.pdf ([1, 1, 1]) - expected) < 1e-4
Ejemplo n.º 9
0
 def _create_jump_dist (self, theta_t):
     """ Creates a lognormal jump distribution whose mean sits on the
         current point theta_t. """
     dim = theta_t.get_size ()
     cov = np.eye (dim) / 100
     current = np.array (theta_t.get_values ())
     # Shift mu by half the variances so the lognormal mean equals
     # the current point values.
     mu = np.log (current) - cov.diagonal () / 2
     return MultivariateLognormal (mu, cov)
Ejemplo n.º 10
0
        class MHMultivariateLognormalTargetMock (MetropolisHastings):
            """ A Metropolis-Hastings mock whose target distribution is
                a multivariate lognormal with mean vector of ones and
                covariance I / 10 in log space. """

            def __init__ (self, theta):
                super ().__init__ (theta, verbose=False)
                dim = theta.get_size ()
                self.target_distribution = MultivariateLognormal (
                        np.ones (dim), np.eye (dim) / 10)

            def _calc_log_likelihood (self, t):
                # The likelihood is the target pdf at the point values.
                likelihood = self.target_distribution.pdf (t.get_values ())
                return np.log (likelihood)

            def _create_jump_dist (self, theta_t):
                dim = theta_t.get_size ()
                cov = np.eye (dim) / 100
                current = np.array (theta_t.get_values ())
                # Shift mu so the lognormal mean sits on the current point.
                mu = np.log (current) - cov.diagonal () / 2
                return MultivariateLognormal (mu, cov)

            def _calc_mh_ratio (self, new_t, new_l, old_t, old_l):
                # Jump densities in both directions, needed because the
                # lognormal proposal is asymmetric.
                jump_from_new = self._create_jump_dist (new_t)
                jump_from_old = self._create_jump_dist (old_t)
                p_old_gv_new = jump_from_new.pdf (old_t.get_values ())
                p_new_gv_old = jump_from_old.pdf (new_t.get_values ())
                likelihood_ratio = np.exp (new_l - old_l)
                return likelihood_ratio * (p_old_gv_new / p_new_gv_old)
 def create_starting_sample (self):
     """ Creates an artificial starting sample: 50 RandomParameterList
         objects drawn from a lognormal distribution, each paired with
         a log-likelihood of 1. """
     sample = []
     log_likelihoods = []
     target_mean = [0.05, 0.1, 0.2, .3]
     S = np.eye (4) / 5
     # Pick mu so that the lognormal mean equals target_mean.
     mu = np.log (np.array (target_mean)) - S.diagonal () / 2
     sample_dist = MultivariateLognormal (mu, S)
     for _ in range (50):
         theta = RandomParameterList ()
         values = sample_dist.rvs ()
         # All but the last sampled value become regular parameters;
         # the last value is discarded.
         for v in values[:-1]:
             p = RandomParameter ('p', Gamma (2, 2))
             p.value = v
             theta.append (p)
         # A fresh parameter fills the experimental-error slot.
         exp_error = RandomParameter ('sigma', Gamma (2, 2))
         theta.set_experimental_error (exp_error)
         log_likelihoods.append (1)
         sample.append (theta)
     return (sample, log_likelihoods)
Ejemplo n.º 12
0
 def _create_jump_dist(self, theta_t):
     """ The jump distribution is Multivariate Lognormal with a 
         diagonal covariance matrix, i.e the jumps on each parameter
         are independent. """
     n = theta_t.get_size()
     current = np.array(theta_t.get_values())
     # One independent jump variance per parameter on the diagonal.
     cov = np.eye(n)
     for i in range(n):
         cov[i, i] = self._jump_S[i]
     # mu = log(current point): the "normal parametrization".
     return MultivariateLognormal(np.array(np.log(current)), cov)
Ejemplo n.º 13
0
 def test_convergence_to_mean (self):
     """ Tests if the randomly generated values has a sample mean
         that converges to the correct mean. """
     # Fix: the original body was copy-pasted twice verbatim; the
     # second run added no coverage, so a single run is equivalent.
     mu = [2, 1]
     s = [[.01, 0], [0, .01]]
     X = MultivariateLognormal (mu, s)
     N = 2000
     sample_mean = np.array ([.0, .0])
     for _ in range (N):
         sample_mean += X.rvs ()
     sample_mean /= N
     # Relative error of the Monte Carlo mean should be below 10%.
     assert all (abs (X.mean () - sample_mean) / X.mean () < 1e-1)
Ejemplo n.º 14
0
 def test_create_shaped_distribution (self):
     """ Tests if we can create a Multivariate Lognormal 
         distribution with a specified mean and variance. """
     mu = [1, 2, .1]
     S = np.array ([[.1,  0,  0], 
                    [ 0, .2,  0], 
                    [ 0,  0, .2]])
     X = MultivariateLognormal.create_lognormal_with_shape (mu, S)
     N = 2000
     sample_mean = np.array ([.0, .0, .0])
     for _ in range (N):
         sample_mean += X.rvs ()
     sample_mean /= N
     # Loose 20% tolerance: Monte Carlo estimate with only N = 2000.
     assert all (abs (mu - sample_mean) / sample_mean < 2e-1)
Ejemplo n.º 15
0
 def _create_jump_dist(self, theta_t):
     """ Creates the jump distribution from the current point.

         Parameters
             theta_t: a RandomParameterList with the current point.

         Returns
             A MultivariateLognormal jump distribution. It uses the
             normal parametrization mu = log(current_point_values),
             with covariance equal to the sample covariance of the
             log of the accepted points, scaled by the current jump
             scale.
     """
     current_values = theta_t.get_values()
     log_mu = np.array(np.log(current_values))
     scaled_cov = self._jump_S * self._jump_scale
     return MultivariateLognormal(log_mu, scaled_cov)
 def _create_jump_dist (self, theta_t):
     """ The jump distribution is Multivariate Lognormal.

         Fix: the original referenced an undefined name `jump_S`
         (NameError at runtime); the covariance is stored on the
         instance as `self._jump_S`, consistent with the other
         _create_jump_dist implementations in this file.
     """
     t_vals = theta_t.get_values ()
     # Shift mu by half the variances so the lognormal mean equals
     # the current point values.
     mu = np.log (t_vals) - self._jump_S.diagonal () / 2
     jump_dist = MultivariateLognormal (mu, self._jump_S)
     return jump_dist