Example #1
    def test_create_tbg_neural_efficacies(self):
        """ Test the generation of neural efficacies from a truncated
        bi-Gaussian mixture
        """
        m_act = 5.0
        v_act = 0.05
        v_inact = 0.05
        cdef = [Condition(m_act=m_act, v_act=v_act, v_inact=v_inact)]
        npos = 5000
        labels = np.zeros((1, npos), dtype=int)
        labels[0, : npos // 2] = 1
        phy_params = phy.PHY_PARAMS_FRISTON00
        ne = phy.create_tbg_neural_efficacies(phy_params, cdef, labels)

        # check shape consistency:
        self.assertEqual(ne.shape, labels.shape)

        # check that moments are close to theoretical ones
        ne_act = ne[0, np.where(labels[0])]
        ne_inact = ne[0, np.where(labels[0] == 0)]
        m_act_theo = truncnorm.mean(0, phy_params["eps_max"], loc=m_act, scale=v_act ** 0.5)
        v_act_theo = truncnorm.var(0, phy_params["eps_max"], loc=m_act, scale=v_act ** 0.5)
        npt.assert_approx_equal(ne_act.mean(), m_act_theo, significant=2)
        npt.assert_approx_equal(ne_act.var(), v_act_theo, significant=2)

        m_inact_theo = truncnorm.mean(0, phy_params["eps_max"], loc=0.0, scale=v_inact ** 0.5)
        v_inact_theo = truncnorm.var(0, phy_params["eps_max"], loc=0.0, scale=v_inact ** 0.5)
        npt.assert_approx_equal(ne_inact.mean(), m_inact_theo, significant=2)
        npt.assert_approx_equal(ne_inact.var(), v_inact_theo, significant=2)
        npt.assert_array_less(ne, phy_params["eps_max"])
        npt.assert_array_less(0.0, ne)
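For context, here is a rough illustration of how such a truncated bi-Gaussian sampler can be built directly on scipy.stats.truncnorm. This is a minimal sketch, not pyhrf's create_tbg_neural_efficacies; the name sample_tbg and its signature are hypothetical.

import numpy as np
from scipy.stats import truncnorm

def sample_tbg(labels, m_act, v_act, v_inact, eps_max):
    # Active positions (label 1) draw from N(m_act, v_act) truncated to
    # [0, eps_max]; inactive positions draw from N(0, v_inact) truncated
    # to the same interval.
    ne = np.zeros(labels.shape)
    for loc, var, mask in [(m_act, v_act, labels == 1),
                           (0.0, v_inact, labels == 0)]:
        scale = var ** 0.5
        a, b = (0.0 - loc) / scale, (eps_max - loc) / scale  # standardized bounds
        ne[mask] = truncnorm.rvs(a, b, loc=loc, scale=scale, size=mask.sum())
    return ne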
Example #2

def truncGaussMM(a, b, m0, s0):
    # Computes the mean and variance of a truncated Gaussian distribution.
    # a, b: the interval [a, b] on which the Gaussian is truncated
    # m0, s0: mean and variance of the Gaussian being truncated
    # Returns m, s: mean and variance of the truncated Gaussian.
    # truncnorm takes its bounds in standard-deviation units relative to loc:
    a_scaled, b_scaled = (a - m0) / np.sqrt(s0), (b - m0) / np.sqrt(s0)
    m = truncnorm.mean(a_scaled, b_scaled, loc=m0, scale=np.sqrt(s0))
    s = truncnorm.var(a_scaled, b_scaled, loc=m0, scale=np.sqrt(s0))
    return m, s
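A quick way to sanity-check truncGaussMM and the standardization it performs: truncnorm interprets its first two arguments in standard-deviation units relative to loc, so the reported moments should match empirical moments of draws from the same truncated Gaussian. The numeric values below are arbitrary illustration.

import numpy as np
from scipy.stats import truncnorm

m, s = truncGaussMM(0.0, 3.0, m0=1.0, s0=0.25)

# Empirical cross-check with draws from the same truncated Gaussian.
a_scaled, b_scaled = (0.0 - 1.0) / 0.5, (3.0 - 1.0) / 0.5
x = truncnorm.rvs(a_scaled, b_scaled, loc=1.0, scale=0.5, size=100000)
print(m, x.mean())  # these should agree closely
print(s, x.var())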
Example #3
    def test_create_tbg_neural_efficacies(self):
        """ Test the generation of neural efficacies from a truncated
        bi-Gaussian mixture
        """
        np.random.seed(25432)
        m_act = 5.
        v_act = .05
        v_inact = .05
        cdef = [Condition(m_act=m_act, v_act=v_act, v_inact=v_inact)]
        npos = 5000
        labels = np.zeros((1, npos), dtype=int)
        labels[0, :npos // 2] = 1
        phy_params = phy.PHY_PARAMS_FRISTON00
        ne = phy.create_tbg_neural_efficacies(phy_params, cdef, labels)

        # check shape consistency:
        self.assertEqual(ne.shape, labels.shape)

        # check that moments are close to theoretical ones
        ne_act = ne[0, np.where(labels[0])]
        ne_inact = ne[0, np.where(labels[0] == 0)]
        m_act_theo = truncnorm.mean(0,
                                    phy_params['eps_max'],
                                    loc=m_act,
                                    scale=v_act**.5)
        v_act_theo = truncnorm.var(0,
                                   phy_params['eps_max'],
                                   loc=m_act,
                                   scale=v_act**.5)
        npt.assert_approx_equal(ne_act.mean(), m_act_theo, significant=2)
        npt.assert_approx_equal(ne_act.var(), v_act_theo, significant=2)

        m_inact_theo = truncnorm.mean(0,
                                      phy_params['eps_max'],
                                      loc=0.,
                                      scale=v_inact**.5)
        v_inact_theo = truncnorm.var(0,
                                     phy_params['eps_max'],
                                     loc=0.,
                                     scale=v_inact**.5)
        npt.assert_approx_equal(ne_inact.mean(), m_inact_theo, significant=2)
        npt.assert_approx_equal(ne_inact.var(), v_inact_theo, significant=2)
        npt.assert_array_less(ne, phy_params['eps_max'])
        npt.assert_array_less(0., ne)
Example #4
def momentMatchTruncGauss(low, high, mu1, s1):
    '''
        low, high are the lower and upper bounds of truncation
        mu1, s1 are the mean and variance of the gaussian to truncate
    '''
    a = (low - mu1)/s1**0.5
    b = (high - mu1)/s1**0.5
    mu = truncnorm.mean(a, b, loc=mu1, scale=s1**0.5)
    s = truncnorm.var(a, b, loc=mu1, scale=s1**0.5)  # variance is unaffected by loc
    return mu, s
Example #5
def trunc_gauss_mm(a, b, m0, s0):
    # Computes the mean m and variance s of N(m, s), the moment-matched
    # approximation of the truncated Gaussian N(m0, s0) on the interval [a, b].
    # Inputs: interval endpoints a, b; mean m0 and variance s0
    # Outputs: mean m, variance s

    # Scale the interval: truncnorm expects its bounds expressed in
    # standard deviations relative to loc, hence the rescaling.
    a_scaled, b_scaled = (a - m0) / np.sqrt(s0), (b - m0) / np.sqrt(s0)
    m = truncnorm.mean(a_scaled, b_scaled, loc=m0, scale=np.sqrt(s0))
    s = truncnorm.var(a_scaled, b_scaled, loc=m0, scale=np.sqrt(s0))

    return m, s
Example #6
def truncated_gaussian(a, b, mean0, var0):
    # computes the mean and variance of a truncated Gaussian distribution
    #
    # Input:
    # a, b: The interval [a, b] on which the Gaussian is being truncated
    # mean0, var0: mean and variance of the Gaussian which is to be truncated
    #
    # Output:
    # mean, var: mean and variance of the truncated Gaussian
    # scale interval with mean and variance
    a_scaled, b_scaled = (a - mean0) / np.sqrt(var0), (b - mean0) / np.sqrt(var0)
    mean = truncnorm.mean(a_scaled, b_scaled, loc=mean0, scale=np.sqrt(var0))
    var = truncnorm.var(a_scaled, b_scaled, loc=mean0, scale=np.sqrt(var0))
    return mean, var
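The helpers above all defer to truncnorm, but the mean can also be cross-checked against the textbook closed form E[X | a <= X <= b] = mu + sigma * (phi(alpha) - phi(beta)) / (Phi(beta) - Phi(alpha)), where alpha and beta are the standardized bounds. A sketch; the function name trunc_mean_closed_form and the numeric arguments are ours.

import numpy as np
from scipy.stats import norm, truncnorm

def trunc_mean_closed_form(a, b, mean0, var0):
    # Closed-form mean of N(mean0, var0) truncated to [a, b].
    sigma = np.sqrt(var0)
    alpha, beta = (a - mean0) / sigma, (b - mean0) / sigma
    z = norm.cdf(beta) - norm.cdf(alpha)
    return mean0 + sigma * (norm.pdf(alpha) - norm.pdf(beta)) / z

# Should match truncated_gaussian's first return value:
print(trunc_mean_closed_form(0.0, 2.0, 0.5, 0.04))
print(truncated_gaussian(0.0, 2.0, 0.5, 0.04)[0])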
Example #7

def true_mean_normal(self):
    # Mean of the truncated normal defined by the instance's bounds,
    # with the bounds standardized to the distribution's mean and stdev.
    a, b = ((self.left - self.mean) / self.stdev,
            (self.right - self.mean) / self.stdev)
    return truncnorm.mean(a, b, loc=self.mean, scale=self.stdev)
Example #8
def mean(self, alpha, beta):
    # Truncated-normal mean from rescaled bounds; beta is kept on a log
    # scale, hence beta.exp() for the scale parameter.
    bounds_rescaled = self.bounds_rescaled(alpha, beta)
    return truncnorm.mean(a=bounds_rescaled[:, 0].detach().numpy(),
                          b=bounds_rescaled[:, 1].detach().numpy(),
                          loc=alpha.detach().numpy(),
                          scale=beta.exp().detach().numpy())
Example #9

for method_i, method in enumerate(methods):  # for each method
    print("running {}".format(method))
    for trial in tqdm(range(max_trials)):  #for each possible trial run
        trial_data[method_i].append([])
        a_k = []
        b_k = []
        mean = []
        sd = []
        for k in range(K):  #generate a bandit problem
            mean_k = random()
            sd_k = 1 * random() + 0.1
            a_k.append(-mean_k / sd_k)
            b_k.append((1 - mean_k) / sd_k)
            mean.append(mean_k)
            sd.append(sd_k)
        # true arm means on the same scale as the sampled rewards
        means = [truncnorm.mean(a_k[i], b_k[i], loc=mean[i], scale=sd[i])
                 for i in range(K)]
        max_mean = max(means)
        samples = [[] for i in range(K)]
        len_samples = [0 for i in range(K)]
        for t in range(1, max_time + 1):  #run the method
            arm = method(samples, t)
            samples[arm].append(
                truncnorm.rvs(a_k[arm], b_k[arm]) * sd[arm] + mean[arm])
            len_samples[arm] += 1
            trial_data[method_i][-1].append(
                sum([len_samples[i] * (max_mean - means[i])
                     for i in range(K)]))

# transpose data arrays and calculate average regrets
rearranged_trial_data = [list(map(list, zip(*l))) for l in trial_data]
for i in range(len(rearranged_trial_data)):
    # average regret across trials at each time step (the original snippet
    # ends here; this aggregation is an assumed completion)
    avg_regrets = [sum(step) / len(step) for step in rearranged_trial_data[i]]
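The loop above only assumes that each method maps (samples, t) to an arm index. A hypothetical policy matching that protocol, for illustration; epsilon_greedy and its eps parameter are not from the original.

from random import random, randrange

def epsilon_greedy(samples, t, eps=0.1):
    # `samples` is a list of per-arm reward lists; return an arm index.
    untried = [i for i, s in enumerate(samples) if not s]
    if untried:                      # play every arm once first
        return untried[0]
    if random() < eps:               # explore uniformly at random
        return randrange(len(samples))
    # exploit: arm with the best empirical mean so far
    return max(range(len(samples)),
               key=lambda i: sum(samples[i]) / len(samples[i]))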
Example #10
def mean(self, dist):
    return truncnorm.mean(*self._get_params(dist))
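Example #10 suggests a wrapper whose _get_params unpacks a distribution object into truncnorm's (a, b, loc, scale). A minimal sketch of what that could look like; the dict-based dist layout and the class name TruncNormStats are assumptions, not the original class.

from scipy.stats import truncnorm

class TruncNormStats:
    def _get_params(self, dist):
        # Standardize the raw bounds the way truncnorm expects.
        a = (dist["low"] - dist["loc"]) / dist["scale"]
        b = (dist["high"] - dist["loc"]) / dist["scale"]
        return a, b, dist["loc"], dist["scale"]

    def mean(self, dist):
        return truncnorm.mean(*self._get_params(dist))

stats = TruncNormStats()
print(stats.mean({"low": 0.0, "high": 1.0, "loc": 0.2, "scale": 0.3}))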