def sample_mu(self, j, k, scale=5.0):
     current = self.mus[j,k]
     proposal = np.random.normal(current, scale)
     current_logp = laplace.logpdf(current, scale=self.gamma_f)
     proposal_logp = laplace.logpdf(proposal, scale=self.gamma_f)
     # current_logp = -1 * ((current * current) / (2.0 * self.sigmas[j,k]))
     # proposal_logp = -1 * ((proposal * proposal) / (2.0 * self.sigmas[j,k]))
     theta_olds = []
     for obj in self.mixlist:
         if obj["ready"][j] == False:
             self._calc_theta(obj, j)
         k2 = obj["assignments"][j]
         current_logp += obj["logthetas"][j,k2]
         theta_olds.append(obj["logthetas"][j].copy())
     self.mus[j,k] = proposal
     for obj in self.mixlist:
         self._calc_theta(obj, j)
         k2 = obj["assignments"][j]
         proposal_logp += obj["logthetas"][j,k2]
     if current_logp > proposal_logp and rand_partition_log([current_logp, proposal_logp]) == 0:
         # rejected, undoing changes
         self.mus[j,k] = current
         for obj, theta_old in zip(self.mixlist, theta_olds):
             obj["logthetas"][j] = theta_old
         return False
     else:
         # accepted
         return True
 def sample_eta(self, obj, k, scale=5.0):
     current = obj["etas"][k]
     proposal = np.random.normal(current, scale)
     current_logp = laplace.logpdf(current, scale=self.gamma_c)
     proposal_logp = laplace.logpdf(proposal, scale=self.gamma_c)
     # current_logp = -1 * ((current * current) / (2.0 * obj["taus"][k]))
     # proposal_logp = -1 * ((proposal * proposal) / (2.0 * obj["taus"][k]))
     for j in range(self.dims):
         if not obj["ready"][j]:
             self._calc_theta(obj, j)
         k2 = obj["assignments"][j]
         current_logp += obj["logthetas"][j,k2]
     theta_old = obj["logthetas"].copy()
     obj["etas"][k] = proposal
     for j in range(self.dims):
         self._calc_theta(obj, j)
         k2 = obj["assignments"][j]
         proposal_logp += obj["logthetas"][j,k2]
     if current_logp > proposal_logp and rand_partition_log([current_logp, proposal_logp]) == 0:
         # rejected, undoing changes
         obj["etas"][k] = current
         # do not entirely override obj["logthetas"] because it is pointed from elsewhere
         for j in range(self.dims):
             obj["logthetas"][j] = theta_old[j]
         return False
     else:
         # accepted
         return True
Example #3
 def truncated_laplace(self, start):
     if self.rate == -1:
         rate = np.random.uniform(1, 10, 1)[0]
     else:
         rate = self.rate
     end = []
     n_factors = len(self.factor_sizes)
     for mean, upper in zip(start, np.array(
             self.factor_sizes)):  # sample each feature individually
         x = np.arange(upper)
         p = laplace.pdf(x, loc=mean, scale=np.log(upper) / rate)
         p /= np.sum(p)
         end.append(np.random.choice(x, 1, p=p)[0])
     end = np.array(end).astype(int)
     end[self.categorical] = start[
         self.categorical]  # don't change categorical factors such as shape
     # make sure there is at least one change
     if np.sum(abs(start - end)) == 0:
         ind = np.random.choice(np.arange(n_factors)[~self.categorical],
                                1)[0]  # don't change categorical factors
         x = np.arange(self.factor_sizes[ind])
         p = laplace.pdf(x,
                         loc=start[ind],
                         scale=np.log(self.factor_sizes[ind]) / rate)
         p[x == start[ind]] = 0
         p /= np.sum(p)
         end[ind] = np.random.choice(x, 1, p=p)
     assert np.sum(abs(start - end)) > 0
     return end
    def plot_distributions(self):
        plt.figure(figsize=(18, 5))
        plt.suptitle("Gaussian vs. Laplace")
        plt.subplot(121)
        xs = np.linspace(-2, 2, 500)
        # scipy's `scale` is the standard deviation, so take the square root of the variance
        plt.plot(xs, norm.pdf(xs, loc=self._mu, scale=np.sqrt(self._sigma_squared)))
        plt.plot(xs, laplace.pdf(xs, loc=self._mu, scale=self._b))
        plt.title("Densities")
        plt.xlabel("$x$")
        plt.ylabel("$p(x)$")

        plt.subplot(122)
        plt.hist(self._x, alpha=0.5)
        plt.hist(self._y, alpha=0.5)
        plt.xlim([-5, 5])
        plt.ylim([0, self._n * 0.65])
        plt.legend(["Gaussian", "Laplace"])
        plt.title('Samples')

        print('Gaussian vs. Laplace')
        print(
            f"Sample means: {np.mean(self._x):.2f} vs {np.mean(self._y):.2f}")
        print(
            f"Sample variances: {np.var(self._x):.2f} vs {np.var(self._y):.2f}"
        )

        plt.show()
        return self
def get_epsilon_scheduler_noise_distribution_curve(location, epsilon, j,
                                                   delta):
    # Equation:
    #   b = 2 J \Delta\eta / \epsilon
    b = (2 * j * delta) / epsilon

    # 1st to 99th percentile
    left_bin = int(
        compute_noise_level_percentile(location, epsilon, j, delta, 0.01))
    right_bin = int(
        compute_noise_level_percentile(location, epsilon, j, delta, 0.99))

    # print(left_bin)
    # print(right_bin)

    curve_x = []
    curve_y = []
    total_count = 100
    resolution = (right_bin - left_bin) / total_count
    for i in range(total_count + 1):
        curve_x.append(left_bin + resolution * i)
        curve_y.append(
            laplace.pdf(left_bin + resolution * i, loc=location, scale=b))
        # print(left_bin + resolution*i, ",", curve[-1])

    # for i in range(left_bin, right_bin+1):
    #     curve.append(laplace.pdf(i, loc=location, scale=b))
    #     print(i, ",", curve[-1])

    return curve_x, curve_y
Example #6
def derivative_quant_err(m, p, dist='norm', pw_opt=2):  
    '''
    Compute the derivative of expected variance of quantization error
    '''
    from scipy.stats import norm, laplace
    if dist == 'norm':
        cdf_func = norm.cdf(p)
        pdf_func = norm.pdf(p)
    elif dist == 'laplace':  
        # https://en.wikipedia.org/wiki/Laplace_distribution
        cdf_func = laplace.cdf(p, 0, np.sqrt(0.5))   
        pdf_func = laplace.pdf(p, 0, np.sqrt(0.5)) # pdf(p, a, b) has variance 2*b^2
    else:
        raise RuntimeError("Not implemented for distribution: %s !!!" % dist) 
    
    ## option 1: overlapping
    if pw_opt == 1: 
        # quant_err = [F(p) - F(-p)] * p^2 + 2*[F(m) - F(p)] * m^2
        df_dp = 2 * pdf_func * (p * p - m * m) + 2 * p * (2 * cdf_func - 1.0)
    ## option 2: non-overlapping
    else:  
        # quant_err = [F(p) - F(-p)] * p^2 + 2*[F(m) - F(p)] * (m - p)^2
        # (the expression below assumes F(m) ~= 1, f(m) ~= 0 and drops a
        #  constant factor of 2, which does not move the zero crossing)
        df_dp = p - 2 * m + 2 * m * cdf_func + m * pdf_func * (2 * p - m)

    return df_dp
    def laplace(shape, scale):
        """
        Standard Laplace noise multiplied by `scale`
        Parameters
        ----------
        shape : tuple
            Shape of noise.
        scale : float
            Scale of noise.
        """
        # `laplace` below resolves to scipy.stats.laplace from the module
        # namespace; the method name lives in the class namespace and does
        # not shadow it inside the body
        rv = laplace(scale=scale, loc=0.)
        density = lambda x: np.prod(rv.pdf(x))
        cdf = lambda x: laplace.cdf(x, loc=0., scale=scale)
        pdf = lambda x: laplace.pdf(x, loc=0., scale=scale)
        derivative_log_density = lambda x: -np.sign(x) / scale
        grad_negative_log_density = lambda x: np.sign(x) / scale
        sampler = lambda size: rv.rvs(size=shape + size)

        constant = -np.prod(shape) * np.log(2 * scale)
        return randomization(shape,
                             density,
                             cdf,
                             pdf,
                             derivative_log_density,
                             grad_negative_log_density,
                             sampler,
                             lipschitz=1. / scale**2,
                             log_density=lambda x: -np.fabs(np.atleast_2d(x)).
                             sum(1) / scale - np.log(scale) + constant)
Example #8
    def guess(self, res, query_func, epsilon, global_sensitivity):
        data = self.data
        # subset query result
        subset = get_subset(data)
        qry_res = query_func(subset, axis=0)

        # imbalance-class posterior probability
        s = global_sensitivity / epsilon
        posterior = (laplace.pdf(res - qry_res, loc=0, scale=s))
        unique, counts = np.unique(posterior, return_counts=True)
        max_cls = counts.max()
        weights = max_cls / counts
        weight_dict = dict(zip(unique, weights))

        def add_weights(x, wdict):
            return x * wdict[x]

        posterior = np.vectorize(add_weights)(posterior, weight_dict)

        posterior /= posterior.sum()
        m = posterior.max()  #  0.025454860961990372
        m_index = np.argmax(posterior)
        print('max post prob: {}, subset index: {}'.format(m, m_index))

        m_subset = subset[:, m_index]
        values, counts = np.unique(m_subset, return_counts=True)
        config = dict(zip(values, counts))
        print("Imbalance-class MLE Adversary thinks the dataset config is {}".
              format(config))
def plot_laplace_vs_normal(norm_sd=1., b=1.):

    dstrb = pd.DataFrame(index=np.linspace(-10, 10, 1000))
    dstrb['normal'] = norm.pdf(dstrb.index.values, loc=0, scale=norm_sd)
    b0 = max(b * .5, 0)
    b2 = min(b * 2, 10)
    dstrb['laplace b={}'.format(b0)] = laplace.pdf(dstrb.index.values,
                                                   loc=0,
                                                   scale=b0)
    dstrb['laplace b={}'.format(b)] = laplace.pdf(dstrb.index.values,
                                                  loc=0,
                                                  scale=b)
    dstrb['laplace b={}'.format(b2)] = laplace.pdf(dstrb.index.values,
                                                   loc=0,
                                                   scale=b2)
    dstrb.plot(style=['--', '-', '-', '-'], figsize=(12, 4))
    plt.show()
Example #10
def reward_laplacian(cart_pole):
    x_threshold = 2.4
    if cart_pole.state[0] < -x_threshold or cart_pole.state[0] > x_threshold:
        return -1
    # return 1 if -0.1 <= angle_normalize(cart_pole.state[2]) <= 0.1 else -0.4
    theta_normalise = angle_normalize(cart_pole.state[2])
    # laplace.pdf(0) = 0.5 for the standard Laplace, so the factor of 2
    # makes the maximum reward exactly 1.0 at theta = 0
    reward_lapl = 2 * laplace.pdf(theta_normalise)
    return reward_lapl
Example #11
def signalProbability(receivedSignal, location, accessPointLocations,
                      accessPointLocationNumber):
    d = np.sqrt(
        np.power(accessPointLocations[accessPointLocationNumber] -
                 location, 2) + np.power(HALL_HEIGHT, 2))
    signalPower = TX_POWER * TX_DIRECTIVITY * RX_DIRECTIVITY * np.power(
        LAMBDA / (4 * np.pi * d), 2)
    powerDifference = -abs(receivedSignal - signalPower)
    # NB: scipy's Laplace `scale` is the diversity b, not the standard
    # deviation (std = b * sqrt(2))
    return laplace.pdf(powerDifference, scale=POWER_NOISE_STD_DEV)
Example #12
def laplaceFunc():
    for n in size:
        fig, ax = plt.subplots(1, 1)
        ax.set_title("Laplace distribution, n = " + str(n))
        x = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)
        ax.plot(x, laplace.pdf(x), 'b-', lw=5, alpha=0.6)
        r = laplace.rvs(size=n)
        ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
        plt.show()
Example #13
def laplace_distribution(select_size,
                         scale=1 / m.sqrt(2),
                         loc=0,
                         asked='rvs',
                         x=0):
    if asked == 'rvs':
        return laplace.rvs(size=select_size, scale=scale, loc=loc)
    elif asked == 'pdf':
        return laplace.pdf(x, loc=loc, scale=scale)
    elif asked == 'cdf':
        return laplace.cdf(x, loc=loc, scale=scale)
    return None
 def log_marginal(self):
     ll = 0.0
     # ## exponential distributions, ignore fixed params
     # ll -= self.gamma_f * self.sigmas.sum()
     # ## normal distributions
     # ll -= 0.5 * np.log(self.sigmas).sum() + ((self.mus ** 2) / (2.0 * self.sigmas)).sum()
     for j in range(self.dims):
         for k in range(3):
             ll += laplace.logpdf(self.mus[j,k], scale=self.gamma_f)
     for udist in self.udists:
         ll += udist.log_marginal()
     for obj in self.mixlist:
         # ll -= self.gamma_c * obj["taus"].sum()
         # ll -= 0.5 * np.log(obj["taus"]).sum() + ((obj["etas"] ** 2) / (2.0 * obj["taus"])).sum()
         for k in range(3):
             ll += laplace.logpdf(obj["etas"][k], scale=self.gamma_c)
         for j in range(self.dims):
             if not obj["ready"][j]:
                 self._calc_theta(obj, j)
             k = obj["assignments"][j]
             ll += obj["logthetas"][j,k]
     return ll
Example #15
def emp_d(data, bins = 100, method = 'norm', loc = 0, scaling = np.sqrt(2), title = "Empirical distribution"):
    mean = data.mean()
    std = data.std()
    h = sorted(data)
    h = pd.DataFrame(h)
    if method == 'norm':
        fit = norm.pdf(h, mean, std)
    elif method == 'laplace':
        fit = laplace.pdf(h, loc, scaling)
    elif method == 'uniform':
        fit = uniform.pdf(h)
    plt.plot(h,fit, '-')
    plt.hist(h, density = True, bins = bins)
    plt.title(title)   
Example #16
def calculate_identifiers(points, weights, means):
    probabilities = np.array([
        np.array([
            laplace.pdf(metric(points[i], means[j]))
            for j in range(0, means.shape[0])
        ]) for i in range(0, len(points))
    ])

    return np.array([
        np.array([
            weights[j] * probabilities[i][j] /
            np.sum(probabilities[i].dot(weights))
            for j in range(0, means.shape[0])
        ]).argmax() for i in range(0, len(points))
    ])
Example #17
def test_witness():
    from scipy.stats import laplace, norm
    from mmd import mix_rbf_kernel, witness
    basis = np.linspace(-5, 5, 200)
    K = mix_rbf_kernel(basis, basis, [0.5], False)
    yl = laplace.pdf(basis, scale=0.7)
    yg = norm.pdf(basis)
    #yl=yl/yl.sum()
    #yg=yg/yg.sum()
    wit = witness(K, yl, yg)
    wit = wit / np.linalg.norm(wit)
    plt.plot(basis, yl)
    plt.plot(basis, yg)
    plt.plot(basis, wit)
    plt.legend(['laplace', 'gaussian'])
    plt.show()
    def __init__(self, X):
        """
        :param X: list of training samples
        """

        self.data = {}
        self.data['X'] = X

        feats = X.shape[1] - 1

        self.params = {}

        for c in range(feats):
            self.params[str(c)] = parameter.Parameter(0, -10, 10, (lambda x: laplace.pdf(x, 0, 1)), (lambda x: np.random.normal(x, 0.1)))
            #self.params[str(c)] = parameter.Parameter(0, -10, 10, (lambda x: 1), (lambda x: np.random.normal(x, 0.25)))

        #leave room later for test split
        self.test = {}
Example #19
    def guess(self, res, query_func, epsilon, global_sensitivity):
        # subset query result
        subset = get_subset(self.data)
        qry_res = query_func(subset, axis=0)

        # posterior probability
        s = global_sensitivity / epsilon
        posterior = laplace.pdf(res - qry_res, loc=0, scale=s)
        # posterior = sigmoid(posterior)
        posterior /= posterior.sum()

        # make a guess
        m = posterior.max()  # 0.001036295052801138
        m_index = np.argmax(posterior)
        print('max post prob: {}, subset index: {}'.format(m, m_index))

        m_subset = subset[:, m_index]
        values, counts = np.unique(m_subset, return_counts=True)
        config = dict(zip(values, counts))
        print("MLE Adversary thinks the dataset config is {}".format(config))
Example #20
    def posterior(self, th):
        mu_th = self.model.mu_th
        cov_th = self.model.cov_th
        mu_eps = self.model.mu_eps
        cov_eps = self.model.cov_eps

        # compute model prediction corresp. to guess th
        predict = self.model.modelFun(th)
        # if the prediction does not make sense, posterior = 0
        if np.any(np.isnan(predict)):
            return 0
        # data = predict + eps, where eps is assumed normally distr'd
        epsilon = np.linalg.norm(self.data - predict)

        if self.isSparse:  # use sparse inducing prior?
            p_th = laplace.pdf(th, loc=mu_th, scale=cov_th).prod()
        else:
            # generic prior (assume std normal distr)
            p_th = mvn.pdf(th, mean=mu_th, cov=cov_th)
        # likelihood (assume std normal distr for now)
        p_eps = mvn.pdf(epsilon, mean=mu_eps, cov=cov_eps)
        # posterior ~ likelihood * prior
        p = p_eps * p_th
        return p
Example #21
s1 = 1
mu2 = 4
s2 = 1

# alpha is proportion of N in U; (1 - alpha) is proportion of P in U; these will be unknown for methods below;
# note that not alpha but alpha^* (computed below) is the proportion that the methods are supposed to identify
# (find out why in the paper)
alpha = 0.75

if mode == 'normal':
    p1 = lambda x: norm.pdf(x, mu1, s1)
    p2 = lambda x: norm.pdf(x, mu2, s2)
    pm = lambda x: p1(x) * (1 - alpha) + p2(x) * alpha
elif mode == 'laplace':
    p1 = lambda x: laplace.pdf(x, mu1, s1)
    p2 = lambda x: laplace.pdf(x, mu2, s2)
    pm = lambda x: p1(x) * (1 - alpha) + p2(x) * alpha

# Visualize the distributions

# plt.plot([x/100 for x in range(-1000, 1000)], [p1(x/100) for x in range(-1000, 1000)], 'b')
# plt.plot([x/100 for x in range(-1000, 1000)], [p2(x/100) for x in range(-1000, 1000)], 'g')
# plt.plot([x/100 for x in range(-1000, 1000)], [pm(x/100) for x in range(-1000, 1000)], 'r')
#
# plt.legend(handles=(Line2D([], [], linestyle='-', color='b'),
#                     Line2D([], [], linestyle='-', color='g'),
#                     Line2D([], [], linestyle='-', color='r')),
#            labels=('$f_p(x)$', '$f_n(x)$', '$f_u(x)$'),
#            fontsize='x-large')
# plt.show()
    def __init__(self, X, P, intermediate_states, Dsigma, L1=False, transition_first = False):
        """
        :param X: the observation matrix, -1 padded to the right (to make it square)
        :param P: problem indices, -1 padded to the right
        :param intermediate_states: number of intermediate states (between no-mastery and mastery)
        :param Dsigma: model parameter- initial setting of variance for problem difficulty values. Set to 0 to lock to BKT
        :return: nix
        """

        self.transition_first = transition_first
        print "Transitioning first?", transition_first

        self.data = {}
        self.data['X'] = X
        self.data['P'] = P
        numprobs = int(np.max(P) + 1)
        self.data['num_problems'] = numprobs
        self.N = len(X)
        self.T = len(X[0])
        self.numprobs = numprobs
        self.intermediate_states = intermediate_states
        total_states = intermediate_states + 2
        self.total_states = total_states
        self.params = {}

        #regularization parameter
        l1_b = 0.5

        #setup initial probability vector...
        # NOTE: here L[0] is p(unlearned)
        val = np.ones(total_states)/(total_states + 0.0)
        self.params['L'] = parameter.Parameter(val, 0, 1, (lambda x: 1), (lambda x: self.sample_dir(DIRICHLET_SCALE * x)))
        #print "pi starting as:"
        #print val

        """t_mat = np.ones([total_states, total_states])
        #setup transition triangle...
        for row in range(total_states):
            t_mat[row,0:row] = np.zeros(row)
            t_mat[row,:] = np.random.dirichlet(DIRICHLET_SCALE * t_mat[row,:])
        """

        #print "T starting as:"
        #print t_mat

        self.params['T'] = parameter.Parameter(0, -3, 3, (lambda x: laplace.pdf(x, 0, l1_b)), (lambda x: np.random.normal(x, 0.15)) )

        self.params['Tbeta'] = parameter.Parameter(0,0,1, (lambda x: self.uniform(x, -1, 1)), (lambda x: max(0,np.random.normal(x, 0.15))) )
        self.params['Tsigma'] = parameter.Parameter(0.5, 0, 1, (lambda x: self.uniform(x, 0, 1)), (lambda x: np.random.normal(x, 0.15)) )

        #setup guess vector in really clunky way
        for c in range(intermediate_states + 1):
            self.params['G_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x: self.uniform(x, -3, 3)),
                                                             (lambda x: self.sample_guess_prob(x)))
        self.params['S'] = parameter.Parameter(0, -3, 3, (lambda x: self.uniform(x, -3, 3)),
                                               (lambda x: self.sample_guess_prob(x)))

        #problem difficulty vector, also in clunky way
        self.emission_mask = []
        self.emission_mats = []
        for c in range(numprobs):
            self.emission_mask.append(False)
            self.emission_mats.append(np.ones((total_states, 2)))
            if L1:
                print "Using L1"
                self.params['D_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x, d_sig: laplace.pdf(x, 0, l1_b) + 0*d_sig),
                                                             (lambda x: np.random.normal(x, 0.15)))
            else:
                print "Using adaptive L2"
                self.params['D_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x, d_sig: norm.pdf(x, 0, d_sig)),
                                                             (lambda x: np.random.normal(x, 0.15)))

        self.params['Dsigma'] = parameter.Parameter(Dsigma, 0, 3, (lambda x: invgamma.pdf(x, 1, 0, 2)),
                                                    (lambda x: np.random.normal(x, 0.15)))

        #leave room later for test split
        self.test = {}
Example #23
plt.xlim([ALIV.index[0], ALIV.index[-1]])

plt.plot(ALIV_adj)
plt.xlim([ALIV.index[0], ALIV.index[-1]])

############## DISTRIBUTION
from scipy.stats import norm
from scipy.stats import laplace
from scipy.stats import probplot
std = np.std(ALIV_adj)
mean = np.mean(ALIV_adj)

h = sorted(ALIV_adj.values)
h = pd.DataFrame(h)
fit = norm.pdf(h, mean, std)  # normal fit (computed but overwritten below)
fit = laplace.pdf(h, 0, 0.014862419499466137)
plt.plot(h, fit, '-')
plt.hist(h, density=True, bins=100)
plt.title("ALIV and Laplace distribution")
#plt.hist(h)
plt.show()

import pylab
measurements = np.random.normal(loc=20, scale=5, size=100)
probplot(measurements, dist="norm", plot=pylab)

## Gen random
QQ_aliv = np.random.normal(0, 1, 100)
QQ_aliv = np.random.laplace(2, 5, size=100)
QQ_aliv = np.random.uniform(
    -1.6, 1.6, size=100)  #(upper-lower, lower, rs) returned by probplot
#!/usr/bin/env python

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t, laplace, norm


# generalized student t with mu=0, q=1
# See Eqn 2 of "Bayesian sparsity path analysis"
def gt(x, a, c):
    return 1 / (2 * c) * (1 + np.abs(x) / (a * c))**(-a - 1)


x = np.linspace(-4, 4, 100)
n = norm.pdf(x, loc=0, scale=1)
l = laplace.pdf(x, loc=0, scale=1)
t = t.pdf(x, df=1, loc=0, scale=1)
g = gt(x, 1, 1)

plt.figure()
plt.plot(x, g, 'k:', x, l, 'b-', x, t, 'r--')
plt.legend(('GenStudent(a=1,c=1)', r'Laplace($\mu=0,\lambda=1)$',
            r'Student($\mu=0,\sigma=1,\nu=1$)'))
plt.ylabel('pdf')
plt.savefig('figures/genStudentLaplacePdfPlot.pdf')

plt.figure()
plt.plot(x, np.log(g), 'k:', x, np.log(l), 'b-', x, np.log(t), 'r--')
plt.legend(('GenStudent(a=1,c=1)', r'Laplace($\mu=0,\lambda=1)$',
            r'Student($\mu=0,\sigma=1,\nu=1$)'))
plt.ylabel('log pdf')
    def __init__(self, X, P, S, intermediate_states, Dsigma):
        """
        :param X: the observation matrix, -1 padded to the right (to make it square)
        :param P: problem indices, -1 padded to the right
        :param intermediate_states: number of intermediate states (between no-mastery and mastery)
        :param Dsigma: model parameter- initial setting of variance for problem difficulty values. Set to 0 to lock to BKT
        :return: nix
        """

        self.data = {}
        self.data['X'] = X
        self.data['P'] = P
        self.data['S'] = S
        numprobs = int(np.max(P) + 1)
        numskills = int(np.max(S) + 1)
        self.data['num_problems'] = numprobs
        self.total_skills = numskills
        self.N = len(X)
        self.T = len(X[0])
        self.numprobs = numprobs
        self.intermediate_states = intermediate_states

        if intermediate_states > 0:
            raise ValueError("don't put multiple states in this model yet")

        total_states = intermediate_states + 2
        self.total_states = total_states
        self.params = {}

        l1_b_u = 0.15
        l1_b_d = 0.5

        #We need one set of BKT params for each skill...
        for sk in range(numskills):
            #setup initial probability vectors...
            # NOTE: here L[0] is p(unlearned)
            val = np.ones(total_states)/(total_states + 0.0)
            self.params['L-'+str(sk)+'-'] = parameter.Parameter(val, 0, 1, (lambda x: 1), (lambda x: self.sample_dir(DIRICHLET_SCALE * x)))
            print "pi starting as:"
            print val

            t_mat = np.ones([total_states, total_states])
            #setup transition triangle...
            for row in range(total_states):
                t_mat[row,0:row] = np.zeros(row)
                t_mat[row,:] = np.random.dirichlet(DIRICHLET_SCALE * t_mat[row,:])

            print "T starting as:"
            print t_mat
            self.params['T-'+str(sk)+'-'] = parameter.Parameter(t_mat, 0, 1, (lambda x: 1), (lambda x: self.sample_dir_mat(DIRICHLET_SCALE * x)))

            #setup guess vector in really clunky way
            for c in range(intermediate_states + 1):
                self.params['G-'+str(sk)+'-_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x: self.uniform(x, -3, 3)),
                                                                 (lambda x: self.sample_guess_prob(x)))
            self.params['S-'+str(sk)+'-'] = parameter.Parameter(0, -3, 3, (lambda x: self.uniform(x, -3, 3)),
                                                   (lambda x: self.sample_guess_prob(x)))

            #skill "usefulness" parameter. Same range/sampling as Guess/Slip
            for sk2 in range(numskills):
                if sk != sk2:
                    self.params['U-'+str(sk)+'-'+str(sk2)] = parameter.Parameter(0, 0, 3, (lambda x: laplace.pdf(x, 0, l1_b_u)),
                                                       (lambda x: self.sample_KT_param(x)))


        #problem difficulty vector, also in clunky way
        for c in range(numprobs):
            self.params['D_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x: laplace.pdf(x, 0, l1_b_d)),
                                                             (lambda x: np.random.normal(x, 0.15)))
        """for c in range(numprobs):
            self.params['D_' + str(c)] = parameter.Parameter(0, -3, 3, (lambda x, d_sig: norm.pdf(x, 0, d_sig)),
                                                             (lambda x: np.random.normal(x, 0.15)))
        """
        self.params['Dsigma'] = parameter.Parameter(Dsigma, 0, 3, (lambda x: invgamma.pdf(x, 1, 0, 2)),
                                                    (lambda x: np.random.normal(x, 0.15)))

        #leave room later for test split
        self.test = {}
Example #26
    t_y = 40 - np.array(t.y)
    plt.plot(t_y, t_x)


# fig = plt.figure(figsize=(10,10))
# for d in data[0].trajectories:
#     ind = [i for i in range(len(d.x))]
#     plt.scatter(ind, d.x)

fig = plt.figure(figsize=(10, 10))
x_s = [d.x for d in data[0].trajectories]
plt.hist(x_s)

x = np.linspace(0, 35, 350)
# overlay Laplace bumps with hand-picked locations and amplitudes
for loc, amp in [(3.5, 4), (6, 4), (9, 7), (12, 6),
                 (15, 7), (18, 8), (21, 12), (24, 7)]:
    plt.plot(x, laplace.pdf(x, loc=loc, scale=0.5) * amp, color='green', lw=3)
Example #27
import numpy as np
import matplotlib.pyplot as plt
import os
figdir = "../figures"


def save_fig(fname):
    plt.savefig(os.path.join(figdir, fname))


from scipy.stats import t, laplace, norm

x = np.linspace(-4, 4, 100)
n = norm.pdf(x, loc=0, scale=1)
l = laplace.pdf(x, loc=0, scale=1 / (2**0.5))
t1 = t.pdf(x, df=1, loc=0, scale=1)
t2 = t.pdf(x, df=2, loc=0, scale=1)

plt.plot(x, n, 'k:', x, t1, 'b--', x, t2, 'g--', x, l, 'r-')
plt.legend(('Gauss', 'Student(dof 1)', 'Student (dof 2)', 'Laplace'))
plt.ylabel('pdf')
save_fig('studentLaplacePdf2.pdf')
plt.show()

plt.figure()
plt.plot(x, np.log(n), 'k:', x, np.log(t1), 'b--', x, np.log(t2), 'g--', x,
         np.log(l), 'r-')
plt.ylabel('log pdf')
plt.legend(('Gauss', 'Student(dof 1)', 'Student (dof 2)', 'Laplace'))
#plt.legend(('Gauss', 'Student', 'Laplace'))
Example #28
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import laplace

def gaussian_kernel(sigma, x, xi):
    # reconstructed: the original snippet began mid-function with only this return's operands
    numerator = np.exp(-((x - xi) ** 2) / (2 * sigma ** 2))
    denominator = np.sqrt(2 * np.pi) * sigma
    return numerator / denominator

def foo(data, sigma, x):
    # kernel density estimate at x (data has 1000 points in this example)
    total = 0
    for xi in data:
        total += gaussian_kernel(sigma, x, xi)
    return total / len(data)


def get_data_y(data_x, sigma):
    return [foo(data_x, sigma, x) for x in data_x]


sigmas = np.linspace(0.01, 1.0, 9)
data_x = sorted([np.random.laplace() for i in range(1000)])
real_laplace = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)

fig, ax = plt.subplots(nrows=3, ncols=3)

i = 0
for row in ax:
    for col in row:
        data_y = get_data_y(data_x, sigmas[i])
        col.plot(data_x, data_y, markersize=2)
        col.plot(real_laplace, laplace.pdf(real_laplace), markersize=3)
        col.set_title('sigma = ' + str(sigmas[i]))
        i += 1

plt.show()
Example #29
def laplace_function(x, b, x0, df, y0):
    # `df` is accepted but unused; kept to match a fitting-interface signature
    return laplace.pdf((x - x0) / b) / b + y0
Example #30
    # Plot normal mixture
    plt.plot(
        x,
        norm.pdf(x, loc=-0.5, scale=0.25) * 0.3 +
        norm.pdf(x, loc=2.5, scale=1.0) * 0.7,
        label="Normal Mixture",
        alpha=0.8,
        color="tab:orange",
        linewidth=1.4,
    )

    # Plot Laplace
    plt.plot(
        x,
        laplace.pdf(x, loc=0.5, scale=1.5),
        label="Laplace",
        alpha=0.8,
        color="tab:green",
        linewidth=1.4,
    )

    # Plot Rayleigh
    plt.plot(
        x,
        rayleigh.pdf(x, scale=2),
        label="Rayleigh",
        alpha=0.8,
        color="tab:red",
        linewidth=1.4,
    )
Example #31
import matplotlib.pyplot as pl
import numpy as np
from scipy.stats import t, laplace, norm

a = np.random.randn(30)
outliers = np.array([8, 8.75, 9.5])
pl.hist(a, 7, weights=[1 / 30] * 30, rwidth=0.8)

#fit without outliers
x = np.linspace(-5, 10, 500)

loc, scale = norm.fit(a)
n = norm.pdf(x, loc=loc, scale=scale)

loc, scale = laplace.fit(a)
l = laplace.pdf(x, loc=loc, scale=scale)

fd, loc, scale = t.fit(a)
s = t.pdf(x, fd, loc=loc, scale=scale)
pl.plot(x, n, 'k>',
        x, s, 'r-',
        x, l, 'b--')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('robustDemo_without_outliers.png')

#add the outliers
pl.figure()
pl.hist(a, 7, weights=[1 / 33] * 30, rwidth=0.8)
pl.hist(outliers, 3, weights=[1 / 33] * 3, rwidth=0.8)
aa = np.hstack((a, outliers))
import matplotlib.pyplot as plt
from scipy.stats import laplace
import numpy as np
x = np.linspace(-3, 3, 100)
plt.plot(x, laplace.pdf(x),linewidth=2.0, label="laplace PDF")
plt.plot(x, laplace.cdf(x),linewidth=2.0, label="laplace CDF")
plt.legend(bbox_to_anchor=(.35,1))
plt.show()
Example #33
import superimport

import pyprobml_utils as pml
import numpy as np
from scipy.stats import uniform, laplace, norm
import matplotlib.pyplot as plt

n = 2000
x = np.arange(-4, 4, 0.01)
y1 = norm.pdf(x, 0, 1)
y2 = uniform.pdf(x, -2, 4)
y3 = laplace.pdf(x, 0, 1)

plt.plot(x, y1, color='blue')
plt.plot(x, y2, color='green')
plt.plot(x, y3, color='red')
pml.savefig('1D.pdf')
plt.savefig('1D.pdf')
plt.show()

x1 = np.random.normal(0, 1, n).reshape(n, 1)
x2 = np.random.normal(0, 1, n).reshape(n, 1)
plt.scatter(x1, x2, marker='.', color='blue')
plt.gca().set_aspect('equal')
plt.xlim(-4,4)
plt.ylim(-4,4)
plt.title("Gaussian")
pml.savefig('Gaussian.pdf')
plt.savefig('Gaussian.pdf')
plt.show()
Example #34
    def __call__(self, options, pars, obs=None, trackobs=False):
        """Simulate process model to get predicted
        choice and sample size distributions"""


        ### Basic setup
        np.random.seed()
        N     = pars.get('N', 10000)   # number of simulated trials
        max_T = int(pars.get('max_T', 1000)) # maximum sample size


        ### Stopping rules

        if self.stoprule == 'optional':
            threshold = pars.get('theta', 3)   # decision threshold (optional only)
            r      = pars.get('r', 0)           # rate of boundary collapse (optional only)
            stop_T = None

        # fixed sample size
        elif self.stoprule == 'fixedT':
            stop_T = pars.get('stop_T', 2)
            max_T  = stop_T
            threshold = 1000

        # geometric
        elif self.stoprule == 'fixedGeom':

            threshold = 1000
            p_stop_geom = pars.get('p_stop_geom')
            minss = pars.get('minsamplesize', 1)

            # sample size (not index), adjusted by minsamplesize
            stop_T = geom.rvs(p_stop_geom, size=N) + (minss - 1)

            # don't go past max_T
            stop_T[np.where(stop_T > max_T)[0]] = max_T


        ### Search

        # probability of sampling each option
        p_sample_H = pars.get('p_sample_H', .5)
        p_sample_L = 1 - p_sample_H

        # if p_switch is specified, it will be used to generate
        # sequences of observations (rather than p_sample_H and p_sample_L)
        p_switch   = pars.get('p_switch', None)

        # are the first two samples drawn from different options?
        switchfirst = pars.get('switchfirst', False)


        ### Sequential weights

        # compute value and attentional weights for multinomial problems
        if self.problemtype == 'multinomial':
            if self.rdw is None: wopt = options
            else:                wopt = self.rdw[pars['probid']]
            weights = np.array([cpt.pweight_prelec(option, pars) for option in wopt])
            values = np.array([cpt.value_fnc(option[:,0], pars) for option in options])
            v = np.array([np.multiply(weights[i], values[i]) for i in range(len(options))])
            V = v.sum(axis=1)
            evar = np.array([np.dot(weights[i], values[i] ** 2) - np.sum(v[i]) ** 2 for i in range(len(options))])
            sigma2 = np.max([np.sum(evar), 1e-10])
            sigma2mean = np.max([np.mean(evar), 1e-10])

            # sequential weights
            omega = []
            for i, option in enumerate(options):
                omega.append(weights[i]/option[:,1])
            omega = np.array(omega)
            omega[np.isnan(omega)] = 0
            w_outcomes = np.array([np.multiply(omega[i], values[i]) for i in range(len(options))])

        elif self.problemtype == 'normal':

            if 'pow_gain' in pars:
                w_options = np.zeros((2, 2))  # float array; an int-literal init would truncate ev/evar
                for i in range(2):
                    ev, evar = cpt.normal_raised_to_power(options[i], pars['pow_gain'])
                    w_options[i] = np.array([ev, evar])
                sigma2 = w_options[:,1].sum()
                evar = w_options[:,1]
            else:
                evar = options[:,1]
                sigma2 = options[:,1].sum()
                sigma2mean = options[:,1].mean()

        # scale by variance
        if 'sc' in pars:
            # raised to power
            sc = pars.get('sc')
            variance_scale = 1 / float(np.sqrt(sigma2) ** sc)
        elif 'sc2' in pars:
            # multiplicative
            sc = pars.get('sc2')
            variance_scale = 1 / float(np.sqrt(sigma2) * sc)
        elif 'sc0' in pars:
            sc0 = pars.get('sc0')
        elif 'sc_mean' in pars:
            sc = pars.get('sc_mean')
            variance_scale = 1 / float(np.sqrt(sigma2mean) ** sc)
        elif 'sc2_mean' in pars:
            sc = pars.get('sc2_mean')
            variance_scale = 1 / float(np.sqrt(sigma2mean) * sc)
        elif 'sc_x' in pars:
            variance_scale = pars.get('sc_x')
        else:
            variance_scale = 1


        ### Starting distribution

        Z = np.zeros(N)
        if 'tau' in pars:
            tau = pars.get('tau')
            Z = laplace.rvs(loc=0, scale=tau, size=N)

        elif 'tau_trunc' in pars:
            tau = pars.get('tau_trunc')
            dx = .001
            x = np.arange(-(threshold-dx), threshold, dx)
            p = laplace.pdf(x, loc=0, scale=tau)
            pn = p/p.sum()
            Z = np.random.choice(x, N, p=pn)

        elif 'tau_rel' in pars:
            tau = pars.get('tau_rel')
            tau = tau / variance_scale
            Z = laplace.rvs(loc=0, scale=tau, size=N)

        elif 'tau_rel_trunc' in pars:
            tau = pars.get('tau_rel_trunc')
            dx = .001
            x = np.arange(-1+dx, 1, dx)
            p = laplace.pdf(x, loc=0, scale=tau)
            pn = p/p.sum()
            Z = np.random.choice(x, N, p=pn)
            Z = Z * threshold

        elif 'tau_unif' in pars:
            #tau = pars.get('tau_unif', .001)
            #theta_max = pars.get('theta_max', theta)
            #theta_max = 200
            #rng = tau * theta_max
            rng = pars.get('tau_unif', .001)
            Z = np.linspace(-rng, rng, num=N)
            np.random.shuffle(Z)
            #Z = np.random.uniform(low=(-tau), high=tau, size=N)

        elif 'tau_unif_rel' in pars:
            dx = .001
            rng = pars.get('tau_unif_rel', .001)
            Z = np.linspace(-(threshold-dx) * rng, (threshold-dx) * rng, num=N)
            np.random.shuffle(Z)

        elif 'tau_normal' in pars:
            tau = pars.get('tau_normal')
            Z = norm.rvs(loc=0, scale=tau, size=N)

        elif 'tau_normal_trunc' in pars:
            tau = pars.get('tau_normal_trunc')
            dx = .001
            x = np.arange(-(threshold-dx), threshold, dx)
            p = norm.pdf(x, loc=0, scale=tau)
            pn = p/p.sum()
            Z = np.random.choice(x, N, p=pn)


        ### Simulate

        if obs is not None:

            # assume a single sequence of known observations
            sampled_option = obs['option'].values
            outcomes       = obs['outcome'].values
            max_T          = outcomes.shape[0]
            sgn            = 2*sampled_option - 1
            sv             = np.zeros(outcomes.shape)

            if self.problemtype == 'normal':

                c = pars.get('c', 0)

                # add weighting and criterion here
                sv = cpt.value_fnc(outcomes - c, pars)

            elif self.problemtype == 'multinomial':
                pass
                for i, opt in enumerate(options):
                    for j, x in enumerate(opt):
                        ind = np.where((sampled_option==i) & (outcomes==x[0]))[0]
                        sv[ind] = w_outcomes[i][j]

            sv = np.multiply(sv, sgn)
            sampled_option = np.tile(sampled_option, (N, 1))
            outcomes = np.tile(outcomes, (N, 1))
            sv = np.tile(sv, (N, 1))


        elif self.choicerule == 'random':
            sv = np.zeros((N, max_T))
            sampled_option = None
            outcomes = None

        else:
            # otherwise, simulate sampling from options

            if False and not trackobs and self.problemtype == 'multinomial' and p_switch is None:
                sampled_option = None
                outcomes = None

                valence = deepcopy(w_outcomes)
                valence[0] = -1 * valence[0]
                valence = valence.ravel()

                p = deepcopy(options[:,:,1])
                p[0] = p_sample_L * p[0]
                p[1] = p_sample_H * p[1]
                p = p.ravel()

                sv = np.random.choice(valence, p=p, size=(N, max_T))

                # ensure that both options are sampled
                # at least once
                if switchfirst:
                    first = np.random.binomial(1, .5, size=N)
                    second = 1 - first
                    first2 = np.transpose((first, second))
                    sampled_A = first2==0
                    sampled_B = first2==1

                    observed_A = np.random.choice(range(len(w_outcomes[0])),
                                                size=sampled_A.sum(),
                                                p=options[0][:,1])
                    observed_B = np.random.choice(range(len(w_outcomes[1])),
                                                size=sampled_B.sum(),
                                                p=options[1][:,1])

                    # subjective weighting
                    sv2 = np.zeros((N, 2))
                    sv2[sampled_A] = -1 * w_outcomes[0][observed_A]
                    sv2[sampled_B] =      w_outcomes[1][observed_B]
                    sv[:,:2] = sv2

            else:

                # which option was sampled
                sampled_option = np.zeros((N, max_T), int)

                if p_switch is None:
                    # ignore switching, just search based on [p_sample_H, p_sample_L]
                    sampled_option = np.random.binomial(1, p_sample_H, size=(N, max_T))
                else:
                    # generate search sequences based on p_switch
                    switches = np.random.binomial(1, p_switch, size=(N, max_T - 1))
                    sampled_option[:,0] = np.random.binomial(1, .5, size=N)
                    for i in range(max_T - 1):
                        switch_i = switches[:,i]
                        sampled_option[:,i+1] = np.abs(sampled_option[:,i] - switch_i)

                # ensure both options sampled at least once
                if switchfirst:
                    first = np.random.binomial(1, .5, size=N)
                    sampled_option[:,0] = first
                    sampled_option[:,1] = 1 - first

                # FOR SIMULATION
                #sampled_option = np.zeros((N, max_T), int)
                #for i in range(N):
                #    arr = sampled_option[i]
                #    arr[:(max_T/2)] = 1
                #    np.random.shuffle(arr)
                #    sampled_option[i] = arr


                # FOR SIMULATION
                #p_switch = pars.get('p_switch', .5)

                #sampled_option = np.zeros((N, max_T), int)
                #sampled_option[:,0] = np.random.choice([0, 1], p=[.5, .5], size=N)

                #for i in range(max_T - 1):
                #    switch = np.random.choice([0, 1], p=[1-p_switch, p_switch], size=N)
                #    sampled_option[:,i+1] = np.abs(sampled_option[:,i] - switch)


                sampled_A = sampled_option==0
                sampled_B = sampled_option==1
                N_sampled_A = sampled_A.sum()
                N_sampled_B = sampled_B.sum()

                # observation matrix - which outcome occurred (by index)
                observed = np.zeros((N, max_T), int)
                if self.problemtype == 'multinomial':
                    observed_A = np.random.choice(range(len(w_outcomes[0])),
                                                size=sampled_A.sum(),
                                                p=options[0][:,1])
                    observed_B = np.random.choice(range(len(w_outcomes[1])),
                                                size=sampled_B.sum(),
                                                p=options[1][:,1])
                    observed[sampled_A] = observed_A
                    observed[sampled_B] = observed_B


                # record outcomes experienced (by value)
                outcomes = np.zeros((N, max_T))
                if self.problemtype == 'multinomial':
                    obj_outcomes = options[:,:,0]
                    #outcomes[sampled_A] = obj_outcomes[0][observed_A]
                    #outcomes[sampled_B] = obj_outcomes[1][observed_B]

                    # note weighting already done above
                    outcomes[sampled_A] = w_outcomes[0][observed_A]
                    outcomes[sampled_B] = w_outcomes[1][observed_B]
                    outcomes_A = outcomes[sampled_A]
                    outcomes_B = outcomes[sampled_B]
                else:
                    A, B = options
                    sigmaA = np.sqrt(A[1])
                    sigmaB = np.sqrt(B[1])

                    # weird conversion for np.truncnorm
                    lowerA, upperA = (X_MIN - A[0]) / sigmaA, (X_MAX - A[0]) / sigmaA
                    lowerB, upperB = (X_MIN - B[0]) / sigmaB, (X_MAX - B[0]) / sigmaB
                    outcomes_A = np.round(truncnorm.rvs(lowerA, upperA, loc=A[0], scale=sigmaA, size=N_sampled_A))
                    outcomes_B = np.round(truncnorm.rvs(lowerB, upperB, loc=B[0], scale=sigmaB, size=N_sampled_B))
                    outcomes[sampled_A] = outcomes_A
                    outcomes[sampled_B] = outcomes_B

                    if 'pow_gain' in pars:
                        outcomes   = cpt.value_fnc(outcomes, pars)
                        outcomes_A = cpt.value_fnc(outcomes_A, pars)
                        outcomes_B = cpt.value_fnc(outcomes_B, pars)


                # comparison
                sv = np.zeros((N, max_T))


                # criteria for each option
                if 'c' in pars:
                    # compare to constant
                    c = pars.get('c')
                    c_A = c * np.ones(outcomes_A.shape)
                    c_B = c * np.ones(outcomes_B.shape)

                elif 'c_0' in pars:
                    # compare to sample mean
                    c_0 = pars.get('c_0', 45)

                    sum_A = np.cumsum(np.multiply(sampled_A, outcomes), axis=1)
                    N_A = np.cumsum(sampled_A, axis=1, dtype=float)
                    mn_A = np.multiply(sum_A, 1/N_A)
                    mn_A[np.isnan(mn_A)] = c_0

                    sum_B = np.cumsum(np.multiply(sampled_B, outcomes), axis=1)
                    N_B = np.cumsum(sampled_B, axis=1, dtype=float)
                    mn_B = np.multiply(sum_B, 1/N_B)
                    mn_B[np.isnan(mn_B)] = c_0

                    compA = np.multiply(outcomes - mn_B, sampled_A)
                    compB = np.multiply(outcomes - mn_A, sampled_B)
                    #sv = (-1 * compA) + compB

                else:
                    # (default) compare to true (weighted)
                    # mean of other option
                    if self.problemtype == 'multinomial':
                        A, B = V
                    elif self.problemtype == 'normal':
                        if 'pow_gain' in pars:
                            A, B = w_options[:,0]
                        else:
                            A, B = options[:,0]
                    c_A = B * np.ones(outcomes_A.shape)
                    c_B = A * np.ones(outcomes_B.shape)

                # combine
                if 'c_0' in pars:
                    sv = (-1 * compA) + compB
                else:
                    sv[sampled_A] = -1 * (outcomes_A - c_A)
                    sv[sampled_B] =      (outcomes_B - c_B)


                if 'sc0' in pars:

                    # for any options with a variance of zero,
                    # replace with sc0
                    evar[evar==0.] = sc0

                    # scaling factor for each option depends on
                    # its variance
                    sc_A, sc_B = 1/np.sqrt(evar)
                    sv[sampled_A] = sv[sampled_A] * sc_A
                    sv[sampled_B] = sv[sampled_B] * sc_B
                else:
                    # fixed scaling factor across all options
                    sv = sv * variance_scale


                # noise
                if 'c_sigma' in pars:
                    c_sigma = pars.get('c_sigma')
                    err = np.random.normal(loc=0, scale=c_sigma, size=outcomes.shape)

                elif 'dv_sigma' in pars:
                    dv_sigma = pars.get('dv_sigma')
                    err = np.random.normal(loc=0, scale=dv_sigma, size=N)
                    err = np.tile(err, (max_T, 1)).transpose()
                else:
                    err = np.zeros(outcomes.shape)

                sv = sv + err

        ### Accumulation

        # add starting states to first outcome
        sv[:,0] = sv[:,0] + Z

        # p_stay
        #p_stay = pars.get('p_stay', 0)
        #if p_stay > 0:
        #    attended = np.random.binomial(1, 1-p_stay, size=(N, max_T))
        #    sv = np.multiply(sv, attended)


        # accumulate
        P = np.cumsum(sv, axis=1)


        ### Stopping

        if self.stoprule == 'optional':
            if r > 0:
                # collapsing boundaries
                threshold_min = .1
                upper = threshold_min * np.ones((N, max_T))
                dec = np.arange(threshold, threshold_min, -r*threshold)
                dec = dec[:max_T]
                upper[:,:dec.shape[0]] = np.tile(dec, (N, 1))

                lower = -threshold_min * np.ones((N, max_T))
                inc = np.arange(-threshold, -threshold_min, r*threshold)
                inc = inc[:max_T]
                lower[:,:inc.shape[0]] = np.tile(inc, (N, 1))

                crossed = -1 * (P < lower) + 1 * (P > upper)
            else:
                # fixed boundaries
                crossed = -1 * (P < -threshold) + 1 * (P > threshold)

            # if minimum sample size, prevent stopping
            minsamplesize = pars.get('minsamplesize', 1) - 1
            crossed[:,:minsamplesize] = 0

            # any trials where hit max_T, make decision based on
            # whether greater or less than zero
            nodecision = np.where(np.sum(np.abs(crossed), axis=1)==0)[0]
            if len(nodecision) > 0:
                n_pos = np.sum(P[nodecision,max_T-1] > 0)
                n_eq = np.sum(P[nodecision,max_T-1] == 0)
                n_neg = np.sum(P[nodecision,max_T-1] < 0)
                #assert n_eq == 0, "reached max_T with preference of 0"

                crossed[nodecision,max_T-1] +=  1*(P[nodecision,max_T-1] >= 0)
                crossed[nodecision,max_T-1] += -1*(P[nodecision,max_T-1] < 0)

        elif self.stoprule == 'fixedT':
            crossed = np.zeros((N, stop_T), dtype=int)
            crossed[:,(stop_T-1)] = np.sign(P[:,(stop_T-1)])

            indifferent = np.where(crossed[:,(stop_T-1)]==0)[0]
            n_indifferent = len(indifferent)
            crossed[indifferent] = np.random.choice([-1,1], p=[.5, .5], size=(n_indifferent,1))
            assert np.sum(crossed[:,(stop_T-1)]==0)==0

        elif self.stoprule == 'fixedGeom':

            crossed = np.zeros((N, max_T), dtype=int)
            crossed[range(N),stop_T-1] = np.sign(P[range(N),stop_T-1])

            indifferent = np.where(crossed[range(N),stop_T-1]==0)[0]
            n_indifferent = len(indifferent)
            t_indifferent = (stop_T-1)[indifferent]
            crossed[indifferent,t_indifferent] = np.random.choice([-1,1], p=[.5,.5], size=n_indifferent)


        if obs is not None:

            p_stop_choose_A = np.sum(crossed==-1, axis=0)*(1/float(N))
            p_stop_choose_B = np.sum(crossed==1, axis=0)*(1/float(N))
            p_sample = 1 - (p_stop_choose_A + p_stop_choose_B)

            return {'p_stop_choose_A': p_stop_choose_A,
                    'p_stop_choose_B': p_stop_choose_B,
                    'p_sample': p_sample,
                    'traces': P}

        else:

            # samplesize is the **index** where threshold is crossed
            samplesize = np.sum(1*(np.cumsum(np.abs(crossed), axis=1)==0), axis=1)
            choice = (crossed[range(N),samplesize] + 1)/2
            p_resp = choice.mean()
            ss_A = samplesize[choice==0]
            ss_B = samplesize[choice==1]

            p_stop_A = np.zeros(max_T)
            p_stop_B = np.zeros(max_T)
            p_stop_A_f = np.bincount(ss_A, minlength=max_T)
            p_stop_B_f = np.bincount(ss_B, minlength=max_T)
            if self.stoprule == 'optional' or self.stoprule == 'fixedGeom':
                if p_stop_A_f.sum() > 0:
                    p_stop_A = p_stop_A_f/float(p_stop_A_f.sum())
                if p_stop_B_f.sum() > 0:
                    p_stop_B = p_stop_B_f/float(p_stop_B_f.sum())

            elif self.stoprule == 'fixedT':
                p_stop_A[stop_T-1] = 1
                p_stop_B[stop_T-1] = 1

            assert (p_stop_A_f.sum() + p_stop_B_f.sum()) == N

            p_stop_cond = np.transpose([p_stop_A, p_stop_B])
            p_stop_cond[np.isnan(p_stop_cond)] = 0.
            f_stop_cond = np.transpose([p_stop_A_f, p_stop_B_f])/float(N)


            # only include data up to choice
            outcome_ind = None
            traces = None
            if type(sampled_option) is np.ndarray and trackobs:
                sampled_option = [sampled_option[i][:(samplesize[i]+1)] for i in range(samplesize.shape[0])]
                outcomes       = [outcomes[i][:(samplesize[i]+1)] for i in range(samplesize.shape[0])]
                traces         = [P[i][:(samplesize[i]+1)] for i in range(samplesize.shape[0])]
                if self.problemtype == 'multinomial':
                    outcome_ind    = [observed[i][:(samplesize[i]+1)] for i in range(samplesize.shape[0])]


            return {'choice': choice,
                    'samplesize': samplesize + 1,
                    'p_resp': np.array([1-p_resp, p_resp]),
                    'p_stop_cond': p_stop_cond,
                    'f_stop_cond': f_stop_cond,
                    'sampled_option': sampled_option,
                    'outcomes': outcomes,
                    'outcome_ind': outcome_ind,
                    'traces': traces,
                    'Z': Z
                    }
Example #35
 def density(self, x):
     # scipy's Laplace `scale` is the diversity b (std = b * sqrt(2))
     return laplace.pdf(x, loc=self.mu, scale=self.sigma)
Example #36
def plot_laplace_dist(signal, ax, lim, bins):
    (mu, sigma) = dist_stats(signal, lim)
    b = sigma / (2**0.5)  # Laplace scale b from the std: sigma = b * sqrt(2)
    ax.plot(bins, laplace.pdf(bins, mu, b), 'r--')
Example #37
import numpy as np
from scipy.stats import laplace
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)

mean, var, skew, kurt = laplace.stats(moments='mvsk')

x = np.linspace(laplace.ppf(0.01), laplace.ppf(0.99), 100)
ax.plot(x, laplace.pdf(x), 'r-', lw=5, alpha=0.6, label='laplace pdf')

rv = laplace()
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
vals = laplace.ppf([0.001, 0.5, 0.999])
np.allclose([0.001, 0.5, 0.999], laplace.cdf(vals))

r = laplace.rvs(size=1000)
#ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
#ax.legend(loc='best', frameon=False)

plt.show()
#!/usr/bin/env python

import numpy as np
import matplotlib.pyplot as pl
from scipy.stats import t, laplace, norm

x = np.linspace(-4, 4, 100)
n = norm.pdf(x, loc=0, scale=1)
l = laplace.pdf(x, loc=0, scale=1 / (2 ** 0.5))
t = t.pdf(x, df=1, loc=0, scale=1)

pl.plot(x, n, 'k:',
        x, t, 'b--',
        x, l, 'r-')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('studentLaplacePdfPlot_1.png')

pl.figure()
pl.plot(x, np.log(n), 'k:',
        x, np.log(t), 'b--',
        x, np.log(l), 'r-')
pl.legend(('Gauss', 'Student', 'Laplace'))
pl.savefig('studentLaplacePdfPlot_2.png')

pl.show()
Example #39
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import laplace

x = np.linspace(-6, 6, 200)
pdf1 = laplace.pdf(x, 0, 1)
pdf2 = laplace.pdf(x, -1, 1)
pdf3 = laplace.pdf(x, -2.5, 0.5)

fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)

ax.plot(x, pdf1, color='r', alpha=0.5)
ax.fill_between(x, pdf1, color='r', alpha=0.3)
ax.plot(x, pdf2, color='g', alpha=0.5)
ax.fill_between(x, pdf2, color='g', alpha=0.3)
ax.plot(x, pdf3, color='b', alpha=0.5)
ax.fill_between(x, pdf3, color='b', alpha=0.3)

pdf4 = 0.3 * pdf1 + 0.2 * pdf2 + 0.5 * pdf3
ax.plot(x, pdf4, color='k', alpha=0.8, linewidth=3.0, linestyle='--')
ax.fill_between(x, pdf4, color='k', alpha=0.5)

plt.show()