 def __init__(self, h=100, N=100):
     self.h = h
     self.N = N
     self.default = float(h) / N
     # Beta posterior over the hit rate: h successes in N trials, uniform prior
     a = h + 1
     b = (N - h) + 1
     self.D = D.beta(a, b)
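Beta(h + 1, (N - h) + 1) is the standard conjugate posterior for h successes in N Bernoulli trials under a uniform Beta(1, 1) prior. A minimal standalone sketch with scipy.stats (standing in for the `D` module above, which is assumed to expose an equivalent `beta` constructor):

# Illustration only; scipy.stats stands in for the D module used above.
from scipy.stats import beta as beta_dist

h, N = 100, 100
posterior = beta_dist(h + 1, (N - h) + 1)
print(posterior.mean())            # (h + 1) / (N + 2) ~= 0.990
print(posterior.interval(0.95))    # 95% credible interval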
Example no. 3
 def get_expert_confidences(self, trial):
     # attention weights: a softmax over posterior mean confidences (temperature self.xi)
     e_confidences = [
         numpy.e**(beta(**p).mean() / self.xi) for p in self.confidences
     ]
     e_confidences = [i / sum(e_confidences) for i in e_confidences]
     return e_confidences
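The weighting is a temperature softmax over the posterior means of each expert's beta-distributed confidence. A self-contained sketch (the two-expert parameters below are made up for illustration):

import numpy
from scipy.stats import beta

def softmax_confidences(confidences, xi):
    # confidences: list of beta-parameter dicts, e.g. {'a': 9, 'b': 1}
    raw = [numpy.e**(beta(**p).mean() / xi) for p in confidences]
    total = sum(raw)
    return [r / total for r in raw]

# two experts with posterior mean confidences 0.9 and 0.1
print(softmax_confidences([{'a': 9, 'b': 1}, {'a': 1, 'b': 9}], xi=0.1))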
Example no. 4
 def get_expert_confidences(self, trial):
     # get attention weights (softmax of confidences)
     c = trial[self.context]
     e_confidences = [
         numpy.e**(beta(**p).mean() / self.zeta)
         for p in self.confidences[c]
     ]
     e_confidences = [i / sum(e_confidences) for i in e_confidences]
     return e_confidences
Example no. 5
def wfpt_like(choices, rts, v_mean, a, w_mode, w_std=0.0,
              v_std=0.0, t0=0.0, nsamp=5000, err=.0001):
    """
    Calculate WFPT (Wiener first-passage time) likelihoods
    for the given choices and rts.
    """
    # fill likes
    likes = np.zeros(len(choices))

    # process the v_mean and w_mode
    if w_std > 0.0:
        # calc with beta distribution
        mu = w_mode
        sigma = w_std
        kappa = mu * (1 - mu) / sigma**2 - 1
        alpha = mu * kappa
        beta = (1 - mu) * kappa

        if alpha <= 0.0 or beta <= 0.0:
            # illegal param
            return likes
        
        # sample from the beta distribution
        w = dists.beta(alpha, beta).rvs(nsamp)
    else:
        w = w_mode
    
    # proc the v
    if v_std > 0.0:
        v = dists.norm(v_mean, v_std).rvs(nsamp)[np.newaxis]
    else:
        v = v_mean
    
    # loop over the two choices
    # first choice 1, no change in v or w
    ind = np.where(choices == 1)[0]

    # loop over rts, setting likes for that choice
    for i in ind:
        # calc the like, adjusting rt with t0
        likes[i] = wfpt(rts[i]-t0, v=v, a=a, w=w,
                        nsamp=nsamp, err=err)

    # then choice 2 with flip of v and w
    v = -v
    w = 1-w
    ind = np.where(choices == 2)[0]
    
    # loop over rts, setting likes for that choice
    for i in ind:
        # calc the like, adjusting rt with t0
        likes[i] = wfpt(rts[i]-t0, v=v, a=a, w=w,
                        nsamp=nsamp, err=err)
    
    return likes
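The `mu`/`kappa` block above converts a mean and standard deviation into Beta shape parameters (despite the name, `w_mode` is treated as the mean here: the resulting distribution has mean mu and std sigma). A quick round-trip check with scipy.stats:

from scipy import stats as dists

mu, sigma = 0.5, 0.1
kappa = mu * (1 - mu) / sigma**2 - 1
alpha = mu * kappa
beta_shape = (1 - mu) * kappa
d = dists.beta(alpha, beta_shape)
print(d.mean(), d.std())   # recovers 0.5 and 0.1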
Example no. 7
    def get_action_probs(self, trial):
        def subset(lst1, lst2):
            """test if lst1 is subset of lst2"""
            return set(lst1) <= set(lst2)

        features = trial[self.features].tolist()
        key_subset = [
            k for k in self.reward_probabilities.keys() if subset(features, k)
        ]
        action_probs = {}
        for key in key_subset:
            raw_prob = beta(**self.reward_probabilities[key]).mean()
            # take softmax
            prob = numpy.e**(raw_prob / self.kappa)
            action_probs[key[-1]] = prob
        # normalize by total
        sum_probs = sum(action_probs.values())
        action_probs = {k: v / sum_probs for k, v in action_probs.items()}
        return action_probs
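A toy, self-contained run of the same subset-match-then-softmax logic (the feature/action keys and beta parameters below are hypothetical; in the source, each key is a feature tuple whose last element is the action):

import numpy
from scipy.stats import beta

reward_probabilities = {
    ('red', 'left'): {'a': 8, 'b': 2},    # hypothetical (feature, action) keys
    ('red', 'right'): {'a': 2, 'b': 8},
}
kappa = 0.1
features = ['red']

keys = [k for k in reward_probabilities if set(features) <= set(k)]
raw = {k[-1]: numpy.e**(beta(**reward_probabilities[k]).mean() / kappa)
       for k in keys}
total = sum(raw.values())
print({action: p / total for action, p in raw.items()})
# ~{'left': 0.998, 'right': 0.002}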
def beta(h=5, N=10):
    # Beta posterior over the hit rate: h successes in N trials, uniform prior
    a = h + 1
    b = (N - h) + 1
    return D.beta(a, b)
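Usage sketch, assuming `D` is scipy.stats.distributions (or any module with a compatible `beta`):

from scipy.stats import distributions as D

d = beta(h=7, N=10)    # posterior after 7 hits in 10 trials
print(d.mean())        # (7 + 1) / (10 + 2) ~= 0.667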
Example no. 9
def beta(alpha=.5, beta=.5):
    return dists.beta(alpha, beta)
Example no. 10
def coinflip(h, N):
    # Beta posterior over the heads probability after h heads in N flips
    a = h + 1
    b = (N - h) + 1
    return D.beta(a, b)
Example no. 14
def wfpt_like(choices,
              rts,
              v_mean,
              a,
              w_mode,
              w_std=0.0,
              v_std=0.0,
              t0=0.0,
              nsamp=5000,
              err=.0001,
              max_time=5.0,
              trange_nsamp=1000):
    """
    Calculate WFPT likelihoods for choices and rts


    """
    # fill likes
    likes = np.zeros(len(choices))

    # process the v_mean and w_mode
    if w_std > 0.0:
        # calc with beta distribution
        mu = w_mode
        sigma = w_std
        kappa = mu * (1 - mu) / sigma**2 - 1
        alpha = mu * kappa
        beta = (1 - mu) * kappa

        if alpha <= 0.0 or beta <= 0.0:
            # illegal param
            return likes

        # sample from the beta distribution
        w = dists.beta(alpha, beta).rvs(nsamp)
    else:
        w = w_mode

    # proc the v
    if v_std > 0.0:
        v = dists.norm(v_mean, v_std).rvs(nsamp)[np.newaxis]
    else:
        v = v_mean

    # loop over the two choices
    # first choice 1, no change in v or w
    ind = np.where(choices == 1)[0]

    # loop over rts, setting likes for that choice
    for i in ind:
        # calc the like, adjusting rt with t0
        likes[i] = wfpt(rts[i] - t0, v=v, a=a, w=w, err=err)

    # then choice 2 with flip of v and w
    v = -v
    w = 1 - w
    ind = np.where(choices == 2)[0]

    # loop over rts, setting likes for that choice
    for i in ind:
        # calc the like, adjusting rt with t0
        likes[i] = wfpt(rts[i] - t0, v=v, a=a, w=w, err=err)

    # finally the non-responses (the v and w can be either direction)
    ind = np.where(choices == 0)[0]
    for i in ind:
        likes[i] = wfpt_gen(v_mean=v_mean,
                            a=a,
                            w_mode=w_mode,
                            v_std=v_std,
                            nsamp=nsamp,
                            trange=np.linspace(0, max_time - t0, trange_nsamp),
                            only_prob_no_resp=True)

    return likes
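The `wfpt` and `wfpt_gen` helpers are not shown in these excerpts, so the call below is only a hypothetical illustration of the expected inputs: choices coded 1/2 for the two boundaries and 0 for a non-response, with RTs in seconds:

import numpy as np

choices = np.array([1, 2, 1, 0])              # 0 = non-response
rts = np.array([0.55, 0.80, 0.62, np.nan])    # RT unused for non-responses
# likes = wfpt_like(choices, rts, v_mean=1.0, a=1.5, w_mode=0.5,
#                   w_std=0.05, v_std=0.1, t0=0.2)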
Example no. 16
 def D(self):
     # Beta posterior for self.h successes in self.N trials (uniform prior)
     a = self.h + 1
     b = (self.N - self.h) + 1
     return D.beta(a, b)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.integrate as integrate
from scipy.stats import distributions
from scipy.optimize import leastsq
from pymc3.stats import hpd
from mpl_toolkits.mplot3d import Axes3D


def f(x):
    # integrand: exp(-x**2)
    return np.exp(-x**2)


beta = distributions.beta(1, 2)     # proposal (importance) distribution on [0, 1]
unif = distributions.uniform(0, 1)
N = 10000
I_estimate = []
I_squared = []
for i in range(N):
    x = beta.rvs()
    sample = f(x) / beta.pdf(x)   # importance weight: f(x) / q(x)
    I_estimate.append(sample)
    I_squared.append(sample**2)

I_estimate = np.asarray(I_estimate)
I_squared = np.asarray(I_squared)
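The excerpt stops before the accumulated samples are used; a plausible continuation (assuming the goal is the Monte Carlo estimate of the integral of exp(-x**2) from 0 to 1, plus its standard error) would be:

I_hat = I_estimate.mean()
se_hat = np.sqrt((I_squared.mean() - I_hat**2) / N)   # std. error of the estimator
print(I_hat, '+/-', se_hat)
print(integrate.quad(f, 0, 1)[0])   # reference value, ~0.74682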
st.subheader("Define Class B")
st.write(
    "Parameters for a Beta distribution $B \\sim \\mathrm{Beta}(\\alpha, \\beta)$ on $[0, 1]$."
)
alpha_b = st.slider("Alpha B",
                    min_value=0.01,
                    max_value=10.0,
                    step=0.01,
                    value=5.0)
beta_b = st.slider("Beta B",
                   min_value=0.01,
                   max_value=10.0,
                   step=0.01,
                   value=5.0)

dist_a = dist.beta(alpha_a, beta_a)
dist_b = dist.beta(alpha_b, beta_b)

x = np.linspace(0.01, 0.99, 500)
pdf_a = dist_a.pdf(x)
pdf_b = dist_b.pdf(x)

num_a = st.sidebar.number_input("Number of Samples from A",
                                min_value=1,
                                max_value=10000,
                                value=100)
num_b = st.sidebar.number_input("Number of Samples from B",
                                min_value=1,
                                max_value=10000,
                                value=100)
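The excerpt cuts off before the samples are drawn; a plausible continuation in the usual Bayesian A/B style (assumed, not from the source; `np` is taken to be numpy as elsewhere in the script) would sample both distributions and estimate P(B > A):

samples_a = dist_a.rvs(int(num_a))
samples_b = dist_b.rvs(int(num_b))
n = min(len(samples_a), len(samples_b))
p_b_beats_a = float(np.mean(samples_b[:n] > samples_a[:n]))
st.write(f"Estimated P(B > A): {p_b_beats_a:.3f}")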
Example no. 19

import numpy as np
import statsmodels.api as sm

nobs = 50
#np.random.seed(1234)  # Seed random generator
c1 = np.random.beta(.2, .3, size=(nobs, 1))
c2 = np.random.normal(50, 25, size=(nobs, 1))

# Estimate a univariate density (var_type='c': one continuous variable)
# and display the bandwidth found:
#dens_u = sm.nonparametric.KDEMultivariate(data=[c1], var_type='c', bw='normal_reference')
dens_u = sm.nonparametric.KDEMultivariate(data=[c1], var_type='c', bw=[.1])

cum = 0
max = -100
maxx = -100

# for i in range(len(pdf)):
#     p = pdf[i]
#     x = tests[i]
#     if p > max:
#         print('i = ', i)
#         max = p
#         maxx = x
#     cum += p

#mean = cum / len(pdf)

plot(dens_u, beta(.2, .3))
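The `plot` helper (and the `beta` it receives) is defined elsewhere in the original script; a minimal stand-in that overlays the KDE on the true Beta(.2, .3) density would look like:

import matplotlib.pyplot as plt
from scipy import stats

def plot_kde_vs_true(kde, true_dist, npts=200):
    grid = np.linspace(0.01, 0.99, npts)
    plt.plot(grid, kde.pdf(grid), label='KDE')                 # KDEMultivariate.pdf
    plt.plot(grid, true_dist.pdf(grid), label='Beta(.2, .3)')
    plt.legend()
    plt.show()

plot_kde_vs_true(dens_u, stats.beta(.2, .3))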