def fisk_rvs(fisk_params, size):
    num_falseLoop = int(fisk_params[0] * size)
    num_trueLoop = int((1 - fisk_params[0]) * size)

    falseEntries = np.array([])
    trueEntries = np.array([])

    if ~np.isnan(fisk_params[1][1]) and (fisk_params[1][1] != 0.0):
        falseEntries = truncfiskprior_rvs(fisk_params[1][0],
                                          fisk_params[1][1],
                                          fisk_params[1][2],
                                          size=num_falseLoop)

    if ~np.isnan(fisk_params[2][0]) and (fisk_params[2][0] != 0):
        trueEntries = invgauss.rvs(fisk_params[2][0],
                                   fisk_params[2][1],
                                   fisk_params[2][2],
                                   size=num_trueLoop)
        print(trueEntries.shape)

    if (~np.isnan(fisk_params[1][1])) and (~np.isnan(fisk_params[2][0])):
        entries = np.concatenate((falseEntries, -trueEntries), axis=0)
    elif ~np.isnan(fisk_params[1][1]):
        entries = falseEntries
    elif ~np.isnan(fisk_params[2][0]):
        entries = -trueEntries
    else:
        entries = np.array([1.0] * size)

    print(entries.shape[0])
    return entries
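# fisk_rvs depends on a truncfiskprior_rvs helper that is not shown in this
# snippet. A minimal sketch, assuming it mirrors the truncinvgauss_rvs
# rejection sampler defined later on this page with fisk in place of
# invgauss (the argument order and the truncation at 1.0 are assumptions):

import numpy as np
from scipy.stats import fisk

def truncfiskprior_rvs(prob, c, scale, size):
    # Hypothetical helper: rejection-sample Fisk variates truncated to
    # (0, 1], then force a `prob` fraction of entries to exactly 1.0.
    prob = max(1e-10, prob)
    out = np.zeros((0, ))
    failure_ctr = 5
    while out.shape[0] < size and failure_ctr > 0:
        s = fisk.rvs(c, loc=0.0, scale=scale, size=size)
        accepted = s[s <= 1.0]
        if len(accepted) == 0:
            failure_ctr -= 1
        out = np.concatenate((out, accepted), axis=0)[:size]
    if failure_ctr <= 0:
        out = np.zeros(size)
    if size > 0:
        indexes = np.random.choice(range(size), size=int(prob * size),
                                   replace=False)
        out[indexes] = 1.0
    return out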
def simulate_trial(parameters,
                   values,
                   gaze,
                   boundary=1,
                   error_weight=0.05,
                   error_range=(0, 5000)):
    v, gamma, s, tau, t0 = parameters
    n_items = len(values)

    if np.random.uniform(0, 1) < error_weight:
        rt = int(np.random.uniform(*error_range))
        choice = np.random.choice(n_items)

    else:
        drifts = expdrift(v, tau, gamma, values, gaze)

        FPTs = np.zeros(n_items) * np.nan

        for i in range(n_items):
            mu = boundary / drifts[i]
            lam = (boundary / s)**2
            FPTs[i] = invgauss.rvs(mu=mu / lam, scale=lam)

        choice = np.argmin(FPTs)
        rt = int(np.round(np.min(FPTs) + t0))

    return choice, rt
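# The mu=mu / lam, scale=lam call above encodes scipy's invgauss
# parameterisation: invgauss(m, scale=L) has mean m * L and variance
# m**3 * L**2, so passing m = (boundary / drift) / lam with scale = lam
# yields a Wald first-passage time with mean boundary / drift and shape
# (boundary / s)**2. A quick empirical check (drift and noise values here
# are arbitrary illustration values):

import numpy as np
from scipy.stats import invgauss

boundary, drift, s = 1.0, 2.0, 0.5
mean = boundary / drift          # theoretical FPT mean a / v
lam = (boundary / s) ** 2        # theoretical IG shape (a / s)**2
fpt = invgauss.rvs(mu=mean / lam, scale=lam, size=100000)
print(fpt.mean())                # ~0.5   (= boundary / drift)
print(fpt.var())                 # ~mean**3 / lam = 0.03125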
Example #3
def isIG(d, v, sigma, no_sim=100000, n_bins=20):

    # check the distribution is IG or not

    simData = np.empty(no_sim)
    for i in range(no_sim):  # start at 0 so every entry is filled
        simData[i] = brownian(d, v, sigma)

    fig = plt.figure()

    plt.hist(simData, density=True, bins=n_bins, range=(0, 5 * (d / v)))
    plt.ylabel('Prob')

    mu = d / v
    lamda = d**2 / sigma**2
    igData = invgauss.rvs(mu / lamda, scale=lamda, size=no_sim)

    plt.hist(igData,
             density=True,
             histtype='step',
             range=(0, 5 * (d / v)),
             bins=n_bins,
             color='r')

    filename = 'plot_' + str(d) + '_' + str(v) + '_' + str(sigma) + '.png'
    plt.savefig(filename)
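# isIG compares simulated Brownian first-passage times against inverse-
# Gaussian draws, but the brownian helper is not shown. A minimal sketch,
# assuming it returns the first time a drift-diffusion path with drift v
# and noise sigma crosses the threshold d (the step size dt is an
# assumption):

import numpy as np

def brownian(d, v, sigma, dt=1e-3):
    # Hypothetical helper: Euler simulation of dX = v dt + sigma dW,
    # returning the first passage time over the boundary d.
    x, t = 0.0, 0.0
    while x < d:
        x += v * dt + sigma * np.sqrt(dt) * np.random.randn()
        t += dt
    return t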
Example #4
def simulate_trial(parameters, values, gaze, boundary=1, error_weight=0.05, error_range=(0, 5000), drift='multiplicative'):

    if drift == 'multiplicative':
        driftfun = drift_multiplicative
    elif drift == 'additive':
        driftfun = drift_additive
    else:
        raise ValueError('Drift function "{}" not recognized.'.format(drift))

    v, gamma, s, tau, t0 = parameters
    n_items = len(values)

    if np.random.uniform(0, 1) < error_weight:
        rt = int(np.random.uniform(*error_range))
        choice = np.random.choice(n_items)

    else:
        drifts = driftfun(v, tau, gamma, values, gaze)

        FPTs = np.zeros(n_items) * np.nan

        for i in range(n_items):
            mu = boundary / drifts[i]
            lam = (boundary / s)**2
            FPTs[i] = invgauss.rvs(mu=mu / lam, scale=lam)

        rt = np.min(FPTs)
        if rt < 1 or not np.isfinite(rt):
            rt = np.nan
            choice = np.nan
        else:
            choice = np.argmin(FPTs)
            rt = int(rt + t0)

    return choice, rt
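# drift_multiplicative and drift_additive are referenced above but not
# defined in this snippet. Purely illustrative placeholders (not the
# original definitions) that show the expected signature, with gaze either
# discounting unattended value by gamma or adding a gamma-weighted bonus:

import numpy as np

def drift_multiplicative(v, tau, gamma, values, gaze):
    # Placeholder only: unattended value is discounted multiplicatively.
    values, gaze = np.asarray(values, float), np.asarray(gaze, float)
    return v * tau * (gaze * values + (1.0 - gaze) * gamma * values)

def drift_additive(v, tau, gamma, values, gaze):
    # Placeholder only: gaze adds a gamma-weighted bonus to each value.
    values, gaze = np.asarray(values, float), np.asarray(gaze, float)
    return v * tau * (values + gamma * gaze)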
def inverseGaussian(numRounds):
    mu = np.random.random_sample()
    sigma = math.sqrt(invgauss.var(mu))

    # use inverse Gaussian distribution for arm
    dist = invgauss.rvs(mu, size=numRounds)

    return dist, (mu, sigma)
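# Usage note: for unit scale, invgauss.var(mu) equals mu**3, so the sigma
# returned above is mu**1.5. A short example (assuming math, numpy, and
# scipy.stats.invgauss are imported):

dist, (mu, sigma) = inverseGaussian(1000)
print(len(dist), mu, sigma)  # 1000 draws; sigma == mu ** 1.5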
Example #6
def check_invgaussian():
    data = invgauss.rvs(0.5, scale=1.0, size=1000)
    data[data > 1.0] = 1.0
    print "Orig Frac=" + str(np.sum(data == 1.0) / float(data.shape[0]))
    rv = TruncatedInvGaussian_Prior(data)
    res = rv.fit()
    print(res.params)
    print("================================")
    x = truncinvgauss_rvs(res.params[0],
                          res.params[1],
                          res.params[2],
                          size=1000)
    print(min(x), max(x))
    print("Frac=" + str(np.sum(x == 1.0) / float(x.shape[0])))
Example #7
    def simulate(self, n=1, paramvec=None):
        """
        Simulate n random variables from the prior.
        """
        # inverse Gaussian scales like:
        #   t * X ~ IG(t * mu, t * lambda)
        # scipy uses the one-parameter form:
        #   X ~ IG(mu, 1)
        delta, mu, nu, sigma = self._paramvec(paramvec)
        V = nu * invgauss.rvs(1. / nu, size=(n, 1))
        Z = npr.randn(n, 1)
        X = (delta - mu) + mu * V + sigma * np.sqrt(V) * Z
        X = X.flatten()
        return X
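# The comment above records the scaling identity this relies on: if
# X ~ invgauss(m) in scipy's one-parameter form, then c * X is inverse
# Gaussian with mean c * m and shape c. With c = nu and m = 1 / nu the
# mixing variable V has mean 1 and shape nu. A quick empirical check:

import numpy as np
from scipy.stats import invgauss

nu = 2.5                                  # arbitrary illustration value
V = nu * invgauss.rvs(1.0 / nu, size=200000)
print(V.mean())   # ~1.0          (= nu * (1 / nu))
print(V.var())    # ~1 / nu = 0.4 (IG variance mean**3 / shape)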
Example #8
    def gibbs_invgauss(self, itr, XT_X, XT_Y):
        self.rng = np.random.RandomState(
            self.trl +
            itr)  #itr includes recv_time, so didn't pass/use that explicitly
        tauinv_vec = 1 / self.rng.rand(self.n)  #1/np.random.rand(self.n)
        for i in range(itr):
            Sig = np.linalg.inv(XT_X + self.sigma2 * np.diag(tauinv_vec))
            beta = self.rng.multivariate_normal(
                np.squeeze(np.matmul(Sig, XT_Y)), self.sigma2 * Sig)
            # draw each 1/tau_j^2 from its inverse-Gaussian full
            # conditional, rescaled by lmbd**(2/3)
            for j in range(self.n):
                tauinv_vec[j] = invgauss.rvs(
                    np.sqrt(self.sigma2) * (self.lmbd**(1 / 3)) /
                    np.abs(beta[j])) * (self.lmbd**(2 / 3))

        return np.reshape(beta, (self.n, 1))
Example #9
    def gibbs_invgauss(self, itr, XT_X, XT_Y):

        self.rng = np.random.RandomState(self.trl + itr - 1000)
        tauinv_vec = 1 / np.random.rand(self.n)
        for i in range(itr):
            Sig = np.linalg.inv(XT_X + self.sigma2 * np.diag(tauinv_vec) +
                                1e-3 * np.eye(self.n))
            beta = self.rng.multivariate_normal(
                np.squeeze(np.matmul(Sig, XT_Y)), self.sigma2 * Sig)
            for j in range(self.n):
                tauinv_vec[j] = invgauss.rvs(
                    np.sqrt(self.sigma2) * (self.lmbd**(1 / 3)) /
                    np.abs(beta[j])) * (self.lmbd**(2 / 3))

        return np.reshape(beta, (self.n, 1))
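# Both gibbs_invgauss variants draw 1/tau_j^2 by sampling scipy's
# one-parameter invgauss and rescaling by lmbd**(2/3). By the scaling
# identity (c * invgauss(m) has mean c * m and shape c), the resulting
# conditional mean is sqrt(sigma2) * lmbd / |beta_j|, the familiar
# Bayesian-lasso form. A quick check with illustrative values:

import numpy as np
from scipy.stats import invgauss

sigma2, lmbd, beta_j = 1.0, 4.0, 0.7
m = np.sqrt(sigma2) * lmbd ** (1 / 3) / abs(beta_j)
draws = lmbd ** (2 / 3) * invgauss.rvs(m, size=100000)
print(draws.mean())   # ~ sqrt(sigma2) * lmbd / abs(beta_j) = 5.714...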
def truncinvgauss_rvs(prob, mu, sigma, size):
    prob = max(1e-10, prob)
    falseEntries = np.zeros((0, ))
    failure_ctr = 5
    while falseEntries.shape[0] < size and failure_ctr > 0:
        s = invgauss.rvs(sigma, scale=mu, loc=0.0, size=size)
        accepted = s[(s <= 1.0)]
        if len(accepted) == 0:
            failure_ctr -= 1
        falseEntries = np.concatenate((falseEntries, accepted), axis=0)
        falseEntries = falseEntries[:size]
    if failure_ctr <= 0: falseEntries = np.zeros(size)
    if size > 0:
        indexes = np.random.choice(range(size),
                                   size=int(prob * size),
                                   replace=False)
        falseEntries[indexes] = 1.0
    return falseEntries
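# A quick usage sketch of truncinvgauss_rvs as defined above (parameter
# values are arbitrary): roughly a `prob` fraction of the output is forced
# to exactly 1.0 and the rest follows the truncated inverse Gaussian:

import numpy as np

x = truncinvgauss_rvs(prob=0.1, mu=0.4, sigma=0.8, size=10000)
print(x.min(), x.max())    # all draws lie in (0, 1]
print(np.mean(x == 1.0))   # ~0.1, the forced point mass at 1.0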
def run_Parametric(story_id, data):
    print "[" + str(story_id) + "]Fitting Fisk"
    fisk_params = fisk.fit(data, floc=0)
    fisk_nll = fisk.nnlf(fisk_params, data)
    fisk_rvs = fisk.rvs(*fisk_params, size=data.shape[0])
    ks_fisk = ks_2samp(data, fisk_rvs)
    bic_fisk = compute_BIC(data, len(fisk_params), fisk_nll)

    print "[" + str(story_id) + "]Fitting IG"
    ig_params = invgauss.fit(data, floc=0)
    ig_nll = invgauss.nnlf(ig_params, data)
    ig_rvs = invgauss.rvs(*ig_params, size=data.shape[0])
    ks_ig = ks_2samp(data, ig_rvs)
    bic_ig = compute_BIC(data, len(ig_params), ig_nll)

    print "[" + str(story_id) + "]Fitting LN"
    ln_params = lognorm.fit(data, floc=0)
    ln_nll = lognorm.nnlf(ln_params, data)
    ln_rvs = lognorm.rvs(*ln_params, size=data.shape[0])
    ks_ln = ks_2samp(data, ln_rvs)
    bic_ln = compute_BIC(data, len(ln_params), ln_nll)

    print "[" + str(story_id) + "]Fitting Weibull"
    weib_params = weibull_min.fit(data, floc=0)
    weib_nll = weibull_min.nnlf(weib_params, data)
    weib_rvs = weibull_min.rvs(*weib_params, size=data.shape[0])
    ks_weib = ks_2samp(data, weib_rvs)
    bic_weib = compute_BIC(data, len(weib_params), weib_nll)

    print "[" + str(story_id) + "]Fitting Gamma"
    gamma_params = gamma.fit(data, floc=0)
    gamma_nll = gamma.nnlf(gamma_params, data)
    gamma_rvs = gamma.rvs(*gamma_params, size=data.shape[0])
    ks_gamma = ks_2samp(data, gamma_rvs)
    bic_gamma = compute_BIC(data, len(gamma_params), gamma_nll)

    return [
        fisk_nll, ig_nll, ln_nll, weib_nll, gamma_nll, ks_fisk, ks_ig, ks_ln,
        ks_weib, ks_gamma, bic_fisk, bic_ig, bic_ln, bic_weib, bic_gamma,
        fisk_params, ig_params, ln_params, weib_params, gamma_params
    ]
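# run_Parametric relies on a compute_BIC helper that is not shown. A
# minimal sketch, assuming the standard definition BIC = k * ln(n) +
# 2 * NLL (the argument order is taken from the calls above):

import numpy as np

def compute_BIC(data, n_params, nll):
    # Hypothetical helper: Bayesian information criterion from the
    # negative log-likelihood returned by scipy's nnlf.
    return n_params * np.log(data.shape[0]) + 2.0 * nll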
Example #12
    def sample(self):
        X = np.asarray(self.X)
        y = np.asarray(self.y).flatten()

        if len(y) == 0:
            beta_tilde = np.random.laplace(scale=1 / self.lmbd, size=self.n)
            return beta_tilde

        XT_X = X.T @ X
        XT_Y = X.T @ y

        tauinv_vec = 1 / np.random.rand(self.n)
        score = 0
        for i in range(self.itr):
            Sig = np.linalg.inv(XT_X + (self.sigma**2) * np.diag(tauinv_vec) +
                                1e-3 * np.eye(self.n))
            beta = np.random.multivariate_normal(
                np.squeeze(np.matmul(Sig, XT_Y)), (self.sigma**2) * Sig)
            for j in range(self.n):
                tauinv_vec[j] = invgauss.rvs(
                    np.sqrt((self.sigma**2)) * (self.lmbd**(1 / 3)) /
                    np.abs(beta[j])) * (self.lmbd**(2 / 3))
        return beta
Example #13
from scipy.stats import uniform, invgauss

# Data
np.random.seed(1056)  # set seed to replicate example
nobs = 1000  # number of obs in model
x1 = uniform.rvs(size=nobs)  # random uniform variable

beta0 = 1.0
beta1 = 0.5
l1 = 20

xb = beta0 + beta1 * x1  # linear predictor, xb
exb = np.exp(xb)

y = invgauss.rvs(exb / l1, scale=l1)  # create response variable

# Fit
stan_data = {}  # build data dictionary
stan_data['Y'] = y  # response variable
stan_data['x1'] = x1  # explanatory variable
stan_data['N'] = nobs  # sample size

# Stan code
stan_code = """
data{
    int<lower=0> N;
    vector[N] Y;
    vector[N] x1;
}
parameters{
Example #15
# Set up the figure and shape parameter used throughout this snippet (the
# surrounding doc example defines these earlier; mu here is an
# illustrative value):

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import invgauss

fig, ax = plt.subplots(1, 1)
mu = 0.145

# Display the probability density function (``pdf``):

x = np.linspace(invgauss.ppf(0.01, mu), invgauss.ppf(0.99, mu), 100)
ax.plot(x, invgauss.pdf(x, mu), 'r-', lw=5, alpha=0.6, label='invgauss pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = invgauss(mu)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = invgauss.ppf([0.001, 0.5, 0.999], mu)
np.allclose([0.001, 0.5, 0.999], invgauss.cdf(vals, mu))
# True

# Generate random numbers:

r = invgauss.rvs(mu, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example #16
from scipy.stats import invgauss
import matplotlib.pyplot as plt
import numpy as np

#invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))

fig, ax = plt.subplots(1, 1)

mu = 1


x = np.linspace(invgauss.ppf(0.01, mu), invgauss.ppf(0.99, mu), 100)
ax.plot(x, invgauss.pdf(x, mu), 'r-', lw=5, alpha=0.6, label='invgauss pdf')

rv = invgauss(mu)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')


r = invgauss.rvs(mu, size=1000)

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example #17
def simulate_trial(parameters,
                   values,
                   gaze,
                   boundary=1,
                   error_weight=0.05,
                   error_range=(0, 5)):
    """
    Predict choice and resonse time with the GLAM
    for a trial, ǵiven a set of parameter estimates,
    trial values, and gazes

    Input
    ---
    parameters : array_like
        set of parameter estimates to use

    values : array_like
        value of each choice alternative

    gaze : array_like
        gaze of each choice alternative

    boundary : float
        decision boundary in linear stochastic race,
        defaults to 1

    error_weight : float, optional
        probability that choice and RT are
        simulated according to an errornous response model,
        which makes a random choice at a random time point,
        defaults to 0.05 (5%)
    
    error_range : tuple, optional
        time range (in seconds) for errornous response model,
        defaults to (0,5)

    Returns
    ---
    choice and RT
    """

    v, gamma, s, tau, t0 = parameters
    n_items = len(values)

    if np.random.uniform(0, 1) < error_weight:
        rt = np.random.uniform(*error_range)
        choice = np.random.choice(n_items)

    else:
        rt = np.nan
        while np.isnan(rt):
            R = make_R(v, tau, gamma, values, gaze)
            FPTs = np.zeros(n_items) * np.nan

            for i in range(n_items):
                mu = boundary / R[i]
                lam = (boundary / s)**2
                FPTs[i] = invgauss.rvs(mu=mu / lam, scale=lam)
            rt = np.min(FPTs)

            if rt < 0 or not np.isfinite(rt):
                rt = np.nan
                choice = np.nan
            else:
                choice = np.argmin(FPTs)
                rt = rt + t0

    return choice, rt
Example #18
    def initialSpikeGeneration(self, numberOfSpikesPerNeuron, meanTime,
                               synchronny, typeOfDraw, a, b,
                               exciteExciteAmplitude, exciteInhibAmplitude):
        '''
        Generates the initial excitation spikes, aka fake neurons.
        Also writes the spike times to a file. This is done here instead
        of in the write function because the master list of spike times
        gets destroyed in the run process. (Note: as of the latest update,
        we now keep a dead copy of the initial spike time list, because it
        proves better to do so for the synchrony trials; however, because
        the current write function works perfectly, I have not changed it
        to use the dead copy.)
        This method is now also responsible for the amplitudes of the fake
        neurons.

        Note: fake neurons are the ones created in the initial spike
        generation; they aren't really modelable neurons. They are quite
        literally a list of when to send currents to the 'real' neurons
        of the entire system.
        '''
        self.fakeExciteAmplitude = exciteExciteAmplitude
        self.fakeInhibAmplitude = exciteInhibAmplitude

        # Cleans out pre-existing fake neuron data
        for file in self.os.listdir(self.networkID + '/FakeNeuronSpikes/'):
            self.os.remove("FakeNeuronSpikes/" + file)

        # for type of draw:
        #     1 = Gaussian
        #     2 = Exponential
        #     3 = Uniform
        #     4 = Inverse Gauss
        if meanTime > self.duration:
            print(
                "\nError: Your mean time is greater than the duration, \nand you can ignore the next error statement"
            )
            return
        self.arrayOfInitialSpikeTimes = []
        self.deadCopyOfArrayOfInitialSpikeTimes = []

        # Essentially the "for loop's" job is to iterate once for every
        # neuron in the list of all neurons.
        # The "while loop" loops once per desired spike per neuron.
        # For every "for" iteration, one temporary list stores the spike
        # times and the id number of the neuron to receive those spikes.
        # At the end of each "for" iteration, the temporary list is
        # appended to the master list of spike times.
        for x in range(len(self.listOfNeurons) - 1):
            # First write is used to help keep a csv format
            self.firstWrite = True
            excitationSpikesdataFile = open(
                "FakeNeuronSpikes/excitationSpike" + str(x + 1) + ".txt", "w")
            numberOfSpikesGenerated = 0
            individualNeuronSpikeTimeList = []
            while (numberOfSpikesGenerated != numberOfSpikesPerNeuron):

                # Drawing excitation spikes from the chosen distribution
                if typeOfDraw == 1:
                    spikeTime = random.gauss(meanTime, synchronny)
                elif typeOfDraw == 2:
                    spikeTime = random.expovariate(meanTime)
                elif typeOfDraw == 3:
                    spikeTime = random.uniform(a, b)
                elif typeOfDraw == 4:
                    spikeTime = invgauss.rvs(meanTime)
                else:
                    raise ValueError(
                        "Initial Spike Gen Error: your requested draw type does not exist"
                    )

                # Makes sure the spike time is not out of bounds
                if (spikeTime > 0 and spikeTime < self.duration):

                    individualNeuronSpikeTimeList.append(spikeTime)

                    # This if/else structure keeps the csv format: the
                    # first time a number is written to a file, only the
                    # number itself is written. Spaces and commas are only
                    # added from the second write onward.
                    if self.firstWrite:
                        excitationSpikesdataFile.write(str(spikeTime))
                        self.firstWrite = False
                    else:
                        excitationSpikesdataFile.write(", " + str(spikeTime))
                    individualNeuronSpikeTimeList.append(x)
                    self.arrayOfInitialSpikeTimes.append(
                        individualNeuronSpikeTimeList)
                    numberOfSpikesGenerated += 1
                    individualNeuronSpikeTimeList = []

            excitationSpikesdataFile.close()

        # Create the dead copy, so we can still use the information even
        # after the loss of the original spike time list. The copy is taken
        # before sorting because that way it's easier to extract the
        # required data: it's ordered by neuron rather than by spike time.
        self.deadCopyOfArrayOfInitialSpikeTimes = list(
            self.arrayOfInitialSpikeTimes)
        # Sorts the array, but because the addresses have been appended to the
        # spike times, the user specified distribution is still preserved.
        self.arrayOfInitialSpikeTimes.sort()
Example #19
def bootstrap(a,
              f=None,
              b=100,
              method="balanced",
              family=None,
              strata=None,
              smooth=False,
              random_state=None):
    """
    Calculate function values from bootstrap samples or
    optionally return bootstrap samples themselves

    Parameters
    ----------
    a : array-like
        Original sample
    f : callable or None
        Function to be bootstrapped
    b : int
        Number of bootstrap samples
    method : string
        * 'ordinary'
        * 'balanced'
        * 'parametric'
    family : string or None
        * 'gaussian'
        * 't'
        * 'laplace'
        * 'logistic'
        * 'F'
        * 'gamma'
        * 'log-normal'
        * 'inverse-gaussian'
        * 'pareto'
        * 'beta'
        * 'poisson'
    strata : array-like or None
        Stratification labels, ignored when method
        is parametric
    smooth : boolean
        Whether or not to add noise to bootstrap
        samples, ignored when method is parametric
    random_state : int or None
        Random number seed

    Returns
    -------
    y | X : np.array
        Function applied to each bootstrap sample
        or bootstrap samples if f is None
    """
    np.random.seed(random_state)
    a = np.asarray(a)
    n = len(a)

    # stratification not meaningful for parametric sampling
    if strata is not None and (method != "parametric"):
        strata = np.asarray(strata)
        if len(strata) != len(a):
            raise ValueError("a and strata must have" " the same length")
        # recursively call bootstrap without stratification
        # on the different strata
        masks = [strata == x for x in np.unique(strata)]
        boot_strata = [
            bootstrap(a=a[m],
                      f=None,
                      b=b,
                      method=method,
                      strata=None,
                      random_state=random_state) for m in masks
        ]
        # concatenate resampled strata along first column axis
        X = np.concatenate(boot_strata, axis=1)
    else:
        if method == "ordinary":
            # i.i.d. sampling from ecdf of a
            X = np.reshape(a[np.random.choice(range(a.shape[0]),
                                              a.shape[0] * b)],
                           newshape=(b, ) + a.shape)
        elif method == "balanced":
            # permute b concatenated copies of a
            r = np.reshape([a] * b, newshape=(b * a.shape[0], ) + a.shape[1:])
            X = np.reshape(r[np.random.permutation(range(r.shape[0]))],
                           newshape=(b, ) + a.shape)
        elif method == "parametric":
            if len(a.shape) > 1:
                raise ValueError("a must be one-dimensional")

            # fit parameters by maximum likelihood and sample
            if family == "gaussian":
                theta = norm.fit(a)
                arr = norm.rvs(size=n * b,
                               loc=theta[0],
                               scale=theta[1],
                               random_state=random_state)
            elif family == "t":
                theta = t.fit(a, fscale=1)
                arr = t.rvs(size=n * b,
                            df=theta[0],
                            loc=theta[1],
                            scale=theta[2],
                            random_state=random_state)
            elif family == "laplace":
                theta = laplace.fit(a)
                arr = laplace.rvs(size=n * b,
                                  loc=theta[0],
                                  scale=theta[1],
                                  random_state=random_state)
            elif family == "logistic":
                theta = logistic.fit(a)
                arr = logistic.rvs(size=n * b,
                                   loc=theta[0],
                                   scale=theta[1],
                                   random_state=random_state)
            elif family == "F":
                theta = F.fit(a, floc=0, fscale=1)
                arr = F.rvs(size=n * b,
                            dfn=theta[0],
                            dfd=theta[1],
                            loc=theta[2],
                            scale=theta[3],
                            random_state=random_state)
            elif family == "gamma":
                theta = gamma.fit(a, floc=0)
                arr = gamma.rvs(size=n * b,
                                a=theta[0],
                                loc=theta[1],
                                scale=theta[2],
                                random_state=random_state)
            elif family == "log-normal":
                theta = lognorm.fit(a, floc=0)
                arr = lognorm.rvs(size=n * b,
                                  s=theta[0],
                                  loc=theta[1],
                                  scale=theta[2],
                                  random_state=random_state)
            elif family == "inverse-gaussian":
                theta = invgauss.fit(a, floc=0)
                arr = invgauss.rvs(size=n * b,
                                   mu=theta[0],
                                   loc=theta[1],
                                   scale=theta[2],
                                   random_state=random_state)
            elif family == "pareto":
                theta = pareto.fit(a, floc=0)
                arr = pareto.rvs(size=n * b,
                                 b=theta[0],
                                 loc=theta[1],
                                 scale=theta[2],
                                 random_state=random_state)
            elif family == "beta":
                theta = beta.fit(a)
                arr = beta.rvs(size=n * b,
                               a=theta[0],
                               b=theta[1],
                               loc=theta[2],
                               scale=theta[3],
                               random_state=random_state)
            elif family == "poisson":
                theta = np.mean(a)
                arr = poisson.rvs(size=n * b,
                                  mu=theta,
                                  random_state=random_state)
            else:
                raise ValueError("Invalid family")

            X = np.reshape(arr, newshape=(b, n))
        else:
            raise ValueError("method must be either 'ordinary'"
                             " , 'balanced', or 'parametric',"
                             " '{method}' was supplied".format(method=method))

    # samples are already smooth in the parametric case
    if smooth and (method != "parametric"):
        X += np.random.normal(size=X.shape, scale=1 / np.sqrt(n))

    if f is None:
        return X
    else:
        return np.asarray([f(x) for x in X])
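# A short usage example of bootstrap as defined above: estimate the
# bootstrap standard error of the mean for a small Gaussian sample
# (values are illustrative):

import numpy as np

a = np.random.default_rng(0).normal(loc=2.0, scale=1.0, size=50)
means = bootstrap(a, f=np.mean, b=500, method="balanced")
print(means.mean(), means.std())  # bootstrap mean and its standard error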
Example #20
# Python

import numpy as np
from scipy.stats import invgauss, norm, binom
from scipy.optimize import minimize
from numpy import log, exp, apply_along_axis, mean, median, std, quantile
from math import pi, isnan
np.random.seed(111)

n = 100
t0 = 0.35

t = invgauss.rvs(mu=0.5, scale=1, size=n) + t0


def minus_loglik(parms):

    z = parms[0]
    v = parms[1]
    t0 = parms[2]

    return -sum(
        log(z * (2 * pi * (t - t0)**3)**(-0.5)
            * exp(-(v * (t - t0) - z)**2 / (2 * (t - t0)))))


init = [.5, 1.2, .3]
fit = minimize(minus_loglik, init, method="nelder-mead")
print(fit.x)
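# Sanity check (hedged): the data were generated as
# invgauss.rvs(mu=0.5, scale=1) + 0.35, which under the likelihood above
# corresponds to roughly z = 1, v = 2, t0 = 0.35 (matching mean = z / v
# and shape = z**2), so fit.x should land near those values.
print(np.round(fit.x, 3), "vs approx. true", [1.0, 2.0, 0.35])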
import random
import statistics

from scipy.stats import pearsonr
from scipy.stats import invgauss
from scipy.stats import norm

vector = [1, 23, 4, 13, 9, 7, 7, 13, 4, 23, 1, 23, 1, 23, 3, 23]

print('Vector {}'.format(vector))
print('Average of the vector is {}'.format(statistics.mean(vector)))

print('Mode of the vector is {}'.format(statistics.mode(vector)))

print('Median of the vector is {}'.format(statistics.median(vector)))

print('Standard deviation (sigma) of the vector is {}'.format(statistics.stdev(vector)))

vector_one = [1, 2, 3]
vector_two = [4, 5, 6]
vector_three = [9, 8, 7]

print('Positive correlation with pearson between {} and {} is {}'.format(vector_one, vector_two,
                                                                         pearsonr(vector_one, vector_two)))
print('Negative correlation with pearson between {} and {} is {}'.format(vector_one, vector_three,
                                                                         pearsonr(vector_one, vector_three)))

print('Random number {}'.format(random.random()))
invgauss_mu = 10
invgauss_loc = 10
print('Inverse Gaussian random variate {}'.format(invgauss.rvs(invgauss_mu, loc=invgauss_loc)))
norm_loc = 10
print('Normal distribution random variates {}'.format(norm.rvs(norm_loc)))
Example #22
    def rvs(self, shape):
        return invgauss.rvs(mu=shape)
Example #23
def inv_norm_dist(x, mean):
    np.random.seed(42)
    y = invgauss.rvs(mean, size=len(x))
    y = np.array(sorted(y))
    y = np.clip(y, 0, 1)
    return y