Example #1
def normDist(r, rsigma):
    # Defines an always-positive normal distribution (for very broad size
    # distributions): negative draws are rejected and redrawn.
    x = pl.normal(r, rsigma)
    while x < 0:
        x = pl.normal(r, rsigma)
    return x
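A minimal usage sketch (assuming `import pylab as pl` and the normDist above): it draws a batch of strictly positive radii even when the spread is comparable to the mean.

# Hypothetical parameters: mean radius 1.0, very broad spread 0.8
radii = [normDist(1.0, 0.8) for _ in range(5)]
print(radii)  # every value is > 0 thanks to the rejection loop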
Example #2
    def get_normal_input_times(self, mu=10, sigma=1):
        '''Generates n normally distributed process times with mean mu and
        standard deviation sigma, redrawing any time that falls outside
        (tstart, tstop).'''
        times = pl.normal(mu, sigma, self.n)
        for i in range(self.n):
            while times[i] <= self.tstart or times[i] >= self.tstop:
                times[i] = pl.normal(mu, sigma)
        return times
Example #3
def get_age_sex(is_crew=False,
                min_age=18,
                max_age=99,
                crew_age=35,
                crew_std=5,
                guest_age=68,
                guest_std=8):
    '''
    Define age-sex distributions. Passenger age distribution based on:
        https://www.nytimes.com/reuters/2020/02/12/world/asia/12reuters-china-health-japan.html

        "About 80% of the passengers were aged 60 or over [=2130], with 215 in their 80s and 11 in the 90s,
        the English-language Japan Times newspaper reported."
    '''

    # Define female (0) or male (1) -- evenly distributed
    sex = pl.randint(2)

    # Define age distribution for the crew and guests
    if is_crew:
        age = pl.normal(crew_age, crew_std)
    else:
        age = pl.normal(guest_age, guest_std)

    # Clamp the age to the allowed range: the median of three values is the
    # middle one, i.e. age clipped to [min_age, max_age]
    age = pl.median([min_age, age, max_age])

    return age, sex
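The median-of-three trick above is just a clamp. A quick check of the equivalence (a sketch, assuming only pylab):

import pylab as pl

age = pl.normal(68, 8)
assert pl.median([18, age, 99]) == pl.clip(age, 18, 99)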
Example #4
def test_anova():
    import pandas as pd
    A = normal(0.5, size=10000)
    B = normal(0.25, size=10000)
    C = normal(0, 0.5, size=10000)
    df = pd.DataFrame({"A": A, "B": B, "C": C})
    a = ANOVA(df)
    print(a.anova())
    a.imshow_anova_pairs()
Example #5
def other():
    data = pylab.concatenate((pylab.normal(1, .2,
                                           5000), pylab.normal(2, .2, 2500)))
    y, x, _ = pylab.hist(data, 100, alpha=.3, label='data')

    # convert the bin edges returned by hist into bin centres, so that
    # len(x) == len(y)
    x = (x[1:] + x[:-1]) / 2

    expected = (1, .2, 250, 2, .2, 125)
    params, cov = curve_fit(bimodal, x, y, expected)
    sigma = numpy.sqrt(numpy.diag(cov))
    plt.plot(x, bimodal(x, *params), color='red', lw=3, label='model')
    plt.legend()
    print(params, '\n', sigma)
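The snippet relies on a `bimodal` model defined elsewhere; a plausible minimal definition, consistent with the six-parameter `expected` tuple (mu1, sigma1, A1, mu2, sigma2, A2), would be:

import numpy

def gauss(x, mu, sigma, A):
    # one Gaussian component with peak amplitude A
    return A * numpy.exp(-(x - mu)**2 / (2 * sigma**2))

def bimodal(x, mu1, sigma1, A1, mu2, sigma2, A2):
    # sum of two Gaussian components, as fitted by curve_fit above
    return gauss(x, mu1, sigma1, A1) + gauss(x, mu2, sigma2, A2)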
Example #6
def Perturb(inmat, pfact=.1):
    '''
    Adds Gaussian noise to each column of inmat, scaled to pfact times
    that column's standard deviation.
    '''
    stds = np.std(inmat, axis=0)  # per-column standard deviations (axis=0 so the result is iterable)
    return inmat + np.array([pl.normal(0, tmp * pfact, inmat.shape[0])
                             for tmp in stds]).T
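A short usage sketch with illustrative data (assuming numpy and pylab imported as np / pl): each column of the result differs from the input by noise on the order of 10% of that column's spread.

import numpy as np
import pylab as pl

mat = np.arange(12, dtype=float).reshape(4, 3)
noisy = Perturb(mat, pfact=0.1)
print(noisy - mat)  # per-column Gaussian perturbations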
Example #7
    def pull(self):
        rValue = self.meanReturn

        if self.varianceReturn > 0:
            # note: normal()'s second argument is a standard deviation,
            # despite the attribute's name
            rValue = normal(self.meanReturn, self.varianceReturn)

        return rValue
Example #8
File: mixture.py Project: cokelaer/biokit
    def __init__(self, mu=[-1,1], sigma=[1,1], mixture=[0.5,0.5], N=1000):
        """.. rubric:: constructor
    
        :param list mu: list of mean for each model
        :param list sigma: list of standard deviation for each model
        :param list mixture: list of amplitude for each model

        """
        assert len(mu) == len(sigma)
        assert len(mu) == len(mixture)
        self.mu = mu
        self.sigma = sigma
        self.mixture = mixture
        self.data = []
        self.N = N
        self.Ns = [int(x*N) for x in mixture]
        self.k = len(self.mu)

        if sum(self.Ns) != N:
            print('Warning: rounding mixture ratio. total N will be %s' %
                    sum(self.Ns))

        for m, s, n in zip(self.mu, self.sigma, self.Ns):
            data = pylab.normal(m, s, size=n)
            self.data.extend(data)
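A standalone sketch of the same sampling outside the class (assuming only pylab): two Gaussian components are concatenated in proportion to their mixture weights.

import pylab

mu, sigma, mixture, N = [-1, 1], [1, 1], [0.5, 0.5], 1000
data = []
for m, s, n in zip(mu, sigma, [int(w * N) for w in mixture]):
    data.extend(pylab.normal(m, s, size=n))
pylab.hist(data, bins=50)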
Example #10
    def zeroPaddData(self, desiredLength, paddmode='zero', where='end'):
        # Zero-pads the time domain data; padding can be applied at the
        # beginning or at the end, with either real zeros or Gaussian noise.
        # Might not work for gaussian mode!

        desiredLength = int(desiredLength)
        # escape the function
        if desiredLength < 0:
            return 0

        # calculate the padding vectors
        if paddmode == 'gaussian':
            paddvec = py.normal(0, py.std(self.getPreceedingNoise()) * 0.05,
                                desiredLength)
        else:
            paddvec = py.ones((desiredLength, self.tdData.shape[1] - 1))
            paddvec *= py.mean(self.tdData[-20:, 1:])

        timevec = self.getTimes()
        if where == 'end':
            # time axis:
            newtimes = py.linspace(timevec[-1],
                                   timevec[-1] + desiredLength * self.dt,
                                   desiredLength)
            paddvec = py.column_stack((newtimes, paddvec))
            longvec = py.row_stack((self.tdData, paddvec))
        else:
            newtimes = py.linspace(timevec[0] - (desiredLength + 1) * self.dt,
                                   timevec[0], desiredLength)
            paddvec = py.column_stack((newtimes, paddvec))
            longvec = py.row_stack((paddvec, self.tdData))

        self.setTDData(longvec)
Example #11

def reassign_values(d):
    # Replace any value below 5 with 256 draws from N(mean=2, std=3),
    # showing a histogram of each replacement for one second.
    for key in d:
        if d[key] < 5:
            d[key] = plb.normal(2, 3, 256)
            plb.hist(d[key], density=True, bins=24)
            plb.pause(1)
            plb.close()
    return d
Example #12
def tsallis_rv(qv, Tqv, D):
    # Draws a D-dimensional sample from the Tsallis visiting distribution
    # (a q-Gaussian) with shape parameter qv and visiting temperature Tqv,
    # as the ratio of a normal draw to the square root of a gamma draw.
    p = (3. - qv) / (2 * (qv - 1))
    s = ((2. * (qv - 1))**0.5) / (Tqv**(1. / (3 - qv)))
    rv = zeros(D, dtype=float)
    x = normal(0, 1, D)
    u = stats.gamma.rvs(p, loc=0., scale=1., size=1)
    y = s * (u**0.5)
    rv[:] = x[:] / y
    return rv
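The function assumes `zeros`, `normal` and `stats` are already in scope; a hedged usage sketch with the imports spelled out:

from numpy import zeros
from numpy.random import normal
from scipy import stats

# Illustrative values: qv must lie in (1, 3) for p and s to be defined.
sample = tsallis_rv(qv=1.5, Tqv=1.0, D=3)
print(sample)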
Example #13
def histogram_1():
    plb.figure(1)
    gaus_dist = plb.normal(-2, 2, size=512)
    plb.hist(gaus_dist, density=True, bins=24)
    plb.title('Gaussian distribution / Histogram')
    plb.xlabel('Value')
    plb.ylabel('Frequency')
    plb.grid(True)
    plb.pause(5)
Example #15
def mine_ok():
    data = pylab.concatenate(
        (pylab.normal(muP, sigmaP,
                      samplesP), pylab.normal(muC, sigmaC, samplesC)))
    y, x, _ = pylab.hist(data, 100, alpha=.3, label='data')

    # convert the bin edges returned by hist into bin centres, so that
    # len(x) == len(y)
    x = (x[1:] + x[:-1]) / 2

    expected = (muP, sigmaP, aP, muC, sigmaC, aC)

    params, cov = curve_fit(bimodal, x, y, expected)
    sigma = numpy.sqrt(numpy.diag(cov))
    plt.plot(x, bimodal(x, *params), color='red', lw=3, label='model')
    plt.legend()
    print('params: ', end='')
    print(params)
    print('sigma: ', end='')
    print(sigma)
Example #16
File: lightcurve.py Project: KeplerGO/PyKE
def transit_fit_example():
    '''Simulate some data containing a transit plus white Gaussian
    noise and fit for the planet / star radius ratio (p = Rp / Rs),
    epoch Ttr, inclination, and ratio of semi-major axis to stellar
    radius (a/Rs), while holding the other parameters fixed at their
    true values.'''
    JD = numpy.arange(100) * 0.01 - 0.5 + pylab.normal(0, 0.005, 100)
    P = 3.0
    Ttr_true = 0.0
    Ttr_guess = 0.02
    p_true = 0.1
    p_guess = 0.08
    incl_true = 0.99 * numpy.pi / 2.
    incl_guess = numpy.pi / 2.
    a_true = 5.0
    a_guess = 4.5
    cn = [0, 0.45, 0, 0.05]
    Ecc = 0
    omega = 0
    sec = 0
    truth = lightcurve(JD, P, p_true, Ttr_true, Ecc, a_true, incl_true, \
                       omega, cn, sec)
    flux = truth + pylab.normal(0, 0.002, numpy.size(JD))
    flux_err = numpy.ones(numpy.size(JD)) * 0.002
    initial_guess = lightcurve(JD, P, p_guess, Ttr_guess, \
                               Ecc, a_guess, incl_guess, omega, cn, sec)
    p_fit, Ttr_fit, incl_fit, a_fit = \
        scipy.optimize.fmin(transit_errfunc_ptia, \
                            [p_guess, Ttr_guess, incl_guess, a_guess], \
            args = ([P, Ecc, omega, cn, sec], JD, flux, flux_err))
    fit = lightcurve(JD, P, p_fit, Ttr_fit, \
                               Ecc, a_fit, incl_fit, omega, cn, sec)
    pylab.clf()
    pylab.errorbar(JD, flux, yerr=flux_err, fmt='k.', label='data')
    pylab.plot(JD, truth, 'k-', label='true')
    pylab.plot(JD, initial_guess, 'b--', label='initial')
    pylab.plot(JD, fit, 'r-', label='fit')
    pylab.legend(loc='lower right')
    print(p_true, p_guess, p_fit)
    print(Ttr_true, Ttr_guess, Ttr_fit)
    print(incl_true, incl_guess, incl_fit)
    print(a_true, a_guess, a_fit)
    return
Example #17
def fn2(d):
    for key, val in d.items():
        if val < 5:
            # 4.1 Using a normal distribution with mean=2 and std=3, create a 256-point array
            d[key] = plb.normal(loc=2, scale=3, size=256)  # store it back (the original assigned to the loop variable only)
            # 4.2 Using a histogram with 12 bins, plot the result from 4.1
            #plb.figure("Histogram"), plb.title("Gaussian distribution histogram")
            #plb.hist(d[key], bins=12, color='red', label='Normal')
    return d
Example #18
File: ffprime.py Project: saigrain/pyspot
def rv_sample(obs = None, tspan = 180, npernight = 3, drun = 10, \
                  nrun = 3, nrand = 10, dnight = 8./24.):
    if obs is not None:
        # Read in RV data
        if obs == 'corot7':
            rv = atpy.Table(corotdefs.ROOTDIR + 'LRa01/cands/corot7_rv.ipac')
            time = rv.JDB
        if obs == 'hd189':
            rv = atpy.Table('/Users/suz/Data/HD189_rv.ipac')
            time = rv.hjd
    else:
        # One point per night
        days = scipy.arange(tspan)
        dt_night = dnight / float(npernight + 1)
        # Multiple points per night, with small deviations from regularity
        obs = scipy.zeros((tspan, npernight))
        for i in scipy.arange(npernight):
            obs[:,i] = days[:] + dt_night * float(i) + \
                pylab.normal(0, dt_night/2., tspan)
        # Select points in "intensive" runs
        if drun == tspan:
            take = scipy.ones((tspan, npernight), 'int')
        else:
            take = scipy.zeros((tspan, npernight), 'int')
            for i in scipy.arange(nrun):
                ok = 0
                while ok == 0:
                    tstart = int(scipy.fix(scipy.rand(1)[0] * tspan))  # integer index for slicing
                    tend = tstart + drun
                    if tend > tspan: continue
                    if take[tstart:tend, :].any(): continue
                    take[tstart:tend, :] = 1
                    ok = 1


        # Select additional individual points
        ntot = tspan * npernight
        obs = scipy.reshape(obs, ntot)
        take = scipy.reshape(take, ntot)
        index = scipy.argsort(obs)
        obs = obs[index]
        take = take[index]
        for i in scipy.arange(nrand):
            ok = 0
            while ok == 0:
                t = int(scipy.fix(scipy.rand(1)[0] * ntot))  # integer index
                if take[t] == 1: continue
                take[t] = 1
                ok = 1
        time = obs[(take == 1)]
    time -= time[0]
    return time
Example #19
def histogram_2():
    plb.figure(2)
    gaus_dist = plb.normal(-2, 2, size=512)
    unif_dist = plb.uniform(-5, 5, size=512)
    plb.hist(unif_dist, bins=24, histtype='stepfilled', density=True, color='cyan', label='Uniform')
    plb.hist(gaus_dist, bins=24, histtype='stepfilled', density=True, color='orange', label='Gaussian', alpha=0.65)
    plb.legend(loc='upper right')
    plb.title('Gaussian vs Uniform distribution / Histogram')
    plb.xlabel('Value')
    plb.ylabel('Frequency')
    plb.grid(True)
    plb.pause(5)
Example #22
    def draw_rand_gaussian_pos(self, min_r=pl.array([])):
        '''optional min_r, array or tuple of arrays on the form
        array([[r0,r1,...,rn],[z0,z1,...,zn]])'''

        x = pl.normal(0, self.radius, self.n)
        y = pl.normal(0, self.radius, self.n)
        z = pl.normal(0, self.radius, self.n)

        min_r_z = {}
        if pl.size(min_r) > 0:
            if isinstance(min_r, tuple):
                for j in range(pl.shape(min_r)[0]):
                    min_r_z[j] = pl.interp(z, min_r[j][0, ], min_r[j][1, ])
                    if j > 0:
                        [w] = pl.where(min_r_z[j] < min_r_z[j - 1])
                        min_r_z[j][w] = min_r_z[j - 1][w]
                    minrz = min_r_z[j]

            else:
                minrz = pl.interp(z, min_r[0], min_r[1])

            R_z = pl.sqrt(x**2 + y**2)
            [u] = pl.where(R_z < minrz)

            while len(u) > 0:
                for i in range(len(u)):
                    x[u[i]] = pl.normal(0, self.radius, 1)
                    y[u[i]] = pl.normal(0, self.radius, 1)
                    z[u[i]] = pl.normal(0, self.radius, 1)
                    if isinstance(min_r, tuple):
                        for j in range(pl.shape(min_r)[0]):
                            min_r_z[j][u[i]] = pl.interp(
                                z[u[i]], min_r[j][0, ], min_r[j][1, ])
                            if j > 0:
                                [w] = pl.where(min_r_z[j] < min_r_z[j - 1])
                                min_r_z[j][w] = min_r_z[j - 1][w]
                            minrz = min_r_z[j]
                    else:
                        minrz[u[i]] = pl.interp(z[u[i]], min_r[0, ],
                                                min_r[1, ])
                R_z = pl.sqrt(x**2 + y**2)
                [u] = pl.where(R_z < minrz)

        soma_pos = {
            'xpos': x,
            'ypos': y,
            'zpos': z,
        }
        return soma_pos
Example #23
File: ffprime.py Project: saigrain/pyspot
def rv_noise(time, tau = 0.5, ninit = 10):
    npt = len(time)
    # Burn-in section
    dt = 2.3 * tau / float(ninit)
    tinit = (scipy.arange(ninit) + 1) * dt
    tsim = scipy.append(time[0] - tinit[::-1], time)
    nsim = len(tsim)
    # Instead of Gaussian white noise, to simulate effects of moon and
    # such like, each point in the "WGN" component is drawn from a
    # distribution with a different sigma each night, where the sigma itself is
    # drawn from a powerlaw distribution between 1 and 10
    wt = pylab.normal(0, 1, nsim)
    ye = scipy.ones(nsim)
    nights = scipy.fix(tsim)
    unights = scipy.unique(nights)
    nnights = len(unights)
    sigma = 10.0 ** (scipy.rand(nnights))**4
    for i in scipy.arange(nnights):
        l = scipy.where(nights == unights[i])[0]
        if l.any():
            wt[l] *= sigma[i]
            ye[l] = sigma[i]
    # Now apply MA model with exponentially decaying correlation
    ysim = scipy.copy(wt)
    for i in scipy.arange(nsim):
        j = i - 1
        coeff = 1.0
        while (j > 0) and (coeff > 0.1):
            dt = tsim[i] - tsim[j]
            coeff = scipy.exp(-dt/tau)
            ysim[i] += coeff * wt[j]
            j -= 1
    # Discard burn-in and globally re-scale before returning
    yout = ysim[ninit:]
    eout = ye[ninit:]
    med, sig = filter.medsig(yout)
    yout /= sig
    eout /= sig
    return yout, eout
Example #24
File: SuperLearn.py Project: qmc1020/c366
def targetFunction(x, y):
    return sin(x - 3.0) * cos(y) + normal(0, 0.1)
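The function needs sin, cos and normal in scope; a minimal sketch sampling noisy training targets on a small grid (grid values are illustrative):

from numpy import sin, cos
from numpy.random import normal

samples = [(x, y, targetFunction(x, y)) for x in range(3) for y in range(3)]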
Example #25
print("Deltal = ", Deltal)

border = int(N[0] / 10)
mask = np.zeros(N)
mask[border:-border, border:-border] = 1.0
mask = np.ones(N)  # note: this overrides the border mask built above
fmask = np.sum(mask) / np.prod(N)

lmax = 10000
lbins = np.linspace(0, lmax, 50)
lcent = lbins[:-1] + np.diff(lbins) / 2.

# Make some E/B

N1 = pylab.normal(size=N)
T = gaussian_filter(N1, 2, mode='wrap')
E = gaussian_filter(N1, 2, mode='wrap')
B = np.zeros(N)

Eharm = cmbtools.map2harm(E, Delta)
Bharm = cmbtools.map2harm(B, Delta)

ClEE = cmbtools.harm2cl(Eharm, Deltal, lbins)
ClBB = cmbtools.harm2cl(Bharm, Deltal, lbins)

Qharm, Uharm = cmbtools.EB2QU(Eharm, Bharm, Deltal)

Q = cmbtools.harm2map(Qharm, Delta) * mask
U = cmbtools.harm2map(Uharm, Delta) * mask
Example #26
    def run(self,
            seed_infections=1,
            verbose=None,
            calc_likelihood=False,
            do_plot=False,
            **kwargs):
        ''' Run the simulation '''

        T = sc.tic()

        # Reset settings and results
        if verbose is None:
            verbose = self['verbose']
        self.init_results()
        self.init_people(
            seed_infections=seed_infections)  # Actually create the people
        daily_tests = self.data[
            'new_tests']  # Number of tests each day, from the data
        evacuated = self.data['evacuated']  # Number of people evacuated

        # Main simulation loop
        for t in range(self.npts):

            # Print progress
            if verbose >= 1:
                string = f'  Running day {t:0.0f} of {self.pars["n_days"]}...'
                if verbose >= 2:
                    sc.heading(string)
                else:
                    print(string)

            self.results['t'][t] = t
            test_probs = {
            }  # Store the probability of each person getting tested

            # Update each person
            for person in self.people.values():

                # Count susceptibles
                if person.susceptible:
                    self.results['n_susceptible'][t] += 1

                # Handle testing probability
                if person.infectious:
                    test_probs[person.uid] = self[
                        'symptomatic']  # They're infectious: high probability of testing
                else:
                    test_probs[person.uid] = 1.0

                # If exposed, check if the person becomes infectious
                if person.exposed:
                    self.results['n_exposed'][t] += 1
                    if not person.infectious and t >= person.date_infectious:  # It's the day they become infectious
                        person.infectious = True
                        if verbose >= 2:
                            print(
                                f'      Person {person.uid} became infectious!'
                            )

                # If infectious, check if anyone gets infected
                if person.infectious:
                    # First, check for recovery
                    if person.date_recovered and t >= person.date_recovered:  # It's the day they recover
                        person.exposed = False
                        person.infectious = False
                        person.recovered = True
                        self.results['recoveries'][t] += 1
                    else:
                        self.results['n_infectious'][
                            t] += 1  # Count this person as infectious
                        n_contacts = cov_ut.pt(
                            person.contacts
                        )  # Draw the number of Poisson contacts for this person
                        contact_inds = cov_ut.choose_people(
                            max_ind=len(self.people),
                            n=n_contacts)  # Choose people at random
                        for contact_ind in contact_inds:
                            exposure = cov_ut.bt(
                                self['r_contact']
                            )  # Check for exposure per person
                            if exposure:
                                target_person = self.people[contact_ind]
                                if target_person.susceptible:  # Skip people who are not susceptible
                                    self.results['infections'][t] += 1
                                    target_person.susceptible = False
                                    target_person.exposed = True
                                    target_person.date_exposed = t
                                    incub_dist = round(
                                        pl.normal(person.pars['incub'],
                                                  person.pars['incub_std']))
                                    dur_dist = round(
                                        pl.normal(person.pars['dur'],
                                                  person.pars['dur_std']))
                                    target_person.date_infectious = t + incub_dist
                                    target_person.date_recovered = target_person.date_infectious + dur_dist
                                    if verbose >= 2:
                                        print(
                                            f'        Person {person.uid} infected person {target_person.uid}!'
                                        )

                # Count people who recovered
                if person.recovered:
                    self.results['n_recovered'][t] += 1

            # Implement testing -- this is outside of the loop over people, but inside the loop over time
            if t < len(
                    daily_tests
            ):  # Don't know how long the data is, ensure we don't go past the end
                n_tests = daily_tests.iloc[t]  # Number of tests for this day
                if n_tests and not pl.isnan(
                        n_tests):  # There are tests this day
                    self.results['tests'][
                        t] = n_tests  # Store the number of tests
                    test_probs = pl.array(list(test_probs.values()))
                    test_probs /= test_probs.sum()
                    test_inds = cov_ut.choose_people_weighted(probs=test_probs,
                                                              n=n_tests)
                    uids_to_pop = []
                    for test_ind in test_inds:
                        tested_person = self.people[test_ind]
                        if tested_person.infectious and cov_ut.bt(
                                self['sensitivity']
                        ):  # Person was tested and is true-positive
                            self.results['diagnoses'][t] += 1
                            tested_person.diagnosed = True
                            if self['evac_positives']:
                                uids_to_pop.append(tested_person.uid)
                            if verbose >= 2:
                                print(
                                    f'          Person {person.uid} was diagnosed!'
                                )
                    for uid in uids_to_pop:  # Remove people from the ship once they're diagnosed
                        self.off_ship[uid] = self.people.pop(uid)

            # Implement quarantine
            if t == self['quarantine']:
                if verbose >= 1:
                    print(f'Implementing quarantine on day {t}...')
                for person in self.people.values():
                    if 'quarantine_eff' in self.pars.keys():
                        quarantine_eff = self['quarantine_eff']  # Both
                    else:
                        if person.crew:
                            quarantine_eff = self['quarantine_eff_c']  # Crew
                        else:
                            quarantine_eff = self['quarantine_eff_g']  # Guests
                    person.contacts *= quarantine_eff

            # Implement testing change
            if t == self['testing_change']:
                if verbose >= 1:
                    print(f'Implementing testing change on day {t}...')
                self['symptomatic'] *= self[
                    'testing_symptoms']  # Reduce the proportion of symptomatic testing

            # Implement evacuations
            if t < len(evacuated):
                n_evacuated = evacuated.iloc[
                    t]  # Number of evacuees for this day
                if n_evacuated and not pl.isnan(
                        n_evacuated
                ):  # There are evacuees this day # TODO -- refactor with n_tests
                    if verbose >= 1:
                        print(f'Implementing evacuation on day {t}')
                    evac_inds = cov_ut.choose_people(max_ind=len(self.people),
                                                     n=n_evacuated)
                    uids_to_pop = []
                    for evac_ind in evac_inds:
                        evac_person = self.people[evac_ind]
                        if evac_person.infectious and cov_ut.bt(
                                self['sensitivity']):
                            self.results['evac_diagnoses'][t] += 1
                        uids_to_pop.append(evac_person.uid)
                    for uid in uids_to_pop:  # Remove people from the ship once they're diagnosed
                        self.off_ship[uid] = self.people.pop(uid)

        # Compute cumulative results
        self.results['cum_exposed'] = pl.cumsum(self.results['infections'])
        self.results['cum_tested'] = pl.cumsum(self.results['tests'])
        self.results['cum_diagnosed'] = pl.cumsum(self.results['diagnoses'])

        # Compute likelihood
        if calc_likelihood:
            self.likelihood()

        # Tidy up
        self.results['ready'] = True
        elapsed = sc.toc(T, output=True)
        if verbose >= 1:
            print(f'\nRun finished after {elapsed:0.1f} s.\n')
            summary = self.summary_stats()
            print(f"""Summary: 
     {summary['n_susceptible']:5.0f} susceptible 
     {summary['n_exposed']:5.0f} exposed
     {summary['n_infectious']:5.0f} infectious
               """)

        if do_plot:
            self.plot(**kwargs)

        return self.results
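Note that round(pl.normal(mu, std)) as used above for the incubation and duration draws can come out negative for small means; a defensive variant (an assumption, not part of the model above) clamps the draw at zero:

import pylab as pl

incub_dist = max(0, round(pl.normal(5.0, 2.0)))  # illustrative mean/std, clamped to be non-negative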
Example #27
def mklc(nspot = 200, incl = (scipy.pi)*5./12., amp = 0.01, \
         tau = 30.5, diffrot = 0.0, \
         dur = 20.0, samp = 0.01, noise = 0.001, doplot = False, myperiod = [10.0], Amplitude = 1.0, quarter = 3):

    # spots.py calls runSim, which calls mklc

    ''' This is a simplified version of the class-based routines in
    spot_model.py. It generates a light curves for dark, point like
    spots with no limb-darkening. 

    Parameters:
    nspot = desired number of spots present on star at any
            one time
    amp = desired light curve amplitude
    tau = characteristic spot life-time
    dur = light curve duration
    samp = time-sampling
    diffrot = fractional difference between equatorial and polar
              rotation period
    doplot: set to True to produce plots
    (unit of time is equatorial rotation period)'''

    # IMPORT KEPLER LIGHTCURVE

    name = '006370489'
    time2 = []; lightcurve = []
    quarter_times2 = []
    quarter_times_start = []
    quarter_times_fin = []
    files = glob.glob('/Users/angusr/angusr/data2/all_Qs/kplr%s-*llc.fits' % (name))
    #print len(files)
    for f in range(0,12):
        hdulist = pyfits.open(files[f])
        tbdata = hdulist[1].data #"add the first extension to tbdata"
        x = numpy.where(numpy.isfinite(tbdata['TIME']))
        time = tbdata['TIME'][x]
        lc = tbdata['PDCSAP_FLUX'][x]
        x = numpy.where(numpy.isfinite(tbdata['PDCSAP_FLUX']))
        time = tbdata['TIME'][x]
        lc = tbdata['PDCSAP_FLUX'][x]
        quarter_times2.append(tbdata['TIME'])
        quarter_times_start.append(time[0])
        quarter_times_fin.append(time[-1])
        lightcurve.extend(numpy.array(lc))
        time2.extend(numpy.array(time))

    
    
    # myperiod = float(myperiod[0])
    print('Period = ', myperiod)
    dur = (max(time2) - min(time2))

    # SET UP THE SPOTS
    print('Setting up the spots...')

    # (crude estimate of) total number of spots needed during entire
    # time-series
    nspot_tot = int(nspot * dur / 2 / tau)
    # uniform distribution of spot longitudes
    lon = scipy.rand(nspot_tot) * 2 * scipy.pi
    # distribution of spot latitudes uniform in sin(latitude)
    lat = scipy.arcsin(scipy.rand(nspot_tot))
    # spot rotation rate optionally depends on latitude
    period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0) * myperiod
    period0 = scipy.ones(nspot_tot) * myperiod
    # all spots have the same maximum area
    # (crude estimate of) filling factor needed per spot
    ff = amp / scipy.sqrt(nspot)
    scale_fac = 1
    amax = scipy.ones(nspot_tot) * ff * scale_fac
    # all spots have the evolution timescale
    decay = scipy.ones(nspot_tot) * tau
    # uniform distribution of spot peak times 
    # start well before and end well after time-series limits (to
    # avoid edge effects)
    extra = 3 * decay.max()
    pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra

    # COMPUTE THE LIGHT CURVE
    print('Computing the light curve...')

    time = numpy.array(time2) - min(time2)
    addit = min(time2)

    npt = len(time)
    area_tot = scipy.zeros(npt)
    dF_tot = scipy.zeros(npt)
    dF_tot0 = scipy.zeros(npt)

    # add up the contributions of individual spots
    for i in range(nspot_tot):
        # Spot area
        if (pk[i] == 0) + (decay[i] == 0):
            area = scipy.ones(npt) * amax[i]
        else:
            area = amax[i] * \
                scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
        area_tot += area
        # Fore-shortening 
        phase = 2 * scipy.pi * time / period[i] + lon[i]
        phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
        mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
            scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
        mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
            scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
        mu[mu < 0] = 0.0
        mu0[mu0 < 0] = 0.0
        # Flux
        dF_tot -= area * mu
        dF_tot0 -= area * mu0

   
    
    print('Adding noise...')
    # ADD NOISE
    noi = pylab.normal(0, 1, npt) * noise
    dF_tot += noi
    dF_tot0 += noi
        
    amp_eff = dF_tot.max()-dF_tot.min()
    nspot_eff = area_tot / scale_fac / ff

    if doplot:
        print('Used %d spots in total over %d rotation periods.'
              % (nspot_tot, dur))
        print('Mean filling factor of individual spots was %.4f.' % ff)
        print('Desired amplitude was %.4f, actual amplitude was %.4f.'
              % (amp, amp_eff))
        print('Desired number of spots at any one time was %d.' % nspot)
        # format arguments below inferred from the format string; the
        # original line ended in a dangling continuation
        print('Actual number of spots was %d (min), %d (average), %d (max).'
              % (nspot_eff.min(), nspot_eff.mean(), nspot_eff.max()))

        pylab.close(1)
        pylab.figure(1, (10, 4))
        xoff = 0.1
        xoffr = 0.05
        yoff = 0.13
        yoffr = 0.07
        xwi = (1.0 - xoff - xoffr)
        ywi = (1.0 - yoff - yoffr) / 2
        xll = xoff 
        yll = 1.0 - yoffr - ywi
        ax1 = pylab.axes([xll, yll, xwi, ywi])
        pylab.plot(time, area_tot * 100, 'k-')
        #pylab.title('i=%.1f <Ns>=%d A=%.4f Fs=%.4f tau=%.2fP sig=%.4f diff=%.1f' \
        #            % (incl * 180 / scipy.pi, nspot_eff.mean(), amp_eff, ff, tau, noise, diffrot))
        #pylab.title('i=%.1f <Ns>=%d tau=%.2fP Period = %d days' \
        #            % (incl * 180 / scipy.pi, nspot_eff.mean(), tau, myperiod)) 
        pylab.ylabel('fill. fac. (%)')
        yll = 1.0 - yoffr - 2 * ywi
        axc = pylab.axes([xll, yll, xwi, ywi], sharex = ax1)
        pylab.plot(time, 1 + dF_tot, 'k-', label = 'diff rot')
        pylab.plot(time, 1 + dF_tot0 - amp_eff, 'r-', label = 'no diff rot')
        pylab.ylabel('rel. flux')
        pylab.xlabel('time (days)')
        pylab.xlim(time.min(), time.max())
        pylab.legend()
        #pylab.savefig('/Users/angusr/angusr/ACF/star_spot_sim', )

    
    

    

    # Split into quarters
    print('quarter = ', quarter)
    time = time + addit
    # print('start = ', quarter_times_start[quarter])
    # print('stop = ', quarter_times_fin[quarter])
    print(quarter_times_start)
    print(quarter_times_fin)
    x = numpy.where(time == quarter_times_start[quarter])
    y = numpy.where(time == quarter_times_fin[quarter])

    start = int(x[0])
    stop = int(y[0])
    
    time = time[start:stop]
    area_tot = area_tot[start:stop]
    dF_tot = dF_tot[start:stop]
    dF_tot0 = dF_tot0[start:stop]
    lightcurve = lightcurve[start:stop]

    # Add normalised Kepler lc to simulated lc
    lightcurve = lightcurve/numpy.median(lightcurve)
    lightcurve = lightcurve - numpy.median(lightcurve)
    dF_tot = dF_tot - numpy.median(dF_tot)
    dF_tot0 = dF_tot0 - numpy.median(dF_tot0)
    #dF_tot = (dF_tot)*numpy.median(lightcurve) + lightcurve
    #dF_tot0 = (dF_tot0)*numpy.median(lightcurve) + lightcurve
    # Normalise
    #dF_tot = dF_tot/numpy.median(dF_tot)
    #dF_tot0 = dF_tot0/numpy.median(dF_tot0)

    npt = len(time)

    res0 = scipy.array([nspot_eff.mean(), ff, amp_eff, noise])
    res1 = scipy.zeros((4, npt))

    pylab.close(2)
    pylab.figure(2)
    pylab.subplot(3,1,1)
    pylab.plot(time, dF_tot, 'r.')
    pylab.ylim(min(dF_tot), max(dF_tot))
    pylab.subplot(3,1,2)
    pylab.ylim(min(lightcurve),max(lightcurve))
    pylab.plot(time, lightcurve, 'b.')
    pylab.subplot(3,1,3)
    pylab.ylim(min(lightcurve),max(lightcurve))
    dF_tot2 = dF_tot + lightcurve*Amplitude
    pylab.plot(time, dF_tot, 'g.')
    pylab.ylim(min(dF_tot), max(dF_tot))
    
    res1[0,:] = time
    res1[1,:] = area_tot
    res1[2,:] = dF_tot
    res1[3,:] = dF_tot0

    
    print('Done')
    return res0, res1
Example #28
def normDistRejected(r, rd, rsigma):
    # Looks for smaller particles in case of rejection: redraws until the
    # value falls below rd.
    x = pl.normal(r, rsigma)
    while x > rd:
        x = pl.normal(r, rsigma)
    return x
Example #30

B = my_func(A)
print(B)


def replace(my_dict, my_array):
    for i, j in my_dict.items():
        if j < 5:
            my_dict[i] = my_array
        else:
            continue
    return my_dict


my_array = plb.normal(loc=2, scale=3, size=256)

C = replace(B, my_array)
C['A'] = C.pop('E')

#print(C)
C['F'] = 1980
del C['D']
#print(C)

# if A.values() < B.values():
#     print("A is less than B")
# elif A.values() < C.values():
#     print("A is less than C")
# else:
#     print('A is the father')
Example #31
    def walk(self, meanStep, varianceStep):
        walkSize = normal(meanStep, varianceStep)
        self.meanReturn += walkSize
        return walkSize
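A usage sketch (assuming numpy.random.normal and a suitable object): repeated calls drift meanReturn by one Gaussian step each, so pull() in Example #7 samples around a slowly wandering mean.

from numpy.random import normal

mean_return = 0.0
for _ in range(5):
    mean_return += normal(0.0, 0.1)  # meanStep=0.0, varianceStep=0.1
print(mean_return)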
Example #32
def targetFunction(in1, in2):
    return sin(in1 - 3.0) * cos(in2) + normal(0, 0.1)
Example #33

    logreturnf = np.vectorize(logreturn)
    logret = (logreturnf(i))
    return (logret)


# Gets the Microsoft stock price from Dec 20, 2016 to Dec 20, 2019
with open('MICRO.csv') as csvdata:
    read = csv.reader(csvdata, delimiter=',')
    a1 = []
    for r in read:
        S = r[1]
        a1.append(float(S))
S1 = a1
N = len(a1)
Normal = pl.normal(0, 1, size=len(f2(1)))  #normal estimates
fig, ax = pl.subplots(1, 3)
ax[0].plot(a1)
ax[1].plot(pl.sort(Normal), f2(1))  #QQ plot
ax[1].set_xlabel('normal distribution')
ax[1].set_ylabel('normalized logret')
ax[1].legend(['QQ plot'], loc='upper right')
ax[2].acorr(f3(1), maxlags=60)  #AUTOCORRELATION PLOT
ax[2].set_xlabel('autocorrelation plot')
pl.tight_layout()
pl.show()
S1 = a1
N = len(S1)
print("[mu,sigma]", f1(1))

#QUESTION NO (c)
Example #34
    def targetFunction(*X):
        return sin(X[0]) + cos(X[1]) + normal(0., 0.1)
Example #36
File: Utils_i.py Project: Solvi/pyhrf
def Main_vbjde(graph, Y, Onsets, Thrf, K, TR, beta, dt, NitMax=-1, hrf=None):
    if NitMax < 0:
        NitMax = 100
    D = int(numpy.ceil(Thrf / dt))
    M = len(Onsets)
    N = Y.shape[0]
    J = Y.shape[1]
    l = int(sqrt(J))
    sigma_epsilone = numpy.ones(J)
    X = {}
    for condition, Ons in Onsets.items():
        X[condition] = compute_mat_X_2(N, TR, D, dt, Ons)
    mu_M = numpy.zeros((M, K), dtype=float)
    sigma_M = 0.5 * numpy.ones((M, K), dtype=float)
    mu_M0 = numpy.zeros((M, K), dtype=float)
    sigma_M0 = numpy.zeros((M, K), dtype=float)
    for k in range(0, K):
        mu_M[:, 0] = 2.0
    mu_M0[:, :] = mu_M[:, :]
    sigma_M0[:, :] = sigma_M[:, :]
    #sigmaH = numpy.ones((J), dtype=float)
    #sigmaH1 = numpy.ones((J), dtype=float)
    sigmaH = 0.1
    sigmaH1 = 0.1
    order = 2
    D2 = buildFiniteDiffMatrix(order, D)
    R = numpy.dot(D2, D2) / pow(dt, 2 * order)
    Gamma = numpy.identity(N)
    q_Z = numpy.zeros((M, K, J), dtype=float)
    for k in range(0, K):
        q_Z[:, 1, :] = 1
    q_Z1 = q_Z.copy()
    Z_tilde = q_Z.copy()
    Sigma_A = numpy.zeros((M, M, J), float)
    m_A = numpy.zeros((J, M), dtype=float)
    m_H = numpy.zeros((D, J), dtype=float)
    m_H1 = numpy.zeros((D, J), dtype=float)
    m_H2 = numpy.zeros((D, J), dtype=float)
    TT, m_h = getCanoHRF(Thrf - dt, dt)
    for j in range(0, J):
        Sigma_A[:, :, j] = 0.01 * numpy.identity(M)
        for m in range(0, M):
            for k in range(0, K):
                m_A[j, m] += normal(mu_M[m, k], numpy.sqrt(sigma_M[m, k])) * Z_tilde[m, k, j]
        m_H[:, j] = numpy.array(m_h)
        m_H1[:, j] = numpy.array(m_h)
    #m_H = numpy.array(m_h)
    #m_H1 = numpy.array(m_h)
    Sigma_H = numpy.ones((D, D, J), dtype=float)
    #Sigma_H = 0.1 * numpy.identity(D)
    Beta = beta * numpy.ones((M), dtype=float)
    m_A1 = numpy.zeros((J, M), dtype=float)
    m_A1[:, :] = m_A[:, :]
    Crit_H = [0]
    Crit_Z = [0]
    Crit_sigmaH = [0]
    Hist_sigmaH = []
    ni = 0
    Y_bar_tilde = numpy.zeros((D), dtype=float)
    zerosND = numpy.zeros((N, D), dtype=float)
    X_tilde = numpy.zeros((Y.shape[1], M, D), dtype=float)
    Q_bar = numpy.zeros(R.shape)
    P = PolyMat(N, 4, TR)
    L = polyFit(Y, TR, 4, P)
    PL = numpy.dot(P, L)
    y_tilde = Y - PL
    t1 = time.time()
    Norm = numpy.zeros((J), dtype=float)
    while ((ni < 15) or (Crit_sigmaH[-1] > 5e-3) or (Crit_H[-1] > 5e-3) or (Crit_Z[-1] > 5e-3)) \
            and (ni < NitMax):
        print("------------------------------ Iteration n° " + str(ni + 1) + " ------------------------------")
        pyhrf.verbose(2, "------------------------------ Iteration n° " + str(ni + 1) + " ------------------------------")
        pyhrf.verbose(3, "E A step ...")
        Sigma_A, m_A = expectation_A(Y, Sigma_H, m_H, m_A, X, Gamma, PL, sigma_M, q_Z, mu_M, D, N, J, M, K, y_tilde, Sigma_A, sigma_epsilone)
        pyhrf.verbose(3, "E H step ...")
        Sigma_H, m_H = expectation_H(Y, Sigma_A, m_A, X, Gamma, PL, D, R, sigmaH, J, N, y_tilde, zerosND, sigma_epsilone, Sigma_H, m_H)
        Crit_H += [abs(numpy.mean(m_H - m_H1) / numpy.mean(m_H))]
        m_H1[:, :] = m_H[:, :]
        m_H2[:, :] = m_H[:, :]
        pyhrf.verbose(3, "E Z step ...")
        q_Z, Z_tilde = expectation_Z(Sigma_A, m_A, sigma_M, Beta, Z_tilde, mu_M, q_Z, graph, M, J, K)
        DIFF = abs(numpy.reshape(q_Z, (M * K * J)) - numpy.reshape(q_Z1, (M * K * J)))
        Crit_Z += [numpy.mean(DIFF) / (DIFF != 0).sum()]
        q_Z1[:, :] = q_Z[:, :]
        pyhrf.verbose(3, "M (mu,sigma) step ...")
        mu_M, sigma_M = maximization_mu_sigma(mu_M, sigma_M, q_Z, m_A, K, M)
        pyhrf.verbose(3, "M sigma_H step ...")
        sigmaH = maximization_sigmaH(m_H, R, J, D, Sigma_H, sigmaH)
        #sigmaH = numpy.dot(numpy.dot(m_H.transpose(), R), m_H)  # + (numpy.dot(Sigma_H, R)).trace()
        Crit_sigmaH += [abs((sigmaH - sigmaH1) / sigmaH)]
        Hist_sigmaH += [sigmaH]
        sigmaH1 = sigmaH
        pyhrf.verbose(3, "M L step ...")
        L = maximization_L(Y, m_A, X, m_H, L, P)
        PL = numpy.dot(P, L)
        y_tilde = Y - PL
        pyhrf.verbose(3, "M sigma_epsilone step ...")
        sigma_epsilone = maximization_sigma_noise(Y, X, m_A, m_H, Sigma_H, Sigma_A, PL, sigma_epsilone, M)
        for i in range(0, J):
            Norm[i] = norm(m_H[:, i])
            m_H2[:, i] /= Norm[i]
        if ((ni + 1) % 1) == 0:
            pyplot.clf()
            figure(1)
            plot(m_H[:, 10], 'r')
            hold(True)
            plot(hrf / norm(hrf), 'b')
            legend(('Est', 'Ref'))
            title(str(sigmaH))
            hold(False)
            draw()
            show()
            for m in range(0, M):
                for k in range(0, K):
                    z1 = q_Z[m, k, :]
                    z2 = reshape(z1, (l, l))
                    figure(2).add_subplot(M, K, 1 + m * K + k)
                    imshow(z2)
                    title("m = " + str(m) + " k = " + str(k))
            draw()
            show()
        ni += 1
    t2 = time.time()
    CompTime = t2 - t1

    for i in range(0, J):
        Norm[i] = norm(m_H[:, i])
        m_H[:, i] /= Norm[i]
        m_A[i, :] *= Norm[i]
    pyhrf.verbose(1, "Computational time = " + str(int(CompTime // 60)) + " min " + str(int(CompTime % 60)) + " s")
    return m_A, m_H, q_Z, sigma_epsilone, sigmaH
Example #37
# Histogram 2/2:
import pylab as plb

plb.figure(2)

gaus_dist = plb.normal(size=512)  # create a random floating point vector
unif_dist = plb.uniform(-5, 5,
                        size=512)  # Create a uniform distribution vector

# plot the histogram with specific bin number, color, transparency, label
plb.hist(unif_dist,
         bins=24,
         histtype='stepfilled',
         density=True,
         color='cyan',
         label='Uniform')
plb.hist(gaus_dist,
         bins=24,
         histtype='stepfilled',
         density=True,
         color='orange',
         label='Gaussian',
         alpha=0.65)

plb.legend(loc='upper left')
plb.title("Gaussian vs Uniform distribution /  Histogram")
plb.xlabel("value")
plb.ylabel("Frequency")
plb.grid(True)
plb.pause(5)
Example #38
def targetFunction(in1, in2):
    return sin(log(in1) / (in1**2 + sqrt(2 * in1) + 1)) + cos(exp(in2) / (in2**2 - sqrt(2 * in2) + 1)) + normal(0, 0.5)
Example #39
def GP_train_MCMC(Nstep, x, y, cov_par, cov_scales, cov_func = None, \
                  cov_typ = 'SE', cov_prior = None, \
                  MF = None, MF_par = None, MF_scales = None, MF_args = None, \
                  MF_prior = None):
    '''    
    MCMC over GP hyper-parameters. Calls GP_negloglik. Takes care of
    merging / splitting the fixed / variable and cov / MF parameters
    Returns a Nstep x (M+1) array where M is the number of *variable*
    (scale > 0) hyper-parameters. The first column of the return array
    contains the neg log likelihood values, then the other columns the
    parameters that were varied along the chain.
    '''
    # Sort out the fixed / variable and cov / MF parameters
    if MF is not None:
        params = scipy.append(cov_par, MF_par)
        scales = scipy.append(cov_scales, MF_scales)
        n_MF_par = len(MF_par)
        if MF_prior is None:
            if cov_prior is None:
                prior = None
            else:
                prior = numpy.copy(cov_prior)
        else:
            if cov_prior is None:
                prior = numpy.copy(MF_prior)
            else:
                prior = scipy.append(cov_prior, MF_prior)
        if MF_args is None: MF_args = x
    else:
        params = cov_par[:]
        scales = cov_scales[:]
        n_MF_par = 0
        prior = numpy.copy(cov_prior)
    # Now do the MCMC proper
    fixed = scales == 0
    var = scales > 0
    nvar = var.sum()
    var_par = params[scales > 0]
    var_scales = scales[scales > 0]
    fixed_par = scales[scales == 0]
    chain = scipy.zeros((Nstep, nvar+1)) - 1
    logL = - GP_negloglik(var_par, x, y, covfunc = cov_func, covtyp = cov_typ, \
                          MF = MF, n_MF_par = n_MF_par, \
                          MF_args = MF_args, fixed = fixed, fixed_par = params[fixed])
    randnos = scipy.log(scipy.rand(Nstep))
    for i in range(Nstep):
        shift = pylab.normal(0., 1., nvar) * scales[var]
        var_par_new = var_par + shift
        print(var_par_new)
        logL_new = \
            - GP_negloglik(var_par_new, x, y, covfunc = cov_func, \
                           covtyp = cov_typ, MF = MF, n_MF_par = n_MF_par, \
                           MF_args = MF_args, fixed = fixed, fixed_par = params[fixed], \
                           prior = prior)
        dlogL = logL_new - logL
        if randnos[i] <= dlogL:
            print('%8.6f %11.6f %11.6f %8.6f %8.6f %1s' %
                  (i / float(Nstep), logL, logL_new, dlogL, randnos[i], 'A'))
            var_par = var_par_new
            logL = logL_new
        else:
            print('%8.6f %11.6f %11.6f %8.6f %8.6f %1s' %
                  (i / float(Nstep), logL, logL_new, dlogL, randnos[i], 'R'))
        # Store the new values of the parameters and the new merit function
        chain[i,0] = logL
        chain[i,1:] = scipy.array(var_par)
    return chain
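The acceptance test in the chain is the standard Metropolis rule in log space: a step is kept when log(u) <= logL_new - logL_old for uniform u. Factored out as a sketch:

import numpy

def metropolis_accept(logL_old, logL_new, rng=numpy.random):
    # accept improvements always, worse steps with probability exp(dlogL)
    return numpy.log(rng.rand()) <= (logL_new - logL_old)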
Example #40
File: spotsim.py Project: RuthAngus/K-ACF
def mklc(x, nspot, incl, amp, tau, diffrot, dur, samp, noise, myperiod, \
        Amplitude, doplot = False):

    ''' This is a simplified version of the class-based routines in
    spot_model.py. It generates a light curves for dark, point like
    spots with no limb-darkening.

    Parameters:
    nspot = desired number of spots present on star at any
            one time
    amp = desired light curve amplitude
    tau = characteristic spot life-time
    dur = light curve duration
    samp = time-sampling
    diffrot = fractional difference between equatorial and polar
              rotation period
    doplot: set to True to produce plots
    (unit of time is equatorial rotation period)'''

    print('Period = ', myperiod)
    dur = (max(x) - min(x))

    # SET UP THE SPOTS
    print('Setting up the spots...')

    # (crude estimate of) total number of spots needed during entire
    # time-series
    nspot_tot = int(nspot * dur / 2 / tau)
    print(nspot, 'nspot', dur, 'dur', tau, 'tau')

    # uniform distribution of spot longitudes
    lon = scipy.rand(nspot_tot) * 2 * scipy.pi

    # distribution of spot latitudes uniform in sin(latitude)
    lat = scipy.arcsin(scipy.rand(nspot_tot))

    # spot rotation rate optionally depends on latitude
    period = ((scipy.sin(lat) - 0.5) * diffrot + 1.0 )*myperiod
    period0 = scipy.ones(nspot_tot)*myperiod

    # all spots have the same maximum area
    # (crude estimate of) filling factor needed per spot
    ff = amp / scipy.sqrt(nspot)
    scale_fac = 1
    amax = scipy.ones(nspot_tot) * ff * scale_fac

    # all spots have the evolution timescale
    decay = scipy.ones(nspot_tot) * tau

    # uniform distribution of spot peak times
    # start well before and end well after time-series limits (to
    # avoid edge effects)
    extra = 3 * decay.max()
    pk = scipy.rand(nspot_tot) * (dur + 2 * extra) - extra

    # COMPUTE THE LIGHT CURVE
    print('Computing the light curve...')

    time = np.array(x - min(x))
    addit = min(x)

    npt = len(time)
    area_tot = scipy.zeros(npt)
    dF_tot = scipy.zeros(npt)
    dF_tot0 = scipy.zeros(npt)

    print "add up the contributions of individual spots"
    print nspot_tot
    for i in range(nspot_tot):
        # Spot area
        if (pk[i] == 0) + (decay[i] == 0):
            area = scipy.ones(npt) * amax[i]
        else:
            area = amax[i] * \
                scipy.exp(-(time - pk[i])**2 / 2. / decay[i]**2)
        area_tot += area
        # Fore-shortening
        phase = 2 * scipy.pi * time / period[i] + lon[i]
        phase0 = 2 * scipy.pi * time / period0[i] + lon[i]
        mu = scipy.cos(incl) * scipy.sin(lat[i]) + \
            scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase)
        mu0 = scipy.cos(incl) * scipy.sin(lat[i]) + \
            scipy.sin(incl) * scipy.cos(lat[i]) * scipy.cos(phase0)
        mu[mu < 0] = 0.0
        mu0[mu0 < 0] = 0.0
        # Flux
        dF_tot -= area * mu
        dF_tot0 -= area * mu0

    print('Adding noise...')
    # ADD NOISE
    noi = pylab.normal(0, 1, npt) * noise
    dF_tot += noi
    dF_tot0 += noi

    amp_eff = dF_tot.max()-dF_tot.min()
    nspot_eff = area_tot / scale_fac / ff

    if doplot:
        print('Used %d spots in total over %d rotation periods.'
              % (nspot_tot, dur))
        print('Mean filling factor of individual spots was %.4f.' % ff)
        print('Desired amplitude was %.4f, actual amplitude was %.4f.'
              % (amp, amp_eff))
        print('Desired number of spots at any one time was %d.' % nspot)
        # format arguments below inferred from the format string; the
        # original line ended in a dangling continuation
        print('Actual number of spots was %d (min), %d (average), %d (max).'
              % (nspot_eff.min(), nspot_eff.mean(), nspot_eff.max()))

        pylab.close(1)
        pylab.figure(1, (10, 4))
        xoff = 0.1
        xoffr = 0.05
        yoff = 0.13
        yoffr = 0.07
        xwi = (1.0 - xoff - xoffr)
        ywi = (1.0 - yoff - yoffr) / 2
        xll = xoff
        yll = 1.0 - yoffr - ywi
        ax1 = pylab.axes([xll, yll, xwi, ywi])
        pylab.plot(time, area_tot * 100, 'k-')
        #pylab.title('i=%.1f <Ns>=%d A=%.4f Fs=%.4f tau=%.2fP sig=%.4f diff=%.1f' \
        #            % (incl * 180 / scipy.pi, nspot_eff.mean(), amp_eff, ff, tau, noise, diffrot))
        #pylab.title('i=%.1f <Ns>=%d tau=%.2fP Period = %d days' \
        #            % (incl * 180 / scipy.pi, nspot_eff.mean(), tau, myperiod))
        pylab.ylabel('fill. fac. (%)')
        yll = 1.0 - yoffr - 2 * ywi
        axc = pylab.axes([xll, yll, xwi, ywi], sharex = ax1)
        pylab.plot(time, 1 + dF_tot, 'k-', label = 'diff rot')
        pylab.plot(time, 1 + dF_tot0 - amp_eff, 'r-', label = 'no diff rot')
        pylab.ylabel('rel. flux')
        pylab.xlabel('time (days)')
        pylab.xlim(time.min(), time.max())
        pylab.legend()

    # Normalise
    dF_tot = dF_tot/np.median(dF_tot) - 1.
    dF_tot0 = dF_tot0/np.median(dF_tot0) -1.

    # Split into quarters
    time = time + addit

    res0 = scipy.array([nspot_eff.mean(), ff, amp_eff, noise])
    res1 = scipy.zeros((4, npt))

    res1[0,:] = time
    res1[1,:] = area_tot
    res1[2,:] = dF_tot
    res1[3,:] = dF_tot0

    print('Done')
    return res0, res1
Example #41

plt.pause(1)

luminosity = image[:, :, 0]
plt.imshow(luminosity)
plt.pause(5)

plt.imshow(luminosity, cmap='hot')
plt.pause(5)
plt.imshow(luminosity, cmap='nipy_spectral')  # 'spectral' was renamed nipy_spectral in matplotlib 2.x
plt.pause(5)

# In[22]:

plb.figure(1)
gaus_dist = plb.normal(-2, 2, size=512)  #random vector

plb.hist(gaus_dist, density=True, bins=24)
plb.title("Gaussian Distribution / Histogram")

plb.xlabel("Value")
plb.ylabel("Frequency")
plb.grid(True)
plb.show()

# In[24]:

#Histogram 2
plb.figure(2)
gaus_dist = plb.normal(size=512)
unif_dist = plb.uniform(-5, 5, size=512)  # uniform distribution vector