Example #1
 def __init__(self, cdts, Rcut, hyp, Dist, centre_init):
     """
     Constructor of the logposteriorModule
     """
     rad, thet = Deg2pc(cdts, centre_init, Dist)
     c, r, t, self.Rmax = TruncSort(cdts, rad, thet, Rcut)
     self.pro = c[:, 2]
     self.cdts = c[:, :2]
     self.Dist = Dist
     #------------- poisson ----------------
     self.quadrants = [
         0, np.pi / 2.0, np.pi, 3.0 * np.pi / 2.0, 2.0 * np.pi
     ]
     self.poisson = st.poisson(len(r) / 4.0)
     #-------------- priors ----------------
     self.Prior_0 = st.norm(loc=centre_init[0], scale=hyp[0])
     self.Prior_1 = st.norm(loc=centre_init[1], scale=hyp[1])
     self.Prior_2 = st.uniform(loc=-0.5 * np.pi, scale=np.pi)
     self.Prior_3 = st.halfcauchy(loc=0.01, scale=hyp[2])
     self.Prior_4 = st.halfcauchy(loc=0.01, scale=hyp[3])
     self.Prior_5 = st.halfcauchy(loc=0.01, scale=hyp[2])
     self.Prior_6 = st.halfcauchy(loc=0.01, scale=hyp[3])
     self.Prior_7 = st.truncexpon(b=hyp[4], loc=0.01, scale=hyp[5])
     self.Prior_8 = st.truncexpon(b=hyp[4], loc=0.01, scale=hyp[5])
     print("Module Initialized")
Example #2
def get_gauss_distribution(twissfile='sequence_totrack.tfs', beam_t='FT',
                           sigmas=None, seed=None, n_part=100, noseeding=False,
                           output='initial_distribution.txt', **kwargs):
    x0,y0,px0,py0,betx,bety,alfx,alfy,dx,dpx,dy,dpy,dpp_twiss = twissinit(twissfile)
    beam = Beam(beam_t, **kwargs)

    if not noseeding:
        np.random.seed(seed)

    if sigmas is None:
        sigmas = beam.n_sigma

    # Momentum distribution
    if beam.pdist == 'unif':
        dpp = np.random.uniform(beam.dpp_0-beam.dpp_d, beam.dpp_0+beam.dpp_d, n_part)
    elif beam.pdist.startswith('gauss'):
        sigp = float(beam.pdist[5:])
        dpp = beam.dpp_0+truncnorm(-sigp, sigp, scale=(beam.dpp_d/sigp)).rvs(n_part)
    else:
        print "Warning: unknown pdist '"+beam.pdist+"', assuming uniform."
        dpp = np.random.uniform(beam.dpp_0-beam.dpp_d, beam.dpp_0+beam.dpp_d, n_part)

    pt = np.fromiter((beam.dpp_to_pt(d) for d in dpp), float)

    # Transverse distributions (from action angle, J and theta)
    pt_twiss = beam.dpp_to_pt(dpp_twiss)

    jx = truncexpon(b=0.5*sigmas**2, scale=beam.emit_x).rvs(n_part)
    thx = np.random.uniform(0, 2*np.pi, n_part)
    x = x0 + np.sqrt(2*jx*betx)*np.cos(thx) + dx*(pt-pt_twiss)
    px = px0 - np.sqrt(2*jx/betx)*(np.sin(thx)+alfx*np.cos(thx)) + dpx*(pt-pt_twiss)

    jy = truncexpon(b=0.5*sigmas**2, scale=beam.emit_y).rvs(n_part)
    thy = np.random.uniform(0, 2*np.pi, n_part)
    y = y0 + np.sqrt(2*jy*bety)*np.cos(thy) + dy*(pt-pt_twiss)
    py = py0 - np.sqrt(2*jy/bety)*(np.sin(thy)+alfy*np.cos(thy)) + dpy*(pt-pt_twiss)

    # Output table
    if output is not None:
        normal = PrettyTable(['*', 'NUMBER', 'TURN', 'X', 'PX', 'Y', 'PY', 'T', 'PT', 'S', 'E'])
        normal.align = 'r'
        normal.left_padding_width = 0
        normal.right_padding_width = 8
        normal.border = False
        normal.add_row(['$', '%d', '%d', '%le', '%le', '%le', '%le', '%le', '%le', '%le', '%le'])
        for i in range(n_part):
            normal.add_row([' ', i + 1, 0, x[i], px[i], y[i], py[i], 0.0000000, pt[i], 0.0000000, beam.energy])
        
        with open(output, 'w') as fp:
            fp.write(header())
            fp.write(normal.get_string())

    return x, px, y, py, pt
Example #3
def predict(x,h0,w):
## input: x[L,N], h0, w[n]
## output: y[L]

    x_min, x_max = -1., 1.
    
    L,N = x.shape

    y = np.zeros(L)
    #x[0,:] = np.random.rand(1,N) #Initial values of x_i(0)

    #Generating sequences of x_i(t+1)
    for t in range(L):
        h = x[t,:].dot(w) + h0

        if h != 0.:
            x_scale = 1./np.abs(h)
            sampling = stats.truncexpon(b=(x_max-x_min)/x_scale, loc=x_min, scale=x_scale) 
            #truncated exponential dist exp(x h[i]) for x ~ [-1, 1]
            sample = sampling.rvs(1)  # obtain one sample
            y[t] = -np.sign(h)*sample[0]
        else:
            y[t] = random.uniform(x_min, x_max)
        
    return y
Example #4
File: cspgen.py  Project: zpace/cspgen
    def logzsol_gen(self):

        zsol = self.sp.zlegend / zsol_padova

        d_ = stats.truncexpon(loc=0., scale=1., b=1.)
        d_.random_state = self.RS

        #

        if 'logzsol' in self.override:
            self.FSPS_args.update({'logzsol': self.override['logzsol']})
        # 60% chance of linearly-uniform metallicity range
        elif self.RS.rand() < .6:
            self.FSPS_args.update({
                'logzsol':
                np.log10(
                    ut.lin_transform(r1=[0., 1.],
                                     r2=[zsol.max(), zsol.min()],
                                     x=d_.rvs()))
            })
        # 10% chance of logarithmically-uniform
        else:
            self.FSPS_args.update({
                'logzsol':
                self.RS.uniform(np.log10(zsol.min()), np.log10(zsol.max()))
            })
Example #5
def do_trial(subj: Model, sample_interval: float) -> float:
    """This function takes a subject and an interval time, and goes through
    the process of an experiment trial.

    Args:
        subj (Model): The subject which will do the trial.
        sample_interval (float): The interval time defining the trial.

    Returns:
        float: The (re)production time.
    """
    # "Trials began with the presentation of a central fixation point for 1s,"
    subj.time += 1
    # "followed by the presentation of a warning stimulus ..."
    # "After a variable delay ranging from 0.25-0.85s drawn randomly from a
    # truncated exponential distribution, ..."
    subj.time += truncexpon(0.6, 0.25).rvs(1, random_state=rng)[0]
    # "two 100ms flashes separated by the sample interval were presented."
    subj.time += 0.1  # READY
    subj.time += sample_interval
    # "Production times, t_p, were measured from the center of the flash, (that
    # is, 50ms after its onset) to when the key was pressed"
    subj.time += 0.05  # SET

    # implement cognitive model here
    production_time = sample_interval

    subj.time += production_time  # GO
    return production_time
Example #6
def design_parameters(gap_params=None):
    """Generate a dictionary with default design parameters."""
    # Distributions of pulses per trial
    count = [1, 2, 3, 4, 5]
    count_pmf = trunc_geom_pmf(count, .25)

    # Distribution parameters in stimulus units
    means = -1.1, -0.9
    sd = .15

    # Distributions in log-likelihood ratio units
    llr_m, llr_sd = params_to_llr(means, sd)
    dh, dl = stats.norm(+llr_m, llr_sd), stats.norm(-llr_m, llr_sd)

    # Pulse gap duration
    if gap_params is None:
        gap_params = 3, 2, 2
    gap_dist = stats.truncexpon(*gap_params)

    # Design dictionary to pass to functions
    design = dict(count=count,
                  count_pmf=count_pmf,
                  means=means,
                  sds=sd,
                  llr_m=llr_m,
                  llr_sd=llr_sd,
                  dh=dh,
                  dl=dl,
                  gap_dist=gap_dist)

    return design
Example #7
    def loftedheight():
        # constants

        beta = (np.pi * 3.64**3) / (2.45**5)

        eta = (9.35**2) / (np.pi * g * Hc**(2 / 3))

        gamma = beta * eta**(5 / 2)

        # Maximum lofted height
        zmax = gamma * (
            (Pa / Pb) *
            (Cd / r0))**(3 / 2) * fire_line_intensity(dmc, dc)[0]**(5 / 3)

        # Exponential distribution for lofting heights (P(Z>zmax)=1%)
        lambd2 = np.log(100) / zmax

        # Truncated exponential distribution starting at H
        X = truncexpon(np.abs(zmax - h1) / (1 / lambd2),
                       loc=min(h1, zmax),
                       scale=1 / lambd2)

        # Generates one random value based on the exponential distribution
        Z = X.rvs(1)
        return [zmax, Z]
Example #8
    def __init__(self,cdts,Rcut,hyp,Dist,centre_init):
        """
        Constructor of the logposteriorModule
        """
        rad,thet        = Deg2pc(cdts,centre_init,Dist)
        c,r,t,self.Rmax = TruncSort(cdts,rad,thet,Rcut)
        self.pro        = c[:,2]
        self.cdts       = c[:,:2]
        self.Dist       = Dist
        #-------------- Finds the mode of the band -------
        band_all        = c[:,3]
        idv             = np.where(np.isfinite(c[:,3]))[0]
        band            = c[idv,3]
        kde             = st.gaussian_kde(band)
        x               = np.linspace(np.min(band),np.max(band),num=1000)
        self.mode       = x[kde(x).argmax()]
        print "Mode of band at ",self.mode

        #---- replace NaNs by mode -----
        idnv            = np.setdiff1d(np.arange(len(band_all)),idv)
        band_all[idnv]  = self.mode
        self.delta_band = band_all - self.mode
        #------------- poisson ----------------
        self.quadrants  = [0,np.pi/2.0,np.pi,3.0*np.pi/2.0,2.0*np.pi]
        self.poisson    = st.poisson(len(r)/4.0)
        #-------------- priors ----------------
        self.Prior_0    = st.norm(loc=centre_init[0],scale=hyp[0])
        self.Prior_1    = st.norm(loc=centre_init[1],scale=hyp[1])
        self.Prior_2    = st.uniform(loc=-0.5*np.pi,scale=np.pi)
        self.Prior_3    = st.halfcauchy(loc=0.01,scale=hyp[2])
        self.Prior_4    = st.halfcauchy(loc=0.01,scale=hyp[2])
        self.Prior_5    = st.truncexpon(b=hyp[3],loc=2.01,scale=hyp[4])
        self.Prior_6    = st.norm(loc=hyp[5],scale=hyp[6])
        print "Module Initialized"
Example #9
    def density_trunc_exp(self, lower, upper, scale):
        '''Samples x and y values in order to plot the truncated exponential distribution with the given parameters.'''

        b = (upper - lower) / scale
        x = np.linspace(lower, upper, 100)
        y = stats.truncexpon(b, loc=lower, scale=scale).pdf(x)
        return x, y
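
A minimal standalone sketch of the same parametrization used above (the lower, upper, and scale values here are arbitrary placeholders, not from the original class), showing how the returned x and y would be plotted:

import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

lower, upper, scale = 0.5, 3.0, 1.0          # hypothetical parameters
b = (upper - lower) / scale                  # shape in scipy's convention
x = np.linspace(lower, upper, 100)
y = stats.truncexpon(b, loc=lower, scale=scale).pdf(x)
plt.plot(x, y)
plt.xlabel('x')
plt.ylabel('pdf')
plt.show()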
Example #10
    def build_priors(self):
        """
        Builds the priors
        :return:
        """
        # samplingMult = 50
        # bandwidthScalar = 2.0
        # # build longitude, latitude and strike prior
        # raw_data = pd.read_excel('./InputData/Fixed92kmFaultOffset50kmgapPts.xlsx')
        # self.latlongstrikeprior = np.array(raw_data[['POINT_X', 'POINT_Y', 'Strike']])
        # distrb0 = gaussian_kde(self.latlongstrikeprior.T)
        #
        # #Garret and spencer chose this 18 June 2019
        # data2 = stats.norm.rvs(size = 1000,loc = np.log(8), scale = 0.05)
        # distrb1 = gaussian_kde(data2)
        #
        # dists = [distrb0, distrb1]

        depth_mu = 30000
        depth_std = 5000
        mindepth = 2500
        maxdepth = 50000
        minlon = 126
        latlon = LatLonPrior(self.fault, depth_mu, depth_std, mindepth,
                             maxdepth)
        mag = stats.truncexpon(b=3, loc=6.5)
        deltalogl = stats.norm(
            scale=0.18842320591492676)  # sample standard deviation from data
        deltalogw = stats.norm(
            scale=0.17186788334444705)  # sample standard deviation from data
        deltadepth = stats.norm(
            scale=5)  # in km to avoid numerically singular covariance matrix
        return Prior(latlon, mag, deltalogl, deltalogw, deltadepth)
Example #11
 def get_switching_points(self, nswitches, switch_frequency):
     b = 10 * switch_frequency
     s = switch_frequency
     l = 0.1 * switch_frequency
     switches = np.rint(
         stats.truncexpon(b, scale=s, loc=l).rvs(int(nswitches)))
     switches = np.maximum(1, switches)
     return switches.astype(int)
Example #12
    def __init__(self, lambd, trunc=1):
        self.lambd = lambd
        self.trunc = trunc

        if trunc:
            self.rv = truncexpon(trunc, scale=1 / lambd)
            self.expectation = self.rv.mean()
        else:
            self.expectation = 1. / lambd
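
A hedged sanity check of the two branches above (the enclosing class is not shown in the snippet): as trunc grows, the truncated mean should approach the untruncated expectation 1/lambd.

from scipy.stats import truncexpon

lambd = 2.0
for trunc in (1, 5, 20):
    # truncexpon(b=trunc, scale=1/lambd) truncates the exponential at trunc/lambd
    print(trunc, truncexpon(trunc, scale=1 / lambd).mean())
# the printed means approach 1/lambd = 0.5 as trunc increases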
Example #13
File: cspgen.py  Project: zpace/cspgen
    def sigma_gen(self):

        loc, scale = 10., 350.
        trunc_abs = 350.
        pdf_sigma = stats.truncexpon(b=((scale - loc) / trunc_abs),
                                     loc=loc,
                                     scale=scale)
        pdf_sigma.random_state = self.RS

        if 'sigma' in self.override:
            self.FSPS_args.update({'sigma': self.override['sigma']})
        elif 'sigma' in self.subsample_keys:
            self.FSPS_args.update(
                {'sigma': pdf_sigma.rvs(size=self.Nsubsample)})
        else:
            self.FSPS_args.update({'sigma': pdf_sigma.rvs()})
Example #14
def do_trial(subj: Model, sample_interval: float) -> float:
    """This function takes a subject and an interval time, and goes through
    the process of an experiment trial.

    Args:
        subj (Model): The subject which will do the trial.
        sample_interval (float): The interval time defining the trial.

    Returns:
        float: The (re)production time.
    """
    # "Trials began with the presentation of a central fixation point for 1s,"
    subj.time += 1
    # "followed by the presentation of a warning stimulus ..."
    # "After a variable delay ranging from 0.25-0.85s drawn randomly from a
    # truncated exponential distribution, ..."
    subj.time += truncexpon(0.6, 0.25).rvs(1, random_state=rng)[0]
    # "two 100ms flashes separated by the sample interval were presented."
    subj.time += 0.1  # READY
    subj.time += sample_interval

    # convert time to pulses and remember how many it took
    pulses = time_to_pulses(sample_interval)
    subj.add_encounter(
        Chunk(name=f'pf_{pulses}',
              slots={
                  'isa': 'pulse-fact',
                  'pulses': pulses
              }))

    # "Production times, t_p, were measured from the center of the flash, (that
    # is, 50ms after its onset) to when the key was pressed"
    subj.time += 0.05  # SET

    # retrieve the most activated memory
    request = Chunk(name='pulse-request', slots={'isa': 'pulse-fact'})
    chunk, latency = subj.retrieve(request)
    subj.add_encounter(chunk)
    subj.time += latency

    # convert pulse to time, then add and return the production time
    production_time = pulses_to_time(chunk.slots['pulses'])
    subj.time += production_time  # GO
    return production_time
Example #15
def generate_data(L,N):
    # L : time steps
    # N : number of nodes
    w_true = np.random.normal(0, 1., (N,N))
    x_min, x_max = -1., 1.
    x = np.zeros([L,N])
    x[0,:] = np.random.rand(1,N) #Initial values of x_i(0)

    #Generating sequences of x_i(t+1)
    for t in range(L-1):
        h = x[t,:].dot(w_true)
        for i in range(N):
            if h[i] != 0.:
                x_scale = 1./np.abs(h[i])
                sampling = stats.truncexpon(b=(x_max-x_min)/x_scale, loc=x_min, scale=x_scale) 
                #truncated exponential dist exp(x h[i]) for x ~ [-1, 1]
                sample = sampling.rvs(1)  # obtain one sample
                x[t+1, i] = -np.sign(h[i])*sample[0]
            else:
                x[t+1,i] = random.uniform(x_min, x_max)
    return w_true,x
Example #16
 def __init__(self, cdts, Rcut, hyp, Dist, centre_init):
     """
     Constructor of the logposteriorModule
     """
     rad, thet = Deg2pc(cdts, centre_init, Dist)
     c, r, t, self.Rmax = TruncSort(cdts, rad, thet, Rcut)
     self.pro = c[:, 2]
     self.cdts = c[:, :2]
     self.Dist = Dist
     print "There are ", len(self.cdts), " observations."
     # sys.exit()
     #------------- poisson ----------------
     self.quadrants = [
         0, np.pi / 2.0, np.pi, 3.0 * np.pi / 2.0, 2.0 * np.pi
     ]
     self.poisson = st.poisson(len(r) / 4.0)
     #-------------- priors ----------------
     self.Prior_0 = st.norm(loc=centre_init[0], scale=hyp[0])
     self.Prior_1 = st.norm(loc=centre_init[1], scale=hyp[1])
     self.Prior_2 = st.halfcauchy(loc=0.01, scale=hyp[2])
     self.Prior_3 = st.truncexpon(b=hyp[3], loc=2.01, scale=hyp[4])
     print "Module Initialized"
Example #17
def setup(config):
    """Extracts the data from the config object to create the SulawesiFault
    object, and then declares the scenario's initial prior, forward model, and
    covariance in order to create the SulawesiScenario.

    Parameters
    ----------
    config : Config object
        The config object that contains the default scenario data to use for
        the sampling. Essentially, this sets all the initial conditions for the
        bounds, prior, fault, etc.

    Returns
    -------
    MultiFaultScenario : MultiFaultScenario object
    """
    #Flores and Walanae fault objects
    with open(config.fault['walanae_data_path'], 'rb') as file:
        walanae_initialization_data = pickle.load(file)

    fault_initialization_data = [
        np.load(config.fault['flores_data_path']), walanae_initialization_data
    ]
    geoclaw_bounds = config.geoclaw_bounds
    bounds = [config.model_bounds_flo, config.model_bounds_wal]
    # Initialize the kernel for the Gaussian process fault. Strike, dip and
    #  depth will use the same kernel (the RBF kernel).
    flores_kernel = lambda x, y: GPR.rbf_kernel(x, y, sig=0.75)
    fault = [
        tb.fault.GaussianProcessFault( # Flores uses a GaussianProcessFault
            bounds=geoclaw_bounds,
            model_bounds=bounds[FAULT.FLORES],
            kers={
                'depth': flores_kernel,
                'dip': flores_kernel,
                'strike': flores_kernel,
                'rake': flores_kernel
            },
            noise={'depth': 1, 'dip': 1, 'strike': 1, 'rake': 1},
            **fault_initialization_data[FAULT.FLORES]
        ),
        tb.fault.ReferenceCurveFault( # Walanae uses a ReferenceCurveFault
            bounds=geoclaw_bounds,
            model_bounds=bounds[FAULT.WALANAE],
            **fault_initialization_data[FAULT.WALANAE]
        )
    ]

    # Priors
    # latitude/longitude
    depth_mu = [config.prior['depth_mu_flo'], config.prior['depth_mu_wal']]
    depth_std = [config.prior['depth_std_flo'], config.prior['depth_std_wal']]
    mindepth = [config.prior['mindepth_flo'], config.prior['mindepth_wal']]
    maxdepth = [config.prior['maxdepth_flo'], config.prior['maxdepth_wal']]

    lower_bound_depth = [
        (md - dmu) / dstd
        for md, dmu, dstd in zip(mindepth, depth_mu, depth_std)
    ]

    upper_bound_depth = [
        (md - dmu) / dstd
        for md, dmu, dstd in zip(maxdepth, depth_mu, depth_std)
    ]

    depth_dist = [
        stats.truncnorm(lb, ub, loc=dmu, scale=dstd) for lb, ub, dmu, dstd in
        zip(lower_bound_depth, upper_bound_depth, depth_mu, depth_std)
    ]

    latlon = [
        LatLonPrior(fault[FAULT.FLORES], depth_dist[FAULT.FLORES]),
        LatLonPrior(fault[FAULT.WALANAE], depth_dist[FAULT.WALANAE])
    ]

    # magnitude
    mag = [
        stats.truncexpon(b=config.prior['mag_b_flo'],
                         loc=config.prior['mag_loc_flo'],
                         scale=config.prior['mag_scale_flo']),
        stats.truncexpon(b=config.prior['mag_b_wal'],
                         loc=config.prior['mag_loc_wal'],
                         scale=config.prior['mag_scale_wal'])
    ]

    # delta_logl
    # sample standard deviation from data
    delta_logl = [
        stats.norm(scale=config.prior['delta_logl_std_flo']),
        stats.norm(scale=config.prior['delta_logl_std_wal'])
    ]

    # delta_logw
    # sample standard deviation from data
    delta_logw = [
        stats.norm(scale=config.prior['delta_logw_std_flo']),
        stats.norm(scale=config.prior['delta_logw_std_wal'])
    ]

    # depth offset in km to avoid numerically singular covariance matrix
    depth_offset = [
        stats.norm(scale=config.prior['depth_offset_std_flo']),
        stats.norm(scale=config.prior['depth_offset_std_wal'])
    ]

    dip_offset = [
        stats.norm(scale=config.prior['dip_offset_std_flo']),
        stats.norm(scale=config.prior['dip_offset_std_wal'])
    ]

    strike_offset = [
        stats.norm(scale=config.prior['strike_offset_std_flo']),
        stats.norm(scale=config.prior['strike_offset_std_wal'])
    ]

    rake_offset = [
        stats.norm(scale=config.prior['rake_offset_std_flo']),
        stats.norm(scale=config.prior['rake_offset_std_wal'])
    ]

    prior = [
        SulawesiPrior(latlon[FAULT.FLORES], mag[FAULT.FLORES],
                      delta_logl[FAULT.FLORES], delta_logw[FAULT.FLORES],
                      depth_offset[FAULT.FLORES], dip_offset[FAULT.FLORES],
                      strike_offset[FAULT.FLORES], rake_offset[FAULT.FLORES]),
        SulawesiPrior(latlon[FAULT.WALANAE], mag[FAULT.WALANAE],
                      delta_logl[FAULT.WALANAE], delta_logw[FAULT.WALANAE],
                      depth_offset[FAULT.WALANAE], dip_offset[FAULT.WALANAE],
                      strike_offset[FAULT.WALANAE], rake_offset[FAULT.WALANAE])
    ]

    # load gauges
    gauges = build_gauges()

    # Forward model
    config.fgmax['min_level_check'] = (
        len(config.geoclaw['refinement_ratios']) + 1)
    forward_model = [
        tb.GeoClawForwardModel(gauges, fault[FAULT.FLORES], config.fgmax,
                               config.geoclaw['dtopo_path']),
        tb.GeoClawForwardModel(gauges, fault[FAULT.WALANAE], config.fgmax,
                               config.geoclaw['dtopo_path'])
    ]

    # Proposal kernel
    lat_std = [
        config.proposal_kernel['lat_std_flo'],
        config.proposal_kernel['lat_std_wal']
    ]
    lon_std = [
        config.proposal_kernel['lon_std_flo'],
        config.proposal_kernel['lon_std_wal']
    ]
    mag_std = [
        config.proposal_kernel['mag_std_flo'],
        config.proposal_kernel['mag_std_wal']
    ]
    delta_logl_std = [
        config.proposal_kernel['delta_logl_std_flo'],
        config.proposal_kernel['delta_logl_std_wal']
    ]
    delta_logw_std = [
        config.proposal_kernel['delta_logw_std_flo'],
        config.proposal_kernel['delta_logw_std_wal']
    ]
    # in km to avoid singular covariance matrix
    depth_offset_std = [
        config.proposal_kernel['depth_offset_std_flo'],
        config.proposal_kernel['depth_offset_std_wal']
    ]
    dip_offset_std = [
        config.proposal_kernel['dip_offset_std_flo'],
        config.proposal_kernel['dip_offset_std_wal']
    ]
    strike_offset_std = [
        config.proposal_kernel['strike_offset_std_flo'],
        config.proposal_kernel['strike_offset_std_wal']
    ]
    rake_offset_std = [
        config.proposal_kernel['rake_offset_std_flo'],
        config.proposal_kernel['rake_offset_std_wal']
    ]

    # square for std => cov
    covariance = [
        np.diag(
            np.square([
                lat_std[FAULT.FLORES], lon_std[FAULT.FLORES],
                mag_std[FAULT.FLORES], delta_logl_std[FAULT.FLORES],
                delta_logw_std[FAULT.FLORES], depth_offset_std[FAULT.FLORES],
                dip_offset_std[FAULT.FLORES], strike_offset_std[FAULT.FLORES],
                rake_offset_std[FAULT.FLORES]
            ])),
        np.diag(
            np.square([
                lat_std[FAULT.WALANAE], lon_std[FAULT.WALANAE],
                mag_std[FAULT.WALANAE], delta_logl_std[FAULT.WALANAE],
                delta_logw_std[FAULT.WALANAE], depth_offset_std[FAULT.WALANAE],
                dip_offset_std[FAULT.WALANAE],
                strike_offset_std[FAULT.WALANAE],
                rake_offset_std[FAULT.WALANAE]
            ]))
    ]

    scenarios = [
        SulawesiScenario(prior[FAULT.FLORES], forward_model[FAULT.FLORES],
                         covariance[FAULT.FLORES]),
        SulawesiScenario(prior[FAULT.WALANAE], forward_model[FAULT.WALANAE],
                         covariance[FAULT.WALANAE])
    ]
    return MultiFaultScenario(scenarios)
Example #18
    def random_trunc_exp(self, N, lower, upper, scale):
        '''Draws N random values from the truncated exponential distribution.'''

        b = (upper - lower) / scale
        return stats.truncexpon(b, loc=lower,
                                scale=scale).rvs(N, random_state=self.random)
Example #19
                    delay = s.get_delay()
                    rttresult['value'][:,x][np.logical_and(rttresult['scheme'] == 'del', rttresult['mean'] == value[0])] = delay
                    s.join()
                    break


    #+++++++++++++++++++++RTT (exponential)++++++++++++++++++++++++++++++++++++++++++

    for x in range(repetitions):
        print(x)

        for value in means:
            lower, upper = 1, 1000
            mu, sigma = value[0], value[1]
            scale = mu
            E = stats.truncexpon(loc = lower, b = (upper-lower) / scale , scale = scale)
            rtt = E.rvs(size=1000)

            #++++++++++++++++++++++Sequential++++++++++++++++++++++++++++++
            clientq = queue.Queue()
            serverq = queue.Queue()
            ackq = queue.Queue()

            s = Server(4, ackq, clientq, serverq, "receive" , "sequential" , rtt, 10)
            c = Client(4, ackq, clientq, serverq, "send", "sequential")

            s.setName("Server")
            c.setName("Client")
            s.start()
            c.start()
            c.join()
Example #20
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import truncexpon

fig, ax = plt.subplots(1, 1)

b = 4.69
mean, var, skew, kurt = truncexpon.stats(b, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(truncexpon.ppf(0.01, b), truncexpon.ppf(0.99, b), 100)
ax.plot(x, truncexpon.pdf(x, b), 'r-', lw=5, alpha=0.6, label='truncexpon pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = truncexpon(b)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = truncexpon.ppf([0.001, 0.5, 0.999], b)
np.allclose([0.001, 0.5, 0.999], truncexpon.cdf(vals, b))
# True

# Generate random numbers:

r = truncexpon.rvs(b, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
Example #21
def all_dists():
    # dist params were taken from the scipy.stats official
    # documentation examples
    # Total - 89
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
Example #22
import time

import numpy as np
import scipy.stats as ss


class Timer:
    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start


standard_normal = ss.norm(loc=0, scale=1)
truncated_exponential = ss.truncexpon(b=np.inf, loc=4.5, scale=1)

for N in np.logspace(4, 8, base=10, num=5, dtype=int):
    # estimator (3)
    with Timer() as t1:
        y = standard_normal.rvs(size=N)
        estimator_1 = np.mean(y > 4.5)

    # estimator (4)
    with Timer() as t2:
        x = truncated_exponential.rvs(size=N)
        f = standard_normal.pdf(x)
        g = truncated_exponential.pdf(x)
        estimator_2 = np.mean(f / g)

    print(f"N = {N}")
Example #23
def rtexp(ntrials, lam, lower, upper, seed):
    a = float(lower)
    b = float(upper)
    x = lam
    smp = stats.truncexpon((b - a) / x, loc=a, scale=x).rvs(ntrials, random_state=seed)
    return smp
Example #24
def difexp(lam, lower, upper, mean):
    diff = stats.truncexpon((float(upper) - float(lower)) / float(lam),
                            loc=float(lower),
                            scale=float(lam)).mean() - float(mean)
    return abs(diff)
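
difexp is shaped like an objective function: it returns the absolute gap between the truncated exponential's analytic mean and a target mean. A hedged sketch of solving for the lam that hits a desired mean (the bounds and target values are illustrative, and it assumes the difexp above plus import scipy.stats as stats are in scope):

from scipy.optimize import minimize_scalar

# find lam such that truncexpon on [0.5, 3] with scale=lam has mean ~1.2
res = minimize_scalar(difexp, bounds=(0.01, 10.0), args=(0.5, 3.0, 1.2),
                      method='bounded')
print(res.x, difexp(res.x, 0.5, 3.0, 1.2))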
Example #25
def setup(config):
    """Extracts the data from the config object to create the BandaFault object, 
    and then declares the scenario's initial prior, forward model, and covariance 
    in order to create the BandaScenario. 
    
    Parameters
    ----------
    config : Config object
        The config object that contains the default scenario data to use for the sampling.
        Essentially, this sets all the initial conditions for the bounds, prior, fault, etc.
    
    Returns
    -------
    BandaScenario : BandaScenario object
    """
    # Banda Arc fault object
    arrays = np.load(config.fault['grid_data_path'])
    fault = tb.GridFault(bounds=config.model_bounds, **arrays)

    # Priors
    # latitude/longitude
    depth_mu = config.prior['depth_mu']
    depth_std = config.prior['depth_std']
    mindepth = config.prior['mindepth']
    maxdepth = config.prior['maxdepth']
    a, b = (mindepth - depth_mu) / depth_std, (maxdepth - depth_mu) / depth_std
    depth_dist = stats.truncnorm(a, b, loc=depth_mu, scale=depth_std)
    latlon = LatLonPrior(fault, depth_dist)

    # magnitude
    mag = stats.truncexpon(b=config.prior['mag_b'],
                           loc=config.prior['mag_loc'])

    # delta_logl
    delta_logl = stats.norm(scale=config.prior['delta_logl_std']
                            )  # sample standard deviation from data

    # delta_logw
    delta_logw = stats.norm(scale=config.prior['delta_logw_std']
                            )  # sample standard deviation from data

    # depth offset
    depth_offset = stats.norm(
        scale=config.prior['depth_offset_std']
    )  # in km to avoid numerically singular covariance matrix
    prior = BandaPrior(latlon, mag, delta_logl, delta_logw, depth_offset)

    # load gauges
    gauges = build_gauges()

    # Forward model
    config.fgmax['min_level_check'] = len(
        config.geoclaw['refinement_ratios']) + 1
    forward_model = tb.GeoClawForwardModel(gauges, fault, config.fgmax,
                                           config.geoclaw['dtopo_path'])

    # Proposal kernel
    lat_std = config.proposal_kernel['lat_std']
    lon_std = config.proposal_kernel['lon_std']
    mag_std = config.proposal_kernel['mag_std']
    delta_logl_std = config.proposal_kernel['delta_logl_std']
    delta_logw_std = config.proposal_kernel['delta_logw_std']
    depth_offset_std = config.proposal_kernel[
        'depth_offset_std']  #in km to avoid singular covariance matrix

    # square for std => cov
    covariance = np.diag(
        np.square([
            lat_std, lon_std, mag_std, delta_logl_std, delta_logw_std,
            depth_offset_std
        ]))

    return BandaScenario(prior, forward_model, covariance)
Example #26
def get_truncated_expon(scale=100, low=0, upp=1000):
    return truncexpon(b=(upp - low) / scale, loc=low, scale=scale)
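
A quick hedged usage check of the helper above (it assumes from scipy.stats import truncexpon is in scope; the sample size and seed are arbitrary): the frozen distribution should stay within [low, upp], and the sample mean should track the analytic mean.

import numpy as np
from scipy.stats import truncexpon

dist = get_truncated_expon(scale=100, low=0, upp=1000)
samples = dist.rvs(size=10000, random_state=0)
print(samples.min() >= 0, samples.max() <= 1000)   # support check
print(samples.mean(), dist.mean())                 # empirical vs analytic mean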
Example #27
Marton's method makes more sense if the point is to keep the hazard function as flat as possible.

@author: Han
"""

import scipy.stats as stats
import matplotlib.pyplot as plt
import numpy as np

lower, upper, scale = 0.5, 3, 1  # Delay
# lower, upper, scale = 1.5, 10, 3   # ITI
# lower, upper, scale = 50, 150, 30  # block
n = 10000

# Real truncated exponential
X = stats.truncexpon(b=(upper - lower) / scale, loc=lower, scale=scale)
truncexp = X.rvs(n)

# Old method
truncexp_old = np.random.exponential(scale, n) + lower
truncexp_old[truncexp_old > upper] = upper

# Plotting
fig = plt.figure(1)
fig.clf()

# Trunc exp
ax = fig.subplots(2, 2)
truncexp_hist, xx, _ = ax[0, 0].hist(truncexp, 100, density=True)
truncexp_hazard = truncexp_hist / np.flip(np.flip(truncexp_hist).cumsum())
ax[0, 0].set(title='TruncExp (scipy)', ylim=(0, max(truncexp_hist) * 1.5))
Example #28
def _rtexp(ntrials, lam, lower, upper, seed):
    a = float(lower)
    b = float(upper)
    np.random.seed(seed)
    smp = stats.truncexpon((b - a) / lam, loc=a, scale=lam).rvs(ntrials)
    return smp
Example #29
def list_of_force_angle_lists(num_forces, num_mags, num_angles_tang,
                              num_angles_inner, f_lower_bound, f_upper_bound,
                              delta_angle_inner):
    '''
    This function generates force lists.
    
    At most it will generate num_mags * num_angles_tang * num_angles_inner
    force lists.
    
    The process works as follows:
        1. The magnitude value are sampled from the truncated exponential distribution (truncated to lower and upper bounds)
        2. Then the following process is repeated num_angles_tang times:
            i. (num_forces - 1) forces are generated sequentially:
                a. The magnitude comes from a random normal distribution with mean = mag
                and std. dev = mag / 5
                b. The tangential component is generated by a random normal with mean =
                0 and std. dev = pi/12, which is to ensure that the tangential angle is mostly
                within -pi/4 and pi/4
                c. The interval [0, 2*pi] is divided into num_forces equal sub-intervals,
                such that these sub-intervals are delta_angle_inner apart. The position angle for each
                of the forces is drawn from a uniform distribution over the corresponding
                sub-interval.
            ii. The last force is calculated to ensure that the balance equations are satisfied
            iii. The forces are checked again to ensure that they are at least delta_angle_inner apart
            and each has the tangential component within -pi/4 to pi/4. This is necessary as
            the last force may have changed this. 
            iv. If the conditions are satisfied then we have a ready-to-go force list. We
            repetitively (num_angles_inner times) add this force list to the final list of force lists as follows:
                a. At each step, we shift all of the position angles by random angle (picked uniformly from [0, 2 pi]).
                b. We attach the resulting force list to the final list of force lists.
    
    In total, at most (num_angles_inner * num_angles_tang * num_mags) force lists corresponding to
    (physically feasible) particles are generated. The number of force lists may be smaller if, at some iteration of the loops,
    we cannot generate a physically feasible list within max_attempts = 10^6 attempts.
    '''
    # Initialize variables
    list_of_F_lists = []
    phi_init = 0  # phi_init helps produce the sub-intervals for position angles from the interval [0, 2*pi]
    delta = delta_angle_inner / 2  # delta serves to ensure that the sub-intervals for position angles are pi/6 apart
    shift = 2 * pi / num_forces  # shift is defined to produce the sub-intervals for position angles from the interval [0,2*pi]
    alpha_init = 0  # alpha_init and alpha_std_dev are parameters for the random normal distribution to produce tangential angles
    alpha_std_dev = pi / 12  # the std. dev was chosen so that alpha mostly stays within -pi/4 to pi/4
    max_attempts = 10**6  # max_attempts is defined to limit the number of attempts to produce a force list given a starting magnitude

    # draw mean force
    scale = 1 / 2
    rv = truncexpon(b=(f_upper_bound - f_lower_bound) / scale,
                    loc=f_lower_bound,
                    scale=scale)
    mags = rv.rvs(num_mags)
    for mag in mags:
        for i in range(num_angles_tang):
            # count attempts made to generate new force list
            attempt_count = 0
            while attempt_count < max_attempts:
                F_list = []
                for j in range(num_forces - 1):
                    # generate (num_forces - 1) forces acting on particle
                    f_mag = abs(np.random.normal(mag, mag / 5))
                    f_phi = np.random.uniform(
                        phi_init + j * shift + delta, phi_init +
                        (j + 1) * shift - delta) % (2 * pi)
                    f_alpha = np.random.normal(alpha_init, alpha_std_dev)
                    F_list.append(Force(f_mag, f_phi, f_alpha))
                # calculate last force to balance the rest
                f_last = calculate_last_force(F_list)
                F_list.append(f_last)

                # check that the generated forces are physically feasible
                check = [
                    calculate_conv_angle((f_last.get_phi() - f.get_phi()) %
                                         (2 * pi)) >= delta_angle_inner
                    for f in F_list[:-1]
                ]
                check += [(f.get_alpha() <= pi / 4
                           and f.get_alpha() >= -pi / 4) for f in F_list]
                check += [(f.get_mag() >= 0) for f in F_list]
                attempt_count += 1

                if all(check):
                    for ang_inner in range(num_angles_inner):
                        # add num_angles_inner rotations
                        rand_ang = np.random.uniform(0, 2 * pi)
                        F_list_new = [
                            Force(f.get_mag(),
                                  (f.get_phi() + rand_ang) % (2 * pi),
                                  f.get_alpha()) for f in F_list
                        ]
                        list_of_F_lists.append(F_list_new)
                    attempt_count = max_attempts
    return list_of_F_lists
Example #30
def truncexponential_sampling_fn(n, shift=0., scale=1., truncation=3.):
    return truncexpon(truncation/scale, shift, scale).rvs(n)
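
Usage sketch for the sampler above (it assumes from scipy.stats import truncexpon is in scope; the parameter values are illustrative): with shift=2, scale=0.5, and truncation=3 the support is [shift, shift + truncation] = [2, 5], since b = truncation/scale and truncexpon's support is [loc, loc + b*scale].

samples = truncexponential_sampling_fn(10000, shift=2., scale=0.5, truncation=3.)
print(samples.min() >= 2.0, samples.max() <= 5.0)   # both True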