def __init__(self, scenario_flag="Freeway_Free"):
     """
     Five scenarios are supported:
     Freeway_Night, Freeway_Free, Freeway_Rush,
     Urban_Peak, and Urban_Nonpeak.
     The PDFs of the vehicle speed and the inter-vehicle spacing are adapted
     from existing references.
     """
     if scenario_flag == "Freeway_Night":
         self.headway_random = expon(0.0, 1.0/256.41)
         meanSpeed = 30.93 #m/s
         stdSpeed = 1.2 #m/s
     elif scenario_flag == "Freeway_Free":
         self.headway_random = lognorm(0.75, 0.0, np.exp(3.4))
         meanSpeed = 29.15 #m/s
         stdSpeed = 1.5 #m/s
     elif scenario_flag == "Freeway_Rush":
         self.headway_random = lognorm(0.5, 0.0, np.exp(2.5))
         meanSpeed = 10.73 #m/s
         stdSpeed = 2.0 #m/s
     elif scenario_flag == "Urban_Peak":
         scale = 1.096
         c = 0.314
         loc = 0.0
         self.headway_random = fisk(c, loc, scale)
         meanSpeed = 6.083 #m/s
         stdSpeed = 1.2 #m/s
     elif scenario_flag == "Urban_Nonpeak":
         self.headway_random = lognorm(0.618, 0.0, np.exp(0.685)) 
         meanSpeed = 12.86 #m/s
         stdSpeed = 1.5 #m/s
     else:
          raise ValueError("Unsupported scenario_flag: %s" % scenario_flag)
     
     self.speed_random = norm(meanSpeed, stdSpeed)
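A minimal sampling sketch for one of the scenarios above (re-creating the frozen "Freeway_Free" distributions outside the class; the parameter values are copied from that branch):

import numpy as np
from scipy.stats import lognorm, norm

headway_random = lognorm(0.75, 0.0, np.exp(3.4))  # inter-vehicle spacing
speed_random = norm(29.15, 1.5)                   # vehicle speed [m/s]

headways = headway_random.rvs(size=5)
speeds = speed_random.rvs(size=5)
print(headways, speeds)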
def main(options):
    """
    The main function.
    """
    with open(options.results, 'rb') as fd:
        results = pickle.load(fd)
    for key in results:
        print('=' * 80)
        print(key, ':')
        print(str(results[key]))
        print('=' * 80)
    
    if options.catalysis is None:
        return

    log_q = results['log_q']
    print('Median\t95\\% Interval')
    for i in range(log_q.mu.shape[1]):
        mu = log_q.mu[0, i]
        s = np.sqrt(log_q.C[0, i, i])
        if i == log_q.mu.shape[1] - 1:
            rv = stats.lognorm(s, scale=np.exp(mu))
        else:
            rv = stats.lognorm(s, scale=np.exp(mu) / 180.)
        I = rv.interval(0.95)
        print('{0:1.4f} & ({1:1.4f}, {2:1.4f}) \\\\'.format(rv.median(), I[0], I[1]))
Example #3
    def plot_multiplicative(self, T, npaths=25, show_trend=True):
        """
        Plots for the multiplicative decomposition

        """
        # Pull out right sizes so we know how to increment
        nx, nk, nm = self.nx, self.nk, self.nm
        # Matrices for the multiplicative decomposition
        nu_tilde, H, g = self.multiplicative_decomp()

        # Allocate space (nm is the number of functionals - we want npaths for each)
        mpath_mult = np.empty((nm*npaths, T))
        mbounds_mult = np.empty((nm*2, T))
        spath_mult = np.empty((nm*npaths, T))
        sbounds_mult = np.empty((nm*2, T))
        tpath_mult = np.empty((nm*npaths, T))
        ypath_mult = np.empty((nm*npaths, T))

        # Simulate for as long as we wanted
        moment_generator = self.lss.moment_sequence()
        # Pull out population moments
        for t in range(T):
            tmoms = next(moment_generator)
            ymeans = tmoms[1]
            yvar = tmoms[3]

            # Lower and upper bounds - for each multiplicative functional
            for ii in range(nm):
                li, ui = ii*2, (ii+1)*2
                Mdist = lognorm(np.asscalar(np.sqrt(yvar[nx+nm+ii, nx+nm+ii])), 
                                scale=np.asscalar( np.exp( ymeans[nx+nm+ii]- \
                                                t*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])))
                Sdist = lognorm(np.asscalar(np.sqrt(yvar[nx+2*nm+ii, nx+2*nm+ii])),
                                scale = np.asscalar( np.exp(-ymeans[nx+2*nm+ii])))
                mbounds_mult[li:ui, t] = Mdist.ppf([.01, .99])
                sbounds_mult[li:ui, t] = Sdist.ppf([.01, .99])

        # Pull out paths
        for n in range(npaths):
            x, y = self.lss.simulate(T)
            for ii in range(nm):
                ypath_mult[npaths*ii+n, :] = np.exp(y[nx+ii, :])
                mpath_mult[npaths*ii+n, :] = np.exp(y[nx+nm + ii, :] - np.arange(T)*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])
                spath_mult[npaths*ii+n, :] = 1/np.exp(-y[nx+2*nm + ii, :])
                tpath_mult[npaths*ii+n, :] = np.exp(y[nx+3*nm + ii, :] + np.arange(T)*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])

        mult_figs = []

        for ii in range(nm):
            li, ui = npaths*(ii), npaths*(ii+1)
            LI, UI = 2*(ii), 2*(ii+1)

            mult_figs.append(self.plot_given_paths(T, ypath_mult[li:ui,:], mpath_mult[li:ui,:], 
                                                   spath_mult[li:ui,:], tpath_mult[li:ui,:], 
                                                   mbounds_mult[LI:UI,:], sbounds_mult[LI:UI,:], 1, 
                                                   show_trend=show_trend))
            mult_figs[ii].suptitle( r'Multiplicative decomposition of $y_{%s}$' % str(ii+1), fontsize=14)

        return mult_figs
Example #4
    def estimate_distribution(self, index_stats, queries, qrel=None):

        self._estimate_para(index_stats, queries, qrel)

        for qid in self._run.ranking:
            self._rel_distribution[qid] = lognorm(self._sigma1[qid], scale=math.exp(self._mu1[qid]))
            self._non_rel_distribution[qid] = lognorm(self._sigma0[qid], scale=math.exp(self._mu0[qid]))
def test__validate_input():
    """Testing validation of inputs."""

    # valid inputs must be an instance of the Input class
    invalid_workers = stats.lognorm(s=1.0)
    invalid_firms = stats.lognorm(s=1.0)

    with nose.tools.assert_raises(AttributeError):
        models.Model('positive', invalid_workers, invalid_firms,
                     production=valid_F, params=valid_F_params)
Example #6
    def __init__(self, 
            gamma=2, 
            beta=0.95, 
            alpha=0.90, 
            sigma=0.1, 
            grid_size=100):

        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
        self.sigma = sigma

        # == Set the grid interval to contain most of the mass of the
        # stationary distribution of the consumption endowment == #
        ssd = self.sigma / np.sqrt(1 - self.alpha**2)
        grid_min, grid_max = np.exp(-4 * ssd), np.exp(4 * ssd)
        self.grid = np.linspace(grid_min, grid_max, grid_size)
        self.grid_size = grid_size

        # == set up distribution for shocks == #
        self.phi = lognorm(sigma)
        self.draws = self.phi.rvs(500)

        # == h(y) = beta * int G(y,z)^(1-gamma) phi(dz) == #
        self.h = np.empty(self.grid_size)
        for i, y in enumerate(self.grid):
            self.h[i] = beta * np.mean((y**alpha * self.draws)**(1 - gamma))
Example #7
    def __init__(self, mu, sigma):
        self.mu = mu
        self.sigma = sigma

        # set dist before calling super's __init__
        self.dist = st.lognorm(sigma, scale=exp(mu))
        super(LogNormal, self).__init__()
Example #8
def log10norm(x, mu, sigma=1.0):
    """ Scale scipy lognorm from natural log to base 10
    x     : input parameter
    mu    : median of the distribution (used as the scale parameter)
    sigma : standard deviation of the underlying log10 gaussian
    """
    return stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)
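A quick numerical check of the base-10 scaling used above (a sketch; `mu` is treated as the linear-space median, matching `scale=mu` in the code): with `s = sigma * ln(10)` the scipy pdf equals the base-10 lognormal density written with log10.

import numpy as np
from scipy import stats

mu, sigma = 3.0, 0.5                      # illustrative values
x = np.linspace(0.5, 20.0, 7)

scipy_pdf = stats.lognorm(sigma * np.log(10), scale=mu).pdf(x)
base10_pdf = (np.exp(-(np.log10(x) - np.log10(mu)) ** 2 / (2 * sigma ** 2))
              / (x * sigma * np.log(10) * np.sqrt(2 * np.pi)))
print(np.allclose(scipy_pdf, base10_pdf))  # True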
Example #9
def plot(vals, name, bins=100, power=1, nmu=None, nstd=None):
    """Function accepts a dictionary of orf length - frequency pairs, and the length of the
	ORF of interest, also optional plot flag for if data histogram should be plotted. Fits a
	lognormal distribution to the orf length data. Returns dictionaries of the probabilites of
	each length, the Predicted p-value for each length based on log normal, the observed
	p-value for each length, and the p-value for the length of interest.
	"""
    # 	mu, std = log_normal(vals)
    loged = np.log(vals)
    if nmu is None and nstd is None:
        nmu, nstd = np.mean(loged), np.std(loged)
    lgn = lognorm([nstd], scale=math.e ** nmu)

    # Plot the fit line
    f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)

    ax1.scatter(range(len(vals)), sorted(vals))
    hi, pos, patch = ax2.hist(vals, bins, density=True)
    x = np.linspace(min(vals), max(vals), 500)
    y1 = lgn.pdf(x)
    #        y2 = n.pdf(x) ** power
    ax2.plot(x, y1, color="green", lw=3.0)
    #        ax2.plot(x, y2, color='red', lw=3.0)
    plt.text(
        0.4,
        max(hi) * 3 / 4,
        "E(X) = {:4f}\nVAR(X) = {:4f}\nSTD(X) = {:4f}".format(np.mean(vals), np.var(vals), np.std(vals)),
        fontsize=8,
    )
    plt.savefig(name)
    plt.close()
Example #10
def particlesInVolumeLogNorm(vol_frac, mu, sigma, particle_diameters):
    '''
    Function that calculates particle densities in a volume element.
    The particles are diameters are log-normally distributed (sigma, mu)
    and they have a given volume fraction.
    '''

    D = particle_diameters

    # Calculate particle density(particles per um ^ 3)
    N = lognorm(sigma, scale=np.exp(mu))
    # Weight factors of each particle size
    pdf = N.pdf(D)

    # Volume of particle having radius R[m ^ 3]
    Vsph = 4.0 / 3.0 * np.pi * (D / 2.0) ** 3.0

    # Particle volumes multiplied with weight factors = > volume distribution
    WV = pdf * Vsph

    # Total volume of the volume distribution
    Vtot = np.trapz(WV, D)
    # Number of particles in um ^ 3
    n_part = vol_frac / Vtot

    print('Number of particles in cubic micrometer = %.18f' % n_part)

    # Check, should give the volume fraction in %
    print("Volume fraction was: %.1f %%" %
          (np.trapz(n_part * pdf * Vsph, D) * 100))
    bins = pdf * (D[1] - D[0])
    # print(bins.sum())
    return(n_part * bins)
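A brief usage sketch with illustrative inputs (the volume fraction, log-parameters, and diameter grid below are assumptions, not values from the original project):

import numpy as np
from scipy.stats import lognorm

# hypothetical inputs: 1 % volume fraction, diameters in micrometers
diameters = np.linspace(0.5, 10.0, 200)
counts_per_bin = particlesInVolumeLogNorm(vol_frac=0.01, mu=np.log(2.0),
                                          sigma=0.4,
                                          particle_diameters=diameters)
print(counts_per_bin.sum())  # approximate particle count per cubic micrometer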
Example #11
    def __generateMieEffective(self,
                               n_particle,
                               n_host,
                               particle_mu,
                               particle_sigma,
                               effective_model=True,
                               wavelen_n=1000,
                               wavelen_max=1100.0,
                               wavelen_min=100.0,
                               particle_n=20,
                               particle_max=20.0,
                               particle_min=1.0):
        '''
        Private function.
        Generates new effective mie-data file for the database.
        '''

        if particle_n < 10:
            print("Too few particles to calculate "
                  "effective model: particle_n < 10")
            exit()
        n_x_rv = 10000
        n_tht = 91
        wavelengths = np.linspace(wavelen_min, wavelen_max, wavelen_n) / 1000.0
        p_diameters = np.linspace(particle_min, particle_max, particle_n)

        o_f = ("mie_eff_p-%dum-%dum-%d_" % (particle_min,
                                            particle_max,
                                            particle_n) +
               'np-%s_nh-%.2f_' % (n_particle.__format__('.2f'),
                                   n_host) +
               'wave-%.1fnm-%.1fnm-%d' % (wavelen_min,
                                          wavelen_max,
                                          wavelen_n) +
               '.hdf5')
        o_f = baseDir + '/' + o_f
        # Calculate particle distribution
        N = lognorm(particle_sigma, scale=np.exp(particle_mu))
        # Weight factors of each particle size
        pdf = N.pdf(p_diameters)
        pdf /= pdf.sum()

        weight = dict(zip(p_diameters, pdf))

        df = 0
        df = mie.generateMieDataEffective(wavelengths,
                                          p_normed_weights_dict=weight,
                                          number_of_rvs=n_x_rv,
                                          number_of_theta_angles=n_tht,
                                          n_particle=n_particle,
                                          n_silicone=n_host,
                                          p_diameters=p_diameters)
        print(df.info())
        mie.saveMieDataToHDF5([df],
                              particle_diameters=[p_diameters.mean()],
                              out_fname=o_f,
                              wavelengths=wavelengths * 1000.0)

        return(o_f)
Example #12
def lognorm(mu=1, sigma=1, phi=0):
    ''' Distribution of Y = exp(X) - phi,
        where X ~ Normal(mu, sigma)
        mu - mean of X
        sigma - standard deviation of X
    '''
    return stats.lognorm(sigma, loc=-phi, scale=np.exp(mu))
Example #13
    def test_ss_wage_flexible(self):
        w_grid = np.linspace(0.40000000000000002, 3.5, 40)

        sigma = .4
        mu = -(sigma ** 2) / 2
        ln_dist = lognorm(sigma, scale=np.exp(-(sigma) ** 2 / 2))
        zl, zh = ln_dist.ppf(.05), ln_dist.ppf(.95)
        z_grid = np.linspace(zl, zh, 22)

        params = {
            "lambda_": [0.0, "degree of rigidity"],
            "pi": [0.02, "target inflation"],
            "eta": [2.5, "elas. of subs among labor types"],
            "gamma": [0.5, "frisch elas. of labor supply"],
            "wl": [0.4, "wage lower bound"],
            "wu": [3.5, "wage upper bound"],
            "wn": [40, "wage grid point"],
            "beta": [0.97, "disount factor. check this"],
            "tol": [10e-6, "error tolerance for iteration"],
            "sigma": [sigma, "standard dev. of underlying normal dist"],
            'z_grid': (z_grid, 'a'),
            'w_grid': (w_grid, 'a'),
            'full_ln_dist': (ln_dist, 'a'),
            'mu': (mu, 'mean of underlying normal distribution.')}

        res_dict = run_one(params)
        expected = Interp(z_grid, ss_wage_flexible(params, shock=z_grid))
        actual = res_dict['ws']
        print(expected.Y)
        print(actual.Y)
        np.testing.assert_almost_equal(actual.Y, expected.Y, 5)
Example #14
def get_population_density_lognorm(size, upper, lower, log_mean, log_sd, mean, sd, position_marker):
	'''Returns a value of a lognormal population distribution.
	
	:param size: total size 
	:type size: float 
	:param upper: upper value of bin 
	:type upper:  float 
	:param lower: lower value of bin 
	:type lower:  float 
	:param log_mean: log mean 
	:type log_mean:  float 
	:param log_sd: log standard deviation 
	:type log_sd:  float 
	:param mean: mean 
	:type mean:  float 
	:param sd: standard deviation 
	:type sd:  float 
	:param position_marker: is the last/first bin of samples or not, -1: first bin; 1: last bin; 0:any bin between the first and the last
	:type position_marker: int 
	:returns: population density 
	'''
	distribution 	= lognorm(loc=log_sd,scale=log_mean)
	if position_marker: 
		cdf1 	= 1
		lower 	= upper
	else: 
		cdf1 	= logncdf(upper,log_mean,log_sd)
	cdf2 		= logncdf(lower,log_mean,log_sd)
	possibility_space = cdf1 - cdf2
	population 	= size * possibility_space
	population_int 	= population
	return population_int
def test_sklearn_():
    '''
    Test whether the booster indeed gets updated
    :return:
    '''
    Xtrain = np.random.randn(100,10)
    ytrain = np.random.randint(0,2,100)

    Xval = np.random.randn(20, 10)
    yval = np.random.randint(0, 2, 20)


    classifier = SHSklearnEstimator(model=RandomForestClassifier(n_estimators=4),\
                                      ressource_name='n_estimators')

    param_grid = {'max_depth': randint(1,10),
                   'min_impurity_decrease':lognorm(0.1)
                  }
    scoring = make_scorer(accuracy_score)
    successiveHalving = SuccessiveHalving(
        estimator=classifier,
        n = 10,
        r = 100,
        param_grid=param_grid,
        ressource_name='n_estimators',
        scoring=scoring,
        n_jobs=1,
        cv=None,
        seed=0
    )

    T = successiveHalving.apply(Xtrain,ytrain,Xval,yval)
    print(T)

    assert(True)
Example #16
    def __init__(self, gamma, beta, alpha, sigma, grid=None):
        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
        self.sigma = sigma

        # == set up grid == #
        if grid is None:
            (self.grid, self.grid_min,
             self.grid_max, self.grid_size) = self._new_grid()
        else:
            self.grid = np.asarray(grid)
            self.grid_min = min(grid)
            self.grid_max = max(grid)
            self.grid_size = len(grid)

        # == set up distribution for shocks == #
        self.phi = lognorm(sigma)

        # == set up integration bounds. 4 Standard deviations. Make them
        # private attributes b/c users don't need to see them, but we
        # only want to compute them once. == #
        self._int_min = np.exp(-4.0 * sigma)
        self._int_max = np.exp(4.0 * sigma)

        # == Set up h from the Lucas Operator == #
        self.h = self._init_h()
  def testTransformedDistribution(self):
    g = ops.Graph()
    with g.as_default():
      mu = 3.0
      sigma = 2.0
      # Note: the Jacobian callable only works for this example; more generally
      # you may or may not need a reduce_sum.
      log_normal = self._cls()(
          distribution=ds.Normal(loc=mu, scale=sigma),
          bijector=bs.Exp(event_ndims=0))
      sp_dist = stats.lognorm(s=sigma, scale=np.exp(mu))

      # sample
      sample = log_normal.sample(100000, seed=235)
      self.assertAllEqual([], log_normal.event_shape)
      with self.test_session(graph=g):
        self.assertAllEqual([], log_normal.event_shape_tensor().eval())
        self.assertAllClose(
            sp_dist.mean(), np.mean(sample.eval()), atol=0.0, rtol=0.05)

      # pdf, log_pdf, cdf, etc...
      # The mean of the lognormal is around 148.
      test_vals = np.linspace(0.1, 1000., num=20).astype(np.float32)
      for func in [[log_normal.log_prob, sp_dist.logpdf],
                   [log_normal.prob, sp_dist.pdf],
                   [log_normal.log_cdf, sp_dist.logcdf],
                   [log_normal.cdf, sp_dist.cdf],
                   [log_normal.survival_function, sp_dist.sf],
                   [log_normal.log_survival_function, sp_dist.logsf]]:
        actual = func[0](test_vals)
        expected = func[1](test_vals)
        with self.test_session(graph=g):
          self.assertAllClose(expected, actual.eval(), atol=0, rtol=0.01)
    def __init__(self, gamma=2, beta=0.95, alpha=0.90, sigma=0.1, grid=None):
        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
        self.sigma = sigma

        # == set up grid == #
        if grid is None:
            (self.grid, self.grid_min,
             self.grid_max, self.grid_size) = self._new_grid()
        else:
            self.grid = np.asarray(grid)
            self.grid_min = min(grid)
            self.grid_max = max(grid)
            self.grid_size = len(grid)

        # == set up distribution for shocks == #
        self.phi = lognorm(sigma)

        # == set up integration bounds. 4 Standard deviations.
        self._int_min = np.exp(-4.0 * sigma)
        self._int_max = np.exp(4.0 * sigma)

        # == initial h to iterate from == #
        self.h = self._init_h()
Example #19
 def set_distribution(self, mean, mode):
     """
     Create lognormal distribution from given mean and mode.
     Distances are converted to km to prevent overflow.
     """
     scale = np.exp(mean)
     s = np.sqrt(np.log(scale / float(mode)))
     return scs.lognorm(s=s, scale=scale)
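A quick consistency check (a sketch with illustrative numbers; here `mean` is taken as the mean of the underlying normal, i.e. the log of the median): the mode of the returned distribution reproduces the `mode` argument, since a lognormal's mode is scale * exp(-s**2).

import numpy as np
import scipy.stats as scs

mean, mode = np.log(300.0), 120.0           # illustrative values
scale = np.exp(mean)
s = np.sqrt(np.log(scale / float(mode)))
dist = scs.lognorm(s=s, scale=scale)

x = np.linspace(1.0, 1000.0, 200001)
print(x[np.argmax(dist.pdf(x))])            # ~120, i.e. the requested mode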
Example #20
def lognorm_mean(mean, sigma):
    """ returns a lognormal distribution parameterized by its mean and a spread
    parameter `sigma` """
    if sigma == 0:
        return DeterministicDistribution(mean)
    else:
        mu = mean * np.exp(-0.5 * sigma**2)
        return stats.lognorm(scale=mu, s=sigma)
Example #21
def lognorm_mean_var(mean, variance):
    """ returns a lognormal distribution parameterized by its mean and its
    variance. """
    if variance == 0:
        return DeterministicDistribution(mean)
    else:
        scale, sigma = lognorm_mean_var_to_mu_sigma(mean, variance, 'scipy')
        return stats.lognorm(scale=scale, s=sigma)
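`lognorm_mean_var_to_mu_sigma` is not shown here; a minimal sketch of the standard moment-matching conversion it presumably performs (in scipy's `scale`/`s` convention):

import numpy as np
from scipy import stats

def lognorm_mean_var_to_mu_sigma_sketch(mean, variance):
    """Convert a desired mean and variance into scipy's (scale, s)."""
    sigma2 = np.log(1.0 + variance / mean**2)          # variance of log(X)
    scale = mean / np.sqrt(1.0 + variance / mean**2)   # equals exp(E[log X])
    return scale, np.sqrt(sigma2)

scale, s = lognorm_mean_var_to_mu_sigma_sketch(2.0, 0.5)
dist = stats.lognorm(scale=scale, s=s)
print(dist.mean(), dist.var())   # ~2.0 and ~0.5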
Example #22
def estimate_smoothG_cutoff_lognorm_robust(smoothG, fdr=0.05, independent=False, modefunc=half_sample_mode, madmult=5.2):
    lmu, ls2 = robust_lognormal_mean_var(smoothG, modefunc=modefunc, madmult=madmult)
    nulldist = stats.lognorm(math.sqrt(ls2),scale=math.exp(lmu))
    pvals = [nulldist.sf(i) for i in smoothG]
    pcutoff = BH_fdr_procedure(pvals, fdr, independent=independent)
    if pcutoff is None:
        return None, None
    gcutoff = nulldist.isf(pcutoff)
    return pcutoff, gcutoff
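`BH_fdr_procedure` is not shown above; the sketch below assumes it implements the standard Benjamini-Hochberg step-up rule (with a Benjamini-Yekutieli correction when the tests are not independent), returning the largest admissible p-value or None:

import numpy as np

def bh_fdr_procedure_sketch(pvals, fdr=0.05, independent=False):
    """Benjamini-Hochberg step-up rule (assumed behaviour of BH_fdr_procedure)."""
    p = np.sort(np.asarray(pvals))
    n = len(p)
    # harmonic-sum correction for arbitrary dependence (Benjamini-Yekutieli)
    c = 1.0 if independent else np.sum(1.0 / np.arange(1, n + 1))
    thresholds = fdr * np.arange(1, n + 1) / (n * c)
    passed = np.nonzero(p <= thresholds)[0]
    return p[passed[-1]] if passed.size else None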
Example #23
    def __init__(self, mu, sigma, a, b):
        super(Lognormal, self).__init__()

        self.__mu = mu
        self.__sigma = sigma
        self._dist = lognorm(sigma, scale=mu)

        self.__a = a
        self.__b = b
Example #24
def test__validate_input():
    """Testing validation of inputs."""

    # valid inputs must be an instance of the Input class
    invalid_input = stats.lognorm(s=1.0)

    with nose.tools.assert_raises(AttributeError):
        mod = model.AssortativeMatchingModelLike()
        mod.input2 = invalid_input
Example #25
    def __init__(self, mu, sigma, bounds=None):
        self.mu = mu
        self.sigma = sigma
        self.scale = np.exp(mu)
        self.log_s = np.log(sigma)

        self.distribution = lognorm(sigma, scale=np.exp(mu))
        self._bounds = (0, np.inf)

        super().__init__(self)
Example #26
 def __init__(self, lossType = "Large", 
              sevDist = ["lognorm", 0, 1],
              freqDist = [ "poisson", 1 ]
              ):
     self.sevDist = None
     self.freqDist = None
     
     if lossType == "Cat":
         #do the thing for cat losses
         self.sevDist = sp.lognorm(sevDist[2], scale = np.exp(sevDist[1]))
     elif lossType == "Att":
         self.sevDist = sp.lognorm(sevDist[2], scale = np.exp(sevDist[1]))
         self.freqDist = None
     elif lossType == "Large":
         if sevDist[0] == "lognorm" or sevDist[0] == "lognormal":
             self.sevDist = sp.lognorm(sevDist[2], scale = np.exp(sevDist[1]))
         elif sevDist[0] == "gamma":
             self.sevDist = sp.gamma(sevDist[1], scale = sevDist[2])
         self.freqDist = sp.poisson(freqDist[1])
Example #27
def estimate_smoothG_cutoff_lognorm_theory(smoothG, tmean, tvar, fdr=0.05, independent=False):
    tmean, tvar = float(tmean), float(tvar)
    lmu, ls2 = lognormal_mean_var(tmean, tvar)
    nulldist = stats.lognorm(math.sqrt(ls2),scale=math.exp(lmu))
    pvals = [nulldist.sf(i) for i in smoothG]
    pcutoff = BH_fdr_procedure(pvals, fdr, independent=independent)
    if pcutoff is None:
        return None, None
    gcutoff = nulldist.isf(pcutoff)
    return pcutoff, gcutoff
Example #28
def lognormal_diff(tau, amp, mu, sigma):
    print(amp, mu, sigma)
    dist = lognorm([sigma], loc=mu * 1000)
    n = 10000
    # taus = dist.rvs(n)
    # (a,b) = dist.interval(0.9)
    (a, b) = (max(1e-4, mu - sigma), mu + 100 * sigma)
    taus = np.logspace(np.log(a), np.log(b), n)
    G = amp * np.sum(dist.pdf(taus) * np.exp(-tau[:, newaxis] / taus[newaxis, :]), axis=1)
    return G
 def choose(self):
     if self.user_class == 'HF':
         self.name = "Log-norm"
         peak_hours_for_iat_hf = [1, 2, 3, 4, 5, 6]
         if self.hour in peak_hours_for_iat_hf:
             lognorm_shape, lognorm_scale, lognorm_location = 4.09174469261446, 1.12850165892419, 4.6875
         else:
             lognorm_shape, lognorm_scale, lognorm_location = 3.93740014906562, 0.982210300411203, 3
         return lognorm(lognorm_shape, loc=lognorm_location, scale=lognorm_scale)
     elif self.user_class == 'HO':
         self.name = "Gamma"
         peak_hours_for_iat_ho = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
         if self.hour in peak_hours_for_iat_ho:
             gamma_shape, gamma_rate, gamma_location = 1.25170029089175, 0.00178381168026473, 0.5
         else:
             gamma_shape, gamma_rate, gamma_location = 1.20448161464647, 0.00177591076721503, 0.5
         return gamma(gamma_shape, loc=gamma_location, scale=1. / gamma_rate)
     elif self.user_class == 'MF':
         self.name = "Gamma"
         peak_hours_for_iat_mf = [1, 2, 3, 4, 5, 6, 7, 22, 23]
         if self.hour in peak_hours_for_iat_mf:
             gamma_shape, gamma_rate, gamma_location = 2.20816848575484, 0.00343216949000565, 1
         else:
             gamma_shape, gamma_rate, gamma_location = 2.03011412986896, 0.00342699308280547, 1
         return gamma(gamma_shape, loc=gamma_location, scale=1. / gamma_rate)
     elif self.user_class == 'MO':
         self.name = "Gamma"
         peak_hours_for_iat_mo = [1, 2, 3, 4, 5, 6]
         if self.hour in peak_hours_for_iat_mo:
             gamma_shape, gamma_rate, gamma_location = 1.29908195595742, 0.00163527376977441, 0.5
         else:
             gamma_shape, gamma_rate, gamma_location = 1.19210494792398, 0.00170354443324898, 0.5
         return gamma(gamma_shape, loc=gamma_location, scale=1. / gamma_rate)
     elif self.user_class == 'LF':
         peak_hours_for_iat_lf = [1, 2, 3, 4, 5, 6, 7]
         if self.hour in peak_hours_for_iat_lf:
             self.name = "Gamma"
             gamma_shape, gamma_rate, gamma_location = 1.79297773527656, 0.00191590321039876, 2
             return gamma(gamma_shape, loc=gamma_location, scale=1. / gamma_rate)
         else:
             self.name = "Weibull"
             weibull_c_shape, weibull_scale, weibull_location = 1.1988117443903, 827.961760834184, 1
             return weibull_min(weibull_c_shape, loc=weibull_location, scale=weibull_scale)
     elif self.user_class == 'LO':
         peak_hours_for_iat_lo = [2, 3, 4, 10, 11, 12, 13, 14, 15, 17, 18, 19, 20]
         if self.hour in peak_hours_for_iat_lo:
             self.name = "Weibull"
             weibull_c_shape, weibull_scale, weibull_location = 0.850890858519732, 548.241539446292, 1
             return weibull_min(weibull_c_shape, loc=weibull_location, scale=weibull_scale)
         else:
             self.name = "Gamma"
             gamma_shape, gamma_rate, gamma_location = 0.707816241615835, 0.00135537879658998, 1
             return gamma(gamma_shape, loc=gamma_location, scale=1. / gamma_rate)
     else:
         raise Exception('The user class %s does not exist' % self.user_class)
Example #30
def log_normal(mean, mode):
    """Creating log normal, by first calculating
        sigma and mu by manipulating these equations:
        - mode = np.exp(mu - sig**2)
        - mean = np.exp(mu + (sig**2/2))
        then using the scipy.stats package.
        """
    sigma = np.sqrt((2. / 3.) * (np.log(mean) - np.log(mode)))
    mu = ((2. * np.log(mean)) + np.log(mode)) / 3.
    lognorm = stats.lognorm(sigma, loc=0., scale=np.exp(mu))
    return lognorm
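A quick check of the algebra above (illustrative numbers): solving the two displayed equations for sigma and mu recovers the requested mean and mode.

import numpy as np

mean, mode = 10.0, 6.0                                   # illustrative, mean > mode
sigma = np.sqrt((2. / 3.) * (np.log(mean) - np.log(mode)))
mu = ((2. * np.log(mean)) + np.log(mode)) / 3.

print(np.isclose(np.exp(mu + sigma**2 / 2.), mean))      # lognormal mean -> True
print(np.isclose(np.exp(mu - sigma**2), mode))           # lognormal mode -> True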
Example #31
def solve_compound(solFunc,
                   parameters,
                   hyperparameter,
                   N,
                   parIdx=3,
                   distribution='normal',
                   recurrence=False,
                   precision=50):
    """Obtain a compound distribution for the model in solfunc.

    Arguments:
    solFunc -- The solution function over which to compound
    parameters -- List of parameters accepted by solFunc
    hyperparameter -- Standard deviation of the compounding distribution
    N -- Maximal mRNA copy number. The distribution is evaluated for n=0:N-1

    Keyword arguments:
    parIdx -- Index of the parameter over which the solution is compounded
    distribution -- String specifying the type of compounding distribution
    recurrence -- Boolean specifying if compounding is over recurrence terms
    precision -- Integer specifying the precision used by the Decimal class
    """

    # Specify some hyperparameters governing integration accuracy
    cdfMax = 0.999
    nTheta = 200

    # Set up the parameter distribution
    m, s = parameters[parIdx], hyperparameter
    if distribution == 'normal':
        a, b = (0 - m) / s, 10000
        dist = st.truncnorm(a, b, m, s)
    elif distribution == 'gamma':
        theta = s**2 / m
        k = (m / s)**2
        dist = st.gamma(k, scale=theta)
    elif distribution == 'lognormal':
        mu = np.log(m / np.sqrt(1 + (s / m)**2))
        sg = np.sqrt(np.log(1 + (s / m)**2))
        dist = st.lognorm(s=sg, scale=np.exp(mu))
    else:
        print('Invalid distribution selected')
        return

    # Set up parameter vector
    thetMax = dist.ppf(cdfMax)
    thetMin = dist.ppf(1 - cdfMax)
    thetVec = np.linspace(thetMin, thetMax, nTheta)
    dThet = thetVec[1] - thetVec[0]
    P = np.zeros(N)
    parMod = deepcopy(parameters)

    # If operating on the recurrence terms, need to convert to Decimal
    if recurrence:
        P = np.array([Decimal(p) for p in P])
        dThet = Decimal(dThet)

    # Evaluate distribution for each theta and add contribution
    for thet in thetVec:
        parMod[parIdx] = thet
        if recurrence:
            P += np.array(solFunc(parMod, N, precision=precision)) * Decimal(
                dist.pdf(thet))
        else:
            P += solFunc(parMod, N) * dist.pdf(thet)

    P *= dThet
    return P
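A minimal usage sketch (the solution function below is a stand-in, not one from the original code base): compound a Poisson distribution over a gamma-distributed rate.

import numpy as np
from scipy import stats

def poisson_solfunc(parameters, N):
    """Stand-in solution function: Poisson pmf with rate parameters[3]."""
    return stats.poisson(parameters[3]).pmf(np.arange(N))

params = [1.0, 1.0, 1.0, 5.0]         # only index 3 (the compounded rate) matters
P = solve_compound(poisson_solfunc, params, hyperparameter=1.5, N=30,
                   parIdx=3, distribution='gamma')
print(P.sum())                        # close to 1 when the grid resolves the mass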
Example #32
from pyNetica import Node, Network
from scipy import stats
import numpy as np
import itertools
import sys

from bnexample1_funcs import form2y

# random variables
rvX1 = stats.lognorm(1., scale=np.exp(0))
rvX3 = stats.lognorm(1., scale=np.exp(3*np.sqrt(2)))

# create nodes
x1 = Node("X1", parents=None, rvname='lognormal', rv=rvX1)
x2 = Node("X2", parents=[x1], rvname='continuous')
y = Node("Y", parents=[x2], rvname='discrete')

# discretize continuous rv
x1num = 5
x2num = 5
m = x1.rv.stats('m'); s = np.sqrt(x1.rv.stats('v'))
#lb = np.maximum(0, m-2*s); ub = m+2*s
lb = 0.; ub = m+1.5*s
x1names = x1.discretize(lb, ub, x1num, infinity='+')
x2names = x2.discretize(lb*10., ub*10., x2num, infinity='+')

# calculate and assign CPT
# node X1
x1cpt = x1.rv.cdf(x1.bins[1:]) - x1.rv.cdf(x1.bins[:-1])
x1cpt = x1cpt[np.newaxis,:]
x1.assign_cpt(x1cpt, statenames=x1names)
Example #33
def get_random_rect_sz_HARDCODE():
    shape, scale = 0.7635779560378387, 0.07776496289182451
    dist = stats.lognorm(shape, 0.0, scale)
    size = dist.rvs()
    size = size * 0.9671784150570207 + 0.007142151004612083
    return (size)
Example #34
 def errfunc(mu_, sig_):
     N = lognorm(sig_, scale=np.exp(mu_))
      # minimize the difference between the D10 / D90 targets and the
      # cumulative distribution; weight the D10 term more heavily (factor 2)
     zero = 2 * np.abs(0.1 - N.cdf(D10)) + np.abs(0.9 - N.cdf(D90))
     return (zero)
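A sketch of how such an objective could be minimised (the D10/D90 values, starting point, and optimiser choice are illustrative assumptions):

import numpy as np
from scipy.stats import lognorm
from scipy.optimize import minimize

D10, D90 = 2.0, 12.0                  # hypothetical percentile diameters

def errfunc(mu_, sig_):
    sig_ = abs(sig_)                  # guard against negative trial values
    N = lognorm(sig_, scale=np.exp(mu_))
    return 2 * np.abs(0.1 - N.cdf(D10)) + np.abs(0.9 - N.cdf(D90))

res = minimize(lambda p: errfunc(*p), x0=[np.log(5.0), 0.5], method='Nelder-Mead')
print(res.x)                          # fitted (mu, sigma)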
Example #35
# == NON-UNIFORM SIMULATION =========================
'''
Simulate an array of N non-uniform droplets being linearly cooled.
steps:
1.  Generate N areas from a given distribution
2.  Generate N uniformly distributed random numbers in [0,1)
3.  Multiply each area by J_HET and generate the liquid probability curve
    as a function of temperature for that freezing rate.
4.  Map the corresponding random number to the liquid probability curve
    corresponding to the given droplet.
'''

fig, ax = plt.subplots(1, figsize=(7, 5))

rv = lognorm(1.5)
LOWER, UPPER = rv.ppf(0.001) / SCALER, rv.ppf(0.999) / SCALER
areas = rv.rvs(N) / SCALER  # [cm^2]
ps = np.random.random(N)  # probability for each droplet
mean_area = rv.mean() / SCALER
std_area = rv.std() / SCALER

Ts = T_freeze(ps, areas, B, T_0)  # Simulated freezing temperatures

data = np.c_[areas, ps, Ts]

Ts.sort()

frac = [i / N for i in range(1, N + 1)]  # Liquid proportion

Ts_fit = np.linspace(Ts[0], Ts[-1], 250)
Example #36
def ln_log10norm(x, mu, sigma=1.0):
    """ Natural log of base 10 lognormal """
    return np.log(stats.lognorm(sigma * np.log(10), scale=mu).pdf(x))
Example #37
def lognorm(x, mu, sigma=1.0):
    """ Log-normal function from scipy """
    return stats.lognorm(sigma, scale=mu).pdf(x)
try:
    from ISP_mystyle import setFonts, showData

except ImportError:
    # Fall back to simple no-op replacements if ISP_mystyle is unavailable
    def setFonts(*options):
        return

    def showData(*options):
        plt.show()
        return


# Generate the data
x = np.logspace(-9, 1, 1001) + 1e-9
lnd = stats.lognorm(2)
y = lnd.pdf(x)

# Generate 2 plots, side-by-side
sns.set_style('ticks')
setFonts(18)
fig, axs = plt.subplots(1, 2, sharey=True)
sns.set_context('poster')

# Left plot: linear scale on x-axis
axs[0].plot(x, y)
axs[0].set_xlim(-0.5, 8)
axs[0].set_xlabel('x')
axs[0].set_ylabel('pdf(x)')

# Right plot: logarithmic scale on x-axis
Example #39
# in incomes.txt came from the distribution in part (b)
log_lik_h0 = log_lik_norm(income, mu, sigma, cutoff)
log_lik_mle = log_lik_norm(income, mu_MLE, sig_MLE, cutoff)
LR_val = 2 * (log_lik_mle - log_lik_h0)
pval_h0 = 1.0 - sts.chi2.cdf(LR_val, 2)  #area under the graph

print('1-d. Chi squared of H0 with 2 degrees of freedom p-value is ', pval_h0)
print(
    'P-value is in the rejection region, so we conclude that the data\n'
    ' in income.txt did not come from the distribution in part (b).')
print("")

# 1 - e

# According to the part (c), estimated the probability
lognorm_mle = sts.lognorm(s=sig_MLE, scale=np.exp(mu_MLE))
cdf_mle_100k = lognorm_mle.cdf(100000)
cdf_mle_75k = lognorm_mle.cdf(75000)

gt_than_100k = 1 - cdf_mle_100k
ls_than_75k = cdf_mle_75k

print(
    '1-e. The probability that the student will earn more than $100,000 is \n {:.2f}%.'
    .format(gt_than_100k * 100))
print(
    'The probability that the student will earn less than $75,000 is \n {:.2f}%.'
    .format(ls_than_75k * 100))
print("")
print("")
'''
Example #40
 def __call__(self, mean, stddev):
     func = lognorm(scale=np.exp(mean), s=stddev)
     return func.cdf
# In[85]:

#4
for parametro in informacoes:
    a = sc.probplot(parametro.ordem,
                    dist=sc.expon(scale=(1 / parametro.eLambda)),
                    plot=plot)
    plot.title('Exponencial x %s' % parametro.nome)
    plot.show()
    b = sc.probplot(parametro.ordem,
                    dist=sc.norm(loc=parametro.media, scale=parametro.var),
                    plot=plot)
    plot.title('Normal x %s' % parametro.nome)
    plot.show()
    c = sc.probplot(parametro.ordem,
                    dist=sc.lognorm(s=(parametro.logvar)**0.5,
                                    scale=math.exp(parametro.logmedia)),
                    plot=plot)
    plot.title('Lognormal x %s' % parametro.nome)
    plot.show()
    d = sc.probplot(parametro.ordem,
                    dist=sc.weibull_min(c=parametro.wshape,
                                        loc=parametro.wloc,
                                        scale=parametro.wscale),
                    plot=plot)
    plot.title('Weibull x %s' % parametro.nome)
    plot.show()

# In[86]:

#5
d = 1.36 / ((len(idade.dados))**0.5)  # appropriate d for this case
Example #42
 def saturation(self):
     """ Upper bound on support. """
     return st.lognorm(self.sigma, scale=np.exp(self.mu)).ppf(0.999)
Example #43
 def saturation(self):
     """ Upper bound on support. """
     return st.lognorm(self.loc[2], scale=np.exp(self.scale[2])).ppf(0.999)
Example #44
# specify prior distributions using stats.scipy for each parameter (independently)
# available options (univariate): https://docs.scipy.org/doc/scipy/reference/stats.html

from scipy import stats

from spux.distributions.tensor import Tensor
from spux.utils import transforms

distributions = {}

# model parameters
distributions['drift'] = stats.uniform(loc=-1, scale=2)
distributions['volatility'] = stats.uniform(loc=0.2, scale=1)

# observational error model parameters
distributions['error'] = stats.lognorm(**transforms.logmeanstd(logm=1, logs=1))

# construct a joint distribution for a vector of independent parameters by tensorization
prior = Tensor(distributions)

from units import units
prior.setup(units=units['parameters'])
Example #45
import numpy as np

from utils import ContinuousDistribution, DiscreteDistribution
from MDP.mdp_solver import ValueIteration

# # searching robot environment
# state_names = ['high', 'low']
# action_names = ['wait', 'recharge', 'search']
# P = np.array([[[1, 0], [1, 0], [0.8, 0.2]], [[0, 1], [1, 0], [0.6, 0.4]]])
# R = np.array([[[2, 0], [0, 0], [5, 5]], [[0, 2], [0, 0], [-3, 5]]])



# bidding MDP
n_bidders = 10
dst = stats.lognorm(s=0.5, loc=0, scale=np.exp(1))
dst = stats.uniform(0,10)
dist = ContinuousDistribution(dst)

dst = stats.geom(p=0.3)
dist = DiscreteDistribution(dst)

print(dist.exp_fos(n_bidders))

state_names = ['win', 'lose']
budget = 10
states = np.arange(budget+1)
actions = np.arange(budget+1)

P = np.zeros((states.size, actions.size, states.size))
R = np.zeros((states.size, actions.size, states.size))
Example #46
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 29 22:30:05 2020

@author: huangxiaoyi
"""
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as plt
a = np.linspace(0,50,25)
val = st.lognorm(1, 1).pdf(a)  # evaluate the probability density on the grid
val2 = st.lognorm(2,1).pdf(a)
val3 = st.lognorm(3,1).pdf(a)
fig, ax = plt.subplots()
ax.plot(val,'g--',label='m=1')
ax.plot(val2,'b:',label='m=2')
ax.plot(val3,'r',label='m=3')
legend = ax.legend(loc='upper right', shadow=True)
plt.xlabel('success')
plt.ylabel('probability')

cdf = st.lognorm(1, 1).cdf(a)  # compute the cumulative probability
cdf2 = st.lognorm(2,1).cdf(a)
cdf3 = st.lognorm(3,1).cdf(a)
fig2, bx = plt.subplots()
bx.plot(cdf,'g--',label='m=1')
bx.plot(cdf2,'b:',label='m=2')
bx.plot(cdf3,'r',label='m=3')
legend = bx.legend(loc='upper right', shadow=True)
plt.xlabel('success')
Example #47
def lognorm(mu: float, sigma2: float):
    assert sigma2 > 0
    return stats.lognorm(s=np.sqrt(sigma2), scale=np.exp(mu))
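For example (illustrative values), the mean of the returned distribution is exp(mu + sigma2 / 2):

import numpy as np

dist = lognorm(mu=0.2, sigma2=0.09)
print(np.isclose(dist.mean(), np.exp(0.2 + 0.09 / 2)))   # True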
Example #48
 def survival_times(self):
     # base the survival times upon the parameters mu and sigma
     return {state: lognorm(s=self.s[self._ix[state]], loc=0, scale=self.scale[self._ix[state]]) for state in self.states}
Example #49
 def __call__(self, x):
     """ Log-normal function from scipy """
     return stats.lognorm(self._sigma, scale=self._mu).pdf(x * self.j_ref())
s = 0.954
mean, var, skew, kurt = lognorm.stats(s, moments='mvsk')

# Display the probability density function (``pdf``):

x = np.linspace(lognorm.ppf(0.01, s), lognorm.ppf(0.99, s), 100)
ax.plot(x, lognorm.pdf(x, s), 'r-', lw=5, alpha=0.6, label='lognorm pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = lognorm(s)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = lognorm.ppf([0.001, 0.5, 0.999], s)
np.allclose([0.001, 0.5, 0.999], lognorm.cdf(vals, s))
# True

# Generate random numbers:

r = lognorm.rvs(s, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
Example #51
def lognormal(media, varianza):
    from scipy.stats import lognorm
    return lognorm(media, varianza)
Example #52
import cmdstanpy
import os

cmdstanpy.utils.cxx_toolchain_path()

#%% [markdown]
## Generate Claims

# Simple generator of lognormal claims

#%%
# parameters
mu = 0.5
sigma = 1

dist = stats.lognorm(sigma, scale=np.exp(mu))  # shape s = sigma, scale = exp(mu)

# generate claims
claims = dist.rvs(size=1000)

# graph
sns.distplot(claims)

#%% [markdown]
# \begin{aligned}
# X_i & \sim LN(mu, sigma) \\
# mu & \sim N( \mu_{mu}, 1 ) \\
# sigma & \sim exp( \lambda_{sigma} ) \\
# \mu_{mu} & = 1 \\
# \lambda_{sigma} & = 1 \\
# \end{aligned}
### TUNE HYPERPARAMETERS HERE ###
CONFIG = {
    "env": "Taxi-v3",
    "total_eps": 100000,
    "eps_max_steps": 50,
    "eval_episodes": 500,
    "eval_freq": 1000,
    "gamma": 0.99,
    "alpha": 0.15,
    "epsilon": 0.9,
}

if __name__ == "__main__":
    # env = gym.make(CONFIG["env"])
    dist = ContinuousDistribution(stats.lognorm(s=0.5, loc=0, scale=np.exp(0.25)))
    n_bidders = 10
    env = my_envs.AuctionEnvV1(dist, maxbudget=25, n_bidders=n_bidders, maxsteps=CONFIG["eps_max_steps"], action_granularity=0.25)
    total_reward, evaluation_return_means, evaluation_negative_returns, q_table, obs_counter, episode_lens = train(env, CONFIG)
    np.save("Qlearning_qtable.npy", q_table)
    np.save("Qlearning_obscount.npy", obs_counter)
    np.save("Qlearning_epslens.npy", episode_lens)
    print()
    print(f"Total reward over training: {total_reward}\n")
    arr = np.array(evaluation_return_means)

    fig = plt.figure(figsize=(6,5))
    ax1 = fig.add_subplot(311)
    ax2 = fig.add_subplot(312)
    ax3 = fig.add_subplot(313, sharex=ax2)
Example #54
                         [Normal, Gamma, Beta, InverseGamma, LogNormal])
def test_prior_equality(Prior):
    assert Prior() == Prior()


@pytest.mark.parametrize(
    'Prior, log, dlog, scipy_fun',
    [
        (Normal, -0.9639385332046727, -0.3, stats.norm(loc=0, scale=1)),
        (Beta, 0.27990188513281833, 3.8095238095238098, stats.beta(a=3, b=3)),
        (Gamma, -3.4010927892118175, 5.666666666666667,
         stats.gamma(a=3, scale=1 / 1)),
        (InverseGamma, 0.7894107034104656, -2.222222222222223,
         stats.invgamma(a=3, scale=1)),
        (LogNormal, -0.43974098565696607, 0.6799093477531204,
         stats.lognorm(loc=0, s=1)),
    ],
)
def test_log_pdf(Prior, log, dlog, scipy_fun, N):
    prior = Prior()
    rvs = prior.random(N)
    x = np.random.uniform()

    assert prior.log_pdf(0.3) == pytest.approx(log)
    assert prior.dlog_pdf(0.3) == pytest.approx(dlog)
    assert prior.mean == pytest.approx(np.mean(rvs),
                                       abs=3 * np.std(rvs) / np.sqrt(N))
    assert prior.log_pdf(x) == pytest.approx(scipy_fun.logpdf(x))


@pytest.mark.parametrize('Prior',
Example #55
print("Суженное множество:")
print("p=", p)
print("delta=", delta)
print("m1=", m1)
# plotting bar chart
X = np.array([(delta[j] + delta[j + 1]) / 2 for j in range(m1)])
Y = np.array([p[j] / (delta[j + 1] - delta[j]) for j in range(m1)])
fig = plt.figure(dpi=100)
plt.bar(X, Y, 1)
# set interval min=-inf; max=inf
delta[0] = -np.inf
delta[-1] = np.inf
# setting distribution type
if data['low'] == 'lognorm':
    mu, sigma = np.log(x).mean(), np.sqrt(np.log(x).var())
    dist = stats.lognorm(sigma, scale=np.exp(mu))
    print(mu, sigma)
elif data['low'] == 'exp':
    la = 1 / x.mean()
    dist = stats.expon(scale=1 / la)
    print(la)
else:
    dist = stats.uniform(a, b)
    print(a, b)
# expected probability of falling into each interval, multiplied by N
nt = np.array(
    [dist.cdf(delta[j + 1]) - dist.cdf(delta[j]) for j in range(m1 - 1)]) * N
print("nt", nt)
# calculating chi
chi = np.array([(p[j] - nt[j])**2 / nt[j] for j in range(m1 - 1)]).sum()
# finding table value
Example #56
_DIST_MAP = {
    dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
    dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
    dist.Beta: lambda con1, con0: osp.beta(con1, con0),
    dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
    dist.BinomialLogits: lambda logits, total_count: osp.binom(n=total_count, p=_to_probs_bernoulli(logits)),
    dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
    dist.Chi2: lambda df: osp.chi2(df),
    dist.Dirichlet: lambda conc: osp.dirichlet(conc),
    dist.Exponential: lambda rate: osp.expon(scale=np.reciprocal(rate)),
    dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1./rate),
    dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
    dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
    dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),
    dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=np.exp(loc)),
    dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(n=total_count, p=probs),
    dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(n=total_count,
                                                                        p=_to_probs_multinom(logits)),
    dist.MultivariateNormal: _mvn_to_scipy,
    dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,
    dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),
    dist.Pareto: lambda alpha, scale: osp.pareto(alpha, scale=scale),
    dist.Poisson: lambda rate: osp.poisson(rate),
    dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),
    dist.Uniform: lambda a, b: osp.uniform(a, b - a),
}


CONTINUOUS = [
    T(dist.Beta, 1., 2.),
Example #57
def get_random_rect_sz(min_x, max_x, shape, loc, scale):
    dist = stats.lognorm(shape, loc, scale)
    size = dist.rvs()
    size = size * (max_x - min_x) + min_x
    return (size)
Example #58
def check_one_value(params):  # in prior_samples:
    logLstar, logPhiStar, alpha = params
    logLmin = 8.5
    #distro=gamma(alpha,loc=0,scale=10**logLstar)   #this choice of alpha means that the LF form is NOT same as Mike's but rather has alpha in the exponent when written as dN/dV/dlogL
    vol_COS = 20189.
    vol_GN = 131042.
    normaliz = fp.gammainc(alpha, 10**(logLmin - logLstar))
    Numexp_COS = 10**(
        logPhiStar
    ) * normaliz * vol_COS  #*(1-distro.cdf(1e9))   #This is how many galaxies in volume vol, are expected on average
    Numexp_GN = 10**(logPhiStar) * normaliz * vol_GN
    #plt.loglog(np.logspace(9,12,100),distro.pdf(np.logspace(9,12,100)))
    N_to_use_COS = poisson.rvs(Numexp_COS)
    N_to_use_GN = poisson.rvs(Numexp_GN)
    Lprimes_COS = simulate_schechter_distribution(
        alpha, 10**logLstar, 10**logLmin,
        N_to_use_COS)  #distro.rvs(size=N_to_use_COS)
    Lprimes_GN = simulate_schechter_distribution(
        alpha, 10**logLstar, 10**logLmin,
        N_to_use_GN)  #distro.rvs(size=N_to_use_GN)
    #while(np.any(Lprimes<1e9)):
    #	Lprimes=distro.rvs(size=N_to_use)
    freq_list_COS = 31 + 8 * np.random.random(N_to_use_COS)
    freq_list_GN = 30 + 8 * np.random.random(N_to_use_GN)
    sdv_list_COS = Lprimes_COS / L_prime_freq_conversion[(
        (freq_list_COS - 30) / 9.0e-5).astype(int)]
    sdv_list_GN = Lprimes_GN / L_prime_freq_conversion[(
        (freq_list_GN - 30) / 9.0e-5).astype(int)]
    #sdv_list_COS=np.array([Lprimes_COS[idx]/calc_L_prime(1,fre) for idx,fre in enumerate(freq_list_COS)])
    #sdv_list_GN=np.array([Lprimes_GN[idx]/calc_L_prime(1,fre) for idx,fre in enumerate(freq_list_GN)])
    spat_real_COS = np.random.choice(3,
                                     size=N_to_use_COS,
                                     replace=True,
                                     p=[0.88, 0.1, 0.02])
    freq_real_COS = np.random.choice(3,
                                     size=N_to_use_COS,
                                     replace=True,
                                     p=[0.34, 0.33, 0.33])
    spat_real_GN = np.random.choice(3,
                                    size=N_to_use_GN,
                                    replace=True,
                                    p=[0.88, 0.1, 0.02])
    freq_real_GN = np.random.choice(3,
                                    size=N_to_use_GN,
                                    replace=True,
                                    p=[0.34, 0.33, 0.33])
    compl_params_fit_COS = np.array([[[0.51093145, 0.02771134],
                                      [0.41731388, 0.03699222],
                                      [0.48504448, 0.04833295]],
                                     [[0.52961387, 0.06536336],
                                      [0.47647305, 0.09091878],
                                      [0.43582843, 0.11752236]],
                                     [[0.48816207, 0.1055706],
                                      [0.41942854, 0.15440935],
                                      [0.43130629, 0.21278495]]])
    compl_params_fit_GN = np.array([[[0.30244889, 0.0647361],
                                     [0.19111455, 0.0863606],
                                     [0.26355572, 0.11940371]],
                                    [[0.34815344, 0.11987698],
                                     [0.3085827, 0.17502077],
                                     [0.3268172, 0.23117797]],
                                    [[0.37420752, 0.21330359],
                                     [0.30220447, 0.30313399],
                                     [0.29627785, 0.40176673]]])
    myfit = lambda f, d, f0: max(0, 1 - (1. / (f + d) * np.exp(-f / f0)))
    completenesses_COS = [
        myfit(sdv, *compl_params_fit_COS[spat, freq])
        for sdv, spat, freq in zip(sdv_list_COS, spat_real_COS, freq_real_COS)
    ]
    completenesses_GN = [
        myfit(sdv, *compl_params_fit_GN[spat, freq])
        for sdv, spat, freq in zip(sdv_list_GN, spat_real_GN, freq_real_GN)
    ]
    observed_COS = np.random.random(size=N_to_use_COS) < completenesses_COS
    Nobs_COS = np.sum(observed_COS)
    observed_GN = np.random.random(size=N_to_use_GN) < completenesses_GN
    Nobs_GN = np.sum(observed_GN)
    #now take care of the real candidates
    purities_COS = np.array([
        max(0, obj.purity *
            np.random.normal(loc=1., scale=1.)) if obj.purity < 1 else 1
        for obj in objects_COS
    ])
    purities_GN = np.array([
        max(0, obj.purity *
            np.random.normal(loc=1., scale=1.)) if obj.purity < 1 else 1
        for obj in objects_GN
    ])
    #purities_COS=np.array([np.random.random()*obj.purity if obj.purity<1 else 1 for obj in objects_COS ])
    #purities_GN=np.array([np.random.random()*obj.purity if obj.purity<1 else 1 for obj in objects_GN ])
    selected_candidates_COS = np.random.random(
        size=len(objects_COS)) < purities_COS
    Nselected_COS = np.sum(selected_candidates_COS)
    selected_candidates_GN = np.random.random(
        size=len(objects_GN)) < purities_GN
    Nselected_GN = np.sum(selected_candidates_GN)
    #print N_to_use_COS,N_to_use_GN,Nobs_COS,Nobs_GN,'real:',Nselected_COS,Nselected_GN
    if np.absolute(Nselected_COS - Nobs_COS) <= 1 and np.absolute(
            Nselected_GN - Nobs_GN) <= 1 and Nobs_GN > 0:
        #print 'Match!'
        Lprime_observed_real_COS = np.array([
            obj.L_prime for idx, obj in enumerate(objects_COS)
            if selected_candidates_COS[idx]
        ])
        Lprime_observed_real_GN = np.array([
            obj.L_prime for idx, obj in enumerate(objects_GN)
            if selected_candidates_GN[idx]
        ])
        SNRbins = np.arange(4, 7, .1)
        SNRbin_idx = np.digitize(5.5, SNRbins) - 1  #I set this to 5.5
        observed_Lprime_COS = []
        for idx in range(Nobs_COS):
            inj_spa_COS = spat_real_COS[observed_COS][idx]
            #inj_fre=freq_real[observed][idx]
            spat_distrib_COS = [
                np.mean([
                    likelihood_COS[inj_spa_COS][key][SNR_b]
                    for SNR_b in range(SNRbin_idx - 3, SNRbin_idx + 3)
                ]) for key in [-1, 0, 2, 4, 6, 8, 10, 12]
            ]
            spat_obs_idx_COS = min(
                5,
                np.random.choice(8, size=1, replace=True,
                                 p=spat_distrib_COS)[0])
            spat_obs_COS = [-1, 0, 2, 4, 6, 8][spat_obs_idx_COS]
            #freq_distrib=[np.mean([likelihood_freq[inj_fre][key][SNR_b]  for SNR_b in range(SNRbin_idx-3,SNRbin_idx+3)]) for  key in [4,8,12,16,20]]
            #freq_obs=[4,8,12,16,20][np.random.choice(5, size=1, replace=True, p=freq_distrib)[0]]
            #for the given intrinsic and observed properties, and the SNR=5.5, what flux ratio do we expect?
            lognorm_par_COS = lognorm_params_given_inj_and_meas_COS[
                spat_obs_idx_COS, inj_spa_COS]
            from scipy.stats import lognorm
            flux_correct_COS = lognorm(s=lognorm_par_COS[1],
                                       scale=np.exp(lognorm_par_COS[0])).rvs()
            observed_flux_COS = sdv_list_COS[observed_COS][
                idx] * flux_correct_COS
            observed_Lprime_COS.append(Lprimes_COS[observed_COS][idx] *
                                       flux_correct_COS)
        observed_Lprime_GN = []
        for idx in range(Nobs_GN):
            inj_spa_GN = spat_real_GN[observed_GN][idx]
            #inj_fre=freq_real[observed][idx]
            spat_distrib_GN = [
                np.mean([
                    likelihood_GN[inj_spa_GN][key][SNR_b]
                    for SNR_b in range(SNRbin_idx - 3, SNRbin_idx + 3)
                ]) for key in [-1, 0, 2, 4, 6, 8, 10]
            ]
            spat_obs_idx_GN = min(
                5,
                np.random.choice(7, size=1, replace=True,
                                 p=spat_distrib_GN)[0])
            spat_obs_GN = [-1, 0, 2, 4, 6, 8][spat_obs_idx_GN]
            #freq_distrib=[np.mean([likelihood_freq[inj_fre][key][SNR_b]  for SNR_b in range(SNRbin_idx-3,SNRbin_idx+3)]) for  key in [4,8,12,16,20]]
            #freq_obs=[4,8,12,16,20][np.random.choice(5, size=1, replace=True, p=freq_distrib)[0]]
            #for the given intrinsic and observed properties, and the SNR=5.5, what flux ratio do we expect?
            lognorm_par_GN = lognorm_params_given_inj_and_meas_GN[
                spat_obs_idx_GN, inj_spa_GN]
            from scipy.stats import lognorm
            flux_correct_GN = lognorm(s=lognorm_par_GN[1],
                                      scale=np.exp(lognorm_par_GN[0])).rvs()
            observed_flux_GN = sdv_list_GN[observed_GN][idx] * flux_correct_GN
            observed_Lprime_GN.append(Lprimes_GN[observed_GN][idx] *
                                      flux_correct_GN)
        allmatched = True
        for obs_real_Lprime in Lprime_observed_real_COS:
            if np.min(
                    np.absolute(obs_real_Lprime -
                                np.array(observed_Lprime_COS)) /
                    obs_real_Lprime) > .2:
                #print np.min(np.absolute(obs_real_Lprime-np.array(observed_Lprime_COS))/obs_real_Lprime)
                allmatched = False
        for obs_real_Lprime in Lprime_observed_real_GN:
            if np.min(
                    np.absolute(obs_real_Lprime - np.array(observed_Lprime_GN))
                    / obs_real_Lprime) > .2:
                #print np.min(np.absolute(obs_real_Lprime-np.array(observed_Lprime_GN))/obs_real_Lprime)
                allmatched = False
        if allmatched:
            #print 'amazing this works!'#,'real:',Lprime_observed_real,'simul:',observed_Lprime
            return params
Example #59
def d_lnorm(meanlog, sdlog):
    return SpDouble(sts.lognorm(s=np.exp(sdlog),
                                scale=np.exp(np.exp(meanlog))))
Example #60
    def plot_multiplicative(self, T, npaths=25, show_trend=True):
        """
        Plots for the multiplicative decomposition

        """
        # Pull out right sizes so we know how to increment
        nx, nk, nm = self.nx, self.nk, self.nm
        # Matrices for the multiplicative decomposition
        nu_tilde, H, g = self.multiplicative_decomp()

        # Allocate space (nm is the number of functionals - we want npaths for each)
        mpath_mult = np.empty((nm * npaths, T))
        mbounds_mult = np.empty((nm * 2, T))
        spath_mult = np.empty((nm * npaths, T))
        sbounds_mult = np.empty((nm * 2, T))
        tpath_mult = np.empty((nm * npaths, T))
        ypath_mult = np.empty((nm * npaths, T))

        # Simulate for as long as we wanted
        moment_generator = self.lss.moment_sequence()
        # Pull out population moments
        for t in range(T):
            tmoms = next(moment_generator)
            ymeans = tmoms[1]
            yvar = tmoms[3]

            # Lower and upper bounds - for each multiplicative functional
            for ii in range(nm):
                li, ui = ii * 2, (ii + 1) * 2
                Mdist = lognorm(np.asscalar(np.sqrt(yvar[nx+nm+ii, nx+nm+ii])),
                                scale=np.asscalar( np.exp( ymeans[nx+nm+ii]- \
                                                t*(.5)*np.expand_dims(np.diag(H @ H.T),1)[ii])))
                Sdist = lognorm(np.asscalar(
                    np.sqrt(yvar[nx + 2 * nm + ii, nx + 2 * nm + ii])),
                                scale=np.asscalar(
                                    np.exp(-ymeans[nx + 2 * nm + ii])))
                mbounds_mult[li:ui, t] = Mdist.ppf([.01, .99])
                sbounds_mult[li:ui, t] = Sdist.ppf([.01, .99])

        # Pull out paths
        for n in range(npaths):
            x, y = self.lss.simulate(T)
            for ii in range(nm):
                ypath_mult[npaths * ii + n, :] = np.exp(y[nx + ii, :])
                mpath_mult[npaths * ii + n, :] = np.exp(
                    y[nx + nm + ii, :] - np.arange(T) *
                    (.5) * np.expand_dims(np.diag(H @ H.T), 1)[ii])
                spath_mult[npaths * ii +
                           n, :] = 1 / np.exp(-y[nx + 2 * nm + ii, :])
                tpath_mult[npaths * ii + n, :] = np.exp(
                    y[nx + 3 * nm + ii, :] + np.arange(T) *
                    (.5) * np.expand_dims(np.diag(H @ H.T), 1)[ii])

        mult_figs = []

        for ii in range(nm):
            li, ui = npaths * (ii), npaths * (ii + 1)
            LI, UI = 2 * (ii), 2 * (ii + 1)

            mult_figs.append(
                self.plot_given_paths(T,
                                      ypath_mult[li:ui, :],
                                      mpath_mult[li:ui, :],
                                      spath_mult[li:ui, :],
                                      tpath_mult[li:ui, :],
                                      mbounds_mult[LI:UI, :],
                                      sbounds_mult[LI:UI, :],
                                      1,
                                      show_trend=show_trend))
            mult_figs[ii].suptitle(
                r'Multiplicative decomposition of $y_{%s}$' % str(ii + 1),
                fontsize=14)

        return mult_figs