def mse_exp(theoretical_distribution, estimated_distribution):
    theoretical_lambda = theoretical_distribution[1]
    theoretical_scale = 1 / theoretical_lambda

    estimated_lambda = estimated_distribution[1]
    estimated_scale = 1 / estimated_lambda

    linspace = np.linspace(expon.ppf(0.001, scale=theoretical_scale),
                           expon.ppf(0.999, scale=theoretical_scale), 1000)
    theoretical_pdf = expon.pdf(linspace, scale=theoretical_scale)
    estimated_pdf = expon.pdf(linspace, scale=estimated_scale)

    mse_pdf = mean_squared_error(theoretical_pdf, estimated_pdf)

    theoretical_cdf = expon.cdf(linspace, scale=theoretical_scale)
    estimated_cdf = expon.cdf(linspace, scale=estimated_scale)

    mse_cdf = mean_squared_error(theoretical_cdf, estimated_cdf)

    theoretical_reliability = 1 - expon.cdf(linspace, scale=theoretical_scale)
    estimated_reliability = 1 - expon.cdf(linspace, scale=estimated_scale)

    mse_reliability = mean_squared_error(theoretical_reliability,
                                         estimated_reliability)

    return [mse_pdf, mse_cdf, mse_reliability]
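# A minimal usage sketch for mse_exp above. It assumes, as the [1] indexing
# suggests, that each distribution is passed as an ('EXP', lambda) tuple; the
# imports mirror the names used in the function body.
import numpy as np
from scipy.stats import expon
from sklearn.metrics import mean_squared_error

theoretical = ('EXP', 2.0)  # true rate lambda = 2
estimated = ('EXP', 1.8)    # e.g. a fitted rate, 1 / sample_mean
mse_pdf, mse_cdf, mse_rel = mse_exp(theoretical, estimated)
print(mse_pdf, mse_cdf, mse_rel)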
Example 2
def main():
    src_path_map = '../data/map/wean.dat'
    map_obj = MapReader(src_path_map)
    occupancy_map = map_obj.get_map()
    sensor_model = SensorModel(occupancy_map)

    particleMeasurement = 500
    probabilities = np.zeros(1000)

    for actualMeasurement in range(1000):
        probabilities[actualMeasurement] = sensor_model.calculateProbability(
            actualMeasurement, particleMeasurement)
    plotProbabilities(probabilities)

    numSamples = 1000
    stdev = 100
    gaussPDF = signal.gaussian(numSamples * 2, std=stdev)

    #plotProbabilities(gaussPDF)

    # my Bin-based version
    x = np.linspace(
        expon.ppf(0.01), expon.ppf(0.99), numSamples
    )  # numSamples points between the 1st and 99th percentiles of Exp(1)
    expPDF = expon.pdf(x)
Example 3
def calculate_linspace(distribution):
    if distribution[0] == 'EXP':
        lambda_ = distribution[1]
        scale_ = 1 / lambda_
        return np.linspace(expon.ppf(0.001, scale=scale_),
                           expon.ppf(0.999, scale=scale_), 1000)

    if distribution[0] == 'WEIBULL':
        scale = distribution[1]
        shape = distribution[2]
        return np.linspace(weibull_min.ppf(0.001, shape, loc=0, scale=scale),
                           weibull_min.ppf(0.999, shape, loc=0, scale=scale),
                           1000)

    if distribution[0] == 'NORMAL':
        mu = distribution[1]
        sigma = distribution[2]
        return np.linspace(norm.ppf(0.001, loc=mu, scale=sigma),
                           norm.ppf(0.999, loc=mu, scale=sigma), 1000)

    if distribution[0] == 'LOGNORM':
        mu = distribution[1]
        sigma = distribution[2]
        scale = math.exp(mu)
        return np.linspace(lognorm.ppf(0.001, sigma, loc=0, scale=scale),
                           lognorm.ppf(0.999, sigma, loc=0, scale=scale), 1000)
    else:
        return np.linspace(0, 100, 1000)
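# Usage sketch for calculate_linspace: each branch reads a tuple whose first
# element names the family, e.g. ('EXP', lambda), ('WEIBULL', scale, shape),
# ('NORMAL', mu, sigma) or ('LOGNORM', mu, sigma), as the indexing above
# implies; anything else falls through to a plain 0..100 grid.
import math
import numpy as np
from scipy.stats import expon, weibull_min, norm, lognorm

grid = calculate_linspace(('NORMAL', 0.0, 1.0))
print(grid[0], grid[-1])  # roughly the 0.1% and 99.9% quantiles, ~ -3.09 and 3.09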
Example 4
def chi_experiment(alpha):
    """
    :param alpha: the scale parameter
    :return: line about accepting or rejecting a hypothesis
    """
    arr = np.random.exponential(scale=1 / alpha, size=n)
    r = int(20 * n / 1000)

    z_gamma = chdtri(r - 1, gamma)

    nu, bin_edges = np.histogram(arr,
                                 bins=r,
                                 range=(expon.ppf(0.001), expon.ppf(0.999)))

    p = np.array([
        expon.sf(x=bin_edges[i - 1], scale=1) -
        expon.sf(x=bin_edges[i], scale=1) for i in range(1, r + 1)
    ])

    delta = np.sum(((nu - n * p)**2) / (n * p))

    if delta > z_gamma:
        return f'r = {r}, z_gamma = {z_gamma:.3f}, delta = {delta:.3f}. \n' \
               f'The statistical data do CONFLICT with the H0 hypothesis.'

    else:
        return f'z_gamma = {z_gamma:.3f}, delta = {delta:.3f}. \n' \
               f'The statistical data do NOT CONFLICT with the H0 hypothesis.'
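# chi_experiment depends on module-level globals: the sample size n, the
# significance level gamma, and chdtri from scipy.special (the inverse of the
# chi-square survival function). A hedged driver under those assumptions:
import numpy as np
from scipy.stats import expon
from scipy.special import chdtri

n = 1000
gamma = 0.05
print(chi_experiment(alpha=1.0))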
Example 5
    def test_statistics(self):
        # This is a statistical test that has a non-zero chance of failure
        # during normal operation. Thus, we set the random seed to a value that
        # creates a realization passing the test.
        np.random.seed(seed=12345)

        for rate in [self.rate_profile, self.rate_profile.rescale(kHz)]:
            spiketrain = stgen.inhomogeneous_poisson_process(rate)
            intervals = isi(spiketrain)

            # Computing expected statistics and percentiles
            expected_spike_count = (np.sum(rate) *
                                    rate.sampling_period).simplified
            percentile_count = poisson.ppf(.999, expected_spike_count)
            expected_min_isi = (1 / np.min(rate))
            expected_max_isi = (1 / np.max(rate))
            percentile_min_isi = expon.ppf(.999, expected_min_isi)
            percentile_max_isi = expon.ppf(.999, expected_max_isi)

            # Testing (each should fail 1 every 1000 times)
            self.assertLess(spiketrain.size, percentile_count)
            self.assertLess(np.min(intervals), percentile_min_isi)
            self.assertLess(np.max(intervals), percentile_max_isi)

            # Testing t_start t_stop
            self.assertEqual(rate.t_stop, spiketrain.t_stop)
            self.assertEqual(rate.t_start, spiketrain.t_start)

        # Testing type
        spiketrain_as_array = stgen.inhomogeneous_poisson_process(
            rate, as_array=True)
        self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
        self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))
Example 6
def plot_histogram(data_sets, attribute_names, range, fit_dist='', ylabel='', title='', bins=20, normed=1, histtype='bar', facecolor='#0099FF', log=0):
    # note: the `range` parameter shadows the builtin of the same name
    data_to_plot = retrieve_attributes_by_name(data_sets, attribute_names)
    n, bins, patches = plt.hist(data_to_plot[0], bins=bins, range=range, normed=normed, histtype=histtype, facecolor=facecolor, log=log)

    if fit_dist == 'normal':
        mu, sigma = data_to_plot.mean(), data_to_plot.std() # sample mean and sample standard deviation of the array
        y = mlab.normpdf(bins, mu, sigma) # the corresponding normal pdf
        l = plt.plot(bins, y, 'r--', linewidth=1) # plot the fitted distribution

    if fit_dist == 'uniform':
        uniform_pdf = 1 / (range[1] - range[0]) # U_pdf(x) := 1 / (b - a), horizontal line with (a, b)=range
        l = plt.plot(range, (uniform_pdf, uniform_pdf), 'r-', linewidth=2) # plot the uniform pdf

    if fit_dist == 'exponential':
        k = data_to_plot.mean() # the sample mean is the MLE of the scale (1 / lambda) of the exp. dist.
        x = np.linspace(expon.ppf(0.01, scale=k), expon.ppf(0.99, scale=k), 100)
        l = plt.plot(x, expon.pdf(x, scale=k), 'r--', linewidth=1)

    if fit_dist == 'lognormal':
        data_to_plot[np.where(data_to_plot==0)] = 0.0001
        x = np.log(data_to_plot)
        print x
        mu, sigma = x.mean(), x.std()
        print '%f %f' % (mu, sigma)
        y = lognorm.pdf(x=bins, s=sigma, loc=0, scale=math.exp(mu)) # scipy lognorm: s=sigma, scale=exp(mu), loc=0
        l = plt.plot(bins, y, 'r--', linewidth=1)

    plt.ylabel(ylabel)
    plt.xlabel(attribute_names)
    plt.title(title)
    plt.grid(True)
    plt.show()
Example 8
def demand_generator(origin, zipcode):
    mask = (distributions['Origin'] == origin) & (distributions['ZIP']
                                                  == zipcode)
    prob_dist_info = distributions[mask]
    prob_dist_info = prob_dist_info.reset_index()
    min_demand = prob_dist_info['Minimum'].item()
    max_demand = prob_dist_info['Maximum'].item()
    # Normal distribution
    if prob_dist_info['Distribution'].item() == 'norm':
        mean = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[0]
        st_dev = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[1]
        sample = norm.ppf(uniform(0, 1), mean, st_dev)
        while sample < min_demand * min_factor or sample > max_demand * max_factor:
            sample = norm.ppf(uniform(0, 1), mean, st_dev)
        return sample
    # elif prob_dist_info['Distribution'].item() == 'lognorm':
    #     shape = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[0]
    #     loc = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[1]
    #     scale = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[2]
    #     return lognorm.ppf(uniform(0, 1), shape, loc, scale)
    # Exponential distribution
    elif prob_dist_info['Distribution'].item() == 'expon':
        loc = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[0]
        scale = ast.literal_eval(prob_dist_info.loc[0, 'Parameters'])[1]
        sample = expon.ppf(uniform(0, 1), loc, scale)
        while sample < min_demand * min_factor or sample > max_demand * max_factor:
            sample = expon.ppf(uniform(0, 1), loc, scale)
        return sample
    return
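# demand_generator samples by inverse transform: norm.ppf / expon.ppf of a
# Uniform(0, 1) draw is distributed as the target, and the while loops redraw
# until the sample lands in [min_demand * min_factor, max_demand * max_factor]
# (min_factor, max_factor and distributions are globals here, and uniform is
# assumed to be random.uniform as the call signature suggests). The
# exponential draw is equivalent to scipy's rvs:
from random import uniform
from scipy.stats import expon

sample = expon.ppf(uniform(0, 1), 0.0, 5.0)  # one draw from Exp(scale=5)
# equivalent to: sample = expon.rvs(loc=0.0, scale=5.0)
print(sample)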
Example 9
def generate_expon_distributions(batch=1000, size=1000):
    x_values = []
    for _ in range(batch):
        x = np.linspace(expon.ppf(0.01), expon.ppf(0.99), size)
        rv = expon()
        x_values.append(rv.pdf(x))

    return np.array(x_values)
Example 10
 def equitailed_cs(self, alpha2):
     """
     Calculates the equitailed credible set of a parameter.
     The alpha to be inserted should be between (0-100).
     """
     alpha_split = (100 - alpha2) / 200
     lower_bound = expon.ppf(alpha_split, scale=1 / self.lambda_)
     upper_bound = expon.ppf(1 - alpha_split, scale=1 / self.lambda_)
     return (lower_bound, upper_bound)
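# For alpha2 = 95 the method above returns the 2.5% and 97.5% quantiles of an
# Exp(lambda_) posterior. A direct check with a hypothetical lambda_ = 2:
from scipy.stats import expon

lower, upper = expon.ppf([0.025, 0.975], scale=1 / 2.0)
print(lower, upper)  # ~0.0127 and ~1.844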
Example 11
    def test_statistics(self):
        # This is a statistical test that has a non-zero chance of failure
        # during normal operation. Thus, we set the random seed to a value that
        # creates a realization passing the test.
        np.random.seed(seed=12345)

        shape_factor = 2.5

        for rate in [self.rate_profile, self.rate_profile.rescale(pq.kHz)]:
            spiketrain = stgen.inhomogeneous_gamma_process(
                rate, shape_factor=shape_factor)
            intervals = isi(spiketrain)

            # Computing expected statistics and percentiles
            expected_spike_count = (np.sum(rate) *
                                    rate.sampling_period).simplified
            percentile_count = poisson.ppf(.999, expected_spike_count)
            expected_min_isi = (1 / np.min(rate))
            expected_max_isi = (1 / np.max(rate))
            percentile_min_isi = expon.ppf(.999, expected_min_isi)
            percentile_max_isi = expon.ppf(.999, expected_max_isi)

            # Testing (each should fail 1 every 1000 times)
            self.assertLess(spiketrain.size, percentile_count)
            self.assertLess(np.min(intervals), percentile_min_isi)
            self.assertLess(np.max(intervals), percentile_max_isi)

            # Testing t_start t_stop
            self.assertEqual(rate.t_stop, spiketrain.t_stop)
            self.assertEqual(rate.t_start, spiketrain.t_start)

        # Testing type
        spiketrain_as_array = stgen.inhomogeneous_gamma_process(
            rate, shape_factor=shape_factor, as_array=True)
        self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
        self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))

        # check error if rate has wrong format
        self.assertRaises(ValueError,
                          stgen.inhomogeneous_gamma_process,
                          rate=[0.1, 2.],
                          shape_factor=shape_factor)

        # check error if negative values in rate
        self.assertRaises(ValueError,
                          stgen.inhomogeneous_gamma_process,
                          rate=neo.AnalogSignal([-0.1, 10.] * pq.Hz,
                                                sampling_period=0.001 * pq.s),
                          shape_factor=shape_factor)

        # check error if rate is empty
        self.assertRaises(ValueError,
                          stgen.inhomogeneous_gamma_process,
                          rate=neo.AnalogSignal([] * pq.Hz,
                                                sampling_period=0.001 * pq.s),
                          shape_factor=shape_factor)
Example 12
 def plot_expon_dist(lambda__, ax, num_points=1000):
     scale = 1 / lambda__  # Exp(lambda) has scale = 1 / lambda in scipy
     x = np.linspace(expon.ppf(0.01, scale=scale),
                     expon.ppf(0.99, scale=scale), num_points)
     label = r'$Exp(' + str(lambda__) + ')$'
     ax.plot(x,
             expon.pdf(x, scale=scale),
             label=label,
             linewidth=4.0,
             alpha=0.8)
     ax.set_title(label)
Example 13
    def test_statistics(self):
        # This is a statistical test that has a non-zero chance of failure
        # during normal operation. Thus, we set the random seed to a value that
        # creates a realization passing the test.
        np.random.seed(seed=12345)

        for rate in (self.rate_profile, self.rate_profile.rescale(pq.kHz)):
            for refractory_period in (3 * pq.ms, None):
                spiketrain = stgen.inhomogeneous_poisson_process(
                    rate, refractory_period=refractory_period)
                intervals = isi(spiketrain)

                # Computing expected statistics and percentiles
                expected_spike_count = (np.sum(rate) *
                                        rate.sampling_period).simplified
                percentile_count = poisson.ppf(.999, expected_spike_count)
                expected_min_isi = (1 / np.min(rate))
                expected_max_isi = (1 / np.max(rate))
                percentile_min_isi = expon.ppf(.999, expected_min_isi)
                percentile_max_isi = expon.ppf(.999, expected_max_isi)

                # Check that minimal ISI is greater than the refractory_period
                if refractory_period is not None:
                    self.assertGreater(np.min(intervals), refractory_period)

                # Testing (each should fail 1 every 1000 times)
                self.assertLess(spiketrain.size, percentile_count)
                self.assertLess(np.min(intervals), percentile_min_isi)
                self.assertLess(np.max(intervals), percentile_max_isi)

                # Testing t_start t_stop
                self.assertEqual(rate.t_stop, spiketrain.t_stop)
                self.assertEqual(rate.t_start, spiketrain.t_start)

        # Testing type
        spiketrain_as_array = stgen.inhomogeneous_poisson_process(
            rate, as_array=True)
        self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
        self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))

        # Testing type for refractory period
        refractory_period = 3 * pq.ms
        spiketrain = stgen.inhomogeneous_poisson_process(
            rate, refractory_period=refractory_period)
        spiketrain_as_array = stgen.inhomogeneous_poisson_process(
            rate, as_array=True, refractory_period=refractory_period)
        self.assertTrue(isinstance(spiketrain_as_array, np.ndarray))
        self.assertTrue(isinstance(spiketrain, neo.SpikeTrain))

        # Check that to high refractory period raises error
        self.assertRaises(ValueError,
                          stgen.inhomogeneous_poisson_process,
                          self.rate_profile,
                          refractory_period=1000 * pq.ms)
Example 14
    def test_expon(self):
        from scipy.stats import expon
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 1)

        mean, var, skew, kurt = expon.stats(moments='mvsk')

        x = np.linspace(expon.ppf(0.01), expon.ppf(0.99), 100)
        ax.plot(x, expon.pdf(x), 'r-', lw=5, alpha=0.6, label='expon pdf')

        rv = expon()
        ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

        vals = expon.ppf([0.001, 0.5, 0.999])
        np.allclose([0.001, 0.5, 0.999], expon.cdf(vals))
        self.assertEqual(str(ax), "AxesSubplot(0.125,0.11;0.775x0.77)")
Example 15
def default_bps(model, c, r, t):
    noBrPointsPerEpoch = model.nbreakpoints
    nepochs = len(noBrPointsPerEpoch)
    ebps = []
    bps = []

    def avg(xs):
        if isinstance(xs, list):
            return sum(xs) / len(xs)
        else:
            return xs

    for e in xrange(0, nepochs):
        theta = 1.0 / avg(c[e])
        nbps = noBrPointsPerEpoch[e]
        if e == nepochs - 1:
            new_bps = [
                (x * theta + t[e])
                for x in expon.ppf([float(i) / nbps for i in xrange(nbps)])
            ]
        else:
            new_bps = linspace(t[e], t[e + 1], nbps + 1)[:nbps]
        ebps.append(new_bps)
        bps.extend(new_bps)
    return bps, ebps
Example 16
    def __init__(self, edgeList):

        # To make things fast, the exponential function should be precomputed as a lookup table
        # It should then be scaled to get it to be the correct size relative to the other distributions.
        # The gaussian should also be precomputed but with twice the width of the ranges.  
        # The gaussian should have things in the form   [pr(x),  sum of this and all previous probabilities]
        # The exponential function should be calculated the same way.
        # I'm not going to use the delta function because I am not seeing it in the data.
        # The uniform distribution should have [pr(x), pr(x) * 1000]
        # Make the exponential distribution

        ############################## KNOBS TO TURN #########################################
        # Adjust these to get an acceptable distribution.  The distribution is adjusted later
        # To get its sum to equal 1.
        self.stdev = 1   # Adjusts the width of the gaussian
        self.exponentialScaleFactor = 1/50   # The sum of the exponential curve that I am generating is ~21.6.  
                                        # The values as initially generated range from .99 to .01
        self.uniformValue = .05 # This is the value that is used in each bin for the uniform distribution
        ######################################################################################

        self.edges = edgeList


        self.numSamples = 1000 # This should be 1000 since our max range is 10m and the divisions are in 10cm units.

        x = np.linspace(expon.ppf(0.01), expon.ppf(0.99), self.numSamples) # numSamples points between the 1st and 99th percentiles; the pdf values at these points fall from ~.99 to ~.01
        self.expPDF = expon.pdf(x)
        self.expPDF *= self.exponentialScaleFactor # Scale it down so that the distribution's sum is closer to 1

        # Make the gaussian distribution
        self.gaussPDF = signal.gaussian(self.numSamples * 2,std=self.stdev) # We want this to be 2000 samples wide


        # Resize the distributions to give them a second row.
        self.expPDF.resize(2,self.numSamples)
        self.gaussPDF.resize(2,self.numSamples*2)

        # Find the sums at each point in the two PDFs
        for I in range(self.numSamples):
            self.expPDF[1][I] = self.expPDF[0][0:I+1].sum()

        for I in range(self.numSamples*2):
            self.gaussPDF[1][I] = self.gaussPDF[0][0:I+1].sum()

        self.uniformSum = self.uniformValue * self.numSamples

        self.rangeLines = np.zeros((180,4))
Example 17
    def exec(self, param1, param2, isUniform=True):
        data0 = []
        data1 = []
        data2 = []

        if isUniform:
            delta = abs(param1 - param2)
            data0 = np.linspace(param1 - 0.1 * delta, param2 + 0.1 * delta, 1000)
            dist = uniform(loc=param1, scale=delta)
            data1 = dist.pdf(data0)
            data2 = dist.cdf(data0)
        else:
            data0 = np.linspace(expon.ppf(0.0001), expon.ppf(0.999999), 1000)
            dist = expon(scale = 1/param1)
            data1 = dist.pdf(data0)
            data2 = dist.cdf(data0)

        self.emitter.emit([data0, data1, data2])
Example 18
def exp_break_points(no_intervals, coal_rate, offset=0.0):
    """
    Return break points for equal-probability intervals of the exponential
    distribution, given the coalescent rate
    :param no_intervals: The number of intervals to make
    :param coal_rate: The coalescent rate
    :param offset: An offset added to all break points
    :return: A list of break points
    """
    points = expon.ppf([float(i) / no_intervals for i in xrange(no_intervals)])
    return points / coal_rate + offset
Example 19
    def test_cont_int_nonspatial(self):
        """Test continuous removal with non-spatial epidemic."""

        params = IndividualSimulator.code.config.read_config_file(
            filename=self.config_filename)
        params['InterventionScripts'] = [ContinuousIntervention]

        simulator = IndividualSimulator.Simulator(params=params)
        simulator.setup()
        simulator.initialise()
        all_hosts, all_cells, run_params = simulator.run_epidemic()

        waiting_times = [host.trans_times[1][0] for host in all_hosts]

        scale = 1.0 / self._full_lambda
        x = np.linspace(expon.ppf(0.001, scale=scale),
                        expon.ppf(0.999, scale=scale),
                        num=100)

        plt.style.use("ggplot")

        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.hist(waiting_times, normed=True, bins=100, color="blue", alpha=0.3)
        ax.plot(x,
                expon.pdf(x, scale=scale),
                'r--',
                lw=2,
                label='Exponential pdf')
        ax.set_xlabel("Waiting Time")
        ax.set_ylabel("Frequency")
        ax.set_title("Continuous Intervention Test Results")
        fig.savefig(os.path.join("testing", "ContInterventionHist.png"))

        ks_stat, pval = kstest(waiting_times, expon.cdf, args=(0, scale))
        self.assertGreater(
            pval,
            0.1,
            msg=
            "Waiting Time distribution significantly different from Exponential."
        )
Example 20
def prepare_model_parameters(
        parameters: Dict[str,
                         FloatOrDistVar], data: DataFrame, beta_fun, splines,
        spline_power) -> Tuple[Dict[str, FloatLike], Dict[str, NormalDistVar]]:
    """Prepares model input parameters and returns independent and dependent parameters

    Also shifts back simulation to start with only exposed people.
    """

    # Set up fit parameters
    ## Dependent parameters which will be fitted
    pp = {key: val for key, val in parameters.items() if isinstance(val, GVar)}
    ## Independent model meta parameters
    xx = {key: val for key, val in parameters.items() if key not in pp}

    # This part ensures that the simulation starts with only exposed persons
    ## E.g., we shift the simulation backwards such that exposed people start to
    ## become infected
    xx["offset"] = int(expon.ppf(0.99, 1 / pp["incubation_days"].mean)
                       )  # Enough time for 95% of exposed to become infected
    # pp["logistic_x0"] += xx["offset"]
    xx['beta_fun'] = beta_fun
    xx['knots'] = splines
    xx['spline_power'] = spline_power

    ## Store the actual first day and the actual last day
    xx["day0"] = data.index.min()
    xx["day-1"] = data.index.max()

    ## And start earlier in time
    xx["dates"] = date_range(xx["day0"] - timedelta(xx["offset"]),
                             freq="D",
                             periods=xx["offset"]).union(data.index)

    # initialize the spline parameters on the flexible beta
    if xx['beta_fun'] == "flexible_beta":
        pp['beta_splines'] = gvar(
            [pp['pen_beta'].mean for i in range(len(xx['knots']))],
            [pp['pen_beta'].sdev for i in range(len(xx['knots']))])
        pp.pop("pen_beta")
        pp.pop('logistic_k')
        pp.pop('logistic_x0')
        pp.pop('logistic_L')
    ## Thus, all compartments but exposed and susceptible are 0
    for key in ["infected", "recovered", "icu", "vent", "hospital"]:
        xx[f"initial_{key}"] = 0

    pp["initial_exposed"] = (xx["n_hosp"] / xx["market_share"] /
                             pp["hospital_probability"])
    xx["initial_susceptible"] -= pp["initial_exposed"].mean

    return xx, pp
Example 21
 def Amount(Rn, Dist, Coef):
     if Dist == "exp":
         GenP = expon.ppf(Rn, Coef[0], Coef[1])
     elif Dist == "gamma":
         GenP = gamma.ppf(Rn, Coef[0], Coef[1], Coef[2])
     elif Dist == "weibull":
         GenP = weibull_min.ppf(Rn, Coef[0], Coef[1], Coef[2])
     elif Dist == "lognorm":
         GenP = np.exp(norm.ppf(Rn, Coef[0],
                                Coef[1]))  # use norm to generate lognorm
     #elif Dist == "pearson3":
     #    GenP = pearson3.ppf(Rn,coef[0],coef[1],coef[2])
     return GenP
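# Amount maps a uniform random number Rn to a value through the ppf (inverse
# CDF) of the chosen family, with Coef holding scipy's positional parameters
# (e.g. loc and scale for "exp"). A hypothetical call, treating Amount as a
# free function:
import numpy as np
from scipy.stats import expon, gamma, weibull_min, norm

print(Amount(0.5, "exp", [0.0, 3.0]))  # median of Exp(scale=3) = 3 ln 2 ~ 2.079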
Example 22
    def get_next_on(self, current_time):
        max_rate = self.get_max_rate()
        exp_scale = 1 / max_rate
        next_on = current_time

        while True:
            prob = random.random()
            next_on += expon.ppf(prob, scale=exp_scale)
            next_on_in_day = next_on % (24*60)
            p_accept = random.random()
            if p_accept <= self.get_rate(next_on_in_day) / max_rate:
                break

        return int(round(next_on))
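# get_next_on is a thinning sampler for a time-varying rate: candidate gaps
# are drawn from an exponential at the maximum rate via inverse transform
# (expon.ppf of a uniform draw) and accepted with probability
# rate(t) / max_rate. A self-contained sketch with a hypothetical rate:
import random
from scipy.stats import expon

def next_event(current_time, rate, max_rate):
    t = current_time
    while True:
        t += expon.ppf(random.random(), scale=1 / max_rate)
        if random.random() <= rate(t) / max_rate:
            return t

print(next_event(0.0, lambda t: 0.5 + 0.4 * (t % 1.0), 0.9))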
Example 23
     def priortrans_spec(self,upars):
     
          # calculate transformation from prior volume to parameter for all modeled parameters

          outdict = {}

          for namepar in ['Teff','log(g)','[Fe/H]','[a/Fe]','Vrad','Vrot','Inst_R','CarbonScale']:
               if namepar in upars.keys():
                    upars_i = upars[namepar]
                    if namepar in self.priordict['uniform'].keys():
                         par_i = (
                              (max(self.priordict['uniform'][namepar])-min(self.priordict['uniform'][namepar]))*upars_i + 
                              min(self.priordict['uniform'][namepar])
                              )
                    elif namepar in self.priordict['gaussian'].keys():
                         par_i = norm.ppf(upars_i,loc=self.priordict['gaussian'][namepar][0],scale=self.priordict['gaussian'][namepar][1])

                    elif namepar in self.priordict['tgaussian'].keys():
                         a = (self.priordict['tgaussian'][namepar][0] - self.priordict['tgaussian'][namepar][2]) / self.priordict['tgaussian'][namepar][3]
                         b = (self.priordict['tgaussian'][namepar][1] - self.priordict['tgaussian'][namepar][2]) / self.priordict['tgaussian'][namepar][3]                     
                         par_i = truncnorm.ppf(upars_i,a,b,loc=self.priordict['tgaussian'][namepar][2],scale=self.priordict['tgaussian'][namepar][3])
                         if par_i == np.inf:
                              par_i = self.priordict['tgaussian'][namepar][1]
                    elif namepar in self.priordict['exp'].keys():
                         par_i = expon.ppf(upars_i,loc=self.priordict['exp'][namepar][0],scale=self.priordict['exp'][namepar][1])
                    elif namepar in self.priordict['texp'].keys():
                         b = (self.priordict['texp'][namepar][1] - self.priordict['texp'][namepar][0]) / self.priordict['texp'][namepar][2]
                         par_i = truncexpon.ppf(upars_i,b,loc=self.priordict['texp'][namepar][0],scale=self.priordict['texp'][namepar][2])
                         if par_i == np.inf:
                              par_i = self.priordict['texp'][namepar][1]
                    else:
                         par_i = (self.defaultpars[namepar][1]-self.defaultpars[namepar][0])*upars_i + self.defaultpars[namepar][0]

                    outdict[namepar] = par_i

          # if fitting a blaze function, do transformation for polycoef
          pcarr = [x_i for x_i in upars.keys() if 'pc' in x_i]
          if len(pcarr) > 0:
               for pc_i in pcarr:
                    if pc_i == 'pc_0':
                         uspec_scale = upars['pc_0']
                         outdict['pc_0'] = (1.25 - 0.75)*uspec_scale + 0.75
                    else:
                         pcind = int(pc_i.split('_')[-1])
                         pcmax = self.polycoefarr[pcind][0]+5.0*self.polycoefarr[pcind][1]
                         pcmin = self.polycoefarr[pcind][0]-5.0*self.polycoefarr[pcind][1]
                         outdict[pc_i] = (pcmax-pcmin)*upars[pc_i] + pcmin

          return outdict
Example 24
    def exponential_pdf_plot(lambd):
        """
        lambd = rate parameter
        """
        pdf = lambda x,lambd: lambd*np.exp(-lambd*x)
        mean = 1/lambd
        std = mean

        upper_x = expon.ppf(0.95, scale=1/lambd)  # 95th percentile of Exp(rate=lambd)
        X = np.arange(0.1,upper_x,upper_x/1000)
        fx = [pdf(x,lambd) for x in X]

        plt.plot(X,fx, label='mean={:.2f}, std={:.2f}'.format(mean,std));
        plt.title('Exponential probability distribution function')
        plt.xlabel('x')
        plt.ylabel('pdf(x)')
        plt.legend()
        plt.grid()
        plt.show();
Example 25
    def qq_plot(self, events, ax, scatter_params=None, line_params=None):
        self._check_fit()
        rescaled_intervals = self.rescale(events)
        percentiles = np.linspace(0, 100, len(rescaled_intervals))[1:-1]
        theoreticalp = expon.ppf(percentiles / 100)

        if scatter_params is None:
            scatter_params = {}
        if line_params is None:
            line_params = dict(c='black', linestyle='dashed')

        ax.scatter(
            np.sort(rescaled_intervals)[1:-1], theoreticalp, **scatter_params)

        xx = np.linspace(rescaled_intervals.min(), rescaled_intervals.max(),
                         100)
        ax.plot(xx, xx, **line_params)
        ax.set_xlabel('empirical quantiles')
        ax.set_ylabel('theoretical quantiles')
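# qq_plot compares rescaled inter-event intervals with Exp(1) quantiles:
# under the time-rescaling theorem a well-fitted model yields unit
# exponential intervals, so the scatter should hug the identity line.
# A standalone check with synthetic Exp(1) data:
import numpy as np
from scipy.stats import expon

rescaled = np.sort(np.random.exponential(1.0, size=200))
theoretical = expon.ppf(np.linspace(0, 100, 200)[1:-1] / 100)
print(np.corrcoef(rescaled[1:-1], theoretical)[0, 1])  # close to 1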
Example 27
def simulate(c, r, t, N):
    theta = 1.0 / c
    time_breakpoints = [[0.0]]
    for e in xrange(1, len(noBrPointsPerEpoch)):
        nbps = noBrPointsPerEpoch[e]
        time_breakpoints.append([
            (x * theta + t[e])
            for x in expon.ppf([float(i) / nbps for i in xrange(nbps)])
        ])

    M = []
    m = [0.0, 0.01, 0.0]
    for e in xrange(len(noBrPointsPerEpoch)):
        newM = identity(2)
        newM[:] = m[e]
        M.append(newM)

    print "  Matrices generated"
    pi, T, E = model.run(r, c, time_breakpoints, M)
    species = model.nleaves
    S = zeros(N, int)
    columns = [zeros(N, int) for _ in xrange(species)]
    E = asarray(E)

    S[0] = choose_weighted(pi)
    if do_progress_bar:
        print "  [" + ' ' * 100 + ']',
    for i in xrange(1, N):
        progress = 1.0 * i / N
        if do_progress_bar:
            print "\r  [" + '=' * int(progress * 100) + ' ' * int(
                (1.0 - progress) * 100) + ']',
        col = choose_weighted(E[S[i - 1], :])
        for ci, cv in index_to_cols(col, species):
            columns[ci][i - 1] = cv
        S[i] = choose_weighted(T[:, S[i - 1]])

    if do_progress_bar:
        print "\r" + ' ' * 104, "\r",

    return columns
Example 29
def main(A, number_of_rounds):

    tab = np.zeros(number_of_rounds)
    #tab_coop_level= np.zeros(number_of_rounds)
    t = [expon.ppf(0.10 * i) for i in range(10)]
    le = len(t)
    count_c = np.zeros((number_of_generations, le))
    proportion_c = np.zeros((number_of_generations, le))
    count = np.zeros((number_of_generations, le))
    W = complete_game(A)
    for i in range(number_of_generations):
        for l in range(le - 1):
            for j in range(Z):
                if (t[l] <= A[1][j] <= t[l + 1]):
                    count[i][l] += 1
                    if (A[0][j] == 1):
                        count_c[i][l] += 1
        count[i][le - 1] = Z - sum(count[i][k] for k in range(le - 1))
        for j in range(Z):
            if (A[1][j] > t[le - 1]):
                if (A[0][j] == 1):
                    count_c[i][le - 1] += 1
        tab[i] = number_of_cooperators(A[0])
        s = np.sum(count_c[i])
        for k in range(le):
            proportion_c[i][k] = count_c[i][k] / s
        #tab_coop_level[i]= coop_level
        #print(i,tab[i])
        #C=[ W[j] for j in range(len(A[0])) if (A[0][j]==1) ]
        #D=[ W[j] for j in range(len(A[0])) if (A[0][j]==0) ]
        #print(W[0],W[1],W[2])
        #print(i,"c:"+ str(sum(C)/number_of_cooperators(A[0])),"d:"+str(sum(D)/(len(A[0])-number_of_cooperators(A[0]))))
        #print(i,coop_level)
        B = evolution(A, W)
        #print(B[0]!= A[0])
        #if (B[0] != A[0]):
        A = B
        W = complete_game(A)
    return tab, proportion_c, count / Z
Example 30
def exp_break_points(no_intervals, coal_rate, offset=0.0):
    """Compute break points for equal probably intervals given the
    coalescence rate. The optional parameter "offset" is added to all
    the break points and can be used for e.g. a speciation time.


    :param no_intervals: Number of intervals desired. The number of
    points will match this.
    :type no_intervals: int

    :param coal_rate: The coalescence rate used for specifying the
    exponential distribution the break points are taken from.
    :type coal_rate: float

    :param offset: This offset is added to all break points.
    :type offset: float

    :returns: a list of no_intervals break points
    :rtype: list
    """
    points = expon.ppf([float(i) / no_intervals for i in xrange(no_intervals)])
    return points / coal_rate + offset
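# exp_break_points places the break points at the i/n quantiles of Exp(1)
# and rescales by the coalescence rate, so every interval carries equal
# probability mass. A Python 3 rendering of the xrange-based call above:
import numpy as np
from scipy.stats import expon

no_intervals, coal_rate = 4, 2.0
points = expon.ppf(np.arange(no_intervals) / no_intervals) / coal_rate
print(points)  # approximately [0., 0.1438, 0.3466, 0.6931]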
Example 31
 def _get_diff_cell_simulation_times(self):
     """
     Need to simulate for a period prior to the sample time.
     Use a specified percentile of the exponential distribution of stratification times
     to find the minimum time to simulate.
     For the Moran models, find the last simulation step prior to this minimum time before the sample point.
     For the Branching process, the times can be compared to the time of the next progenitor division
     """
     if self.stratification_sim_percentile < 1:
         min_diff_sim_time = expon.ppf(self.stratification_sim_percentile, scale=1 / self.gamma)
         if self.algorithm == 'Branching':
             diff_sim_starts = self.times - min_diff_sim_time
             diff_sim_starts[diff_sim_starts < 0] = 0
             self.diff_cell_sim_switches = self._merge_time_intervals(diff_sim_starts, self.times) + [np.inf]
         else:
             steps_per_unit_time = self.simulation_steps / self.max_time
             sim_steps_for_diff_sims = min_diff_sim_time * steps_per_unit_time
             sim_steps_to_start_diff = (self.sample_points - sim_steps_for_diff_sims).astype(int)
             sim_steps_to_start_diff[sim_steps_to_start_diff < 0] = 0
             self.diff_cell_sim_switches = self._merge_time_intervals(sim_steps_to_start_diff, self.sample_points)
     else:
         self.diff_cell_sim_switches = [0, np.inf]
Example 32
 def ppf(self, dist, p):
     return expon.ppf(p, *self._get_params(dist))
Example 33
def expon_equally_spaced(mean_interval: float, _min: float,
                         n: int) -> NDArray[1, float]:
    intervals = expon.ppf(
        np.linspace(0.01, 0.99, n), scale=mean_interval, loc=_min) - _min
    return np.random.choice(intervals, size=len(intervals), replace=False)
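# Note that in expon_equally_spaced the loc=_min shift and the trailing
# "- _min" cancel, leaving n exponential quantiles between the 1st and 99th
# percentiles at scale mean_interval; np.random.choice without replacement
# over all elements is simply a permutation. Equivalent sketch:
import numpy as np
from scipy.stats import expon

intervals = expon.ppf(np.linspace(0.01, 0.99, 5), scale=10.0)
print(np.random.permutation(intervals))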
Example 34
    return nparray


if __name__ == '__main__':
    n = 10000
    seed = 1234
    rhoMatrix = rho()
    myArrayX = []
    myArrayY = []
    for i in range(0, n):
        MVN, seed = s1.MVNgen(2, rhoMatrix, seed)
        MVN = norm.cdf(MVN)
        myArrayX.append(MVN[0])
        myArrayY.append(MVN[1])

    myArrayX = expon.ppf(myArrayX)
    Xindex = rankdata(myArrayX)
    Yindex = rankdata(myArrayY)

    xmean = np.mean(myArrayX)
    ymean = np.mean(myArrayY)

    upper = 0
    for i in range(0, n):
        upper += (myArrayX[i]-xmean)*(myArrayY[i]-ymean)

    lowerleft = 0
    lowerright = 0
    for i in range(0, n):
        lowerleft += (myArrayX[i] - xmean)**2
        lowerright += (myArrayY[i] - ymean)**2
Example 35
v = []
for i in range(n):
	if r[i] == 0:
		temp = 0
	else:
		temp = ((-1) * math.log(r[i])) / avg
	v.append(temp)
print "The random variates are as follows\n"
for i in range(n):
	print str(i) + " element is " + str(v[i]) + "\n"


for i in range(len(v)):
	v[i] = math.floor(v[i])

#plt.hist(v, bins=50, histtype='stepfilled',  color='b', label='Gaussian')
#plt.hist(uniform_numbers, bins=20, histtype='stepfilled', normed=True, color='r', alpha=0.5, label='Uniform')

fig, ax = plt.subplots(1, 1)
x = np.linspace(expon.ppf(0.001),expon.ppf(0.999), 1000)
ax.plot(x, expon.pdf(x),'r-', lw=5, alpha=0.6, label='expon pdf')
plt.title("Histogram of RNG Data")
plt.xlabel("Random Variate")
plt.ylabel("Frequency")
#plt.axis([0,1,0,1])
ax.hist(v, normed=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()


Example 36
from scipy.stats import expon
from scipy.stats import norm

# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)
fig = pl.figure(figsize=(7,5), dpi=150)

Ns = [5, 10, 100] # number of samples for the distribution
scale = 1.

samples=10000

# variance of exponential distributions
x = np.linspace(expon.ppf(0.01), expon.ppf(0.995), 200)
rv = expon()

pl.subplot(2,2,1)
pl.plot(x, rv.pdf(x), 'k-')
ax = pl.gca()
ax.set_xlabel('$x$', fontsize=14)
ax.set_ylabel('$p(x)$', fontsize=14)
ax.set_title('Exponential Distribution', fontsize=14)
ax.text(2, 0.8, '$\mu = 1, \sigma^2 = 1$', fontsize=16)

x2 = np.linspace(0., scale+3.*np.sqrt(rv.var()), 100)

# draw n samples from an exponential distributions 10000 times
for i, n in enumerate(Ns):
  samps = np.random.exponential(scale, (n, samples))
Example 37
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--stages', type=int, required=False,
                        help='Number of stages of the distribution')
    parser.add_argument('-l', '--lambdap', type=float, required=True,
                        nargs='+',
                        help='Lambda parameter of each distribution')
    parser.add_argument('-r', '--runs', type=int, required=True,
                        help='Runs to perform per simulation')
    parser.add_argument('-o', '--output', type=str, required=False,
                        help='Output file for the plot')
    parser.add_argument('-d', '--dist', type=str, required=True,
                        choices=['erlang', 'expon', 'hyperexp'],
                        help='Distribution to use for the simulation')
    parser.add_argument('--no-graph', required=False,
                        help='Suppress graphical output',
                        dest='graph', action='store_false')
    parser.add_argument('--graph', required=False,
                        help='Enable graphical output (use with [-o])',
                        dest='graph', action='store_true')
    parser.add_argument('-p', '--probability', required=False, type=float,
                        help='Probability for the hyperexponential dist.')
    parser.set_defaults(graph=True)
    args = parser.parse_args()
    # msg = '{3} distribution with {0} stages (lambda={1}) over {2} runs'
    # print msg.format(args.stages, args.lambdap, args.runs, args.dist)
    fig, ax = plt.subplots(1, 1)
    if args.dist == 'erlang':
        if args.stages <= 0:
            print 'Error: a valid number of stages is required'
            sys.exit(1)
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = erlang.stats(args.stages, scale=lambdap,
                                             moments='mvsk')
        x = np.linspace(erlang.ppf(0.00001, args.stages, scale=lambdap),
                        erlang.ppf(0.99999, args.stages, scale=lambdap),
                        num=1000)
        rv = erlang(args.stages, scale=lambdap)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Erlang PDF')
        # Generate random numbers with this distribution
        r = erlang.rvs(args.stages, scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
                label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
                                               np.sqrt(var)/mean)
    elif args.dist == 'expon':
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = expon.stats(scale=lambdap, moments='mvsk')
        x = np.linspace(expon.ppf(0.00001, scale=lambdap),
                        expon.ppf(0.99999, scale=lambdap),
                        num=1000)
        rv = expon(scale=lambdap)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='Exponential PDF')
        # Generate random numbers with this distribution
        r = expon.rvs(scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype='stepfilled', alpha=0.4,
                label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
                                               np.sqrt(var)/mean)
    elif args.dist == 'hyperexp':
        rv = hyperexp(args.probability, args.lambdap[0], args.lambdap[1])
        x = np.linspace(0.00000001, 10.99999, num=1000)
        ax.plot(x, rv.pdf(x), 'r-', lw=5, alpha=0.6, label='HyperExp PDF')
        # ax.plot(x, rv.cdf(x), 'b-', lw=2, alpha=0.6, label='HyperExp CDF')
        r = rv.rvs(size=args.runs)
        ax.hist(r, normed=True, bins=100, range=(0, 11),
                histtype='stepfilled', alpha=0.4, label='Experimental values')
        meanexp = np.mean(r)
        varexp = np.var(r)
        mean = rv.mean()
        var = rv.standard_dev()**2
        print 'Experimental mean: {0} Analytical mean: {1}'.format(meanexp,
                                                                   mean)
        print 'Sigma2_exp: {0} Sigma2_a: {1}'.format(varexp, var)
        print 'CoV_exp: {0} CoV_a: {1}'.format(np.sqrt(varexp)/meanexp,
                                               rv.CoV())
    if args.graph:
        ax.legend(loc='best', frameon=False)
        plt.show()
Example 38
"""
Generate simulated values from the following distributions
⋄ Exponential distribution
• Verify the results by comparing histograms with analytical
results and perform tests for distribution type.
"""
def expo(lam,U):
    res = (-np.log(U)/lam)
    return res


#Rate parameter
U = np.random.uniform(0.0,1.0,10000)
lam = 1
res = expo(lam,U)
x = np.linspace(expon.ppf(0),expon.ppf(0.99999),100)


#Histogram
plt.figure()
plt.hist(res,align='mid',color='tan',edgecolor='moccasin',bins=10,density=True,stacked=True)
xmin, xmax = plt.xlim()
plt.plot(x, expon.pdf(x),'g-', lw=2,alpha=0.6)
plt.title("Exponentially Distributed Histogram")
plt.xlabel("Classes")
plt.ylabel("Density")
plt.show()
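# Sanity check for the inverse-transform sampler above: the empirical mean of
# expo(lam, U) should approach the analytical mean 1 / lam.
print(np.mean(res), 1 / lam)  # both ~1.0 for lam = 1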
#%%
#---- Normal Distribution ----
"""
Generate simulated values from the following distributions
Example 39
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--stages", type=int, required=False, help="Etapas de la distribución")
    parser.add_argument(
        "-l", "--lambdap", type=float, required=True, nargs="+", help="Parámetro lambda de cada distribución"
    )
    parser.add_argument("-r", "--runs", type=int, required=True, help="Ejecuciones a realizar por cada simulación")
    parser.add_argument("-o", "--output", type=str, required=False, help="Archivo de salida para la grafica")
    parser.add_argument(
        "-d",
        "--dist",
        type=str,
        required=True,
        choices=["erlang", "expon", "hyperexp"],
        help="Distribución a emplear para la simulación",
    )
    parser.add_argument(
        "--no-graph", required=False, help="Suprime la salida como gráfica", dest="graph", action="store_false"
    )
    parser.add_argument(
        "--graph",
        required=False,
        help="Habilita la salida como gráfica (usar con [-o])",
        dest="graph",
        action="store_true",
    )
    parser.set_defaults(graph=True)
    args = parser.parse_args()
    msg = "Distribución {3} con {0} etapas (lambda={1}) en {2} ejecuciones"
    print msg.format(args.stages, args.lambdap, args.runs, args.dist)
    fig, ax = plt.subplots(1, 1)
    if args.dist in "erlang":
        if args.stages <= 0:
            print "Error: se necesita un número válido de etapas"
            sys.exit(1)
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = erlang.stats(args.stages, scale=lambdap, moments="mvsk")
        print "E[X]={0}, var(X)={1}".format(mean, var)
        x = np.linspace(
            erlang.ppf(0.00001, args.stages, scale=lambdap), erlang.ppf(0.99999, args.stages, scale=lambdap), num=1000
        )
        rv = erlang(args.stages, scale=lambdap)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="Erlang PDF")
        # Generate random numbers with this distribution
        r = erlang.rvs(args.stages, scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        print "Mediaexperimental: {0} MediaAnalitica: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)
    elif args.dist in "expon":
        lambdap = args.lambdap[0]
        mean, var, skew, kurt = expon.stats(scale=lambdap, moments="mvsk")
        print "E[X]={0}, var(X)={1}".format(mean, var)
        x = np.linspace(expon.ppf(0.00001, scale=lambdap), expon.ppf(0.99999, scale=lambdap), num=1000)
        rv = expon(scale=lambdap)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="Exponential PDF")
        # Generate random numbers with this distribution
        r = expon.rvs(scale=lambdap, size=args.runs)
        ax.hist(r, bins=20, normed=True, histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        print "Mediaexperimental: {0} MediaAnalitica: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)
    elif args.dist in "hyperexp":
        print "HyperExponential RV"
        rv = hyperexp(0.1, args.lambdap[0], args.lambdap[1])
        x = np.linspace(0.00000001, 10.99999, num=1000)
        ax.plot(x, rv.pdf(x), "r-", lw=5, alpha=0.6, label="HyperExp PDF")
        # ax.plot(x, rv.cdf(x), 'b-', lw=2, alpha=0.6, label='HyperExp CDF')
        r = rv.rvs(size=args.runs)
        ax.hist(r, normed=True, bins=100, range=(0, 11), histtype="stepfilled", alpha=0.2)
        meanexp = np.mean(r)
        varexp = np.var(r)
        mean = rv.mean()  # analytical moments; without these, mean/var below are undefined
        var = rv.standard_dev() ** 2
        print "Experimental mean: {0} Analytical mean: {1}".format(meanexp, mean)
        print "Sigma2_exp: {0} Sigma2_a: {1}".format(varexp, var)
        print "CoV_exp: {0} CoV_a: {1}".format(np.sqrt(varexp) / meanexp, np.sqrt(var) / mean)
    if args.graph:
        plt.show()