Example #1
# imports assumed by this snippet (rv_zero is a custom class defined elsewhere in the source)
import numpy as np
from scipy.stats import norm, uniform, triang

def get_prior(p, prior):
        if prior == 'normal':
            # TODO: control scale hyperparameter better. Here we take the chain_std_deviation*5 as the prior std_deviation
            rv_instance = norm(loc=p, scale=np.abs(0.1*p*5))
        elif prior == 'halfnormal':
            # TODO: control scale hyperparameter better. Here we take the chain_std_deviation*5 as the prior std_deviation
            rv_instance = norm(loc=p, scale=np.abs(0.1*p*5/2.0))
        elif prior == 'uniform':
            # TODO: control scale hyperparameter manually here, later we can update
            distance_frac = 1.0
            # hyperparameter is the distance to low, e.g.
            # hyperparameter=3.0 :  low:=p-(3.0*p), high:=p+(3.0*p)
            diff = abs(distance_frac*p)
            rv_instance = uniform(loc=p-diff, scale=2*diff)
        elif prior == 'triangular':
            distance_frac = 1.0
            # hyperparameter is the distance to low, e.g.
            # hyperparameter=3.0 :  low:=p-(3.0*p), high:=p+(3.0*p)
            diff = abs(distance_frac*p)
            # the maximum (controlled by c) is always in the center here
            rv_instance = triang(loc=p-diff, scale=2*diff, c=0.5)
        elif prior == 'halftriangular':
            distance_frac = 0.5 
            # hyperparameter is the distance to low, e.g.
            # hyperparameter=3.0 :  low:=p-(3.0*p), high:=p+(3.0*p)
            diff = abs(distance_frac*p)
            # the maximum (controlled by c) is always in the center here
            rv_instance = triang(loc=p-diff, scale=2*diff, c=0.5)
        elif prior == 'zero':
            rv_instance = rv_zero()
        else:
            raise ValueError('Invalid prior ({}) specified. Specify one of "normal", "halfnormal", "uniform", "triangular", "halftriangular", "zero"'.format(prior))
        return rv_instance
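A minimal usage sketch for get_prior (illustrative values; assumes the imports added above):

rv = get_prior(2.0, 'triangular')    # triangle centered on p=2.0, spanning p +/- |p|
print(rv.ppf([0.025, 0.5, 0.975]))   # central quantiles; the median equals p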
Example #2
def f3TruncNormRVSnp(parameters):
    N = parameters['N']
    target = parameters['target']
    rv1, rv2, rv3 = ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float)

    # if parameters['ncpu']:
    #     ncpu = parameters['ncpu']
    # else:
    #     ncpu = mp.cpu_count()
    #
    # pool = mp.Pool(ncpu)
    # workers = []
    if not parameters['distribution']:
        print('No distribution set...abort')
        exit(1)
    elif parameters['distribution'] == 'truncnorm':
        a1, b1 = (parameters['min_intrv1'] - parameters['mu1']) / parameters['sigma1'], (parameters['max_intrv1'] - parameters['mu1']) / parameters['sigma1']
        a2, b2 = (parameters['min_intrv2'] - parameters['mu2']) / parameters['sigma2'], (parameters['max_intrv2'] - parameters['mu2']) / parameters['sigma2']
        a3, b3 = (parameters['min_intrv3'] - parameters['mu3']) / parameters['sigma3'], (parameters['max_intrv3'] - parameters['mu3']) / parameters['sigma3']
        rv1 = truncnorm(a1, b1, loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = truncnorm(a2, b2, loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = truncnorm(a3, b3, loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'norm':
        rv1 = norm(loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = norm(loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = norm(loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'uniform':
        rv1 = uniform(loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = uniform(loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = uniform(loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'beta':
        rv1 = beta(a=parameters['min_intrv1'], b=parameters['max_intrv1'], loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
        rv2 = beta(a=parameters['min_intrv2'], b=parameters['max_intrv2'], loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
        rv3 = beta(a=parameters['min_intrv3'], b=parameters['max_intrv3'], loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'triang':
        rv1 = triang(loc=parameters['min_intrv1'], scale=parameters['max_intrv1'], c=parameters['mu1']).rvs(N)
        rv2 = triang(loc=parameters['min_intrv2'], scale=parameters['max_intrv2'], c=parameters['mu2']).rvs(N)
        rv3 = triang(loc=parameters['min_intrv3'], scale=parameters['max_intrv3'], c=parameters['mu3']).rvs(N)
    else:
        print('Distribution not recognized...abort')
        exit(1)

    if parameters['scaling']:
        #scale the values of Qs in the allowed range such that sum(Q_i) = A
        r = abs(parameters['Q1']) + abs(parameters['Q2']) + abs(parameters['Q3'])
        if r == 0.0:
            r = 1.

        # rounding the values, the sum could exceed A
        Q1 = abs(parameters['Q1']) * parameters['A'] / r
        Q2 = abs(parameters['Q2']) * parameters['A'] / r
        Q3 = parameters['A'] - Q1 - Q2
    else:
        # print "scaling = False"
        Q1 = parameters['Q1']
        Q2 = parameters['Q2']
        Q3 = parameters['Q3']

    return _f3(rv1, rv2, rv3, Q1, Q2, Q3, target)
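The scaling branch in isolation, as a standalone sketch with made-up numbers: the third quantity absorbs any rounding error so the three always sum exactly to A.

Q1_raw, Q2_raw, Q3_raw, A = 3.0, 5.0, 2.0, 100.0
r = abs(Q1_raw) + abs(Q2_raw) + abs(Q3_raw) or 1.0   # guard against an all-zero sum
Q1 = abs(Q1_raw) * A / r
Q2 = abs(Q2_raw) * A / r
Q3 = A - Q1 - Q2                                     # Q1 + Q2 + Q3 == A exactly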
Example #3
def greedy_allocation3(parameters):
    """
    Greedy heuristic for 3 supplier (the same as heu_allocation3 but with different parameters)
    Does not write on the file but returns the solution
    :param df: dataframe containing the data from the excel file
    :param parameters: parameters dict
    :return: write o the df and save on the file
    """
    if not parameters['distribution']:
        print('No distribution set...abort')
        exit(1)
    elif parameters['distribution'] == 'truncnorm':
        rv1 = truncnorm_custom(parameters['min_intrv1'], parameters['max_intrv1'], parameters['mu1'], parameters['sigma1'])
        rv2 = truncnorm_custom(parameters['min_intrv2'], parameters['max_intrv2'], parameters['mu2'], parameters['sigma2'])
        rv3 = truncnorm_custom(parameters['min_intrv3'], parameters['max_intrv3'], parameters['mu3'], parameters['sigma3'])
    elif parameters['distribution'] == 'norm':
        rv1 = norm(parameters['mu1'], parameters['sigma1'])
        rv2 = norm(parameters['mu2'], parameters['sigma2'])
        rv3 = norm(parameters['mu3'], parameters['sigma3'])
    elif parameters['distribution'] == 'uniform':
        rv1 = uniform(loc=parameters['mu1'], scale=parameters['sigma1'])
        rv2 = uniform(loc=parameters['mu2'], scale=parameters['sigma2'])
        rv3 = uniform(loc=parameters['mu3'], scale=parameters['sigma3'])
    elif parameters['distribution'] == 'beta':
        rv1 = beta(a=parameters['min_intrv1'], b=parameters['max_intrv1'], loc=parameters['mu1'], scale=parameters['sigma1'])
        rv2 = beta(a=parameters['min_intrv2'], b=parameters['max_intrv2'], loc=parameters['mu2'], scale=parameters['sigma2'])
        rv3 = beta(a=parameters['min_intrv3'], b=parameters['max_intrv3'], loc=parameters['mu3'], scale=parameters['sigma3'])
    elif parameters['distribution'] == 'triang':
        rv1 = triang(loc=parameters['min_intrv1'], scale=parameters['max_intrv1'], c=parameters['mu1'])
        rv2 = triang(loc=parameters['min_intrv2'], scale=parameters['max_intrv2'], c=parameters['mu2'])
        rv3 = triang(loc=parameters['min_intrv3'], scale=parameters['max_intrv3'], c=parameters['mu3'])
    else:
        print('Distribution not recognized...abort')
        exit(1)

    A = parameters['A']
    Q = {i: 0 for i in range(3)}

    while A > 0:
        best_probability = -1
        best_retailer = -1
        for n, r in enumerate([rv1, rv2, rv3]):
            p = 1 - r.cdf(Q[n]+1)
            if p > best_probability:
                best_probability = p
                best_retailer = n

        Q[best_retailer] += 1
        A -= 1

    parameters['Q1'] = Q[0]
    parameters['Q2'] = Q[1]
    parameters['Q3'] = Q[2]

    return {'Q1': Q[0],
            'Q2': Q[1],
            'Q3': Q[2],
            'PROB': f3TruncNormRVSnp(parameters)}
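A compact standalone sketch of the same greedy rule with illustrative normal demands: each unit goes to the retailer with the highest survival probability P(D > Q_n + 1).

from scipy.stats import norm

rvs = [norm(10, 2), norm(6, 1), norm(4, 1)]
Q, A = [0, 0, 0], 20
while A > 0:
    n = max(range(3), key=lambda i: 1 - rvs[i].cdf(Q[i] + 1))
    Q[n] += 1
    A -= 1
print(Q)   # allocation roughly proportional to the demand means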
Example #4
def create_rvs(cr_c_b, cf_c_b, cr_c_e, cf_c_e):
    cr_behavior = stats.triang(c=(cr_c_b - CR_LOW) / (CR_HIGH - CR_LOW),
                               loc=CR_LOW,
                               scale=CR_HIGH - CR_LOW)
    cf_behavior = stats.triang(c=(cf_c_b - CF_LOW) / (CF_HIGH - CF_LOW),
                               loc=CF_LOW,
                               scale=CF_HIGH - CF_LOW)
    cr_evaluation = stats.triang(c=(cr_c_e - CR_LOW) / (CR_HIGH - CR_LOW),
                                 loc=CR_LOW,
                                 scale=CR_HIGH - CR_LOW)
    cf_evaluation = stats.triang(c=(cf_c_e - CF_LOW) / (CF_HIGH - CF_LOW),
                                 loc=CF_LOW,
                                 scale=CF_HIGH - CF_LOW)
    return cr_behavior, cf_behavior, cr_evaluation, cf_evaluation
Example #5
    def setDistObj(self):
        """ create and freeze the distribution object from the stats module with the current scale, locationa nd shape parameters """
        if self.type == 'normal':
            self.distObj = stats.norm(loc=self.loc, scale=self.scale)

        elif self.type == 'lognormal':
            self.distObj = stats.lognorm(s=self.shape,
                                         loc=self.loc,
                                         scale=self.scale)

        elif self.type == 'weibull':
            self.distObj = stats.weibull_min(c=self.shape,
                                             loc=self.loc,
                                             scale=self.scale)

        elif self.type == 'exponential':
            self.distObj = stats.expon(loc=self.loc, scale=self.scale)

        elif self.type == 'triangular':
            self.distObj = stats.triang(c=self.shape,
                                        loc=self.loc,
                                        scale=self.scale)

        elif self.type == 'uniform':
            self.distObj = stats.uniform(loc=self.loc, scale=self.scale)

        elif self.type == 'gamma':
            self.distObj = stats.gamma(a=self.shape,
                                       loc=self.loc,
                                       scale=self.scale)

        #         elif self.type == 'constant':
        #             self.value = float(settings[self.name + '_' + 'value'])

        return
Example #6
def plot_triangular_fit(data,
                        fit_results,
                        title=None,
                        x_label=None,
                        x_range=None,
                        y_range=None,
                        fig_size=(6, 5),
                        bin_width=None,
                        filename=None):
    """
    :param data: (numpy.array) observations
    :param fit_results: dictionary with keys "c", "loc", "scale",
    :param title: title of the figure
    :param x_label: label to show on the x-axis of the histogram
    :param x_range: (tuple) x range
    :param y_range: (tuple) y range
        (the histogram shows the probability density so the upper value of y_range should be 1).
    :param fig_size: int, specify the figure size
    :param bin_width: bin width
    :param filename: filename to save the figure as
    """

    plot_fit_continuous(data=data,
                        dist=stat.triang(c=fit_results['c'],
                                         scale=fit_results['scale'],
                                         loc=fit_results['loc']),
                        label='Triangular',
                        bin_width=bin_width,
                        title=title,
                        x_label=x_label,
                        x_range=x_range,
                        y_range=y_range,
                        fig_size=fig_size,
                        filename=filename)
Example #7
def latin_sampler(locator, num_samples, variables, region):
    """
    This script creates a matrix of m x n samples using the latin hypercube sampler.
    for this, it uses the database of probability distribtutions stored in locator.get_uncertainty_db()
    it returns clean and normalized samples.

    :param locator: pointer to locator of files of CEA
    :param num_samples: number of samples to do
    :param variables: list of variables to sample
    :return:
        1. design: a matrix m x n with the samples where each feature is normalized from [0,1]
        2. design_norm: a matrix m x n with the samples where each feature is normalized from [0,1]
        3. pdf_list: a dataframe with properties of the probability density functions used in the exercise.

    """

    # get probability density function PDF of variables of interest
    variable_groups = ('ENVELOPE', 'INDOOR_COMFORT', 'INTERNAL_LOADS',
                       'SYSTEMS')
    database = pd.concat([
        pd.read_excel(locator.get_uncertainty_db(region), sheet_name=group)  # axis is not a read_excel argument
        for group in variable_groups
    ])
    pdf_list = database[database['name'].isin(variables)].set_index('name')

    # get number of variables
    num_vars = pdf_list.shape[0]  # alternatively use len(variables)

    # get design of experiments
    samples = latin_hypercube.lhs(num_vars,
                                  samples=num_samples,
                                  criterion='maximin')
    for i, variable in enumerate(variables):

        distribution = pdf_list.loc[variable, 'distribution']
        #sampling into lhs
        min = pdf_list.loc[variable, 'min']
        max = pdf_list.loc[variable, 'max']
        mu = pdf_list.loc[variable, 'mu']
        stdv = pdf_list.loc[variable, 'stdv']
        if distribution == 'triangular':
            loc = min
            scale = max - min
            c = (mu - min) / (max - min)
            samples[:, i] = triang(loc=loc, c=c, scale=scale).ppf(samples[:, i])
        elif distribution == 'normal':
            samples[:, i] = norm(loc=mu, scale=stdv).ppf(samples[:, i])
        elif distribution == 'boolean':  # converts a uniform (0-1) into True/False
            samples[:, i] = ma.make_mask(
                np.rint(uniform(loc=min, scale=max).ppf(samples[:, i])))
        else:  # assume it is uniform
            samples[:, i] = uniform(loc=min, scale=max).ppf(samples[:, i])

    min_max_scaler = preprocessing.MinMaxScaler(copy=True,
                                                feature_range=(0, 1))
    samples_norm = min_max_scaler.fit_transform(samples)

    return samples, samples_norm, pdf_list
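The core LHS-to-distribution step above, reduced to a self-contained sketch (plain uniform draws stand in for the latin-hypercube design):

import numpy as np
from scipy.stats import triang, norm

u = np.random.rand(5, 2)                              # stand-in for lhs(...) output in [0, 1]
col0 = triang(c=0.5, loc=10, scale=4).ppf(u[:, 0])    # triangular on [10, 14], mode 12
col1 = norm(loc=20, scale=3).ppf(u[:, 1])             # normal with mu=20, stdv=3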
Example #8
    def __init__(self, minVal, avgVal, maxVal):
        self.minVal = minVal
        self.avgVal = avgVal
        self.maxVal = maxVal
        self.c = (avgVal - minVal) / (maxVal - minVal)
        self.loc = minVal
        self.scale = maxVal - minVal
        self.triangFunc = triang(self.c, loc=self.loc, scale=self.scale)
Example #9
def draw_triangle_hist(a, b, c):
    scale = float(b - a)
    t = triang(float(c - a) / scale, loc=float(a), scale=scale)
    x = np.linspace(60, 100, 10000)
    h = plt.plot(x,
                 t.pdf(x),
                 label='$a=%d$, $b=%d$, $c=%d$' % (a, b, c),
                 linewidth=2)
Example #10
def greedy_allocation(parameters):
    """
    Greedy heuristic for 3 supplier (the same as heu_allocation3 but with different parameters)
    Does not write on the file but returns the solution
    :param df: dataframe containing the data from the excel file
    :param parameters: parameters dict
    :return: write o the df and save on the file
    """
    # Number of retailers
    R = parameters['retailers']

    if not parameters['distribution']:
        print('No distribution set...abort')
        exit(1)
    # elif parameters['distribution'] == 'truncnorm':
    #     rvs = [truncnorm_custom(parameters['min_intrv{}'.format(i)],
    #                             parameters['max_intrv{}'.format(i)],
    #                             parameters['mu{}'.format(i)],
    #                             parameters['sigma{}'.format(i)]) for i in xrange(1, R+1)]

    elif parameters['distribution'] == 'norm':
        rvs = [norm(parameters['mu{}'.format(i)],
                    parameters['sigma{}'.format(i)]) for i in range(1, R+1)]

    elif parameters['distribution'] == 'uniform':
        rvs = [uniform(loc=parameters['mu{}'.format(i)],
                       scale=parameters['sigma{}'.format(i)]) for i in range(1, R+1)]

    elif parameters['distribution'] == 'triang':
        rvs = [triang(loc=parameters['min_intrv{}'.format(i)],
                      scale=parameters['max_intrv{}'.format(i)],
                      c=parameters['mu{}'.format(i)]) for i in range(1, R+1)]

    else:
        print('Distribution not recognized...abort')
        exit(1)

    A = parameters['A']
    Q = {i: 0 for i in range(R)}

    while A > 0:
        best_probability = -1
        best_retailer = -1
        for n, r in enumerate(rvs):
            p = 1 - r.cdf(Q[n]+1)
            if p > best_probability:
                best_probability = p
                best_retailer = n

        Q[best_retailer] += 1
        A -= 1

    for i in range(1, R+1):
        parameters['Q{}'.format(i)] = Q[i-1]

    ret = {'Q{}'.format(i): Q[i-1] for i in range(1, R+1)}
    ret['PROB'] = integral(parameters)
    return ret
Example #11
def random_weight():
    # TODO: could this use a single triangle instead?
    if np.random.rand()<0.6:
        #unif from -18.5 to 15
        x = sst.uniform(-18.5,15+18.5).rvs()
    else:
        #triangle from 15 to 15+45.98
        x = sst.triang(c=0,loc=15,scale=45.98).rvs()
    return min(max(x,1.),50.)
Example #12
 def _scipy_triangular(self, low, high, peak):
     # Scipy triangular specifies a triangular distribution
     # from loc to loc + scale, with loc + c * scale as the
     # peak, giving:
     # loc = low, loc + scale = high, loc + c * scale = peak.
     # We invert this mapping here.
     return stats.triang(c=(peak - low) / (high - low),
                         loc=low,
                         scale=(high - low))
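A quick numeric check of the loc/scale/c mapping described in the comment above (standalone sketch, made-up bounds):

import numpy as np
from scipy import stats

low, peak, high = 1.0, 3.0, 7.0
rv = stats.triang(c=(peak - low) / (high - low), loc=low, scale=high - low)
assert np.isclose(rv.ppf(0.0), low) and np.isclose(rv.ppf(1.0), high)
xs = np.linspace(low, high, 1001)
assert abs(xs[np.argmax(rv.pdf(xs))] - peak) < 0.01   # the pdf peaks at `peak`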
Example #13
 def __init__(self, lower, upper, mode):
     c = (mode - lower) / (upper - lower)
     self._rv = stats.triang(c, loc=lower, scale=upper - lower)
     self._kw = dict(loc=lower,
                     scale=upper - lower,
                     c=c,  # the scipy shape parameter (not the raw mode)
                     mode=mode,
                     lower=lower,
                     upper=upper)
Example #14
 def _scipy_triangular(self, low, high, peak):
   # Scipy triangular specifies a triangular distribution
   # from loc to loc + scale, with loc + c * scale as the
   # peak, giving:
   # loc = low, loc + scale = high, loc + c * scale = peak.
   # We invert this mapping here.
   return stats.triang(
       c=(peak - low)/(high - low),
       loc=low,
       scale=(high - low))
Example #15
def triangle(min, mode, max):  # @ReservedAssignment
    # correct ordering if necessary
    if min > max:
        tmp = min
        min = max
        max = tmp

    scale = max - min
    if scale == 0:
        raise PygcamMcsUserError("Scale of triangle distribution is zero")

    c = (mode - min) / scale  # central value (mode) of the triangle
    return triang(c, loc=min, scale=scale)
Example #16
def Tri(a, b, c, tag=None):
    """
    A triangular random variate
    
    Parameters
    ----------
    a : scalar
        Lower bound of the distribution support
    b : scalar
        Upper bound of the distribution support
    c : scalar
        The location of the triangle's peak (a <= c <= b)
    """
    assert a<=c<=b, 'peak must lie in between low and high'
    return uv(rv=ss.triang(c, loc=a, scale=b-a), tag=tag)
Example #17
def Tri(a, b, c, tag=None):
    """
    A triangular random variate
    
    Parameters
    ----------
    a : scalar
        Lower bound of the distribution support
    b : scalar
        Upper bound of the distribution support
    c : scalar
        The location of the triangle's peak (a <= c <= b)
    """
    assert a <= c <= b, 'peak must lie in between low and high'
    return uv(rv=ss.triang(c, loc=a, scale=b - a), tag=tag)
Example #18
def generateToy():

  np.random.seed(12345)

  fig,ax = plt.subplots(4,sharex=True)
  #fig,ax = plt.subplots(2)

  powerlaw_arg = 2
  triang_arg=0.7
  n_samples = 500
  #generate simple line with slope 1, from 0 to 1
  frozen_powerlaw = powerlaw(powerlaw_arg) #powerlaw.pdf(x, a) = a * x**(a-1)
  #generate triangle with peak at 0.7
  frozen_triangle = triang(triang_arg) #up-sloping line from loc to (loc + c*scale) and then downsloping for (loc + c*scale) to (loc+scale).
  frozen_uniform = uniform(0.2,0.5)
  frozen_uniform2 = uniform(0.3,0.2)

  x = np.linspace(0,1)

  signal = np.random.normal(0.5, 0.1, n_samples//2)

  data_frame = pd.DataFrame({'powerlaw':powerlaw.rvs(powerlaw_arg,size=n_samples),
    'triangle':triang.rvs(triang_arg,size=n_samples),
    'uniform':np.concatenate((uniform.rvs(0.2,0.5,size=n_samples//2),uniform.rvs(0.3,0.2,size=n_samples//2))),
    'powerlaw_signal':np.concatenate((powerlaw.rvs(powerlaw_arg,size=n_samples//2),signal))})

  ax[0].plot(x, frozen_powerlaw.pdf(x), 'k-', lw=2, label='powerlaw pdf')
  hist(data_frame['powerlaw'],bins=100,normed=True,histtype='stepfilled',alpha=0.2,label='100 bins',ax=ax[0])
  #hist(data_frame['powerlaw'],bins='blocks',fitness='poly_events',normed=True,histtype='stepfilled',alpha=0.2,label='b blocks',ax=ax[0])
  ax[0].legend(loc = 'best')

  ax[1].plot(x, frozen_triangle.pdf(x), 'k-', lw=2, label='triangle pdf')
  hist(data_frame['triangle'],bins=100,normed=True,histtype='stepfilled',alpha=0.2,label='100 bins',ax=ax[1])
  hist(data_frame['triangle'],bins='blocks',fitness='poly_events',normed=True,histtype='stepfilled',alpha=0.2,label='b blocks',ax=ax[1])
  ax[1].legend(loc = 'best')

  #ax[0].plot(x, frozen_powerlaw.pdf(x), 'k-', lw=2, label='powerlaw pdf')
  hist(data_frame['powerlaw_signal'],bins=100,normed=True,histtype='stepfilled',alpha=0.2,label='100 bins',ax=ax[2])
  #hist(data_frame['powerlaw_signal'],bins='blocks',normed=True,histtype='stepfilled',alpha=0.2,label='b blocks',ax=ax[2])
  ax[2].legend(loc = 'best')

  ax[3].plot(x, frozen_uniform.pdf(x)+frozen_uniform2.pdf(x), 'k-', lw=2, label='uniform pdf')
  hist(data_frame['uniform'],bins=100,normed=True,histtype='stepfilled',alpha=0.2,label='100 bins',ax=ax[3])
  #hist(data_frame['uniform'],bins='blocks',fitness = 'poly_events',p0=0.05,normed=True,histtype='stepfilled',alpha=0.2,label='b blocks',ax=ax[3])
  ax[3].legend(loc = 'best')

  plt.show()
  fig.savefig('plots/toy_plots.png')
Example #19
    def rand(self, alpha):
        """Transforms a random number between 0 and 1 to valid value according to the distribution of probability of the parameter"""

        if self.distrib == DistributionType.FIXED :
            return self.default
        
        elif self.distrib == DistributionType.LINEAR:
            return self.min + alpha * (self.max - self.min)

        else :
            if not hasattr(self, "_distrib"):

                if self.distrib == DistributionType.TRIANGLE:
                    scale = self.max - self.min
                    c = (self.default - self.min) / scale
                    self._distrib = triang(c, loc=self.min, scale=scale)

                elif self.distrib == DistributionType.NORMAL:

                    if self.min :
                        # Truncated normal
                        # truncnorm bounds are standardized by loc and scale
                        self._distrib = truncnorm(
                            (self.min - self.default) / self.std,
                            (self.max - self.default) / self.std,
                            loc=self.default,
                            scale=self.std)
                    else :
                        # Normal
                        self._distrib = norm(
                            loc=self.default,
                            scale=self.std)

                elif self.distrib == DistributionType.LOGNORMAL:

                    self._distrib = lognorm(self.default, self.std)

                elif self.distrib == DistributionType.BETA:
                    self._distrib = beta(
                        self.a,
                        self.b,
                        loc=self.default,
                        scale=self.std)

                else:
                    raise Exception("Unkown distribution type " + self.distrib)


            return self._distrib.ppf(alpha)
Example #20
def Triangular(low, peak, high, tag=None):
    """
    A triangular random variate
    
    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support
    peak : scalar
        The location of the triangle's peak (low <= peak <= high)
    high : scalar
        Upper bound of the distribution support
    """
    assert low<=peak<=high, 'Triangular "peak" must lie between "low" and "high"'
    low, peak, high = [float(x) for x in [low, peak, high]]
    return uv(ss.triang((1.0*peak - low)/(high - low), loc=low, 
        scale=(high - low)), tag=tag)
Example #21
def get_prior_same_oom(p, prior):
        if prior == 'normal':
            # TODO: control scale hyperparameter better. Here we take the chain_std_deviation*5 as the prior std_deviation
            rv_instance = norm(loc=p, scale=np.abs(p/3))
        elif prior == 'uniform':
            # TODO: control scale hyperparameter manually here, later we can update
            pair = (p*0.1, p*10)
            # scipy's uniform spans [loc, loc + scale], so scale must be the width
            rv_instance = uniform(loc=min(pair), scale=max(pair) - min(pair))
        elif prior == 'triangular':
            pair = (p*0.1, p*10)
            # the maximum (controlled by c) is always in the center here
            rv_instance = triang(loc=min(pair), scale=max(pair) - min(pair), c=0.5)
        elif prior == 'zero':
            rv_instance = rv_zero()
        else:
            raise ValueError('Invalid prior ({}) specified. Specify one of "normal", "uniform", "triangular", "zero"'.format(prior))
        return rv_instance
Example #22
    def pdfrange(pdf, param1, param2=None):
        # apply this function for initializing the prior bounds
        # parameters need to be passed in as floats

        if pdf == Distribution.NORMAL:
            # norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
            mu = param1
            sigma = param2
            rv = norm(loc=mu, scale=sigma)
        elif pdf == Distribution.LOGNORMAL:
            # lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
            mu = param1  # scale parameter "exp(mu)"
            sigma = param2  # shape parameter "s"
            rv = lognorm(sigma, loc=0, scale=np.exp(mu))
        elif pdf == Distribution.TRIANGLE:
            # triangular dist represented as upsloping line from loc to (loc+c*scale)
            # and then downsloping line from (loc+c*scale) to (loc+scale)
            c = param1  # shape parameter "mode"
            scale = param2  # scale parameter "width"
            loc = c - scale / 2  # location parameter "start"
            rv = triang(c, loc=loc, scale=scale)
        elif pdf == Distribution.GAMMA:
            # gamma.pdf(x, a) = b**a * x**(a-1) * exp(-b*x) / gamma(a)
            a = param1  # shape parameter "alpha" > 0
            b = param2  # rate parameter "beta" > 0
            rv = gamma(a, loc=0, scale=1.0 / b)
        elif pdf == Distribution.BETA:
            # beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(b-1)
            a = param1  # shape parameter "alpha" > 0
            b = param2  # shape parameter "beta" > 0
            rv = beta(a, b, loc=0, scale=1)
        elif pdf == Distribution.EXPONENTIAL:
            # expon.pdf(x) = b * exp(- b*x)
            b = param1  # rate parameter "lambda" > 0
            rv = expon(loc=0, scale=1.0 / b)
        elif pdf == Distribution.WEIBULL:
            # weibull_min.pdf(x, c) = c * x**(c-1) * exp(-x**c)
            scale = param1  # scale parameter "lambda" > 0
            c = param2  # shape parameter "k" > 0
            rv = weibull_min(c, loc=0, scale=scale)
        else:
            return None

        return rv.interval(0.99)
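How the final interval(0.99) call behaves, sketched on a standard normal (values approximate):

from scipy.stats import norm

lo, hi = norm(loc=0, scale=1).interval(0.99)
print(lo, hi)   # about -2.576 and 2.576: the central 99% range used as prior bounds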
Example #23
    def take_samples(self):

        if self.dist == 'Normal':
            mean = self.param[0]
            std = self.param[1]
            if len(self.param) == 3:
                minum = self.param[2]
                maxum = numpy.inf
                # truncnorm expects bounds standardized by loc and scale
                a = (minum - mean) / std
                b = (maxum - mean) / std
                self.values = st.truncnorm(a=a, b=b, loc=mean, scale=std).ppf(self.lhs)
            elif len(self.param) == 4:
                minum = self.param[2]
                maxum = self.param[3]
                a = (minum - mean) / std
                b = (maxum - mean) / std
                self.values = st.truncnorm(a=a, b=b, loc=mean, scale=std).ppf(self.lhs)
            else:
                self.values = st.norm(loc=mean, scale=std).ppf(self.lhs)
        elif self.dist == 'Uniform':
            a = self.param[0]
            b = self.param[1]
            self.values = st.uniform(loc=a, scale=b).ppf(self.lhs)
        elif self.dist == 'Triangle':
            a = self.param[0]
            b = self.param[1] - self.param[0]
            c = (self.param[2] - a) / b
            self.values = st.triang(c, loc=a, scale=b).ppf(self.lhs)
        elif self.dist == 'Log-Normal':
            m_y = self.param[0]
            sig_y = self.param[1]
            s, scale = lognorm_to_scipyinput(m_y, sig_y)
            self.values = st.lognorm(s=s, scale=scale).ppf(self.lhs)
        elif self.dist == 'Log-Uniform':
            a = self.param[0]
            b = self.param[1]
            self.values = Loguniform(loc=a, scale=b).ppf(self.lhs)
        elif self.dist == 'Beta':
            alpha = self.param[0]
            beta = self.param[1]
            self.values = st.beta(alpha, beta).ppf(self.lhs)
            self.values = sorted(self.values)
        elif self.dist == 'Weibull':
            lamb = self.param[0]
            k = self.param[1]
            self.values = st.weibull_min(c=k, scale=lamb).ppf(self.lhs)
        else:
            print('There is an unknown distribution called', '\'' + self.dist + '\'.\nRefer to page 13, Table 1 for the usable distributions.')
Example #24
def fit_triang(data, x_label, fixed_location=0, figure_size=5):
    """
    :param data: (numpy.array) observations
    :param x_label: label to show on the x-axis of the histogram
    :param fixed_location: fixed location
    :param figure_size: int, specify the figure size
    :returns: dictionary with keys "c", "loc", "scale", and "AIC"
    """
    # The triangular distribution can be represented with an up-sloping line from
    # loc to (loc + c*scale) and then downsloping for (loc + c*scale) to (loc+scale).

    # plot histogram
    fig, ax = plt.subplots(1, 1, figsize=(figure_size + 1, figure_size))
    ax.hist(data,
            density=True,
            bins='auto',
            edgecolor='black',
            alpha=0.5,
            label='Frequency')

    # estimate the parameters
    c, loc, scale = scs.triang.fit(data, floc=fixed_location)

    # plot the estimated distribution
    x_values = np.linspace(scs.triang.ppf(0.0001, c, loc, scale),
                           scs.triang.ppf(0.9999, c, loc, scale), 200)
    rv = scs.triang(c, loc, scale)
    ax.plot(x_values,
            rv.pdf(x_values),
            color=COLOR_CONTINUOUS_FIT,
            lw=2,
            label='Triangular')

    ax.set_xlabel(x_label)
    ax.set_ylabel("Frequency")
    ax.legend()
    plt.show()

    # calculate AIC
    aic = AIC(k=2,
              log_likelihood=np.sum(scs.triang.logpdf(data, c, loc, scale)))

    # report results in the form of a dictionary
    return {"c": c, "loc": loc, "scale": scale, "AIC": aic}
Example #25
def latin_sampler(locator, num_samples, variables, region):
    """
    This script creates a matrix of m x n samples using the latin hypercube sampler.
    for this, it uses the database of probability distribtutions stored in locator.get_uncertainty_db()

    :param locator: pointer to locator of files of CEA
    :param num_samples: number of samples to do
    :param variables: list of variables to sample
    :return:
        1. design: a matrix m x n with the samples
        2. pdf_list: a dataframe with properties of the probability density functions used in the excercise.
    """

    # get probability density function PDF of variables of interest
    variable_groups = ('ENVELOPE', 'INDOOR_COMFORT', 'INTERNAL_LOADS')
    database = pd.concat([
        pd.read_excel(locator.get_uncertainty_db(region), sheet_name=group)  # axis is not a read_excel argument
        for group in variable_groups
    ])
    pdf_list = database[database['name'].isin(variables)].set_index('name')

    # get number of variables
    num_vars = pdf_list.shape[0]  #alternatively use len(variables)

    # get design of experiments
    design = lhs(num_vars, samples=num_samples)
    for i, variable in enumerate(variables):
        distribution = pdf_list.loc[variable, 'distribution']
        min = pdf_list.loc[variable, 'min']
        max = pdf_list.loc[variable, 'max']
        mu = pdf_list.loc[variable, 'mu']
        stdv = pdf_list.loc[variable, 'stdv']
        if distribution == 'triangular':
            loc = min
            scale = max - min
            c = (mu - min) / (max - min)
            design[:, i] = triang(loc=loc, c=c, scale=scale).ppf(design[:, i])
        elif distribution == 'normal':
            design[:, i] = norm(loc=mu, scale=stdv).ppf(design[:, i])
        else:  # assume it is uniform
            design[:, i] = uniform(loc=min, scale=max).ppf(design[:, i])

    return design, pdf_list
Example #26
def Triangular(low, peak, high, tag=None):
    """
    A triangular random variate
    
    Parameters
    ----------
    low : scalar
        Lower bound of the distribution support
    peak : scalar
        The location of the triangle's peak (low <= peak <= high)
    high : scalar
        Upper bound of the distribution support
    """
    assert low <= peak <= high, 'Triangular "peak" must lie between "low" and "high"'
    low, peak, high = [float(x) for x in [low, peak, high]]
    return uv(ss.triang((1.0 * peak - low) / (high - low),
                        loc=low,
                        scale=(high - low)),
              tag=tag)
Example #27
    def __init__(self, low=None, peak=None, high=None):

        if low is None or peak is None or high is None:
            raise ValueError('Parameters low, peak and high must be specified')
        assert low <= peak <= high, 'Triangular "peak" must lie between "low" and "high"'

        self.a = low
        self.b = peak
        self.c = high
        self.range = (self.c - self.a)

        self.dist = ss.triang((1.0 * self.b - self.a) / (self.c - self.a),
                              loc=self.a,
                              scale=self.range)
        self.mean = self.dist.mean()
        self.median = self.dist.median()
        self.variance = self.dist.var()
        self.std = self.dist.std()
        self.param_title = str('low=' + str(self.a) + ', peak=' + str(self.b) +
                               ', high=' + str(self.c))
        self.param_title_long = str('Triangular (low=' + str(self.a) +
                                    ', peak=' + str(self.b) + ', high=' +
                                    str(self.c) + ')')
Example #28
def compare_triang_trapezoidal():
    # Test if triang() and trapezoidal() return same values
    # for a triangular distribution.
    c = 0.7
    loc = 0.3
    scale = 2.9
    triangle = stats.triang(c, loc=loc, scale=scale)
    trapezoid = stats.trapezoidal(c, c, 2.0, 2.0, 1.0, loc=loc, scale=scale)
    x = np.linspace(-0.1, 1.1, 1000)
    decimal = 10
    npt.assert_almost_equal(triangle.cdf(x), trapezoid.cdf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal cdf not almost equal')
    npt.assert_almost_equal(triangle.logcdf(x), trapezoid.logcdf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal logcdf not almost equal') 
    npt.assert_almost_equal(triangle.pdf(x), trapezoid.pdf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal pdf not almost equal') 
    npt.assert_almost_equal(triangle.logpdf(x), trapezoid.logpdf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal logpdf not almost equal') 
    npt.assert_almost_equal(triangle.ppf(x), trapezoid.ppf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal ppf not almost equal')  
    npt.assert_almost_equal(triangle.cdf(x), trapezoid.cdf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal cdf not almost equal') 
    npt.assert_almost_equal(triangle.sf(x), trapezoid.sf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal sf not almost equal') 
    npt.assert_almost_equal(triangle.logsf(x), trapezoid.logsf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal logsf not almost equal')
    npt.assert_almost_equal(triangle.isf(x), trapezoid.isf(x),
        decimal=decimal, 
        err_msg='stats.triang stats.trapezoidal isf not almost equal')  
Example #29
    def __init__(self, lower=None, upper=None, mode=None):
        self.lower = lower  # loc
        self.upper = upper
        self.mode = mode
        self.bounds = np.array([0, 1.0])
        self.scale = upper - lower  # scale
        self.shape = (self.mode - self.lower) / (self.upper - self.lower)  # c

        if (self.lower is not None) and (self.upper
                                         is not None) and (self.mode
                                                           is not None):
            mean, var, skew, kurt = triang.stats(c=self.shape,
                                                 loc=self.lower,
                                                 scale=self.scale,
                                                 moments='mvsk')
            self.mean = mean
            self.variance = var
            self.skewness = skew
            self.kurtosis = kurt
            self.x_range_for_pdf = np.linspace(self.lower, self.upper,
                                               RECURRENCE_PDF_SAMPLES)
            self.parent = triang(loc=self.lower,
                                 scale=self.scale,
                                 c=self.shape)
Example #30
def Triangular(mean=1., stdev=1.):
    """
  A symmetric triangular distribution function that returns a frozen distribution of the `scipy.stats.rv_continuous <http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.rv_continuous.html>`_ class.
   
  :param mean: mean value
  :type mean: float
  :param stdev: standard deviation
  :type stdev: float
  :rtype: scipy.stats.rv_continuous instance
   
   
  >>> import compmod
  >>> tri = compmod.distributions.Triangular
  >>> tri = compmod.distributions.Triangular(mean = 1., stdev = .1)
  >>> tri.rvs(10)
  array([ 1.00410636,  1.05395898,  1.03192428,  1.01753651,  0.99951611,
          1.1718781 ,  0.94457269,  1.11406294,  1.08477038,  0.98861803])


  .. plot:: example_code/distributions/triangular.py
     :include-source:
  """
    width = np.sqrt(6) * stdev
    return stats.triang(.5, loc=mean - width, scale=2. * width)
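Why width = sqrt(6) * stdev: a symmetric triangle on [mean - width, mean + width] has variance (2*width)**2 / 24, which equals stdev**2 for this choice of width. A quick check:

import numpy as np
from scipy import stats

rv = stats.triang(.5, loc=1. - np.sqrt(6) * .1, scale=2. * np.sqrt(6) * .1)
print(rv.mean(), rv.std())   # ~1.0 and ~0.1, matching mean=1., stdev=.1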
Example #31
def Triangular(mean = 1., stdev = 1.):
  """
  A symmetric triangular distribution function that returns a frozen distribution of the `scipy.stats.rv_continuous <http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.stats.rv_continuous.html>`_ class.
   
  :param mean: mean value
  :type mean: float
  :param stdev: standard deviation
  :type stdev: float
  :rtype: scipy.stats.rv_continuous instance
   
   
  >>> import compmod
  >>> tri = compmod.distributions.Triangular
  >>> tri = compmod.distributions.Triangular(mean = 1., stdev = .1)
  >>> tri.rvs(10)
  array([ 1.00410636,  1.05395898,  1.03192428,  1.01753651,  0.99951611,
          1.1718781 ,  0.94457269,  1.11406294,  1.08477038,  0.98861803])


  .. plot:: example_code/distributions/triangular.py
     :include-source:
  """
  width = np.sqrt(6) * stdev 
  return stats.triang(.5, loc = mean - width, scale = 2.*width)
Example #32
def all_dists():
    # dist params were taken from the scipy.stats official
    # documentation examples
    # Total - 89
    return {
        "alpha":
        stats.alpha(a=3.57, loc=0.0, scale=1.0),
        "anglit":
        stats.anglit(loc=0.0, scale=1.0),
        "arcsine":
        stats.arcsine(loc=0.0, scale=1.0),
        "beta":
        stats.beta(a=2.31, b=0.627, loc=0.0, scale=1.0),
        "betaprime":
        stats.betaprime(a=5, b=6, loc=0.0, scale=1.0),
        "bradford":
        stats.bradford(c=0.299, loc=0.0, scale=1.0),
        "burr":
        stats.burr(c=10.5, d=4.3, loc=0.0, scale=1.0),
        "cauchy":
        stats.cauchy(loc=0.0, scale=1.0),
        "chi":
        stats.chi(df=78, loc=0.0, scale=1.0),
        "chi2":
        stats.chi2(df=55, loc=0.0, scale=1.0),
        "cosine":
        stats.cosine(loc=0.0, scale=1.0),
        "dgamma":
        stats.dgamma(a=1.1, loc=0.0, scale=1.0),
        "dweibull":
        stats.dweibull(c=2.07, loc=0.0, scale=1.0),
        "erlang":
        stats.erlang(a=2, loc=0.0, scale=1.0),
        "expon":
        stats.expon(loc=0.0, scale=1.0),
        "exponnorm":
        stats.exponnorm(K=1.5, loc=0.0, scale=1.0),
        "exponweib":
        stats.exponweib(a=2.89, c=1.95, loc=0.0, scale=1.0),
        "exponpow":
        stats.exponpow(b=2.7, loc=0.0, scale=1.0),
        "f":
        stats.f(dfn=29, dfd=18, loc=0.0, scale=1.0),
        "fatiguelife":
        stats.fatiguelife(c=29, loc=0.0, scale=1.0),
        "fisk":
        stats.fisk(c=3.09, loc=0.0, scale=1.0),
        "foldcauchy":
        stats.foldcauchy(c=4.72, loc=0.0, scale=1.0),
        "foldnorm":
        stats.foldnorm(c=1.95, loc=0.0, scale=1.0),
        # "frechet_r": stats.frechet_r(c=1.89, loc=0.0, scale=1.0),
        # "frechet_l": stats.frechet_l(c=3.63, loc=0.0, scale=1.0),
        "genlogistic":
        stats.genlogistic(c=0.412, loc=0.0, scale=1.0),
        "genpareto":
        stats.genpareto(c=0.1, loc=0.0, scale=1.0),
        "gennorm":
        stats.gennorm(beta=1.3, loc=0.0, scale=1.0),
        "genexpon":
        stats.genexpon(a=9.13, b=16.2, c=3.28, loc=0.0, scale=1.0),
        "genextreme":
        stats.genextreme(c=-0.1, loc=0.0, scale=1.0),
        "gausshyper":
        stats.gausshyper(a=13.8, b=3.12, c=2.51, z=5.18, loc=0.0, scale=1.0),
        "gamma":
        stats.gamma(a=1.99, loc=0.0, scale=1.0),
        "gengamma":
        stats.gengamma(a=4.42, c=-3.12, loc=0.0, scale=1.0),
        "genhalflogistic":
        stats.genhalflogistic(c=0.773, loc=0.0, scale=1.0),
        "gilbrat":
        stats.gilbrat(loc=0.0, scale=1.0),
        "gompertz":
        stats.gompertz(c=0.947, loc=0.0, scale=1.0),
        "gumbel_r":
        stats.gumbel_r(loc=0.0, scale=1.0),
        "gumbel_l":
        stats.gumbel_l(loc=0.0, scale=1.0),
        "halfcauchy":
        stats.halfcauchy(loc=0.0, scale=1.0),
        "halflogistic":
        stats.halflogistic(loc=0.0, scale=1.0),
        "halfnorm":
        stats.halfnorm(loc=0.0, scale=1.0),
        "halfgennorm":
        stats.halfgennorm(beta=0.675, loc=0.0, scale=1.0),
        "hypsecant":
        stats.hypsecant(loc=0.0, scale=1.0),
        "invgamma":
        stats.invgamma(a=4.07, loc=0.0, scale=1.0),
        "invgauss":
        stats.invgauss(mu=0.145, loc=0.0, scale=1.0),
        "invweibull":
        stats.invweibull(c=10.6, loc=0.0, scale=1.0),
        "johnsonsb":
        stats.johnsonsb(a=4.32, b=3.18, loc=0.0, scale=1.0),
        "johnsonsu":
        stats.johnsonsu(a=2.55, b=2.25, loc=0.0, scale=1.0),
        "ksone":
        stats.ksone(n=1e03, loc=0.0, scale=1.0),
        "kstwobign":
        stats.kstwobign(loc=0.0, scale=1.0),
        "laplace":
        stats.laplace(loc=0.0, scale=1.0),
        "levy":
        stats.levy(loc=0.0, scale=1.0),
        "levy_l":
        stats.levy_l(loc=0.0, scale=1.0),
        "levy_stable":
        stats.levy_stable(alpha=0.357, beta=-0.675, loc=0.0, scale=1.0),
        "logistic":
        stats.logistic(loc=0.0, scale=1.0),
        "loggamma":
        stats.loggamma(c=0.414, loc=0.0, scale=1.0),
        "loglaplace":
        stats.loglaplace(c=3.25, loc=0.0, scale=1.0),
        "lognorm":
        stats.lognorm(s=0.954, loc=0.0, scale=1.0),
        "lomax":
        stats.lomax(c=1.88, loc=0.0, scale=1.0),
        "maxwell":
        stats.maxwell(loc=0.0, scale=1.0),
        "mielke":
        stats.mielke(k=10.4, s=3.6, loc=0.0, scale=1.0),
        "nakagami":
        stats.nakagami(nu=4.97, loc=0.0, scale=1.0),
        "ncx2":
        stats.ncx2(df=21, nc=1.06, loc=0.0, scale=1.0),
        "ncf":
        stats.ncf(dfn=27, dfd=27, nc=0.416, loc=0.0, scale=1.0),
        "nct":
        stats.nct(df=14, nc=0.24, loc=0.0, scale=1.0),
        "norm":
        stats.norm(loc=0.0, scale=1.0),
        "pareto":
        stats.pareto(b=2.62, loc=0.0, scale=1.0),
        "pearson3":
        stats.pearson3(skew=0.1, loc=0.0, scale=1.0),
        "powerlaw":
        stats.powerlaw(a=1.66, loc=0.0, scale=1.0),
        "powerlognorm":
        stats.powerlognorm(c=2.14, s=0.446, loc=0.0, scale=1.0),
        "powernorm":
        stats.powernorm(c=4.45, loc=0.0, scale=1.0),
        "rdist":
        stats.rdist(c=0.9, loc=0.0, scale=1.0),
        "reciprocal":
        stats.reciprocal(a=0.00623, b=1.01, loc=0.0, scale=1.0),
        "rayleigh":
        stats.rayleigh(loc=0.0, scale=1.0),
        "rice":
        stats.rice(b=0.775, loc=0.0, scale=1.0),
        "recipinvgauss":
        stats.recipinvgauss(mu=0.63, loc=0.0, scale=1.0),
        "semicircular":
        stats.semicircular(loc=0.0, scale=1.0),
        "t":
        stats.t(df=2.74, loc=0.0, scale=1.0),
        "triang":
        stats.triang(c=0.158, loc=0.0, scale=1.0),
        "truncexpon":
        stats.truncexpon(b=4.69, loc=0.0, scale=1.0),
        "truncnorm":
        stats.truncnorm(a=0.1, b=2, loc=0.0, scale=1.0),
        "tukeylambda":
        stats.tukeylambda(lam=3.13, loc=0.0, scale=1.0),
        "uniform":
        stats.uniform(loc=0.0, scale=1.0),
        "vonmises":
        stats.vonmises(kappa=3.99, loc=0.0, scale=1.0),
        "vonmises_line":
        stats.vonmises_line(kappa=3.99, loc=0.0, scale=1.0),
        "wald":
        stats.wald(loc=0.0, scale=1.0),
        "weibull_min":
        stats.weibull_min(c=1.79, loc=0.0, scale=1.0),
        "weibull_max":
        stats.weibull_max(c=2.87, loc=0.0, scale=1.0),
        "wrapcauchy":
        stats.wrapcauchy(c=0.0311, loc=0.0, scale=1.0),
    }
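Because every entry above is a frozen distribution, the dictionary can be queried uniformly; for instance (a small sketch):

medians = {name: rv.median() for name, rv in all_dists().items()}
print(medians['triang'], medians['norm'])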
Example #33
             })),
 ]),
 param_distributions={
     'nn__name': ['nn{0:03d}'.format(k) for k in range(10000)],
     'nn__dense1_nonlinearity': nonlinearities.keys(),
     'nn__dense1_init': initializers.keys(),
     'nn__dense2_nonlinearity': nonlinearities.keys(),
     'nn__dense2_init': initializers.keys(),
     'nn__batch_size': binom(n=256, p=0.5),
     'nn__learning_rate': norm(0.0005, 0.0002),
     'nn__learning_rate_scaling': [10, 100, 1000],
     'nn__momentum': uniform(loc=0.9, scale=0.1),
     'nn__dense1_size': randint(low=400, high=900),
     'nn__dense2_size': randint(low=300, high=700),
     'nn__dense3_size': randint(low=200, high=500),
     'nn__dropout0_rate': triang(loc=0, c=0, scale=0.5),
     'nn__dropout1_rate': uniform(loc=0, scale=0.8),
     'nn__dropout2_rate': uniform(loc=0, scale=0.8),
     'nn__dropout3_rate': uniform(loc=0, scale=0.8),
     #'nn__weight_decay': norm(0.00006, 0.0001),
 },
 fit_params={
     'nn__random_sleep': 600,
 },
 n_iter=30,
 n_jobs=cpus,
 scoring=get_logloss_loggingscorer(join(
     OPTIMIZE_RESULTS_DIR, '{0:s}.log'.format(name_from_file())),
                                   treshold=.7),
 iid=False,
 refit=True,
Example #34
def adaptive_integrate(f1, f2, key, value):

    '''inputs:
       f1: function 1 of x, function string
       f2: function 2 of x, function string
       key: distribution type of random variable, string
       value: parameters of random distribution, tuple
       outputs:
       y: integral value
    '''
    
    if key.startswith('Uniform'):
        # stats.uniform defined in the range of [0, 1]
        # we have to convert it to [-1, 1] for the definition of Legendre basis
        # stats.uniform(location, scale)
        # or we can also do arbitrary type, will work on this later
        f_distr = stats.uniform(-1, 2)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, -1, 1)
        
    elif key.startswith('Gaussian'):
        # this is for hermite polynomial basis
        # we can do arbitrary type by not using standard normal distribution
        # will work on this later
        f_distr = stats.norm(0, 1)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, -npy.inf, npy.inf)
        
    elif key.startswith('Gamma'):
        # compare the stats.gamma with the one shown in the UQLab tutorial (input)
        # stats.gamma accepts only one shape value, but UQLab accepts two
        # we can do the location and scale to make them the same
        # argument "1" is for the "standardized" format
        # or we can do arbitrary type later
        # value[0]: lambda, value[1]: k (a for stats.gamma)
        a = value[1]
        loc = 0
        scale = 1./value[0] # stats.gamma uses "beta" instead of "lambda"
        f_distr = stats.gamma(a, loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, npy.inf)
        
    elif key.startswith('Beta'):
        # compare the stats.beta with the one shown in the UQLab tutorial (input)
        # stats.beta takes the two shape parameters directly, so no loc/scale mapping is needed
        # value[0]: alpha, value[1]: beta, no "loc" or "scale" needed
        # always in the range of [0, 1]
        alpha = value[0]
        beta = value[1]
        f_distr = stats.beta(alpha, beta)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, 1)
    
    elif key.startswith('Exponential'):
        # value: lambda
        loc = 0
        scale = 1./value
        f_distr = stats.expon(loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, npy.inf)
    
    elif key.startswith('Lognormal'):
        # this part is very interesting
        # in UQLab they do Hermite for lognormal
        # and U the same as those from gaussian
        # then convert U to X using exp(U)
        # or they can specify arbitrary polynomial basis to be the same as here
        # we can do both, actually
        
        # value[0]: mu, value[1]:sigma
        s = value[1]
        loc = 0
        scale = npy.exp(value[0])
        f_distr = stats.lognorm(s, loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, npy.inf)
    
    elif key.startswith('Gumbel'):
        # compare the stats.gumbel_r with the one shown in the UQLab tutorial (input)
        # we can set the location and scale to make them the same
        # value[0]: mu, value[1]: beta
        loc = value[0]
        scale = value[1]
        f_distr = stats.gumbel_r(loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, -npy.inf, npy.inf)
        
    elif key.startswith('Weibull'):
        # compare the stats.weibull_min with the one shown in the UQLab tutorial (input)
        # we can set the location and scale to make them the same
        # value[0]: lambda, value[1]: k
        k  = value[1]
        loc = 0
        scale = value[0]
        f_distr = stats.weibull_min(k, loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, npy.inf)
        
    elif key.startswith('Triangular'):
        # compare the stats.triang with the one shown in the UQLab tutorial (input)
        # value: c, no "loc" and "scale" needed
        # always in the range of [0, 1]
        c = value
        f_distr = stats.triang(c)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, 0, 1)
        
    elif key.startswith('Logistic'):
        # compare the stats.logistic with the one shown in the UQLab tutorial (input)
        # we can set the location and scale to make them the same
        # value[0]: location, value[1]: scale
        loc = value[0]
        scale = value[1]
        f_distr = stats.logistic(loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, -npy.inf, npy.inf)

    elif key.startswith('Laplace'):
        # compare the stats.laplace with the one shown in the UQLab tutorial (input)
        # we can set the location and scale to make them the same
        # value[0]: location, value[1]: scale
        loc = value[0]
        scale = value[1]
        f_distr = stats.laplace(loc, scale)
        f0 = lambda x: f_distr.pdf(x)
        f = lambda x: f1(x) * f2(x) * f0(x)
        y = integrate.quad(f, -npy.inf, npy.inf)
                
    else:
        print('other types of statistical distributions are coming soon ...')

    return y[0]
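The recurring pattern above, isolated: integrate the product f1*f2 against the pdf as a weight. A tiny self-contained instance (E[X^2] under Uniform(-1, 1), which is 1/3):

from scipy import integrate, stats

f0 = stats.uniform(-1, 2).pdf                           # uniform weight on [-1, 1]
y, err = integrate.quad(lambda x: x * x * f0(x), -1, 1)
print(y)   # 0.333...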
Example #35
def generateToy():

  np.random.seed(12345)

  fig,ax = plt.subplots()
  triang_arg=0.5
  #frozen_triangle = triang(c=triang_arg, loc=2) #up-sloping line from loc to (loc + c*scale) and then downsloping for (loc + c*scale) to (loc+scale).
  frozen_triangle = triang(c=0.5,loc=2) #up-sloping line from loc to (loc + c*scale) and then downsloping for (loc + c*scale) to (loc+scale).
  frozen_powerlaw = powerlaw(2) #powerlaw.pdf(x, a) = a * x**(a-1)

  x = np.linspace(0,1,20)
  x2 = np.linspace(0,1,20)
  nx = x
  nx2 = x2
  #nd = frozen_powerlaw.ppf(nx)
  #nd = np.array([0,0.3162,0.4472,0.5477,0.6324,0.7071,0.7746,0.8367,0.8944,0.9487])
  nd = np.array([0,0.140175,0.264911,0.378405,0.48324,0.581139,0.67332,0.760682,0.843909,0.923538])
  #nd = np.array([0.0723805,0.204159,0.322876,0.431782,0.532971,0.627882,0.717556,0.802776,0.884144,0.962142])
  #pdf = frozen_powerlaw.pdf(x)
  #nd = frozen_triangle.ppf(nx)
  #print x
  #print nd
  #raw_input()
  #pdf = frozen_triangle.pdf(x)
  #print nd
  #print pdf
  #raw_input()
  #for i in range(len(nd)-1):
  #  print (nd[i+1]-nd[i])*(nd[i+1]+nd[i])
  #raw_input()

  #nd2 = frozen_triangle2.ppf(nx2)
  #pdf2 = frozen_triangle2.pdf(x2)

  #print nd,nd2
  #ndc = np.concatenate((nd,nd2),axis=0)
  #print 'ndc', ndc
  #nxc = np.concatenate((nx,nx2))
  #print pdf, pdf2
  #pdfc = np.concatenate((pdf,pdf2))
  #xc = np.concatenate((x,x2))

  #plt.plot(nd,len(nx)*[1],"x")
  #plt.plot(x,pdf)
  #hist(nd,'blocks',fitness='poly_events',p0=0.05,histtype='bar',alpha=0.2,label='b blocks',ax=ax,normed=True)

  #plt.plot(nd[0:11],len(nx[0:11])*[1],"x")
  #plt.plot(x[0:11],pdf[0:11])
  #hist(nd[0:11],'blocks',fitness='poly_events',p0=0.05,histtype='bar',alpha=0.2,label='b blocks',ax=ax,normed=True)
  #hist(ndc,bins=50,histtype='bar',alpha=0.2,label='b blocks',ax=ax,normed=True)

  #plt.plot(nd[11:],len(nx[11:])*[1],"x")
  #plt.plot(x[11:],pdf[11:])
  #hist(nd[11:],'blocks',fitness='poly_events',p0=0.05,histtype='bar',alpha=0.2,label='b blocks',ax=ax,normed=True)

  print(nd)
  plt.plot(nd,len(nd)*[1],"x")
  #plt.plot(x,pdf)
  hist(nd,'blocks',fitness='poly_events',p0=0.05,histtype='bar',alpha=0.2,label='b blocks',ax=ax)

  plt.show()
  fig.savefig('plots/toy_plots2.png')
Example #36
def single_triang(ratio, start, width):
    # rvs() without size returns a scalar, so request one sample before indexing
    value = stats.triang(ratio, loc=start, scale=width).rvs(size=1)[0]
    return value
Example #37
    """
    Converts a multiplicative scale vote to additive
    :param val: The multiplicative scale vote
    :return: The corresponding additive scale vote
    """
    if val == 0:
        return None
    elif val >= 1:
        return val - 1
    else:
        val = 1 / val
        val = val - 1
        return -val


DEFAULT_DISTRIB = triang(c=0.5, loc=-1.5, scale=3.0)


def avote_random(avote):
    """
    Returns a random additive vote in the neighborhood of the additive vote avote
    according to the default disribution DEFAULT_DISTRIB
    """
    if avote is None:
        return None
    raw_val = DEFAULT_DISTRIB.rvs(size=1)[0]
    return avote + raw_val


def mvote_random(mvote):
    """
Example #38
0
def draw_triangle_hist(a, b, c):
    scale = float(b - a)
    t = triang(float(c - a) / scale, loc=float(a), scale=scale)
    x = np.linspace(60, 100, 10000)
    h = plt.plot(x, t.pdf(x), label='$a=%d$, $b=%d$, $c=%d$' % (a, b, c), linewidth=2)
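# Usage sketch: draw_triangle_hist maps an (a, b, c) = (min, max, mode)
# triple onto scipy's parameterization, where the first argument is the
# mode's relative position (c - a) / (b - a). Imports are assumptions, since
# the snippet above omits them.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import triang

draw_triangle_hist(60, 100, 80)   # symmetric triangle on [60, 100]
plt.legend()
plt.show()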
Example #39
0
    def test_triangle_quantile(self):
        rv_1 = stats.triang(0.5, loc=-0.5, scale=2)

        alpha = np.array([0.3, 0.75])
        assert np.allclose(rv_1.ppf(alpha),
                           triangle_quantile(alpha, 0.5, -0.5, 2))
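# triangle_quantile itself is not shown in this snippet; a closed-form
# implementation consistent with the assertion above (same
# (alpha, c, loc, scale) signature) would be:
import numpy as np

def triangle_quantile(alpha, c, loc, scale):
    # quantile of the standard triangular distribution on [0, 1] with mode
    # at c, then shifted and rescaled to [loc, loc + scale]
    alpha = np.asarray(alpha, dtype=float)
    q = np.where(alpha < c,
                 np.sqrt(alpha * c),
                 1.0 - np.sqrt((1.0 - alpha) * (1.0 - c)))
    return loc + scale * q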
Example #40
0
        if f in assignments:
            col[f.fid] = assigned_getter(assignments[f], f)
        else:
            col[f.fid] = unassigned_getter(f)
    return pandas.Series(col)


if __name__ == '__main__':
    npr.seed(1)
    infilename = os.path.join(context.DATA_PATH,
                              'Scenario_W0.25BP2C60Q2D5.xlsx')
    myslots_perhour = 30
    rtc_dist_params = {'c': 0.2, 'loc': 0, 'scale': 60 * 90}
    weight_dist_params = {'c': 0.25, 'loc': 0, 'scale': 2}

    myrtc_dist = sps.triang(**rtc_dist_params)
    myweight_dist = sps.triang(**weight_dist_params)
    outfoldername = os.path.join(context.DATA_PATH, 'test_out_4')
    airline_cheats = True
    postswap = True
    with open(os.path.join(outfoldername, 'param_record.txt'),
              'w') as paramfile:
        paramfile.write("RTC Distribution: Triangular, " +
                        str(rtc_dist_params) + "\n")
        paramfile.write("RTC Distribution: Weight, " +
                        str(weight_dist_params) + "\n")
        paramfile.write("Slots per hour: " + str(myslots_perhour) + "\n")
        paramfile.write("Airline Cheats: " + str(airline_cheats) + "\n")
        paramfile.write("Postswap: " + str(postswap) + "\n")

        paramfile.write("Seed: " + str(1))
Example #41
0
def generador_estadistica(base):
    from scipy.stats import expon, exponnorm, gamma, logistic, lognorm, norm, uniform, triang

    lista_distributions = []
    lista_parameters = []
    for z in range(0, len(base)):
        if base["Parameters"][z] == "Action can not be performed":
            lista_distributions.append("Action can not be performed")
            lista_parameters.append("Action can not be performed")
        else:
            distri_prueba = [*base["Parameters"][z]]
            lista_distributions.append(distri_prueba)
            lista_parameters.append(base["Parameters"][z].get(
                distri_prueba[0]))

    lista_mean = []
    lista_std = []
    for xx in range(0, len(base)):
        if lista_distributions[xx][0] == "expon":
            seleccion = expon(lista_parameters[xx][0], lista_parameters[xx][1])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "exponnorm":
            seleccion = exponnorm(lista_parameters[xx][0],
                                  lista_parameters[xx][1],
                                  lista_parameters[xx][2])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "gamma":
            seleccion = gamma(lista_parameters[xx][0], lista_parameters[xx][1],
                              lista_parameters[xx][2])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "logistic":
            seleccion = logistic(lista_parameters[xx][0],
                                 lista_parameters[xx][1])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "lognorm":
            seleccion = lognorm(lista_parameters[xx][0],
                                lista_parameters[xx][1],
                                lista_parameters[xx][2])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "norm":
            seleccion = norm(lista_parameters[xx][0], lista_parameters[xx][1])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "uniform":
            seleccion = uniform(lista_parameters[xx][0],
                                lista_parameters[xx][1])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx][0] == "triang":
            seleccion = triang(lista_parameters[xx][0],
                               lista_parameters[xx][1],
                               lista_parameters[xx][2])
            lista_mean.append(seleccion.mean())
            lista_std.append(seleccion.std())

        elif lista_distributions[xx] == "Action can not be performed":
            lista_mean.append("No data for mean")
            lista_std.append("No data for std")

    base["mean"] = lista_mean
    base["std"] = lista_std
    return base
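# Design note: every branch above repeats the same mean/std bookkeeping. A
# table-driven sketch of the same dispatch (same distribution names, same
# positional parameters) keeps the per-distribution code in one place:
from scipy.stats import expon, exponnorm, gamma, logistic, lognorm, norm, uniform, triang

DISTRIBUTIONS = {'expon': expon, 'exponnorm': exponnorm, 'gamma': gamma,
                 'logistic': logistic, 'lognorm': lognorm, 'norm': norm,
                 'uniform': uniform, 'triang': triang}

def frozen_from_spec(name, params):
    # build a frozen scipy distribution from a name and positional params
    return DISTRIBUTIONS[name](*params)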
Example #42
0
def generador_estadistica_2(base):
    # per-call import mirroring generador_estadistica above
    from scipy.stats import expon, exponnorm, gamma, logistic, lognorm, norm, uniform, triang
    dis = next(iter(base))
    lista_parameters = np.asarray(base[dis])

    if dis == "expon":
        seleccion = expon(lista_parameters[0], lista_parameters[1])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "exponnorm":
        seleccion = exponnorm(lista_parameters[0], lista_parameters[1],
                              lista_parameters[2])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "gamma":
        seleccion = gamma(lista_parameters[0], lista_parameters[1],
                          lista_parameters[2])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "logistic":
        seleccion = logistic(lista_parameters[0], lista_parameters[1])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "lognorm":
        seleccion = lognorm(lista_parameters[0], lista_parameters[1],
                            lista_parameters[2])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "norm":
        seleccion = norm(lista_parameters[0], lista_parameters[1])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "uniform":
        seleccion = uniform(lista_parameters[0], lista_parameters[1])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "triang":
        seleccion = triang(lista_parameters[0], lista_parameters[1],
                           lista_parameters[2])
        lista_mean = seleccion.mean()
        lista_std = seleccion.std()
        lista_generador = seleccion.rvs(1000)

    elif dis == "Action can not be performed":
        lista_mean = "No data for mean"
        lista_std = "No data for std"
        lista_generador = seleccion.rvs(1000)

    base["mean"] = lista_mean
    base["std"] = lista_std
    base["generador"] = lista_generador
    return base
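# Usage sketch for generador_estadistica_2: the input dict maps one scipy
# distribution name to its positional parameters, e.g. a triangular
# distribution with c = 0.5 on [0, 2] (mean 1.0):
base = {"triang": [0.5, 0.0, 2.0]}
result = generador_estadistica_2(base)
# result now also carries "mean", "std" and a 1000-sample "generador"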
def integral(parameters):
    N = parameters['N']
    target = parameters['target']
    R = parameters['retailers']

    # rv1, rv2, rv3 = ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float), ndarray(shape = (N,), dtype=float)
    if not parameters['distribution']:
        print 'No distribution set...abort'
        exit(1)
    # elif parameters['distribution'] == 'truncnorm':
    #     a1, b1 = (parameters['min_intrv1'] - parameters['mu1']) / parameters['sigma1'], (parameters['max_intrv1'] - parameters['mu1']) / parameters['sigma1']
    #     a2, b2 = (parameters['min_intrv2'] - parameters['mu2']) / parameters['sigma2'], (parameters['max_intrv2'] - parameters['mu2']) / parameters['sigma2']
    #     a3, b3 = (parameters['min_intrv3'] - parameters['mu3']) / parameters['sigma3'], (parameters['max_intrv3'] - parameters['mu3']) / parameters['sigma3']
    #     rv1 = truncnorm(a1, b1, loc=parameters['mu1'], scale=parameters['sigma1']).rvs(N)
    #     rv2 = truncnorm(a2, b2, loc=parameters['mu2'], scale=parameters['sigma2']).rvs(N)
    #     rv3 = truncnorm(a3, b3, loc=parameters['mu3'], scale=parameters['sigma3']).rvs(N)
    elif parameters['distribution'] == 'norm':
        rvs = [norm(parameters['mu{}'.format(i)],
                    parameters['sigma{}'.format(i)]).rvs(N) for i in xrange(1, R+1)]

    elif parameters['distribution'] == 'uniform':
        rvs = [uniform(loc=parameters['mu{}'.format(i)],
                       scale=parameters['sigma{}'.format(i)]).rvs(N) for i in xrange(1, R+1)]

    elif parameters['distribution'] == 'triang':
        # scipy's triang wants scale = interval width and c = the mode's
        # relative position in [0, 1], hence the conversions below
        rvs = [triang(loc=parameters['min_intrv{}'.format(i)],
                      scale=parameters['max_intrv{}'.format(i)] - parameters['min_intrv{}'.format(i)],
                      c=(parameters['mu{}'.format(i)] - parameters['min_intrv{}'.format(i)]) /
                        (parameters['max_intrv{}'.format(i)] - parameters['min_intrv{}'.format(i)])).rvs(N)
               for i in xrange(1, R+1)]

    else:
        print 'Distribution not recognized...abort'
        exit(1)

    if parameters['scaling']:
        #scale the values of Qs in the allowed range such that sum(Q_i) = A
        r = sum([abs(parameters['Q{}'.format(i)]) for i in xrange(1, R+1)])
        if r == 0.0:
            r = 1.

        # after rounding, the sum of the scaled values could drift from A,
        # so the last one is set by difference below
        tot_other_Q = 0.0
        #  set the first R-1 variables
        for i in xrange(1, R):
            parameters['Q{}'.format(i)] = abs(parameters['Q{}'.format(i)]) * parameters['A'] / r
            tot_other_Q += parameters['Q{}'.format(i)]

        #  set the R-th variable by difference
        parameters['Q{}'.format(R)] = parameters['A'] - tot_other_Q

    if parameters['retailers'] == 3:
        return _integral3(rvs[0], rvs[1], rvs[2],
                          parameters['Q1'], parameters['Q2'], parameters['Q3'], target)
    elif parameters['retailers'] == 6:
        return _integral6(rvs[0], rvs[1], rvs[2],
                          rvs[3], rvs[4], rvs[5],
                          parameters['Q1'], parameters['Q2'], parameters['Q3'],
                          parameters['Q4'], parameters['Q5'], parameters['Q6'], target)
    elif parameters['retailers'] == 9:
        return _integral9(rvs[0], rvs[1], rvs[2],
                          rvs[3], rvs[4], rvs[5],
                          rvs[6], rvs[7], rvs[8],
                          parameters['Q1'], parameters['Q2'], parameters['Q3'],
                          parameters['Q4'], parameters['Q5'], parameters['Q6'],
                          parameters['Q7'], parameters['Q8'], parameters['Q9'], target)
    else:
        print "Not implemented with {} retailers".format(parameters['retailers'])
        exit(1)
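# A worked example of the scaling step above with R = 3 and A = 10:
# Q = (1, 2, 3) gives r = 6, so Q1 = 1*10/6 and Q2 = 2*10/6 are set by
# scaling, while Q3 = 10 - 5 = 5 is set by difference, so rounding can
# never make the total drift away from A.
Q = [1.0, 2.0, 3.0]
A = 10.0
r = sum(abs(q) for q in Q) or 1.0
scaled = [abs(q) * A / r for q in Q[:-1]]
scaled.append(A - sum(scaled))
assert abs(sum(scaled) - A) < 1e-12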
        os.system("cls")

        nombreDistribucion = "Distribución Triangular"
        print("\t::", nombreDistribucion, "::")

        # Parameters required for the generation
        limiteInferior = float(input("->Ingrese el límite inferior :"))
        limiteSuperior = float(input("->Ingrese el límite superior :"))
        moda = float(input("->Ingrese parametro de forma (entre 0 y 1):"))

        numeroDatos = int(input("->Ingrese el número de variables aleatorias a generar :"))
        #nivelDeSignificacia = float(input("->Ingrese el nivel de significancia para la prueba chi cuadrado :"))
        nivelDeSignificacia = 0.05

        tipoDistribucion = st.triang(moda, limiteInferior,limiteSuperior-limiteInferior)
        objChi2 = triangularDistribution.Triangular(numeroDatos, tipoDistribucion, nivelDeSignificacia,
                                                    nombreDistribucion, moda, limiteInferior, limiteSuperior)

        legendDensity = "a=" + str(limiteInferior) + " ; b=" + str(limiteSuperior) + " ; x=" + str(moda)
        legendHistogram = ("a=" + "{0:.2f}".format(objChi2.limInfGenerated) +
                           " ; b=" + "{0:.2f}".format(objChi2.limSupGenerated) +
                           " ; x=" + "{0:.2f}".format(objChi2.modaGenerated))

        axisX = np.linspace(limiteInferior, limiteSuperior, 100)

        objChi2.chiSquareTest()
        objChi2.graph(legendHistogram, legendDensity, axisX)
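        # The Triangular class comes from an external module not shown here;
        # a minimal sketch of the chi-square goodness-of-fit idea it wraps,
        # using scipy directly (the 10-bin partition is an arbitrary choice):
        import numpy as np
        import scipy.stats as st

        dist = st.triang(moda, limiteInferior, limiteSuperior - limiteInferior)
        sample = dist.rvs(size=numeroDatos, random_state=0)
        edges = dist.ppf(np.linspace(0.0, 1.0, 11))   # 10 equiprobable bins
        observed, _ = np.histogram(sample, bins=edges)
        expected = np.full(10, len(sample) / 10.0)
        chi2, p_value = st.chisquare(observed, expected)
        # reject the triangular hypothesis when p_value < nivelDeSignificacia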

    # CHI-SQUARED DISTRIBUTION