Example #1
def sample(SAlib_problem, args):
    if args.method == 'fast':
        from SALib.sample.fast_sampler import sample
        X = sample(SAlib_problem, args.N, args.M)
    elif args.method == 'latin':
        from SALib.sample.latin import sample
        X = sample(SAlib_problem, args.N)
    elif args.method == 'morris':
        from SALib.sample.morris import sample
        X = sample(SAlib_problem,
                   args.N,
                   args.num_levels,
                   args.grid_jump,
                   optimal_trajectories=args.optimal_trajectories,
                   local_optimization=args.local_optimization)
    elif args.method == 'saltelli':
        from SALib.sample.saltelli import sample
        X = sample(SAlib_problem,
                   args.N,
                   calc_second_order=args.calc_second_order)
    elif args.method == 'ff':
        from SALib.sample.ff import sample
        X = sample(SAlib_problem)
    elif args.method == 'random':
        bounds = numpy.array(SAlib_problem['bounds'])
        assert bounds.shape[
            1] == 2, 'Expected two columns: minimum and maximum'
        assert bounds.shape[0] == SAlib_problem['num_vars']
        minbound, maxbound = bounds[:, 0], bounds[:, 1]
        X = numpy.random.uniform(minbound, maxbound, (args.N, minbound.size))
    else:
        raise Exception('Unknown sampler "%s" specified.' % args.method)
    print('Generated an ensemble with %i members' % (X.shape[0], ))
    return X
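The dispatcher above only assumes an argparse-style namespace with the attributes read by the chosen branch. A minimal sketch of driving it (the problem dict and argument values here are illustrative, not from the original project):

import argparse

SAlib_problem = {
    'num_vars': 2,
    'names': ['k1', 'k2'],
    'bounds': [[0.0, 1.0], [0.0, 1.0]],
}

# only the 'latin' branch is exercised, so only method and N are needed
args = argparse.Namespace(method='latin', N=100)
X = sample(SAlib_problem, args)  # returns a (100, 2) array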
Example #2
def sample(SAlib_problem, args):
    if args.method == 'fast':
        from SALib.sample.fast_sampler import sample
        X = sample(SAlib_problem, args.N, args.M)
    elif args.method == 'latin':
        from SALib.sample.latin import sample
        X = sample(SAlib_problem, args.N)
    elif args.method == 'morris':
        from SALib.sample.morris import sample
        X = sample(SAlib_problem,
                   args.N,
                   args.num_levels,
                   args.grid_jump,
                   optimal_trajectories=args.optimal_trajectories,
                   local_optimization=args.local_optimization)
    elif args.method == 'saltelli':
        from SALib.sample.saltelli import sample
        X = sample(SAlib_problem,
                   args.N,
                   calc_second_order=args.calc_second_order)
    elif args.method == 'ff':
        from SALib.sample.ff import sample
        X = sample(SAlib_problem)
    else:
        raise Exception('Unknown sampler "%s" specified.' % args.method)
    print('Generated an ensemble with %i members' % (X.shape[0], ))
    return X
Example #3
def test_fast_sample_seed():

    _, problem = problem_setup()

    sample1 = fast_sampler.sample(problem, 65, seed=None)
    sample2 = fast_sampler.sample(problem, 65, seed=123)

    np.testing.assert_equal(np.any(np.not_equal(sample1, sample2)), True)
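The test above only checks that an unseeded draw differs from a seeded one. The complementary property, that repeating a seed reproduces the samples, can be sketched as follows (problem_setup() is project-internal, so an Ishigami-style problem dict is assumed here):

import numpy as np
from SALib.sample import fast_sampler

problem = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[-3.14159265359, 3.14159265359]] * 3,
}

# N=65 satisfies the sampler's N > 4*M**2 requirement for the default M=4
a = fast_sampler.sample(problem, 65, seed=123)
b = fast_sampler.sample(problem, 65, seed=123)
np.testing.assert_allclose(a, b)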
Example #4
def test_fast_to_df():
    params = ['x1', 'x2', 'x3']
    problem = {
        'num_vars': 3,
        'names': params,
        'groups': None,
        'bounds': [[-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359],
                   [-3.14159265359, 3.14159265359]]
    }

    param_values = fast_sampler.sample(problem, 1000)
    Y = Ishigami.evaluate(param_values)

    Si = fast.analyze(problem, Y, print_to_console=False)
    Si_df = Si.to_df()

    expected_index = set(params)
    assert isinstance(Si_df, pd.DataFrame), \
        "FAST Si: Expected DataFrame, got {}".format(type(Si_df))
    assert set(Si_df.index) == expected_index, "Incorrect index in DataFrame"

    col_names = set(['S1', 'ST'])
    assert set(Si_df.columns) == col_names, \
        "Unexpected column names in DataFrame. Expected {}, got {}".format(
            col_names, Si_df.columns)
Example #5
    def redraw(self, random_method: str):
        """Redraw the random number with the given method

        :param random_method: the random method to use

        """
        problem = {
            "num_vars": self.num_dim,
            "names": list(range(self.num_dim)),
            "bounds": [[0, 1]] * self.num_dim,
        }
        if random_method == "pseudo_random":
            seq = np.random.random((self.bucket_size, 2))
        elif random_method == "sobol_sequence":
            seq = sobol_sequence.sample(self.bucket_size, 2)
        elif random_method == "saltelli":
            seq = saltelli.sample(problem, self.bucket_size, calc_second_order=False)
        elif random_method == "latin_hypercube":
            seq = latin.sample(problem, self.bucket_size)
        elif random_method == "finite_differences":
            seq = finite_diff.sample(problem, self.bucket_size)
        elif random_method == "fast":
            seq = fast_sampler.sample(problem, self.bucket_size, M=45)
        else:
            raise ValueError(f"Unknown random method {random_method}")
        self.random_draws[random_method] = seq
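One caveat with the M=45 used above: SALib's FAST sampler rejects sample sizes with N <= 4*M**2, so a small bucket_size combined with a large M will raise a ValueError. A minimal illustration with an assumed two-variable problem:

from SALib.sample import fast_sampler

problem = {"num_vars": 2, "names": ["x", "y"], "bounds": [[0, 1]] * 2}

# N must exceed 4*M**2 = 8100 when M=45
seq = fast_sampler.sample(problem, 8192, M=45)
# fast_sampler.sample(problem, 1000, M=45) would raise ValueError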
Example #6
def fast_sample(
    parameters: MutableMapping[str, Distribution],
    harmonics: Optional[int],
    sample_size: Optional[int],
) -> List[Dict[str, Record]]:
    if len(parameters) == 0:
        raise ValueError("Cannot study the sensitivity of no variables")

    if harmonics is None:
        harmonics = 4
    if sample_size is None:
        sample_size = 1000

    problem = _build_salib_problem(parameters)

    samples = fast_sampler.sample(problem, sample_size, M=harmonics)

    group_records = []
    for dist in parameters.values():
        records = []
        for sample in samples:
            data = dict(zip(dist.index, sample[:dist.size]))
            record = NumericalRecord(data=data,
                                     index=dist.index)  # type: ignore
            records.append(record)
        samples = np.delete(samples, list(range(dist.size)), axis=1)
        group_records.append(records)

    evaluations = []
    for i in zip(*group_records):
        evaluation = dict(zip(parameters.keys(), i))
        evaluations.append(evaluation)

    return evaluations
Example #7
def generate_samples(problem, params, project_dir, method):
    """
    The Saltelli sampler generated 8000 samples. The Saltelli sampler generates N*(2D+2) samples, where in this
    example N is 1000 (the argument we supplied) and D is 3 (the number of model inputs).
    """

    if method == 'Saltelli':
        param_values = saltelli.sample(problem, params['samples'])
    elif method == 'FAST':
        param_values = fast_sampler.sample(problem, params['samples'])

    count = 0

    for i, X in enumerate(param_values):
        count += 1
        p, w, pred_fle = par.get_params(innate, X)
        _numpoints = 100
        t = [
            par._stoptime * float(i) / (_numpoints - 1)
            for i in range(_numpoints)
        ]
        w0 = innate.get_init(w, params)
        t, wsol = innate.solve(p, w0, t, params)
        APE_blood = []
        CH = []
        ACH = []
        for t1, w1 in zip(t, wsol):
            APE_blood.append(w1[1])
            CH.append(w1[10])
            ACH.append(w1[13])
        # Y_list.append(APE_blood)
        write_file(project_dir, method, APE_blood, 'AP')
        write_file(project_dir, method, CH, 'CH')
        write_file(project_dir, method, ACH, 'ACH')
        print(count, ' of ', len(param_values))
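The N*(2D+2) count quoted in the docstring above is easy to verify directly; a small self-contained check with an illustrative three-variable problem:

from SALib.sample import saltelli

problem = {'num_vars': 3, 'names': ['x1', 'x2', 'x3'], 'bounds': [[0, 1]] * 3}
X = saltelli.sample(problem, 1000)  # calc_second_order defaults to True
assert X.shape == (1000 * (2 * 3 + 2), 3)  # 8000 samples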
Example #8
def sample_parameters(problem: dict, nsamples: int, method: str):
    # Note: the second positional argument to these SALib samplers is the
    # sample size N, not a random seed.
    if method == "FAST":
        return fast_sampler.sample(problem, nsamples)
    if method == "sobol":
        return saltelli.sample(problem, nsamples)
    if (method == "RBD_FAST") or (method == "DMIM"):
        return latin.sample(problem, nsamples)
    raise ValueError(f"Unknown method {method}")
Example #9
    def _sample(*args, **kwargs):
        self = args[0]

        N = self.N
        M = kwargs.get('M', 4)
        self.storeKwargs(N=N, M=M)

        return fast_sampler.sample(self.problem, N=N, M=M)
Example #10
def test_regression_fast():
    param_file = 'src/SALib/test_functions/params/Ishigami.txt'
    problem = read_param_file(param_file)
    param_values = fast_sampler.sample(problem, 10000)

    Y = Ishigami.evaluate(param_values)

    Si = fast.analyze(problem, Y, print_to_console=False)
    assert_allclose(Si['S1'], [0.31, 0.44, 0.00], atol=5e-2, rtol=1e-1)
    assert_allclose(Si['ST'], [0.55, 0.44, 0.24], atol=5e-2, rtol=1e-1)
Example #11
def FAST_analysis(network, num_samples, prob_def):
    print("Sampling via FAST sampler...")
    samples = fast_sampler.sample(prob_def, num_samples)
    samples = np.split(samples, samples.shape[1], axis=1)
    samples = [s.squeeze() for s in samples]
    values = {n: torch.tensor(s) for n, s in zip(prob_def["names"], samples)}
    print("Running GrFN...")
    Y = network.run(values).numpy()
    print("Analyzing via FAST...")
    return fast.analyze(prob_def, Y, print_to_console=True)
Example #12
def populate():
    # 'parameters' dictionary stores each line in the model
    par_keys = list(parameters.keys())

    # init problem definition
    seed = int(opts['seed'])
    levels = int(opts['p_levels'])

    problem = {
        'names': opts['par_name'],
        'num_vars': len(opts['par_name']),
        'bounds': [],
    }

    # define bounds following the model configuration
    for line in range(len(par_keys)):
        if parameters[line][0] == 'par':
            if parameters[line][3] == 'range':
                lower = float(parameters[par_keys[line]][4])
                upper = float(parameters[par_keys[line]][5])
            if parameters[line][3] == 'factor':
                lower = float(parameters[line][2]) * (
                    1 - float(parameters[par_keys[line]][4]))
                upper = float(parameters[line][2]) * (
                    1 + float(parameters[par_keys[line]][5]))
            problem['bounds'].append([lower, upper])

    # create samples to simulate
    if opts['method'] == 'sobol':
        models = saltelli.sample(problem=problem,
                                 N=levels,
                                 calc_second_order=True,
                                 seed=seed)
    elif opts['method'] == 'fast':
        models = fast_sampler.sample(problem=problem, N=levels, seed=seed)
    elif opts['method'] == 'rbd-fast' or opts['method'] == 'delta' or opts[
            'method'] == 'dgsm':
        models = latin.sample(problem=problem, N=levels, seed=seed)
    elif opts['method'] == 'morris':
        models = morris_sample(problem=problem, N=levels)
    elif opts['method'] == 'frac':
        models = ff_sample(problem, seed=seed)
    else:
        error_msg = 'Wrong method name.'
        print(error_msg)
        raise ValueError(error_msg)

    population = {}
    # add samples to population dict
    population['problem', 'samples'] = models
    # add problem definition to population dict
    population['problem', 'definition'] = problem

    return population
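populate() reads its configuration from module-level opts and parameters dicts that are not shown. A hypothetical minimal opts, inferred only from the lookups in the function above:

# hypothetical configuration; keys inferred from the code above
opts = {
    'seed': '42',        # RNG seed, stored as a string
    'p_levels': '10',    # sample size N
    'par_name': ['kf', 'kr'],
    'method': 'fast',    # sobol, fast, rbd-fast, delta, dgsm, morris, or frac
}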
Example #13
def make_samples_for_row(row,
                         model,
                         fixed_parameters,
                         parameter_bounds={},
                         analysis_type='sobol',
                         N=1000,
                         **kwargs):
    '''
    Supporting function for sample_and_integrate. Do not run.
    row must be a pandas Series
    
    :meta private:
    '''

    param_names = model['params']
    base_value = row[param_names]
    to_permute = [p for p in param_names if p not in fixed_parameters]
    to_keep = [p for p in param_names if p in fixed_parameters]
    if parameter_bounds:
        bounds = np.array([parameter_bounds[p] for p in to_permute])
    else:
        bounds = [[base_value[p] / 10, base_value[p] * 10] for p in to_permute]

    problem = {
        'num_vars': len(to_permute),
        'names': to_permute,
        'bounds': bounds,
    }

    if analysis_type == 'sobol':
        new_values = saltelli.sample(problem, N, **kwargs)
    elif analysis_type == 'fast':
        new_values = fast_sampler.sample(problem, N, **kwargs)
    elif analysis_type == 'delta':
        new_values = latin.sample(problem, N)
    elif analysis_type == 'rbd-fast':
        new_values = latin.sample(problem, N)
    else:
        raise Exception(
            'Could not find analyzer. analysis_type must be sobol, fast, rbd-fast or delta.'
        )

    new_values = pd.DataFrame(new_values, columns=to_permute)

    fixed_parametersped_values = np.repeat(
        base_value[to_keep].values[np.newaxis, :], len(new_values), 0)

    samples = np.concatenate((new_values, fixed_parametersped_values), axis=1)
    samples = pd.DataFrame(samples, columns=to_permute + to_keep)
    samples = samples[param_names]

    return samples, problem
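A hypothetical usage sketch: model only needs a 'params' key listing the parameter names, and row supplies the base values used to derive the default bounds.

import pandas as pd

model = {'params': ['k1', 'k2', 'k3']}  # illustrative model dict
row = pd.Series({'k1': 1.0, 'k2': 0.5, 'k3': 2.0})

samples, problem = make_samples_for_row(row, model,
                                        fixed_parameters=['k3'],
                                        analysis_type='fast',
                                        N=1000)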
Example #14
    def analyze(self):
        """Initiate the analysis, and stores the result at data directory.

        Generates:
            Analysis result at 'acbm/data/output/fast.txt'.

        """

        X = fast_sampler.sample(self.problem,
                                self.n_samples,
                                M=self.M,
                                seed=self.seed_sample)
        Y = ACBM.evaluate(X)
        si = fast.analyze(self.problem, Y, M=self.M, seed=self.seed_analyze)
        pickle.dump(si, open(self.path_output + 'fast.txt', 'wb'))
Example #15
    def fast(self):
        """FAST sensitivity analysis of the objective function.
        This function estimates the sensitivity with the FAST method of the
        objective function with changes in the parameters using SALib:
        https://salib.readthedocs.io/en/latest/api.html#fast-fourier-amplitude-sensitivity-test

        Returns:
            dict: sensitivity values of parameters; dict has keys 'S1' and 'ST'
        """
        X, y, problem = self._sensitivity_prep()
        n_sample = 2000
        param_values = fast_sampler.sample(problem, n_sample)
        X_s, y_s = self._closest_points(problem, X, y, param_values)
        Si = fast.analyze(problem, y_s)
        return Si
Example #16
 def _gen_muestrea(símismo, n, ops):
     if símismo.método == 'sobol':
         return saltelli.sample(problem=símismo.problema, N=n, **ops)
     elif símismo.método == 'fast':
         return fast_sampler.sample(problem=símismo.problema, N=n, **ops)
     elif símismo.método == 'morris':
         return morris_muestra.sample(problem=símismo.problema, N=n, **ops)
     elif símismo.método == 'dmim':
         return latin.sample(problem=símismo.problema, N=n)
     elif símismo.método == 'dgsm':
         return saltelli.sample(problem=símismo.problema, N=n)
     elif símismo.método == 'ff':
         return ff_muestra.sample(problem=símismo.problema)
     else:
         raise ValueError('Unrecognized sensitivity analysis method "{}".'.format(símismo.método))
Example #17
def fast_sample(
    parameters: MutableMapping[str, Distribution],
    harmonics: Optional[int],
    sample_size: Optional[int],
) -> List[Dict[str, Record]]:
    """Construct the Numpy matrix with model inputs for the extended Fourier
    Amplitude Sensitivity test. The generated samples are intended to be used by
    fast_analyze() after a model has been run the returned data from this
    function.

    This function essentially wraps SALib.sample.fast_sampler.sample.

    Args:
        parameters: Collection of ert3 distributions
        harmonics: The inference parameter, in SALib called "M"
        sample_size: Number of samples to generate, called "N" in SALib
    """
    if len(parameters) == 0:
        raise ValueError("Cannot study the sensitivity without any variables")

    if harmonics is None:
        harmonics = 4
    if sample_size is None:
        sample_size = 1000

    problem = _build_salib_problem(parameters)

    samples = fast_sampler.sample(problem, sample_size, M=harmonics)
    group_records = []
    for dist in parameters.values():
        # Loop over each parameter, let it be scalar or list-like
        records = []
        for sample in samples:
            if dist.is_scalar:
                data = sample[0]
            else:
                data = dict(zip(dist.index, sample[:dist.size]))
            record = NumericalRecord(data=data, index=dist.index)
            records.append(record)
        samples = np.delete(samples, list(range(dist.size)), axis=1)
        group_records.append(records)

    evaluations = []
    for i in zip(*group_records):
        evaluation = dict(zip(parameters.keys(), i))
        evaluations.append(evaluation)

    return evaluations
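_build_salib_problem() is not shown in this listing. A plausible sketch, consistent only with how the samples are consumed above (one SALib variable per component of every distribution); the real ert3 implementation may derive names and bounds differently:

def _build_salib_problem(parameters):
    # flatten every distribution into scalar components (hypothetical)
    names = [
        f"{group}-{idx}"
        for group, dist in parameters.items()
        for idx in dist.index
    ]
    return {
        "num_vars": len(names),
        "names": names,
        "bounds": [[0, 1]] * len(names),
    }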
Example #18
 def redraw(self, random_method):
     problem = {'num_vars': 2, 'names': ['x', 'y'], 'bounds': [[0, 1]] * 2}
     if random_method == 'pseudo_random':
         seq = np.random.random((NUM_DATA_POINTS, 2))
     elif random_method == 'sobol_sequence':
         seq = sobol_sequence.sample(NUM_DATA_POINTS, 2)
     elif random_method == 'saltelli':
         seq = saltelli.sample(problem,
                               NUM_DATA_POINTS,
                               calc_second_order=False)
     elif random_method == 'latin_hypercube':
         seq = latin.sample(problem, NUM_DATA_POINTS)
     elif random_method == 'finite_differences':
         seq = finite_diff.sample(problem, NUM_DATA_POINTS)
     elif random_method == 'fast':
         seq = fast_sampler.sample(problem, NUM_DATA_POINTS, M=45)
     else:
         raise ValueError('Unknown random method {}'.format(random_method))
     self.random_draws[random_method] = seq
Example #19
def sampleFAST(num, cz):
    #read the variable table, "variable"
    data_set_temp = np.genfromtxt('./variable.csv',
                                  skip_header=1,
                                  dtype=str,
                                  delimiter=',')

    #generate the data set under cz
    climate = [
        '1A', '2A', '2B', '3A', '3B', '3C', '4A', '4B', '4C', '5A', '5B', '6A',
        '6B', '7A', '8A'
    ]
    ind = climate.index(cz)
    data_set = []
    k = 1
    for row in data_set_temp:
        temp = [str(k)]
        temp.append(row[0])  #the measure's name
        temp.append(row[1])  #the argument's name
        temp.append(float(row[ind + 2]))  #the minimum value
        temp.append(float(row[ind + 19]))  #the maximum value
        data_set.append(temp)
        k += 1

    names = []
    bounds = []
    for row in data_set:
        names.append(row[0])
        temp = []
        temp.append(row[3])
        temp.append(row[4])
        bounds.append(temp)

    #set the variables and ranges of variables
    problem = {'num_vars': len(data_set), 'names': names, 'bounds': bounds}

    #select the samples
    param_values = fast_sampler.sample(problem, num)

    return data_set, problem, param_values
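A hypothetical call, assuming ./variable.csv exists with the layout read above:

# '5A' selects the min/max columns for climate zone 5A
data_set, problem, param_values = sampleFAST(1000, '5A')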
Example #20
def prep_anal_sensib(método, n, problema, opciones):
    """
    Prepare the sampled values, analysis function, and analysis options for a
    SALib sensitivity analysis.

    :param método: name of the sensitivity analysis method
    :type método: str
    :param n: sample size N
    :type n: int
    :param problema: SALib problem definition
    :type problema: dict
    :param opciones: method options (Spanish keys mapped to SALib argument names)
    :type opciones: dict
    :return: sampled parameter values, analysis function, analysis options
    :rtype: tuple
    """

    método_mín = método.lower()

    if método_mín == 'sobol':
        # Prepare options
        conv_ops_muestrear = {'calc_segundo_orden': 'calc_second_order'}
        conv_ops_anlz = {'calc_segundo_orden': 'calc_second_order', 'núm_remuestreos': 'num_resamples',
                         'nivel_conf': 'conf_level', 'paralelo': 'parallel', 'n_procesadores': 'n_processors'}

        # Options for the sampling and analysis functions
        ops_muestrear = {conv_ops_muestrear[a]: val for a, val in opciones.items() if a in conv_ops_muestrear}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Determine which parameter values to evaluate for the Sobol analysis
        vals_paráms = saltelli.sample(problem=problema, N=n, **ops_muestrear)

        # The analysis function
        fun_anlz = sobol.analyze

    elif método_mín == 'fast':
        # Prepare options
        if 'M' in opciones:
            ops_muestrear = ops_anlz = {'M': opciones['M']}
        else:
            ops_muestrear = ops_anlz = {}

        # Generate samples for FAST
        vals_paráms = fast_sampler.sample(problem=problema, N=n, **ops_muestrear)

        # The analysis function
        fun_anlz = fast.analyze

    elif método_mín == 'morris':
        # Prepare options
        conv_ops_muestrear = {'núm_niveles': 'num_levels', 'salto_cuadr': 'grid_jump',
                              'traj_optimal': 'optimal_trajectories', 'opt_local': 'local_optimization'}
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level',
                         'salto_cuadr': 'grid_jump', 'núm_niveles': 'num_levels'}

        # Options for the sampling and analysis functions
        ops_muestrear = {conv_ops_muestrear[a]: val for a, val in opciones.items() if a in conv_ops_muestrear}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate samples for Morris
        vals_paráms = morris_muestra.sample(problem=problema, N=n, **ops_muestrear)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = morris_anlz

    elif método_mín == 'dmim':
        # Prepare options
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level'}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate samples for DMIM
        vals_paráms = latin.sample(problem=problema, N=n)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = delta.analyze

    elif método_mín == 'dgsm':  # TODO: verify
        # Prepare options
        conv_ops_anlz = {'núm_remuestreos': 'num_resamples', 'nivel_conf': 'conf_level'}
        ops_anlz = {conv_ops_anlz[a]: val for a, val in opciones.items() if a in conv_ops_anlz}

        # Generate samples for DGSM
        vals_paráms = saltelli.sample(problem=problema, N=n)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = dgsm

    elif método_mín == 'ff':

        # Prepare options
        if 'segundo_orden' in opciones:
            ops_anlz = {'second_order': opciones['segundo_orden']}
        else:
            ops_anlz = {}

        # Generate samples for FF
        vals_paráms = ff_muestra.sample(problem=problema)
        ops_anlz['X'] = vals_paráms

        # The analysis function
        fun_anlz = ff_anlz

    else:
        raise ValueError('Unrecognized sensitivity analysis method "{}".'.format(método))

    return vals_paráms, fun_anlz, ops_anlz
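A minimal usage sketch for the 'fast' branch, pairing the returned sample values with the returned analysis function (the problem dict is illustrative):

from SALib.test_functions import Ishigami

problema = {
    'num_vars': 3,
    'names': ['x1', 'x2', 'x3'],
    'bounds': [[-3.14159265359, 3.14159265359]] * 3,
}

vals_paráms, fun_anlz, ops_anlz = prep_anal_sensib('fast', 1000, problema, {'M': 4})
res = fun_anlz(problema, Ishigami.evaluate(vals_paráms), **ops_anlz)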
Example #21
def sa(model, response, policy={}, method="sobol", nsamples=1000, **kwargs):
    if len(model.uncertainties) == 0:
        raise ValueError("no uncertainties defined in model")

    problem = {
        'num_vars': len(model.uncertainties),
        'names': model.uncertainties.keys(),
        'bounds': [[0.0, 1.0] for u in model.uncertainties],
        'groups': kwargs.get("groups", None)
    }

    # estimate the argument N passed to the sampler that produces the requested
    # number of samples
    N = _predict_N(method, nsamples, problem["num_vars"], kwargs)

    # generate the samples
    if method == "sobol":
        samples = saltelli.sample(problem, N,
                                  **_cleanup_kwargs(saltelli.sample, kwargs))
    elif method == "morris":
        samples = morris_sampler.sample(
            problem, N, **_cleanup_kwargs(morris_sampler.sample, kwargs))
    elif method == "fast":
        samples = fast_sampler.sample(
            problem, N, **_cleanup_kwargs(fast_sampler.sample, kwargs))
    elif method == "ff":
        samples = ff_sampler.sample(
            problem, **_cleanup_kwargs(ff_sampler.sample, kwargs))
    elif method == "dgsm":
        samples = finite_diff.sample(
            problem, N, **_cleanup_kwargs(finite_diff.sample, kwargs))
    elif method == "delta":
        if "samples" in kwargs:
            samples = kwargs["samples"]
        else:
            samples = latin.sample(problem, N,
                                   **_cleanup_kwargs(latin.sample, kwargs))

    # convert from samples in [0, 1] to uncertainty domain
    for i, u in enumerate(model.uncertainties):
        samples[:, i] = u.ppf(samples[:, i])

    # run the model and collect the responses
    responses = np.empty(samples.shape[0])

    for i in range(samples.shape[0]):
        sample = {k: v for k, v in zip(model.uncertainties.keys(), samples[i])}
        responses[i] = evaluate(model, overwrite(sample, policy))[response]

    # run the sensitivity analysis method
    if method == "sobol":
        result = sobol.analyze(problem, responses,
                               **_cleanup_kwargs(sobol.analyze, kwargs))
    elif method == "morris":
        result = morris_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(morris_analyzer.analyze, kwargs))
    elif method == "fast":
        result = fast.analyze(problem, responses,
                              **_cleanup_kwargs(fast.analyze, kwargs))
    elif method == "ff":
        result = ff_analyzer.analyze(
            problem, samples, responses,
            **_cleanup_kwargs(ff_analyzer.analyze, kwargs))
    elif method == "dgsm":
        result = dgsm.analyze(problem, samples, responses,
                              **_cleanup_kwargs(dgsm.analyze, kwargs))
    elif method == "delta":
        result = delta.analyze(problem, samples, responses,
                               **_cleanup_kwargs(delta.analyze, kwargs))

    # convert the SALib results into a form allowing pretty printing and
    # lookups using the parameter name
    pretty_result = SAResult(
        list(result["names"] if "names" in result else problem["names"]))

    if "S1" in result:
        pretty_result["S1"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1"])
        }
    if "S1_conf" in result:
        pretty_result["S1_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["S1_conf"])
        }
    if "ST" in result:
        pretty_result["ST"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST"])
        }
    if "ST_conf" in result:
        pretty_result["ST_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["ST_conf"])
        }
    if "S2" in result:
        pretty_result["S2"] = _S2_to_dict(result["S2"], problem)
    if "S2_conf" in result:
        pretty_result["S2_conf"] = _S2_to_dict(result["S2_conf"], problem)
    if "delta" in result:
        pretty_result["delta"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta"])
        }
    if "delta_conf" in result:
        pretty_result["delta_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["delta_conf"])
        }
    if "vi" in result:
        pretty_result["vi"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi"])
        }
    if "vi_std" in result:
        pretty_result["vi_std"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["vi_std"])
        }
    if "dgsm" in result:
        pretty_result["dgsm"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm"])
        }
    if "dgsm_conf" in result:
        pretty_result["dgsm_conf"] = {
            k: float(v)
            for k, v in zip(problem["names"], result["dgsm_conf"])
        }
    if "mu" in result:
        pretty_result["mu"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu"])
        }
    if "mu_star" in result:
        pretty_result["mu_star"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star"])
        }
    if "mu_star_conf" in result:
        pretty_result["mu_star_conf"] = {
            k: float(v)
            for k, v in zip(result["names"], result["mu_star_conf"])
        }
    if "sigma" in result:
        pretty_result["sigma"] = {
            k: float(v)
            for k, v in zip(result["names"], result["sigma"])
        }

    return pretty_result
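_predict_N and _cleanup_kwargs are project helpers not shown in this listing. A plausible sketch of _cleanup_kwargs, assuming it simply drops keyword arguments that the target function does not accept:

import inspect

def _cleanup_kwargs(func, kwargs):
    # keep only keyword arguments present in func's signature (hypothetical)
    accepted = inspect.signature(func).parameters
    return {k: v for k, v in kwargs.items() if k in accepted}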
Example #22
 def sample(self, problem, size):
     return fast_sampler.sample(problem, size, self.m)
Example #23
if ctrlSetting["saMethod"] == 1: # For Sobol method
    # Convergence properties of the Sobol' sequence are only valid if
    # 'N' (here 20) is a power of 2
    sampleMethod = "saltelli"
    saltelliArgument = ctrlSetting["saltelliArg"]
    if not checkSaltelliArgument(saltelliArgument):
        print("The argument for saltelli sample need to be a value that is power of 2")
        exit()
    parmSamples = saltelli.sample(parmForSA, saltelliArgument)
elif ctrlSetting["saMethod"] == 2: # For Morris method
    sampleMethod = "Morris"
    parmSamples = morris_sample.sample(parmForSA, ctrlSetting["morrisRes"], num_levels=4)
elif ctrlSetting["saMethod"] == 3: # For FAST method
    sampleMethod = "Fast"
    # SALib.sample.fast_sampler.sample(problem, N, M=4, seed=None)
    parmSamples = fast_sampler.sample(parmForSA, ctrlSetting["fastRes"], M=4)

fnpParmSamples = os.path.join(fdSA, "parmSample_{}.txt".format(sampleMethod))
np.savetxt(fnpParmSamples, parmSamples, fmt='%.3f',delimiter=' ')

# Step 3: Evaluate the model with sampled results and
# store the output into a file
# In this version, the average value will be used.
# Another question to consider is the number of sites used.

# To avoid duplicating the code that modifies file values, the parameter
# values will be updated based on the values in each row of the parameter
# dataframe.

# A list to store the values for all runs, which will be written into a file.
# There are multiple outlets; each outlet will have one list representing
Example #24
        [lob_search, upb_search],
    ]
}

## Generate samples
if method_flag == 1:
    param_values = saltelli.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 2:
    param_values = latin.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 3:
    param_values = finite_diff.sample(problem, sample_number, delta=0.001)
    parm = (param_values + 1) * 7e10
elif method_flag == 4:
    param_values = fast_sampler.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10
elif method_flag == 5:
    param_values = ff_sample(problem)
    parm = (param_values[:, :21] + 1) * 7e10
elif method_flag == 6:
    param_values = morris_sample(problem, N=sample_number, num_levels=4, grid_jump=2, \
                      optimal_trajectories=None)
    parm = (param_values + 1) * 7e10
elif method_flag == 7:
    param_values = saltelli.sample(problem, sample_number)
    parm = (param_values + 1) * 7e10

## Run model (example)
FEM_freq = uncertainty_analysis.uncertainty_analysis.random_freq_run(
    analysis=analysis1, parm=parm, target='E', index=index)
Example #25
def main(args):
    if args.subcommand != 'sample':
        print('Reading sensitivity samples from %s...' % args.info)
        with open(args.info, 'rb') as f:
            job_info = pickle.load(f)
        args.xmlfile = job_info['sample_args'].xmlfile

    print('Reading configuration from %s...' % args.xmlfile)
    current_job = job.fromConfigurationFile(args.xmlfile)

    names = current_job.getParameterNames()
    minpar, maxpar = current_job.getParameterBounds()
    logscale = current_job.getParameterLogScale()

    # For parameters that have been marked for log-transformation,
    # transform their ranges now so that SAlib will operate in log-transformed space at all times.
    for i, log in enumerate(logscale):
        if log:
            minpar[i] = numpy.log10(minpar[i])
            maxpar[i] = numpy.log10(maxpar[i])

    SAlib_problem = {
        'num_vars': len(names),
        'names': names,
        'bounds': list(zip(minpar, maxpar))
    }

    if args.subcommand == 'sample':
        # Only create setup directories
        X = sample(SAlib_problem, args)
        job_info = {'sample_args': args, 'X': X}

        if args.dir is not None:
            assert isinstance(current_job, job.program.Job)
            ensemble = X.copy()
            for i, log in enumerate(logscale):
                if log:
                    ensemble[:, i] = 10.**ensemble[:, i]
            job_info[
                'simulationdirs'] = current_job.prepareEnsembleDirectories(
                    ensemble, args.dir, args.format)
        save_info(args.info, job_info)
    elif args.subcommand == 'run':
        X = job_info['X']
        if 'simulationdirs' in job_info:
            # We created all setup directories during the sample step. The user must have run the model in each.
            targets = [(compile(expression, '<string>', 'eval'), ncpath)
                       for (expression, ncpath) in current_job.targets]
            Y = numpy.empty((len(job_info['simulationdirs']), len(targets)))
            print(
                'Retrieving value of target expression(s) for each ensemble member...'
            )
            for i, simulationdir in enumerate(job_info['simulationdirs']):
                for itarget, (expression, ncpath) in enumerate(targets):
                    wrappednc = job.program.NcDict(
                        os.path.join(simulationdir, ncpath))
                    Y[i, itarget] = wrappednc.eval(expression)
                print('  - %i: %s' % (i, Y[i, :]))
                wrappednc.finalize()
        else:
            # We run the model ourselves.
            Y = current_job.evaluate_ensemble([
                undoLogTransform(X[i, :], logscale) for i in range(X.shape[0])
            ],
                                              stop_on_bad_result=True)
            if Y is None:
                print('Ensemble evaluation failed. Exiting...')
                return
            Y = numpy.array(Y)
        job_info['Y'] = Y
        print('Updating sensitivity info in %s with model results...' %
              args.info)
        save_info(args.info, job_info)
    elif args.subcommand == 'analyze':
        if 'Y' not in job_info:
            print('"analyze" step can only be used after "run" step')
            sys.exit(2)
        X, Y = job_info['X'], job_info['Y']
        Y.shape = (X.shape[0], -1)
        if hasattr(current_job, 'targets'):
            target_names = [
                'Target %i (%s/%s)' % (i, path, expr)
                for i, (expr, path) in enumerate(current_job.targets)
            ]
        elif hasattr(current_job, 'target'):
            target_names = [
                'Target %s/%s' % (current_job.target[1], current_job.target[0])
            ]
        else:
            target_names = ['Target %i' % i for i in range(Y.shape[1])]
        mean_rank = numpy.zeros((X.shape[1], ), dtype=int)
        for itarget, target_name in enumerate(target_names):
            sensitivities = analyze(SAlib_problem, args,
                                    job_info['sample_args'], X, Y[:, itarget])
            isort = numpy.argsort(sensitivities)[::-1]
            for irank, ipar in enumerate(isort):
                mean_rank[ipar] += irank
            print(target_name)
            for i in isort:
                print('  - %s (%s)' % (names[i], sensitivities[i]))
        mean_rank = 1 + mean_rank / float(Y.shape[1])

        if args.select is not None:
            n, path = args.select
            n = int(n)
            selected = set()
            print('Consensus ranking (top %i parameters):' % n)
            for i in numpy.argsort(mean_rank)[:n]:
                print('  - %s (mean rank = %.1f)' % (names[i], mean_rank[i]))
                selected.add(names[i])
            xml_tree = xml.etree.ElementTree.parse(args.xmlfile)
            parameters_xml = xml_tree.find('./parameters')
            for ipar, element in enumerate(
                    parameters_xml.findall('./parameter')):
                with job.shared.XMLAttributes(element, 'parameter %i' %
                                              (ipar + 1, )) as att:
                    name = current_job.getParameter(att).name
                if name not in selected:
                    element.tag = 'disabled_parameter'
            xml_tree.write(path)
Example #26
File: fast.py Project: SALib/SALib
import sys
sys.path.append('../..')

from SALib.analyze import fast
from SALib.sample import fast_sampler
from SALib.test_functions import Ishigami
from SALib.util import read_param_file

# Read the parameter range file and generate samples
problem = read_param_file('../../src/SALib/test_functions/params/Ishigami.txt')

# Generate samples
param_values = fast_sampler.sample(problem, 1000)

# Run the "model" and save the output in a text file
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = fast.analyze(problem, Y, print_to_console=False)
# Returns a dictionary with keys 'S1' and 'ST'
# e.g. Si['S1'] contains the first-order index for each parameter, in the
# same order as the parameter file
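For reference, the regression test in Example #10 expects these indices to land near S1 ≈ [0.31, 0.44, 0.00] and ST ≈ [0.55, 0.44, 0.24] for the Ishigami function:

print(Si['S1'])  # first-order indices, in parameter-file order
print(Si['ST'])  # total-order indices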
Example #27
def populate():
    # 'parameters' dictionary stores each line in the model
    par_keys = list(parameters.keys())

    # init problem definition
    seed = int(opts['seed'])
    levels = int(opts['p_levels'])
    problem = {
        'names': opts['par_name'],
        'num_vars': len(opts['par_name']),
        'bounds': [],
    }

    # define bounds following the model configuration
    for line in range(len(par_keys)):
        if parameters[line][0] == 'par':
            if parameters[line][3] == 'range':
                lower = float(parameters[par_keys[line]][4])
                upper = float(parameters[par_keys[line]][5])
            if parameters[line][3] == 'factor':
                lower = float(parameters[line][2]) * (
                    1 - float(parameters[par_keys[line]][4]))
                upper = float(parameters[line][2]) * (
                    1 + float(parameters[par_keys[line]][5]))
            problem['bounds'].append([lower, upper])

    # create samples to simulate
    if opts['method'] == 'sobol':
        models = saltelli.sample(problem=problem,
                                 N=levels,
                                 calc_second_order=True,
                                 seed=seed)
    elif opts['method'] == 'fast':
        models = fast_sampler.sample(problem=problem, N=levels, seed=seed)
    elif opts['method'] == 'rbd-fast' or opts['method'] == 'delta' or opts[
            'method'] == 'dgsm':
        models = latin.sample(problem=problem, N=levels, seed=seed)
    elif opts['method'] == 'morris':
        models = morris_sample(problem=problem, N=levels)
    elif opts['method'] == 'frac':
        models = ff_sample(problem, seed=seed)
    else:
        error_msg = 'Wrong method name.'
        print(error_msg)
        raise ValueError(error_msg)

    # add samples to population dict
    population = {}
    population['problem', 'samples'] = models

    # write models
    model_string = 'level{:0' + str(len(str(len(models)))) + 'd}'

    for model_index, model in enumerate(models):
        model_key = model_string.format(model_index + 1)
        population[model_key, 'model'] = model_key
        for par_index, par_name in enumerate(opts['par_name']):
            population[model_key, par_name] = models[model_index][par_index]

    # generate a kappa file per model
    par_string = '%var: \'{:s}\' {:.' + opts['par_prec'] + '}\n'

    if opts['continue'] == '1':
        for model in sorted(population.keys()):
            if model[1] == 'model':
                for simulation in range(opts['nsims']):
                    model_key = model[0]
                    model_name = population[model_key, 'model']

                    # define a perturbation in the kappa model that instructs KaSim to calculate the Dynamic Influence Network (DIN)
                    #if opts['type'] == 'total':
                    if opts['syntax'] == '4':
                        flux = '%mod: [T] > {:s} do $DIN \"flux_{:s}_{:03d}.json\" [true];\n'.format(
                            opts['tmin'], model_key, simulation)
                        flux += '%mod: [T] > {:s} do $DIN \"flux_{:s}_{:03d}.json\" [false];'.format(
                            opts['tmax'], model_key, simulation)
                    else:  # kappa3.5 uses $FLUX instead of $DIN
                        flux = '%mod: [T] > {:s} do $FLUX \"flux_{:s}_{:03d}.json\" [true]\n'.format(
                            opts['tmin'], model_key, simulation)
                        flux += '%mod: [T] > {:s} do $FLUX \"flux_{:s}_{:03d}.json\" [false]'.format(
                            opts['tmax'], model_key, simulation)

                #else: # sliced global sensitivity analysis
                #if opts['syntax'] == '4':
                #flux = '%mod: repeat (([T] > DIM_clock) && (DIM_tick > (DIM_length - 1))) do $DIN "flux_".(DIM_tick - DIM_length).".json" [false] until [false];'
                #else: # kappa3.5 uses $FLUX instead of $DIN
                #flux = '\n# Added to calculate a sliced global sensitivity analysis\n'
                #flux += '%var: \'DIN_beat\' {:s}\n'.format(opts['beat'])
                #flux += '%var: \'DIN_length\' {:s}\n'.format(opts['size'])
                #flux += '%var: \'DIN_tick\' {:s}\n'.format(opts['tick'])
                #flux += '%var: \'DIN_clock\' {:s}\n'.format(opts['tmin'])
                #flux += '%mod: repeat (([T] > DIN_clock) && (DIN_tick > (DIN_length - 1))) do '
                #flux += '$FLUX \"flux_{:s}\".(DIN_tick - DIN_length).\".json\" [false] until [false]\n'.format(model_key)
                #flux += '%mod: repeat ([T] > DIN_clock) do '
                #flux += '$FLUX "flux_{:s}".DIN_tick.".json" "probability" [true] until ((((DIN_tick + DIN_length) + 1) * DIN_beat) > [Tmax])\n'.format(model_key)
                #flux += '%mod: repeat ([T] > DIN_clock) do $UPDATE DIN_clock (DIN_clock + DIN_beat); $UPDATE DIN_tick (DIN_tick + 1) until [false]'

                    model_path = './model_{:s}_{:03d}.kappa'.format(
                        model_name, simulation)
                    if not os.path.exists(model_path):
                        with open(model_path, 'w') as file:
                            for line in par_keys:
                                if parameters[line][0] == 'par':
                                    file.write(
                                        par_string.format(
                                            parameters[line][1],
                                            population[model_key,
                                                       parameters[line][1]]))
                                else:
                                    file.write(parameters[line])
                            # add the DIN perturbation at the end of the kappa file
                            file.write(flux)

    # add problem definition to population dict
    population['problem', 'definition'] = problem

    return population
Example #28
File: fast.py Project: amicol/SALib
import sys
sys.path.append('../..')

from SALib.sample import fast_sampler
from SALib.analyze import fast
from SALib.test_functions import Ishigami
import numpy as np

# Read the parameter range file and generate samples
param_file = '../../SALib/test_functions/params/Ishigami.txt'

# Generate samples
param_values = fast_sampler.sample(1000, param_file)

# Run the "model" and save the output in a text file
# This will happen offline for external models
Y = Ishigami.evaluate(param_values)
np.savetxt("model_output.txt", Y, delimiter=' ')

# Perform the sensitivity analysis using the model output
# Specify which column of the output file to analyze (zero-indexed)
Si = fast.analyze(
    param_file, 'model_output.txt', column=0, print_to_console=False)
# Returns a dictionary with keys 'S1' and 'ST'
# e.g. Si['S1'] contains the first-order index for each parameter, in the
# same order as the parameter file
Example #29
 def sample(self, num_samples=10000):
     param_values = fast_sampler.sample(self.problem, num_samples)
     return param_values
Example #30
def main(args):
    if args.subcommand != 'sample':
        print('Reading sensitivity samples from %s...' % args.info)
        with open(args.info, 'rb') as f:
            job_info = pickle.load(f)
        args.xmlfile = job_info['sample_args'].xmlfile

    print('Reading configuration from %s...' % args.xmlfile)
    current_job = job.fromConfigurationFile(
        args.xmlfile, verbose=not getattr(args, 'quiet', False))

    names = current_job.getParameterNames()
    minpar, maxpar = current_job.getParameterBounds()
    logscale = current_job.getParameterLogScale()

    # For parameters that have been marked for log-transformation,
    # transform their ranges now so that SAlib will operate in log-transformed space at all times.
    for i, log in enumerate(logscale):
        if log:
            minpar[i] = numpy.log10(minpar[i])
            maxpar[i] = numpy.log10(maxpar[i])

    SAlib_problem = {
        'num_vars': len(names),
        'names': names,
        'bounds': list(zip(minpar, maxpar))
    }

    if args.subcommand == 'sample':
        # Only create setup directories
        X = sample(SAlib_problem, args)
        assert X.shape[1] == SAlib_problem['num_vars']
        job_info = {'sample_args': args, 'X': X}

        if args.dir is not None:
            assert isinstance(current_job, job.program.Job)
            ensemble = X.copy()
            for i, log in enumerate(logscale):
                if log:
                    ensemble[:, i] = 10.**ensemble[:, i]
            job_info[
                'simulationdirs'] = current_job.prepareEnsembleDirectories(
                    ensemble, args.dir, args.format)
        save_info(args.info, job_info)
    elif args.subcommand == 'run':
        X = job_info['X']
        if 'simulationdirs' in job_info:
            if args.model:
                current_job.runEnsemble(job_info['simulationdirs'],
                                        ncpus=args.ncpus,
                                        ppservers=args.ppservers,
                                        secret=args.secret)
            # We created all setup directories during the sample step. The user must have run the model in each.
            for target in current_job.targets:
                target.initialize()
            Y = numpy.empty(
                (len(job_info['simulationdirs']), len(current_job.targets)))
            print(
                'Retrieving value of target expression(s) for each ensemble member...'
            )
            for i, simulationdir in enumerate(job_info['simulationdirs']):
                for itarget, target in enumerate(current_job.targets):
                    Y[i, itarget] = target.getValue(simulationdir)
                print('  - %i: %s' % (i, Y[i, :]))
        else:
            # We run the model ourselves.
            Y = current_job.evaluate_ensemble([
                undoLogTransform(X[i, :], logscale) for i in range(X.shape[0])
            ],
                                              stop_on_bad_result=not args.cont,
                                              ncpus=args.ncpus,
                                              ppservers=args.ppservers,
                                              secret=args.secret,
                                              verbose=True)
            if Y is None:
                print('Ensemble evaluation failed. Exiting...')
                return
            X_filt, Y_filt = [], []
            for i, y in enumerate(Y):
                if y != -numpy.Inf:
                    X_filt.append(X[i, :])
                    Y_filt.append(y)
            if len(Y_filt) != len(Y):
                print(
                    'WARNING: %i ensemble members returned invalid result. Shrinking ensemble from %i to %i members. Analysis methods that require the original ensemble size may not work.'
                    % (len(Y) - len(Y_filt), len(Y), len(Y_filt)))
                job_info['X'] = numpy.array(X_filt)
            Y = numpy.array(Y_filt)
        job_info['Y'] = Y
        print('Updating sensitivity info in %s with model results...' %
              args.info)
        save_info(args.info, job_info)
    elif args.subcommand == 'analyze':
        if 'Y' not in job_info:
            print('"analyze" step can only be used after "run" step')
            sys.exit(2)
        X, Y = job_info['X'], job_info['Y']
        Y.shape = (X.shape[0], -1)
        if hasattr(current_job, 'targets'):
            target_names = [target.name for target in current_job.targets]
        else:
            target_names = ['Target %i' % i for i in range(Y.shape[1])]
        mean_rank = numpy.zeros((X.shape[1], ), dtype=int)

        all_sa_results = {}
        for itarget, target_name in enumerate(target_names):
            sensitivities, analysis = analyze(SAlib_problem, args,
                                              job_info['sample_args'], X,
                                              Y[:, itarget])
            if args.pickle is not None:
                # Append parameter names and SA results with targetname as key
                analysis['names'] = names
                all_sa_results[target_name] = analysis
            isort = numpy.argsort(sensitivities)[::-1]
            for irank, ipar in enumerate(isort):
                mean_rank[ipar] += irank
            print(target_name)
            for i in isort:
                print('  - %s (%s)' % (names[i], sensitivities[i]))
        mean_rank = 1 + mean_rank / float(Y.shape[1])

        # Create pickle file with all SA results
        if args.pickle is not None:
            print('Writing analysis result to pickle %s.' % args.pickle)
            with open(args.pickle, 'wb') as f:
                pickle.dump(all_sa_results, f)

        if args.select is not None:
            n, path = args.select
            n = int(n)
            selected = set()
            print('Consensus ranking (top %i parameters):' % n)
            for i in numpy.argsort(mean_rank)[:n]:
                print('  - %s (mean rank = %.1f)' % (names[i], mean_rank[i]))
                selected.add(names[i])
            xml_tree = xml.etree.ElementTree.parse(args.xmlfile)
            parameters_xml = xml_tree.find('./parameters')
            for ipar, element in enumerate(
                    parameters_xml.findall('./parameter')):
                with job.shared.XMLAttributes(element, 'parameter %i' %
                                              (ipar + 1, )) as att:
                    name = current_job.getParameter(att).name
                if name not in selected:
                    element.tag = 'disabled_parameter'
            xml_tree.write(path)
Example #31
        [0.1, 1],  #d [m] 
        [0.2,
         0.83],  #Q [m^3/s] - Draw Flowrate (since V draw > V feed - usually)
        [60, 72],  #C_D [g/kg] - Draw Concentration
        [27, 35],  #C_F [g/kg] - Feed concentration
        [0.08, 0.34],  #EP [$/kWh] - Energy price
        [0.003, 0.015],  #dEP [$] - Change in energy price
        [0.01, 0.05],  #r - Interest rate
        [0.6, 1],  #OpTime - For RO Plant 
        [5.80, 14.18],  #Cms [$/m^2]
        [14.5, 31.6]
    ]  #T [deg C] - range from Tampa Bay Station 28 since Aug 2015
}

if method.lower() in ['fast']:
    param_values = fast_sampler.sample(
        problem, N_samples)  #For Fourier Amplitude Sensitivity Test
    print("FAST")
elif method.lower() in ['rbd']:
    param_values = latin.sample(problem, N_samples)  #For RBD_FAST Method
    print("RBD")
elif method.lower() in ['sobol']:
    param_values = saltelli.sample(problem, N_samples)  #For Sobol Method
    print("Sobol")

#Inputs are prob,N_Yrs,Tropp,PT_d,PT_f,C_Vant,PTopp,inf,v,R,A_m,L_m,Mpv,g,MW_NaCl,csv_name
#Tropp = 1 = transmission, 0 = no transmission
#PTopp: 0 - no pretreatment, 1 - Draw Pretreatment, 2 = Feed pretreatment, 3 = feed and draw pretreatment
Test = pse.comboUSA(param_values, N_Yrs, 0, Pt['MF'], Pt['MF'], C_Vant, 2, inf,
                    v, R, M_geometry, g, MW_NaCl, csv_title_output)

#%%#Run Analysis###############################################################