Code example #1
    def __init__(self,
                 root_models,
                 distances,
                 backend,
                 kernel=None,
                 seed=None):
        self.model = root_models
        # Define the joint linear-combination distance built from the distances
        # of the individual models
        self.distance = LinearCombination(root_models, distances)
        if kernel is None:
            # No kernel given: build a DefaultKernel over all parameter models in the graph
            mapping, garbage_index = self._get_mapping()
            models = []
            for mdl, mdl_index in mapping:
                models.append(mdl)
            kernel = DefaultKernel(models)

        self.kernel = kernel
        self.backend = backend
        self.rng = np.random.RandomState(seed)
        self.logger = logging.getLogger(__name__)

        self.accepted_parameters_manager = AcceptedParametersManager(
            self.model)

        self.simulation_counter = 0
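A minimal usage sketch for the constructor above, assuming PMCABC shares this __init__ (it is called with exactly this argument list in code example #2 below); the model, distance and backend choices here are illustrative. Passing kernel=None triggers the DefaultKernel fallback shown above.

from abcpy.continuousmodels import Normal, Uniform
from abcpy.distances import Euclidean
from abcpy.statistics import Identity
from abcpy.backends import BackendDummy
from abcpy.inferences import PMCABC

mu = Uniform([[150], [200]], name='mu')
sigma = Uniform([[5], [25]], name='sigma')
height = Normal([mu, sigma], name='height')
distance = Euclidean(Identity(degree=1, cross=False))

# kernel omitted -> the constructor builds a DefaultKernel over mu and sigma
sampler = PMCABC([height], [distance], BackendDummy(), kernel=None, seed=1)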
Code example #2
import numpy as np


def infer_parameters():
    # define observation for true parameters mean=170, std=15
    height_obs = [
        160.82499176, 167.24266737, 185.71695756, 153.7045709, 163.40568812,
        140.70658699, 169.59102084, 172.81041696, 187.38782738, 179.66358934,
        176.63417241, 189.16082803, 181.98288443, 170.18565017, 183.78493886,
        166.58387299, 161.9521899, 155.69213073, 156.17867343, 144.51580379,
        170.29847515, 197.96767899, 153.36646527, 162.22710198, 158.70012047,
        178.53470703, 170.77697743, 164.31392633, 165.88595994, 177.38083686,
        146.67058471763457, 179.41946565658628, 238.02751620619537,
        206.22458790620766, 220.89530574344568, 221.04082532837026,
        142.25301427453394, 261.37656571434275, 171.63761180867033,
        210.28121820385866, 237.29130237612236, 175.75558340169619,
        224.54340549862235, 197.42448680731226, 165.88273684581381,
        166.55094082844519, 229.54308602661584, 222.99844054358519,
        185.30223966014586, 152.69149367593846, 206.94372818527413,
        256.35498655339154, 165.43140916577741, 250.19273595481803,
        148.87781549665536, 223.05547559193792, 230.03418198709608,
        146.13611923127021, 138.24716809523139, 179.26755740864527,
        141.21704876815426, 170.89587081800852, 222.96391329259626,
        188.27229523693822, 202.67075179617672, 211.75963110985992,
        217.45423324370509
    ]
    # define prior
    from abcpy.continuousmodels import Uniform
    mu = Uniform([[150], [200]], )
    sigma = Uniform([[5], [25]], )
    # define the model
    from abcpy.continuousmodels import Normal as Gaussian
    height = Gaussian([mu, sigma], name='height')

    # define statistics
    from abcpy.statistics import Identity
    statistics_calculator = Identity(degree=2, cross=False)

    # define distance
    from abcpy.distances import LogReg
    distance_calculator = LogReg(statistics_calculator)

    # define kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([mu, sigma])

    # define backend
    # Note, the dummy backend does not parallelize the code!
    from abcpy.backends import BackendDummy as Backend
    backend = Backend()

    # define sampling scheme
    from abcpy.inferences import PMCABC
    sampler = PMCABC([height], [distance_calculator], backend, kernel, seed=1)

    # sample from scheme
    T, n_sample, n_samples_per_param = 3, 250, 10
    eps_arr = np.array([.75])
    epsilon_percentile = 10
    journal = sampler.sample([height_obs], T, eps_arr, n_sample,
                             n_samples_per_param, epsilon_percentile)

    return journal
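A hedged follow-up showing how the returned journal is typically inspected; posterior_mean() and Journal.fromFile() are assumed from abcpy's Journal API (journal.save() is used the same way in other snippets of this collection).

journal = infer_parameters()
print(journal.posterior_mean())       # posterior means of mu and sigma
journal.save('gaussian_pmcabc.jrnl')  # persist the results to disk

# reload later without re-running the inference
from abcpy.output import Journal
journal_reloaded = Journal.fromFile('gaussian_pmcabc.jrnl')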
Code example #3
File: Inferences.py (project: eth-cscs/abcpy-models)
    def __init__(self,
                 root_models,
                 distances,
                 backend,
                 kernel=None,
                 seed=None):
        self.model = root_models
        # Define the joint linear-combination distance built from the distances
        # of the individual models
        self.distance = LinearCombination(root_models, distances)

        if kernel is None:
            # No kernel given: build a DefaultKernel over all parameter models in the graph
            mapping, garbage_index = self._get_mapping()
            models = []
            for mdl, mdl_index in mapping:
                models.append(mdl)
            kernel = DefaultKernel(models)

        self.kernel = kernel
        self.backend = backend
        self.rng = np.random.RandomState(seed)
        self.logger = logging.getLogger(__name__)

        # these are usually big tables, so we broadcast them to have them once
        # per executor instead of once per task
        self.smooth_distances_bds = None
        self.all_distances_bds = None
        self.accepted_parameters_manager = AcceptedParametersManager(
            self.model)

        self.simulation_counter = 0
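A hedged sketch of what the broadcast comment above refers to: abcpy backends expose parallelize/broadcast/map/collect, and broadcast() wraps a large object so each executor receives it once, with tasks reading it through .value(). The toy table below is illustrative; with BackendDummy everything runs in a single process.

from abcpy.backends import BackendDummy

backend = BackendDummy()
big_table = list(range(10000))            # stand-in for a large distance table
table_bds = backend.broadcast(big_table)  # shipped once per executor, not once per task

def lookup(i):
    return table_bds.value()[i]           # tasks read the broadcast value

pds = backend.parallelize([0, 10, 100])
print(backend.collect(backend.map(lookup, pds)))  # -> [0, 10, 100]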
Code example #4
from Model import TIP4PGromacsOOOH as Water

# Imports needed by this excerpt (in the original script, Abserror is presumably
# imported alongside these; its module is not shown here)
import numpy as np
from abcpy.continuousmodels import Uniform
from abcpy.statistics import Identity
from abcpy.inferences import APMCABC

# Define Graphical model
theta1 = Uniform([[.281], [.53]], name='theta1')
theta2 = Uniform([[0.2], [0.9]], name='theta2')
water = Water([theta1, theta2, 2500000])

# Define distance and statistics
statistics_calculator = Identity(degree=1, cross=False)
distance_calculator = Abserror(statistics_calculator)

# Define backend
from abcpy.backends import BackendMPI as Backend
backend = Backend()

# Define kernel
from abcpy.perturbationkernel import DefaultKernel
kernel = DefaultKernel([theta1, theta2])


######### Inference for simulated data ###############
water_obs = [np.load('Data/obs_data.npy')]

sampler = APMCABC([water], [distance_calculator], backend, kernel, seed=1)
steps, n_samples, n_samples_per_param, alpha, acceptance_cutoff, covFactor, full_output, journal_file = 10, 100, 1, 0.1, 0.03, 2.0, 1, None

print('TIP4P: APMCABC Inferring for simulated data')
journal_apmcabc = sampler.sample([water_obs], steps, n_samples, n_samples_per_param, alpha, acceptance_cutoff, covFactor, full_output, journal_file)
print('TIP4P: APMCABC done for simulated data')
journal_apmcabc.save('Result/MD_GROMACS_APMCABC_obs.jrnl')

######### Inference for Experimental data 1 (Neutron Diffraction of Water) ###############
water_obs = [np.load('Data/exp_data.npy')]
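The excerpt stops right after loading the experimental observations; a hedged continuation, mirroring the simulated-data block above (the output path is illustrative), would be:

print('TIP4P: APMCABC Inferring for experimental data')
journal_apmcabc_exp = sampler.sample([water_obs], steps, n_samples, n_samples_per_param,
                                     alpha, acceptance_cutoff, covFactor, full_output,
                                     journal_file)
print('TIP4P: APMCABC done for experimental data')
journal_apmcabc_exp.save('Result/MD_GROMACS_APMCABC_exp.jrnl')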
Code example #5
File: Inference.py (project: eth-cscs/abcpy-models)
import numpy as np
from abcpy.continuousmodels import Uniform
from Model import AshDispersal

u0 = Uniform([[100], [300]], name='u0')
l0 = Uniform([[30], [100]], name='l0')
AD = AshDispersal([u0, l0], name='AD')

from abcpy.backends import BackendDummy as Backend
backend = Backend(process_per_model=2)

from abcpy.perturbationkernel import DefaultKernel
kernel = DefaultKernel([u0, l0])

from Distance import DepositionDistance
distance_calculator = DepositionDistance()

print(distance_calculator.distance("test.h5", "test.h5"))

from abcpy.inferences import SABC
sampler = SABC([AD], [distance_calculator], backend, kernel, seed=1)
#steps, epsilon, n_samples, n_samples_per_param, beta, delta, v, ar_cutoff, resample, n_update, adaptcov, full_output = 3, [50], 50, 1, 2, 0.2, 0.3, 0.1, None, None, 1, 1
steps, epsilon, n_samples, n_samples_per_param, beta, delta, v, ar_cutoff, resample, n_update, adaptcov, full_output = 3, [1], 1, 1, 2, 0.2, 0.3, 0.1, None, None, 1, 1
print('SABC Inferring')
journal_sabc = sampler.sample("test.h5", steps, epsilon, n_samples,
                              n_samples_per_param, beta, delta, v, ar_cutoff,
                              resample, n_update, adaptcov, full_output)
print('SABC done')
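A hedged follow-up mirroring the other snippets in this collection: persist the SABC journal for later analysis (the output path is illustrative, and the configuration attribute is assumed from abcpy's Journal API).

journal_sabc.save('Result/AshDispersal_SABC.jrnl')
print(journal_sabc.configuration)  # sampler settings recorded in the journal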
Code example #6
def infer_parameters(backend,
                     scheme='rejection',
                     n_samples=250,
                     n_samples_per_param=10,
                     logging_level=logging.WARN):
    """Perform inference for this example.
    Parameters
    ----------
    backend
        The parallelization backend
    steps : integer, optional
        Number of iterations in the sequential PMCABC algoritm ("generations"). The default value is 3
    n_samples : integer, optional
        Number of posterior samples to generate. The default value is 250.
    n_samples_per_param : integer, optional
        Number of data points in each simulated data set. The default value is 10.
    Returns
    -------
    abcpy.output.Journal
        A journal containing simulation results, metadata and optionally intermediate results.
    """
    logging.basicConfig(level=logging_level)

    # experimental setup
    T = 50.  # simulation time
    dt = 0.025  # time step
    I_amp = 0.32  # stimulus amplitude
    r_soma = 40  # radius of soma
    threshold = -55  # AP threshold

    # input stimulus
    stimulus_dict = constant_stimulus(I_amp=I_amp,
                                      T=T,
                                      dt=dt,
                                      t_stim_on=10,
                                      t_stim_off=40,
                                      r_soma=r_soma)
    I = stimulus_dict["I"]
    #I_stim = stimulus_dict["I_stim"]

    # true parameters
    gbar_K_true = 36
    gbar_Na_true = 120

    gbar_K_std = 5
    gbar_Na_std = 5

    # define priors
    gbar_K = Normal([[gbar_K_true], [gbar_K_std]], name='gbar_K')
    gbar_Na = Normal([[gbar_Na_true], [gbar_Na_std]], name='gbar_Na')

    # define the model
    hh_simulator = HHSimulator([gbar_K, gbar_Na], I, T, dt)

    # observed data
    obs_data = hh_simulator.forward_simulate([gbar_K_true, gbar_Na_true])

    # define statistics
    statistics_calculator = Identity()

    # Learn the optimal summary statistics using Semiautomatic summary selection
    statistics_learning = Semiautomatic([hh_simulator],
                                        statistics_calculator,
                                        backend,
                                        n_samples=1000,
                                        n_samples_per_param=1,
                                        seed=42)
    new_statistics_calculator = statistics_learning.get_statistics()

    # define distance
    distance_calculator = Euclidean(new_statistics_calculator)

    # define kernel
    kernel = DefaultKernel([gbar_K, gbar_Na])

    # define sampling scheme
    if scheme == 'rejection':
        sampler = RejectionABC([hh_simulator], [distance_calculator],
                               backend,
                               seed=42)
        # sample from scheme
        epsilon = 2.
        journal = sampler.sample([obs_data], n_samples, n_samples_per_param,
                                 epsilon)

    elif scheme == 'smc':
        sampler = SMCABC([hh_simulator], [distance_calculator],
                         backend,
                         kernel,
                         seed=42)
        # sample from scheme
        steps = 3
        journal = sampler.sample([obs_data], steps, n_samples,
                                 n_samples_per_param)
    elif scheme == 'pmc':
        sampler = PMCABC([hh_simulator], [distance_calculator],
                         backend,
                         kernel,
                         seed=42)
        # sample from scheme
        steps = 3
        eps_arr = np.array([2.])
        epsilon_percentile = 10
        journal = sampler.sample([obs_data], steps, eps_arr, n_samples,
                                 n_samples_per_param, epsilon_percentile)
    else:
        raise ValueError("Unknown sampling scheme: {}".format(scheme))

    return journal
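A hedged usage sketch for the function above, assuming the module-level imports of the original script (HHSimulator, constant_stimulus and the abcpy classes) are available; BackendDummy keeps everything in one process, so swap in BackendMPI or BackendSpark for real runs.

from abcpy.backends import BackendDummy

backend = BackendDummy()
journal = infer_parameters(backend, scheme='pmc', n_samples=100)
print(journal.posterior_mean())  # posterior_mean() assumed from abcpy's Journal API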
Code example #7
import logging

import numpy as np


def infer_parameters(steps=2, n_sample=50, n_samples_per_param=1, logging_level=logging.WARN):
    """Perform inference for this example.

    Parameters
    ----------
    steps : integer, optional
        Number of iterations in the sequential PMCABC algorithm ("generations"). The default value is 2.
    n_sample : integer, optional
        Number of posterior samples to generate. The default value is 50.
    n_samples_per_param : integer, optional
        Number of data points in each simulated data set. The default value is 1.
    logging_level : optional
        Logging level passed to logging.basicConfig. The default value is logging.WARN.

    Returns
    -------
    abcpy.output.Journal
        A journal containing simulation results, metadata and optionally intermediate results.
    """
    logging.basicConfig(level=logging_level)
    # define backend
    # Note, the dummy backend does not parallelize the code!
    from abcpy.backends import BackendDummy as Backend
    backend = Backend()

    # define observation for true parameters mean=170, std=15
    height_obs = [160.82499176, 167.24266737, 185.71695756, 153.7045709, 163.40568812, 140.70658699, 169.59102084,
                  172.81041696, 187.38782738, 179.66358934, 176.63417241, 189.16082803, 181.98288443, 170.18565017,
                  183.78493886, 166.58387299, 161.9521899, 155.69213073, 156.17867343, 144.51580379, 170.29847515,
                  197.96767899, 153.36646527, 162.22710198, 158.70012047, 178.53470703, 170.77697743, 164.31392633,
                  165.88595994, 177.38083686, 146.67058471763457, 179.41946565658628, 238.02751620619537,
                  206.22458790620766, 220.89530574344568, 221.04082532837026, 142.25301427453394, 261.37656571434275,
                  171.63761180867033, 210.28121820385866, 237.29130237612236, 175.75558340169619, 224.54340549862235,
                  197.42448680731226, 165.88273684581381, 166.55094082844519, 229.54308602661584, 222.99844054358519,
                  185.30223966014586, 152.69149367593846, 206.94372818527413, 256.35498655339154, 165.43140916577741,
                  250.19273595481803, 148.87781549665536, 223.05547559193792, 230.03418198709608, 146.13611923127021,
                  138.24716809523139, 179.26755740864527, 141.21704876815426, 170.89587081800852, 222.96391329259626,
                  188.27229523693822, 202.67075179617672, 211.75963110985992, 217.45423324370509]

    # define prior
    from abcpy.continuousmodels import Uniform
    mu = Uniform([[150], [200]], name="mu")
    sigma = Uniform([[5], [25]], name="sigma")

    # define the model
    from abcpy.continuousmodels import Normal
    height = Normal([mu, sigma], )

    # 1) generate simulations from prior
    from abcpy.inferences import DrawFromPrior
    draw_from_prior = DrawFromPrior([height], backend=backend)

    # notice the use of the `.sample_par_sim_pairs` method rather than `.sample` to obtain data suitably formatted
    # for the summary statistics learning routines
    parameters, simulations = draw_from_prior.sample_par_sim_pairs(100, n_samples_per_param=1)
    # if you want to use the test loss to do early stopping in the training:
    parameters_val, simulations_val = draw_from_prior.sample_par_sim_pairs(100, n_samples_per_param=1)
    # discard the middle dimension (n_samples_per_param), as the StatisticsLearning classes expect it to be 1
    simulations = simulations.reshape(simulations.shape[0], simulations.shape[2])
    simulations_val = simulations_val.reshape(simulations_val.shape[0], simulations_val.shape[2])

    # 2) now train the NNs with the different methods with the generated data
    from abcpy.statistics import Identity
    identity = Identity()  # to apply before computing the statistics

    logging.info("semiNN")
    from abcpy.statisticslearning import SemiautomaticNN, TripletDistanceLearning
    semiNN = SemiautomaticNN([height], identity, backend=backend, parameters=parameters,
                             simulations=simulations, parameters_val=parameters_val, simulations_val=simulations_val,
                             early_stopping=True,  # early stopping
                             seed=1, n_epochs=10, scale_samples=False, use_tqdm=False)
    logging.info("triplet")
    triplet = TripletDistanceLearning([height], identity, backend=backend, parameters=parameters,
                                      simulations=simulations, parameters_val=parameters_val,
                                      simulations_val=simulations_val,
                                      early_stopping=True,  # early stopping
                                      seed=1, n_epochs=10, scale_samples=True, use_tqdm=False)

    # 3) save and re-load NNs:
    # get the statistics from the already fit StatisticsLearning object 'semiNN':
    learned_seminn_stat = semiNN.get_statistics()
    learned_triplet_stat = triplet.get_statistics()

    # this has a save net method:
    learned_seminn_stat.save_net("seminn_net.pth")
    # if you used `scale_samples=True` when learning the NNs, you also need to provide a path where pickle stores the scaler:
    learned_triplet_stat.save_net("triplet_net.pth", path_to_scaler="scaler.pkl")

    # to reload, use the NeuralEmbedding.fromFile statistics; this needs to know which kind of NN it is using, so you
    # must pass either the input/output sizes (i.e. the data size and the number of parameters) or the network class,
    # if that was specified explicitly in the StatisticsLearning class. Check the docstring of NeuralEmbedding.fromFile
    # for more details.
    from abcpy.statistics import NeuralEmbedding
    learned_seminn_stat_loaded = NeuralEmbedding.fromFile("seminn_net.pth", input_size=1, output_size=2)
    learned_triplet_stat_loaded = NeuralEmbedding.fromFile("triplet_net.pth", input_size=1, output_size=2,
                                                           path_to_scaler="scaler.pkl")

    # 4) you can optionally rescale the different summary statistics by their standard deviation on a reference
    # dataset of simulations. To do this, it is enough to pass the reference dataset at initialization, and the
    # rescaling will be applied every time the statistics are computed on a simulation or observation.
    learned_triplet_stat_loaded = NeuralEmbedding.fromFile("triplet_net.pth", input_size=1, output_size=2,
                                                           path_to_scaler="scaler.pkl",
                                                           reference_simulations=simulations_val)

    # 5) perform inference
    # define distance
    from abcpy.distances import Euclidean
    distance_calculator = Euclidean(learned_seminn_stat_loaded)

    # define kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([mu, sigma])

    # define sampling scheme
    from abcpy.inferences import PMCABC
    sampler = PMCABC([height], [distance_calculator], backend, kernel, seed=1)

    eps_arr = np.array([500])  # starting value of epsilon; the smaller, the slower the algorithm.
    # at each iteration, take as epsilon the epsilon_percentile of the distances obtained by simulations at previous
    # iteration from the observation
    epsilon_percentile = 10
    journal = sampler.sample([height_obs], steps, eps_arr, n_sample, n_samples_per_param, epsilon_percentile)

    return journal
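A hedged variant of step 5, reusing the names defined inside the function above: the same PMCABC inference can be run with the triplet-learned statistics by swapping the statistics object in the distance. This sketch would sit just before the return statement.

    distance_calculator_triplet = Euclidean(learned_triplet_stat_loaded)
    sampler_triplet = PMCABC([height], [distance_calculator_triplet], backend, kernel, seed=1)
    journal_triplet = sampler_triplet.sample([height_obs], steps, eps_arr, n_sample,
                                             n_samples_per_param, epsilon_percentile)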
Code example #8
File: MLE_variance.py (project: eth-cscs/abcpy-models)
        pT = Uniform([[0.1], [10.0]], name='pT')
        pF = Uniform([[0.1e-3], [9.0e-3]], name='pF')
        aT = Uniform([[0], [10]], name='aT')
        v_z_AP = Uniform([[1.0e-3], [9.0e-3]], name='v_z_AP')
        v_z_NAP = Uniform([[1.0e-4], [9.0e-4]], name='v_z_NAP')
        PD = PlateletDeposition([noAP, noNAP, SR_x, pAd, pAg, pT, pF, aT, v_z_AP, v_z_NAP], name='PD')
        # XObserved = np.array([0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 2.11600000e+05, 5.96585366e+03,
        #                       2.00000000e+01, 3.90572997e+03, 1.59328549e+01, 1.38943902e+05, 0.00000000e+00,
        #                       6.00000000e+01, 3.42727305e+03, 2.80052570e+01, 8.57585366e+04, 0.00000000e+00,
        #                       1.20000000e+02, 2.33523014e+03, 7.57715388e+01, 4.25231707e+04, 0.00000000e+00,
        #                       3.00000000e+02, 1.74166329e+02, 2.46413793e+03, 5.15975610e+03, 0.00000000e+00])
        # obsdata = [np.array(XObserved).reshape(1, -1)]

        # Define kernel and join the defined kernels
        from abcpy.perturbationkernel import DefaultKernel
        kernel = DefaultKernel([pAd, pAg, pT, pF, aT, v_z_AP, v_z_NAP])

        # Define Distance functions
        from abcpy.distances import Euclidean
        from statistic import Multiply
        L = np.load('Data/L_all_3_cross.npz')['L']
        stat_mult = Multiply(L=L, degree=3, cross=True)
        dist_calc_mult = Euclidean(stat_mult)

        # SABC - Multiply##
        from abcpy.inferences import SABC
        print('Inference using Classifier Loss')
        sampler = SABC([PD], [dist_calc_mult], backend, kernel, seed=1)
        steps, epsilon, n_samples, n_samples_per_param, ar_cutoff, full_output, journal_file = 20, 10e20, 511, 1, 0.001, 1, None
        print('SABC Inferring')
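The excerpt stops before the sampling call; a hedged continuation, following the keyword pattern used in the other SABC snippets in this collection (obsdata stands for the observed dataset defined earlier in the original script, and the exact SABC.sample signature may differ between abcpy versions):

        journal_sabc = sampler.sample([obsdata], steps=steps, epsilon=epsilon,
                                      n_samples=n_samples,
                                      n_samples_per_param=n_samples_per_param,
                                      ar_cutoff=ar_cutoff,
                                      full_output=full_output,
                                      journal_file=journal_file)
        print('SABC done')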
Code example #9
import logging

import numpy as np


def infer_parameters(steps=3,
                     n_sample=250,
                     n_samples_per_param=10,
                     logging_level=logging.WARN):
    """Perform inference for this example.

    Parameters
    ----------
    steps : integer, optional
        Number of iterations in the sequential PMCABC algorithm ("generations"). The default value is 3.
    n_sample : integer, optional
        Number of posterior samples to generate. The default value is 250.
    n_samples_per_param : integer, optional
        Number of data points in each simulated data set. The default value is 10.
    logging_level : optional
        Logging level passed to logging.basicConfig. The default value is logging.WARN.

    Returns
    -------
    abcpy.output.Journal
        A journal containing simulation results, metadata and optionally intermediate results.
    """
    logging.basicConfig(level=logging_level)
    # define observation for true parameters mean=170, std=15
    height_obs = [
        160.82499176, 167.24266737, 185.71695756, 153.7045709, 163.40568812,
        140.70658699, 169.59102084, 172.81041696, 187.38782738, 179.66358934,
        176.63417241, 189.16082803, 181.98288443, 170.18565017, 183.78493886,
        166.58387299, 161.9521899, 155.69213073, 156.17867343, 144.51580379,
        170.29847515, 197.96767899, 153.36646527, 162.22710198, 158.70012047,
        178.53470703, 170.77697743, 164.31392633, 165.88595994, 177.38083686,
        146.67058471763457, 179.41946565658628, 238.02751620619537,
        206.22458790620766, 220.89530574344568, 221.04082532837026,
        142.25301427453394, 261.37656571434275, 171.63761180867033,
        210.28121820385866, 237.29130237612236, 175.75558340169619,
        224.54340549862235, 197.42448680731226, 165.88273684581381,
        166.55094082844519, 229.54308602661584, 222.99844054358519,
        185.30223966014586, 152.69149367593846, 206.94372818527413,
        256.35498655339154, 165.43140916577741, 250.19273595481803,
        148.87781549665536, 223.05547559193792, 230.03418198709608,
        146.13611923127021, 138.24716809523139, 179.26755740864527,
        141.21704876815426, 170.89587081800852, 222.96391329259626,
        188.27229523693822, 202.67075179617672, 211.75963110985992,
        217.45423324370509
    ]

    # define prior
    from abcpy.continuousmodels import Uniform
    mu = Uniform([[150], [200]], name='mu')
    sigma = Uniform([[5], [25]], name='sigma')

    # define the model
    from abcpy.continuousmodels import Normal
    height = Normal([mu, sigma], name='height')

    # define statistics
    from abcpy.statistics import Identity
    statistics_calculator = Identity(degree=2, cross=False)

    # define distance
    from abcpy.distances import LogReg
    distance_calculator = LogReg(statistics_calculator, seed=42)

    # define kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([mu, sigma])

    # define backend
    # Note, the dummy backend does not parallelize the code!
    from abcpy.backends import BackendDummy as Backend
    backend = Backend()

    # define sampling scheme
    from abcpy.inferences import PMCABC
    sampler = PMCABC([height], [distance_calculator], backend, kernel, seed=1)

    eps_arr = np.array([.75])
    epsilon_percentile = 10
    journal = sampler.sample([height_obs], steps, eps_arr, n_sample,
                             n_samples_per_param, epsilon_percentile)

    return journal
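A hedged check on the result: the observations in the function were generated (per the comment) with mean 170 and standard deviation 15, so the posterior means of mu and sigma can be compared against those values; posterior_mean() is assumed from abcpy's Journal API.

journal = infer_parameters(steps=3)
print(journal.posterior_mean())  # compare with the data-generating mean and std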
Code example #10
def infer_parameters():
    # The data corresponding to model_1 defined below
    grades_obs = [
        3.872486707973337, 4.6735380808674405, 3.9703538990858376,
        4.11021272048805, 4.211048655421368, 4.154817956586653,
        4.0046893064392695, 4.01891381384729, 4.123804757702919,
        4.014941267301294, 3.888174595940634, 4.185275142948246,
        4.55148774469135, 3.8954427675259016, 4.229264035335705,
        3.839949451328312, 4.039402553532825, 4.128077814241238,
        4.361488645531874, 4.086279074446419, 4.370801602256129,
        3.7431697332475466, 4.459454162392378, 3.8873973643008255,
        4.302566721487124, 4.05556051626865, 4.128817316703757,
        3.8673704442215984, 4.2174459453805015, 4.202280254493361,
        4.072851400451234, 3.795173229398952, 4.310702877332585,
        4.376886328810306, 4.183704734748868, 4.332192463368128,
        3.9071312388426587, 4.311681374107893, 3.55187913252144,
        3.318878360783221, 4.187850500877817, 4.207923106081567,
        4.190462065625179, 4.2341474252986036, 4.110228694304768,
        4.1589891480847765, 4.0345604687633045, 4.090635481715123,
        3.1384654393449294, 4.20375641386518, 4.150452690356067,
        4.015304457401275, 3.9635442007388195, 4.075915739179875,
        3.5702080541929284, 4.722333310410388, 3.9087618197155227,
        4.3990088006390735, 3.968501165774181, 4.047603645360087,
        4.109184340976979, 4.132424805281853, 4.444358334346812,
        4.097211737683927, 4.288553086265748, 3.8668863066511303,
        3.8837108501541007
    ]

    # The prior information changing the class size and social background, depending on school location
    from abcpy.continuousmodels import Uniform, Normal
    school_location = Uniform([[0.2], [0.3]], )

    # The average class size of a certain school
    class_size = Normal([[school_location], [0.1]], )

    # The social background of a student
    background = Normal([[school_location], [0.1]], )

    # The grade a student would receive without any bias
    grade_without_additional_effects = Normal([[4.5], [0.25]], )

    # The grade a student of a certain school receives
    final_grade = grade_without_additional_effects - class_size - background

    # The data corresponding to model_2 defined below
    scholarship_obs = [
        2.7179657436207805, 2.124647285937229, 3.07193407853297,
        2.335024761813643, 2.871893855192, 3.4332002458233837,
        3.649996835818173, 3.50292335102711, 2.815638168018455,
        2.3581613289315992, 2.2794821846395568, 2.8725835459926503,
        3.5588573782815685, 2.26053126526137, 1.8998143530749971,
        2.101110815311782, 2.3482974964831573, 2.2707679029919206,
        2.4624550491079225, 2.867017757972507, 3.204249152084959,
        2.4489542437714213, 1.875415915801106, 2.5604889644872433,
        3.891985093269989, 2.7233633223405205, 2.2861070389383533,
        2.9758813233490082, 3.1183403287267755, 2.911814060853062,
        2.60896794303205, 3.5717098647480316, 3.3355752461779824,
        1.99172284546858, 2.339937680892163, 2.9835630207301636,
        2.1684912355975774, 3.014847335983034, 2.7844122961916202,
        2.752119871525148, 2.1567428931391635, 2.5803629307680644,
        2.7326646074552103, 2.559237193255186, 3.13478196958166,
        2.388760269933492, 3.2822443541491815, 2.0114405441787437,
        3.0380056368041073, 2.4889680313769724, 2.821660164621084,
        3.343985964873723, 3.1866861970287808, 4.4535037154856045,
        3.0026333138006027, 2.0675706089352612, 2.3835301730913185,
        2.584208398359566, 3.288077633446465, 2.6955853384148183,
        2.918315169739928, 3.2464814419322985, 2.1601516779909433,
        3.231003347780546, 1.0893224045062178, 0.8032302688764734,
        2.868438615047827
    ]

    # A quantity that determines whether a student will receive a scholarship
    scholarship_without_additional_effects = Normal([[2], [0.5]], )

    # A quantity determining whether a student receives a scholarship, including his social background
    final_scholarship = scholarship_without_additional_effects + 3 * background

    # Define a summary statistics for final grade and final scholarship
    from abcpy.statistics import Identity
    statistics_calculator_final_grade = Identity(degree=2, cross=False)
    statistics_calculator_final_scholarship = Identity(degree=3, cross=False)

    # Define a distance measure for final grade and final scholarship
    from abcpy.approx_lhd import SynLikelihood
    approx_lhd_final_grade = SynLikelihood(statistics_calculator_final_grade)
    approx_lhd_final_scholarship = SynLikelihood(
        statistics_calculator_final_scholarship)

    # Define a backend
    from abcpy.backends import BackendDummy as Backend
    backend = Backend()

    # Define a perturbation kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([school_location, class_size, grade_without_additional_effects, \
                            background, scholarship_without_additional_effects])

    # Define sampling parameters
    T, n_sample, n_samples_per_param = 3, 250, 10

    # Define sampler
    from abcpy.inferences import PMC
    sampler = PMC([final_grade, final_scholarship], \
                     [approx_lhd_final_grade, approx_lhd_final_scholarship], backend, kernel)

    # Sample
    journal = sampler.sample([grades_obs, scholarship_obs], T, n_sample,
                             n_samples_per_param)

    return journal
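A hedged follow-up for this hierarchical example: the PMC journal holds samples for every free parameter of the graph (school_location, class_size, background and the two baseline parameters), not only for the observed models; get_parameters() is assumed from abcpy's Journal API.

journal = infer_parameters()
print(journal.get_parameters().keys())  # all perturbed parameters appear in the journal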
Code example #11
np.save('Result/obs_data_' + str(ind), result)

# Define backend
from abcpy.backends import BackendMPI as Backend
backend = Backend()

# Define Statistics
from abcpy.statistics import Identity
statistics_calculator = Identity(degree=1, cross=False)

# Define distance
from KLdistance import KLdistance
distance_calculator = KLdistance(statistics_calculator)

# Define kernel
from abcpy.perturbationkernel import DefaultKernel
kernel = DefaultKernel([sigma, epsilon])

## APMCABC ##
from abcpy.inferences import APMCABC
sampler = APMCABC([relentropy], [distance_calculator], backend, kernel, seed=1)
steps, n_samples, n_samples_per_param, alpha, acceptance_cutoff, covFactor, full_output, journal_file = 10, 1000, 1, 0.1, 0.03, 2.0, 1, None

# Import Simulated Dataset
relentropy_obs = [np.load('Result/obs_data.npy')]
print('APMCABC Inferring Helium Potential')
journal_apmcabc = sampler.sample([relentropy_obs], steps, n_samples,
                                 n_samples_per_param, alpha, acceptance_cutoff,
                                 covFactor, full_output, journal_file)
journal_apmcabc.save('Result/APMCABCHelium.jrnl')
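A hedged follow-up: APMCABC.sample accepts a journal_file argument (passed as None above), which, as I understand the abcpy API, lets a previously saved journal seed a continued run instead of starting from scratch. This sketch reuses the file saved above.

journal_file = 'Result/APMCABCHelium.jrnl'
journal_continued = sampler.sample([relentropy_obs], steps, n_samples,
                                   n_samples_per_param, alpha, acceptance_cutoff,
                                   covFactor, full_output, journal_file)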
Code example #12
File: Inference.py (project: eth-cscs/abcpy-models)
# print(statistics_calculator.statistics(resultfakeobs1))
# print(statistics_calculator.statistics(resultfakeobs2))
# Define distance
from abcpy.distances import Euclidean

distance_calculator = Euclidean(statistics_calculator)
# print('# Check whether the distance works')
# print(distance_calculator.distance(resultfakeobs1, resultfakeobs1))
# print(distance_calculator.distance(resultfakeobs1, resultfakeobs2))
#
# Define kernel
from abcpy.perturbationkernel import DefaultKernel

kernel = DefaultKernel([
    B_0, activation_energy, energy_tissue, energy_food, energy_synthesis,
    half_saturation_coeff, max_ingestion_rate, mass_birth, mass_cocoon,
    mass_maximum, mass_sexual_maturity, growth_constant, max_reproduction_rate,
    speed
])

## SABC ##
from abcpy.inferences import SABC

sampler = SABC([EarthWorm], [distance_calculator], backend, kernel, seed=1)

steps, epsilon, n_samples, n_samples_per_param, ar_cutoff, full_output, journal_file = 10, 10000, 500, 1, 0.001, 1, None
print('SABC Inferring')
# We use resultfakeobs1 as our observed dataset
journal_sabc = sampler.sample([resultfakeobs1],
                              steps=steps,
                              epsilon=epsilon,
                              n_samples=n_samples,
Code example #13
File: Inference.py (project: eth-cscs/abcpy-models)
#print(statistics_calculator.statistics(resultfakeobs1))
#print(statistics_calculator.statistics(obs_data))
# Define distance
from Distance import WeightedEuclidean
wt = list(1000 * np.ones(11)) + list(0 * np.ones(11))
for ind in range(60):
    wt = wt + list((1 / 30) * np.ones(11))
distance_calculator = WeightedEuclidean(statistics_calculator,
                                        weight=np.array(wt))
#print('# Check whether the distance works')
#print(distance_calculator.distance(obs_data, resultfakeobs1))

if sample:
    # Define kernel
    from abcpy.perturbationkernel import DefaultKernel
    kernel = DefaultKernel([H, Am, AE, PM, I])

    if algorithm == 'sabc':
        ## SABC ##
        from abcpy.inferences import SABC
        sampler = SABC([Bass], [distance_calculator], backend, kernel, seed=1)
        steps, epsilon, n_samples, n_samples_per_param, ar_cutoff, full_output, journal_file = 10, 10e20, 111, 1, 0.001, 1, None
        print('SABC Inferring')
        # We use resultfakeobs1 as our observed dataset
        journal_sabc = sampler.sample(observations=[obs_data],
                                      steps=steps,
                                      epsilon=epsilon,
                                      n_samples=n_samples,
                                      n_samples_per_param=n_samples_per_param,
                                      beta=2,
                                      delta=0.2,
Code example #14
File: Inference.py (project: pkzli/abcpy-models)
    np.array([[0, 0, 0, 172200, 4808], [20, 1689, 26.8, 155100, 1683],
              [60, 2004, 29.9, 149400, 0], [120, 1968, 31.3, 140700, 0],
              [300, 1946, 36.6, 125801, 0]])
]

# Example to Generate Data to check it's correct
#PDtry = PlateletDeposition([110.0, 14.6, 0.6, 1.7e-3, 6.0], name = 'PD')
#resultfakeobs1 = PDtry.forward_simulate([110.0, 14.6, 0.6, 1.7e-3, 6.0], 1)

# Define backend
from abcpy.backends import BackendDummy as Backend
backend = Backend()

# Define kernel and join the defined kernels
from abcpy.perturbationkernel import DefaultKernel
kernel = DefaultKernel([pAd, pAg, pT, pF, aT])

# Define Statistics
from Statistics import DepositionStatistics
statistics_calculator = DepositionStatistics(degree=1, cross=False)

# Define distance
from Distance import DepositionDistance
distance_calculator = DepositionDistance(statistics_calculator)

print(distance_calculator.distance(data_obs, data_obs_1))

print('Hello')

## SABC ##
from abcpy.inferences import SABC