def test_stochmet_with_prediction():
    """Train a binary SVC on summary statistics drawn from the low and high
    parameter regions, then run StochMET exploration using that classifier
    as a predictor — first in multiprocessing mode, then on a Dask cluster.
    """
    # Lower parameter region -> class 0.
    low_prior = uniform_prior.UniformPrior(dmin, true_params * 0.6)
    explorer = stochmet.StochMET(sim=simulator2, sampler=low_prior,
                                 summarystats=summaries)
    explorer.compute(n_points=50, chunk_size=2)
    features_low = explorer.data.s.reshape((50, 12))
    labels_low = np.zeros(50)

    # Upper parameter region -> class 1.
    high_prior = uniform_prior.UniformPrior(true_params * 1.5, dmax)
    explorer = stochmet.StochMET(sim=simulator2, sampler=high_prior,
                                 summarystats=summaries)
    explorer.compute(n_points=50, chunk_size=2)
    features_high = explorer.data.s.reshape((50, 12))
    labels_high = np.ones(50)

    # Fit a simple region classifier on the combined training set.
    features = np.concatenate((features_low, features_high), axis=0)
    labels = np.concatenate((labels_low, labels_high))
    clf = SVC()
    clf.fit(features, labels)

    def predictor(x):
        return clf.predict(x)

    def _check_shapes(data, n):
        # Results accumulate across compute() calls, so n grows each time.
        np.testing.assert_equal(data.s.shape, (n, 1, 12))
        np.testing.assert_equal(data.ts.shape, (n, 1, 2, 101))
        np.testing.assert_equal(data.x.shape, (n, 5))
        np.testing.assert_equal(data.user_labels.shape, (n, ))
        np.testing.assert_equal(data.y.shape, (n, 1))

    #multi-processing mode
    full_prior = uniform_prior.UniformPrior(dmin, dmax)
    met = stochmet.StochMET(sim=simulator2, sampler=full_prior,
                            summarystats=summaries)
    met.compute(n_points=10, chunk_size=2, predictor=predictor)
    _check_shapes(met.data, 10)

    #cluster-mode
    c = Client()
    met.compute(n_points=10, chunk_size=2, predictor=predictor)
    _check_shapes(met.data, 20)
    c.close()
def abc_test_run():
    """Perform a test ABC run.

    :return: ABC mean absolute error
    """
    lower, upper = get_bounds()
    prior = uniform_prior.UniformPrior(lower, upper)
    observed = get_fixed_data()
    stats = auto_tsfresh.SummariesTSFRESH()
    distance = naive_squared.NaiveSquaredDistance()
    abc_instance = ABC(observed,
                       sim=simulate,
                       prior_function=prior,
                       summaries_function=stats.compute,
                       distance_function=distance,
                       use_logger=True)
    # Fixed-data mean must be computed before inference.
    abc_instance.compute_fixed_mean(chunk_size=2)
    abc_instance.infer(num_samples=100, batch_size=10, chunk_size=2)
    return mean_absolute_error(get_true_param(),
                               abc_instance.results['inferred_parameters'])
def simulator2(x): return simulator(x, model=toggle_model) # Set up the prior default_param = np.array(list(toggle_model.listOfParameters.items()))[:, 1] bound = [] for exp in default_param: bound.append(float(exp.expression)) true_params = np.array(bound) dmin = true_params * 0.5 dmax = true_params * 2.0 uni_prior = uniform_prior.UniformPrior(dmin, dmax) default_fc_params = { 'mean': None, 'variance': None, 'skewness': None, 'agg_autocorrelation': [{ 'f_agg': 'mean', 'maxlag': 5 }, { 'f_agg': 'median', 'maxlag': 5 }, {
def test_uniform_prior():
    """Functional test of UniformPrior.draw: multiprocessing mode, Dask
    cluster mode, and a chunk size that does not divide the sample count.
    """
    lb = np.asarray([1, 1])
    ub = np.asarray([5, 5])
    num_samples = 5
    prior_func = uniform_prior.UniformPrior(lb, ub)

    def _within_bounds(flat):
        # All drawn points must lie strictly inside [lb, ub] on every axis.
        lo = np.min(flat, 0)
        hi = np.max(flat, 0)
        return lo[0] > lb[0] and hi[0] < ub[0] and lo[1] > lb[1] and hi[1] < ub[1]

    def _validate_unit_chunks(delayed):
        # chunk_size=1 yields one chunk per sample.
        assert len(delayed) == 5, \
            "UniformPrior functional test error, expected chunk count mismatch"
        computed, = dask.compute(delayed)
        computed = np.asarray(computed)
        assert computed.shape[0] == num_samples, \
            "UniformPrior functional test error, expected sample count mismatch"
        assert computed.shape[1] == 1, \
            "UniformPrior functional test error, expected chunk size mismatch"
        assert computed.shape[2] == len(lb), \
            "UniformPrior functional test error, dimension mismatch"
        assert _within_bounds(computed.reshape(-1, len(lb))), \
            "UniformPrior functional test error, drawn samples out of bounds"

    # multiprocessing mode
    _validate_unit_chunks(prior_func.draw(num_samples, chunk_size=1))

    # Cluster mode
    c = Client()
    _validate_unit_chunks(prior_func.draw(num_samples, chunk_size=1))

    # chunk_size = 2: 5 samples are split across 3 chunks
    samples = prior_func.draw(num_samples, chunk_size=2)
    assert len(samples) == 3, \
        "UniformPrior functional test error, expected chunk count mismatch"
    samples, = dask.compute(samples)
    samples = np.asarray(samples)
    assert samples.shape[0] == 3, \
        "UniformPrior functional test error, expected sample count mismatch"
    assert samples[-1].shape[0] == 2, \
        "UniformPrior functional test error, expected chunk size mismatch"
    assert samples[-1].shape[1] == len(lb), \
        "UniformPrior functional test error, dimension mismatch"
    flattened = core._reshape_chunks(samples)
    assert _within_bounds(flattened), \
        "UniformPrior functional test error, drawn samples out of bounds"
    c.close()
# Imports from sciope.utilities.priors import uniform_prior from sciope.inference import abc_inference from sciope.utilities.summarystats import burstiness as bs import numpy as np import vilar from sklearn.metrics import mean_absolute_error # Load data data = np.loadtxt("datasets/vilar_dataset_specieA_100trajs_150time.dat", delimiter=",") # Set up the prior dmin = [30, 200, 0, 30, 30, 1, 1, 0, 0, 0, 0.5, 0.5, 1, 30, 80] dmax = [70, 600, 1, 70, 70, 10, 12, 1, 2, 0.5, 1.5, 1.5, 3, 70, 120] mm_prior = uniform_prior.UniformPrior(np.asarray(dmin), np.asarray(dmax)) bs_stat = bs.Burstiness(mean_trajectories=False) # Set up ABC abc_instance = abc_inference.ABC(data, vilar.simulate, epsilon=0.1, prior_function=mm_prior, summaries_function=bs_stat) # Perform ABC; require 30 samples abc_instance.infer(30) # Results true_params = [[ 50.0, 100.0, 50.0, 500.0, 0.01, 50.0, 50.0, 5.0, 1.0, 10.0, 0.5, 0.2, 1.0,