def test__prior__samples_sample_priors(self):
    """
    Drawing samples via `InitializerPrior.samples_from_model` should produce unit
    values in (0, 1), physical values inside each prior's (narrow) bounds, and the
    figure of merit returned by the fitness function for every sample.
    """
    # Each prior is given a very tight range so the drawn physical values are
    # effectively pinned and can be bounds-checked deterministically.
    prior_bounds = [
        ("one", 0.099, 0.101),
        ("two", 0.199, 0.201),
        ("three", 0.299, 0.301),
        ("four", 0.399, 0.401),
    ]

    model = af.PriorModel(af.m.MockClassx4)
    for attr_name, lower, upper in prior_bounds:
        setattr(model, attr_name, af.UniformPrior(lower_limit=lower, upper_limit=upper))

    initializer = af.InitializerPrior()

    unit_parameter_lists, parameter_lists, figure_of_merit_list = initializer.samples_from_model(
        total_points=2, model=model, fitness_function=MockFitness()
    )

    # Unit-cube values always lie strictly inside (0, 1).
    for unit_sample in unit_parameter_lists:
        for unit_value in unit_sample:
            assert 0.0 < unit_value < 1.0

    # Physical values map into each prior's own narrow interval.
    for physical_sample in parameter_lists:
        for (_, lower, upper), physical_value in zip(prior_bounds, physical_sample):
            assert lower < physical_value < upper

    # MockFitness returns 1.0 for every evaluated sample.
    assert figure_of_merit_list == [1.0, 1.0]
def test__samples_in_test_model(self):
    """
    `InitializerPrior.samples_in_test_mode` should behave like normal sampling for
    the unit and physical values, but skip fitness evaluation entirely, returning
    the sentinel figure of merit -1.0e99 for every sample.
    """
    # Tight prior ranges make the physical values trivially bounds-checkable.
    prior_bounds = [
        ("one", 0.099, 0.101),
        ("two", 0.199, 0.201),
        ("three", 0.299, 0.301),
        ("four", 0.399, 0.401),
    ]

    model = af.PriorModel(af.m.MockClassx4)
    for attr_name, lower, upper in prior_bounds:
        setattr(model, attr_name, af.UniformPrior(lower_limit=lower, upper_limit=upper))

    initializer = af.InitializerPrior()

    unit_parameter_lists, parameter_lists, figure_of_merit_list = initializer.samples_in_test_mode(
        total_points=2,
        model=model,
    )

    # Unit-cube values always lie strictly inside (0, 1).
    for unit_sample in unit_parameter_lists:
        for unit_value in unit_sample:
            assert 0.0 < unit_value < 1.0

    # Physical values map into each prior's own narrow interval.
    for physical_sample in parameter_lists:
        for (_, lower, upper), physical_value in zip(prior_bounds, physical_sample):
            assert lower < physical_value < upper

    # Test mode never calls a fitness function; a sentinel value is used instead.
    assert figure_of_merit_list == [-1.0e99, -1.0e99]
https://pyswarms.readthedocs.io/en/latest/index.html

**PyAutoFit** extends *PySwarms* by allowing runs to be terminated and resumed from the point of termination, as well as providing different options for the initial distribution of particles.
"""

# %%
# Configure the local Particle Swarm Optimizer. The swarm hyper-parameters
# (cognitive / social / inertia) are passed straight through to PySwarms;
# ftol=-np.inf disables the objective-tolerance stopping criterion so the run
# only terminates after the full `iters` iterations.
# NOTE(review): particles start from draws of the model priors via
# `af.InitializerPrior()` — presumably the default; confirm against the
# PyAutoFit initializer docs.
pso = af.PySwarmsLocal(
    n_particles=100,
    iters=1000,
    cognitive=0.5,
    social=0.3,
    inertia=0.9,
    ftol=-np.inf,
    initializer=af.InitializerPrior(),
    number_of_cores=1,
    paths=af.Paths(folders=["examples", "complex"]),
)

# Run the optimization; `model` and `analysis` are defined earlier in this
# script (outside this excerpt).
result = pso.fit(model=model, analysis=analysis)

# %%
"""
__Result__

The result object returned by PSO is again very similar in structure to previous results.
"""

# %%
# The best-fit model instance found by the swarm.
instance = result.max_log_likelihood_instance