def make_factor_model(prior_model):
    """
    Wrap `prior_model` in a `ModelFactor` whose analysis always reports the
    same log likelihood, so tests can exercise graph plumbing without a fit.
    """

    class _ConstantAnalysis(af.Analysis):
        # The likelihood is independent of the instance: every call yields 1.
        @staticmethod
        def log_likelihood_function(*_):
            return 1

    analysis = _ConstantAnalysis()
    return ep.ModelFactor(prior_model, analysis=analysis)
def make_factor_model(centre: float, sigma: float, optimiser=None) -> ep.ModelFactor:
    """
    Build a LikelihoodModel for one Gaussian to be fit.

    First, generate the data for this factor; note that the `intensity`
    value is shared across all factors.
    """
    gaussian = Gaussian(centre=centre, intensity=intensity, sigma=sigma)
    y = make_data(gaussian, x)

    # Next, a prior model for this Gaussian. The intensity prior object is the
    # shared one, linking this factor's parameter to the other factors'.
    model = af.PriorModel(
        Gaussian,
        centre=af.GaussianPrior(mean=50, sigma=20),
        intensity=intensity_prior,
        sigma=af.GaussianPrior(mean=10, sigma=10),
    )

    # Combine likelihood function and prior model into a likelihood factor,
    # converted into a ModelFactor — a node like any other on the factor
    # graph. A custom optimiser may be passed to fit this factor in place of
    # the default one.
    return ep.ModelFactor(
        model,
        analysis=Analysis(x=x, y=y),
        optimiser=optimiser,
    )
def test_gaussian():
    """
    Fit a single Gaussian with the Laplace factor optimiser and check the
    recovered posterior means sit within 10% of the true parameters.
    """
    sample_count = 100
    xs = np.arange(sample_count)
    ys = make_data(Gaussian(centre=50.0, intensity=25.0, sigma=10.0), xs)

    gaussian_model = af.PriorModel(
        Gaussian,
        centre=af.GaussianPrior(mean=50, sigma=20),
        intensity=af.GaussianPrior(mean=25, sigma=10),
        sigma=af.GaussianPrior(mean=10, sigma=10),
    )
    factor = ep.ModelFactor(gaussian_model, analysis=Analysis(x=xs, y=ys))

    result = factor.optimise(ep.LaplaceFactorOptimiser())

    # Each posterior mean should recover the value used to generate the data.
    for parameter, truth in (("centre", 50), ("intensity", 25), ("sigma", 10)):
        assert getattr(result, parameter).mean == pytest.approx(truth, rel=0.1)
def make_model_factor_2():
    """
    Build a `ModelFactor` over a one-parameter collection (a single uniform
    prior named `one`), paired with an `Analysis` centred on 0.0.
    """
    collection = af.Collection(one=af.UniformPrior())
    analysis = Analysis(0.0)
    return g.ModelFactor(collection, analysis)
datasets which we intend to fit with each of these `Gaussians`, setting up each in an
`Analysis` class that defines how the model is used to fit the data.

We now simply need to pair each model-component to each `Analysis` class, so that **PyAutoFit** knows that:

- `prior_model_0` fits `data_0` via `analysis_0`.
- `prior_model_1` fits `data_1` via `analysis_1`.
- `prior_model_2` fits `data_2` via `analysis_2`.

The point where a `Model` and `Analysis` class meet is called a `ModelFactor`.

This term is used to denote that we are composing a graphical model, which is commonly termed a
'factor graph'. A factor defines a node on this graph where we have some data, a model, and we
fit the two together. The 'links' between these different nodes then define the global model we
are fitting.
"""
# One factor per (model, analysis) pair — each is a node on the factor graph.
model_factor_0 = g.ModelFactor(prior_model=prior_model_0, analysis=analysis_0)
model_factor_1 = g.ModelFactor(prior_model=prior_model_1, analysis=analysis_1)
model_factor_2 = g.ModelFactor(prior_model=prior_model_2, analysis=analysis_2)

"""
We combine our `ModelFactors` into one, to compose the factor graph.
"""
factor_graph = g.FactorGraphModel(model_factor_0, model_factor_1, model_factor_2)

"""
So, what is a factor graph?

A factor graph defines the graphical model we have composed. For example, it defines the
different model components that make up our model (e.g. the three `Gaussian` classes) and how
their parameters are linked or shared (e.g. that each `Gaussian` has its own unique `intensity`
and `sigma`, but a shared `centre` parameter).

This is what our factor graph looks like: