def test_vector_none_fitting(self):
    """
    Fit to a vector model with one variable's data set to None.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit_none = NumericalLeastSquares(
        model=model,
        a_i=xdata[0],
        b_i=xdata[1],
        c_i=None,
    )
    fit = NumericalLeastSquares(
        model=model,
        a_i=xdata[0],
        b_i=xdata[1],
        c_i=xdata[2],
    )
    fit_none_result = fit_none.execute()
    fit_result = fit.execute()

    self.assertAlmostEqual(fit_none_result.params.a, fit_result.params.a, 4)
    self.assertAlmostEqual(fit_none_result.params.b, fit_result.params.b, 4)
    # c has no data to fit against, so it should stay at its default value.
    self.assertAlmostEqual(fit_none_result.params.c, 1.0)
def test_model_from_dict(self):
    x, y_1, y_2 = variables('x, y_1, y_2')
    a, b = parameters('a, b')

    model = Model({
        y_1: 2 * a * x,
        y_2: b * x**2
    })
def test_gaussian(self):
    x0, sig = parameters('x0, sig')
    x = Variable()

    new = sympy.exp(-(x - x0)**2 / (2*sig**2))
    self.assertIsInstance(new, sympy.exp)

    g = Gaussian(x, x0, sig)
    self.assertTrue(issubclass(g.__class__, sympy.exp))
def test_callable(self):
    a, b = parameters('a, b')
    x, y = variables('x, y')
    func = a*x**2 + b*y**2
    result = func(x=2, y=3, a=3, b=9)
    self.assertEqual(result, 3*2**2 + 9*3**2)

    xdata = np.arange(1, 10)
    ydata = np.arange(1, 10)
    result = func(x=xdata, y=ydata, a=3, b=9)
    self.assertTrue(np.array_equal(result, 3*xdata**2 + 9*ydata**2))
def test_jacobian_matrix(self):
    """
    The jacobian matrix of a model should be a 2D list (matrix) containing
    all the partial derivatives.
    """
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')
    # a_i, b_i, c_i, s_a, s_b, s_c = variables('a_i, b_i, c_i, s_a, s_b, s_c')

    model = Model({a_i: 2 * a + 3 * b, b_i: 5 * b, c_i: 7 * c})
    self.assertEqual([[2, 3, 0], [0, 5, 0], [0, 0, 7]], model.jacobian)
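# Illustrative sketch, not an original test: the expected matrix above is just the
# partial derivative of each model component with respect to each parameter, taken
# in the order (a, b, c). It can be reproduced with plain sympy differentiation;
# only sympy and symfit's parameters() helper (already used above) are assumed.
def _jacobian_by_hand():
    a, b, c = parameters('a, b, c')
    components = [2 * a + 3 * b, 5 * b, 7 * c]
    return [[sympy.diff(expr, p) for p in (a, b, c)] for expr in components]
    # -> [[2, 3, 0], [0, 5, 0], [0, 0, 7]]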
def test_likelihood_fitting_gaussian(self):
    """
    Fit using the likelihood method.
    """
    mu, sig = parameters('mu, sig')
    sig.min = 0.01
    sig.value = 3.0
    mu.value = 50.
    x = Variable()
    pdf = Gaussian(x, mu, sig)
    # pdf = sympy.exp(-(x - mu)**2/(2*sig**2))/sympy.sqrt(2*sympy.pi*sig**2)

    np.random.seed(10)
    xdata = np.random.normal(51., 3.5, 100000)

    fit = Likelihood(pdf, xdata)
    fit_result = fit.execute()
    print(fit_result)

    self.assertAlmostEqual(fit_result.params.mu, np.mean(xdata), 5)
    self.assertAlmostEqual(fit_result.params.sig, np.std(xdata), 5)
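# Illustrative sketch, not an original test: the assertions above rely on the
# closed-form maximum-likelihood estimates for a Gaussian, mu_hat = mean(x) and
# sig_hat = sqrt(mean((x - mu_hat)**2)) (i.e. np.std with ddof=0), which is what
# the numerical Likelihood fit is expected to converge to. Only numpy is assumed.
def _gaussian_mle(xdata):
    mu_hat = np.mean(xdata)
    sig_hat = np.sqrt(np.mean((xdata - mu_hat)**2))  # identical to np.std(xdata)
    return mu_hat, sig_hat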
def __get_grad(ar, n):
    # grad at different scales, see test_symfit_0707.ipynb
    m = n - 1
    (M, N) = ar.shape
    # output grad matrices with size (M-m) x (N-m)
    gd180 = np.zeros((M - m, N - m))
    gd360 = np.zeros((M - m, N - m))
    # initial values for the fitting parameters
    ## Goodman et al. 1993 (doi:10.1086/172465)
    v_0, a, b = parameters('v0, al, bl')
    v_0.value = 5.
    a.value = 0.
    b.value = 0.
    x, y, z = variables('x, y, z')
    md = {z: v_0 + a * x + b * y}
    for (i, j), _ in np.ndenumerate(ar):
        if i >= ar.shape[0] - m or j >= ar.shape[1] - m:
            # fit grad from (i, j) (to (i+n, j+n)), so right/bottom edges are neglected
            continue
        else:
            ap = ar[slice(i, i + n), slice(j, j + n)]
            # row/column indices of the finite (non-NaN) pixels in the window
            xx, yy = np.where(~np.isnan(ap))
            zz = ap.flatten()
            zz = zz[~np.isnan(zz)]
            ft = Fit(md, x=xx, y=yy, z=zz)
            ft_result = ft.execute()
            (al_fit, bl_fit) = (ft_result.params.al, ft_result.params.bl)
            gd180[i, j] = np.mod(np.mod(360 - np.degrees(np.arctan(bl_fit / al_fit)), 360), 180)
            gd360[i, j] = np.mod(360 - np.degrees(np.arctan(bl_fit / al_fit)), 360)
    return gd180, gd360
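# Hedged usage sketch for __get_grad, assuming it is defined at module level as
# above: build a small synthetic velocity map with a constant gradient and fit a
# plane in every 3x3 window. The synthetic data below is illustrative only and is
# not taken from test_symfit_0707.ipynb.
def _demo_get_grad():
    rows, cols = np.mgrid[0:10, 0:10]
    vel = 5. + 0.3 * cols - 0.1 * rows   # a plane with a constant gradient
    pa180, pa360 = __get_grad(vel, 3)    # position angles mod 180 and mod 360 degrees
    return pa180, pa360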
def test_vector_fitting(self):
    a, b, c = parameters('a, b, c')
    a_i, b_i, c_i = variables('a_i, b_i, c_i')

    model = {a_i: a, b_i: b, c_i: c}

    xdata = np.array([
        [10.1, 9., 10.5, 11.2, 9.5, 9.6, 10.],
        [102.1, 101., 100.4, 100.8, 99.2, 100., 100.8],
        [71.6, 73.2, 69.5, 70.2, 70.8, 70.6, 70.1],
    ])

    fit = NumericalLeastSquares(
        model=model,
        a_i=xdata[0],
        b_i=xdata[1],
        c_i=xdata[2],
    )
    fit_result = fit.execute()

    self.assertAlmostEqual(fit_result.params.a, 9.985691, 6)
    self.assertAlmostEqual(fit_result.params.b, 1.006143e+02, 4)
    self.assertAlmostEqual(fit_result.params.c, 7.085713e+01, 5)
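# Illustrative sketch, not an original test: for the constant model {a_i: a, ...}
# above, the analytic least-squares optimum for each parameter is the sample mean
# of the corresponding data row, so the asserted values (taken from the numerical
# optimizer) agree with np.mean to roughly four decimal places. Only numpy is
# assumed.
def _expected_vector_fit_values(xdata):
    return np.mean(xdata, axis=1)  # approx. [9.9857, 100.6143, 70.8571]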
def test_model_callable(self):
    """
    Tests if Model objects are callable in the way expected. Calling a model
    should evaluate its expression(s) with the given values. The return value
    is a namedtuple, and the model's signature is set so that introspection
    works as expected.
    """
    a, b = parameters('a, b')
    x, y = variables('x, y')
    new = a*x**2 + b*y**2
    model = Model(new)

    z, = model(3, 3, 2, 2)
    self.assertEqual(z, 36)
    for arg_name, name in zip(('x', 'y', 'a', 'b'), inspect_sig.signature(model).parameters):
        self.assertEqual(arg_name, name)

    # From Model __init__ directly
    model = Model([a*x**2, 4*b*y**2, a*x**2 + b*y**2])
    z_1, z_2, z_3 = model(3, 3, 2, 2)
    self.assertEqual(z_1, 18)
    self.assertEqual(z_2, 72)
    self.assertEqual(z_3, 36)
    for arg_name, name in zip(('x', 'y', 'a', 'b'), inspect_sig.signature(model).parameters):
        self.assertEqual(arg_name, name)

    # From dict
    z_1, z_2, z_3 = variables('z_1, z_2, z_3')
    model = Model({z_1: a*x**2, z_2: 4*b*y**2, z_3: a*x**2 + b*y**2})
    z_1, z_2, z_3 = model(3, 3, 2, 2)
    self.assertEqual(z_1, 18)
    self.assertEqual(z_2, 72)
    self.assertEqual(z_3, 36)
    for arg_name, name in zip(('x', 'y', 'a', 'b'), inspect_sig.signature(model).parameters):
        self.assertEqual(arg_name, name)
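# Illustrative sketch, not an original test: since the docstring above describes the
# return value as a namedtuple, the components of the dict-based model should also
# be reachable by the name of their dependent variable. The attribute-style access
# shown here is an assumption based on that namedtuple behaviour.
def _named_component_access():
    a, b = parameters('a, b')
    x, y, z_1, z_2, z_3 = variables('x, y, z_1, z_2, z_3')
    model = Model({z_1: a*x**2, z_2: 4*b*y**2, z_3: a*x**2 + b*y**2})
    result = model(3, 3, 2, 2)
    return result.z_1, result.z_2, result.z_3  # -> (18, 72, 36)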