def test_get_param_names():
    L1 = LorentzianLine(name="Lorentzian", domain=(-5.0, 5.0), x0=-0.5, width=0.4, c=0.2)
    F_c1 = F_cLine(name="Fc1", domain=(-5.0, 5.0), x0=-0.0, width=0.4, c=0.2)
    print(L1.get_param_names())
    print(F_c1.get_param_names())

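
# Hedged sketch (assumption, not part of the original test): `get_param_names`
# is taken here to return an iterable of parameter-name strings, so the
# constructor keywords should appear among them. This is an illustration,
# not a documented contract of the Line classes.
def sketch_param_names_contain_kwargs():
    L = LorentzianLine(name="Lorentzian", domain=(-5.0, 5.0), x0=-0.5, width=0.4, c=0.2)
    names = set(L.get_param_names())
    print("Constructor kwargs present in get_param_names():",
          {"x0", "width", "c"}.issubset(names))
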
def test_export_load():
    L = LorentzianLine(name="Lorentzian", domain=(-5.0, 5.0), x0=-0.5, width=0.4, c=0.2)
    L.export_to_jsonfile(f"{testdir}/resources/test_export_load_file.json")
    L2 = LorentzianLine.load_from_jsonfile(
        f"{testdir}/resources/test_export_load_file.json")
    assert L.name == L2.name
    L3 = LorentzianLine.load_from_dict(**L2.export_to_dict())

    F_c1 = LineFactory.create(
        "f_c", name="FcLine1", domain=(-15, 15),
        x0=0.0, width=0.02, A=350.0, q=0.023, c=0.0, weight=1.0)
    F_c1.export_to_jsonfile(
        f"{testdir}/resources/test_export_load_F_c_file.json")
    F_c2 = F_cLine.load_from_jsonfile(
        f"{testdir}/resources/test_export_load_F_c_file.json")
    assert F_c1.line_params["width"] == F_c2.line_params["width"]
    F_c3 = F_cLine.load_from_dict(**F_c2.export_to_dict())
    assert min(F_c3.domain) == min(F_c1.domain)

    F_I1 = LineFactory.create(
        "f_I", name="FILine1", domain=(-15, 15),
        x0=0.0, width=0.02, A=350.0, q=0.023, c=0.0, weight=1.0, kappa=0.01)
    F_I1.export_to_jsonfile(
        f"{testdir}/resources/test_export_load_F_I_file.json")
    F_I2 = F_ILine.load_from_jsonfile(
        f"{testdir}/resources/test_export_load_F_I_file.json")
    assert F_I1.line_params["width"] == F_I2.line_params["width"]
    F_I3 = F_ILine.load_from_dict(**F_I2.export_to_dict())
    assert min(F_I3.domain) == min(F_I1.domain)

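
# Hedged sketch (assumption, not part of the original test): beyond the single
# "width" and domain checks above, a full dict round trip should preserve every
# parameter. The "x0" key and the tuple() comparison of `domain` are assumptions
# about the export format, used for illustration only.
def sketch_full_roundtrip_check():
    L = LorentzianLine(name="Lorentzian", domain=(-5.0, 5.0), x0=-0.5, width=0.4, c=0.2)
    L_reloaded = LorentzianLine.load_from_dict(**L.export_to_dict())
    print("Round trip preserves x0:    ", L.line_params["x0"] == L_reloaded.line_params["x0"])
    print("Round trip preserves domain:", tuple(L.domain) == tuple(L_reloaded.domain))
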
def test_get_adaptive_integration_grid():
    ### Lines
    LL1 = LorentzianLine("LL1", (-energy_from_lambda(6.0), 15),
                         x0=0.048, width=0.04, c=0.0, weight=0.0)
    F_c = F_cLine("F_c1", (-energy_from_lambda(6.0), 15),
                  x0=0.0, width=0.02, A=350.0, q=0.02, c=0.0, weight=1)
    F_I = F_ILine("F_I1", (-energy_from_lambda(6.0), 15),
                  x0=-0.02, width=0.008, A=350.0, q=0.02, kappa=0.01, c=0.0, weight=1)
    ### CalcStrategy
    inel = InelasticCalcStrategy(610)
    quasi = QuasielasticCalcStrategy()

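
# Hedged sketch (assumption, not part of the original test): the two calculation
# strategies above are only constructed, never exercised. Under the assumption
# that the SqE-level accessor `get_adaptive_integration_grid(ne, nlam)` used in
# `test_adaptive_vs_linear` below is the intended entry point, the grid built
# from a single line could be inspected like this. Names and arguments are illustrative.
def sketch_adaptive_grid_via_sqe():
    line = F_cLine("F_c_sketch", (-energy_from_lambda(6.0), 15),
                   x0=0.0, width=0.02, A=350.0, q=0.02, c=0.0, weight=1)
    sqe = SqE(lines=(line,), lam=6.0, dlam=0.12, lSD=3.43, T=610)
    ee = sqe.get_adaptive_integration_grid(100, 5)
    print(f"Adaptive grid shape: {ee.shape}")
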
def visualize_Lines():
    Lorentzian = LorentzianLine("Lorentzian1", (-energy_from_lambda(6.0), 15),
                                x0=-0.3, width=0.5, c=0.0)
    F_c = F_cLine("F_c1", (-energy_from_lambda(6.0), 15),
                  x0=-0.3, width=0.5, A=350.0, q=0.02, c=0.0, weight=1)
    F_I = F_ILine("F_I1", (-energy_from_lambda(6.0), 15),
                  x0=-0.3, width=0.008, A=350.0, q=0.02, kappa=0.01, c=0.0, weight=1)
    print(F_I.integrate())
    F_I2 = F_ILine("F_I2", (-energy_from_lambda(6.0), 15),
                   x0=-0.3, width=0.1, A=367.0, q=0.124, kappa=0.065, c=0.0, weight=1)
    print(F_I2.integrate())

    e = np.linspace(-0.5, 0.0, 1201)
    plt.plot(e, Lorentzian(e))
    plt.plot(e, F_c(e) * F_c.normalize(), ls="--", lw=2.0)
    plt.plot(e, F_I(e), ls="dotted", lw=4.0)
    plt.plot(e, F_I2(e), ls="-.", lw=1.0)
    plt.show()

def test_domainenforcement():
    Lorentzian = LorentzianLine("Lorentzian1", (-5., 5), x0=-0.3, width=0.5, c=0.2)
    F_c = F_cLine("FcLine1", (-15, 15),
                  x0=0.0, width=0.02, A=350.0, q=0.023, c=0.0, weight=1.0)
    e_in_domain = np.linspace(-5.0, 5.0, 11)
    e_beyond_domain = np.linspace(-10.0, 10.0, 21)

    pL1 = Lorentzian(e_in_domain)
    pL2 = Lorentzian(e_beyond_domain)
    pF1 = F_c(e_in_domain)
    pF2 = F_c(e_beyond_domain)

    # Both lines are evaluated on the same energy arrays, so the Lorentzian's
    # (narrower) domain mask is reused to pick out the points at -5.0 ... 5.0.
    assert np.all(pL1 == pL2[Lorentzian.within_domain(e_beyond_domain)])
    assert np.all(pF1 == pF2[Lorentzian.within_domain(e_beyond_domain)])

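
# Hedged sketch (assumption, not part of the original test): `within_domain` is
# taken to return one boolean per input energy, which is what the mask indexing
# in `test_domainenforcement` relies on. This simply reports how many sample
# points survive the domain cut.
def sketch_within_domain_mask():
    L = LorentzianLine("LorentzianMask", (-5.0, 5.0), x0=-0.3, width=0.5, c=0.2)
    e = np.linspace(-10.0, 10.0, 21)
    mask = np.asarray(L.within_domain(e))
    print(f"{np.count_nonzero(mask)} of {e.size} points lie within the domain {L.domain}")
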
def test_Lines():
    l1 = Line("LineBase", (-15., 15), x0=0, FWHM=0.5)
    # assert isinstance(l1, Line)
    # try:
    #     l1.check_params()
    # except KeyError:
    #     print("KeyError was caught and 'handled' in this test.")

    l2 = LorentzianLine("Lorentzian1", (-15., 15.), x0=0.2, width=0.3)
    # l2.check_params()
    l2.update_line_params(c=0.2)

    l3 = F_cLine("FcLine1", (-15, 15),
                 x0=0.0, width=0.02, A=350.0, q=0.023, c=0.0, weight=1.0)
    l3.update_line_params(width=0.1)
    l3.update_domain((-1.0, 1.0))

    l4 = F_ILine("FILine1", (-15, 15),
                 x0=0.0, width=0.02, A=350.0, q=0.023, kappa=0.01, c=0.0, weight=1.0)
    l4.update_line_params(kappa=0.015)

    e = np.linspace(-1.5, 1.5, 16)
    print("Returned by 'l1.calc': ", l1.calc(e, **l1.line_params))
    print("Returned by 'l2(e)': ", l2(e))
    print("Returned by 'F_cLine': ", l3(e))
    print("Returned by 'F_ILine': ", l4(e))

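
# Hedged sketch (assumption, not part of the original test): the prints above
# suggest that calling a Line instance and calling `calc` with its own
# `line_params` give the same values inside the domain. This prints the maximum
# difference for a Lorentzian so that assumption can be checked by eye.
def sketch_call_vs_calc():
    l2 = LorentzianLine("Lorentzian1", (-15., 15.), x0=0.2, width=0.3, c=0.2)
    e = np.linspace(-1.5, 1.5, 16)
    diff = np.max(np.abs(l2(e) - l2.calc(e, **l2.line_params)))
    print(f"max |l2(e) - l2.calc(e, **l2.line_params)| = {diff}")
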
def test_normalization():
    Lorentzian = LorentzianLine("Lorentzian1", (-5., 5), x0=0.0, width=0.5, c=0.0)
    n = Lorentzian.normalize()

    from scipy.integrate import quad
    to_integrate = lambda x: Lorentzian(x)
    val, err = quad(to_integrate, min(Lorentzian.domain), max(Lorentzian.domain))
    print(f"Integration value: {val:.5f} +- {err:.5f} | normalization factor: {n:.5f}")
    print(f"Normalized Line area: {val*n}")
    # - - - - - - - - - - - - - - - - - - - -
    F_c = F_cLine("FcLine1", (-15, 15),
                  x0=0.0, width=0.02, A=350.0, q=0.023, c=0.0, weight=1.0)
    nfc = F_c.normalize()
    ifc = F_c.integrate()
    x = np.linspace(-1000, 1000, 5000000)
    y = F_c.calc(x, **F_c.line_params)
    ifctrapz = np.trapz(y, x)
    ifcquadv, ifcquade = quad(lambda x: nfc * F_c(x), min(F_c.domain), max(F_c.domain))
    print(f"{F_c.line_params}")
    print(f"Standard Integration value for 10000 steps: {ifc}")
    print(f"QUADPACK Integration value (after normal.): {ifcquadv} +- {ifcquade}")
    print(f"TRAPEZOID Integration value from -1e3 to 1e3: {ifctrapz}")
    # - - - - - - - - - - - - - - - - - - - -
    F_I = F_ILine("FILine1", (-15, 15),
                  x0=0.0, width=0.02, A=350.0, q=0.023, kappa=0.01, c=0.0, weight=1.0)
    nfI = F_I.normalize()
    ifI = F_I.integrate()
    x = np.linspace(-1000, 1000, 5000000)
    y = F_I.calc(x, **F_I.line_params)
    ifItrapz = np.trapz(y, x)
    ifIquadv, ifIquade = quad(lambda x: F_I(x), min(F_I.domain), max(F_I.domain))
    print(f"{F_I.line_params}")
    print(f"Standard Integration value for 10000 steps: {ifI}")
    print(f"QUADPACK Integration value                 : {ifIquadv} +- {ifIquade}")
    print(f"TRAPEZOID Integration value from -1e3 to 1e3: {ifItrapz}")

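
# Hedged sketch (assumption, not part of the original test): `normalize` is read
# here as returning the reciprocal of the line's integral over its own domain,
# which is what the "Normalized Line area" print above suggests. If so, the
# normalized Lorentzian should integrate to roughly 1 over its domain.
def sketch_normalized_area():
    from scipy.integrate import quad
    L = LorentzianLine("LorentzianNorm", (-5.0, 5.0), x0=0.0, width=0.5, c=0.0)
    n = L.normalize()
    val, _ = quad(lambda x: n * L(x), min(L.domain), max(L.domain))
    print(f"Normalized area (expected ~1 under this assumption): {val:.6f}")
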
def test_adaptive_vs_linear():
    ### Lines
    L1 = LorentzianLine("LL1", (-energy_from_lambda(6.0), 15),
                        x0=0.048, width=0.04, c=0.0, weight=0.0)
    L2 = F_cLine("F_c1", (-energy_from_lambda(6.0), 15),
                 x0=0.0, width=0.0001, A=350.0, q=0.02, c=0.0, weight=1)
    L3 = F_ILine("F_I1", (-energy_from_lambda(6.0), 15),
                 x0=-0.02, width=0.01, A=350.0, q=0.02, kappa=0.01, c=0.0, weight=1)
    # Construct an SqE model
    sqe = SqE(lines=(L1, L2, L3), lam=6.0, dlam=0.12, lSD=3.43, T=20)
    # Add the detector efficiency correction
    decf = DetectorEfficiencyCorrectionFactor(sqe, ne=100, nlam=20)

    ### Construct the adaptive integration grid
    ne = 100
    nlam = 21
    l = linspace(1 - sqe.model_params["dlam"],
                 1 + sqe.model_params["dlam"],
                 nlam) * sqe.model_params["lam"]
    a = -0.99999 * energy_from_lambda(l)
    ee = sqe.get_adaptive_integration_grid(ne, nlam)
    ee = where(ee <= atleast_2d(a), atleast_2d(a), ee)
    ne = ee.shape[0]
    ll = tile(l, (ne, 1))
    print(f"ee: - Shape: {ee.shape}\n - ee[::50, {nlam//2}]: {ee[::50, nlam//2]}")
    print(f"ll: - Shape: {ll.shape}\n - ll[::50, {nlam//2}]: {ll[::50, nlam//2]}")

    ### Construct a standard linear integration grid
    nelin = 10000
    nlamlin = 21
    eelin, lllin = energy_lambda_nrange(15.0, 6.0, 0.12, nelin, nlamlin)
    print(f"eelin: - Shape: {eelin.shape}\n - eelin[::{nelin//10}, {nlamlin//2}]: {eelin[::nelin//10, nlamlin//2]}")
    print(f"lllin: - Shape: {lllin.shape}\n - lllin[::{nelin//10}, {nlamlin//2}]: {lllin[::nelin//10, nlamlin//2]}")

    ### Perform the correction calculation and compare timings
    from time import time
    startt = time()
    adaptcorr = decf.calc(ee, ll)
    intermedt = time()
    lincorr = decf.calc(eelin, lllin)
    stopt = time()
    print(f"Adaptive integration took: {intermedt - startt:.6f}")
    print(f"Adaptive grid correction value: {adaptcorr:.6f}")
    print(f"Linear integration took : {stopt - intermedt:.6f}")
    print(f"Linear grid correction value : {lincorr:.6f}")

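
# Hedged sketch (assumption, not part of the original test): if the adaptive grid
# works as intended, the two correction values printed above should be close. This
# helper only reports the relative deviation; any pass/fail tolerance would be an
# additional assumption about the model's accuracy.
def sketch_relative_deviation(adaptcorr, lincorr):
    print(f"Relative deviation, adaptive vs linear: {abs(adaptcorr - lincorr) / abs(lincorr):.3e}")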