def solve(l1, l2, data):
    """Solve the smooth-LASSO inversion for one (lambda, alpha) pair.

    Parameters
    ----------
    l1 : float
        Smoothing hyperparameter, forwarded as ``lambda1``.
    l2 : float
        Regularization hyperparameter, forwarded as ``alpha``.
    data : dict
        Payload with keys ``"inverse_dimensions"`` (list of kwargs for
        ``cp.LinearDimension``), ``"kernel"`` (2-D array-like), and
        ``"signal"`` (a CSDM-serializable dict).

    Returns
    -------
    list
        ``[solution_dict, lambda, alpha]`` where the solution is normalized
        to a maximum amplitude of 1 and serialized with ``to_dict()``.
    """
    inverse_dimensions = [
        cp.LinearDimension(**item) for item in data["inverse_dimensions"]
    ]
    compressed_K = np.asarray(data["kernel"], dtype=np.float64)
    compressed_s = cp.parse_dict(data["signal"])

    # Removed: debug print of the kernel/signal and a dead commented-out
    # SmoothLassoCV block.
    s_lasso = SmoothLasso(
        alpha=l2,
        lambda1=l1,
        inverse_dimension=inverse_dimensions,
        method="lars",
        tolerance=1e-3,
    )
    s_lasso.fit(K=compressed_K, s=compressed_s)

    # Normalize the solution so its maximum is 1 before serializing.
    res = s_lasso.f / s_lasso.f.max()
    return [
        res.to_dict(),
        s_lasso.hyperparameters["lambda"],
        s_lasso.hyperparameters["alpha"],
    ]
# # the optimum hyper-parameters, alpha and lambda, from the cross-validation. # print(s_lasso.hyperparameters) # # {'alpha': 2.198392648862289e-08, 'lambda': 1.2742749857031348e-06} # # the solution # f_sol = s_lasso.f # # the cross-validation error curve # CV_metric = s_lasso.cross_validation_curve # %% # If you use the above ``SmoothLassoCV`` method, skip the following code-block. # Setup the smooth lasso class s_lasso = SmoothLasso(alpha=2.198e-8, lambda1=1.27e-6, inverse_dimension=inverse_dimensions) # run the fit method on the compressed kernel and compressed data. s_lasso.fit(K=compressed_K, s=compressed_s) # %% # The optimum solution # '''''''''''''''''''' # # The :attr:`~mrinversion.linear_model.SmoothLasso.f` attribute of the instance holds # the solution, f_sol = s_lasso.f # f_sol is a CSDM object. # %% # where ``f_sol`` is the optimum solution. #
# # the optimum hyper-parameters, alpha and lambda, from the cross-validation. # print(s_lasso.hyperparameters) # # {'alpha': 3.359818286283781e-05, 'lambda': 5.324953129837531e-06} # # the solution # f_sol = s_lasso.f # # the cross-validation error curve # CV_metric = s_lasso.cross_validation_curve # %% # If you use the above ``SmoothLassoCV`` method, skip the following code-block. s_lasso = SmoothLasso(alpha=1.2e-4, lambda1=4.55e-6, inverse_dimension=inverse_dimensions) # run the fit method on the compressed kernel and compressed data. s_lasso.fit(K=compressed_K, s=compressed_s) # %% # The optimum solution # '''''''''''''''''''' # # The :attr:`~mrinversion.linear_model.SmoothLasso.f` attribute of the instance holds # the solution, f_sol = s_lasso.f # f_sol is a CSDM object. # %% # where ``f_sol`` is the optimum solution. #
def test_01():
    """End-to-end inversion test.

    Loads a remote CSDM data set (network access required), builds a
    shielding lineshape kernel, TSVD-compresses it, fits a smooth-LASSO
    model at pre-optimized hyperparameters, and checks summary statistics
    of the Q3/Q4 regions of the solution against reference values.
    """
    # Load the data set and keep the real part only.
    domain = "https://sandbox.zenodo.org/record/1065347/files"
    filename = f"{domain}/8lnwmg0dr7y6egk40c2orpkmmugh9j7c.csdf"
    data_object = cp.load(filename)
    data_object = data_object.real
    _ = [item.to("ppm", "nmr_frequency_ratio") for item in data_object.dimensions]

    # Transpose and truncate to the region of interest.
    data_object = data_object.T
    data_object_truncated = data_object[:, 155:180]

    anisotropic_dimension = data_object_truncated.dimensions[0]
    inverse_dimensions = [
        cp.LinearDimension(count=25, increment="400 Hz", label="x"),
        cp.LinearDimension(count=25, increment="400 Hz", label="y"),
    ]

    # Build the shielding principal-axis lineshape kernel.
    lineshape = ShieldingPALineshape(
        anisotropic_dimension=anisotropic_dimension,
        inverse_dimension=inverse_dimensions,
        channel="29Si",
        magnetic_flux_density="9.4 T",
        rotor_angle="87.14°",
        rotor_frequency="14 kHz",
        number_of_sidebands=4,
    )
    K = lineshape.kernel(supersampling=2)

    # TSVD compression of kernel and signal.
    new_system = TSVDCompression(K, data_object_truncated)
    compressed_K = new_system.compressed_K
    compressed_s = new_system.compressed_s
    assert new_system.truncation_index == 87

    # Fit smooth-LASSO at pre-optimized hyperparameters.
    s_lasso = SmoothLasso(
        alpha=2.07e-7, lambda1=7.85e-6, inverse_dimension=inverse_dimensions
    )
    s_lasso.fit(K=compressed_K, s=compressed_s)
    f_sol = s_lasso.f

    # Residuals of the fit against the uncompressed kernel/signal.
    # (Removed a dead commented-out assertion on residuals.mean().)
    residuals = s_lasso.residuals(K=K, s=data_object_truncated)
    np.testing.assert_almost_equal(residuals.std().value, 0.00336372, decimal=2)

    # Normalize the solution and convert its dimensions to ppm.
    f_sol /= f_sol.max()
    _ = [item.to("ppm", "nmr_frequency_ratio") for item in f_sol.dimensions]

    Q4_region = f_sol[0:8, 0:8, 3:18]
    Q4_region.description = "Q4 region"
    Q3_region = f_sol[0:8, 11:22, 8:20]
    Q3_region.description = "Q3 region"

    def assert_xyz_close(measured, expected):
        # Compare the three per-dimension values against the reference.
        np.testing.assert_almost_equal(
            np.asarray([measured[0].value, measured[1].value, measured[2].value]),
            np.asarray(expected),
            decimal=0,
        )

    # Analysis: volume, mean, and standard deviation of each region.
    int_Q4 = stats.integral(Q4_region)
    mean_Q4 = stats.mean(Q4_region)
    std_Q4 = stats.std(Q4_region)
    int_Q3 = stats.integral(Q3_region)
    mean_Q3 = stats.mean(Q3_region)
    std_Q3 = stats.std(Q3_region)

    np.testing.assert_almost_equal(
        (100 * int_Q4 / (int_Q4 + int_Q3)).value, 60.45388973909665, decimal=1
    )
    assert_xyz_close(
        mean_Q4, [8.604842824865958, 9.05845796147297, -103.6976331077773]
    )
    assert_xyz_close(
        mean_Q3, [10.35036818411856, 79.02481579085152, -90.58326773441284]
    )
    assert_xyz_close(
        std_Q4, [4.525457744683861, 4.686253809896416, 5.369228151035292]
    )
    assert_xyz_close(
        std_Q3, [6.138761032132587, 7.837190479891721, 4.210912435356488]
    )
# # Smooth-LASSO problem # '''''''''''''''''''' # # Solve the smooth-lasso problem. You may choose to skip this step and proceed to the # statistical learning method. Usually, the statistical learning method is a # time-consuming process that solves the smooth-lasso problem over a range of # predefined hyperparameters. # If you are unsure what range of hyperparameters to use, you can use this step for # a quick look into the possible solution by giving a guess value for the :math:`\alpha` # and :math:`\lambda` hyperparameters, and then decide on the hyperparameters range # accordingly. # guess alpha and lambda values. s_lasso = SmoothLasso(alpha=5e-5, lambda1=5e-6, inverse_dimension=inverse_dimension) s_lasso.fit(K=compressed_K, s=compressed_s) f_sol = s_lasso.f # %% # Here, ``f_sol`` is the solution corresponding to hyperparameters # :math:`\alpha=5\times10^{-5}` and :math:`\lambda=5\times 10^{-6}`. The plot of this # solution is _, ax = plt.subplots(1, 2, figsize=(9, 3.5), subplot_kw={"projection": "csdm"}) # the plot of the guess tensor distribution solution. plot2D(ax[0], f_sol / f_sol.max(), title="Guess distribution") # the plot of the true tensor distribution. plot2D(ax[1], true_data_object, title="True distribution")
# print(s_lasso.hyperparameters) # # the solution # f_sol = s_lasso.f # # the cross-validation error curve # CV_metric = s_lasso.cross_validation_curve # %% # If you use the above ``SmoothLassoCV`` method, skip the following code-block. The # following code-block evaluates the smooth-lasso solution at the pre-optimized # hyperparameters. # Setup the smooth lasso class s_lasso = SmoothLasso( alpha=8.34e-7, lambda1=6.16e-7, inverse_dimension=inverse_dimensions ) # run the fit method on the compressed kernel and compressed data. s_lasso.fit(K=compressed_K, s=compressed_s) # %% # The optimum solution # '''''''''''''''''''' # # The :attr:`~mrinversion.linear_model.SmoothLasso.f` attribute of the instance holds # the solution, f_sol = s_lasso.f # f_sol is a CSDM object. # %% # where ``f_sol`` is the optimum solution. #