def test_MAF_lineshape_kernel():
    for dim in anisotropic_dims:
        ns_obj = ShieldingPALineshape(
            anisotropic_dimension=dim,
            inverse_dimension=inverse_dimension,
            channel="29Si",
            magnetic_flux_density="9.4 T",
            rotor_angle="90 deg",
            rotor_frequency="14 kHz",
            number_of_sidebands=1,
        )
        zeta, eta = ns_obj._get_zeta_eta(supersampling=1)
        K = ns_obj.kernel(supersampling=1)
        sim_lineshape = generate_shielding_kernel(zeta, eta, np.pi / 2, 14000, 1).T

        assert np.allclose(K, sim_lineshape, rtol=1.0e-3, atol=1e-3)

        ns_obj = MAF(
            anisotropic_dimension=dim,
            inverse_dimension=inverse_dimension,
            channel="29Si",
            magnetic_flux_density="9.4 T",
        )
        zeta, eta = ns_obj._get_zeta_eta(supersampling=1)
        K = ns_obj.kernel(supersampling=1)
        sim_lineshape = generate_shielding_kernel(zeta, eta, np.pi / 2, 14000, 1).T

        assert np.allclose(K, sim_lineshape, rtol=1.0e-3, atol=1e-3)

        _ = TSVDCompression(K, s=np.arange(96))
        assert _.truncation_index == 15
def test_spinning_sidebands_kernel():
    # 1
    for dim in anisotropic_dims:
        ns_obj = ShieldingPALineshape(
            anisotropic_dimension=dim,
            inverse_dimension=inverse_dimension,
            channel="29Si",
            magnetic_flux_density="9.4 T",
            rotor_angle="54.735 deg",
            rotor_frequency="100 Hz",
            number_of_sidebands=96,
        )
        zeta, eta = ns_obj._get_zeta_eta(supersampling=1)
        K = ns_obj.kernel(supersampling=1)
        sim_lineshape = generate_shielding_kernel(
            zeta, eta, 0.9553059660790962, 100, 96
        ).T

        assert np.allclose(K, sim_lineshape, rtol=1.0e-3, atol=1e-3)

        # 2
        ns_obj = ShieldingPALineshape(
            anisotropic_dimension=dim,
            inverse_dimension=inverse_dimension_ppm,
            channel="29Si",
            magnetic_flux_density="9.4 T",
            rotor_angle="54.735 deg",
            rotor_frequency="100 Hz",
            number_of_sidebands=96,
        )
        zeta, eta = ns_obj._get_zeta_eta(supersampling=1)
        K = ns_obj.kernel(supersampling=1)
        sim_lineshape = generate_shielding_kernel(
            zeta, eta, 0.9553059660790962, 100, 96, to_ppm=False
        ).T

        assert np.allclose(K, sim_lineshape, rtol=1.0e-3, atol=1e-3)

        # 3
        ns_obj = SpinningSidebands(
            anisotropic_dimension=dim,
            inverse_dimension=inverse_dimension,
            channel="29Si",
            magnetic_flux_density="9.4 T",
        )
        zeta, eta = ns_obj._get_zeta_eta(supersampling=1)
        K = ns_obj.kernel(supersampling=1)
        sim_lineshape = generate_shielding_kernel(
            zeta, eta, 0.9553059660790962, 208.33, 96
        ).T

        assert np.allclose(K, sim_lineshape, rtol=1.0e-3, atol=1e-3)

        _ = TSVDCompression(K, s=np.arange(96))
        assert _.truncation_index == 15
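# The hard-coded angle 0.9553059660790962 rad passed to generate_shielding_kernel
# above is simply 54.735 deg (the rotor angle given to ShieldingPALineshape, i.e.
# essentially the magic angle, arccos(1/sqrt(3)) ≈ 54.7356 deg) expressed in radians.
# A small sanity check added here for illustration; it is not part of the original
# test suite and assumes numpy is imported as np at the top of this module.
def test_rotor_angle_constant_is_54_735_deg_in_radians():
    np.testing.assert_allclose(np.deg2rad(54.735), 0.9553059660790962)
    # The exact magic angle, for comparison.
    np.testing.assert_allclose(np.arccos(1 / np.sqrt(3)), 0.9553166, rtol=1e-6)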
def test_inversion():
    domain = "https://www.ssnmr.org/sites/default/files/mrsimulator"
    filename = f"{domain}/MAS_SE_PIETA_5%25Li2O_FT.csdf"
    data_object = cp.load(filename)

    # Inversion only requires the real part of the complex dataset.
    data_object = data_object.real
    sigma = 1110.521  # data standard deviation

    # Convert the MAS dimension from Hz to ppm.
    data_object.dimensions[0].to("ppm", "nmr_frequency_ratio")

    data_object = data_object.T
    data_object_truncated = data_object[:, 1220:-1220]
    data_object_truncated.dimensions[0].to("s")  # set coordinates to 's'

    kernel_dimension = data_object_truncated.dimensions[0]
    relaxT2 = relaxation.T2(
        kernel_dimension=kernel_dimension,
        inverse_dimension=dict(
            count=32,
            minimum="1e-3 s",
            maximum="1e4 s",
            scale="log",
            label="log (T2 / s)",
        ),
    )
    inverse_dimension = relaxT2.inverse_dimension
    K = relaxT2.kernel(supersampling=20)

    new_system = TSVDCompression(K, data_object_truncated)
    compressed_K = new_system.compressed_K
    compressed_s = new_system.compressed_s
    assert new_system.truncation_index == 18

    # Set up the pre-defined range of lambda values.
    lambdas = 10 ** (-4 + 5 * (np.arange(32) / 31))

    # Set up the FISTA lasso cross-validation class.
    s_lasso = LassoFistaCV(
        lambdas=lambdas,  # A numpy array of lambda values.
        sigma=sigma,  # data standard deviation
        folds=5,  # The number of folds in n-folds cross-validation.
        inverse_dimension=inverse_dimension,  # previously defined inverse dimensions.
    )

    # Run the fit method on the compressed kernel and compressed data.
    s_lasso.fit(K=compressed_K, s=compressed_s)

    np.testing.assert_almost_equal(s_lasso.hyperparameters["lambda"], 0.116, decimal=1)

    s_lasso.cv_plot()

    residuals = s_lasso.residuals(K=K, s=data_object_truncated)
    np.testing.assert_almost_equal(residuals.std().value, 1538.48, decimal=1)
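# Note on the lambda grid in test_inversion: 10 ** (-4 + 5 * (np.arange(32) / 31)) is
# just a 32-point logarithmic grid from 1e-4 to 1e1. The check below is added for
# illustration only (not part of the original suite) and assumes numpy is imported
# as np at the top of this module.
def test_lambda_grid_matches_logspace():
    manual = 10 ** (-4 + 5 * (np.arange(32) / 31))
    np.testing.assert_allclose(manual, np.logspace(-4, 1, num=32))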
def generate_kernel(
    n, data, count0, inc0, count1, inc1, channel, B0, theta, n_su, d_range, k_typ
):
    # No input data yet; abort the callback.
    if data is None:
        raise PreventUpdate

    if d_range is None:
        d_range = [[0, -1], [0, -1]]

    data = cp.parse_dict(data)

    anisotropic_dimension = data.dimensions[0]
    inverse_dimensions = [
        cp.LinearDimension(count=count0, increment=f"{inc0} Hz", label="x"),
        cp.LinearDimension(count=count1, increment=f"{inc1} Hz", label="y"),
    ]

    vr = 0
    ns = 1

    if k_typ == "sideband-correlation":
        # Sideband-correlation kernel: rotor frequency equals the increment of the
        # anisotropic dimension, with one sideband per point along that dimension.
        vr = anisotropic_dimension.increment.to("Hz")
        ns = anisotropic_dimension.count

    if k_typ == "MAF":
        # MAF kernel: an effectively infinite rotor frequency with a single sideband.
        vr = "1 GHz"
        ns = 1

    K = ShieldingPALineshape(
        anisotropic_dimension=anisotropic_dimension,
        inverse_dimension=inverse_dimensions,
        channel=channel,
        magnetic_flux_density=f"{B0} T",
        rotor_angle=f"{theta} °",
        rotor_frequency=f"{vr}",
        number_of_sidebands=ns,
    ).kernel(supersampling=int(n_su))

    ranges = slice(d_range[1][0], d_range[1][1], None)
    data_truncated = data[:, ranges]

    new_system = TSVDCompression(K, data_truncated)
    compressed_K = new_system.compressed_K
    compressed_s = new_system.compressed_s

    return {
        "kernel": compressed_K,
        "signal": compressed_s.dict(),
        "inverse_dimensions": [item.dict() for item in inverse_dimensions],
    }
def test_fista():
    domain = "https://sandbox.zenodo.org/record/1065394/files"
    filename = f"{domain}/test1_signal.csdf"
    signal = cp.load(filename)
    sigma = 0.0008

    datafile = f"{domain}/test1_t2.csdf"
    true_dist = cp.load(datafile)

    kernel_dimension = signal.dimensions[0]

    relaxT2 = relaxation.T2(
        kernel_dimension=kernel_dimension,
        inverse_dimension=dict(
            count=64,
            minimum="1e-2 s",
            maximum="1e3 s",
            scale="log",
            label="log (T2 / s)",
        ),
    )
    inverse_dimension = relaxT2.inverse_dimension
    K = relaxT2.kernel(supersampling=1)

    new_system = TSVDCompression(K, signal)
    compressed_K = new_system.compressed_K
    compressed_s = new_system.compressed_s
    assert new_system.truncation_index == 29

    lambdas = 10 ** (-5 + 4 * (np.arange(16) / 15))

    f_lasso_cv = LassoFistaCV(
        lambdas=lambdas,  # A numpy array of lambda values.
        folds=5,  # The number of folds in n-folds cross-validation.
        sigma=sigma,  # noise standard deviation
        inverse_dimension=inverse_dimension,  # previously defined inverse dimensions.
    )
    f_lasso_cv.fit(K=compressed_K, s=compressed_s)

    sol = f_lasso_cv.f
    assert np.argmax(sol.y[0].components[0]) == np.argmax(true_dist.y[0].components[0])

    residuals = f_lasso_cv.residuals(K=K, s=signal)
    std = residuals.std()

    np.testing.assert_almost_equal(std.value, sigma, decimal=3)
# instance to generate the MAF line-shape kernel.
K = lineshape.kernel(supersampling=1)
print(K.shape)

# %%
# The kernel ``K`` is a NumPy array of shape (128, 625), where the axes with 128 and
# 625 points are the anisotropic dimension and the features (x-y coordinates)
# corresponding to the :math:`25\times 25` `x`-`y` grid, respectively.

# %%
# Data Compression
# ''''''''''''''''
#
# Data compression is optional but recommended. It may reduce the size of the
# inverse problem and, thus, the computation time.
new_system = TSVDCompression(K, data_object_truncated)
compressed_K = new_system.compressed_K
compressed_s = new_system.compressed_s

print(f"truncation_index = {new_system.truncation_index}")

# %%
# Solving the inverse problem
# ---------------------------
#
# Smooth LASSO cross-validation
# '''''''''''''''''''''''''''''
#
# Solve the smooth-lasso problem. Ordinarily, one should use the statistical learning
# method to solve the inverse problem over a range of α and λ values and then determine
# the best nuclear shielding tensor parameter distribution for the given 2D MAF
relaxT2 = relaxation.T2(
    kernel_dimension=kernel_dimension,
    inverse_dimension=dict(
        count=64,
        minimum="1e-2 s",
        maximum="1e3 s",
        scale="log",
        label="log (T2 / s)",
    ),
)
inverse_dimension = relaxT2.inverse_dimension
K = relaxT2.kernel(supersampling=1)

# %%
# Data Compression
# ''''''''''''''''
new_system = TSVDCompression(K, signal)
compressed_K = new_system.compressed_K
compressed_s = new_system.compressed_s

print(f"truncation_index = {new_system.truncation_index}")

# %%
# FISTA LASSO cross-validation
# ''''''''''''''''''''''''''''
# Create a guess range of values for the :math:`\lambda` hyperparameter.
lambdas = 10 ** (-5 + 4 * (np.arange(64) / 63))

# Set up the FISTA lasso cross-validation class.
f_lasso_cv = LassoFistaCV(
    lambdas=lambdas,  # A numpy array of lambda values.
    folds=5,  # The number of folds in n-folds cross-validation.
def test_01():
    domain = "https://sandbox.zenodo.org/record/1065347/files"
    filename = f"{domain}/8lnwmg0dr7y6egk40c2orpkmmugh9j7c.csdf"
    data_object = cp.load(filename)
    data_object = data_object.real
    _ = [item.to("ppm", "nmr_frequency_ratio") for item in data_object.dimensions]

    data_object = data_object.T
    data_object_truncated = data_object[:, 155:180]

    anisotropic_dimension = data_object_truncated.dimensions[0]
    inverse_dimensions = [
        cp.LinearDimension(count=25, increment="400 Hz", label="x"),
        cp.LinearDimension(count=25, increment="400 Hz", label="y"),
    ]

    lineshape = ShieldingPALineshape(
        anisotropic_dimension=anisotropic_dimension,
        inverse_dimension=inverse_dimensions,
        channel="29Si",
        magnetic_flux_density="9.4 T",
        rotor_angle="87.14°",
        rotor_frequency="14 kHz",
        number_of_sidebands=4,
    )
    K = lineshape.kernel(supersampling=2)

    new_system = TSVDCompression(K, data_object_truncated)
    compressed_K = new_system.compressed_K
    compressed_s = new_system.compressed_s
    assert new_system.truncation_index == 87

    s_lasso = SmoothLasso(
        alpha=2.07e-7, lambda1=7.85e-6, inverse_dimension=inverse_dimensions
    )
    s_lasso.fit(K=compressed_K, s=compressed_s)
    f_sol = s_lasso.f

    residuals = s_lasso.residuals(K=K, s=data_object_truncated)
    # assert np.allclose(residuals.mean().value, 0.00048751)
    np.testing.assert_almost_equal(residuals.std().value, 0.00336372, decimal=2)

    f_sol /= f_sol.max()
    [item.to("ppm", "nmr_frequency_ratio") for item in f_sol.dimensions]

    Q4_region = f_sol[0:8, 0:8, 3:18]
    Q4_region.description = "Q4 region"

    Q3_region = f_sol[0:8, 11:22, 8:20]
    Q3_region.description = "Q3 region"

    # Analysis
    int_Q4 = stats.integral(Q4_region)  # volume of the Q4 distribution
    mean_Q4 = stats.mean(Q4_region)  # mean of the Q4 distribution
    std_Q4 = stats.std(Q4_region)  # standard deviation of the Q4 distribution

    int_Q3 = stats.integral(Q3_region)  # volume of the Q3 distribution
    mean_Q3 = stats.mean(Q3_region)  # mean of the Q3 distribution
    std_Q3 = stats.std(Q3_region)  # standard deviation of the Q3 distribution

    np.testing.assert_almost_equal(
        (100 * int_Q4 / (int_Q4 + int_Q3)).value, 60.45388973909665, decimal=1
    )

    np.testing.assert_almost_equal(
        np.asarray([mean_Q4[0].value, mean_Q4[1].value, mean_Q4[2].value]),
        np.asarray([8.604842824865958, 9.05845796147297, -103.6976331077773]),
        decimal=0,
    )

    np.testing.assert_almost_equal(
        np.asarray([mean_Q3[0].value, mean_Q3[1].value, mean_Q3[2].value]),
        np.asarray([10.35036818411856, 79.02481579085152, -90.58326773441284]),
        decimal=0,
    )

    np.testing.assert_almost_equal(
        np.asarray([std_Q4[0].value, std_Q4[1].value, std_Q4[2].value]),
        np.asarray([4.525457744683861, 4.686253809896416, 5.369228151035292]),
        decimal=0,
    )

    np.testing.assert_almost_equal(
        np.asarray([std_Q3[0].value, std_Q3[1].value, std_Q3[2].value]),
        np.asarray([6.138761032132587, 7.837190479891721, 4.210912435356488]),
        decimal=0,
    )
# and `rotor_frequency`, are set to match the conditions under which the spectrum
# was acquired. The value of the `number_of_sidebands` argument is the number of
# sidebands calculated for each line-shape within the kernel.
#
# Once the ShieldingPALineshape instance is created, use the
# :meth:`~mrinversion.kernel.nmr.ShieldingPALineshape.kernel` method of the
# instance to generate the MAF line-shape kernel.
K = lineshape.kernel(supersampling=1)

# %%
# Data Compression
# ''''''''''''''''
#
# Data compression is optional but recommended. It may reduce the size of the
# inverse problem and, thus, the computation time.
new_system = TSVDCompression(K, data_object)
compressed_K = new_system.compressed_K
compressed_s = new_system.compressed_s

print(f"truncation_index = {new_system.truncation_index}")

# %%
# Solving the inverse problem
# ---------------------------
#
# Smooth-LASSO problem
# ''''''''''''''''''''
#
# Solve the smooth-lasso problem. You may choose to skip this step and proceed to the
# statistical learning method. Usually, the statistical learning method is a
# time-consuming process that solves the smooth-lasso problem over a range of