def test_footprint_correction_02(scan2d_from_nxs_01: Scan2D):
    """
    Do a really naive footprint correction assuming a step function beam.
    Enforce that this is the same as our fancy correction, to within 10%.
    (Note: they are actually about 10% out from each other.)
    """
    # 100 micron beam.
    beam_width = 100e-6
    # 1 mm sample.
    sample_size = 1e-3
    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)
    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)

    beam_size_on_sample = beam_width / \
        np.sin(np.radians(scan2d_from_nxs_01.theta))
    incident_beam_fraction = sample_size / beam_size_on_sample
    test_intensities = intensities_0 / incident_beam_fraction
    test_intensities_e = intensities_e_0 / incident_beam_fraction

    scan2d_from_nxs_01.footprint_correction(beam_width, sample_size)

    for i, test_intensity in enumerate(test_intensities):
        assert test_intensity == pytest.approx(
            scan2d_from_nxs_01.intensity[i], 0.1)
    for i, test_intensity_e in enumerate(test_intensities_e):
        assert test_intensity_e == pytest.approx(
            scan2d_from_nxs_01.intensity_e[i], 0.1)
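
# The ~10% discrepancy tolerated above comes from the beam profile model: the
# step-function estimate assumes a uniform ("top hat") beam, whereas the full
# correction presumably models a Gaussian beam, whose illuminated fraction
# follows the normal CDF. A minimal sketch of that idea (this helper is
# hypothetical and not part of the library's API):
def _gaussian_beam_fraction(beam_width, sample_size, theta):
    """
    Fraction of a Gaussian beam (FWHM ``beam_width``) intercepted by a sample
    of length ``sample_size`` at an incident angle ``theta`` (in degrees).
    """
    from scipy.stats import norm
    sigma = beam_width / (2 * np.sqrt(2 * np.log(2)))  # FWHM -> sigma.
    half_footprint = sample_size * np.sin(np.radians(theta)) / 2
    return norm.cdf(half_footprint, scale=sigma) - \
        norm.cdf(-half_footprint, scale=sigma)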
def test_crop_02(scan2d_from_nxs_01: Scan2D, region_01: Region):
    """
    Make sure that our cropped region has the correct size.
    """
    scan2d_from_nxs_01.crop(crop_to_region, region=region_01)
    assert (scan2d_from_nxs_01.images[0].shape[0] *
            scan2d_from_nxs_01.images[0].shape[1]) == region_01.num_pixels
def test_crop_03(scan2d_from_nxs_01: Scan2D, region_01: Region):
    """
    Make sure that the region we've cropped to has the specified shape.
    """
    scan2d_from_nxs_01.crop(crop_to_region, region=region_01)
    assert scan2d_from_nxs_01.images[0].shape[0] == region_01.x_length
    assert scan2d_from_nxs_01.images[0].shape[1] == region_01.y_length
def test_qdcd_normalisation_02(scan2d_from_nxs_01: Scan2D, dcd_norm_01_splev,
                               parsed_dcd_normalisation_01):
    """
    Make sure that our nice splev normalisation does something similar to what
    would be achieved using a simple cubic scipy.interpolate.interp1d.
    """
    # First, generate some test intensities by dividing by an interp1d
    # function.
    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)
    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)
    _, dataframe = parsed_dcd_normalisation_01
    interp = interp1d(dataframe["qdcd_"], dataframe['adc2'], kind='cubic')

    test_intensities = intensities_0 / interp(scan2d_from_nxs_01.q_vectors)
    test_intensities_e = intensities_e_0 / interp(scan2d_from_nxs_01.q_vectors)

    # Now, carry out the qdcd normalisation as normal.
    scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)

    # These interpolation methods could be decently different, but let's
    # enforce that our values are the same to within 1%.
    for i, test_intensity in enumerate(test_intensities):
        assert test_intensity == pytest.approx(
            scan2d_from_nxs_01.intensity[i], rel=0.01)
    for i, test_inten_e in enumerate(test_intensities_e):
        assert test_inten_e == pytest.approx(
            scan2d_from_nxs_01.intensity_e[i], rel=0.01)
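
# For reference, the splev route being compared against interp1d above
# presumably boils down to dividing by a B-spline evaluated at each q. A
# minimal sketch under that assumption (hypothetical helper, not the
# library's actual implementation):
def _splev_normalise(intensity, q_vectors, qdcd, adc2):
    """Divide intensities by a B-spline fit of the DCD data (adc2 vs qdcd)."""
    from scipy.interpolate import splev, splrep
    tck = splrep(qdcd, adc2)
    return intensity / splev(q_vectors, tck)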
def test_bkg_sub_02(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that the background subtraction function is doing something.
    """
    region_list = scan2d_from_nxs_01.metadata.background_regions
    scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=region_list)
    assert scan2d_from_nxs_01.images[0].bkg != 0
    assert scan2d_from_nxs_01.images[0].bkg_e != 0
def test_crop_01(scan2d_from_nxs_01: Scan2D, region_01):
    """
    Check that crop is decreasing the size of the image.
    """
    initial_shape = scan2d_from_nxs_01.images[0].shape
    scan2d_from_nxs_01.crop(crop_to_region, region=region_01)
    assert scan2d_from_nxs_01.images[0].shape[0] < initial_shape[0]
    assert scan2d_from_nxs_01.images[0].shape[1] < initial_shape[1]
def test_subsample_q_02(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that we can set just an upper bound. Note that this dataset
    spans q-values from 0.025Å⁻¹ to 0.06Å⁻¹.
    """
    q_max = 0.04
    assert max(scan2d_from_nxs_01.q_vectors) > q_max
    scan2d_from_nxs_01.subsample_q(q_max=q_max)
    assert max(scan2d_from_nxs_01.q_vectors) <= q_max
def test_subsample_q_03(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that we can set a lower bound. Note that this dataset spans
    q-values from 0.025Å⁻¹ to 0.06Å⁻¹.
    """
    q_min = 0.04
    assert min(scan2d_from_nxs_01.q_vectors) < q_min
    scan2d_from_nxs_01.subsample_q(q_min=q_min)
    assert min(scan2d_from_nxs_01.q_vectors) >= q_min
def test_transmission_normalisation_intensities(scan: Scan2D, transmission):
    """
    Make sure that we can correct for the attenuation of the beam. The
    transmission values have been manually read from the .nxs file using a
    GUI.
    """
    intensity_0 = np.copy(scan.intensity)
    scan.transmission_normalisation()

    for i, intensity in enumerate(scan.intensity):
        assert intensity == intensity_0[i] / transmission
def test_profile_transmission_normalisation(profile_01: Profile,
                                            scan2d_from_nxs_01: Scan2D):
    """
    Assert that carrying out a transmission normalisation on an instance of
    Profile is the same thing as doing it on each of its constituent scans.
    """
    profile_01.transmission_normalisation()
    scan2d_from_nxs_01.transmission_normalisation()

    assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity)
    assert_allclose(profile_01.intensity_e, scan2d_from_nxs_01.intensity_e)
def test_transmission_normalisation_errors(scan: Scan2D, transmission):
    """
    Make sure that we can correct for the attenuation of the beam. The
    transmission values have been manually read from the .nxs file using a
    GUI. This function checks that the intensity_e values have been dealt
    with properly.
    """
    intensity_e_0 = np.copy(scan.intensity_e)
    scan.transmission_normalisation()

    for i, intensity_e in enumerate(scan.intensity_e):
        assert intensity_e == intensity_e_0[i] / transmission
def test_subsample_q_04(scan2d_from_nxs_01: Scan2D):
    """
    Test that we can set both lower and upper bounds.
    """
    q_min = 0.032
    q_max = 0.051
    scan2d_from_nxs_01.subsample_q(q_min, q_max)
    assert min(scan2d_from_nxs_01.q_vectors) >= q_min
    assert max(scan2d_from_nxs_01.q_vectors) <= q_max
def test_profile_qdcd_normalisation(profile_01: Profile,
                                    scan2d_from_nxs_01: Scan2D,
                                    dcd_norm_01_splev):
    """
    Assert that carrying out the qdcd correction on an instance of Profile is
    the same thing as doing it on each of its constituent scans.
    """
    profile_01.qdcd_normalisation(dcd_norm_01_splev)
    scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)

    assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity)
    assert_allclose(profile_01.intensity_e, scan2d_from_nxs_01.intensity_e)
def test_subsample_q_01(scan2d_from_nxs_01: Scan2D):
    """
    Make sure subsample_q deletes the appropriate things. Because it just
    calls remove_data_points, which has already been tested extensively in
    test_data, we only need to check a couple of values to make sure the
    right qs have been deleted, and we know that all the other attributes
    will have been handled correctly.
    """
    original_len = len(scan2d_from_nxs_01.theta)

    # Defaults shouldn't change anything.
    scan2d_from_nxs_01.subsample_q()
    assert len(scan2d_from_nxs_01.theta) == original_len
def test_profile_bkg_sub(profile_01: Profile, scan2d_from_nxs_01: Scan2D):
    """
    Make sure that bkg_sub from the profile is the same as bkg_sub from the
    scan.
    """
    bkg_region = scan2d_from_nxs_01.metadata.background_regions[0]
    profile_01.bkg_sub(roi_subtraction, list_of_regions=[bkg_region])
    scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=[bkg_region])

    assert_allclose(profile_01.intensity_e,
                    scan2d_from_nxs_01.intensity_e, 1e-4)
    assert_allclose(profile_01.intensity, scan2d_from_nxs_01.intensity, 1e-4)
def test_qdcd_normalisation_01(scan2d_from_nxs_01: Scan2D, dcd_norm_01_splev):
    """
    Make sure that our qdcd normalisation is doing something, and isn't
    failing silently. (This is a dumb test, but it's really quite hard to
    test that this is working without just rewriting a division by splev.)
    """
    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)
    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)

    scan2d_from_nxs_01.qdcd_normalisation(dcd_norm_01_splev)

    assert (intensities_0 != scan2d_from_nxs_01.intensity).all()
    assert (intensities_e_0 != scan2d_from_nxs_01.intensity_e).all()
def test_gauss_bkg_01(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that our Gaussian fit background subtraction function is doing
    something. Note that this function is not being tested for sensible
    results because this doesn't generally seem to be a sensible technique to
    use on I07. As more instruments are supported, if this technique becomes
    useful, its tests will need to be extended. For now, only the minimum is
    being done to ensure that it is roughly functional.
    """
    scan2d_from_nxs_01.bkg_sub(fit_gaussian_1d)
    assert scan2d_from_nxs_01.images[0].bkg != 0
    assert scan2d_from_nxs_01.images[0].bkg_e != 0
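
# Sketch of the idea behind a 1D Gaussian background fit (an assumed approach
# for illustration, not necessarily how fit_gaussian_1d is implemented): fit
# a Gaussian plus a constant offset to an intensity profile and take the
# offset, with its fitted uncertainty, as the background.
def _fit_gaussian_1d_sketch(x, y):
    """Return (bkg, bkg_e) from a Gaussian + constant fit to y(x)."""
    from scipy.optimize import curve_fit

    def model(x, amp, mu, sigma, bkg):
        return amp * np.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) + bkg

    p0 = [y.max() - y.min(), x[np.argmax(y)], 1.0, y.min()]
    popt, pcov = curve_fit(model, x, y, p0=p0)
    return popt[3], np.sqrt(pcov[3, 3])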
def test_bkg_sub_04(scan2d_from_nxs_01: Scan2D, scan2d_from_nxs_01_copy,
                    custom_bkg_region_01):
    """
    Make sure that using two background regions yields a lower uncertainty
    measurement of the background than using just one background region.
    """
    regions_1 = [scan2d_from_nxs_01.metadata.background_regions[0]]
    regions_2 = [scan2d_from_nxs_01.metadata.background_regions[0],
                 custom_bkg_region_01]

    scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=regions_1)
    scan2d_from_nxs_01_copy.bkg_sub(roi_subtraction,
                                    list_of_regions=regions_2)

    for i, image_1 in enumerate(scan2d_from_nxs_01.images):
        image_2 = scan2d_from_nxs_01_copy.images[i]
        assert image_1.bkg_e > image_2.bkg_e
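
# Why test_bkg_sub_04 should hold (illustrative arithmetic, not library
# code): averaging n independent background estimates, each with standard
# error sigma, gives a combined standard error of sigma/sqrt(n), so adding a
# second region can only reduce bkg_e.
def _combined_background_error(sigmas):
    """Standard error of the unweighted mean of independent estimates."""
    sigmas = np.asarray(sigmas)
    return np.sqrt(np.sum(sigmas ** 2)) / len(sigmas)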
def test_footprint_correction_01(scan2d_from_nxs_01: Scan2D):
    """
    Makes sure that the footprint correction actually does something for a
    reasonable beam FWHM and a small (1 mm) sample.
    """
    # 100 micron beam.
    beam_width = 100e-6
    # 1 mm sample.
    sample_size = 1e-3
    intensities_0 = np.copy(scan2d_from_nxs_01.intensity)
    intensities_e_0 = np.copy(scan2d_from_nxs_01.intensity_e)

    scan2d_from_nxs_01.footprint_correction(beam_width, sample_size)

    assert (intensities_0 != scan2d_from_nxs_01.intensity).all()
    assert (intensities_e_0 != scan2d_from_nxs_01.intensity_e).all()
def test_remove_data_points_01(data: Data):
    """
    First data point removal test.
    """
    # Make a deep copy of data. Worth noting that this copy won't quite be
    # precise if our generic_data was defined using q values, hence the need
    # for pytest.approx later.
    data_copy = Data(np.copy(data.intensity), np.copy(data.intensity_e),
                     data.energy, np.copy(data.theta))
    # If our data is a Scan2D, we need to construct it slightly differently.
    if isinstance(data, Scan2D):
        data_copy = Scan2D(data_copy, data.metadata,
                           list(np.copy(data.images)))

    data.remove_data_points([1])

    assert len(data.intensity) + 1 == len(data_copy.intensity)
    assert len(data.intensity_e) + 1 == len(data_copy.intensity_e)
    assert len(data.theta) + 1 == len(data_copy.theta)
    assert len(data.q_vectors) + 1 == len(data_copy.q_vectors)
    assert len(data.reflectivity) + 1 == len(data_copy.reflectivity)
    assert len(data.reflectivity_e) + 1 == len(data_copy.reflectivity_e)

    assert data.intensity[1] == data_copy.intensity[2]
    assert data.intensity_e[1] == data_copy.intensity_e[2]
    assert data.theta[1] == pytest.approx(data_copy.theta[2], rel=1e-3)
    assert data.q_vectors[1] == pytest.approx(data_copy.q_vectors[2],
                                              rel=1e-3)
    assert data.reflectivity[1] == data_copy.reflectivity[2]
    assert data.reflectivity_e[1] == data_copy.reflectivity_e[2]

    if isinstance(data, Scan2D):
        assert len(data.images) + 1 == len(data_copy.images)
        assert data.images[1] == data_copy.images[2]
def test_bkg_sub_03(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that the background subtraction decreases our intensity.
    """
    vals, stdevs = (np.zeros(len(scan2d_from_nxs_01.intensity)),
                    np.zeros(len(scan2d_from_nxs_01.intensity)))
    # Recompute each intensity (and its error) directly from the image sums.
    for i, image in enumerate(scan2d_from_nxs_01.images):
        vals[i], stdevs[i] = image.sum()

    # Store the summed values as the new intensity(Q).
    scan2d_from_nxs_01.intensity = np.array(vals)
    scan2d_from_nxs_01.intensity_e = np.array(stdevs)

    region_list = scan2d_from_nxs_01.metadata.background_regions
    scan2d_from_nxs_01.bkg_sub(roi_subtraction, list_of_regions=region_list)

    assert (vals > scan2d_from_nxs_01.intensity).all()
def test_gauss_bkg_02(scan2d_from_nxs_01: Scan2D):
    """
    Make sure that carrying out this subtraction decreases our intensity.
    Note that this function is not being tested for sensible results because
    this doesn't generally seem to be a sensible technique to use on I07. As
    more instruments are supported, if this technique becomes useful, its
    tests will need to be extended. For now, only the minimum is being done
    to ensure that it is roughly functional.
    """
    vals = np.zeros(len(scan2d_from_nxs_01.intensity))
    # Recompute each intensity directly from the image sums.
    for i, image in enumerate(scan2d_from_nxs_01.images):
        vals[i], _ = image.sum()

    # Store the summed values as the new intensity(Q).
    scan2d_from_nxs_01.intensity = np.array(vals)
    intensity_0 = np.copy(scan2d_from_nxs_01.intensity)

    scan2d_from_nxs_01.bkg_sub(fit_gaussian_1d)

    assert (scan2d_from_nxs_01.intensity < intensity_0).all()
def test_remove_data_points_02(data: Data):
    """
    Second data point removal test. Most of these tests are fairly trivial,
    but the point is more to make sure that we're indeed remembering to
    remove a data point from every single array. Sure, it would be great to
    split these into their own tests, but... cba.

    These could also have been wrapped into fancy tests where I calculate
    with code which indices in the new data object correspond to which
    indices in the original data_copy. But, that leaves room for error,
    which defeats the point of testing.
    """
    # Make a deep copy of data.
    data_copy = Data(np.copy(data.intensity), np.copy(data.intensity_e),
                     data.energy, np.copy(data.theta))
    # If our data is a Scan2D, we need to construct it slightly differently.
    if isinstance(data, Scan2D):
        data_copy = Scan2D(data_copy, data.metadata,
                           list(np.copy(data.images)))

    data.remove_data_points([1, 2, 4])

    assert len(data.intensity) + 3 == len(data_copy.intensity)
    assert len(data.intensity_e) + 3 == len(data_copy.intensity_e)
    assert len(data.theta) + 3 == len(data_copy.theta)
    assert len(data.q_vectors) + 3 == len(data_copy.q_vectors)
    assert len(data.reflectivity) + 3 == len(data_copy.reflectivity)
    assert len(data.reflectivity_e) + 3 == len(data_copy.reflectivity_e)

    assert data.intensity[1] == data_copy.intensity[3]
    assert data.intensity_e[1] == data_copy.intensity_e[3]
    assert data.theta[1] == pytest.approx(data_copy.theta[3], rel=1e-3)
    assert data.q_vectors[1] == pytest.approx(data_copy.q_vectors[3],
                                              rel=1e-3)
    assert data.reflectivity[1] == data_copy.reflectivity[3]
    assert data.reflectivity_e[1] == data_copy.reflectivity_e[3]

    assert data.intensity[2] == data_copy.intensity[5]
    assert data.intensity_e[2] == data_copy.intensity_e[5]
    assert data.theta[2] == pytest.approx(data_copy.theta[5], rel=1e-3)
    assert data.q_vectors[2] == pytest.approx(data_copy.q_vectors[5],
                                              rel=1e-3)
    assert data.reflectivity[2] == data_copy.reflectivity[5]
    assert data.reflectivity_e[2] == data_copy.reflectivity_e[5]

    if isinstance(data, Scan2D):
        assert len(data.images) + 3 == len(data_copy.images)
        assert data.images[1] == data_copy.images[3]
        assert data.images[2] == data_copy.images[5]
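
# For reference, the behaviour exercised by both removal tests amounts to
# filtering every per-point array at the given indices. A sketch of the
# assumed semantics (not the actual remove_data_points implementation):
def _remove_points(arrays, indices):
    """Drop the elements at ``indices`` from each 1D array."""
    return [np.delete(arr, indices) for arr in arrays]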