Example #1
def test_rw_raw():
    """Test read a project to workspace and write in the scope of raw data

    Returns
    -------

    """
    raw_project_name = os.path.join(os.getcwd(), 'data/HZB_Raw_Project.h5')

    # Read to workspace
    source_project = HidraProjectFile(raw_project_name, 'r')

    # Load the project into the workspace
    source_workspace = workspaces.HidraWorkspace('Source HZB')
    source_workspace.load_hidra_project(source_project,
                                        load_raw_counts=True,
                                        load_reduced_diffraction=False)

    # Export
    target_project = HidraProjectFile('HZB_HiDra_Test.h5', 'w')
    # Experiment data
    source_workspace.save_experimental_data(target_project,
                                            sub_runs=range(1, 41))

    # Instrument
    detector_setup = source_workspace.get_instrument_setup()
    instrument_setup = HidraSetup(detector_setup=detector_setup)
    target_project.write_instrument_geometry(instrument_setup)

    # Save
    target_project.save(True)

    return
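These snippets are excerpted from pyRS test and application code, so their import lines are not shown. As a rough sketch, and assuming the usual pyRS package layout (the exact module paths may differ between versions), the names used above would be imported roughly like this:

import os
import numpy as np
from pyrs.projectfile import HidraProjectFile, HidraProjectFileMode
from pyrs.core import workspaces
from pyrs.core.instrument_geometry import HidraSetup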
Example #2
    def save_fit_result(self, out_file_name=''):
        """Save the fit result, including a copy of the rest of the file if it does not exist at the specified path.

        If out_file_name is empty or if it matches the parent's current file, this updates the file.

        Otherwise, the parent's file is copied to out_file_name and
        then the updated peak fit data is written to the copy.

        :param out_file_name: string absolute fill path for the place to save the file

        """

        fit_result = self.parent.fit_result
        if fit_result is None:
            return

        # an empty out_file_name means: update the parent's current file in place
        if out_file_name and self.parent._curr_file_name != out_file_name:
            copyfile(self.parent._curr_file_name, out_file_name)
            current_project_file = out_file_name
        else:
            current_project_file = self.parent._curr_file_name

        project_h5_file = HidraProjectFile(current_project_file,
                                           mode=HidraProjectFileMode.READWRITE)
        peakcollections = fit_result.peakcollections
        for peak in peakcollections:
            project_h5_file.write_peak_parameters(peak)
        project_h5_file.save(False)
        project_h5_file.close()
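The branch above decides whether to update the parent's current project file in place or to write into a copy. The helper below isolates that decision as a standalone sketch; resolve_target and its arguments are hypothetical stand-ins for self.parent._curr_file_name and out_file_name, not part of the pyRS API.

from shutil import copyfile

def resolve_target(curr_file_name, out_file_name=''):
    """Return the path that should receive the peak data, copying the project file first if needed."""
    if out_file_name and curr_file_name != out_file_name:
        # write into a copy, leaving the current project file untouched
        copyfile(curr_file_name, out_file_name)
        return out_file_name
    # empty or identical path: update the current project file in place
    return curr_file_name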
Example #3
    def test_mask(self):
        """Test methods to read and write mask file

        Returns
        -------
        None
        """
        # Generate a HiDRA project file
        test_project_file = HidraProjectFile('test_mask.hdf',
                                             HidraProjectFileMode.OVERWRITE)

        # Create a detector mask
        pixel_mask = np.zeros(shape=(1024**2, ), dtype='int')
        pixel_mask += 1
        pixel_mask[123:345] = 0
        pixel_mask[21000:21019] = 0

        # Create a solid angle mask
        solid_mask = np.array([-20, -15, -10, -5, 5, 10, 15, 20])

        # Write detector masks: the default mask and a named ('test') mask
        test_project_file.write_mask_detector_array(None, pixel_mask)
        test_project_file.write_mask_detector_array('test', pixel_mask)

        # Write solid angle mask
        test_project_file.write_mask_solid_angle('test', solid_mask)

        # Save file
        test_project_file.save(True)

        # Open file again
        verify_project_file = HidraProjectFile('test_mask.hdf',
                                               HidraProjectFileMode.READONLY)

        # Read detector default mask
        default_pixel_mask = verify_project_file.read_default_masks()
        assert np.allclose(pixel_mask, default_pixel_mask, 1.E-12)

        # Read detector mask & compare
        verify_pixel_mask = verify_project_file.read_mask_detector_array(
            'test')
        assert np.allclose(pixel_mask, verify_pixel_mask, 1.E-12)

        # Test reading all user detector masks
        user_mask_dict = dict()
        verify_project_file.read_user_masks(user_mask_dict)
        assert list(user_mask_dict.keys())[0] == 'test'

        # Read solid angle mask & compare
        verify_solid_mask = verify_project_file.read_mask_solid_angle('test')
        assert np.allclose(solid_mask, verify_solid_mask, 1.E-2)

        # check name
        assert verify_project_file.name.endswith('test_mask.hdf')

        # Clean
        os.remove('test_mask.hdf')
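As an aside, the detector mask written above is just a flat numpy array of ones with two pixel ranges zeroed out. The standalone fragment below reproduces it without pyRS; treating 1 as "keep" and 0 as "masked" is the convention the test assumes, not something numpy enforces.

import numpy as np

# all pixels kept (value 1), then two ranges of pixels masked out (value 0)
pixel_mask = np.ones(1024 ** 2, dtype=int)
pixel_mask[123:345] = 0
pixel_mask[21000:21019] = 0
print(int(pixel_mask.sum()), 'of', pixel_mask.size, 'pixels kept')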
Example #4
    def save_fit_result(self, out_file_name=''):
        """Write the peak fit results back into the parent's current project file (out_file_name is currently unused)."""
        fit_result = self.parent.fit_result
        if fit_result is None:
            return

        current_project_file = self.parent._curr_file_name
        project_h5_file = HidraProjectFile(current_project_file,
                                           mode=HidraProjectFileMode.READWRITE)
        peakcollections = fit_result.peakcollections
        for peak in peakcollections:
            project_h5_file.write_peak_fit_result(peak)
        project_h5_file.save(False)
        project_h5_file.close()
Example #5
def test_wave_length_rw():
    """Test writing and reading for wave length

    Returns
    -------

    """
    # Set up for testing
    test_file_name = 'test_wave_length.h5'
    # Gold (expected) wave length value
    gold_wave_length = 1.23456

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.OVERWRITE)
    test_project_file.save(True)
    test_project_file.close()

    # Open file
    verify_project_file = HidraProjectFile(test_file_name,
                                           HidraProjectFileMode.READONLY)

    # Read wave length (nothing has been written yet)
    wave_length_test = verify_project_file.read_wavelengths()
    assert np.isnan(wave_length_test), 'Wave length should be NaN when none has been written'

    # Close
    verify_project_file.close()

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.READWRITE)

    # Write wave length
    test_project_file.write_wavelength(gold_wave_length)

    # Save and close
    test_project_file.save(True)
    test_project_file.close()

    # Open file again to verify
    verify_project_file2 = HidraProjectFile(test_file_name,
                                            HidraProjectFileMode.READONLY)

    # Read wave length back and verify
    wave_length_test = verify_project_file2.read_wavelengths()
    assert wave_length_test == gold_wave_length

    # Clean
    os.remove(test_file_name)
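A note on the np.isnan check above: a wave length that was never written comes back as NaN, and NaN never compares equal to anything, including itself, so an equality test would always fail. For example:

import numpy as np

assert np.nan != np.nan   # equality against NaN is always False
assert np.isnan(np.nan)   # so NaN has to be detected explicitly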
Example #6
def main():
    """
    Main method to convert diffraction data from the old HDF5 format into a HiDRA project file
    :return:
    """
    # Load source data in 'old' format
    source_h5 = 'tests/testdata/16-1_TD.cor_Log.h5'
    reader = rs_scan_io.DiffractionDataFile()
    diff_data_dict, sample_logs = reader.load_rs_file(source_h5)

    # Create a Hidra project
    target_project_file_name = 'tests/testdata/Hydra_16-1_cor_log.h5'
    target_file = HidraProjectFile(target_project_file_name, 'w')

    # Create sub runs
    target_file.write_sub_runs(sorted(diff_data_dict.keys()))

    # Add (reduced) diffraction data
    two_theta_vector = None
    diff_data_matrix = None

    # construct the matrix of intensities
    for sub_run_index, sub_run_number in enumerate(
            sorted(diff_data_dict.keys())):
        # the dictionary is keyed by sub-run number, not by enumeration index
        two_theta_vector_i, intensity_vector_i = diff_data_dict[sub_run_number]

        # create the data set on the first iteration
        if two_theta_vector is None:
            two_theta_vector = two_theta_vector_i
            diff_data_matrix = numpy.ndarray(
                shape=(len(diff_data_dict), intensity_vector_i.shape[0]),
                dtype='float')
        # END-IF

        # set vector
        diff_data_matrix[sub_run_index] = intensity_vector_i
    # END-FOR

    # Add data
    target_file.write_reduced_diffraction_data_set(two_theta_vector,
                                                   {None: diff_data_matrix})

    # Save
    target_file.save(verbose=True)

    return
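The loop above accumulates one intensity vector per sub run into a 2D matrix. The fragment below shows the same construction with plain numpy and a made-up diff_data_dict; all names and values here are illustrative only.

import numpy as np

diff_data_dict = {1: (np.linspace(60., 80., 5), np.array([1., 2., 3., 4., 5.])),
                  2: (np.linspace(60., 80., 5), np.array([2., 3., 4., 5., 6.]))}

sub_runs = sorted(diff_data_dict.keys())
two_theta_vector = diff_data_dict[sub_runs[0]][0]
# one row per sub run, one column per two-theta bin
diff_data_matrix = np.vstack([diff_data_dict[sub_run][1] for sub_run in sub_runs])
assert diff_data_matrix.shape == (len(sub_runs), two_theta_vector.shape[0])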
Example #7
    def save_reduced_diffraction(self, session_name, output_name):
        """
        Save the reduced diffraction data to file
        :param session_name: name of the reduction session whose workspace will be saved
        :param output_name: file path of the output HiDRA project file
        :return:
        """
        checkdatatypes.check_file_name(output_name, False, True, False,
                                       'Output reduced file')

        workspace = self._session_dict[session_name]

        # Open
        if os.path.exists(output_name):
            io_mode = HidraProjectFileMode.READWRITE
        else:
            io_mode = HidraProjectFileMode.OVERWRITE
        project_file = HidraProjectFile(output_name, io_mode)

        # Add the reduced diffraction data to the project
        workspace.save_reduced_diffraction_data(project_file)

        # Write the project file to disk
        project_file.save()
Example #8
def test_strain_io():
    """Test PeakCollection writing and reading with *D reference*

    Returns
    -------

    """
    # Generate a unique test file
    now = datetime.datetime.now()
    test_file_name = 'test_strain_io_{}.h5'.format(now.toordinal())
    test_ref_d = 1.23454321
    test_ref_d2 = np.array([1.23, 1.24, 1.25])
    peak_tag = 'Fake Peak D'
    peak_tag_2 = 'Fake Peak D Diff'

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.OVERWRITE)

    # Create a structured numpy array for the output parameters
    param_names = PeakShape.PSEUDOVOIGT.native_parameters + BackgroundFunction.LINEAR.native_parameters
    data_type = list()
    for param_name in param_names:
        data_type.append((param_name, np.float32))
    test_error_array = np.zeros(3, dtype=data_type)
    test_params_array = np.zeros(3, dtype=data_type)

    for i in range(3):
        # sub run
        for j, par_name in enumerate(param_names):
            test_params_array[par_name][i] = 2**i + 0.1 * 3**j
            test_error_array[par_name][i] = np.sqrt(
                abs(test_params_array[par_name][i]))
    # END-FOR
    chi2_array = np.array([0.323, 0.423, 0.523])

    # Add test data to output
    peaks = PeakCollection(peak_tag, PeakShape.PSEUDOVOIGT,
                           BackgroundFunction.LINEAR)
    peaks.set_peak_fitting_values(np.array([1, 2, 3]), test_params_array,
                                  test_error_array, chi2_array)
    peaks.set_d_reference(test_ref_d)

    # Add 2nd peak
    peaks2 = PeakCollection(peak_tag_2, PeakShape.PSEUDOVOIGT,
                            BackgroundFunction.LINEAR)
    peaks2.set_peak_fitting_values(np.array([1, 2, 3]), test_params_array,
                                   test_error_array, chi2_array)
    peaks2.set_d_reference(test_ref_d2)

    # Write
    test_project_file.write_peak_parameters(peaks)
    test_project_file.write_peak_parameters(peaks2)
    # Save
    test_project_file.save(verbose=False)

    # Verify
    assert os.path.exists(test_file_name), 'Test project file for peak fitting result {} cannot be found.' \
                                           ''.format(test_file_name)

    # import
    verify_project_file = HidraProjectFile(test_file_name,
                                           HidraProjectFileMode.READONLY)

    # check tags
    peak_tags = verify_project_file.read_peak_tags()
    assert peak_tag in peak_tags and peak_tag_2 in peak_tags
    assert len(peak_tags) == 2

    # Get d-reference of peak 1 to check
    peak_info = verify_project_file.read_peak_parameters(peak_tag)
    verify_d_ref = peak_info.get_d_reference()
    gold_ref_d = np.array([test_ref_d] * 3)
    np.testing.assert_allclose(verify_d_ref, gold_ref_d)

    # Get d-reference of peak 2 to check
    peak_info2 = verify_project_file.read_peak_parameters(peak_tag_2)
    verify_d_ref_2 = peak_info2.get_d_reference()
    np.testing.assert_allclose(verify_d_ref_2, test_ref_d2)

    # Clean
    os.remove(test_file_name)

    return
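Both this test and the next build their parameter tables as numpy structured arrays: one named field per native peak or background parameter, one row per sub run. The fragment below shows that pattern on its own; the field names are placeholders, not the actual native parameter names from pyRS.

import numpy as np

param_names = ['Intensity', 'FWHM']                  # stand-ins for the native parameter names
dtype = [(name, np.float32) for name in param_names]
values = np.zeros(3, dtype=dtype)                    # one row per sub run
values['FWHM'][1] = 0.3                              # index by field name first, then by sub-run position
print(values['FWHM'])                                # prints [0.  0.3 0. ]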
Example #9
def test_peak_fitting_result_io():
    """Test peak fitting result's writing and reading

    Returns
    -------

    """
    # Generate a unique test file
    now = datetime.datetime.now()
    test_file_name = 'test_peak_io_{}.hdf'.format(now.toordinal())

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.OVERWRITE)

    # Create a structured numpy array for the output parameters
    param_names = PeakShape.PSEUDOVOIGT.native_parameters + BackgroundFunction.LINEAR.native_parameters
    data_type = list()
    for param_name in param_names:
        data_type.append((param_name, np.float32))
    test_error_array = np.zeros(3, dtype=data_type)
    test_params_array = np.zeros(3, dtype=data_type)

    for i in range(3):
        # sub run
        for j, par_name in enumerate(param_names):
            test_params_array[par_name][i] = 2**i + 0.1 * 3**j
            test_error_array[par_name][i] = np.sqrt(
                abs(test_params_array[par_name][i]))
    # END-FOR
    chi2_array = np.array([0.323, 0.423, 0.523])

    # Add test data to output
    peaks = PeakCollection('test fake', PeakShape.PSEUDOVOIGT,
                           BackgroundFunction.LINEAR)
    peaks.set_peak_fitting_values(np.array([1, 2, 3]), test_params_array,
                                  test_error_array, chi2_array)

    test_project_file.write_peak_parameters(peaks)

    test_project_file.save(False)

    # Check
    assert os.path.exists(test_file_name), 'Test project file for peak fitting result {} cannot be found.' \
                                           ''.format(test_file_name)
    print('[INFO] Peak parameter test project file: {}'.format(test_file_name))

    # Import
    verify_project_file = HidraProjectFile(test_file_name,
                                           HidraProjectFileMode.READONLY)

    # get the tags
    peak_tags = verify_project_file.read_peak_tags()
    assert 'test fake' in peak_tags
    assert len(peak_tags) == 1

    # get the parameters of a certain peak
    peak_info = verify_project_file.read_peak_parameters('test fake')

    # peak profile
    assert peak_info.peak_profile == str(PeakShape.PSEUDOVOIGT)
    assert peak_info.background_type == str(BackgroundFunction.LINEAR)

    # sub runs
    assert np.allclose(peak_info.sub_runs, np.array([1, 2, 3]))

    # parameter values
    # print('DEBUG:\n  Expected: {}\n  Found: {}'.format(test_params_array, peak_info[3]))
    peak_values, peak_errors = peak_info.get_native_params()
    assert_allclose_structured_numpy_arrays(test_params_array, peak_values)
    # np.testing.assert_allclose(peak_info[3], test_params_array, atol=1E-12)

    # parameter errors
    # assert np.allclose(peak_info[4], test_error_array, 1E-12)
    assert_allclose_structured_numpy_arrays(test_error_array, peak_errors)

    # Clean
    os.remove(test_file_name)

    return
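assert_allclose_structured_numpy_arrays is a helper from the pyRS test utilities and is not shown in these examples. A minimal sketch of a field-by-field comparison of that kind, assuming it only needs to handle flat structured arrays, might look like:

import numpy as np

def assert_allclose_structured(expected, found, rtol=1.e-5):
    # illustrative only, not the actual pyRS helper
    assert expected.dtype.names == found.dtype.names
    for name in expected.dtype.names:
        np.testing.assert_allclose(expected[name], found[name], rtol=rtol)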