Code Example #1
    def save_fit_result(self, out_file_name=''):
        """Save the fit result, including a copy of the rest of the file if it does not exist at the specified path.

        If out_file_name is empty or if it matches the parent's current file, this updates the file.

        Otherwise, the parent's file is copied to out_file_name and
        then the updated peak fit data is written to the copy.

        :param out_file_name: string absolute fill path for the place to save the file

        """

        fit_result = self.parent.fit_result
        if fit_result is None:
            return

        if out_file_name and self.parent._curr_file_name != out_file_name:
            copyfile(self.parent._curr_file_name, out_file_name)
            current_project_file = out_file_name
        else:
            current_project_file = self.parent._curr_file_name

        project_h5_file = HidraProjectFile(current_project_file,
                                           mode=HidraProjectFileMode.READWRITE)
        peakcollections = fit_result.peakcollections
        for peak in peakcollections:
            project_h5_file.write_peak_parameters(peak)
        project_h5_file.save(False)
        project_h5_file.close()
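
For reference, the copy-then-update pattern above can be written standalone. This is a minimal sketch, assuming the pyrs package is installed; the import path, the file names, and the pre-existing fit_result object are assumptions made for illustration.

from shutil import copyfile

from pyrs.projectfile import HidraProjectFile, HidraProjectFileMode  # import path is an assumption

# Preserve the original project by copying it, then write the updated peaks into the copy
copyfile('HB2B_1234.h5', 'HB2B_1234_with_peaks.h5')  # hypothetical file names
project = HidraProjectFile('HB2B_1234_with_peaks.h5',
                           mode=HidraProjectFileMode.READWRITE)
for peak in fit_result.peakcollections:  # fit_result is assumed to exist, as in the handler above
    project.write_peak_parameters(peak)
project.save(False)
project.close()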
Code Example #2
    def test_detector_efficiency(self):
        """
        Test methods to read and write detector efficiency

        Returns
        -------
        None
        """
        # Generate a HiDRA project file
        test_project_file = HidraProjectFile('test_efficient.hdf',
                                             HidraProjectFileMode.OVERWRITE)

        # Create a detector efficiency array
        mock_test_run_number = 12345
        efficient_array = np.random.random_sample(1024**2)

        # Write to file
        test_project_file.write_efficiency_correction(mock_test_run_number,
                                                      efficient_array)

        # Close file
        test_project_file.close()

        # Open file again
        verify_project_file = HidraProjectFile('test_efficient.hdf',
                                               HidraProjectFileMode.READONLY)

        # Read detector efficiency & compare
        verify_eff_array = verify_project_file.read_efficiency_correction()

        # Check
        assert np.allclose(efficient_array, verify_eff_array, rtol=1E-12)

        # Clean
        os.remove('test_efficient.hdf')
Code Example #3
    def load_vanadium(self, van_project_file):
        """Load vanadium from HiDRA project file

        Parameters
        ----------
        van_project_file : str
            vanadium HiDRA project file or NeXus file

        Returns
        -------
        ~numpy.ndarray, float
            1D array of vanadium counts, and the duration of the vanadium run in seconds

        """
        checkdatatypes.check_file_name(van_project_file, True, False, False,
                                       'Vanadium project/NeXus file')

        if van_project_file.endswith('.nxs.h5'):
            # Input is nexus file
            # reduce with PyRS/Python
            converter = NeXusConvertingApp(van_project_file,
                                           mask_file_name=None)
            self._van_ws = converter.convert(use_mantid=False)

        else:
            # Input is HiDRA project file
            self._van_ws = workspaces.HidraWorkspace(name=van_project_file)

            # PyRS HDF5
            project_h5_file = HidraProjectFile(
                van_project_file, mode=HidraProjectFileMode.READONLY)

            # Load
            self._van_ws.load_hidra_project(project_h5_file,
                                            load_raw_counts=True,
                                            load_reduced_diffraction=False)

            # Close project file
            project_h5_file.close()

        # Process the vanadium counts
        sub_runs = self._van_ws.get_sub_runs()
        assert len(sub_runs) == 1, \
            'There shall be exactly one sub run in the vanadium project file'

        # get vanadium data
        van_array = self._van_ws.get_detector_counts(sub_runs[0]).astype(
            np.float64)

        # get vanadium run duration
        van_duration = self._van_ws.get_sample_log_value(
            HidraConstants.SUB_RUN_DURATION, sub_runs[0])

        return van_array, van_duration
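
A short usage sketch of load_vanadium; the reducer object and file name are hypothetical, and normalizing by the run duration is shown only as a typical follow-up step, not as part of the PyRS API.

# Hypothetical usage (object and file names are illustrative):
van_counts, van_duration = reducer.load_vanadium('HB2B_vanadium.nxs.h5')
# Typical follow-up: normalize the vanadium counts by the run duration (seconds)
van_counts_per_second = van_counts / van_duration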
Code Example #4
File: event_handler.py  Project: williamfgc/PyRS
    def save_fit_result(self, out_file_name=''):
        fit_result = self.parent.fit_result
        if fit_result is None:
            return

        current_project_file = self.parent._curr_file_name
        project_h5_file = HidraProjectFile(current_project_file,
                                           mode=HidraProjectFileMode.READWRITE)
        peakcollections = fit_result.peakcollections
        for peak in peakcollections:
            project_h5_file.write_peak_fit_result(peak)
        project_h5_file.save(False)
        project_h5_file.close()
Code Example #5
def test_exclude_subruns(nexusfile, projectfile):
    """Test converting NeXus to project and convert to diffraction pattern

    Note: project file cannot be the same as NeXus file as the output file will be
    removed by pytest

    Parameters
    ----------
    nexusfile
    projectfile

    Returns
    -------

    """
    sub_runs = [2, 4, 5]

    # convert the nexus file to a project file and do the "simple" checks
    converter = NeXusConvertingApp(nexusfile, None)
    hidra_ws = converter.convert()

    reducer = ReductionApp()
    reducer.load_hidra_workspace(hidra_ws)

    reducer.reduce_data(instrument_file=None,
                        calibration_file=None,
                        mask=None,
                        sub_runs=sub_runs,
                        van_file=None)

    reducer.save_diffraction_data(projectfile)

    reduced_ws = HidraWorkspace('test_powder_pattern')
    reduced_project = HidraProjectFile(projectfile)
    reduced_ws.load_hidra_project(reduced_project, load_raw_counts=False, load_reduced_diffraction=True)

    assert sub_runs == reduced_ws.get_sub_runs()

    reducer.reduce_data(instrument_file=None,
                        calibration_file=None,
                        mask=None,
                        sub_runs=[],
                        van_file=None)

    for sub_run in sub_runs:
        np.testing.assert_allclose(reducer.get_diffraction_data(sub_run),
                                   reduced_ws.get_reduced_diffraction_data(sub_run))

    # cleanup
    reduced_project.close()
    os.remove(projectfile)
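
The nexusfile and projectfile arguments above are supplied by pytest; the sketch below shows one way such a test is typically parametrized. The decorator usage is standard pytest, but the file paths are hypothetical and not taken from the PyRS test suite.

import pytest

@pytest.mark.parametrize('nexusfile, projectfile',
                         [('data/HB2B_1017.nxs.h5', 'HB2B_1017_exclude_subruns.h5')])  # hypothetical paths
def test_exclude_subruns(nexusfile, projectfile):
    ...  # body as in Code Example #5 above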
Code Example #6
    def save_peak_fit_result(self,
                             project_name,
                             hidra_file_name,
                             peak_tag,
                             overwrite=True):
        """ Save the result from peak fitting to HiDRA project file
        Parameters
        ----------
        project_name: str
            name of peak fitting session
        hidra_file_name: String
            project file to export peaks fitting result to
        peak_tag : str
            peak tag
        overwrite: bool
            Flag to append to an existing file or overwrite it

        Returns
        -------

        """
        if project_name is None:
            optimizer = self._peak_fit_engine
        else:
            optimizer = self._peak_fitting_dict[project_name]

        # Determine the file IO mode
        if os.path.exists(hidra_file_name) and overwrite is False:
            # file exists and user does not want overwrite: READWRITE mode
            file_mode = HidraProjectFileMode.READWRITE
        else:
            # starting as a new file
            file_mode = HidraProjectFileMode.OVERWRITE

        # Create HiDRA project file
        hidra_project_file = HidraProjectFile(hidra_file_name, file_mode)
        # Export peaks
        optimizer.export_to_hydra_project(hidra_project_file, peak_tag)
        # Close
        hidra_project_file.close()

        return
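
A hedged usage sketch of save_peak_fit_result; the controller object, session name, peak tag, and file names are hypothetical.

# Hypothetical calls (names are illustrative):
# append the fitted peaks to an existing project file (READWRITE mode)
controller.save_peak_fit_result('peak_fit_session', 'HB2B_1234.h5',
                                peak_tag='Si111', overwrite=False)
# or start a fresh project file for the fit results (OVERWRITE mode)
controller.save_peak_fit_result('peak_fit_session', 'HB2B_1234_peaks.h5',
                                peak_tag='Si111', overwrite=True)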
Code Example #7
    def load_hidra_project(self, project_file_name, load_calibrated_instrument,
                           load_detectors_counts, load_reduced_diffraction):
        """ Load hidra project file and then CLOSE!
        :param project_file_name:
        :param load_calibrated_instrument:
        :param load_detectors_counts: Flag to load detector counts
        :param load_reduced_diffraction: Flag to reduced diffraction data
        :return: HidraWorkspace instance
        """
        # check inputs
        checkdatatypes.check_file_name(project_file_name, True, False, False,
                                       'Project file to load')

        # Check
        if self._curr_workspace is None:
            raise RuntimeError(
                'Call init_session to create a ReductionWorkspace')

        # PyRS HDF5
        # Check permission of file to determine the RW mode of HidraProject file
        if os.access(project_file_name, os.W_OK):
            # Read/Write: Append mode
            file_mode = HidraProjectFileMode.READWRITE
        else:
            # Read only
            file_mode = HidraProjectFileMode.READONLY
        project_h5_file = HidraProjectFile(project_file_name, mode=file_mode)

        # Load
        self._curr_workspace.load_hidra_project(
            project_h5_file,
            load_raw_counts=load_detectors_counts,
            load_reduced_diffraction=load_reduced_diffraction)

        # Close
        project_h5_file.close()
        return self._curr_workspace
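
A hedged sketch of the call sequence implied above; the manager object, file name, and the init_session arguments are assumptions based only on the RuntimeError message in the code.

# Hypothetical call sequence (names and the init_session signature are assumptions):
manager.init_session('HB2B_1234')
hidra_ws = manager.load_hidra_project('HB2B_1234.h5',
                                      load_calibrated_instrument=False,
                                      load_detectors_counts=True,
                                      load_reduced_diffraction=True)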
Code Example #8
def test_wave_length_rw():
    """Test writing and reading for wave length

    Returns
    -------

    """
    # Set up for testing
    test_file_name = 'test_wave_length.h5'
    # Reference wavelength to write and read back
    gold_wave_length = 1.23456

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.OVERWRITE)
    test_project_file.save(True)
    test_project_file.close()

    # Open file
    verify_project_file = HidraProjectFile(test_file_name,
                                           HidraProjectFileMode.READONLY)

    # Read wavelength (nothing has been written yet)
    wave_length_test = verify_project_file.read_wavelengths()
    assert np.isnan(wave_length_test), 'Expected NaN because no wavelength has been written yet'

    # Close
    verify_project_file.close()

    # Generate a HiDRA project file
    test_project_file = HidraProjectFile(test_file_name,
                                         HidraProjectFileMode.READWRITE)

    # Write wave length
    test_project_file.write_wavelength(gold_wave_length)

    # Save and close
    test_project_file.save(True)
    test_project_file.close()

    # Open file again to verify
    verify_project_file2 = HidraProjectFile(test_file_name,
                                            HidraProjectFileMode.READONLY)

    # Read back the wavelength that was just written
    wave_length_test = verify_project_file2.read_wavelengths()
    assert wave_length_test == gold_wave_length

    # Clean
    os.remove(test_file_name)
Code Example #9
def test_powder_pattern_service(project_file_name, mask_file_name, gold_file):
    """Test the powder pattern calculator (service) with HB2B-specific reduction routine

    Parameters
    ----------
    project_file_name
    mask_file_name
    gold_file

    Returns
    -------

    """
    if mask_file_name is not None:
        pytest.skip('Not Ready Yet for Masking')

    # load gold file
    gold_data_dict = parse_gold_file(gold_file)

    # Parse input file
    test_ws = HidraWorkspace('test_powder_pattern')
    test_project = HidraProjectFile(project_file_name)
    test_ws.load_hidra_project(test_project,
                               load_raw_counts=True,
                               load_reduced_diffraction=False)
    test_project.close()

    # Start reduction service
    pyrs_service = HB2BReductionManager()
    pyrs_service.init_session(session_name='test_powder', hidra_ws=test_ws)

    # Reduce raw counts
    pyrs_service.reduce_diffraction_data('test_powder',
                                         False,
                                         1000,
                                         sub_run_list=None,
                                         mask=mask_file_name,
                                         mask_id=None,
                                         vanadium_counts=None,
                                         normalize_by_duration=False)

    # Get sub runs
    sub_runs = test_ws.get_sub_runs()

    for index, sub_run_i in enumerate(sub_runs):
        # Get gold data of pattern (i).
        gold_data_i = gold_data_dict[str(sub_run_i)]

        # Get powder data of pattern (i).
        pattern = pyrs_service.get_reduced_diffraction_data(
            'test_powder', sub_run_i)
        # data_dict[str(sub_run_i)] = pattern

        # validate correct two-theta reduction
        np.testing.assert_allclose(pattern[0],
                                   gold_data_dict[str(sub_run_i)][0],
                                   rtol=1E-8)

        # remove NaN intensity arrays
        pattern[1][np.where(np.isnan(pattern[1]))] = 0.
        gold_data_i[1][np.where(np.isnan(gold_data_i[1]))] = 0.

        # validate correct intensity reduction
        np.testing.assert_allclose(pattern[1],
                                   gold_data_i[1],
                                   rtol=1E-8,
                                   equal_nan=True)
Code Example #10
def test_powder_pattern_engine(project_file_name, mask_file_name, gold_file):
    """Test the powder pattern calculator (service) with HB2B-specific reduction routine

    Parameters
    ----------
    project_file_name
    mask_file_name
    gold_file

    Returns
    -------

    """
    if mask_file_name is not None:
        pytest.skip('Masking is not implemented yet')

    # Parse input file
    test_ws = HidraWorkspace('test_powder_pattern')
    test_project = HidraProjectFile(project_file_name)
    test_ws.load_hidra_project(test_project,
                               load_raw_counts=True,
                               load_reduced_diffraction=False)
    test_project.close()

    # Sub runs
    sub_runs = test_ws.get_sub_runs()

    # Import gold file
    gold_pattern = parse_gold_file(gold_file)

    data_dict = dict()

    # Start reduction service
    pyrs_service = HB2BReductionManager()
    pyrs_service.init_session(session_name='test_powder', hidra_ws=test_ws)

    # Reduce raw counts
    pyrs_service.reduce_diffraction_data('test_powder',
                                         False,
                                         1000,
                                         sub_run_list=None,
                                         mask=mask_file_name,
                                         mask_id=None,
                                         vanadium_counts=None,
                                         normalize_by_duration=False)

    for index, sub_run_i in enumerate(sub_runs):
        # Get gold data of pattern (i).
        gold_data_i = gold_pattern[str(sub_run_i)]

        # Get powder data of pattern (i).
        pattern = pyrs_service.get_reduced_diffraction_data(
            'test_powder', sub_run_i)

        # ensure NaN are removed
        gold_data_i[1][np.where(np.isnan(gold_data_i[1]))] = 0.
        pattern[1][np.where(np.isnan(pattern[1]))] = 0.

        # Verify
        np.testing.assert_allclose(pattern[1], gold_data_i[1], rtol=1E-8)

        data_dict[str(sub_run_i)] = pattern


#    if mask_file_name:
#        name = 'data/HB2B_1017_Mask_Gold.h5'
#    else:
#        name = 'data/HB2B_1017_NoMask_Gold.h5'
#    write_gold_file(name, data_dict)

    return