예제 #1
0
def save_mantid_mask(mask_vec, h5_name, two_theta, note):
    """Save a mask vector to an HDF5 file in PyRS format.

    :param mask_vec: 1D numpy array with the mask values
    :param h5_name: name of the HDF5 file to write
    :param two_theta: optional 2-theta value (degrees) stored as a dataset attribute
    :param note: optional user note stored as a dataset attribute
    :return: None
    """
    checkdatatypes.check_numpy_arrays('Mask vector', [mask_vec],
                                      dimension=1,
                                      check_same_shape=False)
    checkdatatypes.check_file_name(h5_name, False, True, False,
                                   'PyRS masking file to export to')
    if two_theta is not None:
        checkdatatypes.check_float_variable('2-theta', two_theta, (-360., 360))
    if note is not None:
        checkdatatypes.check_string_variable('Mask note', note, None)

    # create file; the context manager guarantees the file is closed even if
    # an attribute write fails
    with h5py.File(h5_name, 'w') as mask_file:
        # add data set
        mask_data_set = mask_file.create_dataset('mask', data=mask_vec)
        # add attributes; use "is not None" (matching the validation above) so
        # a legitimate two_theta of 0.0 and an empty-string note are still
        # recorded (the previous truthiness tests silently dropped them)
        if two_theta is not None:
            mask_data_set.attrs['2theta'] = two_theta
        if note is not None:
            mask_data_set.attrs['note'] = note

    return
예제 #2
0
    def __load_mask(self, mask_file_name):
        """Load a Mantid mask XML file and cache it as a 0/1 pixel array.

        Sets ``self.mask_array`` where 1 means keep the pixel and 0 means
        discard it.

        :param mask_file_name: name of the Mantid XML mask file
        """
        # validate input file and make sure the reference workspace exists
        checkdatatypes.check_file_name(mask_file_name, True, False, False,
                                       'Mask XML file')
        if self._event_wksp is None:
            raise RuntimeError(
                'Meta data only workspace {} does not exist'.format(
                    self._event_ws_name))

        # load the XML mask into a temporary Mantid workspace named after the file
        temp_ws_name = os.path.basename(mask_file_name.split('.')[0])
        temp_mask_ws = LoadMask(Instrument='nrsf2',
                                InputFile=mask_file_name,
                                RefWorkspace=self._event_wksp,
                                OutputWorkspace=temp_ws_name)

        # Y array from the mask workspace has shape (num_pixels, 1); flatten it.
        # Mantid convention is 1 = delete, 0 = keep; invert so the array can be
        # used multiplicatively (0 = delete, 1 = keep).
        raw_mask = temp_mask_ws.extractY().flatten()
        self.mask_array = 1 - raw_mask.astype(int)

        # the temporary workspace is no longer needed
        DeleteWorkspace(Workspace=temp_ws_name)
예제 #3
0
def load_pyrs_mask(mask_h5):
    """Load an HDF5 mask file written by PyRS.

    :param mask_h5: name of the HDF5 mask file
    :return: 3-tuple (mask vector, two theta or None, user note or None)
    """
    checkdatatypes.check_file_name(mask_h5, True, False, False,
                                   'PyRS mask file (hdf5) to load')

    # open; the context manager fixes the original resource leak (the file
    # handle was never closed)
    with h5py.File(mask_h5, 'r') as mask_file:
        # check
        if 'mask' not in mask_file:
            raise RuntimeError('{} does not have entry "mask"'.format(mask_h5))

        # read the mask array into memory before the file is closed
        mask_entry = mask_file['mask']
        mask_vec = mask_entry[()]

        # optional attributes: 2theta (numpy.float64) and user note;
        # None means the mask/ROI is not 2theta-dependent / has no note
        two_theta = mask_entry.attrs.get('2theta', None)
        note = mask_entry.attrs.get('note', None)

    return mask_vec, two_theta, note
예제 #4
0
    def save(self, projectfile):
        """
        Save workspace to Hidra project file
        """
        # use an absolute path so log messages are unambiguous
        projectfile = os.path.abspath(projectfile)
        checkdatatypes.check_file_name(
            projectfile,
            check_exist=False,
            check_writable=True,
            is_dir=False,
            description='Converted Hidra project file')

        # an existing file is removed so the project is written from scratch
        if os.path.exists(projectfile):
            message = 'Projectfile "{}" exists, removing previous version'.format(
                projectfile)
            self._log.information(message)
            os.remove(projectfile)

        # open the project file in overwrite mode
        hydra_file = HidraProjectFile(projectfile,
                                      HidraProjectFileMode.OVERWRITE)

        # write the instrument geometry first, then the detector counts
        instrument_setup = HidraSetup(self._hidra_workspace.get_instrument_setup())
        hydra_file.write_instrument_geometry(instrument_setup)
        self._hidra_workspace.save_experimental_data(hydra_file)
예제 #5
0
def write_calibration_ascii_file(two_theta, arm_length, calib_config, note,
                                 geom_file_name):
    """Write a geometry ascii file as standard

    Parameters
    ----------
    two_theta : float
        detector arm 2theta position (degree)
    arm_length : float
        detector arm length (meter)
    calib_config
        calibration object providing shift_x, shift_y, arm_calibration,
        tilt_x, rotation_y and spin_z attributes
    note : str
        free-form comment written as the first (comment) line
    geom_file_name : str
        name of the output ASCII file

    Returns
    -------
    None

    """
    checkdatatypes.check_file_name(
        geom_file_name, False, True, False,
        'Output geometry configuration file in ASCII')

    # build the whole file content in memory, then write it in one call
    wbuf = '# {}\n'.format(note)
    wbuf += '2theta = {}\n'.format(two_theta)
    wbuf += 'arm = {} meter\n'.format(arm_length)
    wbuf += 'cal::shift_x = {} meter\n'.format(calib_config.shift_x)
    wbuf += 'cal::shift_y = {} meter\n'.format(calib_config.shift_y)
    wbuf += 'cal::arm = {} meter\n'.format(calib_config.arm_calibration)
    wbuf += 'cal::rot_x = {} degree\n'.format(calib_config.tilt_x)
    wbuf += 'cal::rot_y = {} degree\n'.format(calib_config.rotation_y)
    wbuf += 'cal::rot_z = {} degree\n'.format(calib_config.spin_z)

    # context manager guarantees the file is closed even if the write fails
    with open(geom_file_name, 'w') as out_file:
        out_file.write(wbuf)

    return
예제 #6
0
    def _checkFileAccess(self):
        '''Verify the file has the correct access permissions and set the value of ``self._is_writable``
        '''
        # the file must already exist for the read and read/write modes
        check_exist = self._io_mode in (HidraProjectFileMode.READONLY,
                                        HidraProjectFileMode.READWRITE)
        # every mode except read-only needs write permission
        self._is_writable = (self._io_mode != HidraProjectFileMode.READONLY)

        # create a custom message based on requested access mode
        if self._io_mode == HidraProjectFileMode.READONLY:
            description = 'Read-only project file'
        elif self._io_mode == HidraProjectFileMode.OVERWRITE:
            description = 'Write-only project file'
        elif self._io_mode == HidraProjectFileMode.READWRITE:
            description = 'Append-mode project file'
        else:  # this should never happen
            # fix: report the offending mode value (previously the enum class
            # itself was formatted into the message, hiding the actual mode)
            raise RuntimeError(
                'Hidra project file I/O mode {} is not supported'.format(
                    self._io_mode))

        # convert the filename to an absolute path so error messages are clearer
        self._file_name = os.path.abspath(self._file_name)

        # do the check
        checkdatatypes.check_file_name(self._file_name,
                                       check_exist,
                                       self._is_writable,
                                       is_dir=False,
                                       description=description)
예제 #7
0
    def export_peak_fit(src_rs_file_name, target_rs_file_name, peak_fit_dict):
        """
        export peak fitting result to a RS (residual stress) intermediate file
        :param src_rs_file_name: existing source RS file
        :param target_rs_file_name: target RS file to receive the fit results
        :param peak_fit_dict: fit results keyed by integer scan log index
        :return:
        """
        # validate inputs
        checkdatatypes.check_file_name(src_rs_file_name, check_exist=True)
        checkdatatypes.check_file_name(target_rs_file_name,
                                       check_writable=True,
                                       check_exist=False)

        # work on a copy unless the target is the source file itself
        if src_rs_file_name != target_rs_file_name:
            copyfile(src_rs_file_name, target_rs_file_name)

        # append a 'peak_fit' group to each scan-log entry of the target file
        target_file = h5py.File(target_rs_file_name, 'r+')
        diff_entry = target_file['Diffraction Data']
        for scan_log_key in diff_entry.keys():
            # keys look like 'Log <index>'
            scan_log_index = int(scan_log_key.split()[1])
            fit_info_i = peak_fit_dict[scan_log_index]
            # create the group and fill it with the fitted parameters
            peak_fit_group = diff_entry[scan_log_key].create_group('peak_fit')
            for parameter_name in fit_info_i:
                peak_fit_group[parameter_name] = fit_info_i[parameter_name]

        target_file.close()

        return
예제 #8
0
    def save_rs_file(self, file_name):
        """ Save raw detector counts to HB2B/RS standard HDF5 file
        :param file_name: name of the output HDF5 file
        :return:
        """
        checkdatatypes.check_file_name(file_name, False, True, False,
                                       'Raw data file to save')

        # both the counts and the 2theta value must have been set before export
        if self._counts is None or self._two_theta[0] is None:
            raise RuntimeError('Data has not been set up right')

        out_h5 = h5py.File(file_name, 'w')

        # raw detector counts under 'raw/counts'
        out_h5.create_group('raw').create_dataset('counts', data=self._counts)

        # instrument information: detector shape plus 2theta value and unit
        instrument_group = out_h5.create_group('instrument')
        instrument_group.create_dataset('shape',
                                        data=numpy.array(self._det_shape))
        instrument_group['2theta'] = self._two_theta[0]
        instrument_group['2theta unit'] = self._two_theta[1]

        out_h5.close()

        return
예제 #9
0
def import_calibration_info_file(cal_info_file):
    """Import a calibration information HDF5 file.

    :param cal_info_file: name of the calibration information file
    :return: dict of dict: [wavelength entry name][calibration date] -> calibration file name
    """
    checkdatatypes.check_file_name(
        cal_info_file,
        check_exist=True,
        check_writable=False,
        is_dir=False,
        description='HB2B calibration information file')

    cal_info_table = dict()

    # fix: the keyword was misspelled 'mdoe', which made h5py.File raise a
    # TypeError before anything could be read
    cal_h5 = h5py.File(cal_info_file, mode='r')
    # iterating an h5py File yields entry *names* (strings), so the entry
    # object must be looked up explicitly (the previous code called .name on
    # the string key)
    for entry_name in cal_h5:
        # each entry corresponds to one wavelength
        wavelength_entry = cal_h5[entry_name]
        cal_info_table[entry_name] = dict()

        # TODO - NEED TO FIND OUT HOW TO PARSE 2 Column Value
        # note: the loop variable no longer shadows the open file handle,
        # which previously made the close() below act on the wrong object
        for cal_date, cal_file_name in wavelength_entry.value:
            cal_info_table[entry_name][cal_date] = cal_file_name
        # END-FOR
    # END-FOR

    cal_h5.close()

    return cal_info_table
예제 #10
0
    def set_output_dir(self, output_dir):
        """
        set the directory for output data
        :param output_dir: path to an existing, writable directory
        :return:
        """
        # the directory must exist and be writable
        checkdatatypes.check_file_name(output_dir, True, True, True,
                                       'Output directory')
        self._output_directory = output_dir
예제 #11
0
    def load_vanadium(self, van_project_file):
        """Load vanadium from HiDRA project file

        Parameters
        ----------
        van_project_file : str
            vanadium HiDRA project file or NeXus file

        Returns
        -------
        ~numpy.narray, float
            1D array as vanadium counts and duration of vanadium run (second)

        """
        checkdatatypes.check_file_name(van_project_file, True, False, False,
                                       'Vanadium project/NeXus file')

        if van_project_file.endswith('.nxs.h5'):
            # Input is nexus file: reduce with PyRS/Python
            converter = NeXusConvertingApp(van_project_file,
                                           mask_file_name=None)
            self._van_ws = converter.convert(use_mantid=False)

        else:
            # Input is HiDRA project file
            self._van_ws = workspaces.HidraWorkspace(name=van_project_file)

            # PyRS HDF5
            project_h5_file = HidraProjectFile(
                van_project_file, mode=HidraProjectFileMode.READONLY)

            # Load
            self._van_ws.load_hidra_project(project_h5_file,
                                            load_raw_counts=True,
                                            load_reduced_diffraction=False)

            # Close project file
            project_h5_file.close()

        # Process the vanadium counts: a vanadium project must contain exactly
        # one sub run.  (The message previously said "more than 1 sub run",
        # contradicting the == 1 condition being asserted.)
        sub_runs = self._van_ws.get_sub_runs()
        assert len(sub_runs) == 1, \
            'There shall be exactly 1 sub run in vanadium project file'

        # get vanadium data
        van_array = self._van_ws.get_detector_counts(sub_runs[0]).astype(
            np.float64)

        # get vanadium run duration
        van_duration = self._van_ws.get_sample_log_value(
            HidraConstants.SUB_RUN_DURATION, sub_runs[0])

        return van_array, van_duration
예제 #12
0
File: pyrscore.py  Project: williamfgc/PyRS
    def working_dir(self, user_dir):
        """
        set working directory
        :param user_dir: directory to use as the working directory
        :return:
        """
        # the directory only needs to exist; write access is not required
        checkdatatypes.check_file_name(user_dir,
                                       check_writable=False,
                                       is_dir=True)

        self._working_dir = user_dir

        return
예제 #13
0
    def to_json(self, file_name):
        """Convert the instrument setup to a dictionary and dump it as JSON.

        :param file_name: name of the JSON file to write
        :return:
        """
        checkdatatypes.check_file_name(file_name, False, True, False, 'Json file name to export instrument setup')

        # construct dictionary
        instrument_dict = self.convert_to_dict()

        # context manager guarantees the file is closed even if json.dump
        # raises (the previous open/dump/close left the handle open on error)
        with open(file_name, 'w') as jfile:
            json.dump(instrument_dict, jfile)
예제 #14
0
def import_calibration_ascii_file(geometry_file_name):
    """Import a geometry setup file.

    Example file content::

        arm = 0.95
        cal::arm = 0.
        cal::shiftx = 0.
        cal::shifty = 0.1

    :param geometry_file_name: geometry configuration file in ASCII
    :return: calibration instance (AnglerCameraDetectorShift)
    :raises RuntimeError: if a configuration item is not recognized
    """
    checkdatatypes.check_file_name(geometry_file_name, True, False, False,
                                   'Geometry configuration file in ASCII')

    # init output
    calibration_setup = AnglerCameraDetectorShift(0, 0, 0, 0, 0, 0)

    # context manager guarantees the file is closed even on a read error
    with open(geometry_file_name, 'r') as calibration_file:
        geom_lines = calibration_file.readlines()

    for line in geom_lines:
        line = line.strip()

        # skip empty or comment line
        if line == '' or line.startswith('#'):
            continue

        # lines look like "<name> = <value>"
        terms = line.replace('=', ' ').split()
        config_name = terms[0].strip().lower()
        config_value = float(terms[1].strip())

        if config_name == 'cal::shift_x':
            calibration_setup.center_shift_x = config_value
        elif config_name == 'cal::shift_y':
            calibration_setup.center_shift_y = config_value
        elif config_name == 'cal::arm':
            calibration_setup.arm_calibration = config_value
        elif config_name == 'cal::rot_x':
            calibration_setup.rotation_x = config_value
        elif config_name == 'cal::rot_y':
            # fix: this branch previously assigned rotation_x, silently
            # overwriting the rot_x value and leaving rotation_y untouched
            calibration_setup.rotation_y = config_value
        elif config_name == 'cal::rot_z':
            calibration_setup.rotation_z = config_value
        else:
            raise RuntimeError(
                'Instrument geometry setup item {} is not recognized and supported.'
                .format(config_name))

    return calibration_setup
예제 #15
0
    def from_json(self, file_name):
        """ Convert from a Json string (dictionary) and set to parameters
        :param file_name: json file name
        :return:
        """
        checkdatatypes.check_file_name(file_name, True, False, False, 'Json file name to import instrument setup')

        # parse the file directly with json.load; the context manager closes
        # the file handle (previously left open) and replaces the manual
        # line-by-line string concatenation
        with open(file_name, 'r') as json_file:
            instrument_dict = json.load(json_file)

        self.set_from_dict(instrument_dict)
예제 #16
0
    def export_mask(self, mask_id, out_file, note):
        """
        export mask to HDF5 (PyRS format)
        :param mask_id: ID of the mask to export
        :param out_file: name of the output HDF5 file
        :param note: user note saved with the mask
        :return:
        """
        # validate inputs
        checkdatatypes.check_file_name(out_file, False, True, False,
                                       'Output hdf5 file name')
        checkdatatypes.check_string_variable('Mask note', note)

        # look the mask up and delegate the actual writing to mask_util
        mask_util.save_mantid_mask(self._mask_array_dict[mask_id], out_file,
                                   self._2theta, note)

        return
예제 #17
0
def update_calibration_info_file(cal_info_file, cal_info_table, append):
    """ Search archive in order to keep calibration up-to-date
    if in append mode, the additional information will be written to an existing calibration information hdf5 file
    otherwise, From scratch, a calibration information file will be created
    :param cal_info_file: calibration information file name
    :param cal_info_table: calibration information to append to calibration information file
    :param append: flag whether the mode is append or new
    :return:
    """
    # check inputs (fix: the descriptions were swapped between the append and
    # create branches)
    if append:
        checkdatatypes.check_file_name(
            cal_info_file, True, True, False,
            'Calibration information file to append')
    else:
        checkdatatypes.check_file_name(
            cal_info_file, False, True, False,
            'Calibration information file to create')
    checkdatatypes.check_dict('Calibration information table', cal_info_table)
    checkdatatypes.check_bool_variable('Append mode', append)

    # open file
    if append:
        # fix: the keyword was misspelled 'mdoe' and 'rw' is not a valid h5py
        # mode; 'a' opens the existing file for read/write
        cal_info_file = h5py.File(cal_info_file, mode='a')
    else:
        cal_info_file = h5py.File(cal_info_file, mode='w')

    # write each (date, file name) pair under its wavelength entry
    for wavelength_entry in cal_info_table:
        if wavelength_entry not in cal_info_file:
            # TODO fix this
            # cal_info_file[wavelength_entry] = whatever
            raise RuntimeError(
                'encountered unknown wavelength_entry: {}'.format(
                    wavelength_entry))

        for cal_date in cal_info_table[wavelength_entry]:
            cal_file_name = cal_info_table[wavelength_entry][cal_date]
            cal_info_file[wavelength_entry].append((cal_date, cal_file_name))
        # END-FOR
    # END-FOR

    # close
    cal_info_file.close()

    return
예제 #18
0
    def export_pole_figure(self,
                           detector_id_list,
                           file_name,
                           file_type,
                           file_header=''):
        """
        exported the calculated pole figure
        :param detector_id_list: list of detector IDs to write the pole figure file;
            None means all detector IDs
        :param file_name: name of the output file
        :param file_type: ASCII or MTEX (.jul), case insensitive
        :param file_header: for MTEX format
        :return:
        :raises RuntimeError: if file_type is not a supported format
        """
        # TESTME - 20180711 - Clean this method and allow user to specifiy header

        # process detector ID list
        if detector_id_list is None:
            detector_id_list = self.get_detector_ids()
        else:
            checkdatatypes.check_list('Detector IDs', detector_id_list)

        # check inputs
        checkdatatypes.check_file_name(file_name,
                                       check_exist=False,
                                       check_writable=True)
        checkdatatypes.check_string_variable(
            'Output pole figure file type/format', file_type)

        # it is a dictionary now
        if file_type.lower() == 'ascii':
            # export pole figure arrays as ascii column file
            export_arrays_to_ascii(self._pole_figure_dict, detector_id_list,
                                   file_name)
        elif file_type.lower() == 'mtex':
            # export to MTEX format
            export_to_mtex(self._pole_figure_dict,
                           detector_id_list,
                           file_name,
                           header=file_header)
        else:
            # fix: an unsupported type previously fell through silently and
            # wrote nothing at all
            raise RuntimeError(
                'Pole figure file type {} is not supported.'.format(file_type))

        return
예제 #19
0
def browse_dir(parent, caption, default_dir):
    """ Browse a directory
    :param parent: parent (Qt) widget for the dialog
    :param caption: title/caption shown on the dialog
    :param default_dir: directory the dialog starts in
    :return: non-empty string for selected directory; empty string for canceled operation
    """
    # check inputs; the previous `isinstance(parent, object)` assert was
    # vacuous (everything is an object) and has been removed
    checkdatatypes.check_string_variable('File browsing title/caption',
                                         caption)
    checkdatatypes.check_file_name(default_dir, check_exist=False, is_dir=True)

    # pop up the native directory dialog; leftover debug print removed.
    # An empty/whitespace result means the user canceled.
    chosen_dir = QFileDialog.getExistingDirectory(parent, caption, default_dir)
    chosen_dir = str(chosen_dir).strip()

    return chosen_dir
예제 #20
0
    def load_raw_measurement_data(file_name):
        """
        Load raw data measured
        :param file_name: HDF5 file with 'raw' and 'instrument' entries
        :return: 2-tuple (counts: numpy.ndarray, two_theta: numpy.float64)
        """
        checkdatatypes.check_file_name(file_name, check_exist=True)

        # open explicitly read-only (recent h5py versions require a mode) and
        # make sure the file is closed once the data are in memory
        scan_h5 = h5py.File(file_name, 'r')
        try:
            if 'raw' not in scan_h5.keys() or 'instrument' not in scan_h5.keys():
                raise RuntimeError(
                    'PyRS reduced file {} must have both raw and instrument entries.'
                    'FYI current entries are {}'.format(file_name, scan_h5.keys()))

            # get diffraction data/counts
            counts = scan_h5['raw']['counts'].value

            # instrument 2theta value
            two_theta = scan_h5['instrument']['2theta'].value
        finally:
            scan_h5.close()

        # the debug prints that were marked "Remove after testing is finished"
        # have been removed
        return counts, two_theta
예제 #21
0
    def load_hidra_project(self, project_file_name, load_calibrated_instrument,
                           load_detectors_counts, load_reduced_diffraction):
        """ Load hidra project file and then CLOSE!
        :param project_file_name: name of the HiDRA project file to load
        :param load_calibrated_instrument: flag to load the calibrated instrument
        :param load_detectors_counts: Flag to load detector counts
        :param load_reduced_diffraction: Flag to reduced diffraction data
        :return: HidraWorkspace instance
        """
        # validate input file
        checkdatatypes.check_file_name(project_file_name, True, False, False,
                                       'Project file to load')

        # a reduction workspace must have been created first
        if self._curr_workspace is None:
            raise RuntimeError(
                'Call init_session to create a ReductionWorkspace')

        # choose the HidraProject file mode from the file's write permission:
        # writable -> read/write (append), otherwise read-only
        writable = os.access(project_file_name, os.W_OK)
        file_mode = (HidraProjectFileMode.READWRITE if writable
                     else HidraProjectFileMode.READONLY)
        project_h5_file = HidraProjectFile(project_file_name, mode=file_mode)

        # load the requested parts into the current workspace, then close
        self._curr_workspace.load_hidra_project(
            project_h5_file,
            load_raw_counts=load_detectors_counts,
            load_reduced_diffraction=load_reduced_diffraction)
        project_h5_file.close()

        return self._curr_workspace
예제 #22
0
def write_calibration_to_json(shifts,
                              shifts_error,
                              wave_length,
                              wave_lenngth_error,
                              calibration_status,
                              file_name=None):
    """Write geometry and wave length calibration to a JSON file

    Parameters
    ----------
    shifts : AnglerCameraDetectorShift
        calibrated detector shifts and rotations
    shifts_error : AnglerCameraDetectorShift
        errors of the calibrated shifts and rotations
    wave_length : float
        calibrated wave length
    wave_lenngth_error : float
        error of the calibrated wave length (parameter name kept as-is for
        backward compatibility despite the typo)
    calibration_status : int
        status flag of the calibration
    file_name : str
        name of the output JSON file

    Returns
    -------
    None
    """
    # Check inputs
    checkdatatypes.check_file_name(file_name, False, True, False,
                                   'Output JSON calibration file')
    assert isinstance(shifts, AnglerCameraDetectorShift)
    assert isinstance(shifts_error, AnglerCameraDetectorShift)

    # Create calibration dictionary: values, then errors, then status
    calibration_dict = shifts.convert_to_dict()
    calibration_dict['Lambda'] = wave_length

    calibration_dict.update(shifts_error.convert_error_to_dict())
    calibration_dict['error_Lambda'] = wave_lenngth_error

    calibration_dict.update({'Status': calibration_status})

    # write the dictionary out (the leftover debug dump print was removed)
    with open(file_name, 'w') as outfile:
        json.dump(calibration_dict, outfile)
    print('[INFO] Calibration file is written to {}'.format(file_name))

    return
예제 #23
0
    def save_reduced_diffraction(self, session_name, output_name):
        """
        Save the reduced diffraction data to file
        :param session_name: name of the session whose workspace is saved
        :param output_name: name of the output project file
        :return:
        """
        checkdatatypes.check_file_name(output_name, False, True, False,
                                       'Output reduced file')

        workspace = self._session_dict[session_name]

        # append to an existing project file, otherwise create from scratch
        io_mode = (HidraProjectFileMode.READWRITE
                   if os.path.exists(output_name)
                   else HidraProjectFileMode.OVERWRITE)
        project_file = HidraProjectFile(output_name, io_mode)

        # write the reduced data, then finalize the file
        # NOTE(review): HidraProjectFile.save() appears to act as the close
        # step here — confirm against the HidraProjectFile API
        workspace.save_reduced_diffraction_data(project_file)
        project_file.save()
예제 #24
0
    def __init__(self, cal_file_name=None, read_only=False):
        """
        initialization.
        :param cal_file_name: calibration.  If false, then in the writing out mode
        :param read_only: if True, then read only. Otherwise, read/write mode
        """
        # default state: no HDF5 handle yet, zeroed geometry, no date
        self._h5_file = None  # HDF5 handler
        self._geometry_calibration = AnglerCameraDetectorShift(
            0, 0, 0, 0, 0, 0)
        self._calibration_date = ''

        # validate the read-only flag
        checkdatatypes.check_bool_variable('Calibration file read only mode',
                                           read_only)

        if not cal_file_name:
            # no file given: pure write mode
            self._cal_file_name = None
            self._file_mode = 'w'
            return

        # a file is given: read or read/write mode; the file must exist and,
        # unless read-only, be writable
        checkdatatypes.check_file_name(cal_file_name,
                                       check_exist=True,
                                       check_writable=not read_only,
                                       is_dir=False,
                                       description='HB2B calibration file')
        self._cal_file_name = cal_file_name
        self._file_mode = 'r' if read_only else 'r+'

        self._import_h5_calibration(self._cal_file_name)
예제 #25
0
    def __init__(self,
                 nexus_file_name,
                 mask_file_name=None,
                 extra_logs=()):
        """Initialization

        Parameters
        ----------
        nexus_file_name : str
            Name of NeXus file
        mask_file_name : str
            Name of masking file
        extra_logs : list, tuple
            list of string with no default logs to keep in project file
        """
        # note: the default for extra_logs is an immutable tuple instead of
        # the previous mutable `list()` default (Python anti-pattern); it is
        # only read below, so behavior for all callers is unchanged
        # configure logging for this class
        self._log = Logger(__name__)

        # validate NeXus file exists
        checkdatatypes.check_file_name(nexus_file_name, True, False, False,
                                       'NeXus file')
        self._nexus_name = nexus_file_name

        # validate mask file exists; only Mantid XML masks are supported
        if mask_file_name is None:
            self._mask_file_name = None
        else:
            checkdatatypes.check_file_name(mask_file_name, True, False, False,
                                           'Mask file')
            self._mask_file_name = mask_file_name
            if not mask_file_name.lower().endswith('.xml'):
                raise NotImplementedError(
                    'Only Mantid mask in XML format is supported now.  File '
                    '{} with type {} is not supported yet.'
                    ''.format(mask_file_name,
                              mask_file_name.split('.')[-1]))

        # workspaces: event workspace named after the NeXus file
        self._event_ws_name = os.path.basename(nexus_file_name).split('.')[0]

        # always keep the default logs in addition to any user-requested ones
        logs_to_keep = list(extra_logs)
        logs_to_keep.extend(DEFAULT_KEEP_LOGS)

        self.__load_logs(logs_to_keep)

        # load the mask
        self.mask_array = None  # TODO to promote direct access
        if mask_file_name:
            self.__load_mask(mask_file_name)

        # create the hidra workspace
        self._hidra_workspace = workspaces.HidraWorkspace(self._nexus_name)

        # Set a default instrument with this workspace:
        # initialize instrument with hard coded values
        instrument = DENEXDetectorGeometry(NUM_PIXEL_1D, NUM_PIXEL_1D,
                                           PIXEL_SIZE, PIXEL_SIZE, ARM_LENGTH,
                                           False)

        self._hidra_workspace.set_instrument_geometry(instrument)

        # project file
        self._project_file = None
예제 #26
0
    def load_rs_file_set(self, file_name_list):
        """ Load a set of Residual Stress's intermediate data files
        :param file_name_list: list of (detector ID, file name) 2-tuples
        :return: 2-tuple (diff_data_dict_set, sample_logs_set), each a dict
            keyed by detector ID
        """
        # sort file name by order
        file_name_list.sort()

        # prepare the data structures: one entry per detector ID
        sample_logs_set = dict()
        diff_data_dict_set = dict()

        for det_id, file_name in file_name_list:
            checkdatatypes.check_file_name(file_name, check_exist=True)

            # define single file dictionary
            sample_logs = dict()
            diff_data_dict = dict()

            # access sub tree: all data live under 'Diffraction Data'
            scan_h5 = h5py.File(file_name)
            if 'Diffraction Data' not in scan_h5.keys():
                raise RuntimeError(scan_h5.keys())
            diff_data_group = scan_h5['Diffraction Data']
            print('File: {0}'.format(file_name))

            # loop through the Logs
            h5_log_i = diff_data_group

            # get 'Log #': the scan-log index vector mapping entries to logs
            log_index_vec = h5_log_i['Log #'].value[0, 0].astype(int)
            # print ('Log #: Shape = {0}. Value = {1}'.format(log_index_vec.shape, log_index_vec))

            for item_name in h5_log_i.keys():
                # skip log index
                if item_name == 'Log #':
                    continue

                item_i = h5_log_i[item_name].value
                if isinstance(item_i, numpy.ndarray):
                    # case for diffraction data
                    if item_name == 'Corrected Diffraction Data':
                        print('Item {0}: shape = {1}'.format(
                            item_name, item_i.shape))
                        # corrected 2theta and diffraction; the third axis
                        # must line up one-to-one with the log index vector
                        if item_i.shape[2] != len(log_index_vec):
                            raise RuntimeError(
                                'File {0}: Corrected Diffraction Data ({1}) has different '
                                'number of entries than log indexes ({2})'
                                ''.format(file_name, item_i.shape[2],
                                          len(log_index_vec)))
                        # slice per log index: column 0 is 2theta, column 1
                        # is intensity — TODO confirm against the writer
                        for i_log_index in range(len(log_index_vec)):
                            vec_2theta = item_i[:, 0, i_log_index]
                            vec_intensity = item_i[:, 1, i_log_index]
                            diff_data_dict[log_index_vec[
                                i_log_index]] = vec_2theta, vec_intensity
                        # END-FOR

                    elif item_name == 'Corrected Intensity':
                        raise NotImplementedError('Not supposed to be here!')
                    else:
                        # sample log data: one float vector per log name
                        vec_sample_i = item_i[0, 0].astype(float)
                        # dictionary = dict(zip(log_index_vec, vec_sample_i))
                        # sample_logs[str(item_name)] = dictionary  # make sure the log name is a string
                        sample_logs[str(item_name)] = vec_sample_i
                    # END-IF-ELSE
                else:
                    # 1 dimensional (single data point): not a supported layout
                    raise RuntimeError(
                        'There is no use case for single-value item so far. '
                        '{0} of value {1} is not supported to parse in.'
                        ''.format(item_i, item_i.value))
                # END-IF
            # END-FOR

            # conclude for single file: store results under this detector ID
            sample_logs_set[det_id] = sample_logs
            diff_data_dict_set[det_id] = diff_data_dict
        # END-FOR (log_index, file_name)

        return diff_data_dict_set, sample_logs_set
예제 #27
0
    def load_rs_file(file_name):
        """ parse h5 file
        Note: peak_fit data are in sample log dictionary
        :param file_name: path to the HDF5 scan file to parse
        :return: 2-tuple as diff_data_dict, sample_logs, where diff_data_dict
            maps scan log index -> (vec_2theta, vec_intensity) and sample_logs
            maps log name -> numpy vector over all scans
        :raises RuntimeError: if 'Diffraction Data' is absent, a log misses its
            corrected 2theta/intensity, or a key lookup fails
        """
        def add_peak_fit_parameters(sample_log_dict, h5_group, log_index,
                                    total_scans):
            """
            add peak fitted parameters value to sample log dictionary
            :param sample_log_dict: dictionary receiving a 'peak_fit' sub-dict
            :param h5_group: h5py group with one dataset per fit parameter
            :param log_index: scan index at which to store the values
            :param total_scans: total number of scans (log vector length)
            :return:
            """
            # per-parameter vectors are allocated only on the first call
            need_init = False
            if 'peak_fit' not in sample_log_dict:
                sample_log_dict['peak_fit'] = dict()
                need_init = True

            for par_name in h5_group.keys():
                # get value
                par_value = h5_group[par_name].value
                # init sample logs vector if needed
                if need_init:
                    sample_log_dict['peak_fit'][par_name] = numpy.ndarray(
                        shape=(total_scans, ), dtype=par_value.dtype)
                # set value
                sample_log_dict['peak_fit'][par_name][log_index] = par_value
            # END-FOR

            return

        checkdatatypes.check_file_name(file_name, check_exist=True)

        # access sub tree: open read-only and guarantee the handle is released
        # on every exit path (previously the file was never closed and the
        # open mode was left to h5py's version-dependent default)
        with h5py.File(file_name, 'r') as scan_h5:
            if 'Diffraction Data' not in scan_h5.keys():
                raise RuntimeError(scan_h5.keys())
            diff_data_group = scan_h5['Diffraction Data']

            # loop through the Logs
            num_scan_logs = len(diff_data_group)
            sample_logs = dict()
            diff_data_dict = dict()

            try:
                for scan_log_index in range(num_scan_logs):
                    log_name_i = 'Log {0}'.format(scan_log_index)
                    h5_log_i = diff_data_group[log_name_i]

                    vec_2theta = None
                    vec_y = None

                    for item_name in h5_log_i.keys():
                        # special peak fit
                        if item_name == 'peak_fit':
                            add_peak_fit_parameters(sample_logs,
                                                    h5_log_i[item_name],
                                                    scan_log_index,
                                                    total_scans=num_scan_logs)
                            continue

                        # get value once (the dataset was previously re-read
                        # several times through h5_log_i[item_name].value)
                        item_i = h5_log_i[item_name].value

                        if isinstance(item_i, numpy.ndarray):
                            if item_name == 'Corrected 2theta':
                                # corrected 2theta: accept 1D or single-column
                                if not (len(item_i.shape) == 1 or
                                        item_i.shape[1] == 1):
                                    raise RuntimeError(
                                        'Unable to support a non-1D corrected 2theta entry'
                                    )
                                vec_2theta = item_i.flatten('F')
                            elif item_name == 'Corrected Intensity':
                                if not (len(item_i.shape) == 1 or
                                        item_i.shape[1] == 1):
                                    raise RuntimeError(
                                        'Unable to support a non-1D corrected intensity entry'
                                    )
                                vec_y = item_i.flatten('F')
                        else:
                            # 1 dimensional (single data point)
                            item_name_str = str(item_name)
                            if item_name_str not in sample_logs:
                                # create entry as ndarray if it does not exist
                                if isinstance(item_i, six.string_types):
                                    # string can only be object type
                                    sample_logs[item_name_str] = numpy.ndarray(
                                        shape=(num_scan_logs, ), dtype=object)
                                else:
                                    # raw type
                                    try:
                                        sample_logs[item_name_str] = numpy.ndarray(
                                            shape=(num_scan_logs, ),
                                            dtype=item_i.dtype)
                                    except AttributeError as att_err:
                                        err_msg = 'Item {} with value {} is a unicode object and cannot be converted to ' \
                                                  'ndarray due to \n{}'.format(item_name_str, item_i, att_err)
                                        raise AttributeError(err_msg)

                            # add the log
                            sample_logs[item_name_str][scan_log_index] = item_i
                        # END-IF-ELSE
                    # END-FOR

                    # record 2theta-intensity
                    if vec_2theta is None or vec_y is None:
                        raise RuntimeError(
                            'Log {0} does not have either Corrected 2theta or Corrected Intensity'
                            ''.format(scan_log_index))
                    else:
                        diff_data_dict[scan_log_index] = vec_2theta, vec_y
            except KeyError as key_error:
                raise RuntimeError('Failed to load {} due to {}'.format(
                    file_name, key_error))
        # END-WITH

        return diff_data_dict, sample_logs
예제 #28
0
def import_instrument_setup(instrument_ascii_file):
    """Import instrument file in ASCII format

    Example:
      # comment
      arm = xxx  meter
      rows = 2048
      columns = 2048
      pixel_size_x = 0.00
      pixel_size_y = 0.00

    Parameters
    ----------
    instrument_ascii_file : str
        instrument file in plain ASCII format

    Returns
    -------
    AnglerCameraDetectorGeometry
        Instrument geometry setup for HB2B

    Raises
    ------
    RuntimeError
        If a line cannot be parsed as 'name = value', an argument name is not
        recognized, or any of the five geometry values is absent

    """
    checkdatatypes.check_file_name(instrument_ascii_file, False, True, False,
                                   'Instrument definition ASCII file')

    # read all lines; context manager closes the handle even if reading fails
    with open(instrument_ascii_file, 'r') as instr_file:
        setup_lines = instr_file.readlines()

    # Init: every one of these must be provided by the file
    arm_length = detector_rows = detector_columns = pixel_size_x = pixel_size_y = None

    # Parse each line
    for line in setup_lines:
        line = line.strip()

        # skip empty and comment
        if line == '' or line.startswith('#'):
            continue

        # 'name = value [unit]': keep the first two tokens, ignore any unit
        terms = line.replace('=', ' ').split()
        if len(terms) < 2:
            raise RuntimeError(
                'Instrument definition line "{}" cannot be parsed as '
                '"name = value"'.format(line))
        arg_name = terms[0].strip().lower()
        arg_value = terms[1]

        if arg_name == 'arm':
            arm_length = float(arg_value)
        elif arg_name == 'rows':
            detector_rows = int(arg_value)
        elif arg_name == 'columns':
            detector_columns = int(arg_value)
        elif arg_name == 'pixel_size_x':
            pixel_size_x = float(arg_value)
        elif arg_name == 'pixel_size_y':
            pixel_size_y = float(arg_value)
        else:
            raise RuntimeError(
                'Argument {} is not recognized'.format(arg_name))
    # END-FOR

    # fail early with a clear message instead of silently passing None
    # into the geometry constructor
    missing = [name for name, value in [('arm', arm_length),
                                        ('rows', detector_rows),
                                        ('columns', detector_columns),
                                        ('pixel_size_x', pixel_size_x),
                                        ('pixel_size_y', pixel_size_y)]
               if value is None]
    if missing:
        raise RuntimeError('Instrument file {} is missing value(s) for: {}'
                           ''.format(instrument_ascii_file, ', '.join(missing)))

    instrument = AnglerCameraDetectorGeometry(num_rows=detector_rows,
                                              num_columns=detector_columns,
                                              pixel_size_x=pixel_size_x,
                                              pixel_size_y=pixel_size_y,
                                              arm_length=arm_length,
                                              calibrated=False)

    return instrument
예제 #29
0
def load_mantid_mask(pixel_number, mantid_mask_xml, is_mask):
    """ Load Mantid mask file in XML format
    Assumption: PixelID (detector ID) starts from 0 and there is NO gap
    :param pixel_number: total pixel number
    :param mantid_mask_xml: path to the Mantid XML mask file
    :param is_mask: True if listed detectors are masked (0 in output vector,
                    rest 1); False if they define an ROI (1 in output, rest 0)
    :return: numpy vector of length pixel_number holding 0./1. flags
    :raises RuntimeError: if the file has no <detid> entry or a detector ID
        exceeds pixel_number
    """
    checkdatatypes.check_file_name(mantid_mask_xml, True, False, False,
                                   'Mantid XML mask file')
    checkdatatypes.check_int_variable('(Total) pixel number', pixel_number,
                                      (1024**2, 2048**2 + 1))

    # load file to lines; context manager guarantees the handle is closed
    with open(mantid_mask_xml, 'r') as mask_file:
        mask_lines = mask_file.readlines()

    # get detector ID range line
    det_id_line = None
    for line in mask_lines:
        if line.count('<detid') > 0:
            det_id_line = line.strip()
            break
    # END-FOR

    if det_id_line is None:
        raise RuntimeError(
            'Mask file {} does not have masked detector IDs'.format(
                mantid_mask_xml))

    # parse: comma-separated entries between <detid...> and </detid>
    masked_det_pair_list = det_id_line.split('>')[1].split(
        '<')[0].strip().split(',')
    # print ('[DB...BAT] Masked detectors range: {}'.format(masked_det_pair_list))

    # create vector with 1 (for not masking)
    masking_array = np.zeros((pixel_number, ), 'float')
    if is_mask:
        # if the given IDs are to be masked then the default is not masked
        masking_array += 1.
    # is ROI default = 0

    masked_specs = 0
    for masked_det_pair in masked_det_pair_list:
        # each entry is either a single ID 'N' or a range 'N-M'; terms[-1]
        # equals terms[0] for a single ID (previously terms[1] raised
        # IndexError on single-ID entries)
        terms = masked_det_pair.split('-')
        start_detid = int(terms[0])
        end_detid = int(terms[-1])
        # check range
        if end_detid >= pixel_number:
            raise RuntimeError(
                'Detector ID {} is out of range of given detector size {}'
                ''.format(end_detid, pixel_number))
        # mask or ROI
        if is_mask:
            masking_array[start_detid:end_detid + 1] = 0.
        else:
            masking_array[start_detid:end_detid + 1] = 1.
        # stat
        masked_specs += end_detid - start_detid + 1
    # END-FOR

    print('[DB...CHECK] Masked spectra = {}, Sum of masking array = {}'
          ''.format(masked_specs, sum(masking_array)))

    return masking_array
예제 #30
0
def read_calibration_json_file(calibration_file_name):
    """Import a calibration file stored in JSON format

    Example:  input JSON
    {u'Lambda': 1.452,
    u'Rot_x': 0.0,
    u'Rot_y': 0.0,
    u'Rot_z': 0.0,
    u'Shift_x': 0.0,
    u'Shift_y': 0.0,
    u'Shift_z': 0.0,
    u'Status': 3,
    u'error_Lambda': 1.0829782933282927e-07,
    u'error_Rot_x': -1.0,
    u'error_Rot_y': -1.0,
    u'error_Rot_z': -1.0,
    u'error_Shift_x': -1.0,
    u'error_Shift_y': -1.0,
    u'error_Shift_z': -1.0}

    Parameters
    ----------
    calibration_file_name : str
        path to the calibration JSON file

    Returns
    -------
    ~tuple
        (AnglerCameraDetectorShift, AnglerCameraDetectorShift, float, float, int)
        detector position shifts as the calibration result, detector position
        shift errors from fitting, wave length, wave length error, status

    """
    # Check input
    checkdatatypes.check_file_name(calibration_file_name, True, False, False,
                                   'Calibration JSON file')

    # Parse JSON file
    with open(calibration_file_name, 'r') as calib_file:
        calib_dict = json.load(calib_file)
    if calib_dict is None:
        raise RuntimeError('Failed to load JSON calibration file {}'.format(
            calibration_file_name))

    def build_shift(prefix):
        """Assemble an AnglerCameraDetectorShift from '<prefix>Shift_*' and
        '<prefix>Rot_*' entries; a missing key raises KeyError to the caller."""
        return AnglerCameraDetectorShift(
            shift_x=calib_dict[prefix + 'Shift_x'],
            shift_y=calib_dict[prefix + 'Shift_y'],
            shift_z=calib_dict[prefix + 'Shift_z'],
            rotation_x=calib_dict[prefix + 'Rot_x'],
            rotation_y=calib_dict[prefix + 'Rot_y'],
            rotation_z=calib_dict[prefix + 'Rot_z'])

    # Calibration result shifts
    try:
        shift = build_shift('')
    except KeyError as key_error:
        raise RuntimeError(
            'Missing key parameter from JSON file {}: {}'.format(
                calibration_file_name, key_error))

    # Fitting errors of the shifts
    try:
        shift_error = build_shift('error_')
    except KeyError as key_error:
        raise RuntimeError(
            'Missing key parameter from JSON file {}: {}'.format(
                calibration_file_name, key_error))

    # Wave length and its error
    try:
        wave_length = calib_dict['Lambda']
        wave_length_error = calib_dict['error_Lambda']
    except KeyError as key_error:
        raise RuntimeError(
            'Missing wave length related parameter from JSON file {}: {}'
            ''.format(calibration_file_name, key_error))

    # Calibration status flag
    try:
        status = calib_dict['Status']
    except KeyError as key_error:
        raise RuntimeError(
            'Missing status parameter from JSON file {}: {}'.format(
                calibration_file_name, key_error))

    return shift, shift_error, wave_length, wave_length_error, status