Example #1
    def pre_process_idl_command(idl_command):
        """ Pre-process IDL command such that
        1. list bracket [] will be identified and string inside will have ',' replaced by '~'
        2. list bracket []'s sequence will checked
        :param idl_command:
        :return:
        """
        datatypeutility.check_string_variable('IDL command', idl_command)

        # check equal of bracket
        if idl_command.count(']') != idl_command.count('['):
            raise RuntimeError('Found unpaired list bracket [ and ] in {}'.format(idl_command))

        # replace
        num_bracket = idl_command.count(']')
        for bracket_index in range(num_bracket):
            left_index = idl_command.index('[')
            right_index = idl_command.index(']')
            if left_index > right_index:
                raise RuntimeError(
                    'In IDL command {}, list brackets\' order is reversed.'.format(idl_command))

            list_str = idl_command[left_index+1:right_index]
            list_str = list_str.replace(',', '~')

            # construct new command
            idl_command = idl_command[:left_index] + list_str + idl_command[right_index+1:]
        # END-FOR

        return idl_command
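
A minimal sketch of the expected behavior, using a hypothetical command string; the brackets are stripped and the comma inside them becomes '~':

    pre_process_idl_command('CHOP, IPTS=1234, RUNS=[96450, 96451]')
    # returns 'CHOP, IPTS=1234, RUNS=96450~ 96451'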
Example #2
def generate_chopped_log_dir(original_dir, make_dir):
    """
    generate the chopped sample log dir
    :param original_dir:
    :param make_dir: Flag to make directory if it does not exist
    :return:
    """
    datatypeutility.check_string_variable('Original chopped data directory', original_dir)

    if original_dir.count('shared') == 0:
        raise RuntimeError(
            'Chopped log directory must be a shared directory in /SNS/VULCAN/IPTS-xxxx/shared/')

    if original_dir.endswith('/'):
        original_dir = original_dir[:-1]
    run_str = original_dir.split('/')[-1]

    base_dir = os.path.join(original_dir.split('shared')[0], 'shared')
    pyvdrive_chopped_dir = os.path.join(base_dir, 'pyvdrive_only')
    if not os.path.exists(pyvdrive_chopped_dir) and make_dir:
        os.mkdir(pyvdrive_chopped_dir)
    chopped_log_dir = os.path.join(pyvdrive_chopped_dir, run_str)
    if not os.path.exists(chopped_log_dir) and make_dir:
        os.mkdir(chopped_log_dir)

    print('[DB...BAT] PyVDrive chopped sample log dir: {}'.format(chopped_log_dir))

    return chopped_log_dir
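
A sketch of the resulting path mapping, with a hypothetical IPTS and run number (make_dir=False, so nothing is created on disk):

    generate_chopped_log_dir('/SNS/VULCAN/IPTS-1234/shared/ChoppedData/96450', make_dir=False)
    # returns '/SNS/VULCAN/IPTS-1234/shared/pyvdrive_only/96450'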
Example #3
 def set_title(self, title):
     """
     Set title of the plot
     :param title:
     :return:
     """
     datatypeutility.check_string_variable('Plot title', title)
     self.label_title.setText(title)
Example #4
    def save_vanadium(self, diff_ws_name, gsas_file_name, ipts_number,
                      van_run_number, sample_log_ws_name):
        """  Save a WorkspaceGroup which comes from original GSAS workspace
        :param diff_ws_name: diffraction workspace (group) name
        :param gsas_file_name: output GSAS file name
        :param ipts_number: IPTS number
        :param van_run_number: (van) run number
        :param sample_log_ws_name: workspace containing sample logs (proton charges)
        :return:
        """
        datatypeutility.check_string_variable(
            'Diffraction workspace (group) name', diff_ws_name)
        datatypeutility.check_file_name(gsas_file_name, False, True, False,
                                        'Smoothed vanadium GSAS file')
        datatypeutility.check_int_variable('IPTS', ipts_number, (1, None))
        datatypeutility.check_string_variable('Sample log workspace name',
                                              sample_log_ws_name)

        # rebin and then write output
        gsas_bank_buffer_dict = dict()
        van_ws = mantid_helper.retrieve_workspace(diff_ws_name)
        num_banks = mantid_helper.get_number_spectra(van_ws)
        datatypeutility.check_file_name(gsas_file_name,
                                        check_exist=False,
                                        check_writable=True,
                                        is_dir=False,
                                        note='Output GSAS file')

        # TODO - TONIGHT 5 - This will break if input is a Workspace but not GroupingWorkspace!!!
        for ws_index in range(num_banks):
            # get value
            bank_id = ws_index + 1
            # write GSAS head considering vanadium
            tof_vector = None
            ws_name_i = van_ws[ws_index].name()
            gsas_section_i = self._write_slog_bank_gsas(
                ws_name_i, 1, tof_vector, None)
            gsas_bank_buffer_dict[bank_id] = gsas_section_i
        # END-FOR

        # header
        log_ws = mantid_helper.retrieve_workspace(sample_log_ws_name)
        gsas_header = self._generate_vulcan_gda_header(log_ws, gsas_file_name,
                                                       ipts_number,
                                                       van_run_number,
                                                       gsas_file_name, False)

        # form to a big string
        gsas_buffer = gsas_header
        for bank_id in sorted(gsas_bank_buffer_dict.keys()):
            gsas_buffer += gsas_bank_buffer_dict[bank_id]

        # write to HDD
        with open(gsas_file_name, 'w') as g_file:
            g_file.write(gsas_buffer)

        return
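
A hypothetical call; all names below are made up for illustration, assuming 'van_96450_group' is an existing WorkspaceGroup and 'van_96450_logs' carries the proton-charge sample logs:

    self.save_vanadium(diff_ws_name='van_96450_group',
                       gsas_file_name='/tmp/96450-s.gda',
                       ipts_number=1234,
                       van_run_number=96450,
                       sample_log_ws_name='van_96450_logs')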
Example #5
    def _generate_chop_run_dir(self, chop_run):
        """
        Generate the directory to save file
        :param chop_run:
        :return:
        """
        datatypeutility.check_string_variable('CHOP RUN', chop_run)
        # construct the path to the directory in the archive
        chop_run_dir = '/SNS/VULCAN/IPTS-{}/shared/binned_data/{}/'.format(
            self._iptsNumber, chop_run)

        return chop_run_dir
Example #6
    def save_nexus(self, ws_ref_id, output_file_name):
        """
        Save workspace to processed NeXus
        :param ws_ref_id:
        :param output_file_name:
        :return:
        """
        datatypeutility.check_string_variable('Workspace/data reference ID', ws_ref_id)

        if ws_ref_id in self._ws_name_dict:
            ws_name = self._ws_name_dict[ws_ref_id]
            file_utilities.save_workspace(ws_name, output_file_name, file_type='nxs')
        else:
            raise RuntimeError('Workspace/data reference ID {0} does not exist.'.format(ws_ref_id))

        return
Example #7
    def focus_workspace_list(self, ws_name_list, gsas_ws_name_list, group_ws_name):
        """ Do diffraction focus on a list workspaces and also convert them to IDL GSAS
        This is the main execution body to be executed in multi-threading environment
        :param ws_name_list:
        :param gsas_ws_name_list: name for GSAS
        :param group_ws_name: name for grouping workspace
        :return:
        """
        datatypeutility.check_list('Workspace names', ws_name_list)
        datatypeutility.check_list('(Output) GSAS workspace name list', gsas_ws_name_list)
        if len(ws_name_list) != len(gsas_ws_name_list):
            raise RuntimeError('Input workspace names {} and output GSAS workspace names {} differ in number'
                               ''.format(ws_name_list, gsas_ws_name_list))

        for index in range(len(ws_name_list)):
            # set GSAS workspace name same as input workspace name
            ws_name = ws_name_list[index]
            gsas_ws_name_list[index] = ws_name
            gsas_ws_name = ws_name

            datatypeutility.check_string_variable('Workspace name', ws_name)
            datatypeutility.check_string_variable('Output GSAS workspace name', gsas_ws_name)
            # skip empty workspace name that might be returned from FilterEvents
            if len(ws_name) == 0:
                continue
            # focus (simple): same as diffraction_focus() but in a simplified version
            ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target='dSpacing')
            # diffraction focus
            DiffractionFocussing(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                                 GroupingWorkspace=group_ws_name)
            # convert unit to TOF
            ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                         Target='TOF', ConvertFromPointData=False)
            # edit instrument
            try:
                EditInstrumentGeometry(Workspace=ws_name,
                                       PrimaryFlightPath=self._focus_instrument_dict['L1'],
                                       SpectrumIDs=self._focus_instrument_dict['SpectrumIDs'],
                                       L2=self._focus_instrument_dict['L2'],
                                       Polar=self._focus_instrument_dict['Polar'],
                                       Azimuthal=self._focus_instrument_dict['Azimuthal'])
            except RuntimeError as run_err:
                print('[WARNING] Non-critical error from EditInstrumentGeometry for {}: {}'
                      ''.format(ws_name, run_err))
        # END-FOR

        return
Example #8
    def load_auto_record(self, ipts_number, record_type):
        """
        load auto record file
        :except RuntimeError if there is no IPTS in auto record
        :param ipts_number:
        :param record_type: None for AutoRecord.txt, 'data' for AutoRecordData.txt', 'align' for AutoRecordAlign.txt
        :return:
        """
        # check input
        datatypeutility.check_int_variable('IPTS number', ipts_number,
                                           (1, None))
        if record_type is not None:
            datatypeutility.check_string_variable(
                'Log type', record_type, allowed_values=['data', 'align'])

        # locate IPTS folder and AutoRecord file
        ipts_shared_dir = '/SNS/VULCAN/IPTS-{}/shared'.format(ipts_number)
        if not os.path.exists(ipts_shared_dir):
            raise RuntimeError(
                'IPTS {} has no directory {} in SNS archive'.format(
                    ipts_number, ipts_shared_dir))

        if record_type is None:
            base_name = 'AutoRecord.txt'
        elif record_type == 'data':
            base_name = 'AutoRecordData.txt'
        elif record_type == 'align':
            base_name = 'AutoRecordAlign.txt'
        else:
            raise NotImplementedError('Impossible to reach this point')

        auto_record_file_name = os.path.join(ipts_shared_dir, base_name)
        if not os.path.exists(auto_record_file_name):
            raise RuntimeError('Auto {} record file {} does not exist.'.format(
                record_type, auto_record_file_name))

        # load and parse the file
        record_key = 'Auto{}-IPTS{}'.format(record_type, ipts_number)
        self._auto_record_dict[record_key] = vulcan_util.import_vulcan_log(
            auto_record_file_name)

        return record_key
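
A sketch with a hypothetical IPTS number (this requires access to the SNS archive); the returned key encodes the record type and the IPTS:

    # reads /SNS/VULCAN/IPTS-1234/shared/AutoRecordData.txt
    record_key = self.load_auto_record(1234, 'data')
    # record_key == 'Autodata-IPTS1234'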
Example #9
    def init_session(self, workspace_name, ipts_number, van_run_number, out_gsas_name,
                     sample_log_ws_name):
        """
        Initialize vanadium processing session
        :param workspace_name:
        :param ipts_number:
        :param van_run_number:
        :param out_gsas_name:
        :param sample_log_ws_name: required for proton charge
        :return:
        """
        datatypeutility.check_string_variable('Workspace name', workspace_name)
        datatypeutility.check_int_variable('IPTS number', ipts_number, (1, 99999))
        datatypeutility.check_int_variable('Vanadium run number', van_run_number, (1, 999999))
        datatypeutility.check_file_name(out_gsas_name, False, True, False, 'Output GSAS file name')
        datatypeutility.check_string_variable('Sample log workspace name', sample_log_ws_name)

        workspace = mantid_helper.retrieve_workspace(workspace_name)
        if workspace.id() == 'WorkspaceGroup':
            pass
        else:
            # create dictionary and etc
            raise NotImplementedError('Need to implement single workspace case to extract spectra')

        self._van_workspace_name = workspace_name

        self._ipts_number = ipts_number
        self._van_run_number = van_run_number
        self._output_gsas_name = out_gsas_name

        # parameter set up
        self._is_shift_case = False

        # convert to point data as requested
        mantid_helper.convert_to_point_data(self._van_workspace_name)
        mantid_helper.mtd_convert_units(self._van_workspace_name, 'dSpacing')

        self._sample_log_ws_name = sample_log_ws_name

        return
Example #10
def get_load_file_by_dialog(parent, title, default_dir, file_filter):
    """ Get the file name to load via QFileDialog
    :param parent:
    :param title:
    :param default_dir:
    :param file_filter:
    :return:
    """
    datatypeutility.check_string_variable('Title (to load file)', title)
    datatypeutility.check_string_variable('Default directory to load file', default_dir)
    datatypeutility.check_file_name(default_dir, True, False, True, 'Default directory to load file')
    datatypeutility.check_string_variable('File filter', file_filter)

    # append "All files:
    if file_filter.count('*.*') == 0:
        file_filter += ';;All files (*.*)'

    # get file name
    returns = QFileDialog.getOpenFileName(parent, title, default_dir, file_filter)
    if isinstance(returns, tuple):
        file_name = str(returns[0])
    else:
        file_name = str(returns).strip()
    file_name = file_name.strip()

    print('[DB...BAT] Selected file: {}'.format(file_name))

    return file_name
Example #11
    def get_argument_as_list(self, arg_name, data_type):
        """ The argument is pre-processed such that ',' is replaced by '~', which is rarely used in any case
        :param arg_name:
        :param data_type:
        :return:
        """
        datatypeutility.check_string_variable(
            '{} argument'.format(self._commandName), arg_name)
        arg_input_value = self._commandArgsDict[arg_name]

        arg_values = arg_input_value.split('~')
        return_list = list()

        for value in arg_values:
            # string value: remove space at two ends
            value = value.strip()
            # convert value
            value = data_type(value)
            # append
            return_list.append(value)

        return return_list
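
A sketch, assuming the pre-processing from Example #1 has already stored self._commandArgsDict['RUNS'] == '96450~ 96451':

    run_numbers = self.get_argument_as_list('RUNS', int)
    # run_numbers == [96450, 96451]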
Example #12
    def smooth_v_spectrum(self, bank_id, smoother_filter_type, param_n, param_order, ws_name=None):
        """
        smooth vanadium peaks
        :param bank_id:
        :param smoother_filter_type:
        :param param_n:
        :param param_order:
        :param ws_name:
        :return:
        """
        # check inputs:
        datatypeutility.check_int_variable('Bank ID', bank_id, (1, 99))
        datatypeutility.check_string_variable('Smoothing filter type', smoother_filter_type,
                                              ['Zeroing', 'Butterworth'])
        datatypeutility.check_int_variable('Smoothing parameter "n"', param_n, (1, 100))
        datatypeutility.check_int_variable('Smoothing order', param_order, (1, 100))

        # get workspace
        if ws_name is None:
            ws_name = self._striped_peaks_ws_dict[bank_id]

        # output workspace name
        out_ws_name = ws_name + '_Smoothed'

        # convert unit
        mantid_helper.mtd_convert_units(ws_name, 'TOF', out_ws_name)

        # smooth vanadium spectra
        mantid_helper.smooth_vanadium(input_workspace=out_ws_name,
                                      output_workspace=out_ws_name,
                                      smooth_filter=smoother_filter_type,
                                      workspace_index=None,
                                      param_n=param_n,
                                      param_order=param_order,
                                      push_to_positive=True)

        self._smoothed_ws_dict[bank_id] = out_ws_name

        return
Example #13
def set_combobox_current_item(combo_box, item_name, match_beginning):
    """
    set the current (index/item) of a combo box by name
    :param combo_box:
    :param item_name:
    :param match_beginning: if True, only need to match beginning but not all
    :return:
    """
    # check
    assert isinstance(combo_box, QComboBox), 'Input widget {} must be a QComboBox instance but not a ' \
                                             '{}'.format(combo_box, type(combo_box))
    datatypeutility.check_string_variable('Combo box item name', item_name)

    # get the list of items' names
    item_name_list = [str(combo_box.itemText(i)).strip() for i in range(combo_box.count())]  # string and no space

    if match_beginning:
        # match beginning
        item_index = None
        for index_i, item_name_i in enumerate(item_name_list):
            if item_name_i.startswith(item_name):
                item_index = index_i
                break
        if item_index is None:
            raise RuntimeError('Combo box does not have item {}.  Available names are {}'
                               ''.format(item_name, item_name_list))
    else:
        # match all
        if item_name not in item_name_list:
            raise RuntimeError('Combo box does not have item {}.  Available names are {}'
                               ''.format(item_name, item_name_list))
        item_index = item_name_list.index(item_name)
    # END-IF-ELSE

    # set current index
    combo_box.setCurrentIndex(item_index)

    return
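
A hypothetical usage with made-up combo box widgets:

    # select the item named exactly 'dSpacing'
    set_combobox_current_item(self.ui.comboBox_unit, 'dSpacing', match_beginning=False)
    # select the first item whose name starts with 'IPTS-123'
    set_combobox_current_item(self.ui.comboBox_ipts, 'IPTS-123', match_beginning=True)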
Example #14
    def mask_detectors(self, ws_name, roi_file_list, mask_file_list):
        """
        mask detectors by ROI and/or mask
        :param ws_name:
        :param roi_file_list:
        :param mask_file_list:
        :return: workspace reference
        """
        # check inputs
        datatypeutility.check_string_variable('Workspace name', ws_name)

        datatypeutility.check_list('ROI file names', roi_file_list)
        datatypeutility.check_list('Mask file names', mask_file_list)

        # return if nothing to do
        if len(roi_file_list) + len(mask_file_list) == 0:
            matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)
            return matrix_ws

        # load mask file and roi file
        roi_ws_list = list()
        mask_ws_list = list()

        for roi_file in roi_file_list:
            roi_ws_name_i = self.load_mask_xml(roi_file, ws_name, is_roi=True)
            roi_ws_list.append(roi_ws_name_i)
        for mask_file in mask_file_list:
            mask_ws_name_i = self.load_mask_xml(mask_file, ws_name, is_roi=False)
            mask_ws_list.append(mask_ws_name_i)

        # mask by ROI workspaces
        self.mask_detectors_by_rois(ws_name, roi_ws_list)
        # mask by masks workspace
        self.mask_detectors_by_masks(ws_name, mask_ws_list)

        matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)

        return matrix_ws
Example #15
    def get_raw_data(self, bank_id, unit):
        """
        Get raw data
        :param bank_id:
        :param unit:
        :return:
        """
        if self._van_workspace_name is None:
            raise RuntimeError('Vanadium workspace has not been set up yet!')

        datatypeutility.check_int_variable('Bank ID', bank_id, (1, 99))
        datatypeutility.check_string_variable('Unit', unit, ['TOF', 'dSpacing'])

        if unit == 'TOF':
            if self._van_workspace_name_tof is None:
                self._van_workspace_name_tof = self._van_workspace_name + '_tof'
                mantid_helper.mtd_convert_units(
                    self._van_workspace_name, 'TOF', self._van_workspace_name_tof)
            workspace = mantid_helper.retrieve_workspace(self._van_workspace_name_tof)
        else:
            workspace = mantid_helper.retrieve_workspace(self._van_workspace_name)

        return workspace[bank_id-1].readX(0), workspace[bank_id-1].readY(0)
Example #16
    def smooth_curve(self, method, params):
        """
        smooth curve
        :param method: smoothing algorithm: 'fft' (FFT smoothing) or 'nearest' (nearest-neighbor smoothing)
        :param params: parameters passed to the smoothing algorithm
        :return:
        """
        datatypeutility.check_string_variable('Smooth algorithm', method,
                                              ['fft', 'nearest'])

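        # NOTE: the block below dumps the input curve to 'smooth_prototype.h5', apparently as prototype/debug output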
        import h5py
        temp_h5 = h5py.File('smooth_prototype.h5', 'w')
        curve_group = temp_h5.create_group('curve')
        curve_group.create_dataset('x', data=self._vec_x)
        curve_group.create_dataset('y', data=self._vec_y)
        temp_h5.close()

        if method == 'fft':
            self._smooth_vec_y = mantid_helper.fft_smooth(
                self._vec_x, self._vec_y, params)
        elif method == 'nearest':
            self._smooth_vec_y = mantid_helper.nearest_neighbor_smooth(
                self._vec_x, self._vec_y, params)

        return
Example #17
    def plot_sample_log(self, vec_x, vec_y, sample_log_name, plot_label, sample_log_name_x='Time'):
        """ plot sample log
        :param vec_x:
        :param vec_y:
        :param sample_log_name: on Y-axis
        :param plot_label: label of log to plot
        :param sample_log_name_x: on X-axis
        :return:
        """
        # check
        datatypeutility.check_numpy_arrays('Vector X and Y', [vec_x, vec_y], 1, True)
        datatypeutility.check_string_variable('Sample log name', sample_log_name)
        datatypeutility.check_string_variable('Sample log name (x-axis)', sample_log_name_x)
        datatypeutility.check_string_variable('Plot label', plot_label)

        # set label
        if plot_label == '':
            try:
                plot_label = '%s Y (%f, %f)' % (sample_log_name, min(vec_y), max(vec_y))
            except TypeError as type_err:
                err_msg = 'Unable to generate log with %s and %s: %s' % (
                    str(min(vec_y)), str(max(vec_y)), str(type_err))
                raise TypeError(err_msg)
        # END-IF

        # add plot and register
        self.reset()

        plot_id = self.add_plot_1d(vec_x, vec_y, x_label=sample_log_name_x,
                                   y_label=sample_log_name,
                                   label=plot_label, marker='.', color='blue', show_legend=True)
        self.set_title(title=plot_label)
        self._sizeRegister[plot_id] = (min(vec_x), max(vec_x), min(vec_y), max(vec_y))

        # auto resize
        self.resize_canvas(margin=0.05)
        # re-scale
        # TODO - TONIGHT 3 - FIXME - No self._maxX  self.auto_rescale()
        min_x = vec_x.min()
        max_x = vec_x.max()
        self.setXYLimit(xmin=min_x, xmax=max_x)

        # update
        self._currPlotID = plot_id
        self._curr_log_name = sample_log_name

        return plot_id
Example #18
    def plot_chopped_log(self, vec_x, vec_y, sample_log_name_x, sample_log_name_y, plot_label, color='red'):
        """
        Plot chopped sample log from archive (just points)
        :param vec_x:
        :param vec_y:
        :param sample_log_name_x:
        :param sample_log_name_y:
        :param plot_label:
        :param color: color of the plotted points
        :return:
        """
        # check
        datatypeutility.check_numpy_arrays('Vector X and Y', [vec_x, vec_y], 1, True)
        datatypeutility.check_string_variable('Sample log name on Y-axis', sample_log_name_y)
        datatypeutility.check_string_variable('Sample log name on X-axis', sample_log_name_x)
        datatypeutility.check_string_variable('Plot label', plot_label)

        # set label
        if plot_label == '':
            try:
                plot_label = '%s Y (%f, %f)' % (sample_log_name_x, min(vec_y), max(vec_y))
            except TypeError as type_err:
                err_msg = 'Unable to generate log with %s and %s: %s' % (
                    str(min(vec_y)), str(max(vec_y)), str(type_err))
                raise TypeError(err_msg)
        # END-IF

        plot_id = self.add_plot_1d(vec_x, vec_y, x_label=sample_log_name_x,
                                   y_label=sample_log_name_y,
                                   label=plot_label, marker='o', marker_size=4,
                                   color=color, show_legend=True,
                                   line_style='none')
        self._sizeRegister[plot_id] = (min(vec_x), max(vec_x), min(vec_y), max(vec_y))

        # No need to auto resize
        # self.resize_canvas(margin=0.05)
        # re-scale
        if sample_log_name_x.startswith('Time'):
            self.setXYLimit(xmin=0.)
        else:
            min_x = vec_x.min()
            max_x = vec_x.max()
            self.setXYLimit(xmin=min_x, xmax=max_x)

        # # update
        # self._currPlotID = plot_id

        return plot_id
Example #19
    @staticmethod
    def construct_workspace_name(file_name, file_type, prefix, max_int):
        """ Construct a standard workspace name for loaded binned data (gsas/processed nexus)
        :param file_name:
        :param file_type:
        :param prefix:
        :param max_int:
        :return:
        """
        # check inputs
        datatypeutility.check_string_variable('File name', file_name)
        datatypeutility.check_string_variable('File type', file_type)
        datatypeutility.check_string_variable('Workspace prefix', prefix)
        datatypeutility.check_int_variable(
            'Maximum integer for file sequence number', max_int, (10, None))

        base_ws_name = os.path.basename(file_name).split('.')[0]
        hash_part = hash(os.path.basename(file_name))

        # add zeros for better sorting
        if base_ws_name.isdigit():
            # add number of zeros in front
            num_zeros = int(math.log(max_int) / math.log(10)) + 1
            if num_zeros < 1:
                num_zeros = 1
            base_ws_name = '{0:0{1}}'.format(int(base_ws_name), num_zeros)

        if prefix != '':
            data_ws_name = '{}_{}'.format(prefix, base_ws_name)
        else:
            data_ws_name = base_ws_name

        if file_type == '':
            raise RuntimeError('File type cannot be empty string')
        else:
            data_ws_name = '{}_{}{}'.format(data_ws_name, file_type[0],
                                            hash_part)

        return data_ws_name
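
A sketch of the naming convention (the hash suffix varies between Python sessions):

    construct_workspace_name('/tmp/5.gda', 'gsas', prefix='', max_int=999)
    # returns '005_g<hash>': a digit-only base name is zero-padded to
    # int(log10(max_int)) + 1 = 3 digits and 'g' is the first letter of the file type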
Example #20
    def plot_1d_vanadium(self, run_id, bank_id, is_smoothed_data=False):
        """

        :param run_id:
        :param bank_id:
        :return:
        """
        # check input
        datatypeutility.check_string_variable('Run ID', run_id)
        datatypeutility.check_int_variable('Bank ID', bank_id, (1, 100))

        # clear previous image
        self.ui.graphicsView_mainPlot.clear_all_lines()
        # change unit
        self.ui.comboBox_unit.setCurrentIndex(1)

        # plot original run
        # raw_van_key = run_id, bank_id, self._currUnit
        # if raw_van_key in self._currentPlotDataKeyDict:
        #     # data already been loaded before
        #     vec_x, vec_y = self._currentPlotDataKeyDict[raw_van_key]
        # else:
        #     vec_x, vec_y = self.retrieve_loaded_reduced_data(data_key=run_id, bank_id=bank_id,
        #                                                      unit=self._currUnit)
        #     self._currentPlotDataKeyDict[raw_van_key] = vec_x, vec_y

        if is_smoothed_data:
            plot_unit = 'TOF'
        else:
            plot_unit = 'dSpacing'

        # get the original raw data
        ws_name = self._myController.project.vanadium_processing_manager.get_raw_vanadium()[bank_id]
        vec_x, vec_y = self.retrieve_loaded_reduced_data(data_key=ws_name,
                                                         bank_id=1,
                                                         unit=plot_unit)

        # plot
        self._raw_van_plot_id = self.ui.graphicsView_mainPlot.plot_diffraction_data(
            (vec_x, vec_y),
            unit=plot_unit,
            over_plot=False,
            run_id=run_id,
            bank_id=bank_id,
            chop_tag=None,
            line_color='black',
            label='Raw vanadium {}'
            ''.format(run_id))

        if is_smoothed_data:
            ws_name = self._myController.project.vanadium_processing_manager.get_smoothed_vanadium()[bank_id]
        else:
            ws_name = self._myController.project.vanadium_processing_manager.get_peak_striped_vanadium()[bank_id]
        vec_x, vec_y = self.retrieve_loaded_reduced_data(data_key=ws_name,
                                                         bank_id=1,
                                                         unit=plot_unit)
        # plot vanadium
        self._strip_van_plot_id = self.ui.graphicsView_mainPlot.plot_diffraction_data(
            (vec_x, vec_y),
            unit=plot_unit,
            over_plot=True,
            run_id=run_id,
            bank_id=bank_id,
            chop_tag=None,
            line_color='red',
            label='Peak-stripped vanadium {}'
            ''.format(run_id))

        return
Example #21
    def _generate_vulcan_gda_header(self,
                                    gsas_workspace,
                                    gsas_file_name,
                                    ipts,
                                    run_number,
                                    gsas_param_file_name,
                                    from_sliced_ws,
                                    extra_info=None):
        """
        generate a VDRIVE compatible GSAS file's header
        :param gsas_workspace:
        :param gsas_file_name:
        :param ipts:
        :param run_number:
        :param gsas_param_file_name:
        :param from_sliced_ws: flag to indicate whether the GSAS workspace is from sliced
        :return: string : multiple lines
        """
        # check
        assert not isinstance(gsas_workspace, str), 'GSAS workspace must not be a string.'
        datatypeutility.check_string_variable('(Output) GSAS file name',
                                              gsas_file_name)
        datatypeutility.check_string_variable('GSAS IParam file name',
                                              gsas_param_file_name)
        assert isinstance(ipts, int) or isinstance(ipts, str), 'IPTS number {0} must be either string or integer.' \
                                                               ''.format(ipts)
        if isinstance(ipts, str):
            assert ipts.isdigit(
            ), 'IPTS {0} must be convertible to an integer.'.format(ipts)

        # Get necessary information
        title = gsas_workspace.getTitle()

        # Get information on start/stop
        total_nanosecond_start, total_nanosecond_stop = self._calculate_run_start_stop_time(
            gsas_workspace, from_sliced_ws)

        # Construct new header
        new_header = ""

        if len(title) > 80:
            title = title[0:80]
        new_header += "%-80s\n" % title
        new_header += "%-80s\n" % ("Instrument parameter file: %s" %
                                   gsas_param_file_name)
        new_header += "%-80s\n" % ("#IPTS: %s" % str(ipts))
        if run_number is not None:
            new_header += "%-80s\n" % ("#RUN: %s" % str(run_number))
        new_header += "%-80s\n" % (
            "#binned by: Mantid. From refrence workspace: {})".format(
                str(gsas_workspace)))
        if extra_info:
            new_header += "%-80s\n" % ("#%s" % extra_info)
        new_header += "%-80s\n" % ("#GSAS file name: %s" %
                                   os.path.basename(gsas_file_name))
        new_header += "%-80s\n" % ("#GSAS IPARM file: %s" %
                                   gsas_param_file_name)
        new_header += "%-80s\n" % ("#Pulsestart:    %d" %
                                   total_nanosecond_start)
        new_header += "%-80s\n" % ("#Pulsestop:     %d" %
                                   total_nanosecond_stop)
        new_header += '%-80s\n' % '#'

        return new_header
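
Each header line is left-justified and padded to 80 characters with "%-80s", matching the fixed-width GSAS convention; for example:

    '%-80s\n' % '#IPTS: 1234'   # '#IPTS: 1234' followed by 69 spaces and a newline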
Example #22
    def load_binned_data(self,
                         data_file_name,
                         data_file_type,
                         max_int,
                         prefix='',
                         data_key=None,
                         target_unit=None):
        """ Load binned data
        :param data_file_name:
        :param data_file_type:
        :param max_int: maximum integer for sequence such as 999 for 001, 002, ... 999
        :param prefix: prefix of the GSAS workspace name (an empty string means no prefix)
        :param data_key: data key or None (to use workspace name as data key)
        :param target_unit: target unit or None
        :return: string as data key (aka. workspace name)
        """
        # check inputs
        datatypeutility.check_file_name(data_file_name, True, False, False,
                                        'Binned/reduced data file to load')
        if data_file_type is not None:
            datatypeutility.check_string_variable('Data file type',
                                                  data_file_type,
                                                  ['gsas', 'processed nexus'])
        if data_key is not None:
            datatypeutility.check_string_variable('Data key', data_key)
        datatypeutility.check_string_variable('Workspace prefix', prefix)

        # find out the type of the data file
        file_name, file_extension = os.path.splitext(data_file_name)

        if data_file_type is None:
            if file_extension.lower() in ['.gda', '.gsa', '.gss']:
                data_file_type = 'gsas'
            elif file_extension.lower() == '.nxs':
                data_file_type = 'processed nexus'
            else:
                raise RuntimeError(
                    'Data file extension {0} is not recognized.'.format(
                        file_extension))
        else:
            data_file_type = data_file_type.lower()
        # END-IF-ELSE

        # Load data
        data_ws_name = self.construct_workspace_name(data_file_name,
                                                     data_file_type, prefix,
                                                     max_int)

        if data_file_type == 'gsas':
            # load as GSAS
            mantid_helper.load_gsas_file(data_file_name,
                                         data_ws_name,
                                         standard_bin_workspace=None)
        elif data_file_type == 'processed nexus':
            # load processed nexus
            mantid_helper.load_nexus(data_file_name=data_file_name,
                                     output_ws_name=data_ws_name,
                                     meta_data_only=False)
        else:
            raise RuntimeError('Unable to support %s file.' % data_file_type)

        # convert unit
        if target_unit:
            mantid_helper.mtd_convert_units(data_ws_name, target_unit)

        if data_key is None:
            data_key = data_ws_name

        # register by adding to data management dictionary
        self._workspaceDict[data_key] = data_ws_name
        # TODO - TONIGHT 0 - Add an option to the method such that single run data will go to singleGSASDict
        # TODO - ... ...   - chopped run will NOT to be recorded .. self._loadedGSSDict[] = ...maybe
        self._singleGSASDict[data_key] = data_file_name

        return data_key
Example #23
    def sort_info(self, auto_record_ref_id, sort_by, run_range, output_items,
                  num_outputs):
        """ sort the information loaded from auto record file
        Note: current list of indexes
        Index([u'RUN', u'IPTS', u'Title', u'Notes', u'Sample', u'ITEM', u'StartTime',
        u'Duration', u'ProtonCharge', u'TotalCounts', u'Monitor1', u'Monitor2',
        u'X', u'Y', u'Z', u'O', u'HROT', u'VROT', u'BandCentre', u'BandWidth',
        u'Frequency', u'Guide', u'IX', u'IY', u'IZ', u'IHA', u'IVA',
        u'Collimator', u'MTSDisplacement', u'MTSForce', u'MTSStrain',
        u'MTSStress', u'MTSAngle', u'MTSTorque', u'MTSLaser', u'MTSlaserstrain',
        u'MTSDisplaceoffset', u'MTSAngleceoffset', u'MTST1', u'MTST2', u'MTST3',
        u'MTST4', u'MTSHighTempStrain', u'FurnaceT', u'FurnaceOT',
        u'FurnacePower', u'VacT', u'VacOT', u'EuroTherm1Powder',
        u'EuroTherm1SP', u'EuroTherm1Temp', u'EuroTherm2Powder',
        u'EuroTherm2SP', u'EuroTherm2Temp'],
        :param auto_record_ref_id:
        :param sort_by:
        :param run_range:
        :param output_items:
        :param num_outputs:
        :return:
        """
        # check inputs
        datatypeutility.check_string_variable('Auto record reference ID',
                                              auto_record_ref_id)
        datatypeutility.check_string_variable('Column name to sort by',
                                              sort_by)
        if sort_by.lower() not in AUTO_LOG_MAP:
            raise RuntimeError(
                'Pandas DataFrame has no columns mapped from {}; Available include '
                '{}'.format(sort_by.lower(), AUTO_LOG_MAP.keys()))
        if run_range is not None:
            assert not isinstance(run_range,
                                  str), 'Runs range cannot be a string'
            if len(run_range) != 2:
                raise RuntimeError(
                    'Run range {} must have 2 items for start and end.'
                    ''.format(run_range))
        # END-IF

        datatypeutility.check_list('Output column names', output_items)
        if num_outputs is not None:
            datatypeutility.check_int_variable('Number of output rows',
                                               num_outputs, (1, None))

        if auto_record_ref_id not in self._auto_record_dict:
            raise RuntimeError(
                'Auto record ID {} is not in dictionary.  Available keys are {}'
                ''.format(auto_record_ref_id, self._auto_record_dict.keys()))
        if run_range is not None:
            print(
                '[ERROR] Notify developer that run range shall be implemented.'
            )

        # get data frame (data set)
        record_data_set = self._auto_record_dict[auto_record_ref_id]

        # sort the value
        auto_log_key = AUTO_LOG_MAP[sort_by.lower()]
        record_data_set.sort_values(by=[auto_log_key],
                                    ascending=False,
                                    inplace=True)

        # filter out required
        needed_index_list = list()
        for item in output_items:
            needed_index_list.append(AUTO_LOG_MAP[item.lower()])
        filtered = record_data_set.filter(needed_index_list)

        # number of outputs
        if num_outputs is None:
            num_outputs = len(record_data_set)

        # convert to list of dictionary
        column_names = filtered.columns.tolist()
        output_list = list()
        for row_index in range(min(num_outputs, len(filtered))):
            dict_i = dict()
            for j in range(len(column_names)):
                try:
                    dict_i[output_items[j]] = filtered.iloc[row_index, j]
                except IndexError as index_err:
                    print('j = {}, row_index = {}'.format(j, row_index))
                    print(column_names)
                    print('output items: {}'.format(output_items))
                    print(output_items[j])
                    print('filtered: \n{}'.format(filtered))
                    raise index_err
            # print dict_i
            output_list.append(dict_i)

        return output_list
Example #24
def browse_file(parent, caption, default_dir, file_filter, file_list=False, save_file=False):
    """ browse a file or files
    :param parent:
    :param caption:
    :param default_dir:
    :param file_filter:
    :param file_list:
    :param save_file:
    :return: if file_list is False: return string (file name); otherwise, return a list;
             if user cancels the operation, then return None
    """
    # check inputs
    assert isinstance(parent, object), 'Parent {} must be of some object.'.format(parent)
    datatypeutility.check_string_variable('File browsing title/caption', caption)
    datatypeutility.check_file_name(default_dir, check_exist=False, is_dir=True)
    datatypeutility.check_bool_variable('Flag for browse a list of files to load', file_list)
    datatypeutility.check_bool_variable('Flag to select loading or saving file', save_file)
    if file_filter is None:
        file_filter = 'All Files (*.*)'
    else:
        datatypeutility.check_string_variable('File filter', file_filter)

    if save_file:
        # browse file name to save to
        if platform.system() == 'Darwin':
            # TODO - 20180721 - Find out the behavior on Mac!
            file_filter = ''
        save_set = QFileDialog.getSaveFileName(parent, caption=caption, directory=default_dir,
                                               filter=file_filter)
        if isinstance(save_set, tuple):
            # returned include both file name and filter
            file_name = str(save_set[0])
        else:
            file_name = str(save_set)

    elif file_list:
        # browse file names to load
        open_set = QFileDialog.getOpenFileNames(parent, caption, default_dir, file_filter)

        if isinstance(open_set, tuple):
            # PyQt5
            file_name_list = open_set[0]
        else:
            file_name_list = open_set

        if len(file_name_list) == 0:
            # user cancelled
            return None
        else:
            return file_name_list

    else:
        # browse single file name
        open_set = QFileDialog.getOpenFileName(parent, caption, default_dir, file_filter)

        if isinstance(open_set, tuple):
            # PyQt5
            file_name = open_set[0]
        else:
            file_name = open_set

    # END-IF-ELSE

    # check result for single file whether user cancels operation
    if len(file_name) == 0:
        return None

    return file_name
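
A hypothetical call to load a single file (returns None if the user cancels the dialog):

    gsas_file = browse_file(self, 'Select a GSAS file', '/tmp',
                            'GSAS (*.gda);;All Files (*.*)',
                            file_list=False, save_file=False)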
Example #25
    def process_commands(self, vdrive_command):
        """ Process commands string. The work include
        1. pre-process list special such as arg=[a,b,c],
        2. separate command from arguments
        3. ...
        :param vdrive_command:
        :return:
        """
        # check
        datatypeutility.check_string_variable('VDRIVE (IDL) command', vdrive_command, None)

        # pre-process in order to accept lists in brackets [...]
        vdrive_command_pp = self.pre_process_idl_command(vdrive_command)

        # split
        command_script = vdrive_command_pp.split(',')
        command = command_script[0].strip()
        command_args = command_script[1:]

        print('[INFO-IDL] Parse input IDL command: {} to {}\n\tArguments = {}'
              ''.format(vdrive_command, vdrive_command_pp, command_args))

        # support command case insensitive
        raw_command = command
        command = command.upper()

        # check input command whether it is recognized
        if command not in self._commandList:
            return False, 'Command %s is not in supported command list: %s' \
                          '' % (raw_command, str(self._commandList))

        # process special command VDRIVE (for help)
        if command == 'VDRIVE':
            status, err_msg = self._process_vdrive(command_args)
            return status, err_msg

        # process regular VDRIVE command by parsing command arguments and store them to a dictionary
        status, ret_obj = self.parse_command_arguments(command, command_args)

        if status:
            arg_dict = ret_obj
        else:
            error_msg = ret_obj
            return False, error_msg

        # call the specific command class builder
        if command == 'CHOP':
            # chop
            chop_start_time = time.time()
            status, err_msg = self._process_chop(arg_dict)
            chop_stop_time = time.time()
            err_msg += '\nExecution time = {} seconds'.format(chop_stop_time - chop_start_time)
        elif command == 'VBIN' or command == 'VDRIVEBIN':
            # bin
            status, err_msg = self._process_vbin(arg_dict)

        elif command == '2THETABIN':
            # group pixels by 2theta and reduce to GSAS
            status, err_msg = self._process_2theta_bin(arg_dict)

        elif command == 'VDRIVEVIEW' or command == 'VIEW':
            # view
            status, err_msg = self._process_view(arg_dict)

        elif command == 'MERGE':
            # merge
            status, err_msg = self._process_merge(arg_dict)

        elif command == 'AUTO':
            # auto reduction command
            status, err_msg = self._process_auto_reduction(arg_dict)

        elif command == 'VPEAK':
            # process vanadium peak
            status, err_msg = self._process_vanadium_peak(arg_dict)

        elif command == 'INFO':
            # query some information from previously measured runs
            status, err_msg = self._process_info_query(arg_dict)

        else:
            raise RuntimeError('Impossible situation!')

        return status, err_msg
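
A sketch of driving the processor with a hypothetical CHOP command; the bracketed list is pre-processed (Example #1) before the string is split on commas:

    status, message = self.process_commands('CHOP, IPTS=1234, RUNS=[96450, 96451]')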