Example #1
    def save_vanadium(self, diff_ws_name, gsas_file_name, ipts_number,
                      van_run_number, sample_log_ws_name):
        """  Save a WorkspaceGroup which comes from original GSAS workspace
        :param diff_ws_name: diffraction workspace (group) name
        :param gsas_file_name: output GSAS file name
        :param ipts_number: IPTS number
        :param van_run_number: (van) run number
        :param sample_log_ws_name: workspace containing sample logs (proton charges)
        :return:
        """
        datatypeutility.check_string_variable(
            'Diffraction workspace (group) name', diff_ws_name)
        datatypeutility.check_file_name(gsas_file_name, False, True, False,
                                        'Smoothed vanadium GSAS file')
        datatypeutility.check_int_variable('IPTS', ipts_number, (1, None))
        datatypeutility.check_string_variable('Sample log workspace name',
                                              sample_log_ws_name)

        # rebin and then write output
        gsas_bank_buffer_dict = dict()
        van_ws = mantid_helper.retrieve_workspace(diff_ws_name)
        num_banks = mantid_helper.get_number_spectra(van_ws)

        # TODO - TONIGHT 5 - This will break if input is a Workspace but not GroupingWorkspace!!!
        for ws_index in range(num_banks):
            # get value
            bank_id = ws_index + 1
            # write a GSAS section from bank 1 of each member workspace
            # (no vanadium is applied; tof_vector=None uses the workspace's own X vector)
            tof_vector = None
            ws_name_i = van_ws[ws_index].name()
            gsas_section_i = self._write_slog_bank_gsas(
                ws_name_i, 1, tof_vector, None)
            gsas_bank_buffer_dict[bank_id] = gsas_section_i
        # END-FOR

        # header
        log_ws = mantid_helper.retrieve_workspace(sample_log_ws_name)
        gsas_header = self._generate_vulcan_gda_header(log_ws, gsas_file_name,
                                                       ipts_number,
                                                       van_run_number,
                                                       gsas_file_name, False)

        # form to a big string
        gsas_buffer = gsas_header
        for bank_id in sorted(gsas_bank_buffer_dict.keys()):
            gsas_buffer += gsas_bank_buffer_dict[bank_id]

        # write to HDD
        with open(gsas_file_name, 'w') as g_file:
            g_file.write(gsas_buffer)

        return
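The file written above is simply the header followed by the banks in ascending bank-ID order. A tiny self-contained sketch of that assembly, with placeholder strings instead of real GSAS content:

# Toy demonstration of the buffer assembly used above; strings are placeholders.
gsas_header = '# header line\n'
gsas_bank_buffer_dict = {2: 'BANK 2 ...\n', 1: 'BANK 1 ...\n', 3: 'BANK 3 ...\n'}

gsas_buffer = gsas_header
for bank_id in sorted(gsas_bank_buffer_dict.keys()):
    gsas_buffer += gsas_bank_buffer_dict[bank_id]
print(gsas_buffer)  # header, then BANK 1, BANK 2, BANK 3 in order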
Example #2
def get_sub_splitters2(split_ws_name, split_start_index, split_stop_index, run_start_ns):
    """

    :param split_ws_name:
    :param split_start_index:
    :param split_stop_index:
    :param run_start_ns:
    :return:
    """
    # get splitting workspace
    split_ws = mantid_helper.retrieve_workspace(split_ws_name)

    # get the sub splitters name
    sub_split_ws_name = split_ws.name() + '_{0}'.format(split_start_index)

    # split
    if isinstance(split_ws, SplittersWorkspace):
        # splitters workspace
        mantid_helper.create_table_workspace(sub_split_ws_name,
                                             [('float', 'start'), ('float', 'stop'), ('str', 'index')])
        sub_split_ws = mantid_helper.retrieve_workspace(sub_split_ws_name)

        num_rows = split_ws.rowCount()
        for i_row in range(split_start_index, min(split_stop_index, num_rows)):
            start_time = (split_ws.cell(i_row, 0) - run_start_ns) * 1.E-9
            stop_time = (split_ws.cell(i_row, 1) - run_start_ns) * 1.E-9
            target = str(split_ws.cell(i_row, 2))
            sub_split_ws.addRow([start_time, stop_time, target])
        # END-FOR

    elif isinstance(split_ws, MatrixWorkspace):
        # Matrix workspace
        # TODO/TEST - Need to test
        vec_x = split_ws.readX(0)[split_start_index:split_stop_index + 1]
        vec_y = split_ws.readY(0)[split_start_index:split_stop_index]
        vec_e = split_ws.readE(0)[split_start_index:split_stop_index]

        mantid_helper.create_workspace_2d(vec_x, vec_y, vec_e, sub_split_ws_name)

    elif isinstance(split_ws, ITableWorkspace):
        # Table workspace
        # TODO/TEST - Need to verify
        mantid_helper.create_table_workspace(sub_split_ws_name,
                                             [('float', 'start'), ('float', 'stop'), ('str', 'index')])
        sub_split_ws = mantid_helper.retrieve_workspace(sub_split_ws_name)
        num_rows = split_ws.rowCount()
        for i_row in range(split_start_index, min(split_stop_index, num_rows)):
            start_time = split_ws.cell(i_row, 0)
            stop_time = split_ws.cell(i_row, 1)
            target = split_ws.cell(i_row, 2)
            sub_split_ws.addRow([start_time, stop_time, target])
        # END-FOR

    else:
        # unsupported format
        raise RuntimeError('Splitting workspace of type {0} is not supported.'.format(type(split_ws)))

    return sub_split_ws_name
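In the SplittersWorkspace branch above, absolute times in nanoseconds are converted to seconds relative to run start. A minimal numpy check of that conversion, with made-up numbers:

import numpy

run_start_ns = 1546300800 * 10**9  # made-up run start in epoch nanoseconds
splitter_ns = numpy.array([run_start_ns + 5 * 10**9, run_start_ns + 15 * 10**9])
relative_seconds = (splitter_ns - run_start_ns) * 1.E-9
print(relative_seconds)  # [ 5. 15.]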
Example #3
def group_pixels_2theta(vulcan_ws_name, tth_group_ws_name, start_iws, end_iws,
                        two_theta_bin_range, two_theta_step):
    """ Group detector pixels into 2theta bins via a GroupingWorkspace
    :param vulcan_ws_name: workspace whose instrument gives the pixel positions
    :param tth_group_ws_name: output grouping workspace name
    :param start_iws: first workspace index (inclusive) of the pixels of interest
    :param end_iws: last workspace index (exclusive) of the pixels of interest
    :param two_theta_bin_range: 2-tuple as (min, max) 2theta in degrees
    :param two_theta_step: 2theta bin width in degrees
    :return: 2theta bin edge array, grouping workspace, per-bin pixel count array
    """
    # create group workspace
    CreateGroupingWorkspace(InputWorkspace=vulcan_ws_name, GroupDetectorsBy='All',
                            OutputWorkspace=tth_group_ws_name)

    # Get workspace
    vulcan_ws = mantid_helper.retrieve_workspace(vulcan_ws_name, True)
    group_ws = mantid_helper.retrieve_workspace(tth_group_ws_name, True)

    # Calculate the 2theta bin edges covering the range of interest
    two_theta_array = numpy.arange(two_theta_bin_range[0], two_theta_bin_range[1] + two_theta_step,
                                   two_theta_step, dtype='float')
    num_2theta = two_theta_array.shape[0]
    num_pixels_array = numpy.zeros(shape=two_theta_array.shape, dtype='int')

    # source and sample position
    source = vulcan_ws.getInstrument().getSource().getPos()
    sample = vulcan_ws.getInstrument().getSample().getPos()
    # Calculate 2theta for each detector and assign it to a 2theta group
    for iws in range(0, vulcan_ws.getNumberHistograms()):
        if iws < start_iws or iws >= end_iws:
            # set to group 0 to ignore
            group_ws.dataY(iws)[0] = 0

        else:
            # pixel of interest
            det_i = vulcan_ws.getDetector(iws).getPos()
            two_theta_i = (det_i - sample).angle(sample - source) * 180. / numpy.pi
            if two_theta_i < two_theta_array[0] or two_theta_i >= two_theta_array[-1]:
                group_ws.dataY(iws)[0] = 0
            elif two_theta_i == two_theta_array[0]:
                group_ws.dataY(iws)[0] = 1
                num_pixels_array[0] += 1
            else:
                i_2theta = numpy.searchsorted(two_theta_array, [two_theta_i])[0]
                if i_2theta <= 0 or i_2theta >= num_2theta:
                    raise RuntimeError('Programming error!')
                group_ws.dataY(iws)[0] = i_2theta
                num_pixels_array[i_2theta-1] += 1
        # END-IF-ELSE
    # END-FOR

    # deal with zero-count-instance
    num_pixels_array[numpy.where(num_pixels_array < 0.1)] = -1

    return two_theta_array, group_ws, num_pixels_array
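The grouping convention above maps group 0 to ignored pixels and group g (g >= 1) to the 2theta bin ending at edge g. A self-contained numpy sketch of that mapping, with assumed toy values:

import numpy

two_theta_array = numpy.arange(80., 101., 5.)  # bin edges: 80, 85, 90, 95, 100
for two_theta_i in (79., 80., 87., 100.):
    if two_theta_i < two_theta_array[0] or two_theta_i >= two_theta_array[-1]:
        group_id = 0  # outside the range: ignored
    elif two_theta_i == two_theta_array[0]:
        group_id = 1  # exactly on the first edge
    else:
        group_id = numpy.searchsorted(two_theta_array, [two_theta_i])[0]
    print(two_theta_i, '->', group_id)  # 79->0, 80->1, 87->2, 100->0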
Example #4
    def execute_calculate_2theta_intensity(self, ipts_number, run_number):
        """
        sum events' counts along tube, convert tube center to 2theta
        :return:
        """
        # locate original nexus file
        if ipts_number is None:
            ipts_number = mantid_helper.get_ipts_number(run_number)
        event_file_name = '/SNS/VULCAN/IPTS-{}/nexus/VULCAN_{}.nxs.h5'.format(
            ipts_number, run_number)

        # load data from file
        ws_name_i = 'VULCAN_{}_events'.format(run_number)
        mantid_helper.load_nexus(data_file_name=event_file_name,
                                 output_ws_name=ws_name_i,
                                 meta_data_only=False)

        # now count the events per column on high angle detector
        counts_vec = self._count_events_by_det_column(ws_name_i)
        self._data_set = counts_vec

        # get proton charges
        event_ws = mantid_helper.retrieve_workspace(ws_name_i)
        plog = event_ws.run().getProperty('proton_charge')
        pcharges = plog.value.sum()
        self._proton_charges = [pcharges]

        return counts_vec
Example #5
def get_number_chopped_ws(split_ws_name):
    """
    get the number of expected chopped workspaces from splitters workspace and also find out whether the
    slicers' time are relative time or
    :param split_ws_name:
    :return:
    """
    split_ws = mantid_helper.retrieve_workspace(split_ws_name)

    if isinstance(split_ws, ITableWorkspace):
        # table workspace
        num_rows = split_ws.rowCount()
        target_set = set()
        for i_row in range(num_rows):
            target = split_ws.cell(i_row, 2)
            target_set.add(target)

        run_start_time = split_ws.cell(0, 0)
    else:
        # matrix workspace case
        target_set = set()
        for y in split_ws.readY(0):
            int_y = int(y + 0.1)
            target_set.add(int_y)

        run_start_time = split_ws.readX(0)[0]
    # END-IF-ELSE

    # judge whether the run start time is relative or epoch: even a relative time
    # in seconds cannot exceed one year
    if run_start_time > 3600 * 24 * 365:
        epoch_time = True
    else:
        epoch_time = False

    return len(target_set), epoch_time
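The epoch-versus-relative decision above only compares the first splitter time against one year in seconds. A quick self-contained check with assumed sample values:

ONE_YEAR_SECONDS = 3600 * 24 * 365

for run_start_time in (120.5, 1.546e9):  # relative seconds vs. epoch seconds
    epoch_time = run_start_time > ONE_YEAR_SECONDS
    print(run_start_time, 'epoch?', epoch_time)  # False, then True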
Example #6
    def export_split_logs(split_ws_names, gsas_file_index_start, run_start_time, output_dir):
        """
        Export split sample logs to a series of HDF5 files,
        with the matching GSAS file name and workspace name attached as attributes
        :param split_ws_names:
        :param gsas_file_index_start:
        :param run_start_time: numpy.datetime64 as the (original) run start time
        :param output_dir:
        :return:
        """
        log_names = [log_pair[1] for log_pair in reduce_VULCAN.VulcanSampleLogList]
        log_names.append('splitter')
        info = ''

        for index, ws_name in enumerate(split_ws_names):
            ws_i = mantid_helper.retrieve_workspace(ws_name, True)
            out_file_name = os.path.join(
                output_dir, '{}.hdf5'.format(index + gsas_file_index_start))
            gda_name = '{}.gda'.format(index + gsas_file_index_start)
            attribute_dict = {'GSAS': gda_name, 'Workspace': ws_name}
            file_utilities.save_sample_logs(
                ws_i, log_names, out_file_name, run_start_time, attribute_dict)
            info += '{}  \t{}  \t{}\n'.format(index, out_file_name, gda_name)
        # END-FOR

        with open(os.path.join(output_dir, 'summary.txt'), 'w') as sum_file:
            sum_file.write(info)

        return
Example #7
def is_overlap_splitter(split_ws_name):
    """
            check whether a workspace contains overlapped splits
            :return:
            """
    # get the workspace
    if mantid_helper.workspace_does_exist(split_ws_name):
        split_ws = mantid_helper.retrieve_workspace(split_ws_name)
    else:
        raise RuntimeError(
            'Splitters workspace {0} cannot be found in ADS.'.format(
                split_ws_name))

    # return True if the number of splitters is too large, i.e., exceeds 10,000
    split_number = get_splitters_number(split_ws)
    if split_number >= LARGE_NUMBER_SPLITTER:
        print(
            '[Notice] Number of splitters = {0}.  It is too large to check. Return True instead'
            ''.format(split_number))
        return True

    vec_splitter = get_splitters(split_ws)
    for i_splitter in range(split_number - 1):
        stop_time_i = vec_splitter[i_splitter][1]
        start_time_ip1 = vec_splitter[i_splitter + 1][0]
        if stop_time_i > start_time_ip1 + NUMERIC_TOLERANCE:
            # current splitter stops after the next one starts: overlapped
            return True

    return False
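A self-contained sketch of the adjacent-splitter overlap test above, using plain (start, stop) tuples in place of a Mantid workspace; NUMERIC_TOLERANCE is assumed to be a small positive constant defined elsewhere in the module:

NUMERIC_TOLERANCE = 1.E-9  # assumed value

def has_overlap(splitters):
    # splitters: list of (start, stop) tuples sorted by start time
    for i in range(len(splitters) - 1):
        if splitters[i][1] > splitters[i + 1][0] + NUMERIC_TOLERANCE:
            return True
    return False

print(has_overlap([(0., 1.), (1., 2.)]))   # False: touching, not overlapped
print(has_overlap([(0., 1.5), (1., 2.)]))  # True: first stops after second starts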
Example #8
    def import_vanadium(self, vanadium_gsas_file):
        """
        Import vanadium GSAS file for normalization
        :param vanadium_gsas_file:
        :return:
        """
        # NOTE (algorithm) use hash to determine the workspace name from file location
        base_name = os.path.basename(vanadium_gsas_file).split('.')[0]
        van_gsas_ws_name = 'Van_{}_{}'.format(base_name,
                                              hash(vanadium_gsas_file))
        if mantid_helper.workspace_does_exist(van_gsas_ws_name):
            pass
        else:
            mantid_helper.load_gsas_file(vanadium_gsas_file, van_gsas_ws_name,
                                         None)
            mantid_helper.convert_to_point_data(van_gsas_ws_name)
        self._van_ws_names[vanadium_gsas_file] = van_gsas_ws_name

        # force minimum Y to 1
        van_ws = mantid_helper.retrieve_workspace(van_gsas_ws_name)
        assert van_ws.id() == 'WorkspaceGroup', 'Vanadium workspaces (from GSAS) must be grouped'
        for ws_index in range(len(van_ws)):
            for iy in range(len(van_ws[ws_index].readY(0))):
                if van_ws[ws_index].readY(0)[iy] < 1.:
                    van_ws[ws_index].dataY(0)[iy] = 1.
            # END-FOR
        # END-FOR

        return van_gsas_ws_name
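The element-wise clamping loop above can be written as one vectorized call. A sketch on a plain numpy array; applying it to a workspace assumes dataY returns a writable view, as in recent Mantid versions:

import numpy

vec_y = numpy.array([0.2, 3.0, 0.9, 7.5])
# force minimum Y to 1; in-place on a workspace view: vec_y[vec_y < 1.] = 1.
vec_y = numpy.maximum(vec_y, 1.)
print(vec_y)  # [1.  3.  1.  7.5]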
Example #9
    def strip_v_peaks(self, bank_id, peak_fwhm, pos_tolerance, background_type, is_high_background):
        """ Strip vanadium peaks
        Note: result is stored in _striped_peaks_ws_dict
        :param bank_id:
        :param peak_fwhm:
        :param pos_tolerance:
        :param background_type:
        :param is_high_background:
        :return:
        """
        datatypeutility.check_int_variable('Bank ID', bank_id, (1, 99))
        datatypeutility.check_int_variable('FWHM (number of pixels)', peak_fwhm, (1, 100))
        datatypeutility.check_float_variable('Peak position tolerance', pos_tolerance, (0, None))

        raw_van_ws = mantid_helper.retrieve_workspace(self._van_workspace_name)
        if mantid_helper.is_workspace_group(self._van_workspace_name):
            input_ws_name = raw_van_ws[bank_id-1].name()
            bank_list = [1]
        else:
            input_ws_name = self._van_workspace_name
            bank_list = [bank_id]

        output_ws_name = input_ws_name + '_NoPeak'
        mantid_helper.strip_vanadium_peaks(input_ws_name=input_ws_name,
                                           output_ws_name=output_ws_name,
                                           bank_list=bank_list,
                                           binning_parameter=None,
                                           # PEAK FWHM must be integer (legacy)
                                           fwhm=peak_fwhm,
                                           peak_pos_tol=pos_tolerance,
                                           background_type=background_type,
                                           is_high_background=is_high_background)
        self._striped_peaks_ws_dict[bank_id] = output_ws_name

        return output_ws_name
Example #10
    def get_live_events():
        """
        Get the number of live events from the counter workspace
        :return: number of live events
        """
        counter_ws = mantid_helper.retrieve_workspace(LiveDataDriver.COUNTER_WORKSPACE_NAME)
        live_events = counter_ws.readX(1)[0]

        return live_events
Example #11
    def get_live_counter():
        """
        Get the current counter index from the counter workspace
        :return: current counter index
        """
        counter_ws = mantid_helper.retrieve_workspace(LiveDataDriver.COUNTER_WORKSPACE_NAME)
        curr_index = counter_ws.readX(0)[0]

        return curr_index
Example #12
    def _normalize_by_vanadium(diff_ws, van_ws, diff_ws_name):
        """ Normalize by vanadium
        :param van_ws:
        :param diff_ws_name:
        :return:
        """
        Divide(LHSWorkspace=diff_ws,
               RHSWorkspace=van_ws,
               OutputWorkspace=diff_ws_name)
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        return diff_ws
Example #13
    def mask_detectors(self, ws_name, roi_file_list, mask_file_list):
        """
        mask detectors by ROI and/or mask
        :param ws_name:
        :param roi_file_list:
        :param mask_file_list:
        :return: workspace reference
        """
        # check inputs
        datatypeutility.check_string_variable('Workspace name', ws_name)

        datatypeutility.check_list('ROI file names', roi_file_list)
        datatypeutility.check_list('Mask file names', mask_file_list)

        # return if nothing to do
        if len(roi_file_list) + len(mask_file_list) == 0:
            matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)
            return matrix_ws

        # load mask file and roi file
        roi_ws_list = list()
        mask_ws_list = list()

        for roi_file in roi_file_list:
            roi_ws_name_i = self.load_mask_xml(roi_file, ws_name, is_roi=True)
            roi_ws_list.append(roi_ws_name_i)
        for mask_file in mask_file_list:
            mask_ws_name_i = self.load_mask_xml(mask_file, ws_name, is_roi=False)
            mask_ws_list.append(mask_ws_name_i)

        # mask by ROI workspaces
        self.mask_detectors_by_rois(ws_name, roi_ws_list)
        # mask by mask workspaces
        self.mask_detectors_by_masks(ws_name, mask_ws_list)

        matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)

        return matrix_ws
Example #14
    def process_vanadium(self, peak_pos_tol=0.1, background_type='Quadratic',
                         is_high_background=True, smoother_filter_type='Butterworth'):
        """ Process vanadium run including strip vanadium peaks and smooth
        This is a high-level call to do all the work with good setup in one action
        :param peak_pos_tol:
        :param background_type:
        :param is_high_background:
        :param smoother_filter_type:
        :return:
        """
        try:
            raw_van_ws = mantid_helper.retrieve_workspace(self._van_workspace_name)
        except RuntimeError as run_err:
            return False, 'Unable to process vanadium due to {}'.format(run_err)

        for ws_index in range(mantid_helper.get_number_spectra(raw_van_ws)):
            # strip vanadium peaks
            bank_id = ws_index + 1
            self.strip_v_peaks(bank_id=bank_id, peak_fwhm=self._default_fwhm_dict[bank_id],
                               pos_tolerance=peak_pos_tol,
                               background_type=background_type,
                               is_high_background=is_high_background)
            # smooth
            if self._is_shift_case:
                param_n = self._smooth_param_shift_dict['n'][bank_id]
                param_order = self._smooth_param_shift_dict['order'][bank_id]
            else:
                param_n = self._smooth_param_dict['n'][bank_id]
                param_order = self._smooth_param_dict['order'][bank_id]

            self.smooth_v_spectrum(bank_id=bank_id, smoother_filter_type=smoother_filter_type,
                                   param_n=param_n, param_order=param_order)
        # END-FOR

        # save
        message = 'Vanadium {0} has peaks removed and is smoothed. '.format(self._van_workspace_name)
        status = True
        if self._output_gsas_name:
            #  save GSAS file
            try:
                self.save_vanadium_to_file()
                message += 'Processed vanadium is saved to {}'.format(self._output_gsas_name)
            except RuntimeError as run_err:
                message += 'Processed vanadium failed to be written to {0} due to {1}.' \
                           ''.format(self._output_gsas_name, run_err)
                status = False
        else:
            raise NotImplementedError('Is there any case where no one wants GSAS?')

        return status, message
Example #15
    def get_raw_data(self, bank_id, unit):
        """
        Get raw data
        :param bank_id:
        :param unit:
        :return:
        """
        if self._van_workspace_name is None:
            raise RuntimeError('Vanadium workspace has not been set up yet!')

        datatypeutility.check_int_variable('Bank ID', bank_id, (1, 99))
        datatypeutility.check_string_variable('Unit', unit, ['TOF', 'dSpacing'])

        if unit == 'TOF':
            if self._van_workspace_name_tof is None:
                self._van_workspace_name_tof = self._van_workspace_name + '_tof'
                mantid_helper.mtd_convert_units(
                    self._van_workspace_name, 'TOF', self._van_workspace_name_tof)
            workspace = mantid_helper.retrieve_workspace(self._van_workspace_name_tof)
        else:
            workspace = mantid_helper.retrieve_workspace(self._van_workspace_name)

        return workspace[bank_id-1].readX(0), workspace[bank_id-1].readY(0)
Example #16
    def calculate_live_peak_parameters(self, ws_name, bank_id, norm_by_van, d_min, d_max):
        """ calculate the peak parameters in live data
        :param ws_name:
        :param bank_id:
        :param norm_by_van:
        :param d_min:
        :param d_max:
        :return: 3-tuple as (peak integrated intensity, average dSpacing value, variance)
        """
        # check inputs
        assert isinstance(ws_name, str), 'Input workspace name {0} must be a string but not a {1}' \
                                         ''.format(ws_name, type(ws_name))
        assert isinstance(bank_id, int), 'Bank ID {0} must be an integer but not a {1}.'.format(
            bank_id, type(bank_id))

        # check bank ID
        if bank_id < 1 or bank_id > 3:
            raise RuntimeError('Bank ID {0} is out of range.'.format(bank_id))
        else:
            ws_index = bank_id - 1

        # get workspace
        workspace = mantid_helper.retrieve_workspace(ws_name, True)

        # calculate x min and x max indexes
        vec_d = workspace.readX(ws_index)
        min_x_index = max(0, numpy.searchsorted(vec_d, d_min) - 1)
        max_x_index = min(len(vec_d), numpy.searchsorted(vec_d, d_max) + 1)

        # get Y
        vec_y = workspace.readY(ws_index)
        if norm_by_van and bank_id in self._vanadiumWorkspaceDict:
            # normalize vanadium if the flag is on AND vanadium is loaded
            vec_van = self.get_vanadium(bank_id)
            vec_y = vec_y / vec_van

        # estimate background
        bkgd_a, bkgd_b = peak_util.estimate_background(vec_d, vec_y, min_x_index, max_x_index)

        # calculate peak intensity parameters
        try:
            peak_integral, average_d, variance = peak_util.calculate_peak_variance(vec_d, vec_y, min_x_index,
                                                                                   max_x_index, bkgd_a, bkgd_b)
        except ValueError:
            peak_integral = -1.E-20
            average_d = 0.5 * (d_max + d_min)
            variance = 0

        return peak_integral, average_d, variance
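peak_util's internals are not shown here. One plausible sketch of estimate_background is a straight line through the two endpoints of the d-range window; this is an assumed implementation for illustration, not the actual peak_util code:

import numpy

def estimate_background_sketch(vec_d, vec_y, min_index, max_index):
    # assumed: line y = a*d + b through the window endpoints
    d0, d1 = vec_d[min_index], vec_d[max_index - 1]
    y0, y1 = vec_y[min_index], vec_y[max_index - 1]
    bkgd_a = (y1 - y0) / (d1 - d0)
    return bkgd_a, y0 - bkgd_a * d0

vec_d = numpy.linspace(1.0, 1.5, 6)
vec_y = numpy.array([10., 12., 30., 28., 13., 11.])  # toy peak on a slope
a, b = estimate_background_sketch(vec_d, vec_y, 0, 6)
net_counts = vec_y - (a * vec_d + b)  # background-subtracted peak
print(a, b, net_counts.sum())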
Example #17
    def get_peak_smoothed_data(self, bank_id):
        """
        Get the data (x, y) for spectrum smoothed
        :param bank_id:
        :return:
        """
        if bank_id in self._smoothed_ws_dict:
            ws_name = self._smoothed_ws_dict[bank_id]
        else:
            ws_name = None
        if ws_name is None:
            raise RuntimeError('Bank {} is not smoothed'.format(bank_id))

        workspace = mantid_helper.retrieve_workspace(ws_name)

        return workspace.readX(0), workspace.readY(0)
Example #18
    def __str__(self):
        """
        pretty print information
        :return:
        """
        if self._is_setup:
            source_ws = mantid_helper.retrieve_workspace(
                self._van_workspace_name, raise_if_not_exist=True)
            output_str = 'Process (reduced) vanadium (workspace) {} with {} banks\n' \
                         ''.format(self._van_workspace_name, source_ws.getNumberHistograms())
            output_str += 'Internal workspaces with vanadium peaks stripped: {}\n'.format(
                self._striped_peaks_ws_dict)
            output_str += 'Internal workspaces smoothed: {}\n'.format(self._smoothed_ws_dict)
        else:
            output_str = 'No workspace has been set up for vanadium processing.\n'

        return output_str
Example #19
def save_ws_ascii(ws_name, output_directory, base_name):
    """

    :param ws_name:
    :param output_directory:
    :param base_name:
    :return:
    """
    # check input blabla

    workspace = mantid_helper.retrieve_workspace(ws_name)
    print('[DB...BAT] {0} has {1} spectra'.format(ws_name, workspace.getNumberHistograms()))
    for ws_index in range(workspace.getNumberHistograms()):
        spec_id = ws_index + 1
        mantidapi.SaveAscii(InputWorkspace=ws_name,
                            Filename=os.path.join(output_directory, base_name +
                                                  '_Spec{0}.dat'.format(spec_id)),
                            Separator='Space',
                            SpectrumList='{0}'.format(ws_index))

    return
Example #20
    def init_session(self, workspace_name, ipts_number, van_run_number, out_gsas_name,
                     sample_log_ws_name):
        """
        Initialize vanadium processing session
        :param workspace_name:
        :param ipts_number:
        :param van_run_number:
        :param out_gsas_name:
        :param sample_log_ws_name: required for proton charge
        :return:
        """
        datatypeutility.check_string_variable('Workspace name', workspace_name)
        datatypeutility.check_int_variable('IPTS number', ipts_number, (1, 99999))
        datatypeutility.check_int_variable('Vanadium run number', van_run_number, (1, 999999))
        datatypeutility.check_file_name(out_gsas_name, False, True, False, 'Output GSAS file name')
        datatypeutility.check_string_variable('Sample log workspace name', sample_log_ws_name)

        workspace = mantid_helper.retrieve_workspace(workspace_name)
        if workspace.id() == 'WorkspaceGroup':
            pass
        else:
            # create dictionary and etc
            raise NotImplementedError('Need to implement single workspace case to extract spectra')

        self._van_workspace_name = workspace_name

        self._ipts_number = ipts_number
        self._van_run_number = van_run_number
        self._output_gsas_name = out_gsas_name

        # parameter set up
        self._is_shift_case = False

        # convert to point data as requested
        mantid_helper.convert_to_point_data(self._van_workspace_name)
        mantid_helper.mtd_convert_units(self._van_workspace_name, 'dSpacing')

        self._sample_log_ws_name = sample_log_ws_name

        return
Example #21
    def _count_events_by_det_column(ws_name):
        """
        count events by detector column
        :param ws_name:
        :return:
        """
        high_angle_bank_start_index = 6468

        event_ws = mantid_helper.retrieve_workspace(ws_name,
                                                    raise_if_not_exist=True)

        source_pos = event_ws.getInstrument().getSource().getPos()
        sample_pos = event_ws.getInstrument().getSample().getPos()
        k_in = sample_pos - source_pos

        # form output array
        counts_array = numpy.ndarray(shape=(8 * 9, 2), dtype='float')

        for det_col_index in range(8 * 9):  # 9 8-packs
            # calculate neutron events
            ws_index_0 = high_angle_bank_start_index + 256 * det_col_index
            ws_index_f = ws_index_0 + 255
            counts_i = 0
            for iws in range(ws_index_0, ws_index_f + 1):
                counts_i += event_ws.getEventList(iws).getNumberEvents()
            # END-FOR

            # calculate two theta angle; integer division keeps a valid workspace index
            center_ws_index = (ws_index_0 + ws_index_f) // 2
            det_pos = event_ws.getDetector(center_ws_index).getPos()
            k_out = det_pos - sample_pos

            twotheta = k_out.angle(k_in) * 180. / numpy.pi

            counts_array[det_col_index][0] = twotheta
            counts_array[det_col_index][1] = counts_i
        # END-FOR

        return counts_array
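The 2theta above comes from Mantid's V3D.angle between the incident direction and the sample-to-detector direction. The same geometry in plain numpy, with toy positions:

import numpy

def angle_between(v1, v2):
    # angle in radians between two 3-vectors, like V3D.angle
    cos_angle = numpy.dot(v1, v2) / (numpy.linalg.norm(v1) * numpy.linalg.norm(v2))
    return numpy.arccos(numpy.clip(cos_angle, -1., 1.))

source_pos = numpy.array([0., 0., -43.754])  # toy positions in meters
sample_pos = numpy.array([0., 0., 0.])
det_pos = numpy.array([2., 0., 0.])          # 90 degrees off the beam axis

k_in = sample_pos - source_pos
k_out = det_pos - sample_pos
print(angle_between(k_out, k_in) * 180. / numpy.pi)  # 90.0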
Example #22
    def convert_detectors_to_wsindex(self, ref_ws_name, detid_list):
        """
        convert a list of detector IDs to workspace indexes
        :param detid_list:
        :return:
        """
        ws_index_list = list()
        det_id_boundary_list = self.create_detid_boundaries()
        ref_workspace = mantid_helper.retrieve_workspace(
            ref_ws_name, raise_if_not_exist=True)

        for detid in detid_list:
            location = bisect.bisect(det_id_boundary_list, detid)
            if location % 2 == 0:
                raise RuntimeError(
                    'Found a detector (ID = {0}) out of the boundary of any panel'
                    ''.format(detid))

            # print ('[DB...BAT] detector {0} is located at {1} in {2}'
            #        ''.format(detid, location, det_id_boundary_list))

            # convert
            panel = location // 2 + 1
            ws_index = VULCAN_PANEL_START_WSINDEX[self._generation][panel] + \
                detid - det_id_boundary_list[location-1]

            # check
            if ref_workspace.getDetector(ws_index).getID() != detid:
                raise RuntimeError(
                    'Workspace index {0} has detector ID {1} other than {2}'
                    ''.format(ws_index,
                              ref_workspace.getDetector(ws_index).getID(),
                              detid))

            # append
            ws_index_list.append(ws_index)
        # END-FOR

        return ws_index_list
Example #23
def get_run_date(ws_name, raw_file_name):
    """
    get the run's start or end date
    :param ws_name:
    :param raw_file_name:
    :return: datetime.datetime instance with information as YY/MM/DD
    """
    use_creation_date = False
    date_time = None

    if mantid_helper.workspace_does_exist(ws_name):
        # use run start time stored in workspace
        workspace = mantid_helper.retrieve_workspace(ws_name, True)
        try:
            date_time = mantid_helper.get_run_start(workspace, time_unit=None)
        except RuntimeError:
            use_creation_date = True
    else:
        # workspace not in ADS: fall back to the file creation date
        use_creation_date = True

    # get file creation time (may not be accurate if file is copied)
    if use_creation_date:
        raise RuntimeError('Implement get_file_creation_date()')
        # TODO date_time = get_file_creation_date(raw_file_name)

    return date_time
Example #24
    def save(self,
             diff_ws_name,
             run_date_time,
             gsas_file_name,
             ipts_number,
             run_number,
             gsas_param_file_name,
             align_vdrive_bin,
             van_ws_name,
             is_chopped_run,
             write_to_file=True):
        """
        Save a workspace to a GSAS file or a string
        :param diff_ws_name: diffraction data workspace
        :param run_date_time: date and time of the run
        :param gsas_file_name: output file name. None as not output
        :param ipts_number:
        :param run_number: if not None, run number
        :param gsas_param_file_name:
        :param align_vdrive_bin: Flag to align with VDRIVE bin edges/boundaries
        :param van_ws_name: name of vanadium workspaces loaded from GSAS (replacing vanadium_gsas_file)
        :param is_chopped_run: Flag such that the input workspaces is from an event-sliced workspace
        :param write_to_file: flag to write the text buffer to file
        :return: string as the file content
        """
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # set the unit to TOF
        if diff_ws.getAxis(0).getUnit().unitID() != 'TOF':
            ConvertUnits(InputWorkspace=diff_ws_name,
                         OutputWorkspace=diff_ws_name,
                         Target='TOF',
                         EMode='Elastic')
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # convert to Histogram Data
        if not diff_ws.isHistogramData():
            ConvertToHistogram(diff_ws_name, diff_ws_name)

        # get the binning parameters
        if align_vdrive_bin:
            bin_params_set = self._get_tof_bin_params(
                self._get_vulcan_phase(run_date_time),
                diff_ws.getNumberHistograms())
        else:
            # a binning parameter set for doing nothing
            bin_params_set = [(range(1,
                                     diff_ws.getNumberHistograms() + 1), None,
                               None)]

        # check for vanadium GSAS file name
        if van_ws_name is not None:
            # check whether a workspace exists
            if not mantid_helper.workspace_does_exist(van_ws_name):
                raise RuntimeError(
                    'Vanadium workspace {} does not exist in Mantid ADS'.
                    format(van_ws_name))
            van_ws = mantid_helper.retrieve_workspace(van_ws_name)

            # check number of histograms
            if mantid_helper.get_number_spectra(
                    van_ws) != mantid_helper.get_number_spectra(diff_ws):
                raise RuntimeError(
                    'Numbers of histograms between vanadium spectra and output GSAS are different'
                )
        else:
            van_ws = None
        # END-IF

        # rebin and then write output
        gsas_bank_buffer_dict = dict()
        num_bank_sets = len(bin_params_set)

        for bank_set_index in range(num_bank_sets):
            # get value
            bank_id_list, bin_params, tof_vector = bin_params_set[
                bank_set_index]

            # Rebin to these banks' parameters (output = Histogram)
            if bin_params is not None:
                Rebin(InputWorkspace=diff_ws_name,
                      OutputWorkspace=diff_ws_name,
                      Params=bin_params,
                      PreserveEvents=True)

            # Create output
            for bank_id in bank_id_list:
                # check vanadium bin edges
                if van_ws is not None:
                    # check whether the bins are same between GSAS workspace and vanadium workspace
                    unmatched, reason = self._compare_workspaces_dimension(
                        van_ws, bank_id, tof_vector)
                    if unmatched:
                        raise RuntimeError(
                            'Vanadium GSAS workspace {} does not match workspace {}: {}'
                            ''.format(van_ws_name, diff_ws_name, reason))
                # END-IF

                # write GSAS head considering vanadium
                gsas_section_i = self._write_slog_bank_gsas(
                    diff_ws_name, bank_id, tof_vector, van_ws)
                gsas_bank_buffer_dict[bank_id] = gsas_section_i
        # END-FOR

        # header
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)
        gsas_header = self._generate_vulcan_gda_header(diff_ws, gsas_file_name,
                                                       ipts_number, run_number,
                                                       gsas_param_file_name,
                                                       is_chopped_run)

        # form to a big string
        gsas_buffer = gsas_header
        for bank_id in sorted(gsas_bank_buffer_dict.keys()):
            gsas_buffer += gsas_bank_buffer_dict[bank_id]

        # write to HDD
        if write_to_file:
            datatypeutility.check_file_name(gsas_file_name,
                                            check_exist=False,
                                            check_writable=True,
                                            is_dir=False,
                                            note='Output GSAS file')
            with open(gsas_file_name, 'w') as g_file:
                g_file.write(gsas_buffer)

        return gsas_buffer
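When align_vdrive_bin is False, the method above builds a single do-nothing binning parameter set that covers every bank. A quick look at that structure, assuming a three-histogram workspace:

num_histograms = 3  # assumed stand-in for diff_ws.getNumberHistograms()
bin_params_set = [(range(1, num_histograms + 1), None, None)]

for bank_id_list, bin_params, tof_vector in bin_params_set:
    print(list(bank_id_list), bin_params, tof_vector)  # [1, 2, 3] None None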
Example #25
    def save_2theta_group(self, diff_ws_name, output_dir, run_date_time,
                          ipts_number, run_number, gsas_param_file_name,
                          van_ws_name, two_theta_array, tth_pixels_num_array,
                          target_bank_id, scale_factor):
        """ Save workspace from 2theta grouped
        :param diff_ws_name:
        :param output_dir:
        :param run_date_time:
        :param ipts_number:
        :param run_number:
        :param gsas_param_file_name:
        :param van_ws_name:
        :param two_theta_array:
        :param tth_pixels_num_array: array of integers for number of pixels of 2theta range for normalization
        :param target_bank_id:
        :param scale_factor: scale factor applied to the normalized counts
        :return:
        """
        # process input workspaces
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # set the unit to TOF
        if diff_ws.getAxis(0).getUnit().unitID() != 'TOF':
            ConvertUnits(InputWorkspace=diff_ws_name,
                         OutputWorkspace=diff_ws_name,
                         Target='TOF',
                         EMode='Elastic')
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # convert to Histogram Data
        if not diff_ws.isHistogramData():
            ConvertToHistogram(diff_ws_name, diff_ws_name)

        # vanadium
        if isinstance(van_ws_name, str) and len(van_ws_name) > 0:
            van_ws = mantid_helper.retrieve_workspace(van_ws_name)
        else:
            van_ws = None

        # get the binning parameters
        bin_params_set = self._get_tof_bin_params(
            self._get_vulcan_phase(run_date_time), 3)

        # check output directory
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # For each 2theta bin / spectrum, create a GSAS file
        for tth_id in range(diff_ws.getNumberHistograms()):
            # rebin and then write output
            gsas_bank_buffer_dict = dict()
            num_bank_sets = len(bin_params_set)

            for bank_set_index in range(num_bank_sets):
                # get value
                bank_id_list, bin_params, tof_vector = bin_params_set[
                    bank_set_index]

                # Rebin to these banks' parameters (output = Histogram)
                if bin_params is not None:
                    Rebin(InputWorkspace=diff_ws_name,
                          OutputWorkspace=diff_ws_name,
                          Params=bin_params,
                          PreserveEvents=True)

                # Create output
                for bank_id_i in bank_id_list:
                    # check vanadium bin edges
                    if van_ws is not None:
                        # check whether the bins are same between GSAS workspace and vanadium workspace
                        unmatched, reason = self._compare_workspaces_dimension(
                            van_ws, bank_id_i, tof_vector)
                        if unmatched:
                            raise RuntimeError(
                                'Vanadium GSAS workspace {} does not match workspace {}: {}'
                                ''.format(van_ws_name, diff_ws_name, reason))
                    # END-IF

                    # write GSAS head considering vanadium
                    if bank_id_i == target_bank_id:
                        # target bank to write: east/west
                        source_bank_id = tth_id + 1
                        norm_factor = tth_pixels_num_array[tth_id]
                    else:
                        source_bank_id = bank_id_i
                        norm_factor = -1

                    gsas_section_i = self._write_slog_bank_gsas(
                        diff_ws_name,
                        source_bank_id,
                        tof_vector,
                        van_ws,
                        gsas_bank_id=bank_id_i,
                        norm_factor=norm_factor,
                        scale_factor=scale_factor)
                    gsas_bank_buffer_dict[bank_id_i] = gsas_section_i
                    print('[DB...BAT] Write bank {} to GSAS bank {}'.format(
                        source_bank_id, bank_id_i))
            # END-FOR

            # header
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)
            gsas_file_name = os.path.join(output_dir,
                                          '{}.gda'.format(tth_id + 1))
            extra_info = '2theta {} to {}'.format(
                two_theta_array[tth_id], two_theta_array[tth_id + 1])
            gsas_header = self._generate_vulcan_gda_header(
                diff_ws, gsas_file_name, ipts_number, run_number,
                gsas_param_file_name, True, extra_info)

            # form to a big string
            gsas_buffer = gsas_header
            for bank_id in sorted(gsas_bank_buffer_dict.keys()):
                gsas_buffer += gsas_bank_buffer_dict[bank_id]

            # write to HDD
            datatypeutility.check_file_name(gsas_file_name,
                                            check_exist=False,
                                            check_writable=True,
                                            is_dir=False,
                                            note='Output GSAS file')
            with open(gsas_file_name, 'w') as g_file:
                g_file.write(gsas_buffer)

        # END-FOR (tth_id)

        return
Example #26
    def _write_slog_bank_gsas(self,
                              ws_name,
                              bank_id,
                              vulcan_tof_vector,
                              van_ws,
                              gsas_bank_id=None,
                              norm_factor=None,
                              scale_factor=10000.):
        """
        1. X: format to VDRIVE tradition (refer to ...)
        2. Y: native value
        3. Z: error bar
        :param ws_name:
        :param bank_id: target (output) bank ID
        :param vulcan_tof_vector: If None, then use vector X of workspace
        :param ...
        :param norm_factor: normalization factor
        :return:
        """
        # check vanadium: if not None, assume that number of bins and bin edges are correct
        if van_ws is not None:
            if van_ws.id() == 'WorkspaceGroup':
                van_vec_y = van_ws[bank_id - 1].readY(0)
                van_vec_e = van_ws[bank_id - 1].readE(0)
            else:
                van_vec_y = van_ws.readY(bank_id - 1)
                van_vec_e = van_ws.readE(bank_id - 1)
        else:
            van_vec_y = None
            van_vec_e = None

        # get workspace
        diff_ws = mantid_helper.retrieve_workspace(ws_name)
        if vulcan_tof_vector is None:
            vec_x = diff_ws.readX(bank_id - 1)
        else:
            vec_x = vulcan_tof_vector
        vec_y = diff_ws.readY(bank_id - 1)  # convert to workspace index
        # print ('[DB...BAT] Bank {}  Max Y = {}'.format(bank_id, vec_y.max()))
        # normalization
        if norm_factor is not None:
            # print ('[DB...BAT] Bank {}  Norm Factor = {}'.format(bank_id, norm_factor))
            if norm_factor <= 0.00000001:
                vec_y = vec_y * 0
            else:
                vec_y = vec_y / (1. * norm_factor) * scale_factor
        # END-IF-ELSE

        vec_e = diff_ws.readE(bank_id - 1)
        data_size = len(vec_y)

        # get geometry information
        l1 = self._cal_l1(diff_ws)
        two_theta, difc = self._get_2theta_difc(diff_ws, l1, bank_id - 1)

        bank_buffer = ''

        # write the virtual detector geometry information
        # Example:
        # Total flight path 45.754m, tth 90deg, DIFC 16356.3
        # Data for spectrum :0
        bank_buffer += '%-80s\n' % '# Total flight path {}m, tth {}deg, DIFC {}'.format(
            l1, two_theta, difc)
        if norm_factor is None:
            bank_buffer += '%-80s\n' % '# Data for spectrum :{}'.format(
                bank_id - 1)
        else:
            bank_buffer += '%-80s\n' % '# Data for spectrum :{}.  Inverse Norm factor = {}  Scale Factor = {}' \
                                       ''.format(bank_id - 1, norm_factor, scale_factor)

        # bank header: min TOF, max TOF, delta TOF
        # check the numeric value (not the formatted string) before writing
        if vec_x[0] <= 0:
            raise RuntimeError(
                'Cannot write out logarithmic data starting at zero or less')
        bc1 = '%.1f' % (vec_x[0])
        bc2 = '%.1f' % (vec_x[-1])
        bc3 = '%.7f' % ((vec_x[1] - vec_x[0]) / vec_x[0])

        if gsas_bank_id is None:
            gsas_bank_id = bank_id
        bank_header = 'BANK %d %d %d %s %s %s %s 0 FXYE' % (
            gsas_bank_id, data_size, data_size, 'SLOG', bc1, bc2, bc3)
        bank_buffer += '%-80s\n' % bank_header

        # write lines: not multiplied by bin width
        if van_vec_y is None:
            for index in range(data_size):
                x_i = '%.1f' % vec_x[index]
                y_i = '%.1f' % vec_y[index]
                e_i = '%.2f' % vec_e[index]
                data_line_i = '%12s%12s%12s' % (x_i, y_i, e_i)
                bank_buffer += '%-80s\n' % data_line_i
            # END-FOR
        else:
            # normalize by vanadium
            for index in range(data_size):
                x_i = '%.1f' % vec_x[index]
                y_i = '%.5f' % (vec_y[index] / van_vec_y[index])
                if vec_y[index] < 1.E-10:
                    alpha = 1.
                else:
                    alpha = vec_e[index] / vec_y[index]
                beta = van_vec_e[index] / van_vec_y[index]
                e_i = '%.5f' % (abs(vec_y[index] / van_vec_y[index]) *
                                math.sqrt(alpha**2 + beta**2))
                data_line_i = '%12s%12s%12s' % (x_i, y_i, e_i)
                bank_buffer += '%-80s\n' % data_line_i
            # END-FOR

        return bank_buffer
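The SLOG BANK header above encodes the first TOF, the last TOF and the constant logarithmic bin ratio. A self-contained check with a toy logarithmic TOF vector:

import numpy

# toy logarithmic binning: each bin edge is 0.01% above the previous one
vec_x = 3000. * numpy.power(1.0001, numpy.arange(5))

bc1 = '%.1f' % (vec_x[0])
bc2 = '%.1f' % (vec_x[-1])
bc3 = '%.7f' % ((vec_x[1] - vec_x[0]) / vec_x[0])  # the log step, 0.0001

bank_header = 'BANK %d %d %d %s %s %s %s 0 FXYE' % (1, 4, 4, 'SLOG', bc1, bc2, bc3)
print('%-80s' % bank_header)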
Example #27
    def chop_data_large_number_targets(self, raw_ws_name, tof_correction,
                                       output_dir, is_epoch_time, num_target_ws,
                                       delete_split_ws=True):
        """ Slice event workspace with large number of output workspaces
        chop data to a large number of output targets
        :param raw_ws_name: raw event workspace to get split
        :param tof_correction:
        :param output_dir:
        :param is_epoch_time:
        :param delete_split_ws:
        :return:
        """
        # get raw workspace
        raw_ws = mantid_helper.retrieve_workspace(raw_ws_name)

        # get run start time
        if is_epoch_time:
            run_start_ns = raw_ws.run().getProperty('proton_charge').firstTime().totalNanoseconds()
        else:
            run_start_ns = 0

        # get split information workspace
        split_ws_name, split_info_name = self._reductionSetup.get_splitters(throw_not_set=True)

        # in loop generate data
        num_loops = int(math.ceil(num_target_ws * 1. / MAX_CHOPPED_WORKSPACE_IN_MEM))

        total_status = True
        total_tup_list = list()
        total_error_message = ''
        for i_loop in range(num_loops):
            # get the subset of the splitters
            sub_split_ws_name = self.get_sub_splitters(split_start_index=i_loop * MAX_CHOPPED_WORKSPACE_IN_MEM,
                                                       split_stop_index=(
                                                           i_loop + 1) * MAX_CHOPPED_WORKSPACE_IN_MEM,
                                                       run_start_ns=run_start_ns)

            # split
            # pre-check
            if AnalysisDataService.doesExist(raw_ws_name) is False:
                raise NotImplementedError('Pre-check Raw workspace {0} cannot be found at loop {1} ({2}).'
                                          ''.format(raw_ws_name, i_loop, num_loops))

            status, ret_obj = mantid_helper.split_event_data(raw_ws_name=raw_ws_name, split_ws_name=sub_split_ws_name,
                                                             info_table_name=split_info_name,
                                                             target_ws_name=raw_ws_name+'_split',
                                                             tof_correction=False,
                                                             output_directory=output_dir,
                                                             delete_split_ws=delete_split_ws)

            # post check
            if AnalysisDataService.doesExist(raw_ws_name) is False:
                return False, str(NotImplementedError('Post-check Raw workspace {0} cannot be found at loop {1} ({2}).'
                                                      ''.format(raw_ws_name, i_loop, num_loops)))

            # process
            if status:
                # split with success
                assert isinstance(
                    ret_obj, list), 'Successful returned value must be a list of 2-tuples'
                total_tup_list.extend(ret_obj)
            else:
                # failed: append error message
                total_status = False
                total_error_message += '{0}\n'.format(ret_obj)

        # END-FOR

        if not total_status:
            return False, total_error_message

        return True, total_tup_list
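The batching arithmetic above splits N targets into ceil(N / MAX) loops over splitter indexes. A self-contained check, with an assumed value for the module constant:

import math

MAX_CHOPPED_WORKSPACE_IN_MEM = 40  # assumed value of the module constant
num_target_ws = 95

num_loops = int(math.ceil(num_target_ws * 1. / MAX_CHOPPED_WORKSPACE_IN_MEM))
for i_loop in range(num_loops):
    start = i_loop * MAX_CHOPPED_WORKSPACE_IN_MEM
    stop = (i_loop + 1) * MAX_CHOPPED_WORKSPACE_IN_MEM
    print(i_loop, start, stop)  # (0, 0, 40), (1, 40, 80), (2, 80, 120)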
Example #28
def export_vanadium_intensity_to_file(van_nexus_file, gsas_van_int_file):
    """
    export a vanadium to intensity file, which is of GSAS format
    NOTE: THIS IS VERY INSTRUMENT GEOMETRY SENSITIVE!
    :param van_nexus_file:
    :param gsas_van_int_file:
    :return:
    """
    # check
    assert isinstance(van_nexus_file, str), 'Vanadium NeXus file {0} must be a string but not a {1}.' \
                                            ''.format(van_nexus_file, type(van_nexus_file))
    if os.path.exists(van_nexus_file) is False:
        raise RuntimeError('Given vanadium NeXus path {0} is incorrect.'.format(van_nexus_file))

    assert isinstance(gsas_van_int_file, str), 'Target GSAS vanadium intensity file {0} must be a string but not a ' \
                                               '{1}.'.format(gsas_van_int_file,
                                                             type(gsas_van_int_file))

    # write to file
    try:
        int_file = open(gsas_van_int_file, 'w')
    except IOError as io_err:
        raise RuntimeError('Unable to write to file {0} due to {1}'.format(
            gsas_van_int_file, io_err))
    except OSError as os_err:
        raise RuntimeError('Unable to write to file {0} due to {1}'.format(
            gsas_van_int_file, os_err))

    # load data file
    out_file_name = os.path.basename(van_nexus_file).split('.')[0]
    mantid_helper.load_nexus(data_file_name=van_nexus_file,
                             output_ws_name=out_file_name, meta_data_only=False)
    event_ws = mantid_helper.retrieve_workspace(out_file_name)

    # Parse to intensity file
    int_buf = ''
    # num_spec = event_ws.getNumberHistograms()
    det_count = 0

    for row_index in range(0, 1224 + 1, 8):
        pack_index_west = list(range(0, 2464 + 1, 1232))
        pack_index_east = list(range(3696, 6160 + 1, 1232))
        pack_index_both = pack_index_west + pack_index_east
        for pack_index in pack_index_both:
            for i_ws in range(8):
                ws_index = row_index + pack_index + i_ws

                num_events = event_ws.getEventList(ws_index).getNumberEvents()
                # format to float with 8 significant digit
                format_event_str = format_float_number(num_events, 8)

                int_buf += '{0:>16}'.format(format_event_str)
                # start a new line at 8th detector's count
                if det_count == 8 * 6 - 1:
                    int_buf += '\n'
                    det_count = 0
                else:
                    det_count += 1
            # END-FOR (i_ws)
    # END-FOR

    int_file.write(int_buf)
    int_file.close()

    return
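The nested loops above walk 8-pixel groups across three west and three east 8-packs per row. A self-contained sketch that enumerates the workspace indexes of the first row, just to visualize the assumed layout:

row_index = 0
pack_index_west = list(range(0, 2464 + 1, 1232))     # [0, 1232, 2464]
pack_index_east = list(range(3696, 6160 + 1, 1232))  # [3696, 4928, 6160]

for pack_index in pack_index_west + pack_index_east:
    ws_indexes = [row_index + pack_index + i_ws for i_ws in range(8)]
    print(pack_index, ws_indexes[:3], '...')  # first 3 of 8 indexes per pack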
Example #29
def align_and_focus_event_ws(event_ws_name, output_ws_name, binning_params,
                             diff_cal_ws_name, grouping_ws_name,
                             reduction_params_dict, convert_to_matrix):
    """ Align and focus event workspace.  The procedure to reduce from the EventNexus includes
    1. compress event
    2. mask workspace
    3. align detectors
    4. sort events
    5. diffraction focus
    6. sort events
    7. edit instruments
    8. rebin (uniform binning)
    Output: still event workspace
    :exception RuntimeError: intolerable error
    :param event_ws_name:
    :param output_ws_name:
    :param binning_params:
    :param diff_cal_ws_name:
    :param grouping_ws_name:
    :param reduction_params_dict:
    :param convert_to_matrix:
    :return: string as ERROR message
    """
    # check inputs
    if not mantid_helper.is_event_workspace(event_ws_name):
        raise RuntimeError('Input {0} is not an EventWorkspace'.format(event_ws_name))
    if not mantid_helper.is_calibration_workspace(diff_cal_ws_name):
        diff_ws = mantid_helper.retrieve_workspace(diff_cal_ws_name)
        raise RuntimeError('Input {0} is not a Calibration workspace but a {1}'.format(diff_cal_ws_name,
                                                                                       diff_ws.__class__.__name__))
    # if not mantid_helper.is_masking_workspace(mask_ws_name):
    #     raise RuntimeError('Input {0} is not a Masking workspace'.format(mask_ws_name))
    if not mantid_helper.is_grouping_workspace(grouping_ws_name):
        raise RuntimeError('Input {0} is not a grouping workspace'.format(grouping_ws_name))

    datatypeutility.check_dict('Reduction parameter dictionary', reduction_params_dict)

    # Align detector
    mantidapi.AlignDetectors(InputWorkspace=event_ws_name,
                             OutputWorkspace=output_ws_name,
                             CalibrationWorkspace=diff_cal_ws_name)

    # # Mask detectors
    # mantid_helper.mask_workspace(to_mask_workspace_name=output_ws_name,
    #                              mask_workspace_name=mask_ws_name)

    # Sort events
    mantidapi.SortEvents(InputWorkspace=output_ws_name,
                         SortBy='X Value')

    # Diffraction focus
    event_ws = mantid_helper.retrieve_workspace(output_ws_name)
    if event_ws.getNumberEvents() == 0:
        print('[DB...BAT] {}: # events = {}'.format(event_ws, event_ws.getNumberEvents()))
        error_message = 'Unable to reduced {} as number of events = 0'.format(event_ws_name)
        raise RuntimeError(error_message)

    mantidapi.DiffractionFocussing(InputWorkspace=output_ws_name,
                                   OutputWorkspace=output_ws_name,
                                   GroupingWorkspace=grouping_ws_name,
                                   PreserveEvents=True)
    # Sort again!
    mantidapi.SortEvents(InputWorkspace=output_ws_name,
                         SortBy='X Value')

    # Compress events as an option
    if 'CompressEvents' in reduction_params_dict:
        compress_events_tolerance = reduction_params_dict['CompressEvents']['Tolerance']
        print('[DB...BAT] User-specified compress tolerance = {}'.format(compress_events_tolerance))
        mantidapi.CompressEvents(InputWorkspace=output_ws_name,
                                 OutputWorkspace=output_ws_name,
                                 Tolerance=compress_events_tolerance)

    # rebin
    if binning_params is not None:
        mantid_helper.rebin(workspace_name=output_ws_name,
                            params=binning_params, preserve=not convert_to_matrix)

    # Edit instrument geometry as an option
    if 'EditInstrumentGeometry' in reduction_params_dict:
        try:
            # TODO - NIGHT - In case the number of histograms of output workspace does not match (masked a lot) ...
            # TODO - FIXME - 27 bank Polar and Azimuthal are all None
            edit_geometry_dict = reduction_params_dict['EditInstrumentGeometry']
            print('[DB...BAT] EditInstrumentGeometry on {}: L1 = {}, SpectrumIDs = {}, '
                  'L2 = {}, Polar = {}, Azimuthal = {}'
                  ''.format(output_ws_name, mantid_helper.VULCAN_L1,
                            edit_geometry_dict['SpectrumIDs'], edit_geometry_dict['L2'],
                            edit_geometry_dict['Polar'], edit_geometry_dict['Azimuthal']))
            mantidapi.EditInstrumentGeometry(Workspace=output_ws_name,
                                             PrimaryFlightPath=mantid_helper.VULCAN_L1,
                                             SpectrumIDs=edit_geometry_dict['SpectrumIDs'],
                                             L2=edit_geometry_dict['L2'],
                                             Polar=edit_geometry_dict['Polar'],
                                             Azimuthal=edit_geometry_dict['Azimuthal'])
            # Other EditInstrumentGeometry properties not set here:
            # DetectorIDs, InstrumentName
        except RuntimeError as run_err:
        except RuntimeError as run_err:
            error_message = 'Non-critical failure on EditInstrumentGeometry: {}\n'.format(run_err)
            return error_message

    return ''
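
For reference, a minimal sketch of what `reduction_params_dict` might look like when calling the focusing routine above. The top-level keys ('CompressEvents', 'EditInstrumentGeometry') and their sub-keys are the ones read by the code; the concrete values are illustrative assumptions only:

# Hypothetical parameter dictionary for the focus routine above.
# Key names match those read by the code; the values are made-up examples.
reduction_params_dict = {
    'CompressEvents': {
        'Tolerance': 1.E-5,                  # TOF tolerance for merging events
    },
    'EditInstrumentGeometry': {
        'SpectrumIDs': [1, 2, 3],            # one entry per focused bank
        'L2': [2.0, 2.0, 2.0],               # secondary flight paths (meters)
        'Polar': [90.0, -90.0, 150.0],       # two-theta angles (degrees)
        'Azimuthal': [0.0, 0.0, 0.0],        # out-of-plane angles (degrees)
    },
}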
Exemplo n.º 30
0
    def locate_cycle_boundaries(self, raw_ws_name, smoothed_ws_name, x_start,
                                x_stop, cycle_local_max_lower_limit,
                                num_neighbors, trust_start_stop):
        def check_statistic(max_x_vector, max_y_vector, level):
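            # Report cycle-time statistics for a series of local maxima: the
            # mean spacing between neighboring maxima estimates the cycle
            # period, and abnormally short spacings flag false maxima caused
            # by noise.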
            diff_max_x_vec = max_x_vector[1:] - max_x_vector[:-1]
            std_dev = numpy.std(diff_max_x_vec)
            avg_cycle_time = numpy.average(diff_max_x_vec)
            false_indexes = numpy.where(diff_max_x_vec < std_dev)[0]

            msg = 'Cycle time = {} +/- {}\nFalse local maxima: {}, {}' \
                  ''.format(avg_cycle_time, std_dev,
                            max_x_vector[false_indexes], max_y_vector[false_indexes])
            print('[{}]: {}'.format(level.upper(), msg))

            return avg_cycle_time, std_dev

        # check inputs
        datatypeutility.check_float_variable('Starting time of cycles',
                                             x_start, (0, None))
        datatypeutility.check_float_variable('Stopping time of cycles', x_stop,
                                             (0, None))
        if x_start >= x_stop:
            raise RuntimeError(
                'Starting time {} cannot be equal to or later than stopping time {}'
                ''.format(x_start, x_stop))

        # get workspaces
        raw_ws = mantid_helper.retrieve_workspace(raw_ws_name, True)
        smooth_ws = mantid_helper.retrieve_workspace(smoothed_ws_name, True)

        # use smoothed workspace to locate maxima first
        vec_x = smooth_ws.readX(0)
        vec_y = smooth_ws.readY(0)

        raw_vec_times = raw_ws.readX(0)
        raw_vec_value = raw_ws.readY(0)

        # determine start and stop indexes
        start_index = numpy.searchsorted(vec_x, x_start)
        # guard against x_stop beyond the last point: searchsorted may return len(vec_x)
        stop_index = min(numpy.searchsorted(vec_x, x_stop, 'right'), len(vec_x) - 1)
        print('[INFO] Start X = {}, Y = {}, Index = {}'.format(
            vec_x[start_index], vec_y[start_index], start_index))
        print('[INFO] Stop  X = {}, Y = {}, Index = {}'.format(
            vec_x[stop_index], vec_y[stop_index], stop_index))

        # Step 1: use smoothed data to find local maxima: use 'argrelextrema' to find local maxima
        # check Y only
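        # argrelextrema returns a tuple of index arrays (one per axis) marking
        # points strictly greater than both neighbors; on noisy data this
        # yields many spurious maxima, which are filtered out below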
        # roi_vec_x = vec_x[start_index:stop_index]
        roi_vec_y = vec_y[start_index:stop_index]

        roi_maxima_indexes = argrelextrema(roi_vec_y, numpy.greater)
        roi_maxima_indexes = roi_maxima_indexes[0]  # extract the index array from the tuple
        print('[DEBUG] maximum indexes (in ROI arrays): {}'.format(
            roi_maxima_indexes))

        # convert to the raw
        local_maxima_indexes = roi_maxima_indexes + start_index

        # signal noise creates many spurious local maxima: keep only those whose
        # raw log value exceeds the user-given lower limit
        max_y_vector = raw_vec_value[local_maxima_indexes]
        y_indexes = numpy.where(max_y_vector > cycle_local_max_lower_limit)
        local_maxima_indexes = local_maxima_indexes[y_indexes]
        maxima_times_vec = raw_vec_times[local_maxima_indexes]    # times of local maxima
        maxima_value_vec = raw_vec_value[local_maxima_indexes]    # log values of local maxima

        check_statistic(maxima_times_vec, maxima_value_vec, level='debug')

        # Step 2: map from smoothed data to raw data (real maxima)
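        # Smoothing shifts and flattens peaks, so each candidate index is
        # refined by taking the argmax of the raw data within +/- num_neighbors
        # points; a set removes duplicates when two candidates converge to the
        # same raw maximum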
        max_index_set = set()
        for max_index_i in local_maxima_indexes:
            # search the num_neighbors points on either side of the smoothed maximum
            i_start = max(0, max_index_i - num_neighbors)
            i_stop = max_index_i + num_neighbors
            max_index_i = numpy.argmax(raw_vec_value[i_start:i_stop])
            max_index_set.add(max_index_i + i_start)
        # END-FOR

        # convert the set back to a sorted index vector: these refined indexes
        # replace the smoothed-data estimates from the previous step
        local_maxima_indexes = numpy.array(sorted(max_index_set))
        maxima_times_vec = raw_vec_times[local_maxima_indexes]
        maxima_value_vec = raw_vec_value[local_maxima_indexes]

        # check
        avg_cycle_time, std_dev = check_statistic(maxima_times_vec,
                                                  maxima_value_vec, 'info')

        # create a workspace
        CreateWorkspace(DataX=maxima_times_vec,
                        DataY=maxima_value_vec,
                        NSpec=1,
                        OutputWorkspace='debug_maxima')

        if maxima_times_vec.shape[0] < 2:
            raise RuntimeError(
                'Only found {} local maxima. Unable to proceed'.format(
                    maxima_times_vec.shape[0]))

        # Step 3: find (real) minima by finding minimum between 2 neighboring local maxima
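        # Between two adjacent maxima there is exactly one cycle minimum.
        # Slot i + 1 is used because slots 0 and -1 are reserved for the
        # boundary minima at the start and end of the scan, filled in below.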
        local_minima_indexes = numpy.ndarray(shape=(maxima_value_vec.shape[0] + 1,),
                                             dtype='int64')
        for i_cycle in range(len(local_maxima_indexes) - 1):
            # locate the minima
            start_index_i = local_maxima_indexes[i_cycle]
            stop_index_i = local_maxima_indexes[i_cycle + 1]
            print('# index: start = {}, stop = {}, # points = {}'.format(
                start_index_i, stop_index_i, stop_index_i - start_index_i))
            vec_x_i = raw_vec_times[start_index_i:stop_index_i]
            vec_y_i = raw_vec_value[start_index_i:stop_index_i]
            print('[DEBUG] Cycle {}: Start @ {}, {}, Stop @ {}, {}'
                  ''.format(i_cycle, vec_x_i[0], vec_y_i[0], vec_x_i[-1],
                            vec_y_i[-1]))

            # find local minima
            min_index_i = numpy.argmin(vec_y_i)
            print(
                '[DEBUG]  {}-th Local minimum: X = {}, Y = {} @ index = {} ... total index = {}'
                ''.format(i_cycle + 1, vec_x_i[min_index_i],
                          vec_y_i[min_index_i], min_index_i,
                          start_index_i + min_index_i))

            # store the result
            local_minima_indexes[i_cycle + 1] = start_index_i + min_index_i
        # END-FOR

        # add the first and the last local minima: each cycle starts and ends at the lower temperature
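        # cycle_indexes_size approximates the number of samples in one full
        # cycle and bounds the searchsorted windows below; note this assumes
        # at least three maxima were found so that index [2] is populated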
        cycle_indexes_size = local_minima_indexes[2] - local_minima_indexes[1]

        if trust_start_stop:
            start_cycle_index = numpy.searchsorted(
                raw_vec_times[0:local_maxima_indexes[0]], x_start, 'right')
            local_minima_indexes[0] = start_cycle_index

            end_cycle_index = numpy.searchsorted(
                raw_vec_times[local_maxima_indexes[-1]:], x_stop, 'left')
            local_minima_indexes[-1] = end_cycle_index + local_maxima_indexes[-1]

        else:
            # use the 1st (i = 1) local minimum time to estimate the start (i = 0)
            minimum_1_time = raw_vec_times[local_minima_indexes[1]]
            estimated_start_time = minimum_1_time - avg_cycle_time
            search_start = local_minima_indexes[1] - int(1.01 * cycle_indexes_size)
            start_cycle_index = numpy.searchsorted(
                raw_vec_times[search_start:local_maxima_indexes[0]],
                estimated_start_time, 'right')
            # numpy.searchsorted returns a numpy integer, not a Python int
            assert isinstance(start_cycle_index, (int, numpy.integer)), \
                '{}'.format(type(start_cycle_index))
            local_minima_indexes[0] = start_cycle_index + search_start

            # use the second-to-last local minimum (i = -2) to estimate the stop (i = -1)
            print('[DEBUG] Second-to-last minimum index: {}'.format(local_minima_indexes[-2]))
            estimated_stop_time = raw_vec_times[local_minima_indexes[-2]] + avg_cycle_time
            print('[DEBUG] Estimated stop time: {}'.format(estimated_stop_time))
            end_cycle_index = numpy.searchsorted(
                raw_vec_times[local_maxima_indexes[-1]:(local_minima_indexes[-2] +
                                                        int(1.01 * cycle_indexes_size))],
                estimated_stop_time, 'left')
            local_minima_indexes[-1] = end_cycle_index + local_maxima_indexes[-1]
        # END-IF

        # create a workspace
        minima_times_vec = raw_vec_times[local_minima_indexes]
        minima_value_vec = raw_vec_value[local_minima_indexes]
        CreateWorkspace(DataX=minima_times_vec,
                        DataY=minima_value_vec,
                        NSpec=1,
                        OutputWorkspace='debug_minima')

        return local_minima_indexes, local_maxima_indexes
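
The core of the method above can be exercised outside Mantid. Below is a minimal, self-contained sketch (names and data are invented for illustration) that smooths a noisy periodic signal, finds candidate maxima with scipy's argrelextrema, and refines each candidate against the raw data, mirroring Steps 1 and 2 of locate_cycle_boundaries:

import numpy
from scipy.signal import argrelextrema

numpy.random.seed(8)

# synthetic "furnace log": 5 temperature cycles with additive noise
times = numpy.linspace(0., 500., 5001)
raw = numpy.sin(2. * numpy.pi * times / 100.) + 0.1 * numpy.random.randn(times.size)

# crude moving-average smoothing (stand-in for the smoothed workspace)
kernel = numpy.ones(51) / 51.
smoothed = numpy.convolve(raw, kernel, mode='same')

# Step 1: candidate maxima on the smoothed signal, filtered by a lower limit
candidates = argrelextrema(smoothed, numpy.greater)[0]
candidates = candidates[raw[candidates] > 0.5]

# Step 2: refine each candidate by the raw argmax in a +/- 25-point window
num_neighbors = 25
refined = set()
for idx in candidates:
    i_start = max(0, idx - num_neighbors)
    refined.add(i_start + numpy.argmax(raw[i_start:idx + num_neighbors]))
refined = numpy.array(sorted(refined))

cycle_times = numpy.diff(times[refined])
print('Cycle time = {} +/- {}'.format(numpy.average(cycle_times), numpy.std(cycle_times)))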