Example 1
    def get_detectors_rows_cols(self, det_id_list):
        """
        get the row numbers of the given detector IDs
        :param det_id_list:
        :return:
        """
        datatypeutility.check_list('Detector IDs', det_id_list)

        panel_row_set = set()
        panel_col_set = set()

        for det_id in det_id_list:
            panel_id, row_index, col_index = self.get_detector_location(det_id)
            panel_row_set.add((panel_id, row_index))
            panel_col_set.add((panel_id, col_index))
        # END-FOR

        print('[DB...BAT] {0} Rows   : {1}'.format(len(panel_row_set), sorted(panel_row_set)))
        print('[DB...BAT] {0} Columns: {1}'.format(len(panel_col_set), sorted(panel_col_set)))

        return panel_row_set, panel_col_set
Example 2
    def set_time_slicers(self, time_slicer_list):
        """
        clear the current table and set new time slicers (in a list) to this table
        :param time_slicer_list: list of times (in float)
        :return:
        """
        # check inputs
        datatypeutility.check_list('Event splitters', time_slicer_list)
        if len(time_slicer_list) == 0:
            raise RuntimeError(
                'An empty slicer list is input to set_time_slicers')
        else:
            # sort
            time_slicer_list.sort()

        # clear the current table
        self.remove_all_rows()

        # check it is type 1 (list of times) or type 2 (list of splitters)
        if isinstance(time_slicer_list[0], tuple) or isinstance(
                time_slicer_list[0], list):
            # type 2: splitters
            for slicer_index, time_slicer_tup in enumerate(time_slicer_list):
                if len(time_slicer_tup) <= 1:
                    raise RuntimeError('{}-th slicer has too few items: {}'
                                       ''.format(slicer_index,
                                                 time_slicer_tup))
                elif len(time_slicer_tup) == 2 or time_slicer_tup[2] is None:
                    # use automatic slicer order index as target workspace
                    self.append_row([
                        time_slicer_tup[0], time_slicer_tup[1],
                        slicer_index + 1, True
                    ])
                else:
                    # use user specified as target workspace
                    self.append_row([
                        time_slicer_tup[0], time_slicer_tup[1],
                        time_slicer_tup[2], True
                    ])
                # END-IF-ELSE
            # END-FOR
        else:
            # type 1: list of time stamps: set time
            for i_time in range(len(time_slicer_list) - 1):
                start_time = time_slicer_list[i_time]
                stop_time = time_slicer_list[i_time + 1]
                self.append_row([start_time, stop_time, i_time, True])
        # END-IF-ELSE

        return
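
Below is a minimal standalone sketch (not part of the class above) of the two accepted slicer-list forms, and of how a plain list of time stamps is expanded into consecutive (start, stop, target) rows; the times and target name are illustrative only.

# type 1: plain time boundaries; type 2: (start, stop[, target]) splitter tuples
time_boundaries = [0.0, 10.0, 20.0, 30.0]
splitters = [(0.0, 10.0), (10.0, 30.0, 'hot')]

# type 1 is expanded to consecutive rows, one per interval
rows = [(time_boundaries[i], time_boundaries[i + 1], i)
        for i in range(len(time_boundaries) - 1)]
print(rows)   # [(0.0, 10.0, 0), (10.0, 20.0, 1), (20.0, 30.0, 2)]
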
Example 3
    def scan_runs_from_archive(self, ipts_number, run_number_list):
        """
        Scan VULCAN archive with a specific IPTS by guessing the name of NeXus and checking its existence.
        :param ipts_number:
        :param run_number_list:
        :return: archive key and error message
        """
        # check
        assert isinstance(ipts_number, int), 'IPTS number must be an integer.'
        datatypeutility.check_list('Run numbers', run_number_list)
        assert len(run_number_list) > 0, 'Run number list cannot be empty.'

        # form IPTS
        ipts_dir = os.path.join(self._archiveRootDirectory,
                                'IPTS-%d' % ipts_number)
        if not os.path.exists(ipts_dir):
            raise RuntimeError(
                'IPTS dir {} does not exist for IPTS = {}'.format(
                    ipts_dir, ipts_number))

        # archive key:
        archive_key = ipts_number
        if archive_key not in self._iptsInfoDict:
            self._iptsInfoDict[archive_key] = dict()
        err_msg = ''

        # locate file
        for run_number in sorted(run_number_list):
            # form file
            nexus_file_name = self.locate_event_nexus(ipts_number, run_number)

            if nexus_file_name is None:
                err_msg += 'Run %d does not exist in IPTS %s\n' % (run_number,
                                                                   ipts_number)
            else:
                # create a run information dictionary and put to information-buffering dictionaries
                run_info = {
                    'run': run_number,
                    'ipts': ipts_number,
                    'file': nexus_file_name,
                    'time': None
                }
                self._iptsInfoDict[archive_key][run_number] = run_info
                self._runIptsDict[run_number] = ipts_number
            # END-IF
        # END-FOR

        return archive_key, err_msg
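
A standalone sketch of the same scan pattern, assuming a hypothetical locate_nexus() helper and the usual /SNS/VULCAN/IPTS-xxxx/nexus layout; the IPTS and run numbers below are illustrative only.

import os

def locate_nexus(ipts_number, run_number, root='/SNS/VULCAN'):
    # hypothetical locator: return the guessed NeXus path if it exists, else None
    path = os.path.join(root, 'IPTS-{}'.format(ipts_number),
                        'nexus', 'VULCAN_{}.nxs.h5'.format(run_number))
    return path if os.path.exists(path) else None

ipts_info = dict()   # [ipts][run] = run information dictionary
err_msg = ''
for run in sorted([160989, 160990]):
    nexus_file = locate_nexus(22752, run)
    if nexus_file is None:
        err_msg += 'Run {} does not exist in IPTS {}\n'.format(run, 22752)
    else:
        ipts_info.setdefault(22752, dict())[run] = {'run': run, 'ipts': 22752,
                                                    'file': nexus_file, 'time': None}
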
Example 4
    def focus_workspace_list(self, ws_name_list, gsas_ws_name_list, group_ws_name):
        """ Do diffraction focus on a list workspaces and also convert them to IDL GSAS
        This is the main execution body to be executed in multi-threading environment
        :param ws_name_list:
        :param gsas_ws_name_list: name for GSAS
        :param group_ws_name: name for grouping workspace
        :return:
        """
        datatypeutility.check_list('Workspace names', ws_name_list)
        datatypeutility.check_list('(Output) GSAS workspace name list', gsas_ws_name_list)
        if len(ws_name_list) != len(gsas_ws_name_list):
            raise RuntimeError('Input workspace names {} have different number than output GSAS workspace names {}'
                               ''.format(ws_name_list, gsas_ws_name_list))

        for index in range(len(ws_name_list)):
            # set GSAS workspace name same as input workspace name
            ws_name = ws_name_list[index]
            gsas_ws_name_list[index] = ws_name
            gsas_ws_name = ws_name

            datatypeutility.check_string_variable('Workspace name', ws_name)
            datatypeutility.check_string_variable('Output GSAS workspace name', gsas_ws_name)
            # skip empty workspace name that might be returned from FilterEvents
            if len(ws_name) == 0:
                continue
            # focus (simple): same as diffraction_focus() but a simplified version
            ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name, Target='dSpacing')
            # diffraction focus
            DiffractionFocussing(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                                 GroupingWorkspace=group_ws_name)
            # convert unit to TOF
            ConvertUnits(InputWorkspace=ws_name, OutputWorkspace=ws_name,
                         Target='TOF', ConvertFromPointData=False)
            # edit instrument
            try:
                EditInstrumentGeometry(Workspace=ws_name,
                                       PrimaryFlightPath=self._focus_instrument_dict['L1'],
                                       SpectrumIDs=self._focus_instrument_dict['SpectrumIDs'],
                                       L2=self._focus_instrument_dict['L2'],
                                       Polar=self._focus_instrument_dict['Polar'],
                                       Azimuthal=self._focus_instrument_dict['Azimuthal'])
            except RuntimeError as run_err:
                print('[WARNING] Non-critical error from EditInstrumentGeometry for {}: {}'
                      ''.format(ws_name, run_err))
        # END-FOR

        return
Example 5
    def plot_contour(self, y_indexes, data_set_list):
        """
        plot 2D contour figure
        :param y_indexes: Indexes for Y axis.  It can be (1) run numbers  (2) chop sequences
        :param data_set_list: list of (vec_x, vec_y) 2-tuples, one per Y-axis index
        :return:
        """
        # check
        datatypeutility.check_list('Y axis indexes', y_indexes)

        size_set = set()
        for data_set in data_set_list:
            vec_x, vec_y = data_set
            assert len(vec_x) == len(vec_y), \
                'Size of vector X (%d) and vector Y (%d) must be same!' % (len(vec_x), len(vec_y))
            size_set.add(len(vec_x))
        # END-FOR
        assert len(size_set) == 1, \
            'All the reduced data must have equal sizes but not %s.' % str(size_set)
        vec_x = data_set_list[0][0]

        # build mesh
        grid_x, grid_y = np.meshgrid(vec_x, y_indexes)

        matrix_y = np.ndarray(grid_x.shape, dtype='float')
        for i in range(len(y_indexes)):
            matrix_y[i] = data_set_list[i][1]

        n = len(y_indexes)
        vec_y = np.ndarray(shape=(n, ), dtype='int')
        for i in range(n):
            vec_y[i] = y_indexes[i]

        self.ui.graphicsView_mainPlot.canvas.add_contour_plot(
            vec_x, np.array(y_indexes), matrix_y)

        return
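
A self-contained sketch of the same contour assembly with synthetic data; matplotlib stands in here for the embedded graphics view used by the class above, and all values are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

y_indexes = [1, 2, 3]                              # e.g. chop sequence indexes
vec_x = np.linspace(0.5, 2.5, 200)                 # common X grid shared by all data sets
data_set_list = [(vec_x, np.exp(-(vec_x - 1.0 - 0.1 * i) ** 2 / 0.01))
                 for i in range(len(y_indexes))]

# stack the Y vectors into a (num_y, num_x) matrix for the 2D contour
matrix_y = np.array([vec_y for _, vec_y in data_set_list])
plt.contourf(vec_x, np.array(y_indexes), matrix_y)
plt.xlabel('X (e.g. dSpacing)')
plt.ylabel('Y index')
plt.show()
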
Example 6
    def mask_detectors(self, ws_name, roi_file_list, mask_file_list):
        """
        mask detectors by ROI and/or mask
        :param ws_name:
        :param roi_file_list:
        :param mask_file_list:
        :return: workspace reference
        """
        # check inputs
        datatypeutility.check_string_variable('Workspace name', ws_name)

        datatypeutility.check_list('ROI file names', roi_file_list)
        datatypeutility.check_list('Mask file names', mask_file_list)

        # return if nothing to do
        if len(roi_file_list) + len(mask_file_list) == 0:
            matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)
            return matrix_ws

        # load mask file and roi file
        roi_ws_list = list()
        mask_ws_list = list()

        for roi_file in roi_file_list:
            roi_ws_name_i = self.load_mask_xml(roi_file, ws_name, is_roi=True)
            roi_ws_list.append(roi_ws_name_i)
        for mask_file in mask_file_list:
            mask_ws_name_i = self.load_mask_xml(mask_file, ws_name, is_roi=False)
            mask_ws_list.append(mask_ws_name_i)

        # mask by ROI workspaces
        self.mask_detectors_by_rois(ws_name, roi_ws_list)
        # mask by mask workspaces
        self.mask_detectors_by_masks(ws_name, mask_ws_list)

        matrix_ws = mantid_helper.retrieve_workspace(ws_name, raise_if_not_exist=True)

        return matrix_ws
Example 7
def convert_pixels_to_workspace_indexes_v1(pixel_id_list):
    """
    convert pixel IDs to workspace indexes
    :param pixel_id_list:
    :return:
    """
    # check inputs
    datatypeutility.check_list('Pixel IDs', pixel_id_list)

    start_pixel_id = START_PIXEL_ID[1]

    # convert to sorted array
    pixel_id_list.sort()
    pixel_id_vec = numpy.array(pixel_id_list)

    # separate array ... TODO - 20181105 - Think of a good numpy algorithm to do it efficiently
    bank_id = 3
    if pixel_id_vec[0] < start_pixel_id[bank_id][0]:
        raise RuntimeError('Contact developer to extend method convert_pixels_to_workspace_indexes_v1() to '
                           'whole instrument')

    ws_index_vec = pixel_id_vec - start_pixel_id[bank_id][1]

    return ws_index_vec
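
A minimal numpy sketch of the same offset mapping, with a made-up starting pixel ID standing in for the START_PIXEL_ID table.

import numpy as np

BANK3_START_PIXEL_ID = 100000         # assumed value, for illustration only
BANK3_START_WS_INDEX = 0              # workspace index of that first pixel (assumed)

pixel_ids = np.array(sorted([100010, 100003, 100025]))
ws_indexes = pixel_ids - BANK3_START_PIXEL_ID + BANK3_START_WS_INDEX
print(ws_indexes)                     # [ 3 10 25]
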
Example 8
    def get_detectors_in_row(self, panel_row_list):
        """

        :param panel_row_list: a list of 2-tuples
        :return:
        """
        datatypeutility.check_list('Panel index / row index list',
                                   panel_row_list)
        panel_row_list.sort()

        det_id_list = list()
        for panel_row_tuple in panel_row_list:
            datatypeutility.check_tuple('Panel index / row index',
                                        panel_row_tuple, 2)
            panel_index, row_index = panel_row_tuple

            zero_det_id = VULCAN_PANEL_DETECTORS[
                self._generation][panel_index][0]
            for col_index in range(
                    VULCAN_PANEL_COLUMN_COUNT[self._generation][panel_index]):
                if VULCAN_PANEL_COLUMN_MAJOR[self._generation][panel_index]:
                    det_id_i = zero_det_id + col_index * \
                        VULCAN_PANEL_ROW_COUNT[self._generation][panel_index] + row_index
                    det_id_list.append(det_id_i)
                else:
                    raise NotImplementedError(
                        'Row major case is not implemented')
            # END-FOR
        # END-FOR
        det_id_list.sort()

        print(
            '[DB] There are {0} pixels in given {1} rows starting from detector ID {2}'
            ''.format(len(det_id_list), len(panel_row_list), det_id_list[0]))

        return det_id_list
Example 9
    def get_detectors_in_column(self, panel_column_list):
        """

        :param panel_column_list:  a list of 2-tuples
        :return:
        """
        datatypeutility.check_list('Panel index / column index list',
                                   panel_column_list)
        panel_column_list.sort()

        det_id_list = list()
        for panel_col_tuple in panel_column_list:
            datatypeutility.check_tuple('Panel index / column index',
                                        panel_col_tuple, 2)
            panel_index, col_index = panel_col_tuple

            zero_det_id = VULCAN_PANEL_DETECTORS[
                self._generation][panel_index][0]
            if VULCAN_PANEL_COLUMN_MAJOR[self._generation][panel_index]:
                num_rows_per_column = VULCAN_PANEL_ROW_COUNT[
                    self._generation][panel_index]
                det_id_list.extend(
                    range(zero_det_id + col_index * num_rows_per_column,
                          zero_det_id + (col_index + 1) * num_rows_per_column))
            else:
                raise NotImplementedError('Row major case is not implemented')
            # END-IF-ELSE
        # END-FOR
        det_id_list.sort()

        print(
            '[DB] There are {0} pixels in given {1} columns starting from detector ID {2}'
            ''.format(len(det_id_list), len(panel_column_list),
                      det_id_list[0]))

        return det_id_list
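
A standalone sketch of the column-major ID arithmetic used above, with hypothetical panel constants standing in for the VULCAN_PANEL_* tables.

ZERO_DET_ID = 0      # first detector ID of the panel (assumed)
NUM_ROWS = 256       # rows per column in a column-major panel (assumed)

def detectors_in_column(col_index):
    # all detector IDs belonging to one column of a column-major panel
    return list(range(ZERO_DET_ID + col_index * NUM_ROWS,
                      ZERO_DET_ID + (col_index + 1) * NUM_ROWS))

print(detectors_in_column(2)[:3])   # [512, 513, 514]
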
Example 10
    def generate_sliced_logs(self, ws_name_list, log_type, append=False):
        """
        generate sliced logs
        :param ws_name_list:
        :param log_type: either loadframe or furnace
        :param append: if true and if the file to output exists, then just append the new content at the end
        :return:
        """
        # check inputs
        datatypeutility.check_list('Sliced workspace names', ws_name_list)
        if len(ws_name_list) == 0:
            raise RuntimeError('Workspace names (in list) cannot be empty.')

        if log_type != 'loadframe' and log_type != 'furnace':
            raise RuntimeError('Exported sample log type {0} of type {1} is not supported. '
                               'It must be either furnace or loadframe'.format(log_type, type(log_type)))

        # get workspaces and properties
        # NOTE: workspace names are given in order. No need to sort again

        # get the properties' names list
        ws_name = ws_name_list[0]
        if ws_name == '':
            ws_name = ws_name_list[1]
        workspace = AnalysisDataService.retrieve(ws_name)
        property_name_list = list()
        for sample_log in workspace.run().getProperties():
            p_name = sample_log.name
            property_name_list.append(p_name)
        property_name_list.sort()
        # run_start = DateAndTime(workspace.run().getProperty('run_start').value)  # Kernel.DateAndtime
        run_start = workspace.run().getProperty('proton_charge').times[0]

        # start value
        start_file_name = os.path.join(self._choppedDataDirectory,
                                       '{0}sampleenv_chopped_start.txt'.format(self._run_number))
        mean_file_name = os.path.join(self._choppedDataDirectory,
                                      '{0}sampleenv_chopped_mean.txt'.format(self._run_number))
        end_file_name = os.path.join(self._choppedDataDirectory,
                                     '{0}sampleenv_chopped_end.txt'.format(self._run_number))
        header_file_name = os.path.join(self._choppedDataDirectory,
                                        '{0}sampleenv_header.txt'.format(self._run_number))

        # output
        # create Pandas series dictionary
        start_series_dict = dict()
        mean_series_dict = dict()
        end_series_dict = dict()
        mts_columns = list()

        # set up correct header list
        if log_type == 'loadframe':
            # load frame
            header_list = reduce_VULCAN.MTS_Header_List
            if header_list[0][0] != 'ProtonCharge':
                # insert proton charge explicitly but avoid adding twice
                header_list.insert(0, ('ProtonCharge', 'proton_charge'))
        else:
            # furnace
            header_list = reduce_VULCAN.Furnace_Header_List

        # initialize the data structure for output
        for entry in header_list:  # use the header list selected for the log type above
            # pd_series = pd.Series()
            mts_name, log_name = entry
            start_series_dict[mts_name] = pd.Series()
            mean_series_dict[mts_name] = pd.Series()
            end_series_dict[mts_name] = pd.Series()
            mts_columns.append(mts_name)

            if log_name not in property_name_list:
                print('[WARNING] Log {0} is not a sample log in NeXus.'.format(log_name))
        # END-FOR

        for i_ws, ws_name in enumerate(ws_name_list):
            # get workspace
            if ws_name == '':
                continue
            workspace_i = AnalysisDataService.retrieve(ws_name)
            self.export_chopped_logs(i_ws=i_ws,
                                     run_start_time=run_start,
                                     property_name_list=property_name_list,
                                     header_list=header_list,
                                     workspace_i=workspace_i,
                                     start_series_dict=start_series_dict,
                                     mean_series_dict=mean_series_dict,
                                     end_series_dict=end_series_dict)
        # END-FOR (workspace)

        # export to csv files
        # start file
        pd_data_frame = pd.DataFrame(start_series_dict, columns=mts_columns)
        if append and os.path.exists(start_file_name):
            # keep the appended rows tab separated and formatted like the existing file
            with open(start_file_name, 'a') as f:
                pd_data_frame.to_csv(f, sep='\t', float_format='%.5f', header=False)
        else:
            pd_data_frame.to_csv(start_file_name, sep='\t', float_format='%.5f', header=False)

        # mean file
        pd_data_frame = pd.DataFrame(mean_series_dict, columns=mts_columns)
        if append and os.path.exists(mean_file_name):
            with open(mean_file_name, 'a') as f:
                pd_data_frame.to_csv(f, sep='\t', float_format='%.5f', header=False)
        else:
            pd_data_frame.to_csv(mean_file_name, sep='\t', float_format='%.5f', header=False)

        # end file
        pd_data_frame = pd.DataFrame(end_series_dict, columns=mts_columns)
        if append and os.path.exists(end_file_name):
            with open(end_file_name, 'a') as f:
                pd_data_frame.to_csv(f, sep='\t', float_format='%.5f', header=False)
        else:
            pd_data_frame.to_csv(end_file_name, sep='\t', float_format='%.5f', header=False)

        # Write the header for the user
        with open(header_file_name, 'w') as header_file:
            header_file.write('{}'.format(mts_columns))

        if mts_columns.count('ProtonCharge') > 1:
            raise NotImplementedError('MTS has more than 1 column as "ProtonCharge"')

        print('[INFO] Chopped log files are written to %s, %s and %s.' % (start_file_name, mean_file_name,
                                                                          end_file_name))

        return
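
A minimal sketch of the append-or-create pattern used above for the chopped sample-environment files; the column names and values here are made up for illustration.

import os
import pandas as pd

frame = pd.DataFrame({'TimeStamp': [0.0, 1.0], 'MTSDisplacement': [0.01, 0.02]})
out_name = 'sampleenv_chopped_mean.txt'

if os.path.exists(out_name):
    # append new rows, tab separated, without repeating the header
    with open(out_name, 'a') as f:
        frame.to_csv(f, sep='\t', float_format='%.5f', header=False)
else:
    frame.to_csv(out_name, sep='\t', float_format='%.5f', header=False)
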
Example 11
    def execute_chop_reduction_v2(self, event_ws_name, binning_parameters, num_reduced_banks,
                                  calib_ws_name, group_ws_name,
                                  gsas_info_dict, fullprof, clear_workspaces, gsas_writer,
                                  chop_overlap_mode, gsas_file_index_start):
        """
        Chop and reduce data with the upgraded algorithm for speed
        Version: 2.0 (latest)
        :param event_ws_name:
        :param binning_parameters:
        :param num_reduced_banks:
        :param calib_ws_name:
        :param group_ws_name:
        :param gsas_info_dict: keys: 'IPTS', 'parm file', 'vanadium'
        :param clear_workspaces: flag to delete output workspaces as they have been written to GSAS
        :param gsas_writer: an instance to the object to write GSAS file
        :param fullprof: Flag to write out Fullprof
        :return:
        """
        # check inputs
        assert isinstance(gsas_writer, save_vulcan_gsas.SaveVulcanGSS), 'GSAS writer must be an instance of ' \
                                                                        'SaveVulcanGSS but not a {}' \
                                                                        ''.format(type(gsas_writer))
        if binning_parameters is not None:
            datatypeutility.check_list('Binning parameters', binning_parameters)

        # create output directory and set instance variable _choppedDataDirectory
        self.create_chop_dir()
        if self._choppedDataDirectory is None:
            self._choppedDataDirectory = self._reductionSetup.get_reduced_data_dir()

        # find out what kind of chopping algorithm shall be used
        split_ws_name, split_info_table = self._reductionSetup.get_splitters(throw_not_set=True)

        # load data from file to workspace
        output_ws_name = event_ws_name + '_split'

        # set up default

        runner = vulcan_slice_reduce.SliceFocusVulcan(number_banks=num_reduced_banks,
                                                      focus_instrument_dict=self._focus_instrument_geometry_dict,
                                                      output_dir=self._reductionSetup.get_chopped_directory()[0])
        run_number = self._reductionSetup.get_run_number()
        runner.set_run_number(run_number)

        info, output_ws_names = runner.slice_focus_event_workspace(event_ws_name=event_ws_name,
                                                                   geometry_calib_ws_name=calib_ws_name,
                                                                   group_ws_name=group_ws_name,
                                                                   split_ws_name=split_ws_name,
                                                                   info_ws_name=split_info_table,
                                                                   output_ws_base=output_ws_name,
                                                                   binning_parameters=binning_parameters,
                                                                   chop_overlap_mode=chop_overlap_mode,
                                                                   gsas_info_dict=gsas_info_dict,
                                                                   gsas_writer=gsas_writer,
                                                                   gsas_file_index_start=gsas_file_index_start,
                                                                   fullprof=fullprof)

        # record
        self._reducedWorkspaceList.extend(output_ws_names)

        return True, info
Example 12
    def load_chopped_binned_data(self,
                                 run_number,
                                 chopped_data_dir,
                                 chop_sequences=None,
                                 file_format='gsas'):
        """
        load chopped and binned data (in GSAS format) for a directory.
        Chopping information file will be searched first
        About returned workspaces dictionary:
            key = sequence, value = (workspace name, data file name)
        :param chopped_data_dir:
        :param chop_sequences: chop sequence (order) indexes
        :param file_format:
        :param run_number: prefix to the loaded workspace from GSAS. It is just for decoration
        :return: 2-tuple of dictionary and integer (run number)
            dictionary: [chop seq index] = (workspace name, gsas file name, log HDF file name)    i.e., 3-tuple
        """
        # check inputs
        datatypeutility.check_int_variable('Run number', run_number,
                                           (1, 9999999))

        assert isinstance(chopped_data_dir, str), \
            'Directory {0} must be given as a string but not a {1}.'.format(chopped_data_dir,
                                                                            type(chopped_data_dir))
        assert isinstance(file_format, str), \
            'Reduced data file format {0} must be a string.'.format(file_format)
        if file_format != 'gsas':
            raise NotImplementedError('File format {} (other than GSAS) is not supported yet'
                                      ''.format(file_format))

        if not os.path.exists(chopped_data_dir):
            raise RuntimeError(
                'Directory {0} for chopped data does not exist.'.format(
                    chopped_data_dir))

        if run_number is None:
            raise RuntimeError('Run number must be given and cannot be None')

        # list the files in a directory
        file_list = [
            f for f in listdir(chopped_data_dir)
            if isfile(join(chopped_data_dir, f))
        ]
        chop_info_file = self.search_chop_info_file(file_list)

        if chop_info_file:
            # parsing the chopping information file for reduced file and raw event files
            print('[INFO] Load Chop Information File: {0}'.format(
                chop_info_file))
            reduced_tuple_dict = self.parse_chop_info_file(
                os.path.join(chopped_data_dir, chop_info_file))
        else:
            # look into each file
            # # chopping information file is not given, then search reduced diffraction files from hard disk
            print(
                '[WARNING] Unable to Find Chop Information File in {0}. No Sample Log Loaded.'
                ''.format(chopped_data_dir))
            reduced_tuple_dict = self.search_reduced_files(
                file_format, file_list, chopped_data_dir)

        # END-IF-ELSE
        chopped_sequence_keys = sorted(reduced_tuple_dict.keys())

        # data key list:
        if chop_sequences is None:
            # default for all data
            chop_sequences = range(1, len(chopped_sequence_keys) + 1)
        elif isinstance(chop_sequences, int):
            # convert single sequence to a list
            chop_sequences = [chop_sequences]
        else:
            # a list: remove redundant
            datatypeutility.check_list('Chopped sequences to load',
                                       chop_sequences)
            chop_sequences = sorted(list(set(chop_sequences)))
            # print ('[DB...BAT] User specified sequence: {}'.format(chop_sequences))
        # END-IF-ELSE

        loaded_gsas_dict = dict()  # [sequence] = workspace_name, file_name, log_h5_name (may be None)
        loaded_sequence_list = list()

        for seq_index in chop_sequences:
            # load GSAS file
            if seq_index not in reduced_tuple_dict:
                print('[DB...BAT] {}-th chopped data does not exist.'.format(
                    seq_index))
                continue

            loaded_sequence_list.append(seq_index)
            file_name = reduced_tuple_dict[seq_index][0]
            # print ('[DB...BAT] Seq-index = {}, GSAS file name = {}'.format(seq_index, file_name))
            data_ws_name = self.load_binned_data(
                data_file_name=file_name,
                data_file_type=file_format,
                prefix='G{}'.format(run_number),
                max_int=max(10,
                            len(chopped_sequence_keys) + 1))

            # sliced log HDF5
            log_h5_name = file_name.replace('.gda', '.hdf')
            if not os.path.exists(log_h5_name):
                log_h5_name = None

            loaded_gsas_dict[seq_index] = data_ws_name, file_name, log_h5_name
        # END-FOR

        # register for chopped data dictionary: if run exists, then merge 2 dictionary!
        if run_number in self._chopped_gsas_dict:
            # run already exists, merge 2 dictionary
            self._chopped_gsas_dict[run_number].update(loaded_gsas_dict)
        else:
            self._chopped_gsas_dict[run_number] = loaded_gsas_dict

        return loaded_gsas_dict, loaded_sequence_list
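
A standalone sketch of the chop-sequence normalization performed above: None loads all sequences, an integer loads a single sequence, and a list is de-duplicated and sorted.

def normalize_chop_sequences(chop_sequences, num_available):
    if chop_sequences is None:
        return list(range(1, num_available + 1))
    if isinstance(chop_sequences, int):
        return [chop_sequences]
    return sorted(set(chop_sequences))

print(normalize_chop_sequences(None, 3))        # [1, 2, 3]
print(normalize_chop_sequences(5, 3))           # [5]
print(normalize_chop_sequences([3, 1, 3], 3))   # [1, 3]
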
Example 13
    def sort_info(self, auto_record_ref_id, sort_by, run_range, output_items,
                  num_outputs):
        """ sort the information loaded from auto record file
        Note: current list of indexes
        Index([u'RUN', u'IPTS', u'Title', u'Notes', u'Sample', u'ITEM', u'StartTime',
        u'Duration', u'ProtonCharge', u'TotalCounts', u'Monitor1', u'Monitor2',
        u'X', u'Y', u'Z', u'O', u'HROT', u'VROT', u'BandCentre', u'BandWidth',
        u'Frequency', u'Guide', u'IX', u'IY', u'IZ', u'IHA', u'IVA',
        u'Collimator', u'MTSDisplacement', u'MTSForce', u'MTSStrain',
        u'MTSStress', u'MTSAngle', u'MTSTorque', u'MTSLaser', u'MTSlaserstrain',
        u'MTSDisplaceoffset', u'MTSAngleceoffset', u'MTST1', u'MTST2', u'MTST3',
        u'MTST4', u'MTSHighTempStrain', u'FurnaceT', u'FurnaceOT',
        u'FurnacePower', u'VacT', u'VacOT', u'EuroTherm1Powder',
        u'EuroTherm1SP', u'EuroTherm1Temp', u'EuroTherm2Powder',
        u'EuroTherm2SP', u'EuroTherm2Temp'],
        :param auto_record_ref_id: reference ID of a loaded auto record file
        :param sort_by: name of the column to sort by
        :param run_range: None or a 2-item sequence (start run, end run)
        :param output_items: list of column names to output
        :param num_outputs: None or maximum number of output rows
        :return: list of dictionaries, one per output row
        """
        # check inputs
        datatypeutility.check_string_variable('Auto record reference ID',
                                              auto_record_ref_id)
        datatypeutility.check_string_variable('Column name to sort by',
                                              sort_by)
        if sort_by.lower() not in AUTO_LOG_MAP:
            raise RuntimeError(
                'Pandas DataFrame has no columns mapped from {}; Available include '
                '{}'.format(sort_by.lower(), AUTO_LOG_MAP.keys()))
        if run_range is not None:
            assert not isinstance(run_range, str), 'Run range cannot be a string'
            if len(run_range) != 2:
                raise RuntimeError('Run range {} must have 2 items for start and end.'
                                   ''.format(run_range))
        # END-IF

        datatypeutility.check_list('Output column names', output_items)
        if num_outputs is not None:
            datatypeutility.check_int_variable('Number of output rows',
                                               num_outputs, (1, None))

        if auto_record_ref_id not in self._auto_record_dict:
            raise RuntimeError(
                'Auto record ID {} is not in dictionary.  Available keys are {}'
                ''.format(auto_record_ref_id, self._auto_record_dict.keys()))
        if run_range is not None:
            print(
                '[ERROR] Notify developer that run range shall be implemented.'
            )

        # get data frame (data set)
        record_data_set = self._auto_record_dict[auto_record_ref_id]

        # sort the value
        auto_log_key = AUTO_LOG_MAP[sort_by.lower()]
        record_data_set.sort_values(by=[auto_log_key],
                                    ascending=False,
                                    inplace=True)

        # filter out required
        needed_index_list = list()
        for item in output_items:
            needed_index_list.append(AUTO_LOG_MAP[item.lower()])
        filtered = record_data_set.filter(needed_index_list)

        # number of outputs
        if num_outputs is None:
            num_outputs = len(record_data_set)

        # convert to list of dictionary
        column_names = filtered.columns.tolist()
        output_list = list()
        for row_index in range(min(num_outputs, len(filtered))):
            dict_i = dict()
            for j in range(len(column_names)):
                try:
                    dict_i[output_items[j]] = filtered.iloc[row_index, j]
                except IndexError as index_err:
                    print('j = {}, row_index = {}'.format(j, row_index))
                    print(column_names)
                    print('output items: {}'.format(output_items))
                    print(output_items[j])
                    print('filtered: \n{}'.format(filtered))
                    raise index_err
            # print dict_i
            output_list.append(dict_i)

        return output_list
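
A minimal pandas sketch of the sort / filter / top-N pattern used above, with a made-up record table standing in for an auto record file.

import pandas as pd

records = pd.DataFrame({'RUN': [101, 102, 103],
                        'TotalCounts': [5.0e6, 1.2e7, 8.0e6],
                        'Duration': [600, 1800, 900]})

records.sort_values(by=['TotalCounts'], ascending=False, inplace=True)
filtered = records.filter(['RUN', 'TotalCounts'])

top_rows = [row.to_dict() for _, row in filtered.head(2).iterrows()]
print(top_rows)   # the two runs with the highest total counts
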
Example 14
def export_experiment_log(ws_name, record_file_name, sample_name_list, sample_title_list, sample_operation_list,
                          patch_list):
    """ Export experiment logs
    Note: duplicate from reduce_VULCAN.ReduceVulcanData._export_experiment_log
    :param ws_name:
    :param record_file_name:
    :param sample_title_list:
    :param sample_operation_list:
    :param patch_list:
    :return:
    """
    # check inputs
    datatypeutility.check_file_name(record_file_name, check_exist=False, check_writable=True,
                                    is_dir=False, note='Standard material record file')
    datatypeutility.check_list('Sample log names', sample_name_list)
    datatypeutility.check_list('Sample log titles', sample_title_list)
    datatypeutility.check_list('Sample log operations', sample_operation_list)

    if len(sample_name_list) != len(sample_title_list) or len(sample_name_list) != len(sample_operation_list):
        raise RuntimeError('Sample name list ({0}), sample title list ({1}) and sample operation list ({2}) '
                           'must have the same size.'
                           ''.format(len(sample_name_list), len(sample_title_list), len(sample_operation_list)))

    # get file mode
    if os.path.exists(record_file_name):
        file_write_mode = 'append'
    else:
        file_write_mode = 'new'

    # write
    print('[DB...BAT] Export (TAG) experiment log record: {}'.format(record_file_name))
    try:
        mantid.simpleapi.ExportExperimentLog(InputWorkspace=ws_name,
                                             OutputFilename=record_file_name,
                                             FileMode=file_write_mode,
                                             SampleLogNames=sample_name_list,
                                             SampleLogTitles=sample_title_list,
                                             SampleLogOperation=sample_operation_list,
                                             TimeZone="America/New_York",
                                             OverrideLogValue=patch_list,
                                             OrderByTitle='RUN',
                                             RemoveDuplicateRecord=True)
    except RuntimeError as run_err:
        message = 'Failed to export experiment record to {} due to {}.' \
                  ''.format(record_file_name, run_err)
        return False, message
    except ValueError as value_err:
        message = 'Exporting experiment record to {0} failed due to {1}.' \
                  ''.format(record_file_name, value_err)
        return False, message

    # Set up the mode for global access
    file_access_mode = oct(os.stat(record_file_name)[stat.ST_MODE])
    file_access_mode = file_access_mode[-3:]
    if file_access_mode != '666' and file_access_mode != '676':
        try:
            os.chmod(record_file_name, 0o666)
        except OSError as os_err:
            return False, '[ERROR] Unable to set file {0} to mode 666 due to {1}' \
                          ''.format(record_file_name, os_err)
    # END-IF

    return True, ''
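
A standalone sketch of the permission fix-up applied after the record file is written: relax the file to world readable/writable unless it already is. The file name here is illustrative only.

import os
import stat

def make_world_accessible(file_name):
    mode = oct(os.stat(file_name)[stat.ST_MODE])[-3:]
    if mode not in ('666', '676'):
        os.chmod(file_name, 0o666)

with open('record_example.txt', 'a'):   # create the file if it does not exist
    pass
make_world_accessible('record_example.txt')
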
Example 15
    def slice_focus_event_workspace(self, event_ws_name, geometry_calib_ws_name, group_ws_name,
                                    split_ws_name, info_ws_name,
                                    output_ws_base, binning_parameters, chop_overlap_mode,
                                    gsas_info_dict, gsas_writer, gsas_file_index_start,
                                    fullprof):
        """ Slice and diffraction focus event workspace with option to write the reduced data to GSAS file with
        SaveGSS().
        Each workspace is
        1. sliced from original event workspace
        2. diffraction focused
        3. optionally rebinned to IDL binning and ready for SaveGSS()
        :param event_ws_name: name of EventWorkspace that has been masked if there is a mask
        :param geometry_calib_ws_name: DIFC calibration Table workspace
        :param group_ws_name: name of Grouping workspace
        :param split_ws_name:
        :param info_ws_name:
        :param output_ws_base:
        :param chop_overlap_mode: whether the chopped workspace will have overlapped events (in time)
        :param binning_parameters: None for IDL binning; otherwise, use defined binning
        :param gsas_info_dict: required for writing GSAS files keys (IPTS, 'parm file' = 'vulcan.prm', 'vanadium')
        :param gsas_writer: GSASWriter instance to export to VULCAN GSAS file
        :param gsas_file_index_start: starting index of GSAS file (1.gda, 2.gda.. whether 0.gda?)
        :param fullprof: Flag to write reduced data to Fullprof (along with GSAS)
        :return: tuple: [1] slicing information, [2] output workspace names
        """
        # check inputs
        if binning_parameters is not None:
            datatypeutility.check_list('Binning parameters', binning_parameters)
        datatypeutility.check_dict('GSAS information', gsas_info_dict)

        # starting time
        t0 = time.time()

        # Align detectors: OpenMP
        AlignDetectors(InputWorkspace=event_ws_name,
                       OutputWorkspace=event_ws_name,
                       CalibrationWorkspace=geometry_calib_ws_name)

        t1 = time.time()

        # Filter events: OpenMP
        # is relative or not?  TableWorkspace has to be relative!
        split_ws = mantid_helper.retrieve_workspace(split_ws_name, raise_if_not_exist=True)
        if split_ws.__class__.__name__.count('TableWorkspace'):
            is_relative_time = True
        else:
            is_relative_time = False

        result = FilterEvents(InputWorkspace=event_ws_name,
                              SplitterWorkspace=split_ws_name, InformationWorkspace=info_ws_name,
                              OutputWorkspaceBaseName=output_ws_base,
                              FilterByPulseTime=False,
                              GroupWorkspaces=True,
                              OutputWorkspaceIndexedFrom1=True,
                              SplitSampleLogs=True,
                              RelativeTime=is_relative_time)

        # get output workspaces' names
        output_names = mantid_helper.get_filter_events_outputs(result)
        if output_names is None:
            raise RuntimeError(
                'There is no workspace found in the result of FilterEvents (vulcan_slice_reduce)')

        t2 = time.time()

        # construct output GSAS names
        gsas_names = list()
        for index in range(len(output_names)):
            out_ws_name = output_names[index]
            if len(out_ws_name) == 0:
                gsas_name = ''
            else:
                gsas_name = out_ws_name + '_gsas_not_binned'
            gsas_names.append(gsas_name)
        # END-FOR

        # Now start to use multi-threading to diffraction focus the sliced event data
        num_outputs = len(output_names)
        number_ws_per_thread = int(num_outputs / self._number_threads)
        extra = num_outputs % self._number_threads

        print('[DB...IMPORTANT] Output workspace number = {0}, workspace per thread = {1}\n'
              'Output workspaces names: {2}'.format(num_outputs, number_ws_per_thread, output_names))

        thread_pool = dict()
        # create threads and start
        end_sliced_ws_index = 0  # exclusive last
        for thread_id in range(self._number_threads):
            start_sliced_ws_index = end_sliced_ws_index
            end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread + int(thread_id < extra),
                                      num_outputs)
            # call method self.focus_workspace_list() in multiple threading
            # Note: Tread(target=[method name], args=(method argument 0, method argument 1, ...,)
            workspace_names_i = output_names[start_sliced_ws_index:end_sliced_ws_index]
            gsas_workspace_name_list = gsas_names[start_sliced_ws_index:end_sliced_ws_index]
            thread_pool[thread_id] = threading.Thread(target=self.focus_workspace_list,
                                                      args=(workspace_names_i, gsas_workspace_name_list,
                                                            group_ws_name,))
            thread_pool[thread_id].start()
            print('[DB] thread {0}: [{1}: {2}) ---> {3} workspaces'.
                  format(thread_id, start_sliced_ws_index,  end_sliced_ws_index,
                         end_sliced_ws_index-start_sliced_ws_index))
        # END-FOR

        # join the threads after the diffraction focus is finished
        for thread_id in range(self._number_threads):
            thread_pool[thread_id].join()

        # kill any if still alive
        for thread_id in range(self._number_threads):
            thread_i = thread_pool[thread_id]
            if thread_i is not None and thread_i.isAlive():
                thread_i._Thread_stop()

        t3 = time.time()

        # process overlapping chop
        if chop_overlap_mode:
            # FIXME - Shan't be used anymore unless an optimized algorithm developed for DT option
            output_names = self.process_overlap_chopped_data(output_names)
        # END-IF

        # save ONE python script for future reference
        if len(output_names) > 0:
            python_name = os.path.join(self._output_dir,
                                       '{}_{}.py'.format(self._run_number, split_ws_name))
            GeneratePythonScript(InputWorkspace=output_names[0], Filename=python_name)
        else:
            print('[ERROR] No output workspace to export to GSAS!')

        # write all the processed workspaces to GSAS:  IPTS number and parm_file_name shall be passed
        run_date_time = vulcan_util.get_run_date(event_ws_name, '')
        self.write_to_gsas(output_names, ipts_number=gsas_info_dict['IPTS'], parm_file_name=gsas_info_dict['parm file'],
                           vanadium_gda_name=gsas_info_dict['vanadium'],
                           gsas_writer=gsas_writer, run_start_date=run_date_time,  # ref_tof_sets=binning_parameters,
                           gsas_file_index_start=gsas_file_index_start)

        if fullprof:
            output_dir = self._output_dir
            # FIXME TODO - TOMORROW 0 - Vanadium workspace for Fullprof?
            self.write_to_fullprof_files(output_names, None, output_dir)

        # TODO - TONIGHT 1 - put this section to a method
        # TODO FIXME - TODAY 0 -... Debug disable
        if True:
            pc_time0 = mantid_helper.get_workspace_property(event_ws_name, 'proton_charge').times[0]
            # user does not want to HDF5 in same directory.  Need to write to a special directory
            if self._output_dir.startswith('/SNS/VULCAN/IPTS-'):
                # on the SNS server, do it in a different way
                output_dir = vulcan_util.generate_chopped_log_dir(self._output_dir, True)
            else:
                output_dir = self._output_dir
            self.export_split_logs(output_names, gsas_file_index_start=gsas_file_index_start,
                                   run_start_time=pc_time0,
                                   output_dir=output_dir)
        # END-IF

        # write to logs
        self.write_log_records(output_names, log_type='loadframe')
        tf = time.time()

        # processing time output
        process_info = '{0}: Runtime = {1}   Total output workspaces = {2}\n' \
                       ''.format(event_ws_name, tf - t0, len(output_names))
        process_info += 'Details for thread = {4}:\n\tLoading  = {0}\n\tChopping = {1}\n\tFocusing = {2}\n\t' \
                        'SaveGSS = {3}'.format(t1 - t0, t2 - t1, t3 - t2,
                                               tf - t3, self._number_threads)
        print('[INFO] {}'.format(process_info))

        # FIXME - FUTURE - Whether this for-loop is useful?
        end_sliced_ws_index = 0
        for thread_id in range(self._number_threads):
            start_sliced_ws_index = end_sliced_ws_index
            end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread + int(thread_id < extra),
                                      num_outputs)
            print('thread {0}: [{1}: {2}) ---> {3} workspaces'
                  .format(thread_id, start_sliced_ws_index, end_sliced_ws_index,
                          end_sliced_ws_index-start_sliced_ws_index))

        return process_info, output_names
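
A standalone sketch of the thread partitioning used above: the sliced workspaces are divided across the threads, with the first 'extra' threads each taking one additional workspace.

def partition(num_outputs, number_threads):
    number_ws_per_thread = num_outputs // number_threads
    extra = num_outputs % number_threads
    ranges = list()
    end_index = 0   # exclusive last
    for thread_id in range(number_threads):
        start_index = end_index
        end_index = min(start_index + number_ws_per_thread + int(thread_id < extra), num_outputs)
        ranges.append((start_index, end_index))
    return ranges

print(partition(10, 4))   # [(0, 3), (3, 6), (6, 8), (8, 10)]
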
Example 16
    def execute_scan_rotating_collimator(self, ipts_number, run_number_list,
                                         pixels, to_focus_spectra):
        """
        :param run_number_list:
        :param pixels:
        :param to_focus_spectra:
        :return:
        """
        datatypeutility.check_list('Run numbers', run_number_list)
        datatypeutility.check_list('Pixel IDs', pixels)

        calib_manager = reductionmanager.CalibrationManager()

        data_set = dict()

        self._run_numbers = run_number_list[:]

        # load run numbers
        for run_number in run_number_list:
            # locate original nexus file
            if ipts_number is None:
                ipts_number = mantid_helper.get_ipts_number(run_number)
            event_file_name = '/SNS/VULCAN/IPTS-{}/nexus/VULCAN_{}.nxs.h5'.format(
                ipts_number, run_number)

            # load data from file
            ws_name_i = 'VULCAN_{}_events'.format(run_number)
            mantid_helper.load_nexus(data_file_name=event_file_name,
                                     output_ws_name=ws_name_i,
                                     meta_data_only=False)

            # align
            run_start_date = file_utilities.check_file_creation_date(
                event_file_name)
            has_loaded_cal, calib_ws_collection = calib_manager.has_loaded(
                run_start_date, 3)
            if not has_loaded_cal:
                calib_manager.search_load_calibration_file(
                    run_start_date, 3, ws_name_i)
            # workspaces = calib_manager.get_loaded_calibration_workspaces(run_start_date, 3)
            calib_ws_name = calib_ws_collection.calibration
            # group_ws_name = workspaces.grouping
            # mask_ws_name = workspaces.mask

            # align and output to dSpacing
            mantid_reduction.align_instrument(ws_name_i, calib_ws_name)

            # focus or not
            out_name_i = ws_name_i + '_partial'
            workspace_index_vec = vulcan_util.convert_pixels_to_workspace_indexes_v1(
                pixel_id_list=pixels)
            if to_focus_spectra:
                # focus:
                # mantid_helper.mtd_convert_units(ws_name_i, target_unit='dSpacing')
                mantid_helper.rebin(ws_name_i, '-0.1', preserve=True)
                mantid_helper.sum_spectra(
                    ws_name_i,
                    output_workspace=out_name_i,
                    workspace_index_list=workspace_index_vec)
                mantid_helper.mtd_convert_units(out_name_i, target_unit='TOF')
                mantid_helper.rebin(out_name_i,
                                    '3000, -0.0003, 70000',
                                    preserve=True)
            else:
                # sum spectra: rebin
                mantid_helper.mtd_convert_units(ws_name_i, target_unit='TOF')
                mantid_helper.rebin(ws_name_i,
                                    '3000, -0.0003, 70000',
                                    preserve=True)
                mantid_helper.sum_spectra(
                    ws_name_i,
                    output_workspace=out_name_i,
                    workspace_index_list=workspace_index_vec)
            # END-IF

            # convert to point data
            mantid_helper.convert_to_point_data(out_name_i)

            # get workspace
            out_ws = mantid_helper.retrieve_workspace(out_name_i, True)
            data_set[run_number] = out_ws.readX(0), out_ws.readY(0)
        # END-FOR

        self._data_set = data_set

        return data_set