Example #1
    def __init__(self, run_number, nxs_file_name):
        """
        Initialization
        :return:
        """
        # Check input's validity
        datatypeutility.check_int_variable('Run number', run_number, (0, None))
        datatypeutility.check_file_name(nxs_file_name,
                                        check_exist=True,
                                        check_writable=False,
                                        is_dir=False,
                                        note='NeXus file name')

        # Data structure for log data that is worked on now
        self._myNeXusFileName = nxs_file_name
        self._myRunNumber = run_number

        # workspace name (might be sample log only)
        self._meta_ws_name = None  # name of (meta) workspace
        self._logNameList = None
        self._runStartTime = None

        # splitters
        self._currSplitterWorkspace = None
        self._currInfoTableWorkspace = None
        self._chopSetupDict = dict()  # key: user-specified tag; value: dictionary including everything

        # initialization operation
        self.load_data_file()

        return
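A minimal usage sketch for this constructor, assuming the enclosing class is named LogChopper (a hypothetical name; the file path is also hypothetical):

# hypothetical class name and file path -- adapt to the actual module
chopper = LogChopper(run_number=12345,
                     nxs_file_name='/SNS/VULCAN/IPTS-10000/nexus/VULCAN_12345.nxs.h5')
# after construction, load_data_file() has already populated the meta workspace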
Example #2
    def load_detector_eff_file(self, file_name):
        """
        load detector efficiency factor file (HDF5)
        :param file_name:
        :return:
        """
        datatypeutility.check_file_name(file_name, check_exist=True,
                                        note='Detector efficiency (HDF5) file')

        try:
            returned = file_utilities.import_detector_efficiency(file_name)
            pid_vector = returned[0]
            det_eff_vector = returned[1]   # inverted efficiency: multiply (not divide) to correct
        except RuntimeError as run_err:
            raise RuntimeError('Unable to load detector efficiency file {0} due to {1}'.format(file_name,
                                                                                               run_err))

        # create the detector efficiency workspace
        self._det_eff_ws_name = os.path.splitext(os.path.basename(file_name))[0]
        CreateWorkspace(OutputWorkspace=self._det_eff_ws_name,
                        DataX=pid_vector,  # np.arange(len(det_eff_vector)),
                        DataY=det_eff_vector,
                        NSpec=len(det_eff_vector))

        return
Example #3
def get_load_file_by_dialog(parent, title, default_dir, file_filter):
    """ Get the file name to load via QFileDialog
    :param parent:
    :param title:
    :param default_dir:
    :param file_filter:
    :return:
    """
    datatypeutility.check_string_variable('Title (to load file)', title)
    datatypeutility.check_string_variable('Default directory to load file', default_dir)
    datatypeutility.check_file_name(default_dir, True, False, True, 'Default directory to load file')
    datatypeutility.check_string_variable('File filter', file_filter)

    # append "All files:
    if file_filter.count('*.*') == 0:
        file_filter += ';;All files (*.*)'

    # get file name
    returns = QFileDialog.getOpenFileName(parent, title, default_dir, file_filter)
    if isinstance(returns, tuple):
        file_name = str(returns[0])
    else:
        file_name = str(returns).strip()
    file_name = file_name.strip()

    print('[DB...BAT] Selected file to load: {}'.format(file_name))

    return file_name
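A usage sketch; QFileDialog accepts None as the parent widget, so the helper can be tried from a plain script (the import path and directory below are assumptions):

# assumption: Qt binding importable as below; a QApplication must exist
from qtpy.QtWidgets import QApplication

app = QApplication([])
file_name = get_load_file_by_dialog(parent=None,
                                    title='Select file to load',
                                    default_dir='/tmp',          # hypothetical
                                    file_filter='Data (*.dat)')
print(file_name)   # empty string if the user canceled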
Example #4
    def load_mask_xml(self, mask_file_name, ref_ws_name, is_roi):
        """
        load a mask in Mantid XML format
        :param mask_file_name:
        :param ref_ws_name:
        :param is_roi: flag that the mask is a ROI
        :return:
        """
        datatypeutility.check_file_name(mask_file_name, check_exist=True, check_writable=False,
                                        is_dir=False, note='Mask/ROI (Mantid) XML file')

        if mask_file_name in self._mask_file_ws_dict:
            # previously loaded
            mask_ws_name, is_roi = self._mask_file_ws_dict[mask_file_name]
        else:
            # create workspace name as the standard
            mask_ws_name = mask_file_name.lower().split('.xml')[0].replace('/', '.')
            # load
            if is_roi:
                mask_ws_name = 'roi.' + mask_ws_name
                mantid_helper.load_roi_xml(ref_ws_name, mask_file_name, mask_ws_name)
            else:
                mask_ws_name = 'mask.' + mask_ws_name
                mantid_helper.load_mask_xml(ref_ws_name, mask_file_name, mask_ws_name)

            # record
            self._mask_file_ws_dict[mask_file_name] = mask_ws_name, is_roi
        # END-IF-ELSE

        return mask_ws_name
Example #5
    def __init__(self, vulcan_ref_name=None):
        """ constructor of GSAS writer for VDRIVE
        :param vulcan_ref_name:
        """
        # set up the default reference file name
        if vulcan_ref_name is None:
            vulcan_ref_name = '/SNS/VULCAN/shared/CALIBRATION/VDRIVE/vdrive_tof_bin.h5'

        datatypeutility.check_file_name(
            file_name=vulcan_ref_name,
            check_exist=True,
            check_writable=False,
            is_dir=False,
            note='VDRIVE GSAS binning reference file')

        # parse the file
        lower_res_tof_vec, high_res_tof_vec = self._import_tof_ref_file(
            vulcan_ref_name)

        # convert TOF bin boundaries to Mantid binning parameters
        # key = 'bank type', value = TOF vec, binning parameters
        self._mantid_bin_param_dict = dict()
        # lower resolution: east/west
        self._mantid_bin_param_dict[
            'lower'] = lower_res_tof_vec, self._create_binning_parameters(
                lower_res_tof_vec)
        # higher resolution: high angle bank
        self._mantid_bin_param_dict[
            'higher'] = high_res_tof_vec, self._create_binning_parameters(
                high_res_tof_vec)

        # about vanadium
        self._van_ws_names = dict()

        return
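A usage sketch for the constructor, assuming the enclosing class is named VdriveGSASWriter (hypothetical); passing None falls back to the shared VULCAN reference file:

# hypothetical class name; requires access to the default /SNS/VULCAN reference file
writer = VdriveGSASWriter(vulcan_ref_name=None)
lower_tof_vec, lower_bin_params = writer._mantid_bin_param_dict['lower']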
Example #6
def scan_rotating_collimator(ipts, runs, pixels, to_focus):
    """
    scan collimator in rotation.
    :param runs: file name containing run numbers
    :param pixels: file name containing pixel IDs
    :param to_focus: flag whether the TOF shall be focused or not
    :return: tuple (bool, str): status and message
    """
    try:
        datatypeutility.check_file_name(
            runs, check_exist=True, note='ASCII file containing run numbers')
        datatypeutility.check_file_name(
            pixels, check_exist=True, note='ASCII file containing pixel IDs')
        datatypeutility.check_bool_variable(
            'Flag to indicate whether the summed spectra shall be focused',
            to_focus)
    except AssertionError as ass_err:
        return False, 'Input arguments error: {}'.format(ass_err)

    try:
        run_number_list = parse_runs_file(runs)
        pixel_list = parse_pixels_file(pixels)
    except ValueError as val_err:
        return False, 'Input file error: {}'.format(val_err)

    try:
        collimator = Collimator()
        collimator.execute_scan_rotating_collimator(ipts,
                                                    run_number_list,
                                                    pixel_list,
                                                    to_focus_spectra=to_focus)
    except RuntimeError as run_err:
        return False, 'Execution error: {}'.format(run_err)

    return True, collimator
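A usage sketch; the IPTS number and the two ASCII file paths are hypothetical (the file formats are shown with parse_pixels_file and parse_runs_file later in this listing):

status, result = scan_rotating_collimator(ipts=22752,              # hypothetical
                                          runs='/tmp/runs.txt',     # hypothetical
                                          pixels='/tmp/pixels.txt',
                                          to_focus=True)
if status:
    collimator = result   # on success the second item is the Collimator instance
else:
    print('Scan failed: {}'.format(result))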
Example #7
    def save_vanadium(self, diff_ws_name, gsas_file_name, ipts_number,
                      van_run_number, sample_log_ws_name):
        """  Save a WorkspaceGroup which comes from original GSAS workspace
        :param diff_ws_name: diffraction workspace (group) name
        :param gsas_file_name: output GSAS file name
        :param ipts_number: IPTS number
        :param van_run_number: (van) run number
        :param sample_log_ws_name: workspace containing sample logs (proton charges)
        :return:
        """
        datatypeutility.check_string_variable(
            'Diffraction workspace (group) name', diff_ws_name)
        datatypeutility.check_file_name(gsas_file_name, False, True, False,
                                        'Smoothed vanadium GSAS file')
        datatypeutility.check_int_variable('IPTS', ipts_number, (1, None))
        datatypeutility.check_string_variable('Sample log workspace name',
                                              sample_log_ws_name)

        # rebin and then write output
        gsas_bank_buffer_dict = dict()
        van_ws = mantid_helper.retrieve_workspace(diff_ws_name)
        num_banks = mantid_helper.get_number_spectra(van_ws)
        datatypeutility.check_file_name(gsas_file_name,
                                        check_exist=False,
                                        check_writable=True,
                                        is_dir=False,
                                        note='Output GSAS file')

        # TODO - TONIGHT 5 - This will break if input is a Workspace but not GroupingWorkspace!!!
        for ws_index in range(num_banks):
            # get value
            bank_id = ws_index + 1
            # write GSAS head considering vanadium
            tof_vector = None
            ws_name_i = van_ws[ws_index].name()
            gsas_section_i = self._write_slog_bank_gsas(
                ws_name_i, 1, tof_vector, None)
            gsas_bank_buffer_dict[bank_id] = gsas_section_i
        # END-FOR

        # header: note that the fifth argument of _generate_vulcan_gda_header is the
        # GSAS parameter file name; the GSAS file name is reused in its place here
        log_ws = mantid_helper.retrieve_workspace(sample_log_ws_name)
        gsas_header = self._generate_vulcan_gda_header(log_ws, gsas_file_name,
                                                       ipts_number,
                                                       van_run_number,
                                                       gsas_file_name, False)

        # form to a big string
        gsas_buffer = gsas_header
        for bank_id in sorted(gsas_bank_buffer_dict.keys()):
            gsas_buffer += gsas_bank_buffer_dict[bank_id]

        # write to HDD
        with open(gsas_file_name, 'w') as g_file:
            g_file.write(gsas_buffer)

        return
Example #8
    def __init__(self, chopped_data_dir, run_number):
        """
        initialization
        :param run_number: self._reductionSetup.get_run_number()
        """
        datatypeutility.check_file_name(chopped_data_dir, check_writable=True, is_dir=True,
                                        note='Directory to store sliced log records')
        datatypeutility.check_int_variable('Run number', run_number, (1, None))

        self._choppedDataDirectory = chopped_data_dir
        self._run_number = run_number

        return
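A usage sketch, assuming the enclosing class is named ChoppedLogExporter (hypothetical):

# the directory is validated to be a writable directory
exporter = ChoppedLogExporter('/tmp/chopped_logs',   # hypothetical directory
                              run_number=12345)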
Example #9
def parse_pixels_file(file_name):
    """
    parse a file containing pixel IDs to be reduced
    Accepted PixelIDs: a, a:b, a:b:x (i.e., a, a+x, a+2x, ...)
    :param file_name:
    :return:
    """
    datatypeutility.check_file_name(file_name, check_exist=True)

    # read in lines
    with open(file_name, 'r') as run_file:
        lines = run_file.readlines()

    # parse
    pixel_id_list = list()
    for line in lines:
        line = line.strip()

        if len(line) == 0:
            continue
        elif line.startswith('#'):
            # comment line
            continue

        # remove all empty space and split by ,
        line = line.replace(' ', '')
        items = line.split(',')

        for item in items:
            num_col = item.count(':')
            try:
                if num_col == 0:
                    pixel_ids = [convert_integer(item)]
                elif num_col <= 2:
                    pixel_ids = convert_integer_range(item)
                else:
                    raise ValueError(
                        '{} is not a supported format'.format(item))
            except ValueError as value_err:
                print(
                    'Unable to parse {} to a set of integers due to {}'.format(
                        item, value_err))
            else:
                pixel_id_list.extend(pixel_ids)
        # END-FOR
    # END-FOR

    return pixel_id_list
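A usage sketch showing the accepted pixel-ID formats (file path and contents are hypothetical):

# /tmp/pixels.txt might contain:
#   # high-angle bank pixels
#   100, 200:203
#   300:310:5
pixel_ids = parse_pixels_file('/tmp/pixels.txt')
print(pixel_ids)   # an expanded integer list; range semantics follow convert_integer_range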
Example #10
def locate_run(ipts, run_number, base_path='/SNS/VULCAN/'):
    """
    check whether a run exists
    Note: this is renamed from locateRun()
    :param ipts:
    :param run_number:
    :param base_path:
    :return: boolean (found it or not), file name
    """
    datatypeutility.check_int_variable('IPTS number', ipts, (1, None))
    datatypeutility.check_int_variable('Run number', run_number, (1, None))
    datatypeutility.check_file_name(base_path, check_exist=True, is_dir=True)

    err_msg = ''

    # build the name according to the convention
    # there could be 2 possibilities
    # new nED first
    rel_full_path = 'IPTS-{0}/nexus/VULCAN_{1}.nxs.h5'.format(ipts, run_number)
    full_path = os.path.join(base_path, rel_full_path)

    if os.path.exists(full_path):
        # nED
        file_name = full_path
    else:
        # pre-nED
        relpathname = "IPTS-%d/0/%d/NeXus/VULCAN_%d_event.nxs" % (ipts, run_number, run_number)
        nxsfilename = os.path.join(base_path, relpathname)
        if os.path.exists(nxsfilename):
            file_name = nxsfilename
        else:
            file_name = None
            err_msg = 'IPTS {} Run {} does not exist. Neither {} nor {} is a valid path.' \
                      ''.format(ipts, run_number, full_path, nxsfilename)
    # END-IF

    status = file_name is not None
    if not status:
        return status, err_msg

    return True, file_name
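A usage sketch; locate_run tries the nED layout first and then falls back to the pre-nED layout (the numbers are hypothetical):

found, result = locate_run(ipts=22752, run_number=160989)
if found:
    print('NeXus file: {}'.format(result))
else:
    print(result)   # error message when neither candidate path exists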
Example #11
    def init_session(self, workspace_name, ipts_number, van_run_number, out_gsas_name,
                     sample_log_ws_name):
        """
        Initialize vanadium processing session
        :param workspace_name:
        :param ipts_number:
        :param van_run_number:
        :param out_gsas_name:
        :param sample_log_ws_name: required for proton charge
        :return:
        """
        datatypeutility.check_string_variable('Workspace name', workspace_name)
        datatypeutility.check_int_variable('IPTS number', ipts_number, (1, 99999))
        datatypeutility.check_int_variable('Vanadium run number', van_run_number, (1, 999999))
        datatypeutility.check_file_name(out_gsas_name, False, True, False, 'Output GSAS file name')
        datatypeutility.check_string_variable('Sample log workspace name', sample_log_ws_name)

        workspace = mantid_helper.retrieve_workspace(workspace_name)
        if workspace.id() != 'WorkspaceGroup':
            # create dictionary and etc.
            raise NotImplementedError('Need to implement single workspace case to extract spectra')

        self._van_workspace_name = workspace_name

        self._ipts_number = ipts_number
        self._van_run_number = van_run_number
        self._output_gsas_name = out_gsas_name

        # parameter set up
        self._is_shift_case = False

        # convert to point data as requested
        mantid_helper.convert_to_point_data(self._van_workspace_name)
        mantid_helper.mtd_convert_units(self._van_workspace_name, 'dSpacing')

        self._sample_log_ws_name = sample_log_ws_name

        return
Example #12
def save_slicers(time_segment_list, file_name):
    """
    Save a list of 3-tuple or 2-tuple time segments to an ASCII file
    Time segments may be disordered.
    Format:
        # Reference Run Number =
        # Run Start Time =
        # Start Stop TargetIndex
        Note that all units of time stamp or difference of time are seconds
    :param time_segment_list:
    :param file_name:
    :return:
    """
    # Check
    datatypeutility.check_file_name(file_name, False, True, False,
                                    'Target file name for segments')
    assert isinstance(time_segment_list, list), 'Time segment list must be a list but not of type %s.' \
                                                '' % type(time_segment_list)

    # sort by segments
    time_segment_list.sort()

    # start to write to file buffer
    file_buffer = '# Start Time \tStop Time \tTarget\n'

    # splitters
    for segment in time_segment_list:
        file_buffer += '%.9f \t%.9f \t%s\n' % (segment[0], segment[1],
                                               str(segment[2]))

    # write file from buffer
    try:
        with open(file_name, 'w') as set_file:
            set_file.write(file_buffer)
    except IOError as e:
        return False, 'Failed to write time segments to file %s due to %s' % (
            file_name, str(e))

    return True, None
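A usage sketch; each segment is a (start, stop, target) tuple with times in seconds:

segments = [(0.0, 10.0, 1), (10.0, 25.5, 2)]
ok, err = save_slicers(segments, '/tmp/slicers.txt')   # hypothetical output path
if not ok:
    print(err)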
Example #13
def parse_runs_file(file_name):
    """ parse a file containing run numbers in a free style
    :param file_name:
    :return:
    """
    datatypeutility.check_file_name(file_name, check_exist=True)

    # read in lines
    with open(file_name, 'r') as run_file:
        lines = run_file.readlines()

    # parse
    run_numbers = list()
    for line in lines:
        line = line.strip()

        if len(line) == 0:
            continue
        elif line.startswith('#'):
            # comment line
            continue

        # replace ',' with ' ' so comma- and space-separated items split alike
        line = line.replace(',', ' ')
        items = line.split()

        for item in items:
            try:
                run_number = int(item)
            except ValueError:
                print('Unable to parse {} as run number'.format(item))
            else:
                run_numbers.append(run_number)
        # END-FOR
    # END-FOR

    run_numbers = sorted(run_numbers)

    return run_numbers
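A usage sketch showing the 'free style' input that is tolerated (file path and contents are hypothetical):

# /tmp/runs.txt might contain:
#   # first batch
#   160989, 160990 160991
run_numbers = parse_runs_file('/tmp/runs.txt')
print(run_numbers)   # [160989, 160990, 160991], sorted ascending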
Example #14
    def _create_standard_directory(standard_dir, material_type):
        """
        create a standard sample directory for GSAS file and sample log record
        :param standard_dir:
        :param material_type: name/type of the standard type
        :return:
        """
        datatypeutility.check_file_name(standard_dir,
                                        check_exist=False,
                                        is_dir=True,
                                        check_writable=True,
                                        note='Standard (tag) directory')

        try:
            os.mkdir(standard_dir, 0o777)
            print('[INFO VBIN TAG] Created directory {}'.format(standard_dir))
        except OSError as os_err:
            raise RuntimeError(
                'Failed to create {} for standard material {} due to {}'
                ''.format(standard_dir, material_type, os_err))

        return
Example #15
    def process_merge_output_dir(self):
        """ Process different options for output directory.
        Options are 'output', 'choprun' and 'binfolder'
        :return:
        """
        num_outputs = 0
        output_directory = None

        if 'OUTPUT' in self._commandArgsDict:
            output_directory = self._commandArgsDict['OUTPUT']
            num_outputs += 1

        if 'BINFOLDER' in self._commandArgsDict:
            output_directory = self._commandArgsDict['BINFOLDER']
            num_outputs += 1

        if 'CHOPRUN' in self._commandArgsDict:
            chop_run = str(self._commandArgsDict['CHOPRUN'])
            output_directory = self._generate_chop_run_dir(chop_run)
            num_outputs += 1

        # check output
        if num_outputs == 0:
            raise RuntimeError(
                'User must specify one and only one in OUTPUT, BINFOLDER and CHOPRUN.'
                'Now there is nothing given.')
        elif num_outputs > 1:
            raise RuntimeError(
                'User must specify one and only one in OUTPUT, BINFOLDER and CHOPRUN.'
                'Now there are too much: OUTPUT: {}, BINFOLDER: {}, CHOPRUN: {}.'
                ''.format('OUTPUT' in self._commandArgsDict, 'BINFOLDER'
                          in self._commandArgsDict, 'CHOPRUN'
                          in self._commandArgsDict))

        # check write permission
        datatypeutility.check_file_name(output_directory, False, True, True,
                                        'MERGE output directory')

        return output_directory
Example #16
    def __init__(self, number_banks, focus_instrument_dict, num_threads=24, output_dir=None):
        """
        initialization
        :param number_banks: number of banks to focus to
        :param focus_instrument_dict: dictionary of parameter for instrument
        :param output_dir:
        :param num_threads:
        """
        datatypeutility.check_int_variable('Number of banks', number_banks, [1, None])
        datatypeutility.check_int_variable('Number of threads', num_threads, [1, 256])
        datatypeutility.check_dict('Focused instrument dictionary', focus_instrument_dict)

        # other directories
        self._output_dir = '/tmp/'
        if output_dir is not None:
            datatypeutility.check_file_name(
                output_dir, True, True, True, 'Output directory for generated GSAS')
            self._output_dir = output_dir

        # run number (current or coming)
        self._run_number = 0
        self._ws_name_dict = dict()
        self._last_loaded_event_ws = None
        self._last_loaded_ref_id = ''
        self._det_eff_ws_name = None

        self._number_banks = number_banks

        self._focus_instrument_dict = focus_instrument_dict
        # self._init_focused_instruments()

        # multiple threading variables
        self._number_threads = num_threads

        # dictionary for gsas content (multiple threading)
        self._gsas_buffer_dict = dict()

        return
Example #17
    def load_data(self, event_file_name):
        """
        Load an event file
        :param event_file_name:
        :return:
        """
        datatypeutility.check_file_name(event_file_name, check_exist=True, note='Event data file')

        # generate output workspace and data key
        out_ws_name, data_key = self.generate_output_workspace_name(event_file_name)

        # keep it as the current workspace
        if event_file_name.endswith('.h5'):
            self._last_loaded_event_ws = LoadEventNexus(Filename=event_file_name, MetaDataOnly=False, Precount=True,
                                                        OutputWorkspace=out_ws_name)
        else:
            self._last_loaded_event_ws = Load(Filename=event_file_name, OutputWorkspace=out_ws_name)

        self._last_loaded_ref_id = data_key

        self._ws_name_dict[data_key] = out_ws_name

        return data_key
Example #18
    def chop_data_overlap_time_period(
            self, run_number, start_time, stop_time, time_interval,
            overlap_time_interval, reduce_flag, vanadium, output_dir, dry_run,
            chop_loadframe_log, chop_furnace_log, roi_list, mask_list,
            num_banks, binning_parameters, save_to_nexus, iparm_file_name):
        """
        Chop data by time interval
        :param run_number:
        :param start_time:
        :param stop_time:
        :param time_interval:
        :param reduce_flag: flag to reduce the data afterwards
        :param vanadium: vanadium run number for normalization. None for no normalization;
        :param output_dir:
        :param dry_run:
        :param chop_loadframe_log:
        :param chop_furnace_log:
        :return:
        """
        # check inputs
        datatypeutility.check_int_variable('Run number', run_number, (1, None))
        datatypeutility.check_file_name(output_dir,
                                        True,
                                        True,
                                        is_dir=True,
                                        note='Output directory')

        # dry run: return input options
        if dry_run:
            outputs = 'Slice IPTS-{0} Run {1} by time with ({2}, {3}, {4}) and dt = {5} ' \
                      ''.format(self._iptsNumber, run_number, start_time, time_interval,
                                stop_time, overlap_time_interval)
            if reduce_flag:
                outputs += 'and reduce (to GSAS) '
            else:
                outputs += 'and save to NeXus files '
            outputs += 'to directory %s' % output_dir

            if not os.access(output_dir, os.W_OK):
                outputs += '\n[WARNING] Output directory %s is not writable!' % output_dir

            return True, outputs
        # END-IF (dry run)

        # generate data slicer
        # get chopper
        chopper = self._controller.project.get_chopper(run_number)
        status, slice_key_list = chopper.set_overlap_time_slicer(
            start_time, stop_time, time_interval, overlap_time_interval)

        if not status:
            error_msg = slice_key_list
            return False, error_msg

        # chop
        for i_slice, slice_key in enumerate(slice_key_list):
            status, message = self._controller.project.chop_run(
                run_number,
                slice_key,
                reduce_flag=reduce_flag,
                fullprof=self._write_to_fullprof,
                vanadium=vanadium,
                save_chopped_nexus=save_to_nexus,
                number_banks=num_banks,
                tof_correction=False,
                output_directory=output_dir,
                user_bin_parameter=binning_parameters,
                roi_list=roi_list,
                mask_list=mask_list,
                nexus_file_name=self._raw_nexus_file_name,
                gsas_iparm_file=iparm_file_name,
                overlap_mode=False,
                gda_start=i_slice)

            print('[DB...BAT] Processed: {} '.format(slice_key))

            if not status:
                return False, message
        # END-FOR

        return True, 'DT is implemented but not efficient'
Example #19
    def chop_data_by_time(self, run_number, start_time, stop_time,
                          time_interval, reduce_flag, vanadium, output_dir,
                          dry_run, chop_loadframe_log, chop_furnace_log,
                          roi_list, mask_list, num_banks, binning_parameters,
                          save_to_nexus, iparm_file_name):
        """
        Chop data by time interval
        :param run_number:
        :param start_time:
        :param stop_time:
        :param time_interval:
        :param reduce_flag: flag to reduce the data afterwards
        :param vanadium: vanadium run number for normalization. None for no normalization;
        :param output_dir:
        :param dry_run:
        :param chop_loadframe_log:
        :param chop_furnace_log:
        :param roi_list: list (region of interest files)
        :param mask_list: list (mask files)
        :param binning_parameters: binning parameters
        :return:
        """
        # check inputs
        if self._raw_nexus_file_name is None:
            datatypeutility.check_int_variable('Run number', run_number,
                                               (1, None))
        else:
            datatypeutility.check_file_name(self._raw_nexus_file_name,
                                            check_exist=True,
                                            check_writable=False,
                                            is_dir=False,
                                            note='Event Nexus file')
            run_number = 0

        datatypeutility.check_file_name(output_dir,
                                        check_exist=True,
                                        check_writable=True,
                                        is_dir=True,
                                        note='Output directory')

        # dry run: return input options
        if dry_run:
            outputs = 'Slice IPTS-%d Run %d by time with (%s, %s, %s) ' % (
                self._iptsNumber, run_number, str(start_time),
                str(time_interval), str(stop_time))
            if reduce_flag:
                outputs += 'and reduce (to GSAS) '
            else:
                outputs += 'and save to NeXus files '
            outputs += 'to directory %s' % output_dir

            if not os.access(output_dir, os.W_OK):
                outputs += '\n[WARNING] Output directory %s is not writable!' % output_dir

            return True, outputs
        # END-IF (dry run)

        # generate data slicer
        status, slicer_key = self._controller.gen_data_slicer_by_time(
            run_number,
            start_time,
            stop_time,
            time_interval,
            raw_nexus_name=self._raw_nexus_file_name)
        if not status:
            error_msg = str(slicer_key)
            return False, 'Unable to generate data slicer by time due to %s.' % error_msg

        # chop and reduce
        # if chop_loadframe_log:
        #     exp_log_type = 'loadframe'
        # elif chop_furnace_log:
        #     exp_log_type = 'furnace'
        # else:
        #     exp_log_type = None

        # chop
        print('[DB...BAT...UND] Slicer key = {}'.format(slicer_key))
        status, message = self._controller.project.chop_run(
            run_number,
            slicer_key,
            reduce_flag=reduce_flag,
            fullprof=self._write_to_fullprof,
            vanadium=vanadium,
            save_chopped_nexus=save_to_nexus,
            number_banks=num_banks,
            tof_correction=False,
            output_directory=output_dir,
            user_bin_parameter=binning_parameters,
            roi_list=roi_list,
            mask_list=mask_list,
            nexus_file_name=self._raw_nexus_file_name,
            gsas_iparm_file=iparm_file_name)

        return status, message
Example #20
    def chop_data_by_log(self, run_number, start_time, stop_time, log_name,
                         min_log_value, max_log_value, log_step_value,
                         reduce_flag, num_banks, exp_log_type,
                         binning_parameters, mask_list, roi_list, output_dir,
                         dry_run, vanadium, iparm_file_name, save_to_nexus):
        """
        chop data by log value.
        Note: always save the chopped NeXus
        :param run_number:
        :param start_time:
        :param stop_time:
        :param log_name:
        :param min_log_value:
        :param max_log_value:
        :param log_step_value:
        :param reduce_flag:
        :param output_dir:
        :param dry_run:
        :return:
        """
        # check inputs
        datatypeutility.check_int_variable('Run number', run_number, (1, None))
        datatypeutility.check_file_name(output_dir, True, True, True,
                                        'Output directory')

        # dry run: return input options
        if dry_run:
            outputs = 'Slice IPTS-{0} Run {1} by log {2}  with ({3}, {4}, {5}) ' \
                      'within wall time ({6}, {7})'.format(self._iptsNumber, run_number, log_name,
                                                           min_log_value, log_step_value, max_log_value,
                                                           start_time, stop_time)
            if reduce_flag:
                outputs += '\n\tand reduce (to GSAS) '
            else:
                outputs += '\n\tand save to NeXus files '
            outputs += 'to directory %s' % output_dir

            if not os.access(output_dir, os.W_OK):
                outputs += '\n[WARNING] Output directory %s is not writable!' % output_dir
            return True, outputs
        # END-IF (dry run)

        # generate data slicer by log value
        status, ret_obj = self._controller.generate_data_slicer_by_log(
            run_number, start_time, stop_time, log_name, min_log_value,
            log_step_value, max_log_value)
        if not status:
            error_msg = str(ret_obj)
            return False, 'Unable to generate data slicer by time due to %s.' % error_msg
        else:
            slicer_key = ret_obj

        # chop and reduce
        print('[DB...BAT] user_bin_parameters = {}  ... type = {}'
              ''.format(binning_parameters, type(binning_parameters)))
        status, message = self._controller.project.chop_run(
            run_number,
            slicer_key,
            reduce_flag=reduce_flag,
            vanadium=vanadium,
            save_chopped_nexus=save_to_nexus,
            number_banks=num_banks,
            tof_correction=False,
            output_directory=output_dir,
            user_bin_parameter=binning_parameters,
            roi_list=roi_list,
            mask_list=mask_list,
            nexus_file_name=self._raw_nexus_file_name,
            gsas_iparm_file=iparm_file_name)

        return status, message
Example #21
    def save(self,
             diff_ws_name,
             run_date_time,
             gsas_file_name,
             ipts_number,
             run_number,
             gsas_param_file_name,
             align_vdrive_bin,
             van_ws_name,
             is_chopped_run,
             write_to_file=True):
        """
        Save a workspace to a GSAS file or a string
        :param diff_ws_name: diffraction data workspace
        :param run_date_time: date and time of the run
        :param gsas_file_name: output file name; None for no file output
        :param ipts_number:
        :param run_number: if not None, run number
        :param gsas_param_file_name:
        :param align_vdrive_bin: Flag to align with VDRIVE bin edges/boundaries
        :param van_ws_name: name of vanadium workspaces loaded from GSAS (replacing vanadium_gsas_file)
        :param is_chopped_run: flag indicating that the input workspace comes from an event-sliced (chopped) run
        :param write_to_file: flag to write the text buffer to file
        :return: string as the file content
        """
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # set the unit to TOF (compare the unit ID, not the Unit object)
        if diff_ws.getAxis(0).getUnit().unitID() != 'TOF':
            ConvertUnits(InputWorkspace=diff_ws_name,
                         OutputWorkspace=diff_ws_name,
                         Target='TOF',
                         EMode='Elastic')
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # convert to histogram data
        if not diff_ws.isHistogramData():
            ConvertToHistogram(InputWorkspace=diff_ws_name, OutputWorkspace=diff_ws_name)

        # get the binning parameters
        if align_vdrive_bin:
            bin_params_set = self._get_tof_bin_params(
                self._get_vulcan_phase(run_date_time),
                diff_ws.getNumberHistograms())
        else:
            # a binning parameter set for doing nothing
            bin_params_set = [(range(1,
                                     diff_ws.getNumberHistograms() + 1), None,
                               None)]

        # check for vanadium GSAS file name
        if van_ws_name is not None:
            # check whether a workspace exists
            if not mantid_helper.workspace_does_exist(van_ws_name):
                raise RuntimeError(
                    'Vanadium workspace {} does not exist in Mantid ADS'.
                    format(van_ws_name))
            van_ws = mantid_helper.retrieve_workspace(van_ws_name)

            # check number of histograms
            if mantid_helper.get_number_spectra(
                    van_ws) != mantid_helper.get_number_spectra(diff_ws):
                raise RuntimeError(
                    'Numbers of histograms between vanadium spectra and output GSAS are different'
                )
        else:
            van_ws = None
        # END-IF

        # rebin and then write output
        gsas_bank_buffer_dict = dict()
        num_bank_sets = len(bin_params_set)

        for bank_set_index in range(num_bank_sets):
            # get value
            bank_id_list, bin_params, tof_vector = bin_params_set[
                bank_set_index]

            # Rebin to these banks' parameters (output = Histogram)
            if bin_params is not None:
                Rebin(InputWorkspace=diff_ws_name,
                      OutputWorkspace=diff_ws_name,
                      Params=bin_params,
                      PreserveEvents=True)

            # Create output
            for bank_id in bank_id_list:
                # check vanadium bin edges
                if van_ws is not None:
                    # check whether the bins are same between GSAS workspace and vanadium workspace
                    unmatched, reason = self._compare_workspaces_dimension(
                        van_ws, bank_id, tof_vector)
                    if unmatched:
                        raise RuntimeError(
                            'Vanadium GSAS workspace {} does not match workspace {}: {}'
                            ''.format(van_ws_name, diff_ws_name, reason))
                # END-IF

                # write GSAS head considering vanadium
                gsas_section_i = self._write_slog_bank_gsas(
                    diff_ws_name, bank_id, tof_vector, van_ws)
                gsas_bank_buffer_dict[bank_id] = gsas_section_i
        # END-FOR

        # header
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)
        gsas_header = self._generate_vulcan_gda_header(diff_ws, gsas_file_name,
                                                       ipts_number, run_number,
                                                       gsas_param_file_name,
                                                       is_chopped_run)

        # form to a big string
        gsas_buffer = gsas_header
        for bank_id in sorted(gsas_bank_buffer_dict.keys()):
            gsas_buffer += gsas_bank_buffer_dict[bank_id]

        # write to HDD
        if write_to_file:
            datatypeutility.check_file_name(gsas_file_name,
                                            check_exist=False,
                                            check_writable=True,
                                            is_dir=False,
                                            note='Output GSAS file')
            with open(gsas_file_name, 'w') as g_file:
                g_file.write(gsas_buffer)

        return gsas_buffer
Example #22
    def save_2theta_group(self, diff_ws_name, output_dir, run_date_time,
                          ipts_number, run_number, gsas_param_file_name,
                          van_ws_name, two_theta_array, tth_pixels_num_array,
                          target_bank_id, scale_factor):
        """ Save workspace from 2theta grouped
        :param diff_ws_name:
        :param output_dir:
        :param run_date_time:
        :param ipts_number:
        :param run_number:
        :param gsas_param_file_name:
        :param van_ws_name:
        :param two_theta_array:
        :param tth_pixels_num_array: array of integers: number of pixels per 2theta range (for normalization)
        :param target_bank_id:
        :param scale_factor:
        :return:
        """
        # process input workspaces
        diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # set the unit to TOF (compare the unit ID, not the Unit object)
        if diff_ws.getAxis(0).getUnit().unitID() != 'TOF':
            ConvertUnits(InputWorkspace=diff_ws_name,
                         OutputWorkspace=diff_ws_name,
                         Target='TOF',
                         EMode='Elastic')
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)

        # convert to histogram data
        if not diff_ws.isHistogramData():
            ConvertToHistogram(InputWorkspace=diff_ws_name, OutputWorkspace=diff_ws_name)

        # vanadium
        if isinstance(van_ws_name, str) and len(van_ws_name) > 0:
            van_ws = mantid_helper.retrieve_workspace(van_ws_name)
        else:
            van_ws = None

        # get the binning parameters
        bin_params_set = self._get_tof_bin_params(
            self._get_vulcan_phase(run_date_time), 3)

        # check output directory
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        # For each 2theta bin / spectrum, create a GSAS file
        for tth_id in range(diff_ws.getNumberHistograms()):
            # rebin and then write output
            gsas_bank_buffer_dict = dict()
            num_bank_sets = len(bin_params_set)

            for bank_set_index in range(num_bank_sets):
                # get value
                bank_id_list, bin_params, tof_vector = bin_params_set[
                    bank_set_index]

                # Rebin to these banks' parameters (output = Histogram)
                if bin_params is not None:
                    Rebin(InputWorkspace=diff_ws_name,
                          OutputWorkspace=diff_ws_name,
                          Params=bin_params,
                          PreserveEvents=True)

                # Create output
                for bank_id_i in bank_id_list:
                    # check vanadium bin edges
                    if van_ws is not None:
                        # check whether the bins are same between GSAS workspace and vanadium workspace
                        unmatched, reason = self._compare_workspaces_dimension(
                            van_ws, bank_id_i, tof_vector)
                        if unmatched:
                            raise RuntimeError(
                                'Vanadium GSAS workspace {} does not match workspace {}: {}'
                                ''.format(van_ws_name, diff_ws_name, reason))
                    # END-IF

                    # write GSAS head considering vanadium
                    if bank_id_i == target_bank_id:
                        # target bank to write: east/west
                        source_bank_id = tth_id + 1
                        norm_factor = tth_pixels_num_array[tth_id]
                    else:
                        source_bank_id = bank_id_i
                        norm_factor = -1

                    gsas_section_i = self._write_slog_bank_gsas(
                        diff_ws_name,
                        source_bank_id,
                        tof_vector,
                        van_ws,
                        gsas_bank_id=bank_id_i,
                        norm_factor=norm_factor,
                        scale_factor=scale_factor)
                    gsas_bank_buffer_dict[bank_id_i] = gsas_section_i
                    print('[DB...BAT] Write bank {} to GSAS bank {}'.format(
                        source_bank_id, bank_id_i))
            # END-FOR

            # header
            diff_ws = mantid_helper.retrieve_workspace(diff_ws_name)
            gsas_file_name = os.path.join(output_dir,
                                          '{}.gda'.format(tth_id + 1))
            extra_info = '2theta {} to {}'.format(
                two_theta_array[tth_id], two_theta_array[tth_id + 1])
            gsas_header = self._generate_vulcan_gda_header(
                diff_ws, gsas_file_name, ipts_number, run_number,
                gsas_param_file_name, True, extra_info)

            # form to a big string
            gsas_buffer = gsas_header
            for bank_id in sorted(gsas_bank_buffer_dict.keys()):
                gsas_buffer += gsas_bank_buffer_dict[bank_id]

            # write to HDD
            datatypeutility.check_file_name(gsas_file_name,
                                            check_exist=False,
                                            check_writable=True,
                                            is_dir=False,
                                            note='Output GSAS file')
            with open(gsas_file_name, 'w') as g_file:
                g_file.write(gsas_buffer)

        # END-FOR (tth_id)

        return
Example #23
    def load_binned_data(self,
                         data_file_name,
                         data_file_type,
                         max_int,
                         prefix='',
                         data_key=None,
                         target_unit=None):
        """ Load binned data
        :param data_file_name:
        :param data_file_type:
        :param prefix: prefix of the GSAS workspace name. It can be None, an integer, or a string
        :param max_int: maximum integer for sequence such as 999 for 001, 002, ... 999
        :param data_key: data key or None (to use workspace name as data key)
        :param target_unit: target unit or None
        :return: string as data key (aka. workspace name)
        """
        # check inputs
        datatypeutility.check_file_name(data_file_name, True, False, False,
                                        'Binned/reduced data file to load')
        if data_file_type is not None:
            datatypeutility.check_string_variable('Data file type',
                                                  data_file_type,
                                                  ['gsas', 'processed nexus'])
        if data_key is not None:
            datatypeutility.check_string_variable('Data key', data_key)
        datatypeutility.check_string_variable('Workspace prefix', prefix)

        # find out the type of the data file
        file_name, file_extension = os.path.splitext(data_file_name)

        if data_file_type is None:
            if file_extension.lower() in ['.gda', '.gsa', '.gss']:
                data_file_type = 'gsas'
            elif file_extension.lower() == '.nxs':
                data_file_type = 'processed nexus'
            else:
                raise RuntimeError(
                    'Data file extension {0} is not recognized.'.format(
                        file_extension))
        else:
            data_file_type = data_file_type.lower()
        # END-IF-ELSE

        # Load data
        data_ws_name = self.construct_workspace_name(data_file_name,
                                                     data_file_type, prefix,
                                                     max_int)

        if data_file_type == 'gsas':
            # load as GSAS
            mantid_helper.load_gsas_file(data_file_name,
                                         data_ws_name,
                                         standard_bin_workspace=None)
        elif data_file_type == 'processed nexus':
            # load processed nexus (pass the full file name, not the splitext base)
            mantid_helper.load_nexus(data_file_name=data_file_name,
                                     output_ws_name=data_ws_name,
                                     meta_data_only=False)
        else:
            raise RuntimeError('Unable to support %s file.' % data_file_type)

        # convert unit
        if target_unit:
            mantid_helper.mtd_convert_units(data_ws_name, target_unit)

        if data_key is None:
            data_key = data_ws_name

        # register by adding to data management dictionary
        self._workspaceDict[data_key] = data_ws_name
        # TODO - TONIGHT 0 - Add an option to the method such that single run data will go to singleGSASDict
        # TODO - ... ...   - chopped run will NOT to be recorded .. self._loadedGSSDict[] = ...maybe
        self._singleGSASDict[data_key] = data_file_name

        return data_key
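A usage sketch, where loader stands for an instance of the enclosing class (the instance name and file path are hypothetical):

data_key = loader.load_binned_data('/tmp/160989.gda',    # hypothetical GSAS file
                                   data_file_type=None,  # inferred from the .gda extension
                                   max_int=999,
                                   prefix='g',
                                   target_unit='dSpacing')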
Example #24
def export_experiment_log(ws_name, record_file_name, sample_name_list, sample_title_list, sample_operation_list,
                          patch_list):
    """ Export experiment logs
    Note: duplicate from reduce_VULCAN.ReduceVulcanData._export_experiment_log
    :param ws_name:
    :param record_file_name:
    :param sample_title_list:
    :param sample_operation_list:
    :param patch_list:
    :return:
    """
    # check inputs
    datatypeutility.check_file_name(record_file_name, check_exist=False, check_writable=True,
                                    is_dir=False, note='Standard material record file')
    datatypeutility.check_list('Sample log names', sample_name_list)
    datatypeutility.check_list('Sample log titles', sample_title_list)
    datatypeutility.check_list('Sample log operations', sample_operation_list)

    if len(sample_name_list) != len(sample_title_list) or len(sample_name_list) != len(sample_operation_list):
        raise RuntimeError('Sample name list ({0}), sample title list ({1}) and sample operation list ({2}) '
                           'must have the same size.'
                           ''.format(len(sample_name_list), len(sample_title_list), len(sample_operation_list)))

    # get file mode
    if os.path.exists(record_file_name):
        file_write_mode = 'append'
    else:
        file_write_mode = 'new'

    # write
    print('[DB...BAT] Export (TAG) experiment log record: {}'.format(record_file_name))
    try:
        mantid.simpleapi.ExportExperimentLog(InputWorkspace=ws_name,
                                             OutputFilename=record_file_name,
                                             FileMode=file_write_mode,
                                             SampleLogNames=sample_name_list,
                                             SampleLogTitles=sample_title_list,
                                             SampleLogOperation=sample_operation_list,
                                             TimeZone="America/New_York",
                                             OverrideLogValue=patch_list,
                                             OrderByTitle='RUN',
                                             RemoveDuplicateRecord=True)
    except RuntimeError as run_err:
        message = 'Failed to export experiment record to {} due to {}.' \
                  ''.format(record_file_name, run_err)
        return False, message
    except ValueError as value_err:
        message = 'Exporting experiment record to {0} failed due to {1}.' \
                  ''.format(record_file_name, value_err)
        return False, message

    # Set up the mode for global access
    file_access_mode = oct(os.stat(record_file_name)[stat.ST_MODE])
    file_access_mode = file_access_mode[-3:]
    if file_access_mode != '666' and file_access_mode != '676':
        try:
            os.chmod(record_file_name, 0o666)
        except OSError as os_err:
            return False, '[ERROR] Unable to set file {0} to mode 666 due to {1}' \
                          ''.format(record_file_name, os_err)
    # END-IF

    return True, ''
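A usage sketch, assuming a workspace named 'vulcan_log_ws' already exists in the Mantid ADS; the three sample-log lists must line up one-to-one (all values are hypothetical):

status, error_msg = export_experiment_log(
    ws_name='vulcan_log_ws',                      # hypothetical workspace
    record_file_name='/tmp/AutoRecord.txt',       # hypothetical file
    sample_name_list=['run_number', 'duration'],  # hypothetical log names
    sample_title_list=['RUN', 'Duration'],
    sample_operation_list=['0', 'sum'],
    patch_list=[])
if not status:
    print(error_msg)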
Example #25
def browse_file(parent, caption, default_dir, file_filter, file_list=False, save_file=False):
    """ browse a file or files
    :param parent:
    :param caption:
    :param default_dir:
    :param file_filter:
    :param file_list:
    :param save_file:
    :return: if file_list is False: return string (file name); otherwise, return a list;
             if user cancels the operation, then return None
    """
    # check inputs
    assert isinstance(parent, object), 'Parent {} must be of some object.'.format(parent)
    datatypeutility.check_string_variable('File browsing title/caption', caption)
    datatypeutility.check_file_name(default_dir, check_exist=False, is_dir=True)
    datatypeutility.check_bool_variable('Flag for browse a list of files to load', file_list)
    datatypeutility.check_bool_variable('Flag to select loading or saving file', save_file)
    if file_filter is None:
        file_filter = 'All Files (*.*)'
    else:
        datatypeutility.check_string_variable('File filter', file_filter)

    if save_file:
        # browse file name to save to
        if platform.system() == 'Darwin':
            # TODO - 20180721 - Find out the behavior on Mac!
            file_filter = ''
        save_set = QFileDialog.getSaveFileName(parent, caption=caption, directory=default_dir,
                                               filter=file_filter)
        if isinstance(save_set, tuple):
            # returned include both file name and filter
            file_name = str(save_set[0])
        else:
            file_name = str(save_set)

    elif file_list:
        # browse file names to load
        open_set = QFileDialog.getOpenFileNames(parent, caption, default_dir, file_filter)

        if isinstance(open_set, tuple):
            # PyQt5
            file_name_list = open_set[0]
        else:
            file_name_list = open_set

        if len(file_name_list) == 0:
            # user canceled
            return None
        else:
            return file_name_list

    else:
        # browse single file name
        open_set = QFileDialog.getOpenFileName(parent, caption, default_dir, file_filter)

        if isinstance(open_set, tuple):
            # PyQt5
            file_name = open_set[0]
        else:
            file_name = open_set

    # END-IF-ELSE

    # for the single-file cases: an empty name means the user canceled
    if len(file_name) == 0:
        return None

    return file_name
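A usage sketch covering the three modes (paths are hypothetical; a QApplication must be running):

# single file to load
name = browse_file(None, 'Open data', '/tmp', 'GSAS (*.gda)')
# multiple files to load
names = browse_file(None, 'Open data', '/tmp', 'GSAS (*.gda)', file_list=True)
# file name to save to
save_name = browse_file(None, 'Save GSAS', '/tmp', 'GSAS (*.gda)', save_file=True)
# every mode returns None if the user cancels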