Beispiel #1
0
    def _process_command(command_processor, arg_dict):
        """ Process a VDrive IDL-compatible command.
        :param command_processor: command processor; must be a process_vcommand.VDriveCommand instance
        :param arg_dict: dictionary of command arguments; an empty dict triggers help output
        :return: tuple (status flag, message string)
        """
        # BUG FIX: assert message previously read '...VDriveCommandbut but not of type...'
        assert isinstance(command_processor, process_vcommand.VDriveCommand), \
            'VDrive IDL-compatible command processor {} must be an instance of ' \
            'vdrive_commands.process_vcommand.VDriveCommand but not of type {}' \
            ''.format(command_processor, type(command_processor))

        datatypeutility.check_dict('VDrive IDL-compatible command arguments', arg_dict)

        if len(arg_dict) == 0:
            # no arguments: return the command's help text instead of executing
            message = command_processor.get_help()
            status = True
        else:
            try:
                status, message = command_processor.exec_cmd()
            except RuntimeError as run_err:
                status = False
                message = 'Unable to execute VDRIVE command due to {}'.format(run_err)

        return status, message
Beispiel #2
0
    def set_focus_virtual_instrument(self, geom_dict):
        """ Set the geometry parameters of the focused virtual instrument.
        :param geom_dict: dictionary of instrument geometry parameters
        :return: None
        """
        # validate then store the geometry dictionary as-is
        datatypeutility.check_dict('Instrument geometry setup', geom_dict)
        self._focus_instrument_geometry_dict = geom_dict
Beispiel #3
0
    def __init__(self, controller, command_args):
        """
        Initialize a VDrive command with its controller and argument dictionary.
        :param controller: VDrive controller; must be a VdriveAPI.VDriveAPI instance
        :param command_args: dictionary of command arguments
        """
        # call base init
        super(VDriveCommand, self).__init__(None)

        # check input
        # BUG FIX: message previously rendered as 'VDriveAPIinstance' (missing space
        # between the two concatenated string literals)
        assert isinstance(controller, VdriveAPI.VDriveAPI), 'Controller must be a VdriveAPI.VDriveAPI ' \
                                                            'instance but not %s.' % controller.__class__.__name__
        datatypeutility.check_dict('VDrive command arguments', command_args)

        # my name
        self._commandName = 'VDRIVE (base)'

        # set controller
        self._controller = controller

        # set arguments to command arguments dictionary: it is only set once here
        # (shallow copy so later mutation of command_args does not affect this command)
        self._commandArgsDict = dict(command_args)

        # map upper-cased supported argument names back to their original spelling,
        # to compare capitalized command arguments with VDrive arguments
        self._commandMapDict = {
            command.upper(): command
            for command in self.SupportedArgs
        }

        # other command variables
        self._iptsNumber = None  # IPTS
        self._runNumberList = list()  # RUN numbers
        # alternative
        self._raw_nexus_file_name = None

        return
Beispiel #4
0
    def __init__(self, number_banks, focus_instrument_dict, num_threads=24, output_dir=None):
        """ Initialize the focusing reducer.
        :param number_banks: number of banks to focus to (at least 1)
        :param focus_instrument_dict: dictionary of instrument parameters for focusing
        :param num_threads: number of worker threads (1 to 256)
        :param output_dir: output directory for generated GSAS; defaults to /tmp/ when None
        """
        # validate inputs
        datatypeutility.check_int_variable('Number of banks', number_banks, [1, None])
        datatypeutility.check_int_variable('Number of threads', num_threads, [1, 256])
        datatypeutility.check_dict('Focused instrument dictionary', focus_instrument_dict)

        # output directory: fall back to /tmp/ unless a valid directory is given
        if output_dir is None:
            self._output_dir = '/tmp/'
        else:
            datatypeutility.check_file_name(
                output_dir, True, True, True, 'Output directory for generated GSAS')
            self._output_dir = output_dir

        # run number (current or coming) and workspace bookkeeping
        self._run_number = 0
        self._ws_name_dict = dict()
        self._last_loaded_event_ws = None
        self._last_loaded_ref_id = ''
        self._det_eff_ws_name = None

        # focusing configuration
        self._number_banks = number_banks
        self._focus_instrument_dict = focus_instrument_dict

        # multiple-threading configuration
        self._number_threads = num_threads

        # buffer of generated GSAS content (filled by worker threads)
        self._gsas_buffer_dict = dict()
Beispiel #5
0
def align_and_focus_event_ws(event_ws_name, output_ws_name, binning_params,
                             diff_cal_ws_name, grouping_ws_name,
                             reduction_params_dict, convert_to_matrix):
    """ Align and focus event workspace.  The procedure to reduce from the EventNexus includes
    1. compress event
    2. mask workspace
    3. align detectors
    4. sort events
    5. diffraction focus
    6. sort events
    7. edit instruments
    8. rebin (uniform binning)
    Output: still event workspace
    :exception RuntimeError: intolerable error
    :param event_ws_name: name of the input EventWorkspace
    :param output_ws_name: name of the output (aligned/focused) workspace
    :param binning_params: binning parameters for rebin; None to skip rebinning
    :param diff_cal_ws_name: name of the DIFC calibration workspace
    :param grouping_ws_name: name of the grouping workspace
    :param reduction_params_dict: dictionary of optional reduction steps;
        recognized keys: 'CompressEvents' (with sub-key 'Tolerance'), 'EditInstrumentGeometry'
    :param convert_to_matrix: if True, the rebin step does not preserve events
    :return: string as ERROR message ('' when no non-critical error occurred)
    """
    # check inputs
    if not mantid_helper.is_event_workspace(event_ws_name):
        raise RuntimeError('Input {0} is not an EventWorkspace'.format(event_ws_name))
    if not mantid_helper.is_calibration_workspace(diff_cal_ws_name):
        diff_ws = mantid_helper.retrieve_workspace(diff_cal_ws_name)
        raise RuntimeError('Input {0} is not a Calibration workspace but a {1}'.format(diff_cal_ws_name,
                                                                                       diff_ws.__class__.__name__))
    if not mantid_helper.is_grouping_workspace(grouping_ws_name):
        raise RuntimeError('Input {0} is not a grouping workspace'.format(grouping_ws_name))

    datatypeutility.check_dict('Reduction parameter dictionary', reduction_params_dict)

    # Align detector
    mantidapi.AlignDetectors(InputWorkspace=event_ws_name,
                             OutputWorkspace=output_ws_name,
                             CalibrationWorkspace=diff_cal_ws_name)

    # Sort events
    mantidapi.SortEvents(InputWorkspace=output_ws_name,
                         SortBy='X Value')

    # Diffraction focus: refuse to focus an empty workspace
    event_ws = mantid_helper.retrieve_workspace(output_ws_name)
    if event_ws.getNumberEvents() == 0:
        print('[DB...BAT] {}: # events = {}'.format(event_ws, event_ws.getNumberEvents()))
        error_message = 'Unable to reduce {} as number of events = 0'.format(event_ws_name)
        raise RuntimeError(error_message)

    mantidapi.DiffractionFocussing(InputWorkspace=output_ws_name,
                                   OutputWorkspace=output_ws_name,
                                   GroupingWorkspace=grouping_ws_name,
                                   PreserveEvents=True)
    # Sort again after focusing
    mantidapi.SortEvents(InputWorkspace=output_ws_name,
                         SortBy='X Value')

    # Compress events as an option
    if 'CompressEvents' in reduction_params_dict:
        compress_events_tolerance = reduction_params_dict['CompressEvents']['Tolerance']
        print('[DB...BAT] User-specified compress tolerance = {}'.format(compress_events_tolerance))
        # BUG FIX: Tolerance was hard-coded to 1.E-5, silently ignoring the
        # user-specified value that was read and printed just above
        mantidapi.CompressEvents(InputWorkspace=output_ws_name,
                                 OutputWorkspace=output_ws_name,
                                 Tolerance=compress_events_tolerance)

    # rebin (optional)
    if binning_params is not None:
        mantid_helper.rebin(workspace_name=output_ws_name,
                            params=binning_params, preserve=not convert_to_matrix)

    # Edit instrument as an option
    if 'EditInstrumentGeometry' in reduction_params_dict:
        try:
            # TODO - NIGHT - In case the number of histograms of output workspace does not match (masked a lot) ...
            # TODO - FIXME - 27 bank Polar and Azimuthal are all None
            mantidapi.EditInstrumentGeometry(Workspace=output_ws_name,
                                             PrimaryFlightPath=mantid_helper.VULCAN_L1,
                                             SpectrumIDs=reduction_params_dict['EditInstrumentGeometry']['SpectrumIDs'],
                                             L2=reduction_params_dict['EditInstrumentGeometry']['L2'],
                                             Polar=reduction_params_dict['EditInstrumentGeometry']['Polar'],
                                             Azimuthal=reduction_params_dict['EditInstrumentGeometry']['Azimuthal'])
        except RuntimeError as run_err:
            # non-critical: report the failure but do not abort the reduction
            error_message = 'Non-critical failure on EditInstrumentGeometry: {}\n'.format(run_err)
            return error_message

    return ''
Beispiel #6
0
    def slice_focus_event_workspace(self, event_ws_name, geometry_calib_ws_name, group_ws_name,
                                    split_ws_name, info_ws_name,
                                    output_ws_base, binning_parameters, chop_overlap_mode,
                                    gsas_info_dict, gsas_writer, gsas_file_index_start,
                                    fullprof):
        """ Slice and diffraction focus event workspace with option to write the reduced data to GSAS file with
        SaveGSS().
        Each workspace is
        1. sliced from original event workspace
        2. diffraction focused
        3. optionally rebinned to IDL binning and read for SaveGSS()
        :param event_ws_name: name of EventWorkspace that has been masked if there is a mask
        :param geometry_calib_ws_name: DIFC calibration Table workspace
        :param group_ws_name: name of Grouping workspace
        :param split_ws_name: name of splitters workspace (TableWorkspace implies relative time)
        :param info_ws_name: name of the splitting information workspace
        :param output_ws_base: base name for the sliced output workspaces
        :param chop_overlap_mode: whether the chopped workspace will have overlapped events (in time)
        :param binning_parameters: None for IDL binning; otherwise, use defined binning
        :param gsas_info_dict: required for writing GSAS files keys (IPTS, 'parm file' = 'vulcan.prm', 'vanadium')
        :param gsas_writer: GSASWriter instance to export to VULCAN GSAS file
        :param gsas_file_index_start: starting index of GSAS file (1.gda, 2.gda.. whether 0.gda?)
        :param fullprof: Flag to write reduced data to Fullprof (along with GSAS)
        :return: tuple: [1] slicing information, [2] output workspace names
        """
        # check inputs
        if binning_parameters is not None:
            datatypeutility.check_list('Binning parameters', binning_parameters)
        datatypeutility.check_dict('GSAS information', gsas_info_dict)

        # starting time
        t0 = time.time()

        # Align detectors: OpenMP
        AlignDetectors(InputWorkspace=event_ws_name,
                       OutputWorkspace=event_ws_name,
                       CalibrationWorkspace=geometry_calib_ws_name)

        t1 = time.time()

        # Filter events: OpenMP
        # TableWorkspace splitters hold relative time; other splitter types do not
        split_ws = mantid_helper.retrieve_workspace(split_ws_name, raise_if_not_exist=True)
        is_relative_time = split_ws.__class__.__name__.count('TableWorkspace') > 0

        result = FilterEvents(InputWorkspace=event_ws_name,
                              SplitterWorkspace=split_ws_name, InformationWorkspace=info_ws_name,
                              OutputWorkspaceBaseName=output_ws_base,
                              FilterByPulseTime=False,
                              GroupWorkspaces=True,
                              OutputWorkspaceIndexedFrom1=True,
                              SplitSampleLogs=True,
                              RelativeTime=is_relative_time)

        # get output workspaces' names
        output_names = mantid_helper.get_filter_events_outputs(result)
        if output_names is None:
            raise RuntimeError(
                'There is no workspace found in the result of FilterEvents (vulcan_slice_reduce)')

        t2 = time.time()

        # construct output GSAS names; an empty workspace name maps to an empty GSAS name
        gsas_names = list()
        for out_ws_name in output_names:
            if len(out_ws_name) == 0:
                gsas_names.append('')
            else:
                gsas_names.append(out_ws_name + '_gsas_not_binned')
        # END-FOR

        # Now start to use multi-threading to diffraction focus the sliced event data
        num_outputs = len(output_names)
        number_ws_per_thread = int(num_outputs / self._number_threads)
        extra = num_outputs % self._number_threads

        print('[DB...IMPORTANT] Output workspace number = {0}, workspace per thread = {1}\n'
              'Output workspaces names: {2}'.format(num_outputs, number_ws_per_thread, output_names))

        thread_pool = dict()
        # create threads and start
        end_sliced_ws_index = 0  # exclusive last
        for thread_id in range(self._number_threads):
            start_sliced_ws_index = end_sliced_ws_index
            # the first `extra` threads each take one additional workspace
            end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread + int(thread_id < extra),
                                      num_outputs)
            # call method self.focus_workspace_list() in multiple threading
            # Note: Thread(target=[method name], args=(method argument 0, method argument 1, ...,)
            workspace_names_i = output_names[start_sliced_ws_index:end_sliced_ws_index]
            gsas_workspace_name_list = gsas_names[start_sliced_ws_index:end_sliced_ws_index]
            thread_pool[thread_id] = threading.Thread(target=self.focus_workspace_list,
                                                      args=(workspace_names_i, gsas_workspace_name_list,
                                                            group_ws_name,))
            thread_pool[thread_id].start()
            print('[DB] thread {0}: [{1}: {2}) ---> {3} workspaces'.
                  format(thread_id, start_sliced_ws_index,  end_sliced_ws_index,
                         end_sliced_ws_index-start_sliced_ws_index))
        # END-FOR

        # join the threads after the diffraction focus is finished
        for thread_id in range(self._number_threads):
            thread_pool[thread_id].join()

        # sanity check: join() blocks until each thread terminates, so none should be alive
        # BUG FIX: original called isAlive() (removed in Python 3.9; is_alive() is the
        # supported name) and the non-existent Thread._Thread_stop(), which would have
        # raised AttributeError if this branch were ever reached
        for thread_id in range(self._number_threads):
            thread_i = thread_pool[thread_id]
            if thread_i is not None and thread_i.is_alive():
                print('[ERROR] Thread {0} is still alive after join()'.format(thread_id))

        t3 = time.time()

        # process overlapping chop
        if chop_overlap_mode:
            # FIXME - Shan't be used anymore unless an optimized algorithm developed for DT option
            output_names = self.process_overlap_chopped_data(output_names)
        # END-IF

        # save ONE python script for future reference
        if len(output_names) > 0:
            python_name = os.path.join(self._output_dir,
                                       '{}_{}.py'.format(self._run_number, split_ws_name))
            GeneratePythonScript(InputWorkspace=output_names[0], Filename=python_name)
        else:
            print('[ERROR] No output workspace to export to GSAS!')

        # write all the processed workspaces to GSAS:  IPTS number and parm_file_name shall be passed
        run_date_time = vulcan_util.get_run_date(event_ws_name, '')
        self.write_to_gsas(output_names, ipts_number=gsas_info_dict['IPTS'], parm_file_name=gsas_info_dict['parm file'],
                           vanadium_gda_name=gsas_info_dict['vanadium'],
                           gsas_writer=gsas_writer, run_start_date=run_date_time,  # ref_tof_sets=binning_parameters,
                           gsas_file_index_start=gsas_file_index_start)

        if fullprof:
            output_dir = self._output_dir
            # FIXME TODO - TOMORROW 0 - Vanadium workspace for Fullprof?
            self.write_to_fullprof_files(output_names, None, output_dir)

        # TODO - TONIGHT 1 - put this section to a method
        # TODO FIXME - TODAY 0 -... Debug disable
        if True:
            pc_time0 = mantid_helper.get_workspace_property(event_ws_name, 'proton_charge').times[0]
            # user does not want to HDF5 in same directory.  Need to write to a special directory
            if self._output_dir.startswith('/SNS/VULCAN/IPTS-'):
                # on the SNS server, do it in a different way
                output_dir = vulcan_util.generate_chopped_log_dir(self._output_dir, True)
            else:
                output_dir = self._output_dir
            self.export_split_logs(output_names, gsas_file_index_start=gsas_file_index_start,
                                   run_start_time=pc_time0,
                                   output_dir=output_dir)
        # END-IF

        # write to logs
        self.write_log_records(output_names, log_type='loadframe')
        tf = time.time()

        # processing time output
        # BUG FIX: a newline was missing between the summary and the 'Details' section
        process_info = '{0}: Runtime = {1}   Total output workspaces = {2}\n' \
                       ''.format(event_ws_name, tf - t0, len(output_names))
        process_info += 'Details for thread = {4}:\n\tLoading  = {0}\n\tChopping = {1}\n\tFocusing = {2}\n\t' \
                        'SaveGSS = {3}'.format(t1 - t0, t2 - t1, t3 - t2,
                                               tf - t3, self._number_threads)
        print('[INFO] {}'.format(process_info))

        # FIXME - FUTURE - Whether this for-loop is useful?
        end_sliced_ws_index = 0
        for thread_id in range(self._number_threads):
            start_sliced_ws_index = end_sliced_ws_index
            end_sliced_ws_index = min(start_sliced_ws_index + number_ws_per_thread + int(thread_id < extra),
                                      num_outputs)
            print('thread {0}: [{1}: {2}) ---> {3} workspaces'
                  .format(thread_id, start_sliced_ws_index, end_sliced_ws_index,
                          end_sliced_ws_index-start_sliced_ws_index))

        return process_info, output_names
    def plot_contour(self, data_set_dict):
        """ Plot 2D data as a contour plot.
        :param data_set_dict: dictionary mapping a scan value (Y axis) to a 2-tuple
                              (vector X, vector of Y-values); all X vectors must have the same size
        :return: None
        """
        # check inputs (BUG FIX: comment line was duplicated)
        datatypeutility.check_dict('Input data set', data_set_dict)

        # TEST/TODO - Find out the status in real time test
        print(
            '[DB...FIND] About to plot contour... Is Zoom From Home = {}, Home XY Limit = {}, '
            'Current X limit = {}'.format(self._isZoomedFromHome,
                                          self._homeXYLimit,
                                          self._zoomInXRange))

        # record the current zoom range if an image is already on the canvas
        if self.has_image_on_canvas():
            print('[DB...BAT] Do I have Image? {}'.format(
                self.has_image_on_canvas()))
            self._zoomInXRange = self.canvas.getXLimit()

        # construct the vectors for 2D contour plot
        x_list = sorted(data_set_dict.keys())
        vec_x = data_set_dict[x_list[0]][0]
        vec_y = numpy.array(x_list)
        size_x = len(vec_x)

        # create matrix on mesh: each row holds the Y values of one pattern
        # (iterate the original keys instead of the numpy array and use enumerate
        # instead of a manual counter)
        matrix_y = numpy.ndarray((len(vec_y), len(vec_x)), dtype='float')
        for matrix_index, key in enumerate(x_list):
            vec_x_i = data_set_dict[key][0]
            if len(vec_x_i) != size_x:
                raise RuntimeError(
                    'Unable to form a contour plot because {0}-th vector has a different size {1} '
                    'than first size {2}'.format(key, len(vec_x_i), size_x))
            matrix_y[matrix_index] = data_set_dict[key][1]
        # END-FOR

        # plot
        self.canvas.add_contour_plot(vec_x, vec_y, matrix_y)

        if self._zoomInXRange is None:
            # no zoom-in recorded: fall back to user-defined default X range
            x_min = 0.3
            x_max = 3.0
        else:
            # zoom is pressed down and already zoomed: restore the recorded range
            x_min = self._zoomInXRange[0]
            x_max = self._zoomInXRange[1]
        self.setXYLimit(xmin=x_min, xmax=x_max)

        # update flag
        self._hasImage = True

        return