def process_monitor_efficiency(workspace_name):
    """
    Apply an efficiency correction to the monitor workspace associated
    with the given workspace.

    @param workspace_name Name of workspace to process monitor for
    @raises ValueError if the monitor parameters are missing from the
            instrument parameter file
    """
    from mantid.simpleapi import OneMinusExponentialCor

    monitor_workspace_name = workspace_name + '_mon'
    instrument = mtd[workspace_name].getInstrument()

    try:
        area = instrument.getNumberParameter('Workflow.Monitor1-Area')[0]
        thickness = instrument.getNumberParameter('Workflow.Monitor1-Thickness')[0]
        attenuation = instrument.getNumberParameter('Workflow.Monitor1-Attenuation')[0]
    except IndexError:
        # getNumberParameter returns an empty list when the parameter is absent.
        # Typo fix: 'form' -> 'from'.
        raise ValueError('Cannot get monitor details from parameter file')

    # A value of -1 flags that no monitor correction should be applied
    if area == -1 or thickness == -1 or attenuation == -1:
        logger.information('For workspace %s, skipping monitor efficiency' % workspace_name)
        return

    OneMinusExponentialCor(InputWorkspace=monitor_workspace_name,
                           OutputWorkspace=monitor_workspace_name,
                           C=attenuation * thickness,
                           C1=area)
    def PyExec(self):
        """Compute the transmission as sample-monitor / can-monitor and group the results."""
        self._setup()

        basename = str(self._sample_ws_in)

        # Extract the transmission monitors for the sample and can runs
        self._trans_mon(basename, 'Sam', self._sample_ws_in)
        self._trans_mon(basename, 'Can', self._can_ws_in)

        sample_ws = basename + '_Sam'
        can_ws = basename + '_Can'
        transmission_ws = basename + '_Trans'

        # Transmission = sample monitor / can monitor
        Divide(LHSWorkspace=sample_ws, RHSWorkspace=can_ws, OutputWorkspace=transmission_ws)

        average_transmission = numpy.average(mtd[transmission_ws].readY(0))
        logger.information('Average Transmission: ' + str(average_transmission))

        AddSampleLog(Workspace=transmission_ws, LogName='can_workspace', LogType='String', LogText=self._can_ws_in)

        # Group all three workspaces into the output group
        GroupWorkspaces(InputWorkspaces=','.join([sample_ws, can_ws, transmission_ws]),
                        OutputWorkspace=self._out_ws)

        self.setProperty('OutputWorkspace', self._out_ws)
Beispiel #3
0
def Iblock(a, first):  # read Ascii block of Integers
    """
    Read an ASCII block of integer values (written 10 per line).

    @param a     Sequence of text lines
    @param first Index of the block header line (must start with 'I')
    @return (index of the next unread line, list of integer values)
    """
    line1 = a[first]
    line2 = a[first + 1]
    val = ExtractInt(line2)
    numb = val[0]
    # '//' keeps integer semantics on Python 3 ('/' would return a float,
    # which breaks the range() calls below)
    lines = numb // 10
    last = numb - 10 * lines
    if line1.startswith('I'):
        error = ''
    else:
        error = 'NOT an I block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)
    ival = []
    mm = first + 1  # keeps mm defined even when there are no full lines (numb < 10)
    for m in range(0, lines):
        mm = first + 2 + m
        val = ExtractInt(a[mm])
        for n in range(0, 10):
            ival.append(val[n])
    mm += 1
    val = ExtractInt(a[mm])
    for n in range(0, last):
        ival.append(val[n])
    mm += 1
    return mm, ival  # values as list
Beispiel #4
0
    def _read_width_file(self, readWidth, widthFile, numSampleGroups):
        """
        Read (or synthesise) the per-group width values.

        @param readWidth       If True, read widths from widthFile; otherwise use zeros
        @param widthFile       Name of the ASCII width file
        @param numSampleGroups Number of sample groups the widths must match
        @return tuple (widthY, widthE) padded to length 51 for the Fortran call
        @raises ValueError if the file cannot be read, is empty, or its line
                count does not match numSampleGroups
        """
        widthY, widthE = [], []
        if readWidth:
            logger.information('Width file is ' + widthFile)
            # read ascii based width file
            try:
                wfPath = s_api.FileFinder.getFullPath(widthFile)
                # context manager guarantees the handle is closed even on error
                # (the original leaked it if reading raised mid-way)
                with open(wfPath, 'r') as handle:
                    asc = [line.rstrip() for line in handle]
            except Exception:
                raise ValueError('Failed to read width file')
            numLines = len(asc)
            if numLines == 0:
                raise ValueError('No groups in width file')
            if numLines != numSampleGroups:  # check that no. groups are the same
                raise ValueError('Width groups (' + str(numLines) + ') not = Sample (' + str(numSampleGroups) + ')')
        else:
            # no file: just use constant values
            widthY = np.zeros(numSampleGroups)
            widthE = np.zeros(numSampleGroups)
        # pad for Fortran call
        widthY = PadArray(widthY, 51)
        widthE = PadArray(widthE, 51)

        return widthY, widthE
    def PyExec(self):
        """
        Recalculate the goniometer for every peak in the workspace from its
        Q_sample vector and the (given or logged) wavelength.
        """

        peaks = self.getProperty('Workspace').value

        wavelength = self.getProperty("Wavelength").value
        if wavelength == Property.EMPTY_DBL:
            # No wavelength given: fall back to the value in the run log
            wavelength = peaks.run()['wavelength'].value

        if self.getProperty("OverrideProperty").value:
            # Caller supplied the goniometer configuration explicitly
            flip_x = self.getProperty("FlipX").value
            inner = self.getProperty("InnerGoniometer").value
        else:
            # NOTE(review): flip_x is only enabled for HB3A — presumably an
            # instrument-specific axis convention; confirm with HB3A docs
            flip_x = peaks.getInstrument().getName() == "HB3A"

            if peaks.getInstrument().getName() == "HB3A":
                # Use the inner axis when omega did not move during the scan
                inner = math.isclose(peaks.run().getTimeAveragedStd("omega"),
                                     0.0)
            else:
                inner = False

        starting_goniometer = peaks.run().getGoniometer().getR()

        # Recompute and store the goniometer matrix for each peak
        for n in range(peaks.getNumberPeaks()):
            p = peaks.getPeak(n)
            g = Goniometer()
            g.setR(starting_goniometer)
            g.calcFromQSampleAndWavelength(V3D(*p.getQSampleFrame()),
                                           wavelength, flip_x, inner)
            logger.information(
                "Found goniometer omega={:.2f} chi={:.2f} phi={:.2f} for peak {} with Q_sample {}"
                .format(*g.getEulerAngles('YZY'), n, p.getQSampleFrame()))
            p.setWavelength(wavelength)
            p.setGoniometerMatrix(g.getR())
def identify_bad_detectors(workspace_name):
    """
    Determine which spectra of a workspace should be masked.

    @param workspace_name Name of workspace to use to get masking detectors
    @return List of masked spectra
    """
    from mantid.simpleapi import (IdentifyNoisyDetectors, DeleteWorkspace)

    instrument = mtd[workspace_name].getInstrument()

    # Masking strategy is configured via an instrument parameter; default to none
    mask_parameters = instrument.getStringParameter('Workflow.Masking')
    masking_type = mask_parameters[0] if mask_parameters else 'None'

    logger.information('Masking type: %s' % masking_type)

    masked_spec = []

    if masking_type == 'IdentifyNoisyDetectors':
        temp_mask_ws = '__workspace_mask'
        IdentifyNoisyDetectors(InputWorkspace=workspace_name,
                               OutputWorkspace=temp_mask_ws)

        # A zero in the mask workspace marks a noisy (bad) spectrum
        mask_ws = mtd[temp_mask_ws]
        for spec in range(mask_ws.getNumberHistograms()):
            if mask_ws.readY(spec)[0] == 0.0:
                masked_spec.append(spec)

        # Remove the temporary masking workspace
        DeleteWorkspace(temp_mask_ws)

    logger.debug('Masked spectra for workspace %s: %s' % (workspace_name, str(masked_spec)))

    return masked_spec
Beispiel #7
0
    def _setup(self):
        """
        Cache algorithm properties as instance attributes and derive the
        left/right reduced workspace names.
        """
        self._run_numbers = self.getPropertyValue('RunNumbers')
        self._calibration_workspace = self.getPropertyValue('CalibrationWorkspace')
        self._raw_workspace = self.getPropertyValue('RawWorkspace')
        self._red_workspace = self.getPropertyValue('ReducedWorkspace')
        self._red_left_workspace = self._red_workspace + '_left'
        self._red_right_workspace = self._red_workspace + '_right'
        self._map_file = self.getProperty('MapFile').value

        self.setPropertyValue('RawWorkspace', self._raw_workspace)
        self.setPropertyValue('ReducedWorkspace', self._red_workspace)

        self._use_mirror_mode = self.getProperty('MirrorMode').value
        self._shift_origin = False
        if self._use_mirror_mode:
            self._origin_source = self.getProperty('OriginSource').value
            logger.information('Shift option is %s' % self._origin_source)
            # BUG FIX: this check previously ran even when mirror mode was
            # disabled, where self._origin_source is never set (AttributeError)
            if self._origin_source != 'none':
                self._shift_origin = True
                self._origin_workspace = self.getPropertyValue('OriginWorkspace')
                self._origin_file = self.getPropertyValue('OriginFile')

        self._save = self.getProperty('Save').value
        self._plot = self.getProperty('Plot').value
def load_files(data_files, ipf_filename, spec_min, spec_max, sum_files=False, load_logs=True, load_opts=None):
    """
    Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor).

    @param data_files List of data file names
    @param ipf_filename File path/name for the instrument parameter file to load
    @param spec_min Minimum spectra ID to load
    @param spec_max Maximum spectra ID to load
    @param sum_files Sum loaded files
    @param load_logs Load log files when loading runs
    @param load_opts Additional options to be passed to load algorithm

    @return List of loaded workspace names and flag indicating chopped data
    """
    workspace_names, chopped_data = _load_files(data_files, ipf_filename, spec_min, spec_max, load_logs, load_opts)

    # Sum files if needed
    if sum_files and len(data_files) > 1:
        if chopped_data:
            workspace_names = sum_chopped_runs(workspace_names)
        else:
            workspace_names = sum_regular_runs(workspace_names)
        # Log only when a sum actually happened (previously this claimed
        # 'Summed workspace names' even for unsummed loads)
        logger.information('Summed workspace names: %s' % (str(workspace_names)))

    return workspace_names, chopped_data
Beispiel #9
0
def Iblock(a, first):  # read Ascii block of Integers
    """
    Read an ASCII block of integer values (written 10 per line).

    @param a     Sequence of text lines
    @param first Index of the block header line (must start with 'I')
    @return (index of the next unread line, list of integer values)
    """
    line1 = a[first]
    line2 = a[first + 1]
    val = ExtractInt(line2)
    numb = val[0]
    # '//' keeps integer semantics on Python 3 ('/' would return a float,
    # which breaks the range() calls below)
    lines = numb // 10
    last = numb - 10 * lines
    if line1.startswith('I'):
        error = ''
    else:
        error = 'NOT an I block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)
    ival = []
    mm = first + 1  # keeps mm defined even when there are no full lines (numb < 10)
    for m in range(0, lines):
        mm = first + 2 + m
        val = ExtractInt(a[mm])
        for n in range(0, 10):
            ival.append(val[n])
    mm += 1
    val = ExtractInt(a[mm])
    for n in range(0, last):
        ival.append(val[n])
    mm += 1
    return mm, ival  # values as list
    def _get_spectra_index(self, input_ws):
        """
        Gets the index of the two monitors and first detector for the current instrument configuration.
        Assumes monitors are named monitor1 and monitor2

        @param input_ws Name of the workspace whose instrument is inspected
        @return tuple (monitor_1_idx, monitor_2_idx, detector_1_idx)
        """

        instrument = mtd[input_ws].getInstrument()

        try:
            analyser = instrument.getStringParameter('analyser')[0]
            detector_1_idx = instrument.getComponentByName(analyser)[0].getID() - 1
            logger.information('Got index of first detector for analyser %s: %d' % (analyser, detector_1_idx))
        except IndexError:
            # No analyser parameter: fall back to a fixed index
            detector_1_idx = 2
            # Typo fix: 'detetcor' -> 'detector'
            logger.warning('Could not determine index of first detector, using default value.')

        try:
            monitor_1_idx = self._get_detector_spectrum_index(input_ws, instrument.getComponentByName('monitor1').getID())

            # monitor2 is optional on some configurations
            monitor_2 = instrument.getComponentByName('monitor2')
            if monitor_2 is not None:
                monitor_2_idx = self._get_detector_spectrum_index(input_ws, monitor_2.getID())
            else:
                monitor_2_idx = None

            logger.information('Got index of monitors: %d, %s' % (monitor_1_idx, str(monitor_2_idx)))
        except IndexError:
            monitor_1_idx = 0
            monitor_2_idx = 1
            logger.warning('Could not determine index of monitors, using default values.')

        return monitor_1_idx, monitor_2_idx, detector_1_idx
Beispiel #11
0
def process_monitor_efficiency(workspace_name):
    """
    Apply an efficiency correction to the monitor workspace associated
    with the given workspace.

    @param workspace_name Name of workspace to process monitor for
    @raises ValueError if the monitor parameters are missing from the
            instrument parameter file
    """
    from mantid.simpleapi import OneMinusExponentialCor

    monitor_workspace_name = workspace_name + '_mon'
    instrument = mtd[workspace_name].getInstrument()

    try:
        area = instrument.getNumberParameter('Workflow.Monitor1-Area')[0]
        thickness = instrument.getNumberParameter(
            'Workflow.Monitor1-Thickness')[0]
        attenuation = instrument.getNumberParameter(
            'Workflow.Monitor1-Attenuation')[0]
    except IndexError:
        # Typo fix: 'form' -> 'from'
        raise ValueError('Cannot get monitor details from parameter file')

    # A value of -1 flags that no monitor correction should be applied
    if area == -1 or thickness == -1 or attenuation == -1:
        logger.information('For workspace %s, skipping monitor efficiency' %
                           workspace_name)
        return

    OneMinusExponentialCor(InputWorkspace=monitor_workspace_name,
                           OutputWorkspace=monitor_workspace_name,
                           C=attenuation * thickness,
                           C1=area)
Beispiel #12
0
def load_files(data_files, ipf_filename, spec_min, spec_max, sum_files=False, load_logs=True, load_opts=None):
    """
    Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor).

    @param data_files List of data file names
    @param ipf_filename File path/name for the instrument parameter file to load
    @param spec_min Minimum spectra ID to load
    @param spec_max Maximum spectra ID to load
    @param sum_files Sum loaded files
    @param load_logs Load log files when loading runs
    @param load_opts Additional options to be passed to load algorithm

    @return List of loaded workspace names and flag indicating chopped data
    """
    workspace_names, chopped_data = _load_files(data_files, ipf_filename, spec_min, spec_max, load_logs, load_opts)

    # Sum files if needed
    if sum_files and len(data_files) > 1:
        if chopped_data:
            workspace_names = sum_chopped_runs(workspace_names)
        else:
            workspace_names = sum_regular_runs(workspace_names)
        # Log only when a sum actually happened (previously this claimed
        # 'Summed workspace names' even for unsummed loads)
        logger.information('Summed workspace names: %s' % (str(workspace_names)))

    return workspace_names, chopped_data
Beispiel #13
0
    def _read_width_file(self, readWidth, widthFile, numSampleGroups):
        """
        Read (or synthesise) the per-group width values.

        @param readWidth       If True, read widths from widthFile; otherwise use zeros
        @param widthFile       Name of the ASCII width file
        @param numSampleGroups Number of sample groups the widths must match
        @return tuple (widthY, widthE) padded to length 51 for the Fortran call
        @raises ValueError if the file cannot be read, is empty, or its line
                count does not match numSampleGroups
        """
        widthY, widthE = [], []
        if readWidth:
            logger.information('Width file is ' + widthFile)
            # read ascii based width file
            try:
                wfPath = s_api.FileFinder.getFullPath(widthFile)
                # context manager guarantees the handle is closed even on error
                # (the original leaked it if reading raised mid-way)
                with open(wfPath, 'r') as handle:
                    asc = [line.rstrip() for line in handle]
            except Exception:
                raise ValueError('Failed to read width file')
            numLines = len(asc)
            if numLines == 0:
                raise ValueError('No groups in width file')
            if numLines != numSampleGroups:  # check that no. groups are the same
                raise ValueError('Width groups (' + str(numLines) + ') not = Sample (' + str(numSampleGroups) + ')')
        else:
            # no file: just use constant values
            widthY = np.zeros(numSampleGroups)
            widthE = np.zeros(numSampleGroups)
        # pad for Fortran call
        widthY = PadArray(widthY, 51)
        widthE = PadArray(widthE, 51)

        return widthY, widthE
def chop_workspace(workspace, monitor_index):
    """
    Chops the specified workspace if its maximum x-value exceeds its instrument
    parameter, 'Workflow.ChopDataIfGreaterThan'.

    :param workspace:     The workspace to chop
    :param monitor_index: The index of the monitor spectra in the workspace.
    :return:              A tuple of the list of output workspace names and a boolean
                          specifying whether the workspace was chopped.
    """
    from mantid.simpleapi import ChopData

    workspace_name = workspace.getName()

    # Decide whether chopping is required by comparing the final x value
    # against the instrument's threshold parameter (if one is defined)
    needs_chop = False
    try:
        threshold = workspace.getInstrument().getNumberParameter('Workflow.ChopDataIfGreaterThan')[0]
        needs_chop = workspace.readX(0)[-1] > threshold
    except IndexError:
        logger.warning("Chop threshold not found in instrument parameters")
    logger.information('Workspace {0} need data chop: {1}'.format(workspace_name, str(needs_chop)))

    if not needs_chop:
        return [workspace_name], False

    ChopData(InputWorkspace=workspace,
             OutputWorkspace=workspace_name,
             MonitorWorkspaceIndex=monitor_index,
             IntegrationRangeLower=5000.0,
             IntegrationRangeUpper=10000.0,
             NChops=5)
    return mtd[workspace_name].getNames(), True
Beispiel #15
0
def ReadIbackGroup(a, first):  # read Ascii block of spectrum values
    """
    Read one spectrum group ('S' block) from an ASCII data file.

    @param a     Sequence of text lines
    @param first Index of the 'S' header line of the group
    @return (index of the next unread line, x list, y list, e list)
    """
    x = []
    y = []
    e = []
    # 'idx' avoids shadowing the builtin next()
    idx = first
    line1 = a[idx]
    idx += 1
    val = ExtractInt(a[idx])
    n1 = val[0]    # parsed but not used here
    ngrp = val[2]  # parsed but not used here
    if line1.startswith('S'):
        error = ''
    else:
        error = 'NOT an S block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)
    idx += 1
    idx, Ival = Iblock(a, idx)
    for m in range(0, len(Ival)):
        x.append(float(m))
        yy = float(Ival[m])
        y.append(yy)
        # counting statistics: error is the square root of the count
        ee = math.sqrt(yy)
        e.append(ee)
    return idx, x, y, e  # values of x,y,e as lists
    def _calculate_energy(self, monitor_ws, grouped_ws, red_ws):
        """
        Convert the input run to energy transfer

        @param monitor_ws :: name of the monitor workspace to divide by
        @param grouped_ws :: name of workspace with the detectors grouped
        @param red_ws :: name to call the reduced workspace
        """
        x_range = self._monitor_range(monitor_ws)
        # Scale monitor counts down by a factor of 1000 before the division
        Scale(InputWorkspace=monitor_ws, OutputWorkspace=monitor_ws, Factor=0.001, Operation='Multiply')

        # Crop both workspaces to the monitor range, then shift x to start at 0.
        # NOTE(review): parameter casing is inconsistent ('Xmin' vs 'XMax') —
        # confirm Mantid accepts this spelling for CropWorkspace
        CropWorkspace(InputWorkspace=monitor_ws, OutputWorkspace=monitor_ws, Xmin=x_range[0], XMax=x_range[1])
        ScaleX(InputWorkspace=monitor_ws, OutputWorkspace=monitor_ws, Factor=-x_range[0], Operation='Add')

        CropWorkspace(InputWorkspace=grouped_ws, OutputWorkspace=grouped_ws, Xmin=x_range[0], XMax=x_range[1])
        ScaleX(InputWorkspace=grouped_ws, OutputWorkspace=grouped_ws, Factor=-x_range[0], Operation='Add')

        # Normalise the grouped detectors by the monitor spectrum
        Divide(LHSWorkspace=grouped_ws, RHSWorkspace=monitor_ws, OutputWorkspace=grouped_ws)
        formula = self._energy_range(grouped_ws)
        # Transform the x axis to energy transfer using the derived formula
        ConvertAxisByFormula(InputWorkspace=grouped_ws, OutputWorkspace=red_ws, Axis='X', Formula=formula,
                             AxisTitle='Energy transfer', AxisUnits='meV')

        xnew = mtd[red_ws].readX(0)  # energy array
        logger.information('Energy range : %f to %f' % (xnew[0], xnew[-1]))

        # Intermediate workspaces are no longer needed
        DeleteWorkspace(grouped_ws)
        DeleteWorkspace(monitor_ws)
Beispiel #17
0
    def _generate_UBList(self):
        """
        Build the list of UB matrices to use: the loaded UB transformed by
        each requested symmetry operation, or just the loaded UB when no
        symmetry operations were given.
        """
        # Load the UB matrix via a throwaway single-valued workspace
        CreateSingleValuedWorkspace(OutputWorkspace='__ub')
        LoadIsawUB('__ub', self.getProperty("UBMatrix").value)
        ub = mtd['__ub'].sample().getOrientedLattice().getUB().copy()
        DeleteWorkspace(Workspace='__ub')

        symOps = self.getProperty("SymmetryOps").value
        if symOps:
            # A numeric value is treated as a space group number
            try:
                symOps = SpaceGroupFactory.subscribedSpaceGroupSymbols(
                    int(symOps))[0]
            except ValueError:
                pass  # not a number: treat as a symbol or explicit operations
            if SpaceGroupFactory.isSubscribedSymbol(symOps):
                symOps = SpaceGroupFactory.createSpaceGroup(
                    symOps).getSymmetryOperations()
            else:
                symOps = SymmetryOperationFactory.createSymOps(symOps)
            logger.information('Using symmetries: ' +
                               str([sym.getIdentifier() for sym in symOps]))

            ub_list = []
            for sym in symOps:
                # Build the HKL transformation matrix row by row
                UBtrans = np.zeros((3, 3))
                UBtrans[0] = sym.transformHKL([1, 0, 0])
                UBtrans[1] = sym.transformHKL([0, 1, 0])
                UBtrans[2] = sym.transformHKL([0, 0, 1])
                # NOTE(review): np.matrix is deprecated; '*' here is a matrix
                # product, so a future port to ndarray must use '@'
                UBtrans = np.matrix(UBtrans.T)
                ub_list.append(ub * UBtrans)
            return ub_list
        else:
            return [ub]
Beispiel #18
0
def Fblock(a, first):  # read Ascii block of Floats
    """
    Read an ASCII block of float values (written 5 per line).

    @param a     Sequence of text lines
    @param first Index of the block header line (must start with 'F')
    @return (index of the next unread line, list of float values)
    """
    line1 = a[first]
    line2 = a[first + 1]
    val = ExtractInt(line2)
    numb = val[0]
    # '//' keeps integer semantics on Python 3 ('/' would return a float,
    # which breaks the range() calls below)
    lines = numb // 5
    last = numb - 5 * lines
    if line1.startswith('F'):
        error = ''
    else:
        error = 'NOT an F block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)
    fval = []
    mm = first + 1  # keeps mm defined even when there are no full lines (numb < 5)
    for m in range(0, lines):
        mm = first + 2 + m
        val = ExtractFloat(a[mm])
        for n in range(0, 5):
            fval.append(val[n])
    mm += 1
    val = ExtractFloat(a[mm])
    for n in range(0, last):
        fval.append(val[n])
    mm += 1
    return mm, fval  # values as list
Beispiel #19
0
def chop_workspace(workspace, monitor_index):
    """
    Chops the specified workspace if its maximum x-value exceeds its instrument
    parameter, 'Workflow.ChopDataIfGreaterThan'.

    :param workspace:     The workspace to chop
    :param monitor_index: The index of the monitor spectra in the workspace.
    :return:              A tuple of the list of output workspace names and a boolean
                          specifying whether the workspace was chopped.
    """
    from mantid.simpleapi import ChopData

    workspace_name = workspace.getName()

    # Compare the final x value against the instrument threshold, if any
    needs_chop = False
    try:
        threshold = workspace.getInstrument().getNumberParameter('Workflow.ChopDataIfGreaterThan')[0]
        needs_chop = workspace.readX(0)[-1] > threshold
    except IndexError:
        logger.warning("Chop threshold not found in instrument parameters")
    logger.information('Workspace {0} need data chop: {1}'.format(workspace_name, str(needs_chop)))

    if not needs_chop:
        return [workspace_name], False

    ChopData(InputWorkspace=workspace,
             OutputWorkspace=workspace_name,
             MonitorWorkspaceIndex=monitor_index,
             IntegrationRangeLower=5000.0,
             IntegrationRangeUpper=10000.0,
             NChops=5)
    return mtd[workspace_name].getNames(), True
Beispiel #20
0
def Fblock(a, first):  # read Ascii block of Floats
    """
    Read an ASCII block of float values (written 5 per line).

    @param a     Sequence of text lines
    @param first Index of the block header line (must start with 'F')
    @return (index of the next unread line, list of float values)
    """
    line1 = a[first]
    line2 = a[first + 1]
    val = ExtractInt(line2)
    numb = val[0]
    # '//' keeps integer semantics on Python 3 ('/' would return a float,
    # which breaks the range() calls below)
    lines = numb // 5
    last = numb - 5 * lines
    if line1.startswith('F'):
        error = ''
    else:
        error = 'NOT an F block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)
    fval = []
    mm = first + 1  # keeps mm defined even when there are no full lines (numb < 5)
    for m in range(0, lines):
        mm = first + 2 + m
        val = ExtractFloat(a[mm])
        for n in range(0, 5):
            fval.append(val[n])
    mm += 1
    val = ExtractFloat(a[mm])
    for n in range(0, last):
        fval.append(val[n])
    mm += 1
    return mm, fval  # values as list
Beispiel #21
0
def CheckAnalysers(in1WS, in2WS):
    '''Check workspaces have identical analysers and reflections

    Args:
      @param in1WS - first 2D workspace
      @param in2WS - second 2D workspace

    Returns:
      @return None

    Raises:
      @exception ValueError - workspaces have different analysers
      @exception ValueError - workspaces have different reflections
      @exception RuntimeError - analyser/reflection parameters are missing
    '''
    # getStringParameter returns an empty list when the parameter is absent,
    # so guard the [0] indexing (the original leaked a raw IndexError);
    # this matches the behaviour of the robust variant of this function
    ws1 = mtd[in1WS]
    try:
        a1 = ws1.getInstrument().getStringParameter('analyser')[0]
        r1 = ws1.getInstrument().getStringParameter('reflection')[0]
    except IndexError:
        raise RuntimeError('Could not find analyser or reflection for workspace %s' % in1WS)
    ws2 = mtd[in2WS]
    try:
        a2 = ws2.getInstrument().getStringParameter('analyser')[0]
        r2 = ws2.getInstrument().getStringParameter('reflection')[0]
    except IndexError:
        raise RuntimeError('Could not find analyser or reflection for workspace %s' % in2WS)
    if a1 != a2:
        raise ValueError('Workspace '+in1WS+' and '+in2WS+' have different analysers')
    elif r1 != r2:
        raise ValueError('Workspace '+in1WS+' and '+in2WS+' have different reflections')
    else:
        logger.information('Analyser is '+a1+r1)
Beispiel #22
0
def identify_bad_detectors(workspace_name):
    """
    Determine which spectra of a workspace should be masked.

    @param workspace_name Name of workspace to use to get masking detectors
    @return List of masked spectra
    """
    from mantid.simpleapi import (IdentifyNoisyDetectors, DeleteWorkspace)

    instrument = mtd[workspace_name].getInstrument()

    # The masking strategy comes from an instrument parameter; default to none
    mask_parameters = instrument.getStringParameter('Workflow.Masking')
    masking_type = mask_parameters[0] if mask_parameters else 'None'

    logger.information('Masking type: %s' % masking_type)

    masked_spec = []

    if masking_type == 'IdentifyNoisyDetectors':
        temp_mask_ws = '__workspace_mask'
        IdentifyNoisyDetectors(InputWorkspace=workspace_name,
                               OutputWorkspace=temp_mask_ws)

        # A zero in the mask workspace marks a noisy (bad) spectrum
        mask_ws = mtd[temp_mask_ws]
        for spec in range(mask_ws.getNumberHistograms()):
            if mask_ws.readY(spec)[0] == 0.0:
                masked_spec.append(spec)

        # Remove the temporary masking workspace
        DeleteWorkspace(temp_mask_ws)

    logger.debug('Masked spectra for workspace %s: %s' % (workspace_name, str(masked_spec)))

    return masked_spec
    def _generate_UBList(self):
        """
        Return the list of UB matrices to use: the loaded UB transformed by
        each requested symmetry operation, or just the loaded UB alone.
        """
        # Load the UB matrix via a throwaway single-valued workspace
        CreateSingleValuedWorkspace(OutputWorkspace='__ub')
        LoadIsawUB('__ub', self.getProperty("UBMatrix").value)
        ub = mtd['__ub'].sample().getOrientedLattice().getUB().copy()
        DeleteWorkspace(Workspace='__ub')

        sym_ops = self.getProperty("SymmetryOps").value
        if not sym_ops:
            return [ub]

        # A numeric value selects a space group by number
        try:
            sym_ops = SpaceGroupFactory.subscribedSpaceGroupSymbols(int(sym_ops))[0]
        except ValueError:
            pass
        if SpaceGroupFactory.isSubscribedSymbol(sym_ops):
            sym_ops = SpaceGroupFactory.createSpaceGroup(sym_ops).getSymmetryOperations()
        else:
            sym_ops = SymmetryOperationFactory.createSymOps(sym_ops)
        logger.information('Using symmetries: ' + str([op.getIdentifier() for op in sym_ops]))

        ub_list = []
        for op in sym_ops:
            # Build the HKL transformation matrix row by row
            transform = np.zeros((3, 3))
            transform[0] = op.transformHKL([1, 0, 0])
            transform[1] = op.transformHKL([0, 1, 0])
            transform[2] = op.transformHKL([0, 0, 1])
            ub_list.append(ub * np.matrix(transform.T))
        return ub_list
Beispiel #24
0
def CheckAnalysers(in1WS, in2WS):
    """
    Check workspaces have identical analysers and reflections

    Args:
      @param in1WS - first 2D workspace
      @param in2WS - second 2D workspace

    Returns:
      @return None

    Raises:
      @exception ValueError - workspaces have different analysers
      @exception ValueError - workspaces have different reflections
      @exception RuntimeError - analyser/reflection parameters are missing
    """
    ws1 = s_api.mtd[in1WS]
    try:
        analyser_1 = ws1.getInstrument().getStringParameter('analyser')[0]
        reflection_1 = ws1.getInstrument().getStringParameter('reflection')[0]
    except IndexError:
        raise RuntimeError('Could not find analyser or reflection for workspace %s' % in1WS)
    ws2 = s_api.mtd[in2WS]
    try:
        analyser_2 = ws2.getInstrument().getStringParameter('analyser')[0]
        reflection_2 = ws2.getInstrument().getStringParameter('reflection')[0]
    except IndexError:  # narrowed from a bare 'except' that hid unrelated errors
        raise RuntimeError('Could not find analyser or reflection for workspace %s' % in2WS)

    # Format the workspace *names* into the messages (the old code formatted
    # the workspace objects ws1/ws2, producing unreadable reprs)
    if analyser_1 != analyser_2:
        raise ValueError('Workspace %s and %s have different analysers' % (in1WS, in2WS))
    elif reflection_1 != reflection_2:
        raise ValueError('Workspace %s and %s have different reflections' % (in1WS, in2WS))
    else:
        logger.information('Analyser is %s, reflection %s' % (analyser_1, reflection_1))
def ReadWidthFile(readWidth, widthFile, numSampleGroups):
    """
    Read a width file and validate its group count.

    @param readWidth       If False, skip reading entirely
    @param widthFile       Name of the ASCII width file
    @param numSampleGroups Expected number of groups
    @raises ValueError if the file cannot be read, is empty, or its
            group count differs from numSampleGroups
    """
    widthY = []
    widthE = []

    if readWidth:

        logger.information('Width file is ' + widthFile)

        # read ascii based width file
        try:
            wfPath = FileFinder.getFullPath(widthFile)
            # 'with' closes the handle even if reading fails; the original
            # used Python 2-only 'except Exception, e:' syntax and leaked
            # the handle on error
            with open(wfPath, 'r') as handle:
                asc = [line.rstrip() for line in handle]
        except Exception:
            raise ValueError('Failed to read width file')

        numLines = len(asc)

        if numLines == 0:
            raise ValueError('No groups in width file')

        if numLines != numSampleGroups:  # check that no. groups are the same
            error = 'Width groups (' + str(numLines) + ') not = Sample (' + str(numSampleGroups) + ')'
            raise ValueError(error)
Beispiel #26
0
    def _punch_and_fill(self, signal, dimX, dimY, dimZ):  # noqa
        """
        Set regions of `signal` around integer (h, k, l) points to NaN
        ("punch"), restricted to space-group-allowed reflections when a
        space group is given.

        @param signal :: 3D numpy signal array (modified in place)
        @param dimX :: dimension object for the first axis
        @param dimY :: dimension object for the second axis
        @param dimZ :: dimension object for the third axis
        @return the punched signal array
        """
        Xmin, Xmax, _, Xwidth = self._get_dim_params(dimX)
        Ymin, Ymax, _, Ywidth = self._get_dim_params(dimY)
        Zmin, Zmax, _, Zwidth = self._get_dim_params(dimZ)
        X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)

        size = self.getProperty("Size").value
        # A single value means the same size along all three axes
        if len(size) == 1:
            size = np.repeat(size, 3)
        size /= 2.0  # We want radii or half box width
        cut_shape = self.getProperty("Shape").value
        space_group = self.getProperty("SpaceGroup").value
        if space_group:
            check_space_group = True
            # A numeric value selects a space group by number
            try:
                space_group = SpaceGroupFactory.subscribedSpaceGroupSymbols(
                    int(space_group))[0]
            except ValueError:
                pass
            logger.information('Using space group: ' + space_group)
            sg = SpaceGroupFactory.createSpaceGroup(space_group)
        else:
            check_space_group = False

        if cut_shape == 'cube':
            # Punch an axis-aligned box around every (allowed) integer point
            for h in range(int(np.ceil(Xmin)), int(Xmax) + 1):
                for k in range(int(np.ceil(Ymin)), int(Ymax) + 1):
                    for l in range(int(np.ceil(Zmin)), int(Zmax) + 1):
                        if not check_space_group or sg.isAllowedReflection(
                            [h, k, l]):
                            signal[int((h - size[0] - Xmin) / Xwidth +
                                       1):int((h + size[0] - Xmin) / Xwidth),
                                   int((k - size[1] - Ymin) / Ywidth +
                                       1):int((k + size[1] - Ymin) / Ywidth),
                                   int((l - size[2] - Zmin) / Zwidth +
                                       1):int((l + size[2] - Zmin) /
                                              Zwidth)] = np.nan
        else:  # sphere
            # Ellipsoid mask centred on the nearest integer point everywhere
            mask = ((X - np.round(X))**2 / size[0]**2 +
                    (Y - np.round(Y))**2 / size[1]**2 +
                    (Z - np.round(Z))**2 / size[2]**2 < 1)

            # Unmask invalid reflections
            if check_space_group:
                for h in range(int(np.ceil(Xmin)), int(Xmax) + 1):
                    for k in range(int(np.ceil(Ymin)), int(Ymax) + 1):
                        for l in range(int(np.ceil(Zmin)), int(Zmax) + 1):
                            if not sg.isAllowedReflection([h, k, l]):
                                mask[int((h - 0.5 - Xmin) / Xwidth +
                                         1):int((h + 0.5 - Xmin) / Xwidth),
                                     int((k - 0.5 - Ymin) / Ywidth +
                                         1):int((k + 0.5 - Ymin) / Ywidth),
                                     int((l - 0.5 - Zmin) / Zwidth +
                                         1):int((l + 0.5 - Zmin) /
                                                Zwidth)] = False

            signal[mask] = np.nan

        return signal
Beispiel #27
0
    def PyExec(self):
        """
        Run the IndirectILLreduction workflow: load the raw run, reduce it,
        then optionally save and plot the reduced workspaces.
        """
        self.log().information('IndirectILLreduction')

        run_path = self.getPropertyValue('Run')
        self._calibration_workspace = self.getPropertyValue('CalibrationWorkspace')
        self._raw_workspace = self.getPropertyValue('RawWorkspace')
        self._red_workspace = self.getPropertyValue('ReducedWorkspace')
        self._red_left_workspace = self.getPropertyValue('LeftWorkspace')
        self._red_right_workspace = self.getPropertyValue('RightWorkspace')
        self._map_file = self.getProperty('MapFile').value

        self._use_mirror_mode = self.getProperty('MirrorMode').value
        self._save = self.getProperty('Save').value
        self._plot = self.getProperty('Plot').value

        LoadILLIndirect(FileName=run_path, OutputWorkspace=self._raw_workspace)

        instrument = mtd[self._raw_workspace].getInstrument()
        self._instrument_name = instrument.getName()

        self._run_number = mtd[self._raw_workspace].getRunNumber()
        self._analyser = self.getPropertyValue('Analyser')
        self._reflection = self.getPropertyValue('Reflection')
        self._run_name = self._instrument_name + '_' + str(self._run_number)

        # Record the mirror-mode state on the raw workspace as a sample log
        AddSampleLog(Workspace=self._raw_workspace, LogName="mirror_sense",
                     LogType="String", LogText=str(self._use_mirror_mode))

        logger.information('Nxs file : %s' % run_path)

        output_workspaces = self._reduction()

        # Optionally save every reduced workspace as a NeXus file
        if self._save:
            workdir = config['defaultsave.directory']
            for ws in output_workspaces:
                file_path = os.path.join(workdir, ws + '.nxs')
                SaveNexusProcessed(InputWorkspace=ws, Filename=file_path)
                logger.information('Output file : ' + file_path)

        # Optionally plot spectrum 0 of each output workspace in one graph
        if self._plot:
            from IndirectImport import import_mantidplot
            mtd_plot = import_mantidplot()
            graph = mtd_plot.newGraph()

            for ws in output_workspaces:
                mtd_plot.plotSpectrum(ws, 0, window=graph)

            layer = graph.activeLayer()
            layer.setAxisTitle(mtd_plot.Layer.Bottom, 'Energy Transfer (meV)')
            layer.setAxisTitle(mtd_plot.Layer.Left, '')
            layer.setTitle('')

        self.setPropertyValue('RawWorkspace', self._raw_workspace)
        self.setPropertyValue('ReducedWorkspace', self._red_workspace)

        # Left/right outputs only exist when mirror mode was used
        if self._use_mirror_mode:
            self.setPropertyValue('LeftWorkspace', self._red_left_workspace)
            self.setPropertyValue('RightWorkspace', self._red_right_workspace)
Beispiel #28
0
    def _origin_data(self, ws):
        """
        Read the origin file (left/right peak-centre channels per spectrum)
        and build a two-spectrum workspace of peak centres.

        @param ws Name of the workspace whose spectra count must match the file
        @raises LookupError if the file is missing or internally inconsistent
        """
        workdir = config['defaultsave.directory']

        path = os.path.join(workdir, self._origin_file)
        logger.information('Origin source is file: %s' % self._origin_file)
        try:
            # 'with' guarantees the handle is closed; the original block was
            # tab-indented inside a space-indented method (a TabError on
            # Python 3) and leaked the handle on failure
            with open(path, 'r') as handle:
                asc = [line.rstrip() for line in handle]
        except IOError:  # narrowed from a bare 'except' that hid real errors
            raise LookupError('Could not load file %s' % path)

        len_asc = len(asc)
        lines = int(asc[0])  # the first line holds the expected line count
        if lines != len_asc - 1:
            raise LookupError('Text file error : inconsistent number of lines %i' % lines)

        number_histograms = mtd[ws].getNumberHistograms()                      # no. of hist/groups
        if lines != number_histograms:
            raise LookupError('Text file error : number of lines %i not = spectra %i' % (lines, number_histograms))

        # The x axis of the first spectrum converts channel indices to x values
        ExtractSingleSpectrum(InputWorkspace=ws,
                              OutputWorkspace='__left_temp',
                              WorkspaceIndex=0)
        x_axis = mtd['__left_temp'].readX(0)
        DeleteWorkspace('__left_temp')

        x = []
        self._left_mean = []
        self._right_mean = []
        for n in range(lines):
            # list() is required on Python 3, where map() returns an
            # iterator that cannot be indexed
            values = list(map(int, asc[n + 1].split()))
            if values[0] != n + 1:
                raise LookupError('Text file error : inconsistent sequence number %i' % n)
            x.append(n)
            self._left_mean.append(x_axis[values[1]])
            self._right_mean.append(x_axis[values[2]])

        # Two spectra sharing the same x values: left centres then right centres
        xData = np.array(x)
        xData = np.append(xData, x)
        yData = np.array(self._left_mean)
        yData = np.append(yData, self._right_mean)

        CreateWorkspace(OutputWorkspace=self._origin_file,
                        DataX=xData,
                        DataY=yData,
                        Nspec=2)
        y_axis = TextAxis.create(2)
        mtd[self._origin_file].replaceAxis(1, y_axis)
        y_axis.setLabel(0, 'left')
        y_axis.setLabel(1, 'right')
        mtd[self._origin_file].setYUnitLabel('Peak centre (channel)')
Beispiel #29
0
def C2Se(sname):
    """
    Convert a QSe fit output file (sname + '.qse') into a result workspace.

    The created workspace (sname + '_Result') has one spectrum per fit
    parameter (Amplitude, FWHM, Beta) as a function of momentum transfer Q.

    @param sname Base name of the QSe run; reads sname + '.qse'
    @return Name of the created result workspace
    """
    outWS = sname + '_Result'
    asc = readASCIIFile(sname + '.qse')
    # First token of line 4 is the number of spectra
    nspec = int(asc[3].split()[0])
    # NOTE(review): result was unused in the original; call kept in case
    # ExtractInt performs validation — TODO confirm it can be dropped
    ExtractInt(asc[6])
    first = 7  # parameter blocks start at line 8

    Xout = []
    Yf, Ef = [], []  # FWHM values and errors
    Yi, Ei = [], []  # amplitude values and errors
    Yb, Eb = [], []  # beta values and errors

    for _ in range(nspec):
        first, Q, _, fw, it, be = SeBlock(asc, first)
        Xout.append(Q)
        Yf.append(fw[0])
        Ef.append(fw[1])
        Yi.append(it[0])
        Ei.append(it[1])
        Yb.append(be[0])
        Eb.append(be[1])

    # Stack the three parameter histograms; all share the same Q axis
    dataX = np.array(Xout + Xout + Xout)
    dataY = np.array(Yi + Yf + Yb)
    dataE = np.array(Ei + Ef + Eb)
    Vaxis = ['f1.Amplitude', 'f1.FWHM', 'f1.Beta']
    nhist = len(Vaxis)

    logger.information('Vaxis=' + str(Vaxis))
    CreateWorkspace(OutputWorkspace=outWS, DataX=dataX, DataY=dataY, DataE=dataE, Nspec=nhist,
                    UnitX='MomentumTransfer', VerticalAxisUnit='Text', VerticalAxisValues=Vaxis,
                    YUnitLabel='')
    return outWS
Beispiel #30
0
 def _establish_save_path(self):
     """
     @return the directory to save FORTRAN outputs to
     """
     # Prefer Mantid's configured default save directory when it exists
     save_dir = config['defaultsave.directory']
     if os.path.isdir(save_dir):
         return save_dir
     logger.information('Default Save directory is not set.')
     save_dir = os.getcwd()
     logger.information('Defaulting to current working Directory: ' + save_dir)
     return save_dir
Beispiel #31
0
 def _establish_save_path(self):
     """
     Determine where FORTRAN output files should be written.

     Falls back to the current working directory when the Mantid default
     save directory is unset or does not exist.

     @return the directory to save FORTRAN outputs to
     """
     workdir = config['defaultsave.directory']
     if not os.path.isdir(workdir):
         # Configured directory is missing — use the process CWD instead
         workdir = os.getcwd()
         logger.information('Default Save directory is not set.')
         logger.information('Defaulting to current working Directory: ' + workdir)
     return workdir
Beispiel #32
0
    def _calculate_energy(self, monitor_ws, grouped_ws, red_ws):
        """
        Convert the input run to energy transfer

        @param monitor_ws :: name of the monitor workspace to divide by
        @param grouped_ws :: name of workspace with the detectors grouped
        @param red_ws :: name to call the reduced workspace
        """
        x_range = self._monitor_range(monitor_ws)
        # Scale monitor counts down by 1000 — presumably a unit/normalisation
        # convention; TODO confirm against instrument documentation
        Scale(InputWorkspace=monitor_ws,
              OutputWorkspace=monitor_ws,
              Factor=0.001,
              Operation='Multiply')

        # Crop both workspaces to the usable monitor range, then shift the
        # X axis so it starts at zero.
        # Fix: the CropWorkspace property is 'XMin' (was 'Xmin', which does
        # not match the algorithm's declared property name).
        CropWorkspace(InputWorkspace=monitor_ws,
                      OutputWorkspace=monitor_ws,
                      XMin=x_range[0],
                      XMax=x_range[1])
        ScaleX(InputWorkspace=monitor_ws,
               OutputWorkspace=monitor_ws,
               Factor=-x_range[0],
               Operation='Add')

        CropWorkspace(InputWorkspace=grouped_ws,
                      OutputWorkspace=grouped_ws,
                      XMin=x_range[0],
                      XMax=x_range[1])
        ScaleX(InputWorkspace=grouped_ws,
               OutputWorkspace=grouped_ws,
               Factor=-x_range[0],
               Operation='Add')

        # Apply the detector intensity calibration
        if self._calibration_workspace != '':
            Divide(LHSWorkspace=grouped_ws,
                   RHSWorkspace=self._calibration_workspace,
                   OutputWorkspace=grouped_ws)

        # Normalise the detectors by the monitor
        Divide(LHSWorkspace=grouped_ws,
               RHSWorkspace=monitor_ws,
               OutputWorkspace=grouped_ws)
        # Map the X axis onto energy transfer using the instrument formula
        formula = self._energy_range(grouped_ws)
        ConvertAxisByFormula(InputWorkspace=grouped_ws,
                             OutputWorkspace=red_ws,
                             Axis='X',
                             Formula=formula)

        red_ws_p = mtd[red_ws]
        red_ws_p.getAxis(0).setUnit('DeltaE')

        xnew = red_ws_p.readX(0)  # energy array
        logger.information('Energy range : %f to %f' % (xnew[0], xnew[-1]))

        # Intermediate workspaces are no longer needed
        DeleteWorkspace(grouped_ws)
        DeleteWorkspace(monitor_ws)
Beispiel #33
0
def sum_regular_runs(workspace_names):
    """
    Sum runs with single workspace data.

    @param workspace_names List of names of input workspaces
    @return List of names of workspaces
    """
    from mantid.simpleapi import (MergeRuns, Scale, AddSampleLog,
                                  DeleteWorkspace)

    # The first workspace (and its monitor) receives the merged result
    result_ws = workspace_names[0]
    result_mon_ws = workspace_names[0] + '_mon'

    # Record the run numbers of every contributing workspace
    run_numbers = ','.join(str(mtd[name].getRunNumber())
                           for name in workspace_names)

    # Merge detector and monitor workspaces separately
    MergeRuns(InputWorkspaces=','.join(workspace_names),
              OutputWorkspace=result_ws)
    MergeRuns(InputWorkspaces=','.join(name + '_mon' for name in workspace_names),
              OutputWorkspace=result_mon_ws)

    # Remove the now-redundant inputs (all but the first)
    for name in workspace_names[1:]:
        DeleteWorkspace(name)
        DeleteWorkspace(name + '_mon')

    # Normalise by the number of merged runs
    scale_factor = 1.0 / len(workspace_names)
    logger.information('Scale factor for summed workspaces: %f' % scale_factor)

    for ws in (result_ws, result_mon_ws):
        Scale(InputWorkspace=ws,
              OutputWorkspace=ws,
              Factor=scale_factor)

    # Attach the contributing run numbers to the result as a sample log
    AddSampleLog(Workspace=result_ws,
                 LogName='multi_run_numbers',
                 LogType='String',
                 LogText=run_numbers)

    # Only have the one workspace now
    return [result_ws]
Beispiel #34
0
    def _save_output(self):
        """
        Save the output workspace to the user's default working directory
        """
        from IndirectCommon import getDefaultWorkingDirectory
        save_path = os.path.join(getDefaultWorkingDirectory(),
                                 self._output_workspace + '.nxs')
        SaveNexusProcessed(InputWorkspace=self._output_workspace,
                           Filename=save_path)
        logger.information('Output file : ' + save_path)
Beispiel #35
0
    def C2Se(self, sname):
        """
        Convert the QSe fit output file (sname + '.qse') into a result
        workspace with one spectrum per fit parameter (Amplitude, FWHM,
        Beta) as a function of momentum transfer Q.
        """
        outWS = sname + '_Result'
        asc = self._read_ascii_file(sname + '.qse')
        # First token of line 4 is the number of spectra
        nspec = int(asc[3].split()[0])
        ExtractInt(asc[6])
        block_start = 7  # parameter blocks start at line 8

        q_values = []
        fwhm, fwhm_err = [], []
        amp, amp_err = [], []
        beta, beta_err = [], []

        # Accumulated (X, Y, E) rows, all starting empty
        data = np.array([np.array([]), np.array([]), np.array([])])

        for _ in range(nspec):
            block_start, Q, _, fw, it, be = self.SeBlock(asc, block_start)
            q_values.append(Q)
            fwhm.append(fw[0])
            fwhm_err.append(fw[1])
            amp.append(it[0])
            amp_err.append(it[1])
            beta.append(be[0])
            beta_err.append(be[1])

        Vaxis = []
        dataX, dataY, dataE, data = self._add_xye_data(data, q_values, amp, amp_err)
        Vaxis.append('f1.Amplitude')

        dataX, dataY, dataE, data = self._add_xye_data(data, q_values, fwhm, fwhm_err)
        Vaxis.append('f1.FWHM')

        dataX, dataY, dataE, data = self._add_xye_data(data, q_values, beta, beta_err)
        Vaxis.append('f1.Beta')

        nhist = len(Vaxis)  # one histogram per fit parameter

        logger.information('Vaxis=' + str(Vaxis))
        s_api.CreateWorkspace(OutputWorkspace=outWS,
                              DataX=dataX,
                              DataY=dataY,
                              DataE=dataE,
                              Nspec=nhist,
                              UnitX='MomentumTransfer',
                              VerticalAxisUnit='Text',
                              VerticalAxisValues=Vaxis,
                              YUnitLabel='',
                              EnableLogging=False)

        return outWS
def _load_files(file_specifiers, ipf_filename, spec_min, spec_max, load_logs=True, load_opts=None):
    """
    Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor).

    @param file_specifiers List of data file specifiers
    @param ipf_filename File path/name for the instrument parameter file to load
    @param spec_min Minimum spectra ID to load
    @param spec_max Maximum spectra ID to load
    @param load_logs Load log files when loading runs
    @param load_opts Additional options to be passed to load algorithm

    @return List of loaded workspace names and flag indicating chopped data
    """
    load_opts = load_opts if load_opts is not None else {}
    # 'DeleteMonitors' is handled here, not by the load algorithm itself
    delete_monitors = load_opts.pop("DeleteMonitors", False)

    workspace_names = []
    chopped_data = False

    for specifier in file_specifiers:
        # Workspace name is the file name stripped of path and extension
        ws_name = os.path.splitext(os.path.basename(str(specifier)))[0]
        logger.debug('Loading file %s as workspace %s' % (specifier, ws_name))
        do_load(specifier, ws_name, ipf_filename, load_logs, load_opts)
        workspace = mtd[ws_name]

        workspace_names.append(ws_name)

        # Locate the monitor spectrum for this instrument, if declared
        monitor_param = workspace.getInstrument().getNumberParameter(
            'Workflow.Monitor1-SpectrumNumber')

        if monitor_param:
            monitor_index = int(monitor_param[0])
            logger.debug('Workspace %s monitor 1 spectrum number :%d' % (ws_name, monitor_index))

            workspaces, chopped_data = chop_workspace(workspace, monitor_index)
            crop_workspaces(workspaces, spec_min, spec_max, not delete_monitors, monitor_index)

    logger.information('Loaded workspace names: %s' % (str(workspace_names)))
    logger.information('Chopped data: %s' % (str(chopped_data)))

    # Restore the pseudo-option so the caller's dict is left as it arrived
    if delete_monitors:
        load_opts['DeleteMonitors'] = True

    return workspace_names, chopped_data
Beispiel #37
0
def C2Se(sname):
    """
    Convert a QSe fit output file (sname + '.qse') into a result workspace
    named sname + '_Result' with one spectrum per fit parameter
    (Amplitude, FWHM, Beta) as a function of momentum transfer Q.

    @param sname Base name of the QSe run
    @return Name of the created result workspace
    """
    outWS = sname + '_Result'
    asc = readASCIIFile(sname + '.qse')
    var = asc[3].split()  # split line on spaces
    nspec = var[0]  # first token is the number of spectra
    var = ExtractInt(asc[6])  # NOTE(review): result unused — presumably kept for validation; confirm
    first = 7  # parameter blocks start at line 8
    Xout = []
    Yf = []  # FWHM values
    Ef = []  # FWHM errors
    Yi = []  # amplitude values
    Ei = []  # amplitude errors
    Yb = []  # beta values
    Eb = []  # beta errors
    ns = int(nspec)

    dataX = np.array([])
    dataY = np.array([])
    dataE = np.array([])

    for _ in range(0, ns):
        first, Q, _, fw, it, be = SeBlock(asc, first)
        Xout.append(Q)
        Yf.append(fw[0])
        Ef.append(fw[1])
        Yi.append(it[0])
        Ei.append(it[1])
        Yb.append(be[0])
        Eb.append(be[1])
    Vaxis = []

    # Amplitude spectrum
    dataX = np.append(dataX, np.array(Xout))
    dataY = np.append(dataY, np.array(Yi))
    dataE = np.append(dataE, np.array(Ei))
    nhist = 1
    Vaxis.append('f1.Amplitude')

    # FWHM spectrum
    dataX = np.append(dataX, np.array(Xout))
    dataY = np.append(dataY, np.array(Yf))
    dataE = np.append(dataE, np.array(Ef))
    nhist += 1
    Vaxis.append('f1.FWHM')

    # Beta spectrum
    dataX = np.append(dataX, np.array(Xout))
    dataY = np.append(dataY, np.array(Yb))
    dataE = np.append(dataE, np.array(Eb))
    nhist += 1
    Vaxis.append('f1.Beta')

    logger.information('Vaxis=' + str(Vaxis))
    CreateWorkspace(OutputWorkspace=outWS, DataX=dataX, DataY=dataY, DataE=dataE, Nspec=nhist,\
        UnitX='MomentumTransfer', VerticalAxisUnit='Text', VerticalAxisValues=Vaxis, YUnitLabel='')
    return outWS
Beispiel #38
0
def _load_files(file_specifiers, ipf_filename, spec_min, spec_max, load_logs=True, load_opts=None):
    """
    Loads a set of files and extracts just the spectra we care about (i.e. detector range and monitor).

    @param file_specifiers List of data file specifiers
    @param ipf_filename File path/name for the instrument parameter file to load
    @param spec_min Minimum spectra ID to load
    @param spec_max Maximum spectra ID to load
    @param load_logs Load log files when loading runs
    @param load_opts Additional options to be passed to load algorithm

    @return List of loaded workspace names and flag indicating chopped data
    """
    delete_monitors = False

    if load_opts is None:
        load_opts = {}

    # 'DeleteMonitors' is a pseudo-option handled here, not by the loader;
    # remove it before load_opts is forwarded
    if "DeleteMonitors" in load_opts:
        delete_monitors = load_opts["DeleteMonitors"]
        load_opts.pop("DeleteMonitors")

    workspace_names = []
    chopped_data = False

    for file_specifier in file_specifiers:
        # The filename without path and extension will be the workspace name
        ws_name = os.path.splitext(os.path.basename(str(file_specifier)))[0]
        logger.debug('Loading file %s as workspace %s' % (file_specifier, ws_name))
        do_load(file_specifier, ws_name, ipf_filename, load_logs, load_opts)
        workspace = mtd[ws_name]

        # Add the workspace to the list of workspaces
        workspace_names.append(ws_name)

        # Get the spectrum number for the monitor
        instrument = workspace.getInstrument()
        monitor_param = instrument.getNumberParameter('Workflow.Monitor1-SpectrumNumber')

        if monitor_param:
            monitor_index = int(monitor_param[0])
            logger.debug('Workspace %s monitor 1 spectrum number :%d' % (ws_name, monitor_index))

            # Chop if needed, then crop to the requested detector range
            workspaces, chopped_data = chop_workspace(workspace, monitor_index)
            crop_workspaces(workspaces, spec_min, spec_max, not delete_monitors, monitor_index)

    logger.information('Loaded workspace names: %s' % (str(workspace_names)))
    logger.information('Chopped data: %s' % (str(chopped_data)))

    # Restore the pseudo-option so the caller's dict is left unchanged
    if delete_monitors:
        load_opts['DeleteMonitors'] = True

    return workspace_names, chopped_data
Beispiel #39
0
def ChangeAngles(inWS, instr, theta):
    """
    Write a spectrum/theta table for the given angles and apply it to the
    workspace instrument via UpdateInstrumentFromFile.

    @param inWS Name of the workspace whose instrument is updated
    @param instr Instrument name prefix used for the angles file name
    @param theta List of angles, one per spectrum
    """
    workdir = config['defaultsave.directory']
    path = os.path.join(workdir, instr + '_angles.txt')
    logger.information('Creating angles file : ' + path)
    head = 'spectrum,theta'
    # Context manager guarantees the file is closed even if a write fails
    with open(path, 'w') as handle:
        handle.write(head + " \n")
        for n, angle in enumerate(theta):
            handle.write(str(n + 1) + '   ' + str(angle) + "\n")
            logger.information('Spectrum ' + str(n + 1) + ' = ' + str(angle))
    UpdateInstrumentFromFile(Workspace=inWS, Filename=path, MoveMonitors=False, IgnorePhi=False,
                             AsciiHeader=head)
Beispiel #40
0
    def _run_non_mirror_mode(self, monitor_ws, grouped_ws):
        """
        Runs energy reduction without mirror mode.

        @param monitor_ws :: name of the monitor workspace
        @param grouped_ws :: name of workspace with the detectors grouped
        @return list containing the name of the reduced workspace
        """
        logger.information('Mirror sense is OFF')
        self._calculate_energy(monitor_ws, grouped_ws, self._red_workspace)
        # Apply the detector calibration when one was supplied
        if self._calibration_workspace != '':
            Divide(LHSWorkspace=self._red_workspace,
                   RHSWorkspace=self._calibration_workspace + '_red',
                   OutputWorkspace=self._red_workspace)
        return [self._red_workspace]
Beispiel #41
0
    def _process_output(self, workspace):
        """
        Optionally save and/or plot the given fit workspace, depending on
        the instance's _save and _plot flags.

        @param workspace Name of the workspace to process
        """
        if self._save:
            from mantid.simpleapi import SaveNexusProcessed
            from IndirectCommon import getDefaultWorkingDirectory
            workdir = getDefaultWorkingDirectory()
            fit_path = os.path.join(workdir, workspace + '.nxs')
            SaveNexusProcessed(InputWorkspace=workspace, Filename=fit_path)

            logger.information('Fit file is ' + fit_path)

        if self._plot:
            from IndirectImport import import_mantidplot
            mtd_plot = import_mantidplot()
            # Plot the first three spectra; third argument presumably
            # enables error bars — TODO confirm against MantidPlot API
            mtd_plot.plotSpectrum(workspace, [0, 1, 2], True)
Beispiel #42
0
def ChangeAngles(inWS, instr, theta):
    """
    Write a spectrum/theta table for the given angles and apply it to the
    workspace instrument via UpdateInstrumentFromFile.

    @param inWS Name of the workspace whose instrument is updated
    @param instr Instrument name prefix used for the angles file name
    @param theta List of angles, one per spectrum
    """
    workdir = config['defaultsave.directory']
    # 'file' shadowed the builtin in the original; renamed to filename
    filename = instr + '_angles.txt'
    path = os.path.join(workdir, filename)
    logger.information('Creating angles file : ' + path)
    head = 'spectrum,theta'
    # Context manager guarantees the file is closed even if a write fails
    with open(path, 'w') as handle:
        handle.write(head + " \n")
        for n in range(0, len(theta)):
            handle.write(str(n + 1) + '   ' + str(theta[n]) + "\n")
            logger.information('Spectrum ' + str(n + 1) + ' = ' + str(theta[n]))
    UpdateInstrumentFromFile(Workspace=inWS, Filename=path, MoveMonitors=False, IgnorePhi=False,
                             AsciiHeader=head)
def sum_regular_runs(workspace_names):
    """
    Sum runs with single workspace data.

    @param workspace_names List of names of input workspaces
    @return List of names of workspaces
    """
    from mantid.simpleapi import (MergeRuns, Scale, AddSampleLog,
                                  DeleteWorkspace)

    # Use the first workspace name as the result of summation
    # (MergeRuns writes its output over this workspace)
    summed_detector_ws_name = workspace_names[0]
    summed_monitor_ws_name = workspace_names[0] + '_mon'

    # Get a list of the run numbers for the original data
    run_numbers = ','.join([str(mtd[ws_name].getRunNumber()) for ws_name in workspace_names])

    # Generate lists of the detector and monitor workspaces
    detector_workspaces = ','.join(workspace_names)
    monitor_workspaces = ','.join([ws_name + '_mon' for ws_name in workspace_names])

    # Merge the raw workspaces (detectors and monitors separately)
    MergeRuns(InputWorkspaces=detector_workspaces,
              OutputWorkspace=summed_detector_ws_name)
    MergeRuns(InputWorkspaces=monitor_workspaces,
              OutputWorkspace=summed_monitor_ws_name)

    # Delete old workspaces (all but the first, which holds the result)
    for idx in range(1, len(workspace_names)):
        DeleteWorkspace(workspace_names[idx])
        DeleteWorkspace(workspace_names[idx] + '_mon')

    # Derive the scale factor based on number of merged workspaces
    scale_factor = 1.0 / len(workspace_names)
    logger.information('Scale factor for summed workspaces: %f' % scale_factor)

    # Scale the new detector and monitor workspaces
    Scale(InputWorkspace=summed_detector_ws_name,
          OutputWorkspace=summed_detector_ws_name,
          Factor=scale_factor)
    Scale(InputWorkspace=summed_monitor_ws_name,
          OutputWorkspace=summed_monitor_ws_name,
          Factor=scale_factor)

    # Add the list of run numbers to the result workspace as a sample log
    AddSampleLog(Workspace=summed_detector_ws_name, LogName='multi_run_numbers',
                 LogType='String', LogText=run_numbers)

    # Only have the one workspace now
    return [summed_detector_ws_name]
Beispiel #44
0
def RejectZero(inWS, tot):
    """
    Copy the spectra of inWS whose total is positive into a new workspace
    named inWS[:-3] + 'red', logging each rejected spectrum.
    """
    num_hist = mtd[inWS].getNumberHistograms()  # no. of hist/groups in sam
    outWS = inWS[:-3] + 'red'
    kept = 0
    for index in range(num_hist):
        if tot[index] <= 0:
            logger.information('** spectrum ' + str(index + 1) + ' rejected')
            continue
        ExtractSingleSpectrum(InputWorkspace=inWS,
                              OutputWorkspace='__tmp',
                              WorkspaceIndex=index)
        if kept:
            # Append subsequent spectra onto the output
            ConjoinWorkspaces(InputWorkspace1=outWS,
                              InputWorkspace2='__tmp',
                              CheckOverlapping=False)
        else:
            # First accepted spectrum becomes the output workspace
            RenameWorkspace(InputWorkspace='__tmp', OutputWorkspace=outWS)
        kept += 1
Beispiel #45
0
def ReadMap(path):
    """
    Read a spectrum map file: the first line holds the spectrum count and
    each following line holds integers whose second value is the map flag.

    Exits the process (via sys.exit) when the header count does not match
    the number of data lines, preserving the original behaviour.
    """
    asc = loadFile(path)

    lasc = len(asc)
    logger.information('Map file : ' + path + ' ; spectra = ' + str(lasc - 1))
    numb = ExtractInt(asc[0])[0]
    if numb != lasc - 1:
        error = 'Number of lines  not equal to number of spectra'
        logger.error(error)
        sys.exit(error)
    # Second integer on each data line is the map value
    return [ExtractInt(line)[1] for line in asc[1:]]
Beispiel #46
0
def ReadMap(path):
    """
    Read a spectrum map file.

    The first line holds the number of spectra; each following line holds
    integers whose second value is the map flag for that spectrum.

    @param path Path to the map file
    @return List of map flag values, one per spectrum
    """
    asc = loadFile(path)

    lasc = len(asc)
    logger.information('Map file : ' + path + ' ; spectra = ' + str(lasc - 1))
    val = ExtractInt(asc[0])
    numb = val[0]
    # Header count must match the number of data lines
    if numb != (lasc - 1):
        error = 'Number of lines  not equal to number of spectra'
        logger.error(error)
        sys.exit(error)
    map = []  # NOTE(review): shadows the builtin 'map'
    for n in range(1, lasc):
        val = ExtractInt(asc[n])
        map.append(val[1])
    return map
Beispiel #47
0
def RejectZero(inWS, tot):
    """
    Copy spectra of inWS with a positive total into a new workspace named
    inWS[:-3] + 'red'; spectra with zero/negative totals are rejected.

    @param inWS Name of the input workspace
    @param tot List of per-spectrum totals used as the rejection criterion
    """
    nin = mtd[inWS].getNumberHistograms()  # no. of hist/groups in sam
    nout = 0
    outWS = inWS[:-3] + 'red'
    for n in range(0, nin):
        if tot[n] > 0:
            ExtractSingleSpectrum(InputWorkspace=inWS, OutputWorkspace='__tmp',
                                  WorkspaceIndex=n)
            if nout == 0:
                # First accepted spectrum becomes the output workspace
                RenameWorkspace(InputWorkspace='__tmp', OutputWorkspace=outWS)
            else:
                # Subsequent spectra are appended to it
                ConjoinWorkspaces(InputWorkspace1=outWS, InputWorkspace2='__tmp',
                                  CheckOverlapping=False)
            nout += 1
        else:
            logger.information('** spectrum ' + str(n + 1) + ' rejected')
Beispiel #48
0
    def _punch_and_fill(self, signal, dimX, dimY, dimZ): # noqa
        """
        Set to NaN ("punch out") the regions of *signal* around integer
        (h, k, l) positions, using either a cube or a sphere of the size
        given by the 'Size' property.  When a 'SpaceGroup' is given, only
        reflections allowed by that space group are punched.

        @param signal 3D array of signal values (indexed [x, y, z])
        @param dimX/dimY/dimZ dimension objects passed to _get_dim_params
        @return the signal array with punched regions set to NaN
        """
        Xmin, Xmax, _, Xwidth = self._get_dim_params(dimX)
        Ymin, Ymax, _, Ywidth = self._get_dim_params(dimY)
        Zmin, Zmax, _, Zwidth = self._get_dim_params(dimZ)
        X, Y, Z = self._get_XYZ_ogrid(dimX, dimY, dimZ)

        size = self.getProperty("Size").value
        if len(size)==1:
            # A single value applies to all three axes
            size = np.repeat(size, 3)
        size/=2.0 # We want radii or half box width
        cut_shape = self.getProperty("Shape").value
        space_group = self.getProperty("SpaceGroup").value
        if space_group:
            check_space_group = True
            try:
                # Allow the space group to be specified by number
                space_group=SpaceGroupFactory.subscribedSpaceGroupSymbols(int(space_group))[0]
            except ValueError:
                # Already a symbol string
                pass
            logger.information('Using space group: '+space_group)
            sg=SpaceGroupFactory.createSpaceGroup(space_group)
        else:
            check_space_group = False

        if cut_shape == 'cube':
            # Punch an axis-aligned box of half-width size[] around every
            # allowed integer (h, k, l) inside the dimension limits
            for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
                for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
                    for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
                        if not check_space_group or sg.isAllowedReflection([h,k,l]):
                            signal[int((h-size[0]-Xmin)/Xwidth+1):int((h+size[0]-Xmin)/Xwidth),
                                   int((k-size[1]-Ymin)/Ywidth+1):int((k+size[1]-Ymin)/Ywidth),
                                   int((l-size[2]-Zmin)/Zwidth+1):int((l+size[2]-Zmin)/Zwidth)]=np.nan
        else:  # sphere
            # Ellipsoidal mask: True wherever a point lies within size[]
            # of the nearest integer (h, k, l)
            mask=((X-np.round(X))**2/size[0]**2 + (Y-np.round(Y))**2/size[1]**2 + (Z-np.round(Z))**2/size[2]**2 < 1)

            # Unmask invalid reflections
            if check_space_group:
                for h in range(int(np.ceil(Xmin)), int(Xmax)+1):
                    for k in range(int(np.ceil(Ymin)), int(Ymax)+1):
                        for l in range(int(np.ceil(Zmin)), int(Zmax)+1):
                            if not sg.isAllowedReflection([h,k,l]):
                                mask[int((h-0.5-Xmin)/Xwidth+1):int((h+0.5-Xmin)/Xwidth),
                                     int((k-0.5-Ymin)/Ywidth+1):int((k+0.5-Ymin)/Ywidth),
                                     int((l-0.5-Zmin)/Zwidth+1):int((l+0.5-Zmin)/Zwidth)]=False

            signal[mask]=np.nan

        return signal
Beispiel #49
0
    def _monitor_range(self, monitor_ws):
        """
        Get sensible values for the min and max cropping range

        @param monitor_ws :: name of the monitor workspace
        @return tuple containing the min and max x values in the range
        """
        x_values = mtd[monitor_ws].readX(0)  # energy array
        y_values = mtd[monitor_ws].readY(0)  # monitor counts
        # Peak position within the first 20 channels
        imin = np.argmax(np.array(y_values[0:20]))
        nch = len(y_values)
        # Peak position within the last 20 channels
        imax = nch - 21 + np.argmax(np.array(y_values[nch - 21:nch - 1]))

        logger.information('Cropping range %f to %f' % (x_values[imin], x_values[imax]))

        return x_values[imin], x_values[imax]
Beispiel #50
0
def UseMap(inWS, map):
    """
    Extract the spectra of inWS flagged with 1 in the supplied map into a
    new workspace named inWS[:-3] + 'red'.

    @param inWS Name of the input workspace
    @param map List of 0/1 flags, one per spectrum (NOTE(review): shadows
               the builtin 'map'; renaming would change the interface)
    """
    nin = mtd[inWS].getNumberHistograms()  # no. of hist/groups in sam
    nout = 0
    outWS = inWS[:-3] + 'red'
    for n in range(0, nin):
        if map[n] == 1:
            ExtractSingleSpectrum(InputWorkspace=inWS, OutputWorkspace='__tmp', \
                                  WorkspaceIndex=n)
            if nout == 0:
                # First mapped spectrum becomes the output workspace
                RenameWorkspace(InputWorkspace='__tmp', OutputWorkspace=outWS)
            else:
                # Subsequent spectra are appended to it
                ConjoinWorkspaces(InputWorkspace1=outWS, InputWorkspace2='__tmp',
                                  CheckOverlapping=False)
            nout += 1
            logger.information('** spectrum ' + str(n + 1) + ' mapped')
        else:
            logger.information('** spectrum ' + str(n + 1) + ' skipped')
Beispiel #51
0
    def make_fig(self, plot_dict, create_plot=True):
        """
        This method currently only considers single matplotlib.axes.Axes based figures as that is the most common case
        :param plot_dict: dictionary; A dictionary of various items intended to recreate a figure
        :param create_plot: Bool; whether or not to make the plot, or to return the figure.
        :return: matplotlib.figure; Only returns if create_plot=False
        """
        # Grab creation arguments
        creation_args = plot_dict["creationArguments"]

        if len(creation_args) == 0:
            logger.information(
                "A plot could not be loaded from the save file, as it did not have creation_args. "
                "The original plot title was: {}".format(plot_dict["label"]))
            return

        # Restore any normalisation objects that were serialised as dicts
        for sublist in creation_args:
            for cargs_dict in sublist:
                if 'norm' in cargs_dict and type(cargs_dict['norm']) is dict:
                    cargs_dict['norm'] = self.restore_normalise_obj_from_dict(
                        cargs_dict['norm'])
        fig, axes_matrix, _, _ = create_subplots(len(creation_args))
        axes_list = axes_matrix.flatten().tolist()
        for ax, cargs_list in zip(axes_list, creation_args):
            # Keep an unmodified copy to attach to the axes afterwards
            creation_args_copy = copy.deepcopy(cargs_list)
            for cargs in cargs_list:
                if "workspaces" in cargs:
                    # 'workspaces' is popped so the remaining cargs can be
                    # forwarded as plot kwargs
                    workspace_name = cargs.pop("workspaces")
                    workspace = ADS.retrieve(workspace_name)
                    self.workspace_plot_func(workspace, ax, ax.figure, cargs)
                elif "function" in cargs:
                    self.plot_func(ax, cargs)
            for cargs in creation_args_copy:
                # Dropped before storing — presumably not replayable as a
                # creation arg; TODO confirm
                cargs.pop('normalize_by_bin_width', None)
            ax.creation_args = creation_args_copy

        # Update the fig
        fig._label = plot_dict["label"]
        if fig.canvas.manager is not None:
            fig.canvas.manager.set_window_title(plot_dict["label"])
        self.restore_figure_data(fig=fig, dic=plot_dict)

        # If the function should create plot then create else return
        if create_plot:
            fig.show()
        else:
            return fig
Beispiel #52
0
def UseMap(inWS, map):
    """
    Copy the spectra flagged with 1 in *map* from *inWS* into a reduced
    workspace whose name replaces the last three characters of *inWS*
    with 'red'. Spectra are logged one by one as mapped or skipped.
    """
    num_hist = mtd[inWS].getNumberHistograms()  # no. of hist/groups in sam
    out_ws = inWS[:-3] + 'red'
    copied = 0
    for idx in range(num_hist):
        if map[idx] != 1:
            logger.information('** spectrum ' + str(idx + 1) + ' skipped')
            continue
        ExtractSingleSpectrum(InputWorkspace=inWS,
                              OutputWorkspace='__tmp',
                              WorkspaceIndex=idx)
        if copied == 0:
            # First selected spectrum seeds the output workspace.
            RenameWorkspace(InputWorkspace='__tmp', OutputWorkspace=out_ws)
        else:
            # Subsequent spectra are appended onto the output.
            ConjoinWorkspaces(InputWorkspace1=out_ws,
                              InputWorkspace2='__tmp',
                              CheckOverlapping=False)
        copied += 1
        logger.information('** spectrum ' + str(idx + 1) + ' mapped')
    def _get_spectra_index(self, input_ws):
        """
        Gets the index of the two monitors and first detector for the current instrument configuration.
        Assumes monitors are named monitor1 and monitor2.

        @param input_ws Name of the workspace whose instrument is inspected
        @return Tuple of (monitor_1_idx, monitor_2_idx, detector_1_idx);
                monitor_2_idx is None when the instrument has no monitor2
                (unless defaults are used after a lookup failure)
        """

        instrument = mtd[input_ws].getInstrument()

        try:
            analyser = instrument.getStringParameter('analyser')[0]
            # NOTE(review): the analyser component is indexed here; presumably
            # its first child carries the first detector ID -- confirm.
            detector_1_idx = instrument.getComponentByName(
                analyser)[0].getID() - 1
            if self._verbose:
                logger.information(
                    'Got index of first detector for analyser %s: %d' %
                    (analyser, detector_1_idx))
        except IndexError:
            # Parameter missing or component lookup failed: fall back to a default.
            detector_1_idx = 2
            logger.warning(
                'Could not determine index of first detector, using default value.'
            )

        try:
            monitor_1_idx = self._get_detector_spectrum_index(
                input_ws,
                instrument.getComponentByName('monitor1').getID())

            # monitor2 is optional on some instruments.
            monitor_2 = instrument.getComponentByName('monitor2')
            if monitor_2 is not None:
                monitor_2_idx = self._get_detector_spectrum_index(
                    input_ws, monitor_2.getID())
            else:
                monitor_2_idx = None

            if self._verbose:
                logger.information('Got index of monitors: %d, %s' %
                                   (monitor_1_idx, str(monitor_2_idx)))
        except IndexError:
            # Lookup failed: assume the conventional first two spectra.
            monitor_1_idx = 0
            monitor_2_idx = 1
            logger.warning(
                'Could not determine index of monitors, using default values.')

        return monitor_1_idx, monitor_2_idx, detector_1_idx
Beispiel #54
0
    def make_fig(self, plot_dict, create_plot=True):
        """
        This method currently only considers single matplotlib.axes.Axes based figures as that is the most common case
        :param plot_dict: dictionary; A dictionary of various items intended to recreate a figure
        :param create_plot: Bool; whether or not to make the plot, or to return the figure.
        :return: matplotlib.figure; Only returns if create_plot=False
        """
        import matplotlib.pyplot as plt
        # Grab creation arguments
        creation_args = plot_dict["creationArguments"]

        if len(creation_args) == 0:
            logger.information(
                "A plot could not be loaded from the save file, as it did not have creation_args. "
                "The original plot title was: {}".format(plot_dict["label"]))
            return

        # Make a copy so it can be applied to the axes, of the plot once created.
        creation_args_copy = copy.deepcopy(creation_args[0])

        # Make initial plot
        fig, ax = plt.subplots(subplot_kw={'projection': 'mantid'})

        # If an overplot is necessary plot onto the same figure
        for cargs in creation_args[0]:
            if "workspaces" in cargs:
                workspace_name = cargs.pop('workspaces')
                workspace = ADS.retrieve(workspace_name)
                self.plot_func(workspace, ax, ax.figure, cargs)

        # Make sure that the axes gets it's creation_args as loading doesn't add them
        ax.creation_args = creation_args_copy

        # Update the fig
        fig._label = plot_dict["label"]
        # Figure.canvas.set_window_title is deprecated in modern matplotlib;
        # go via the canvas manager, which may be None for headless backends.
        if fig.canvas.manager is not None:
            fig.canvas.manager.set_window_title(plot_dict["label"])
        self.restore_figure_data(fig=fig, dic=plot_dict)

        # If the function should create plot then create else return
        if create_plot:
            fig.show()
        else:
            return fig
Beispiel #55
0
    def PyExec(self):
        from IndirectImport import run_f2py_compatibility_test, is_supported_f2py_platform

        if is_supported_f2py_platform():
            import IndirectBayes as Main

        run_f2py_compatibility_test()

        self.log().information('ResNorm input')

        # Collect the algorithm properties.
        input_type = self.getPropertyValue('InputType')
        instr_prefix = self.getPropertyValue('Instrument')
        analyser = self.getPropertyValue('Analyser')
        van_number = self.getPropertyValue('VanNumber')
        res_input_type = self.getPropertyValue('ResInputType')
        res_number = self.getPropertyValue('ResNumber')
        nbin = self.getPropertyValue('VanBinning')
        erange = [float(self.getPropertyValue('EnergyMin')),
                  float(self.getPropertyValue('EnergyMax'))]
        plot_op = self.getPropertyValue('Plot')
        save_op = self.getProperty('Save').value

        # Conventional workspace names for the vanadium and resolution data.
        vname = instr_prefix + van_number + '_' + analyser + '_red'
        rname = instr_prefix + res_number + '_' + analyser + '_res'

        workdir = config['defaultsave.directory']

        if input_type == 'File':
            # path name for van nxs file
            vpath = os.path.join(workdir, vname + '.nxs')
            LoadNexusProcessed(Filename=vpath, OutputWorkspace=vname)
            vanadium_msg = 'Vanadium from File : ' + vpath
        else:
            vanadium_msg = 'Vanadium from Workspace : ' + vname

        if res_input_type == 'File':
            # path name for res nxs file
            rpath = os.path.join(workdir, rname + '.nxs')
            LoadNexusProcessed(Filename=rpath, OutputWorkspace=rname)
            resolution_msg = 'Resolution from File : ' + rpath
        else:
            resolution_msg = 'Resolution from Workspace : ' + rname

        logger.information(vanadium_msg)
        logger.information(resolution_msg)

        Main.ResNormRun(vname, rname, erange, nbin, plot_op, save_op)
    def PyExec(self):
        setup_prog = Progress(self, start=0.0, end=0.05, nreports=5)
        setup_prog.report('Setting up algorithm')
        self._setup()

        base_name = str(self._sample_ws_in)

        # Transform the monitors for both sample and container runs.
        trans_prog = Progress(self, start=0.05, end=0.4, nreports=2)
        trans_prog.report('Transforming monitor for Sample')
        self._trans_mon(base_name, 'Sam', self._sample_ws_in)
        trans_prog.report('Transforming monitor for Container')
        self._trans_mon(base_name, 'Can', self._can_ws_in)

        workflow_prog = Progress(self, start=0.4, end=1.0, nreports=4)
        # Workspace names produced by the monitor transformation above.
        sample_ws = base_name + '_Sam'
        container_ws = base_name + '_Can'
        transmission_ws = base_name + '_Trans'

        workflow_prog.report('Dividing Sample by Container')
        Divide(LHSWorkspace=sample_ws,
               RHSWorkspace=container_ws,
               OutputWorkspace=transmission_ws)

        average_trans = numpy.average(mtd[transmission_ws].readY(0))
        logger.information('Average Transmission: ' + str(average_trans))

        # Record which container was used in the sample logs.
        workflow_prog.report('Adding Sample logs')
        AddSampleLog(Workspace=transmission_ws,
                     LogName='can_workspace',
                     LogType='String',
                     LogText=self._can_ws_in)

        workflow_prog.report('Creating output GroupWorkspace')
        GroupWorkspaces(InputWorkspaces=','.join([sample_ws, container_ws, transmission_ws]),
                        OutputWorkspace=self._out_ws)

        self.setProperty('OutputWorkspace', self._out_ws)
        workflow_prog.report('Algorithm complete')
Beispiel #57
0
    def _setup(self):
        """
        Gets algorithm properties and caches them as instance attributes.
        """
        self._instrument_name = self.getPropertyValue('Instrument')

        runs = self.getProperty('InputFiles').value
        self._data_files = []
        # _format_runs populates self._data_files from the raw run list.
        self._format_runs(runs)
        # Output names are derived from the first and last runs of the scan.
        first_file = self._data_files[0]
        last_file = self._data_files[-1]

        self._analyser = self.getPropertyValue('Analyser')
        self._reflection = self.getPropertyValue('Reflection')

        self._spectra_range = self.getProperty('SpectraRange').value
        self._elastic_range = self.getProperty('ElasticRange').value
        self._inelastic_range = self.getProperty('InelasticRange').value
        self._total_range = self.getProperty('TotalRange').value

        self._sample_log_name = self.getPropertyValue(
            'SampleEnvironmentLogName')
        self._sample_log_value = self.getPropertyValue(
            'SampleEnvironmentLogValue')

        self._msd_fit = self.getProperty('MSDFit').value

        self._width_fit = self.getProperty('WidthFit').value

        self._output_ws = first_file + '_to_' + last_file + '_scan_red'
        self._scan_ws = first_file + '_to_' + last_file + '_scan'

        self._plot = self.getProperty('Plot').value
        self._save = self.getProperty('Save').value

        # Get the IPF filename
        self._ipf_filename = os.path.join(
            config['instrumentDefinition.directory'], self._instrument_name +
            '_' + self._analyser + '_' + self._reflection + '_Parameters.xml')
        logger.information('Instrument parameter file: %s' %
                           self._ipf_filename)
Beispiel #58
0
def scale_monitor(workspace_name):
    """
    Scale monitor intensity by a factor given as the Workflow.MonitorScalingFactor parameter.

    @param workspace_name Name of workspace to process monitor for
    """
    from mantid.simpleapi import Scale

    monitor_ws = workspace_name + '_mon'
    instr = mtd[workspace_name].getInstrument()

    try:
        factor = instr.getNumberParameter('Workflow.Monitor1-ScalingFactor')[0]
    except IndexError:
        # Parameter absent from the instrument parameter file: nothing to do.
        logger.information('No monitor scaling factor found for workspace %s' % workspace_name)
        return

    # Scaling by exactly 1.0 would be a no-op, so skip the algorithm call.
    if factor == 1.0:
        return

    Scale(InputWorkspace=monitor_ws,
          OutputWorkspace=monitor_ws,
          Factor=1.0 / factor,
          Operation='Multiply')
Beispiel #59
0
def ReadIbackGroup(a, first):  # read Ascii block of spectrum values
    """
    Read one spectrum block from the list of file lines *a*, starting at
    line index *first* (which must begin with 'S').

    @param a List of lines from an IN10/IN16 back-scattering ASCII file
    @param first Index of the 'S' marker line that opens the block
    @return Tuple (next_index, x, y, e) where x is the point index as floats,
            y the intensities and e their Poisson (sqrt) errors
    Exits the process via sys.exit when the block does not start with 'S'.
    """
    # 'pos' rather than 'next' -- the original shadowed the builtin next().
    pos = first
    line1 = a[pos]
    pos += 1

    if not line1.startswith('S'):
        error = 'NOT an S block starting at line ' + str(first)
        logger.information('ERROR *** ' + error)
        sys.exit(error)

    # Skip one further header line after the 'S' marker, then parse the
    # integer block that follows.
    pos += 1
    pos, Ival = Iblock(a, pos)

    y = [float(v) for v in Ival]
    x = [float(m) for m in range(len(y))]
    e = [math.sqrt(yy) for yy in y]
    return pos, x, y, e  # values of x,y,e as lists