Exemplo n.º 1
0
def getData(fname):
    """Extract data from mda-ASCII file and distribute into Pixel objects

    Returns:
    XAS scan data in a detector 'object' (variable "det") energy axis,
    transmission data array and detector filled with fluo data
    (detector object simply full of '0' values if no fluorescence data available)
    Transmission data in "trans" comprises encoder_E, I0/1/2, sample_time, encoder_angle
    !! I0/1/2  are already normalised to t !!

    """
    mda = readMDA.readMDA(fname, verbose=True)
    scanData = mda[1]
    scanSize = scanData.npts
    try:
        detectorData = mda[2]
        detSize = detectorData.npts
    except:
        detSize = 0

    det = makeDet(detSize, scanSize)

    # read transmission data
    pvColumnNames = ['EncEnergy:ActPos', 'scaler1:S2C', 'scaler1:S3C',
                     'scaler1:S4C', 'scaler1.T', 'EncAngle:ActPos']

    trans = np.empty((len(pvColumnNames), scanSize))
    for series in scanData.d:
        try:
            tag = ':'.join(series.name.split(':')[1:])  # use the PV part after the IOC id
            trans[pvColumnNames.index(tag)] = series.data
        except:
            pass

    ts = trans[pvColumnNames.index('scaler1.T')]    # get the sample time "t_s"
    e = trans[pvColumnNames.index('EncEnergy:ActPos')] * 1000.0   # Energy axis (in eV !!)

    # normalise I0, I1, I2 to sample_time ts (use string "scaler1:" as identifier)
    for i, name in enumerate(pvColumnNames):
        if name.startswith('scaler1:'):
            trans[i] = trans[i] / ts

    # read fluorescence data (if it exists)
    if detSize != 0:
        print 'reading fluorescence data...'
        for k in range(scanSize):
            for i in np.arange(detSize):
                det[i].fpeaks[k] = detectorData.d[0].data[k][i]
                det[i].speaks[k] = detectorData.d[1].data[k][i]
                det[i].roi[k] = detectorData.d[2].data[k][i]

        for i in np.arange(detSize):
            det[i].NormT(ts)        # ("ts" = sampling time per data point t_s)

    ## call dead time correction later; should be performed only on
    ## good spectra ("goodPixels") as bad spectra can contain zeros which
    ## this function would divide by
    ## call is via function:  detDeadCorr(det, goodPixels)

    return e, trans, det
Exemplo n.º 2
0
def extract_angles(fn_string, fn_range):
    """
	Convenience function for extracting the angles from a bunch of mda files

	Inputs
	------
	fn_string: the path to the mda files with {:04d} (or similar) for specifying the number.
	fn_range: a two element tuple specifying the dtart and end numbers

	Outputs
	-------

	A pretty list of mda number and angle which can be cut and pasted into a google doc.

	"""
    print "This process takes so long because the entire mda file is parsed despite\nneeding but a single element.\nC'est la vie.\n\n"
    l = []
    for i in range(fn_range[0], fn_range[1]):
        print 'Extracting angles from mda: {:04d}'.format(i)
        mda = readMDA(fn_string.format(i), verbose=0)
        l.append((i, mda[0]['2xfm:userStringCalc10.DD'][2]))

    print 'MDA\t\tAngle (degrees)'
    pstr = '\t{:04d}\t\t{:3.2f}'
    for element in l:
        print pstr.format(element[0], float(element[1]))
Exemplo n.º 3
0
    def convertMDA(self):
        """Convert the current mda scan file to CSV and announce it via redis.

        Reads the scan named by self.scanFileNamePV from self.mda_path,
        writes all positioner and detector columns of the innermost scan
        dimension to a CSV file under self.dataFullPath, then pushes the
        CSV path onto the 'MDA:<epn>' redis list and publishes it on the
        'MDA:NewFile' channel.  Returns None; silently gives up when the
        mda file cannot be parsed.
        """
        mdaFile = '%s%s' % (self.mda_path,self.scanFileNamePV.get(as_string=True))
        
        print mdaFile
        
        # an unreadable / incomplete mda file is simply skipped
        try:
            mda = readMDA(mdaFile, verbose=False)
        except Exception:
            return
        
        # mda[-1] is the innermost scan dimension; collect positioner (p)
        # then detector (d) columns, then transpose into per-point rows
        data = mda[-1]
        results = [p.data for p in data.p]
        results.extend([d.data for d in data.d])
        results = zip(*results)
        names = [p.name for p in data.p]
        names.extend([d.name for d in data.d])
    
        datFileName = '%s%s.csv' % (self.dataFullPath, os.path.splitext(os.path.basename(mdaFile))[0])
        
        print datFileName
        
        # header row of PV names, then one comma-separated row per scan point
        with open(datFileName,'w') as f:
            print >> f, ', '.join('%s' % name for name in names)
            for line in results:
                print >> f, ', '.join('%s' % data for data in line)

        # notify downstream consumers; the EPN is assumed to be the
        # third-from-last component of the data path -- TODO confirm
        epn = self.dataFullPath.split('/')[-3]
        try:
            self.redis.rpush('MDA:%s' % (epn,),datFileName)
            self.redis.publish('MDA:NewFile',datFileName)
        except redis.ConnectionError:
            print 'Error connecting to redis database'
Exemplo n.º 4
0
def panMDA(file=''):
    # fixed: the original body mixed tabs and 4/8-space indents, which is
    # an IndentationError/TabError risk; normalized to 4 spaces throughout
    "panMDA(file='') - display all 2D images from an MDA file in a scrolled window"
    # fall back to a hard-coded example scan when no file is given
    if file == '':
        file = '/home/beams/CHA/data/xxx/cha_0001.mda'
    d = readMDA(file)
    pal = readPalette()
    # d[2] is the 2-D sub-scan; show all of its detector images on a 5-column grid
    det2D(d[2].d[0:d[2].nd], scale=(2, 2), columns=5, file=file, pal=pal)
    return d
Exemplo n.º 5
0
def panMDA(file=''):
    "panMDA(file='') - display all 2D images from an MDA file in a scrolled window"
    # use a hard-coded example scan when the caller gives no path
    if file == '':
        file = '/home/beams/CHA/data/xxx/cha_0001.mda'
    scan = readMDA(file)
    palette = readPalette()
    # the 2-D sub-scan sits at index 2; display every detector image
    image_scan = scan[2]
    det2D(image_scan.d[0:image_scan.nd],
          scale=(2, 2), columns=5, file=file, pal=palette)
    return scan
Exemplo n.º 6
0
def readDir(s):
	dd = []
	files = glob(s)
	i = 0
	for file in files:
		dd.append(readMDA.readMDA(file, verbose=0, maxdim=0))
		print "file:%-15s rank:%d %s" % (dd[i][0]['filename'],
		dd[i][0]['rank'], str(dd[i][0]['dimensions']))
		i = i+1
	return dd
Exemplo n.º 7
0
def readDir(s):
    dd = []
    files = glob(s)
    i = 0
    for file in files:
        dd.append(readMDA.readMDA(file, verbose=0, maxdim=0))
        print "file:%-15s rank:%d %s" % (dd[i][0]['filename'],
                                         dd[i][0]['rank'],
                                         str(dd[i][0]['dimensions']))
        i = i + 1
    return dd
Exemplo n.º 8
0
def getAllExtraPVs(fname):
    """Return a dict of all "Extra" PVs keyed by the PV name part after the colon.

    For example, if fname contains the PV SR12ID01DET01:mca1.R0LO with a
    value ("long", "", [1200]), the result contains the entry
    'mca1.R0LO':("long", "", [1200]) -- the triple comes straight from readMDA.

    Arguments:
    fname - mda filename

    Returns:
    dict e.g. {'mca1.R0LO':("long", "", [1000]), 'mca1.R0HI':("long", "", [1200]), ...}
    """
    mda = readMDA.readMDA(fname, verbose=False)
    # the first list entry holds the "Extra" PV dict recorded with the scan
    extra_pvs = mda[0]
    shortened = {}
    for full_name in extra_pvs:
        shortened[full_name.split(':')[-1]] = extra_pvs[full_name]
    return shortened
Exemplo n.º 9
0
def getAllExtraPVs(fname):
    """Index every "Extra" PV of an mda file by the name part after the colon.

    For example, if fname contains the PV SR12ID01DET01:mca1.R0LO with a
    value ("long", "", [1200]), the returned dict contains the entry
    'mca1.R0LO':("long", "", [1200]), where the value is the triple
    obtained from readMDA.

    Arguments:
    fname - mda filename

    Returns:
    dict e.g. {'mca1.R0LO':("long", "", [1000]), 'mca1.R0HI':("long", "", [1200]), ...}
    """
    # mda[0] is the dict of "Extra" PVs recorded alongside the scan
    header = readMDA.readMDA(fname, verbose=False)[0]
    return {key.split(':')[-1]: header[key] for key in header}
Exemplo n.º 10
0
    def read_mda(self, filename=None):
        """Read the configured data array out of an mda file.

        Arguments:
        filename -- mda file to read; defaults to self.filename

        Returns the .data attribute of the series selected by
        self.mda_file_path in the second scan dimension of the mda file.

        Raises ValueError for a malformed self.mda_file_path; errors from
        readMDA itself are logged and re-raised.
        """
        if not filename:
            filename = self.filename

        source = self.mda_file_path[0].lower()
        if source not in ['d', 'p']:
            CXP.log.error("mda_file_path first character must be 'd' or 'p'")
            # was a bare `raise` with no active exception, which itself
            # errors out; raise an explicit, meaningful exception instead
            raise ValueError("mda_file_path first character must be 'd' or 'p'")
        channel = self.mda_file_path[1]
        # was np.isnumeric(channel): numpy has no such function (it lives in
        # np.char); the plain string method is the correct check here
        if not channel.isdigit():
            CXP.log.error("mda_file_path second character must be numeric.")
            raise ValueError("mda_file_path second character must be numeric.")

        # NOTE(review): `channel` is validated but never used below — the
        # lookup indexes only by `source`; confirm intent against callers
        try:
            return readMDA.readMDA(filename)[2][source].data
        except Exception:
            CXP.log.error('Could not extract array from mda file')
            raise
Exemplo n.º 11
0
    def read_mda(self, filename=None):
        """Read the configured data array out of an mda file.

        filename -- mda file to read; falls back to self.filename.
        self.mda_file_path encodes the lookup: first char 'd' or 'p'
        (detector/positioner), second char the channel number.
        """
        if not filename:
            filename = self.filename

        source = self.mda_file_path[0].lower()
        if source not in ['d', 'p']:
            CXP.log.error("mda_file_path first character must be 'd' or 'p'")
            # BUG(review): bare `raise` with no active exception raises
            # TypeError (Py2) / RuntimeError (Py3), not a meaningful error
            raise
        channel = self.mda_file_path[1]
        # BUG(review): numpy has no `isnumeric`; this line raises
        # AttributeError at runtime — likely meant channel.isdigit()
        if not np.isnumeric(channel):
            CXP.log.error("mda_file_path second character must be numeric.")
            raise
        # NOTE(review): `channel` is validated but never used below —
        # presumably the intended series index; verify against callers

        try:
            return readMDA.readMDA(filename)[2][source].data
        except:
            CXP.log.error('Could not extract array from mda file')
            raise
Exemplo n.º 12
0
def extract_angles(fn_string, fn_range):
	"""
	Convenience function for extracting the angles from a bunch of mda files

	Inputs
	------
	fn_string: the path to the mda files with {:04d} (or similar) for specifying the number.
	fn_range: a two element tuple specifying the dtart and end numbers

	Outputs
	-------

	A pretty list of mda number and angle which can be cut and pasted into a google doc.

	"""
	l=[]
	for i in range(fn_range[0], fn_range[1]):
		mda = readMDA(fn_string.format(i))
		l.append((i, mda[0]['2xfm:userStringCalc10.DD'][2]))

	print 'MDA\t\tAngle (degrees)'
	pstr = '\t{:04d}\t\t{:3.2f}'
	for element in l:
		printpstr.format(element[0], element[1])
Exemplo n.º 13
0
def getData(fname):
    """Extract data from mda-ASCII file and distribute into Pixel objects
    Returns:
    XAS scan data in a detector 'object' (variable "det") energy axis,
    transmission data array and detector filled with fluo data
    (detector object simply full of '0' values if no fluorescence data available)
    Transmission data in "trans" comprises encoder_E, I0/1/2, sample_time, encoder_angle
    !! I0/1/2  are already normalised to t !!
    """
    mda = readMDA.readMDA(fname, verbose=True)
    scanData = mda[1]
    scanSize = scanData.npts
    try:
        detectorData = mda[2]
        detSize = detectorData.npts
    except:
        detSize = 0

    det = makeDet(detSize, scanSize)

    # read transmission data
    pvColumnNames = [
        'ENERGY_RBV', 'scaler1:S2C', 'scaler1:S3C', 'scaler1:S4C', 'scaler1.T',
        'BRAGG.RBV'
    ]

    trans = np.empty((len(pvColumnNames), scanSize))
    for series in scanData.d:
        try:
            tag = ':'.join(
                series.name.split(':')[1:])  # use the PV part after the IOC id
            trans[pvColumnNames.index(tag)] = series.data
        except:
            pass

    ts = trans[pvColumnNames.index('scaler1.T')]  # get the sample time "t_s"
    e = trans[pvColumnNames.index(
        'ENERGY_RBV')] * 1000.0  # Energy axis (in eV !!)

    # normalise I0, I1, I2 to sample_time ts (use string "scaler1:" as identifier)
    for i, name in enumerate(pvColumnNames):
        if name.startswith('scaler1:'):
            trans[i] = trans[i] / ts

    # read fluorescence data (if it exists)
    if detSize != 0:
        print 'reading fluorescence data...'
        for k in range(scanSize):
            for i in np.arange(detSize):
                det[i].fpeaks[k] = detectorData.d[0].data[k][i]
                det[i].speaks[k] = detectorData.d[1].data[k][i]
                det[i].roi[k] = detectorData.d[2].data[k][i]

        for i in np.arange(detSize):
            det[i].NormT(ts)  # ("ts" = sampling time per data point t_s)

    ## call dead time correction later; should be performed only on
    ## good spectra ("goodPixels") as bad spectra can contain zeros which
    ## this function would divide by
    ## call is via function:  detDeadCorr(det, goodPixels)

    return e, trans, det
Exemplo n.º 14
0
def getData(fname):
    """Extract data from mda-ASCII file and distribute into Pixel objects

    Returns: XAS scan data in a detector 'object' (variable "det")
            energy axis, transmission data array and detector filled with fluo data
    (detector object simply full of '0' values if no fluorescence data available)
    Transmission data in "trans" comprises encoder_E, I0/1/2, sample_time, encoder_angle
    !! I0/1/2  are already normalised to t !!

    """
    # get the path to the netCDF files from the mda file; they live in a
    # sibling directory named after the mda file's basename
    mda = readMDA.readMDA(fname, verbose=False)
    netcdf_basename = os.path.splitext(os.path.basename(fname))[0]
    netcdf_directory = os.path.join(os.path.dirname(os.path.abspath(fname)),
                                    netcdf_basename)
    netcdf_filepattern = '{}_([0-9]*)\.nc'.format(netcdf_basename)

    scanData = mda[1]
    scanSize = scanData.npts

    # create and set the reader for the fluorescence detector
    # NOTE(review): shape=(6, 6) presumably matches a 36-pixel detector
    # layout — confirm against the beamline configuration
    detector_data = DetectorData(shape=(6, 6), pixelsteps_per_buffer=1,
        buffers_per_file=1, dirpaths=netcdf_directory,
        filepattern=netcdf_filepattern, mca_bins=2048, first_file_n=1)

    detector = Detector(detector_data)

    # set scanSize according to the netCDF data that was available
    scanSize = highest_available_scandata(detector, scanSize)
    detector.steprange = range(scanSize)

    # read transmission data
    # The PVs listed in pvColumnNames all refer to columnar data such as axis values.
    # This is contrasted by those listed in pvSingleValues, which are all single values.
    pvColumnNames = ['EncEnergy:ActPos', 'scaler1:S2C', 'scaler1:S3C',
                     'scaler1:S4C', 'scaler1.T', 'EncAngle:ActPos']

    trans = np.empty((len(pvColumnNames), scanSize))
    for series in scanData.d:
        try:
            tag = ':'.join(series.name.split(':')[1:])  # use the PV part after the IOC id
            if tag in pvColumnNames:
                trans[pvColumnNames.index(tag)] = series.data[:scanSize]
        except Exception as e:
            # NOTE(review): if series.name itself raised, `tag` is unbound
            # here and this print would raise NameError — confirm intent
            print e
            print 'missing PV ' + tag

    ts = trans[pvColumnNames.index('scaler1.T')]    # get the sample time
    detector.set_ts(ts)
    e = trans[pvColumnNames.index('EncEnergy:ActPos')] * 1000.0  # Energy axis (in eV !!)

    # normalise I0, I1, I2 to sample_time ts (use string "scaler1:" as identifier)
    for i, name in enumerate(pvColumnNames):
        if name.startswith('scaler1:'):
            trans[i] = trans[i] / ts

    ## call dead time correction later; should be performed only on
    ## good spectra ("goodPixels") as bad spectra can contain zeros which
    ## this function would divide by
    ## call is via function:  detDeadCorr(det, goodPixels)

    return e, trans, detector
Exemplo n.º 15
0
def getData(fname):
    """Extract data from mda-ASCII file and distribute into Pixel objects

    Returns:
    XAS scan data in a detector 'object' (variable "det") energy axis,
    transmission data array and detector filled with fluo data
    (detector object simply full of '0' values if no fluorescence data available)
    Transmission data in "trans" comprises encoder_E, I0/1/2, sample_time, encoder_angle
    !! I0/1/2  are already normalised to t !!

    """
    mda = readMDA.readMDA(fname, verbose=True)
    scanData = mda[1]
    scanSize = scanData.npts
    # a 1-D scan has no mda[2] entry; treat that as "no fluorescence data"
    try:
        detectorData = mda[2]
        detSize = detectorData.npts
    except:
        detSize = 0

    det = makeDet(detSize, scanSize)

    # generate a list of scan record detectors used (as written into the base mda file)
    numD = len(scanData.d)
    pvDetectors = []
    for i in np.arange(numD):
        pvDetectors.append(':'.join(scanData.d[i].name.split(':')[1:]))
    print ''
    print 'Detector PVs found in source file: ', pvDetectors

    # test, which set of pvColumnNames is compatible with the list of scan detectors
    #  (this is for compatibility reasons pre-/post- new DCM integration)
    #
    # before DCM install:  pvColumnNames includes 'EncEnergy:ActPos' as energy PV
    # after DCM install: energy PV is now called 'ENERGY_RBV'
    #
    # any future changes to PV names:  add a corresponding list to pvColumnNames

    pvColumnNames = [[
        'EncEnergy:ActPos', 'scaler1:S2C', 'scaler1:S3C', 'scaler1:S4C',
        'scaler1.T', 'EncAngle:ActPos'
    ],
                     [
                         'ENERGY_RBV', 'scaler1:S2C', 'scaler1:S3C',
                         'scaler1:S4C', 'scaler1.T', 'BRAGG.RBV'
                     ]]

    # go through the lists of PVs and test whether these are in the data file (i.e., in 'pvDetectors');
    # choose that list which is consistent with pvDetectors
    pvNotFound = []
    dataRead = False
    for PVs in pvColumnNames[:]:
        if dataRead:
            break
        trans = np.zeros((len(PVs), scanSize))
        #
        # Step 1: test if list is complete
        for i in PVs:
            if i not in pvDetectors: pvNotFound.append(i)
        #
        # Step 2: if not, pass and go to next list;
        #         if OK, go through that list again and read in transmission channels
        if len(pvNotFound) > 0:
            pvNotFound = []
            pass
        else:
            print 'Reading PV columns and transmission data from source file: ', PVs
            print ''
            for i in PVs:
                dataIndex = pvDetectors.index(i)
                trans[PVs.index(i)] = scanData.d[dataIndex].data
            #
            # by convention the first entry of each PV list is the energy PV
            energyPV = PVs[0]
            print 'Setting energy PV to: ', energyPV
            e = trans[PVs.index(energyPV)] * 1000.0  # Energy axis (in eV !!)
            #
            sampletimePV = 'scaler1.T'
            print 'Setting sample time PV to: ', sampletimePV
            ts = trans[PVs.index(sampletimePV)]  # get the sample time "t_s"
            #
            # normalise I0, I1, I2 to sample_time ts (use string "scaler1:S" as identifier)
            print 'Normalising ion chamber signals (I0, I1, I2) to sample time.'
            for i, name in enumerate(PVs):
                if name.startswith('scaler1:S'):
                    trans[i] = trans[i] / ts
            dataRead = True

    # NOTE(review): if no PV list matched, `e` and `ts` are never bound and
    # the return below raises NameError; pvNotFound is also reset inside the
    # loop, so this branch can only trigger for the *last* list — confirm
    if len(pvNotFound) > 0:
        print 'ERROR -- one or more detector PV(s) not found: '
        # HACK(review): `stop` is undefined; the NameError it raises is
        # immediately swallowed, so this block is effectively a no-op
        try:
            stop
        except:
            pass


#    for series in scanData.d:
#        tag = ':'.join(series.name.split(':')[1:])  # use the PV part after the Beamline/IOC id
#        if tag in pvColumnNames :
#            print 'tag found: ', tag
#            trans[pvColumnNames.index(tag)] = series.data
#        else:
#            print 'at least one detector PV not found (',tag,'); trying different PV list list'

# read transmission data
#    trans = np.empty((len(pvColumnNames), scanSize))
#    for series in scanData.d:
#        try:
#            tag = ':'.join(series.name.split(':')[1:])  # use the PV part after the IOC id
#            trans[pvColumnNames.index(tag)] = series.data
#        except:
#            pass

#    ts = trans[pvColumnNames.index('scaler1.T')]    # get the sample time "t_s"
#    e = trans[pvColumnNames.index('EncEnergy:ActPos')] * 1000.0   # Energy axis (in eV !!)

# read fluorescence data (if it exists)
    if detSize != 0:
        print 'reading fluorescence data...'
        for k in range(scanSize):
            for i in np.arange(detSize):
                det[i].fpeaks[k] = detectorData.d[0].data[k][i]
                det[i].speaks[k] = detectorData.d[1].data[k][i]
                det[i].roi[k] = detectorData.d[2].data[k][i]

        for i in np.arange(detSize):
            det[i].NormT(ts)  # ("ts" = sampling time per data point t_s)

    ## call dead time correction later; should be performed only on
    ## good spectra ("goodPixels") as bad spectra can contain zeros which
    ## this function would divide by
    ## call is via function:  detDeadCorr(det, goodPixels)

    return e, trans, det
Exemplo n.º 16
0
 def simple_load_test(self):
     """Smoke test: a known-good mda file parses and reports rank 1."""
     sample = os.path.join(TESTDATA_DIR, MDA_FILE)
     parsed = readMDA.readMDA(sample, verbose=False)
     self.assertEqual(parsed[0]['rank'], 1)
Exemplo n.º 17
0
def getData(fname):
    """Extract data from mda-ASCII file and distribute into Pixel objects

    Returns: XAS scan data in a detector 'object' (variable "det")
            energy axis, transmission data array and detector filled with fluo data
    (detector object simply full of '0' values if no fluorescence data available)
    Transmission data in "trans" comprises encoder_E, I0/1/2, sample_time, encoder_angle
    !! I0/1/2  are already normalised to t !!

    """
    # get the path to the netCDF files from the mda file
    mda = readMDA.readMDA(fname, verbose=False)
    netcdf_basename = os.path.splitext(os.path.basename(fname))[0]
    netcdf_directory = os.path.join(os.path.dirname(os.path.abspath(fname)),
                                    netcdf_basename)
    netcdf_filepattern = '{}_([0-9]*)\.nc'.format(netcdf_basename)

    scanData = mda[1]
    scanSize = scanData.npts

    # create and set the reader for the fluorescence detector
    detector_data = DetectorData(shape=(6, 6),
                                 pixelsteps_per_buffer=1,
                                 buffers_per_file=1,
                                 dirpaths=netcdf_directory,
                                 filepattern=netcdf_filepattern,
                                 mca_bins=2048,
                                 first_file_n=1)

    detector = Detector(detector_data)

    # set scanSize according to the netCDF data that was available
    scanSize = highest_available_scandata(detector, scanSize)
    detector.steprange = range(scanSize)

    # read transmission data
    # The PVs listed in pvColumnNames all refer to columnar data such as axis values.
    # This is contrasted by thoselisted in pvSingleValues, which are all single values.
    pvColumnNames = [
        'EncEnergy:ActPos', 'scaler1:S2C', 'scaler1:S3C', 'scaler1:S4C',
        'scaler1.T', 'EncAngle:ActPos'
    ]

    trans = np.empty((len(pvColumnNames), scanSize))
    for series in scanData.d:
        try:
            tag = ':'.join(
                series.name.split(':')[1:])  # use the PV part after the IOC id
            if tag in pvColumnNames:
                trans[pvColumnNames.index(tag)] = series.data[:scanSize]
        except Exception as e:
            print e
            print 'missing PV ' + tag

    ts = trans[pvColumnNames.index('scaler1.T')]  # get the sample time
    detector.set_ts(ts)
    e = trans[pvColumnNames.index(
        'EncEnergy:ActPos')] * 1000.0  # Energy axis (in eV !!)

    # normalise I0, I1, I2 to sample_time ts (use string "scaler1:" as identifier)
    for i, name in enumerate(pvColumnNames):
        if name.startswith('scaler1:'):
            trans[i] = trans[i] / ts

    ## call dead time correction later; should be performed only on
    ## good spectra ("goodPixels") as bad spectra can contain zeros which
    ## this function would divide by
    ## call is via function:  detDeadCorr(det, goodPixels)

    return e, trans, detector