def PlotData(directory, filname):
    """Load an SPC spectrum and save a plot of it as a PNG.

    Parameters
    ----------
    directory : str
        Base directory containing 'spcFiles/' and 'pngFiles/' subfolders.
    filname : str
        File name without the '.spc' extension.
    """
    specdata = spc.File(directory + 'spcFiles/' + filname + '.spc')

    Plot = specdata.plot(filname)
    Plot.savefig(directory + "pngFiles/" + filname + '.png')

    # Was a Python 2 print statement (a SyntaxError under Python 3);
    # use the print() function like the rest of this file.
    print('specdata:', type(specdata))
예제 #2
0
    def convert(self):
        """Convert every .spc file in the selected folder to delimited text.

        Reads the folder path and output format ('txt' or anything else,
        which defaults to 'csv') from the GUI widgets, then writes one
        .txt/.csv file next to each .spc file found.
        """
        self.fol_val = str(self.folder.get())
        self.fmt_val = str(self.output_fmt.get())
        print("About to convert {} with {} ext".format(self.fol_val, self.fmt_val))

        # Choose extension and delimiter from the requested format.
        if self.fmt_val == 'txt':
            exten = '.txt'
            delim = '\t'
        else:
            # defaults
            exten = '.csv'
            delim = ','

        # only directory here
        ffn = os.path.abspath(self.fol_val)
        flist = [os.path.join(ffn, f) for f in os.listdir(ffn)]

        # process files
        for fpath in flist:
            if fpath.lower().endswith('spc'):
                foutp = fpath[:-4] + exten
                try:
                    print(fpath, end=' ')
                    f = spc.File(fpath)
                    f.write_file(foutp, delimiter=delim)
                    print('Converted')
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt
                    # and SystemExit are not swallowed.
                    print('Error processing %s' % fpath)
            else:
                print('%s not spc file, skipping' % fpath)
 def _read_file(self):
     """
     Parse the raw SPC file at self.spc_file and cache its x axis on
     self.x_values and the first sub-file's raw y values on self.spectra.
     """
     parsed = spc.File(self.spc_file)
     self.x_values = parsed.x
     self.spectra = parsed.sub[0].y
예제 #4
0
def readSpcFtirData(spcFile):
    """Parse an FTIR .spc file into a list of two-element string rows,
    dropping the header line."""
    instrumentData = spc.File(spcFile)
    rows = []
    for line in instrumentData.data_txt().split("\n"):
        if line == '':
            continue
        # Each data line carries a trailing tab, so keep only the
        # first two fields.
        rows.append(line.split("\t")[0:2])
    # The first row is column headers; return everything after it.
    return rows[1:]
예제 #5
0
def open_spc_spcl(file_path):
    """Open a spectrum file: .spc via the spc module, .spcl via pickle.

    Parameters
    ----------
    file_path : str
        Path ending in '.spc' or '.spcl'.

    Returns
    -------
    The parsed spc.File object (or whatever object was pickled).

    Raises
    ------
    ValueError
        For any other extension. (The original fell through and raised
        UnboundLocalError on the return statement instead.)
    """
    # Plain SPC binary file.
    if file_path.endswith(".spc"):
        spc_file = spc.File(file_path)
    # .spcl is a home-made pickled container format.
    elif file_path.endswith(".spcl"):
        # NOTE: pickle.load executes arbitrary code from the file; only
        # open .spcl files from a trusted source.
        with open(file_path, 'rb') as f:
            spc_file = pickle.load(f)
    else:
        raise ValueError("Unsupported file type: %s" % file_path)
    return spc_file
예제 #6
0
def loadData(folder, fileName):
    """Load an x/y spectrum from a .spc, .csv or .prn file.

    Parameters
    ----------
    folder : str
        Directory containing the file.
    fileName : str
        File name including extension (case-insensitive extension match).

    Returns
    -------
    (ev, counts)
        Two 1-D arrays: x values and intensities.

    Raises
    ------
    FileTypeError
        For any unsupported extension.
    """
    # os.path.join instead of a hard-coded '\\' so this works on any OS;
    # endswith() accepts a tuple, replacing the chained `or` tests.
    fpath = os.path.join(folder, fileName)
    if fileName.endswith((".spc", ".SPC")):
        f = spc.File(fpath)
        ev = f.x
        counts = f.sub[0].y
    elif fileName.endswith((".CSV", ".csv", ".PRN", ".prn")):
        # load data from text file; skip_footer=1 drops the trailing line
        ev, counts = np.genfromtxt(fpath, skip_footer=1, unpack=True, usecols=(0, 1))
    else:
        raise FileTypeError(fileName)

    return ev, counts
예제 #7
0
    def read(self):
        """Parse self.filename with the optional `spc` package and return
        the resulting table (multi-x or single-x layout)."""
        try:
            import spc
        except ImportError:
            raise RuntimeError("To load spc files install spc python module (https://github.com/rohanisaac/spc)")

        parsed = spc.File(self.filename)
        # Files with per-subfile axis labels need the multi-x reader.
        reader = self.multi_x_reader if parsed.talabs else self.single_x_reader
        return reader(parsed)
예제 #8
0
def _readSPC(path, spectra, **kwargs):
    """Read an SPC file and append each of its sub-spectra to `spectra`.

    The spc module prints while parsing, so stdout is temporarily set to
    None (print() is a no-op while sys.stdout is None).

    Parameters
    ----------
    path : str
        Path to the .spc file; extra kwargs are forwarded to spc.File.
    spectra :
        Container with an addSpectrum(x, y) method; mutated in place.
    """
    # Save a reference (no need to copy the stream object) and silence output.
    _stdout = sys.stdout
    sys.stdout = None
    try:
        spc_file = spc.File(path, **kwargs)
    finally:
        # Restore stdout even if parsing raises — the original leaked a
        # silenced stdout on error.
        sys.stdout = _stdout

    wvnmbrs = spc_file.x
    # Append the new spectrum/spectra
    for signal in spc_file.sub:
        spectra.addSpectrum(wvnmbrs, signal.y)
예제 #9
0
def asdLoadPortableSampleData(portableDataFile, dataProduct):
    """Replace all AsdSample rows for `dataProduct` with the
    wavelength/absorbance pairs read from the portable .spc data file."""
    # clear out any old data
    AsdSample.objects.filter(dataProduct=dataProduct).all().delete()

    if not portableDataFile:
        return

    instrumentData = spc.File(portableDataFile)
    # Take slice b/c data has trailing tab
    rows = []
    for line in instrumentData.data_txt().split("\n"):
        if line != '':
            rows.append(line.split("\t")[0:2])

    # Slice starting @ 1 because the first row is the header
    for wl, ab in rows[1:]:
        AsdSample(dataProduct=dataProduct,
                  wavelength=wl,
                  absorbance=ab).save()
예제 #10
0
파일: rvd.py 프로젝트: sanket-desai/rvd-git
 def convert(self):
     """Convert every .spc file in the input folder to CSV, then build an
     LDA matrix from the converted files via self.csvtoldamatrix.

     Folder paths are read from the GUI widgets; a trailing '/' is
     ensured on both. On a conversion failure the offending paths are
     printed and the process exits with a non-zero status.
     """
     self.fol_val = str(self.folder.get())
     outfol_val = str(self.outputdir.get())
     if not outfol_val.endswith("/"):
         outfol_val = outfol_val + "/"
     # Output format is fixed to CSV.
     exten = '.csv'
     delim = ','
     flist = []
     onlyfilename = []
     # ffn is the absolute input folder name, with trailing separator.
     ffn = os.path.abspath(self.fol_val) + "/"
     for f in os.listdir(ffn):
         flist.append(os.path.join(ffn, f))
         onlyfilename.append(f)
     # process files
     for ind in range(0, len(flist)):
         fpath = flist[ind]
         if fpath.lower().endswith('spc'):
             foutp = fpath[:-4] + exten
             try:
                 f = spc.File(fpath)
                 f.write_file(foutp, delimiter=delim)
             except Exception:
                 # Narrowed from a bare `except:`; exit status changed
                 # from 0 to 1 so callers can detect the failure.
                 print('Error processing %s' % fpath)
                 print(fpath)
                 print(foutp)
                 sys.exit(1)
     # Map each converted file name to a sequential test label.
     sampleinfo = {}
     testind = 1
     for f in onlyfilename:
         sampleinfo[f.replace(".spc", ".csv")] = "test" + str(testind)
         testind = testind + 1
     print(sampleinfo)
     self.csvtoldamatrix(ffn, sampleinfo, outfol_val)
예제 #11
0
def main():
    """Gooey/CLI entry point: convert every .spc file in a chosen
    directory to delimited text (.csv by default, .txt with -t)."""
    desc = 'Converts *.spc binary files to text using the spc module'
    parser = GooeyParser(description=desc)
    parser.add_argument('filefolder',
                        widget='DirChooser',
                        help='Input directory containing spc file')
    fformat = parser.add_mutually_exclusive_group()
    fformat.add_argument('-c',
                         '--csv',
                         help='Comma separated output file (.csv) [default]',
                         action='store_true')
    fformat.add_argument('-t',
                         '--txt',
                         help='Tab separated output file (.txt)',
                         action='store_true')
    args = parser.parse_args()

    # Choose extension and delimiter; CSV is the default.
    if args.txt:
        exten = '.txt'
        delim = '\t'
    else:
        # defaults
        exten = '.csv'
        delim = ','

    # only directory here
    ffn = os.path.abspath(args.filefolder)
    flist = [os.path.join(ffn, f) for f in os.listdir(ffn)]

    # process files
    for fpath in flist:
        if fpath.lower().endswith('spc'):
            foutp = fpath[:-4] + exten
            try:
                print(fpath, end=' ')
                f = spc.File(fpath)
                f.write_file(foutp, delimiter=delim)
                print('Converted')
            except Exception:
                # Narrowed from a bare `except:` so Ctrl-C still works.
                print('Error processing %s' % fpath)
        else:
            print('%s not spc file, skipping' % fpath)
예제 #12
0
def read_spectra(spectral_file, start_x=600, end_x=1450):
    """
    Read a .spc spectral file (via https://github.com/rohanisaac/spc),
    take the first derivative of its y values over (start_x, end_x), and
    return that derivative normalized by its Euclidean norm.
    :param spectral_file: A .spc file produced by our super duper FTIR machine.
    :param start_x: Start of interval over which we want to look at data
    :param end_x: End of interval over which we want to look at data
    :return: Normalized first derivative over the interval specified.
    """
    assert end_x > start_x
    data = spc.File(spectral_file)
    xs = data.x
    ys = data.sub[0].y
    derivative = []
    # Walk from the high-index end down to index 1 (index 0 excluded),
    # keeping the backward difference wherever x lies strictly inside
    # the (start_x, end_x) window.
    for idx in range(len(xs) - 2, 0, -1):
        if start_x < xs[idx] < end_x:
            derivative.append(ys[idx] - ys[idx + 1])
    # A zero norm would need an all-zero spectrum — deliberately not handled.
    return derivative / np.linalg.norm(derivative)
def fill_df():
    """Scan the current working directory for .spc files, loading each
    one's wavelengths and %R values into the global df_init dataframe;
    returns the number of files loaded."""
    global df_init
    global file_list
    loaded = 0
    # Each directory entry is a candidate spectrum file name.
    for name in os.listdir():
        if not name.endswith('spc'):
            continue
        file_list.append(name)
        loaded += 1
        # Spectra object exposing the x (wavelength) and y (%R) arrays.
        spectrum = spc.File(name)
        # One wavelength column per file, numbered by load order.
        df_init['wavelength' + str(loaded)] = spectrum.x
        # The %R column is keyed by the file name itself.
        df_init[name] = spectrum.sub[0].y
    return loaded
예제 #14
0
import spc #https://github.com/rohanisaac/spc
import os


def findFiles(path, extension):
    """Return a sorted list of paths under `path` whose names end with
    `extension` (e.g. '.spc').

    Uses os.path.join so the result is correct whether or not `path`
    ends with a separator (the original concatenated strings directly,
    producing e.g. 'dirname.spc' for path='dir').
    """
    files = []
    for entry in os.listdir(path):
        if entry.endswith(extension):
            files.append(os.path.join(path, entry))
    files.sort()
    return files

# Convert every .spc file in the current directory to a .csv written to
# the working directory.
path = './'

files = findFiles(path, ".spc")

for i in files:
    # Strip the directory part and the 4-character '.spc' extension.
    filename = i.split('/')[-1][:-4]
    f = spc.File(i)
    f.write_file(filename + '.csv')
예제 #15
0
파일: SPC_to_h5.py 프로젝트: ww334/nplab
def extractRamanSpc(path, bg_path=False, combine_statics=False):
    '''Takes all .spc files from a directory and creates Raman_Spectrum object for each and also background subtracts, if specified
       .spc files must be directly exported at time of measurement. If .wdf file was re-opened with WiRE and then saved as .spc, use old code ('2017-04-14_Spectra_Class')
       Plots ASCII table with relevant metadata. Set table=False to omit this
       Also plots table for background files if user specifies bg_table = True'''
    '''Actual power values for each % laser power in μW. Measured on 09/05/2017.'''

    # Converted from a Python 2 print statement (SyntaxError under Py3).
    print('\nGathering .spc (meta)data...\n')

    # Measured absolute power (μW) per % laser power setting, one table
    # per laser wavelength.
    p532 = {
        0.0001: 0.01,
        0.05: 4.75,
        0.1: 12.08,
        0.5: 49.6,
        1.0: 88.1,
        5.0: 666,
        10.0: 1219,
        50.0: 5360,
        100.0: 9650
    }

    p633 = {
        0.0001: 0.01,
        0.05: 1,
        0.1: 2,
        0.5: 10,
        1.0: 20,
        5.0: 112,
        10.0: 226,
        50.0: 1130,
        100.0: 2200
    }

    p785 = {
        0.0001: 0.17,
        0.05: 8.8,
        0.1: 19.1,
        0.5: 47.8,
        1.0: 104,
        5.0: 243,
        10.0: 537,
        50.0: 1210,
        100.0: 2130
    }

    powerConverter = {
        532: p532,
        633: p633,
        785: p785
    }  #Assigns each laser power dictionary to the appropriate wavelength.

    os.chdir(path)
    spcFiles = [f for f in os.listdir('.') if f.endswith('.spc')]
    spectra = []

    for n, spcFile in enumerate(spcFiles):
        filename = spcFile[:-4]  #Removes extension from filename string
        f = spc.File(spcFile)  #Create File object from .spc file
        laserWl = int(
            f.log_dict['Laser'][7:10]
        )  #Grabs appropriate part of laser wavelength entry from log and converts to integer (must be 3 characters long)

        # Some exports prefix the log key with a space; pick whichever exists.
        lpKeys = ['Laser_power', ' Laser_power']

        for lpKey in lpKeys:

            if lpKey in f.log_dict.keys():
                break

        try:
            laserPower = float(
                f.log_dict[lpKey][13:-1]
            )  #Grabs numeric part of string containing laser power info and converts to float

        except Exception:
            # Narrowed from a bare `except:`; missing/malformed entries
            # leave the power undefined instead of crashing.
            laserPower = 'Undefined'

        if laserPower in [0.0001, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 50.0, 100.0]:
            absLaserPower = float(
                powerConverter[laserWl][laserPower]
            ) / 1000  #Returns absolute laser power (in mW), given laser wavelength and % laser power.

        else:
            absLaserPower = 'Undefined'  #To avoid errors if laser power is not recorded correctly

        try:
            integrationTime = float(f.log_dict['Exposure_time'][6:])
        except Exception:
            # Fall back to the space-prefixed variant of the log key.
            integrationTime = float(f.log_dict[' Exposure_time'][6:])

        wavenumbers = f.x  #Pulls x data from spc file
        nScans = int(
            f.__dict__['fnsub']
        )  #Number of Raman spectra contained within the spc file (>1 if file contains a kinetic scan)
        ramanIntensities = [f.sub[i].y for i in range(nScans)
                            ]  #Builds list of y data arrays

        metadata = f.__dict__  #Pulls metadata dictionary from spc file for easy access

        if absLaserPower != 'Undefined':
            absRamanIntensities = [
                (spectrum * 1000) / (absLaserPower * integrationTime)
                for spectrum in ramanIntensities
            ]

        else:
            absRamanIntensities = ['N/A'] * nScans

        if nScans == 1:
            ramanIntensities = ramanIntensities[
                0]  #Reduces to single array if not a kinetic scan
            absRamanIntensities = absRamanIntensities[0]  #Also for this

        spectra.append(
            Raman_Spectrum(filename, metadata, laserWl, laserPower,
                           absLaserPower, integrationTime, nScans, wavenumbers,
                           ramanIntensities, absRamanIntensities))

    return spectra
def DataExtrating(directory, filname):
    """Convert one .spc file to a text file and return its text form.

    Parameters
    ----------
    directory : str
        Base directory containing 'spcFiles/' and 'txtFiles/' subfolders.
    filname : str
        File name without extension.

    Returns
    -------
    str
        The text representation produced by spc's data_txt().
    """
    specdata = spc.File(directory + 'spcFiles/' + filname + '.spc')

    specdata.write_file(directory + 'txtFiles/' + filname + '.txt')
    # Generate the text form once on return (the original called
    # data_txt() twice and discarded the first result).
    return specdata.data_txt()
예제 #17
0
def extractRamanSpc(path, bg_path=False, combine_statics=False, xaxis='wn'):
    '''Takes all .spc files from a directory and creates Raman_Spectrum object for each and also background subtracts, if specified
       .spc files must be directly exported at time of measurement. If .wdf file was re-opened with WiRE and then saved as .spc, use old code ('2017-04-14_Spectra_Class')
       Plots ASCII table with relevant metadata. Set table=False to omit this
       Also plots table for background files if user specifies bg_table = True'''
    '''Actual power values for each % laser power in μW. Measured on 09/05/2017.'''
    # xaxis: 'wn' keeps the raw f.x axis; 'wl' converts it via
    # 1e7 * (1/laserWl - 1/f.x).
    # NOTE: log_dict keys are looked up as bytes (b'Laser', ...) — under
    # Python 3 the spc module apparently returns byte-string keys here.

    print('\nGathering .spc (meta)data...\n')

    # Measured absolute power (μW) per % laser power setting, one table
    # per laser wavelength.
    p532 = {
        0.0001: 0.01,
        0.05: 4.75,
        0.1: 12.08,
        0.5: 49.6,
        1.0: 88.1,
        5.0: 666.,
        10.0: 1219.,
        50.0: 5360.,
        100.0: 9650.
    }

    p633 = {
        0.0001: 0.01,
        0.05: 1.,
        0.1: 2.,
        0.5: 10.,
        1.0: 20.,
        5.0: 112.,
        10.0: 226.,
        50.0: 1130.,
        100.0: 2200.
    }

    p785 = {
        0.0001: 0.17,
        0.05: 8.8,
        0.1: 19.1,
        0.5: 47.8,
        1.0: 104.,
        5.0: 243.,
        10.0: 537.,
        50.0: 1210.,
        100.0: 2130.
    }

    powerConverter = {
        532: p532,
        633: p633,
        785: p785
    }  #Assigns each laser power dictionary to the appropriate wavelength.

    os.chdir(path)
    # Files are processed in modification-time order so spectra pair up
    # with creation_times below.
    spcFiles_full = sorted(os.listdir('.'), key=os.path.getmtime)
    # spcFiles_full = sorted(os.listdir('.'), key=os.path.getctime)
    print(spcFiles_full)
    spcFiles = [f for f in spcFiles_full if f.endswith('.spc')]
    creation_times = [time.ctime(os.path.getmtime(f)) for f in spcFiles]
    # creation_times = [time.ctime(os.path.getctime(f)) for f in spcFiles]
    spectra = []

    for n, spcFile in enumerate(spcFiles):
        #try:
        filename = spcFile[:-4]  #Removes extension from filename string
        f = spc.File(spcFile)  #Create File object from .spc file

        # Some exports prefix log keys with a space; pick whichever exists.
        # laserWlKeys = ['Laser', ' Laser']
        laserWlKeys = [b'Laser', b' Laser']

        for laserWlKey in laserWlKeys:

            if laserWlKey in list(f.log_dict.keys()):
                break

        laserWl = int(
            f.log_dict[laserWlKey][7:10]
        )  #Grabs appropriate part of laser wavelength entry from log and converts to integer (must be 3 characters long)

        lpKeys = [b'Laser_power', b' Laser_power']

        for lpKey in lpKeys:

            if lpKey in list(f.log_dict.keys()):
                break

        try:
            laserPower = float(
                f.log_dict[lpKey][13:-1]
            )  #Grabs numeric part of string containing laser power info and converts to float

        except:
            laserPower = 'Undefined'

        if laserPower in [0.0001, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 50.0, 100.0]:
            absLaserPower = float(
                powerConverter[laserWl][laserPower]
            ) / 1000  #Returns absolute laser power (in mW), given laser wavelength and % laser power.

        else:
            # NOTE(review): unlike the sibling implementation, an unknown
            # power maps to 0 here, so absRamanIntensities divides by 0
            # below — producing inf/nan rather than 'N/A'. Confirm intent.
            # absLaserPower = 'Undefined' #To avoid errors if laser power is not recorded correctly
            absLaserPower = 0

        try:
            integrationTime = float(f.log_dict[b'Exposure_time'][6:])
        except:
            # Fall back to the space-prefixed variant of the log key.
            integrationTime = float(f.log_dict[b' Exposure_time'][6:])

        accumulations = int(f.log_dict[b'Accumulations'][15:])

        if xaxis == 'wn':
            wavenumbers = f.x  #Pulls x data from spc file
        elif xaxis == 'wl':
            wavenumbers = 1e7 * (1 / laserWl - 1 / f.x)

        nScans = int(
            f.__dict__['fnsub']
        )  #Number of Raman spectra contained within the spc file (>1 if file contains a kinetic scan)
        ramanIntensities = np.array([f.sub[i].y for i in range(nScans)
                                     ])  #Builds list of y data arrays

        # assumes the grating log entry carries digits at [9:13] — TODO confirm
        grating = float(f.log_dict[b'Grating_grooves'][9:13])

        metadata = f.__dict__  #Pulls metadata dictionary from spc file for easy access

        if absLaserPower != 'Undefined':
            absRamanIntensities = [
                old_div(
                    (spectrum * 1000),
                    (absLaserPower * integrationTime * float(accumulations)))
                for spectrum in ramanIntensities
            ]

        else:
            absRamanIntensities = ['N/A'] * nScans

        if nScans == 1:
            ramanIntensities = ramanIntensities[
                0]  #Reduces to single array if not a kinetic scan
            absRamanIntensities = absRamanIntensities[0]  #Also for this

        creation_time = creation_times[n]
        spectra.append(
            Raman_Spectrum(creation_time, filename, metadata, laserWl,
                           laserPower, absLaserPower, grating, integrationTime,
                           accumulations, nScans, wavenumbers,
                           ramanIntensities, absRamanIntensities))

        #except Exception as e:
        #    print 'Something went wrong with %s:' % filename
        #    print e
        #    continue

    return spectra
예제 #18
0
 def _open(self):
     """Read the requested spc file into numpy form and record its length."""
     import spc
     # Resolve the request to a local file path and parse it.
     self._fp = self.request.get_local_filename()
     parsed = spc.File(self._fp)
     self._data = self._spc_to_numpy(parsed)
     self._length = len(self._data)
예제 #19
0
from __future__ import absolute_import, unicode_literals, print_function, division
import os
import spc

# Round-trip test: parse each .spc file in test_data/, write it back out,
# re-parse, and compare the text representations.
tfile = 0  # files attempted
tpass = 0  # files whose round-trip matched

dpath = os.path.join(os.path.dirname(__file__), 'test_data')
# mfile: files whose round-trip output did not match.
# rfile / lfile: declared for symmetry with the sibling script; never
# populated here.
mfile = []
rfile = []
lfile = []
for filename in os.listdir(dpath):
    if filename[-3:].lower() == 'spc':
        tfile += 1
        print(filename)
        f1 = spc.File(os.path.join(dpath, filename))

        # Write the parsed file back out under test_data/spc2/.
        outfile = os.path.join(dpath, 'spc2', filename + '.spc')
        f1.write_spc(outfile)

        # now, try loading and comparing
        f2 = spc.File(outfile)

        if f1.data_txt() == f2.data_txt():
            print("Pass\n------")
            tpass += 1
        else:
            print("Fail\n------")
            mfile.append(filename)
print("Passed %i of %i tests. " % (tpass, tfile))
print("Did not match ref file: ", mfile)
예제 #20
0
#!/usr/bin/env python
from __future__ import absolute_import, unicode_literals, print_function, division
import os
import spc

# Regression test: compare each parsed .spc file's text output against a
# pre-generated reference .txt in test_data/txt2/.
tfile = 0  # files attempted
tpass = 0  # files matching their reference

dpath = os.path.join(os.path.dirname(__file__), 'test_data')
# mfile: mismatching files. rfile / lfile: printed below but never
# populated in this script.
mfile = []
rfile = []
lfile = []
for i in os.listdir(dpath):
    if i[-3:].lower() == 'spc':
        tfile += 1
        print(i)
        f1 = spc.File(os.path.join(dpath, i))
        outfile = os.path.join(dpath, 'txt2', i + '.txt')
        with open(outfile, 'r') as fin:
            dat = fin.read()
            if f1.data_txt() == dat:
                print("Pass\n------")
                tpass += 1
            else:
                print("Fail\n------")
                mfile.append(i)
print("Passed %i of %i tests. " % (tpass, tfile))
print("Did not match ref file: ", mfile)
print("Did not have ref file: ", rfile)
print("Did not load file: ", lfile)
    def convert(self):
        """Analyze grouped .spc measurements ('SomeFileName_<grain>_<n>.spc')
        from the input folder: per grain, average the measurements, compute
        SEM, and optionally save a PNG plot and/or a delimited data file to
        the output folder. Progress is reported via self.progress_var.
        """

        #Get Folder names
        self.fol_val        = str( self.folder.get() )
        self.fol_out_val    = str( self.output_folder.get() )

        ffn_out             = os.path.abspath(self.fol_out_val)

        #Get output format (txt or csv)
        self.fmt_val        = str( self.output_fmt.get() )

        #Get whether to plot or not
        self.fmt_plot_val   = bool( self.output_plots.get() )

        #Inform user of starting conversions
        print( "Analyzing {} with {} ext".format(self.fol_val, self.fmt_val) )

        #Figure output file extension and delimiter
        if self.fmt_val == 'txt':
            exten = '.txt'
            delim = '\t'
        else:
            #Default is CSV
            exten = '.csv'
            delim = ','

        #Initiate grain and measurement indices
        self.grain          = 0
        self.measurement    = 0

        #Build list for files
        flist = []

        #Get file directory from folder value
        ffn = os.path.abspath( self.fol_val )

        #Check for output folder location. If none, default to Input folder
        out_str = self.fol_out_val.replace(" ","")

        #If No output folder selected
        if out_str == "":
            #Default to input folder
            self.fol_out_val = self.fol_val
            self.output_folder.set( value = self.fol_out_val )

        #Find how many grain files exist in folder
        self.grain_count = 0
        for f in os.listdir( ffn ):

            #Check extension
            if f.lower().endswith('spc'):

                #Split file name by underscores
                f_split = f.split("_")
                
                #Ensure correct format for tracking
                if len(f_split) >= 2:

                    #Look at second to last in split ( SomeFileName_1_1.spc )
                    if int( f_split[-2] ) > self.grain_count:
                        self.grain_count = int( f_split[-2] )

        #Consider grain count
        if self.grain_count == 0:
            self.output_message("Input Folder Empty", "Your input folder is either empty or the spc files are not configured correctly.", "Close", None)

        #Cycle through each grain count and append files
        # NOTE(review): the directory is re-listed once per grain (O(n^2));
        # fine for small folders.
        for i in range(self.grain_count):

            #Build Measurements list
            mlist = []
            
            #Append sorted files to the filelist
            for f in os.listdir( ffn ):

                #Check extension
                if f.lower().endswith('spc'):
                    
                    fl = f

                    #Split file name to find current
                    f_split = fl.split("_")

                    #Ensure correct format for tracking
                    if len(f_split) >= 2:

                        #Check if current file is the current grain
                        if int( f_split[-2] ) == i+1:
                            #print( f )
                            #Add to measurement list
                            mlist.append( os.path.join( ffn , f) )

            #Append grain i+1 files
            flist.append( mlist )

        ######  STOPPED HERE! FILES SORTED BY GRAIN. START ANALYZING ######

        #Check if any output is selected. If not, make a silly message
        output_data = bool( self.output_fmt_bool.get() )

        output_plot = bool( self.output_plots.get() )

        if (not output_data) and (not output_plot):
            self.output_message( "What are you doing?", "... You have to select some output method...\n\nI mean, I could analyze this for you but you won't see it.", "Oh yeah, duh!", None  )

        #Else, continue analyzing
        else:
            prog_incr = 100./len( flist )

            #Look at grain i

            #Loop through i grains
            for i in range( len( flist ) ):                     #3

                tot_grain_data_list     = []                    #All unanalyzed grain data
                ave_grain_data_list     = []                    #Averaged data per grain
                sem_grain_data_list     = []                    #SEM data per grain

                #Loop through j measurements   
                for j in range( len( flist[i] ) ):              #5

                    #Open file into array
                    f       = spc.File( flist[i][j] )
                    xy_data = f.data_list()                     #xy data for grain i, measurement j

                    #Append collected data to the grain_data_list for later processing
                    tot_grain_data_list.append( xy_data )

                #print( len(tot_grain_data_list) )    #total measurements
                #print( len(tot_grain_data_list[0]) ) #x and y

                #Average arrays (function? OUT = Single array)
                normalized_to = 1.0

                ave_grain_data_list.append( self.grain_average( tot_grain_data_list, normalized_to ) )

                #Find SEM in arrays (function? OUT = Single array of +/-)
                sem_grain_data_list.append( self.SEM_calc( tot_grain_data_list, ave_grain_data_list[0] ) )

                #print( sem_grain_data_list[0][0][:5] )

                #if i==0:
                #    print( sem_grain_data_list[0][0][:10] )

                #If plot, save plot data as PNG
                if output_plot:
                    
                    #Plot normalized and averaged values
                    figsize = ( 16, 9 )
                    plt.figure( figsize = figsize, dpi = 600 )

                    plt.plot( ave_grain_data_list[0][0], ave_grain_data_list[0][1], color = '#000099', lw = 0.5 )

                    #Calc total SEM Values
                    sem_above = []
                    sem_below = []

                    

                    for k in range( len( sem_grain_data_list[0][0] ) ):

                        sem_above.append( ave_grain_data_list[0][1][k] + sem_grain_data_list[0][1][k] )
                        # NOTE(review): sem_below uses index [0][0] where
                        # sem_above uses [0][1] — presumably SEM_calc returns
                        # (negative, positive) offsets; confirm, else this is
                        # adding x-column data.
                        sem_below.append( ave_grain_data_list[0][1][k] + sem_grain_data_list[0][0][k] )

                    #Plot +/- SEM
                    plt.fill_between( ave_grain_data_list[0][0], sem_below, sem_above, facecolor = '#9999FF', interpolate = True )

                    plt.xlabel( 'cm^-1' )
                    plt.ylabel( 'Intensity (Normalized to {})'.format( normalized_to ) )

                    plt.ylim( 0., normalized_to*1.1 )

                    plt.xticks( np.arange( min( ave_grain_data_list[0][0] ), max( ave_grain_data_list[0][0] )+1, 500.) )
                    plt.yticks( np.arange( 0., normalized_to*1.1, step = 0.2*normalized_to ) ) 

                    data_line = mpatches.Patch(color='#000099', label='Averaged Data' )
                    sem_colors = mpatches.Patch(color='#9999FF', label='SEM Range')

                    plt.legend(handles=[data_line, sem_colors])

                    plt.title( "Averaged Data in Grain {}".format( ( i+1 ) ) )

                    plt.grid(True)

                    plt.savefig( os.path.join(ffn_out, "Grain {} - Averaged_RAMAN.png".format( (i+1) )) )
                        
                #If output type enabled, output data type
                if output_data:

                    #Open File
                    # NOTE(review): plain open/close — a `with` block would
                    # guarantee closure if a write fails mid-loop.
                    fle     = open( os.path.join(ffn_out, "Grain {} - Averaged_RAMAN{}".format( (i+1), exten )) , 'w' )

                    #Create Header
                    header  = "x" + delim + "y-Ave" + delim + "y-SEM\n"
                    fle.write( header )

                    #Cycle through 'x' values for SEM and
                    for l in range( len( sem_grain_data_list[0][0] ) ):
                        line = str( ave_grain_data_list[0][0][l] )
                        line += delim + str( ave_grain_data_list[0][1][l] )
                        line += delim + str( sem_grain_data_list[0][1][l] )
                        line += "\n"
                        fle.write( line )
                    
                    #Close File
                    fle.close()

                #Update Progress bar
                self.progress_var.set( prog_incr*(i+1) )
                self.mf.update_idletasks()

            self.progress_var.set( 0. )
            self.mf.update_idletasks()
예제 #22
0
def extractRamanSpc(path, bg_path=False, combine_statics=False):
    '''Build a Raman_Spectrum object for every .spc file in a directory.

    .spc files must be directly exported at time of measurement. If a .wdf
    file was re-opened with WiRE and then saved as .spc, use the old code
    ('2017-04-14_Spectra_Class').

    Parameters
    ----------
    path : str
        Directory containing the .spc files. The working directory is
        changed to this path as a side effect.
    bg_path : str or bool, optional
        Currently unused; reserved for background subtraction.
    combine_statics : bool, optional
        Currently unused; reserved for combining static scans.

    Returns
    -------
    list of Raman_Spectrum
    '''
    print('\nGathering .spc (meta)data...\n')

    # Absolute laser power for each % laser power setting, in uW, keyed by
    # excitation wavelength (nm). Measured on 09/05/2017.
    p532 = {
        0.0001: 0.01,
        0.05: 4.75,
        0.1: 12.08,
        0.5: 49.6,
        1.0: 88.1,
        5.0: 666.,
        10.0: 1219.,
        50.0: 5360.,
        100.0: 9650.
    }

    p633 = {
        0.0001: 0.01,
        0.05: 1.,
        0.1: 2.,
        0.5: 10.,
        1.0: 20.,
        5.0: 112.,
        10.0: 226.,
        50.0: 1130.,
        100.0: 2200.
    }

    p785 = {
        0.0001: 0.17,
        0.05: 8.8,
        0.1: 19.1,
        0.5: 47.8,
        1.0: 104.,
        5.0: 243.,
        10.0: 537.,
        50.0: 1210.,
        100.0: 2130.
    }

    # Maps each laser wavelength to its %-power -> absolute-power table.
    powerConverter = {532: p532, 633: p633, 785: p785}

    # The % power values for which a calibration entry exists (same keys in
    # all three tables above).
    validPowers = (0.0001, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 50.0, 100.0)

    def _decodeDict(raw):
        '''Return a copy of raw with bytes keys/values decoded to str.
        Entries whose value cannot be decoded are dropped (legacy
        behaviour of the original decode loop).'''
        clean = {}
        for key, value in raw.items():
            if isinstance(key, bytes):
                key = key.decode()
            if isinstance(value, bytes):
                try:
                    value = value.decode()
                except Exception:
                    # Undecodable binary payload: skip this entry entirely.
                    continue
            clean[key] = value
        return clean

    os.chdir(path)
    spcFiles = [f for f in os.listdir('.') if f.endswith('.spc')]
    spectra = []

    for spcFile in spcFiles:
        filename = spcFile[:-4]  # Strip the '.spc' extension
        f = spc.File(spcFile)  # Create File object from .spc file

        metadata = _decodeDict(f.__dict__)
        fLogDict = _decodeDict(f.log_dict)

        # Grabs appropriate part of laser wavelength entry from log and
        # converts to integer (value must be 3 characters long).
        laserWl = int(fLogDict['Laser'][7:10])

        if 'Laser_power' in fLogDict:
            # Numeric part of the string containing laser power info.
            laserPower = float(fLogDict['Laser_power'][13:-1])
        else:
            laserPower = 'Undefined'

        if laserPower in validPowers:
            # Absolute laser power in mW, given wavelength and % power
            # (calibration tables are in uW, hence / 1000).
            absLaserPower = float(powerConverter[laserWl][laserPower]) / 1000
        else:
            # Avoids errors if laser power is not recorded correctly.
            absLaserPower = 'Undefined'

        integrationTime = float(fLogDict['Exposure_time'][6:])
        accumulations = fLogDict['Accumulations'].split(': ')[1]

        wavenumbers = f.x  # x data shared by all sub-spectra in the file
        # Number of Raman spectra contained within the spc file
        # (>1 if the file contains a kinetic scan).
        nScans = int(metadata['fnsub'])
        ramanIntensities = np.array([f.sub[i].y for i in range(nScans)])

        if absLaserPower != 'Undefined':
            # Normalise counts by power * time * accumulations. The y data
            # are float ndarrays, so plain '/' reproduces the legacy
            # old_div (Python-2 shim) result exactly.
            norm = absLaserPower * integrationTime * float(accumulations)
            absRamanIntensities = [(spectrum * 1000) / norm
                                   for spectrum in ramanIntensities]
        else:
            absRamanIntensities = ['N/A'] * nScans

        if nScans == 1:
            # Reduce to single arrays if not a kinetic scan.
            ramanIntensities = ramanIntensities[0]
            absRamanIntensities = absRamanIntensities[0]

        spectra.append(
            Raman_Spectrum(filename, metadata, laserWl, laserPower,
                           absLaserPower, integrationTime, accumulations,
                           nScans, wavenumbers, ramanIntensities,
                           absRamanIntensities))

    return spectra
예제 #23
0
    def __import_data(self):
        """Read all measurement files and assemble self.spectral_data.

        Builds self.file_list (one row per file with coded x/y/z
        coordinates), reads intensities according to
        self.measurement_type, and stores the result in
        self.spectral_data: a DataFrame indexed by the (x, y, z)
        MultiIndex with wavenumber (rounded to 2 decimals) columns.
        """
        # If no explicit file list was given, collect every file with the
        # configured extension from the data directory.
        if self.file_names is None:
            self.file_names = glob.glob(self.directory + '*.' +
                                        self.file_extension)

        # One row per file; coordinate columns start at zero and are filled
        # in below depending on the measurement type.
        self.file_list = pd.DataFrame(self.file_names,
                                      columns=['file_name'],
                                      index=np.arange(len(self.file_names)))
        self.file_list['x_coded'] = np.zeros(len(self.file_names), dtype=int)
        self.file_list['y_coded'] = np.zeros(len(self.file_names), dtype=int)
        self.file_list['z_coded'] = np.zeros(len(self.file_names), dtype=int)

        if self.measurement_type in [
                'Raman_volume', 'Raman_x_scan', 'Raman_y_scan', 'Raman_z_scan',
                'Raman_xy_scan', 'Raman_single_spectrum'
        ]:
            # Raman scans: coordinates are parsed out of the file names
            # (pattern '__X_<val>__Y_<val>__Z_<val>__') and scaled to
            # integers via self.coord_conversion_factor. Which axes are
            # extracted depends on the scan type.
            if self.measurement_type in [
                    'Raman_volume', 'Raman_x_scan', 'Raman_xy_scan'
            ]:
                # x coordinate: value between '__X_' and '__Y_'.
                self.file_list.iloc[:, 1] = (
                    pd.to_numeric(self.file_list.iloc[:, 0].str.extract(
                        r'__X_([-*\d*.*\d*]*)\__Y_', expand=False)) *
                    self.coord_conversion_factor).astype(int)
            if self.measurement_type in ['Raman_volume', 'Raman_y_scan']:
                # y coordinate: value between '__Y_' and '__Z_'.
                self.file_list.iloc[:, 2] = (
                    pd.to_numeric(self.file_list.iloc[:, 0].str.extract(
                        r'__Y_([-*\d*.*\d*]*)\__Z_', expand=False)) *
                    self.coord_conversion_factor).astype(int)
            if self.measurement_type in ['Raman_xy_scan']:
                # xy scans have no Z segment after Y, so the y value is
                # terminated by a plain '__'.
                self.file_list.iloc[:, 2] = (
                    pd.to_numeric(self.file_list.iloc[:, 0].str.extract(
                        r'__Y_([-*\d*.*\d*]*)\__', expand=False)) *
                    self.coord_conversion_factor).astype(int)
            if self.measurement_type in ['Raman_volume', 'Raman_z_scan']:
                # z coordinate: value between '__Z_' and '__'.
                self.file_list.iloc[:, 3] = (
                    pd.to_numeric(self.file_list.iloc[:, 0].str.extract(
                        r'__Z_([-*\d*.*\d*]*)\__', expand=False)) *
                    self.coord_conversion_factor).astype(int)

            # Order files by z, then y, then x so rows line up with the
            # MultiIndex built at the end, then renumber from 0.
            self.file_list = self.file_list.sort_values(
                by=['z_coded', 'y_coded', 'x_coded'])
            self.file_list.index = pd.RangeIndex(len(self.file_list.index))

            # Files appear to be whitespace-separated alternating
            # wavenumber/intensity pairs: even entries are wavenumbers
            # (taken from the first file), odd entries are intensities.
            wavenumbers = np.fromfile(self.file_list['file_name'][0],
                                      sep=' ')[::2]
            intensities = np.zeros(
                (len(self.file_list.index), wavenumbers.size))

            for index, curr_index in enumerate(tqdm(self.file_list.index)):
                intensities[index] = np.fromfile(self.file_list.iloc[index, 0],
                                                 sep=' ')[1::2]

        # Inline_IR and LSM still have to get their own classes
        elif self.measurement_type == 'Inline_IR':
            # A single .spc file holding a time series: one sub-spectrum per
            # time point, all sharing the same x axis.
            spectrum_data = spc.File(self.file_list.iloc[0, 0])
            number_of_spectra = len(spectrum_data.sub)
            wavenumbers = spectrum_data.x
            intensities = np.zeros((number_of_spectra, len(spectrum_data.x)))
            time_data = np.zeros(number_of_spectra)

            for index, curr_spec in enumerate(tqdm(spectrum_data.sub)):
                intensities[index, :] = curr_spec.y
                time_data[index] = curr_spec.subtime

            # Repeat the single file row once per sub-spectrum and use the
            # (scaled) acquisition time as the first coordinate.
            self.file_list = self.file_list.loc[self.file_list.index.repeat(
                number_of_spectra)].reset_index(drop=True)
            self.file_list.iloc[:,
                                1] = (pd.Series(time_data) *
                                      self.coord_conversion_factor).astype(int)

        # Is still experimental, especially correct coordinates are missing and
        # possibly not working for not square images
        elif self.measurement_type == 'LSM':
            # Image stack: each file is one z slice; every pixel becomes a
            # row with the channel values as the "spectrum".
            # read first image to get image dimensions
            first_image = imageio.imread(self.file_list.iloc[0, 0])
            pixels_per_image = np.shape(first_image)[0] * np.shape(
                first_image)[1]
            number_of_images = len(self.file_list.index)

            # NOTE(review): the reshape below assumes 3-channel (RGB)
            # images — confirm for other image modes.
            intensities = np.zeros((number_of_images * pixels_per_image,
                                    np.shape(first_image)[2]),
                                   dtype='uint8')
            # Pixel coordinates: z from the image index, x/y from the pixel
            # position within each image.
            z_coords = np.repeat(np.arange(number_of_images), pixels_per_image)
            x_coords = np.tile(
                np.repeat(np.arange(np.shape(first_image)[0]),
                          np.shape(first_image)[1]), number_of_images)
            y_coords = np.tile(
                np.tile(np.arange(np.shape(first_image)[1]),
                        np.shape(first_image)[0]), number_of_images)

            # Channel indices stand in for wavenumbers here.
            wavenumbers = np.arange(np.shape(first_image)[2])

            for index, curr_file in enumerate(tqdm(self.file_list.iloc[:, 0])):
                intensities[index * pixels_per_image:(index + 1) *
                            pixels_per_image, :] = np.reshape(
                                imageio.imread(curr_file), (-1, 3))

            # Rebuild file_list with one row per pixel instead of per file.
            self.file_list = pd.DataFrame(np.stack([
                np.repeat(self.file_list.iloc[:, 0], pixels_per_image),
                x_coords, y_coords, z_coords
            ]).T,
                                          columns=self.file_list.columns)

        # Index the spectral data by the coded (x, y, z) coordinates; columns
        # are the wavenumbers rounded to 2 decimal places.
        hyperspectral_image_index = pd.MultiIndex.from_frame(
            self.file_list.iloc[:, 1:4])
        self.spectral_data = pd.DataFrame(intensities,
                                          index=hyperspectral_image_index,
                                          columns=np.around(wavenumbers, 2))