Example No. 1
    def beginReduction(self, pathname):
        '''
        Processing script, reducing images to 1D plots (Q-Chi, Texture, etc)
        '''
        print('\n')
        print(
            '******************************************** Begin image reduction...'
        )
        pixelSize = 79  # detector pixel size, measured in microns

        # pathname was imageFullName
        folder_path = os.path.dirname(pathname)
        filename = os.path.basename(pathname)
        # fileRoot was imageFilename
        fileRoot, ext = os.path.splitext(filename)
        index = re.match(r'.*?([0-9]+)\.[a-zA-Z]+$', filename).group(1)
        base_filename = re.match(r'(.*?)[0-9]+\.[a-zA-Z]+$',
                                 filename).group(1)  # filename without index

        # Master CSV path
        masterPath = os.path.join(folder_path, base_filename + 'master.csv')

        # generate a folder to put processed files
        save_path = os.path.join(self.processedPath, 'Processed')
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        # make master index (vestigial)
        master_index = str(int(random.random() * 100000000))

        attDict = dict.fromkeys([
            'scanNo', 'SNR', 'textureSum', 'Imax', 'Iave', 'I_ratio',
            'numPeaks'
        ])

        ###### BEGIN READING CALIB FILE #################################################
        # initializing params, transform the calibration parameters from WxDiff to Fit2D
        d_in_pixel = float(str(self.detectorData[0]))
        Rotation_angle = float(str(self.detectorData[1]))
        tilt_angle = float(str(self.detectorData[2]))
        lamda = float(str(self.detectorData[3]))
        x0 = float(str(self.detectorData[4]))
        y0 = float(str(self.detectorData[5]))
        #d_in_pixel, Rotation_angle, tilt_angle, lamda, x0, y0 = parse_calib(calibPath)
        Rot = (np.pi * 2 - Rotation_angle) / (2 * np.pi) * 360  # detector rotation
        tilt = tilt_angle / (2 * np.pi) * 360  # detector tilt
        d = d_in_pixel * pixelSize * 0.001  # measured in millimeters

        ###### BEGIN PROCESSING IMAGE####################################################
        # import image and convert it into an array
        self.imArray = load_image(pathname)

        # data_reduction to generate 1D spectra, Q
        Qlist, IntAve = self.data_reduction(d, Rot, tilt, lamda, x0, y0,
                                            pixelSize)

        ###### SAVE PLOTS ###############################################################
        # save 1D spectra as a *.csv
        save_1Dcsv(Qlist, IntAve, fileRoot, save_path)

        ###### EXTRACT ATTRIBUTES #######################################################
        # extract composition information if the information is available
        # extract the number of peaks in 1D spectra as attribute3 by default
        newRow3, peaks = ext_peak_num(Qlist, IntAve, index)
        attDict['numPeaks'] = len(peaks)
        #attribute3.append(newRow3)
        #attributes = np.array(attribute3)

        # save 1D plot with detected peaks shown in the plot
        if self.QRange:
            titleAddStr = ', Q:' + str(self.QRange) + ', Chi:' + str(
                self.ChiRange)
        else:
            titleAddStr = '.'
        save_1Dplot(Qlist,
                    IntAve,
                    peaks,
                    fileRoot,
                    save_path,
                    titleAdd=titleAddStr)

        if True:
            # extract maximum/average intensity from 1D spectra as attribute1
            newRow1 = ext_max_ave_intens(IntAve, index)
            attDict['scanNo'], attDict['Imax'], attDict['Iave'], attDict[
                'I_ratio'] = newRow1
            #attribute1.append(newRow1)
            #attributes = np.concatenate((attribute1, attributes), axis=1)

        #if True:
        ## save 1D texture spectra as a plot (*.png) and *.csv
        #Qlist_texture, texture = save_texture_plot_csv(Q, chi, cake, fileRoot, save_path)
        ## extract texture square sum from the 1D texture spectra as attribute2
        #newRow2 = ext_text_extent(Qlist_texture, texture, index)
        #attDict['textureSum'] = newRow2[1]
        ##attribute2.append(newRow2)
        ##attributes = np.concatenate((attribute2, attributes), axis=1)

        if False:
            # extract neighbor distances as attribute4
            newRow4 = nearst_neighbor_distance(index, Qlist, IntAve,
                                               folder_path, save_path,
                                               base_filename,
                                               num_of_smpls_per_row)
            #attribute4.append(newRow4)
            #attributes = np.concatenate((attribute4, attributes), axis=1)

        if True:
            # extract signal-to-noise ratio
            try:
                newRow5 = ext_SNR(index, IntAve)
            except Exception:
                traceback.print_exc()
                self.emit(
                    SIGNAL("addToConsole(PyQt_PyObject)"),
                    "----------------ERROR: Optimal parameters not found.----------------"
                )
                self.abort_flag = True
                return
            attDict['SNR'] = newRow5[1]
            #attribute5.append(newRow5)
            #attributes = np.concatenate((attribute5, attributes), axis=1)

        # add features (floats) to master metadata
        attDict['scanNo'] = int(index)
        addFeatsToMaster(attDict, masterPath)
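
The calibration block above converts the WxDiff parameters (radians and pixel
units) into the Fit2D conventions (degrees and millimeters) that the reduction
expects. A minimal standalone sketch of that conversion, with a hypothetical
helper name:

import numpy as np

def wxdiff_to_fit2d(d_in_pixel, rotation_rad, tilt_rad, pixel_size_um=79):
    """Hypothetical helper mirroring the conversion in beginReduction."""
    rot_deg = np.degrees(2 * np.pi - rotation_rad)  # detector rotation, degrees
    tilt_deg = np.degrees(tilt_rad)                 # detector tilt, degrees
    d_mm = d_in_pixel * pixel_size_um * 1e-3        # detector distance, mm
    return d_mm, rot_deg, tilt_deg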

Example No. 2
def peakFitBBA(filepath, config):
    '''
    Wrapper for Bayesian Block Analysis of 1D plots.
    Takes a file path; assumes the 1D files live in (filepath.dirname)/Processed/
    '''
    print('\n')
    print('******************************************** Begin peak fitting...')
    ##############################################################
    ############ Parse filepath input ############################

    processedPath = os.path.join(os.path.dirname(filepath), 'Processed/')
    folder_path = os.path.dirname(filepath)
    filename = os.path.basename(filepath)
    fileRoot, ext = os.path.splitext(filename)

    savePath = processedPath + 'peak_details/'
    csvFilepath = os.path.join(processedPath, fileRoot + '_1D.csv')

    # Generate Master CSV path
    ### filename without index
    base_filename = re.match(r'(.*?)[0-9]+\.[a-zA-Z]+$', filename).group(1)
    ### index
    index = re.match(r'.*?([0-9]+)\.[a-zA-Z]+$', filename).group(1)
    masterPath = os.path.join(folder_path, base_filename + 'master.csv')
    attDict = {'scanNo': index}

    if not config:
        peakShape = 'Voigt'
        numCurves = 2
        fit_order = 2
        useBkgdImg = False  # avoid a NameError below when no config is given
    else:
        peakShape = config['peakShape']
        numCurves = config['peakNo']
        fit_order = config['fit_order']
        useBkgdImg = config['bkgdImg']
        print('config read')
    ##############End Input#######################################
    ##############################################################

    if not os.path.exists(savePath):
        os.makedirs(savePath)

    peakCnt = 0
    # File data into array
    print(csvFilepath)
    data = np.genfromtxt(csvFilepath, delimiter=',')
    Qlist = data[:, 0]
    IntAve = data[:, 1]
    dataArray = np.array([Qlist, IntAve])

    ##############################################################
    #### Data Structure object instantiation (data, fit_order, ncp_prior)
    dataIn = BlockData(dataArray, fit_order, 0.5, peakShape)
    #### has various functions
    ##############################################################

    dataIn.trimData(trimLen=50)

    if useBkgdImg:  # if a background image has been supplied
        print(config['bkgdPath'])
        bkgdData = np.genfromtxt(config['bkgdPath'], delimiter=',')
        bkgdX = bkgdData[:, 0]
        bkgdY = bkgdData[:, 1]

        dataIn.bkgdSubImg(np.array([bkgdX, bkgdY]))
    # background subtracted with polynomial of order = fit_order, trims ends
    elif isinstance(fit_order, str):
        dataIn.bkgdSub()  # trim using Chebyshev
    else:
        dataIn.bkgdSubPoly(fit_order=fit_order)

    # Plot bkgdSub Data
    plt.figure(figsize=(8, 8))
    plt.plot(Qlist, IntAve, label='Raw data', marker='s', color='k')
    plt.plot(dataIn.subData[0],
             dataIn.bkgd,
             '--',
             label='Background',
             color='g')
    plt.plot(dataIn.subData[0],
             dataIn.subData[1],
             label='Background subtracted',
             color='r')
    try:
        plt.plot(dataIn.downData[0, :],
                 dataIn.downData[1, :],
                 label='Downsampled',
                 color='b',
                 marker='o',
                 linestyle='None')
    except Exception as e:
        print(e)

    plt.legend()
    plt.savefig(savePath + basename(csvFilepath)[:-7] + '_plot.png')
    plt.close()

    save_1Dcsv(dataIn.subData[0], dataIn.subData[1], fileRoot + '_bkgdSub',
               savePath)

    # Guess at noise level
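    # intensities at or below the median are assumed to be mostly baseline,
    # so their standard deviation gives a rough estimate of the noise floor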
    hld = dataIn.subData
    sigmaGuess = np.std(hld[1][hld[1] <= np.median(hld[1])])

    dataIn.cellData = sigmaGuess * np.ones(len(dataIn.subData[0]))

    # incorporate block information into data struct
    dataIn.blockFinder()

    # Get optimized parameters from fitting each block and plot
    paramDict, litFWHM = bumpFindFit(dataIn, peakShape, numCurves, config,
                                     savePath,
                                     basename(csvFilepath)[:-7])

    # Print information to terminal, print data to csv
    print('---------Fitting Finished')
    print('Fit ({0}) curve(s) per peak'.format(paramDict['numCurves']))
    print('Using ({0}) peak shape'.format(paramDict['peakShape']))

    # Generate residual plot using stored optParams
    pctErr = dataIn.genResidPlot(savePath, csvFilepath)

    genOptParamCSV(savePath, csvFilepath, paramDict)

    genPeakReportCSV(savePath, filepath, litFWHM, pctErr)

    ###############################################################################
    # Add features to master metadata
    # hard pull items for now
    attDict['scanNo'] = int(index)
    (attDict['FSDP_loc'], attDict['FSDP_FWHM'], attDict['FSDP_Intens'],
     attDict['FSDP_yMax']) = findFitFSDP(paramDict, litFWHM, config)
    (attDict['maxPeak_loc'], attDict['maxPeak_FWHM'],
     attDict['maxPeak_Intens']) = findFitMaxPeak(paramDict, litFWHM, config)

    addFeatsToMaster(attDict, masterPath)
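
A hedged usage sketch for peakFitBBA, assuming the matching 1D reduction has
already produced Processed/sample_0001_1D.csv; the path and values below are
hypothetical, while the keys match those read from config above:

config = {
    'peakShape': 'Voigt',  # passed to BlockData and bumpFindFit
    'peakNo': 2,           # curves fit per detected peak
    'fit_order': 2,        # polynomial order for background subtraction
    'bkgdImg': False,      # set True and add 'bkgdPath' to subtract a background scan
}
peakFitBBA('/data/run42/sample_0001.tif', config)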

Example No. 3
def SAXSDimReduce(calibPath, pathname, config):  #QRange=None, ChiRange=None):
    '''
    Processing script, reducing images to 1D plots (Q-Chi, Texture, etc)
    '''
    print('\n')
    print(
        '******************************************** Begin image reduction...'
    )
    # PP: beam polarization, according to beamline setup.
    # Contact beamline scientist for this number
    PP = 0.95
    pixelSize = 79  # detector pixel size, measured in microns

    # pathname was imageFullName
    folder_path = os.path.dirname(pathname)
    filename = os.path.basename(pathname)
    # fileRoot was imageFilename
    fileRoot, ext = os.path.splitext(filename)
    index = re.match(r'.*?([0-9]+)\.[a-zA-Z]+$', filename).group(1)
    base_filename = re.match(r'(.*?)[0-9]+\.[a-zA-Z]+$',
                             filename).group(1)  # filename without index

    # Master CSV path
    masterPath = os.path.join(folder_path, base_filename + 'master.csv')

    # generate a folder to put processed files
    save_path = os.path.join(folder_path, 'Processed')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    # make master index (vestigial)
    master_index = str(int(random.random() * 100000000))

    attDict = dict.fromkeys(
        ['scanNo', 'SNR', 'textureSum', 'Imax', 'Iave', 'I_ratio', 'numPeaks'])
    #attribute1=[['scan#', 'Imax', 'Iave', 'Imax/Iave']]
    #attribute2=[['scan#', 'texture_sum']]
    #attribute3=[['scan#', 'peak_num']]
    #attribute4=[['scan#', 'neighbor_distance']]
    #attribute5=[['scan#', 'SNR']]

    ###### BEGIN READING CALIB FILE #################################################
    # initializing params, transform the calibration parameters from WxDiff to Fit2D
    d_in_pixel, Rotation_angle, tilt_angle, lamda, x0, y0 = parse_calib(
        calibPath)
    Rot = (np.pi * 2 - Rotation_angle) / (2 * np.pi) * 360  # detector rotation
    tilt = tilt_angle / (2 * np.pi) * 360  # detector tilt
    d = d_in_pixel * pixelSize * 0.001  # measured in millimeters

    print('Processing image file: ' + pathname)
    if not config:
        print('no config file found')
        QRange, ChiRange = None, None  # no limits; avoids a NameError below
    else:
        QRange = (config['Qmin'], config['Qmax'])
        ChiRange = (config['ChiMin'], config['ChiMax'])
        # require all bounds to exist, currently can't check default limits
        if (any(isinstance(n, str) for n in QRange)
                or any(isinstance(m, str) for m in ChiRange)):
            print('Pass found, ignoring Q,Chi limits')
            QRange, ChiRange = None, None

        print('config read')

    ###### BEGIN PROCESSING IMAGE####################################################
    # import image and convert it into an array
    imArray = load_image(pathname)

    # data_reduction to generate Q-chi and 1D spectra, Q
    Q, chi, cake, Qlist, IntAve = data_reduction(imArray,
                                                 d,
                                                 Rot,
                                                 tilt,
                                                 lamda,
                                                 x0,
                                                 y0,
                                                 PP,
                                                 pixelSize,
                                                 QRange=QRange,
                                                 ChiRange=ChiRange)

    ###### SAVE PLOTS ###############################################################
    # save Qchi as a plot *.png and *.mat
    save_Qchi(Q, chi, cake, fileRoot, save_path)
    # save 1D spectra as a *.csv
    save_1Dcsv(Qlist, IntAve, fileRoot, save_path)

    ###### EXTRACT ATTRIBUTES #######################################################
    # extract composition information if the information is available
    # extract the number of peaks in 1D spectra as attribute3 by default
    newRow3, peaks = ext_peak_num(Qlist, IntAve, index)
    attDict['numPeaks'] = len(peaks)
    #attribute3.append(newRow3)
    #attributes = np.array(attribute3)

    # save 1D plot with detected peaks shown in the plot
    if QRange:
        titleAddStr = ', Q:' + str(QRange) + ', Chi:' + str(ChiRange)
    else:
        titleAddStr = '.'
    save_1Dplot(Qlist,
                IntAve,
                peaks,
                fileRoot,
                save_path,
                titleAdd=titleAddStr)

    if True:
        # extract maximum/average intensity from 1D spectra as attribute1
        newRow1 = ext_max_ave_intens(IntAve, index)
        attDict['scanNo'], attDict['Imax'], attDict['Iave'], attDict[
            'I_ratio'] = newRow1
        #attribute1.append(newRow1)
        #attributes = np.concatenate((attribute1, attributes), axis=1)

    if True:
        # save 1D texture spectra as a plot (*.png) and *.csv
        Qlist_texture, texture = save_texture_plot_csv(Q, chi, cake, fileRoot,
                                                       save_path)
        # extract texture square sum from the 1D texture spectra as attribute2
        newRow2 = ext_text_extent(Qlist_texture, texture, index)
        attDict['textureSum'] = newRow2[1]
        #attribute2.append(newRow2)
        #attributes = np.concatenate((attribute2, attributes), axis=1)

    if False:
        # extract neighbor distances as attribute4
        newRow4 = nearst_neighbor_distance(index, Qlist, IntAve, folder_path,
                                           save_path, base_filename,
                                           num_of_smpls_per_row)
        #attribute4.append(newRow4)
        #attributes = np.concatenate((attribute4, attributes), axis=1)

    if True:
        # extract signal-to-noise ratio
        newRow5 = ext_SNR(index, IntAve)
        attDict['SNR'] = newRow5[1]
        #attribute5.append(newRow5)
        #attributes = np.concatenate((attribute5, attributes), axis=1)

    # add features (floats) to master metadata
    attDict['scanNo'] = int(index)
    addFeatsToMaster(attDict, masterPath)
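
A similar hedged call sketch for SAXSDimReduce; the calibration file is
whatever parse_calib expects (WxDiff output, per the comments above), and the
paths and Q/Chi limits here are hypothetical:

config = {
    'Qmin': 0.02, 'Qmax': 0.6,    # Q window handed to data_reduction (units assumed)
    'ChiMin': -90, 'ChiMax': 90,  # azimuthal window in degrees (assumed)
}
SAXSDimReduce('/data/run42/LaB6.calib', '/data/run42/scan1_0001.tif', config)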