Example 1
    def __init__(self):
        cwd = os.getcwd()
        sys.path.append(cwd)
        tspath = getTopOfSuitesDir()
        assert os.path.exists(tspath)
        ls = os.listdir(tspath)
        ls = [os.path.join(tspath, f) for f in ls]
        dirs = [(os.path.join(cwd, d), []) for d in ls
                if os.path.isdir(d) and '.svn' not in d]

        toFind = ['TestCase.py', 'RTTSummaryReference.xml']
        for fn in toFind:
            print()
            for (dir, missing) in dirs:
                if not os.path.exists(os.path.join(dir, fn)):
                    missing.append(fn)

        fn = 'RTTSummary.xml'
        for (dir, missing) in dirs:
            if len(listFiles(dir, fn)) != 1:
                missing.append(fn)

        self.okdirs = [d for (d, missing) in dirs if len(missing) == 0]
        self.baddirs = [(d, missing) for (d, missing) in dirs
                        if len(missing) != 0]
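
Note: every example on this page calls an externally defined listFiles helper. Below is a minimal sketch consistent with the call sites that follow (a root directory, a glob pattern or bare extension, and in Example 2 an extra file-name filter); it is an assumption about the originals, which come from two different codebases, not their actual implementation:

import fnmatch
import os

def listFiles(root, pattern, nameFilter=''):
    # Recursively collect files under root whose names match pattern.
    # A bare extension such as 'bin' is treated as '*.bin'; nameFilter,
    # if given, must also occur in the file name (hypothetical argument
    # matching the three-argument call in Example 2).
    if '*' not in pattern and '.' not in pattern:
        pattern = '*.' + pattern
    matches = []
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if fnmatch.fnmatch(name, pattern) and nameFilter in name:
                matches.append(os.path.join(dirpath, name))
    return matches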
Example 2
def FCSLoadG(fnameRoot, folderName="", printFileNames=True):
    G = correlations()
    files = listFiles(folderName, "csv", fnameRoot)
    for file in files:
        setattr(G, stripGfname(file, fnameRoot, printFileNames), csv2array(file, ','))
    # dwell time in µs, read from the first lag of the last file in the loop
    G.dwellTime = 1e6 * csv2array(file, ',')[1, 0]
    print('--------------------------')
    print(str(len(files)) + ' files found.')
    print('--------------------------')
    return G
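
The helpers correlations, stripGfname, and csv2array above are not shown. Here is a sketch of csv2array consistent with how it is indexed above ([row, col] on a 2-D array); assumed behavior, not the original SPAD-FCS helper:

import numpy as np

def csv2array(path, delimiter=','):
    # Load a CSV file into a 2-D NumPy array (assumes a plain numeric CSV).
    return np.loadtxt(path, delimiter=delimiter)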
Example 3
    def setUp(self):
        if 'dom' in self.__dict__: return
        infileName = listFiles('.', 'RTTSummary.xml')[0]
        with open(infileName) as infile:
            self.dom = xml.dom.minidom.parseString(infile.read())
        with open('RTTSummaryReference.xml') as refFile:
            self.refDom = xml.dom.minidom.parseString(refFile.read())

        self.logfile = open('TestCase.log', 'w')
Example 4
def allBin2Pickle(
    directory='C:\\Users\\SPAD-FCS\\OneDrive - Fondazione Istituto Italiano Tecnologia'
):
    """
    Convert all FCS bin files in the given directory to picle files.
    Files that already already have a bin file are skipped
    return parameter: data from the last file

    """

    binfiles = listFiles(directory, 'bin')
    picklefiles = listFiles(directory, 'pickle')

    data = []

    for file in binfiles:
        picklefile = file[0:-4] + '_data.pickle'  # swap the .bin extension
        if picklefile not in picklefiles:
            print(file)
            data = binFile2Data(file, storePickle=True)

    print('Done.')
    return data
Example 5
def FCSBinToCSVAll(folderName=[],
                   Glist=['central', 'sum3', 'sum5', 'chessboard', 'ullr'],
                   split=10):
    # PARSE INPUT
    if folderName == []:
        folderName = getcwd()
    folderName = folderName.replace("\\", "/")
    folderName = Path(folderName)

    # CHECK BIN FILES
    allFiles = listFiles(folderName, 'bin')

    # GO THROUGH EACH FILE
    for file in allFiles:
        fileName = ntpath.basename(file)
        print("File found: " + fileName)
        G, data = FCSLoadAndCorrSplit(file, Glist, 50, split)
        corr2csv(G, file[0:-4], [0, 0], 0)
Example 6
def logFileChecker(root, globpar, strings):

    res = 'looking for files below %s matching %s\n' % (root, globpar)
    logfiles = listFiles(root, globpar)
    res += 'Found %d log files\n' % len(logfiles)

    checker = Checker(strings)

    for f in logfiles:
        checker.checkForStrings(f)

    res += '\nerror strings:\n'
    counts = checker.dict

    res += 'Occurrences   text\n'

    for s in counts:
        if counts[s] > 0: res += '%10d   %s\n' % (counts[s], s)

    res += '\n Strings searched for:\n'
    for s in counts:
        res += s + '\n'

    for s in checker.fdict.keys():
        if len(checker.fdict[s]):
            res += '\n\n"%s" was found in %d files:\n' % (
                s, len(checker.fdict[s]))
            for f in checker.fdict[s]:
                res += f + '\n'
        else:
            res += '\n"%s" was found in no files\n\n' % s

    res += '\n\n%d files with no strings:\n\n' % (
        list(checker.sdict.values()).count([]))

    for f in checker.sdict.keys():
        if not checker.sdict[f]: res += f + '\n'

    return res
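
The Checker class used above is not shown. Here is a sketch inferred from the attributes the report reads (dict counts occurrences per string, fdict maps each string to the files containing it, sdict maps each file to the strings found in it); an assumption, not the original implementation:

class Checker:
    def __init__(self, strings):
        self.dict = {s: 0 for s in strings}    # string -> total occurrences
        self.fdict = {s: [] for s in strings}  # string -> files containing it
        self.sdict = {}                        # file -> strings found in it

    def checkForStrings(self, fname):
        # Count every search string in one file and record where it occurs.
        with open(fname, errors='replace') as f:
            text = f.read()
        self.sdict[fname] = []
        for s in self.dict:
            n = text.count(s)
            if n:
                self.dict[s] += n
                self.fdict[s].append(fname)
                self.sdict[fname].append(s)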
Example 7
def plotPyCorrAll(folderName=[]):
    if folderName == []:
        folderName = getcwd()
    folderName = folderName.replace("\\", "/")
    folderName = Path(folderName)
    fileList = listFiles(folderName, 'csv')
    data = np.empty((0, 3), float)
    # go through each file
    for file in fileList:
        fileName = ntpath.basename(file)
        if ("chunk" not in file and "Gn" not in file
                and "sum3_average_fit_results" in file
                and "SPAD" in fileName):
            # file found
            print(fileName + " found.")
            if "fit_results" in file:
                # file with fit found
                result = plotPyCorrFit(file, savefig=0)
                data = np.append(
                    data, [[result.Gfitstart, result.tauD, result.chi2]],
                    axis=0)
            else:
                # file with experimental G found
                plotGcsv(file, savefig=0)
    return data
Example 8
def doit():
    cwd = os.getcwd()
    sdir = os.path.join(cwd, getSuiteDir())
    if not os.path.exists(sdir):
        print('Non-existent test suite directory', sdir)
        sys.exit(0)
    rfile = os.path.join(sdir, 'RTTSummaryReference.xml')
    if not os.path.exists(rfile):
        print('No reference file', rfile)
        sys.exit(0)

    parser = xml.sax.make_parser()
    handler = KFHandler()
    parser.setContentHandler(handler)
    parser.parse(rfile)
    rkfiles = handler.kfiles

    lfiles = listFiles(sdir, 'RTTSummary.xml')
    if len(lfiles) != 1:
        print('error looking for RTTSummaryFile', lfiles)
        sys.exit(0)
        
    handler = KFHandler()
    parser.setContentHandler(handler)
    parser.parse(lfiles[0])
    skfiles = handler.kfiles
    
    diff = [f for f in rkfiles if f not in skfiles]
    pp = pprint.PrettyPrinter()
    print()
    print(' in Reference and not in Summary')
    pp.pprint(diff)

    diff = [f for f in skfiles if f not in rkfiles]
    print()
    print(' in Summary and not in Reference')
    pp.pprint(diff)
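
KFHandler is not shown; from its use it is a SAX ContentHandler that collects file names into a kfiles list. A sketch assuming the names sit in the text of <keepfile> elements (the element name is a guess, not taken from the source):

from xml.sax.handler import ContentHandler

class KFHandler(ContentHandler):
    def __init__(self):
        super().__init__()
        self.kfiles = []
        self._chars = None  # accumulates text while inside a keepfile element

    def startElement(self, name, attrs):
        if name == 'keepfile':  # hypothetical element name
            self._chars = []

    def characters(self, content):
        if self._chars is not None:
            self._chars.append(content)

    def endElement(self, name):
        if name == 'keepfile':
            self.kfiles.append(''.join(self._chars).strip())
            self._chars = None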
Example 9
                m += '\n   string: %s\n' % s
                for l in self.dict[s]:
                    m += '      %s' % l

        return m

    def dump(self):
        print(self)

with open(sys.argv[1]) as config:
    lines = config.readlines()

lines = [l for l in lines if not l.startswith('#')]
lines = [stripstr(l) for l in lines]
lines = [s for s in lines if s]
root = lines[0]
globpar = lines[1]
strings = lines[2:]


print('looking for files below', root, 'matching', globpar)
logfiles = listFiles(root, globpar)
print('Found %d files' % len(logfiles))


for f in logfiles:
    Checker(f, strings).dump()

Example 10
def FCS2docx(folderName=[], Mtype='25ch'):
    """
    Analyze FCS data and store results in image files and a .docx file
    ===========================================================================
    Input           Meaning
    ---------------------------------------------------------------------------
    folderName      Folder name with FCS data
                    Leave empty for the current folder
    Mtype           Measurement type: '2MPD' for two-detector data,
                    anything else (default '25ch') for 25-channel data
    ===========================================================================

    ===========================================================================
    Output
    ---------------------------------------------------------------------------
    images of time traces, autocorrelations and an overview .docx file
    ===========================================================================
    """

    # PARSE INPUT
    if folderName == []:
        folderName = getcwd()
    folderName = folderName.replace("\\", "/")
    folderName = Path(folderName)

    # OPEN WORD FILE
    document = Document()
    section = document.sections[0]
    section.page_height = Mm(297)
    section.page_width = Mm(210)
    section.bottom_margin = Mm(25)
    section.top_margin = Mm(25)
    section.left_margin = Mm(25)
    section.right_margin = Mm(25)

    # TITLE
    title = date.today().strftime("%Y-%m-%d")
    title = title + ' FCS analysis'
    document.add_heading(title, 0)

    # CHECK PICKLE FILES
    outp = listFiles(folderName, 'bin')

    # GO THROUGH EACH FILE
    for file in outp:
        if 'arrival_times' not in file:

            # file with photon arrival times found
            fileName = ntpath.basename(file)
            print('==========================================================')
            print('File found: ' + fileName)
            print('==========================================================')
            # ================================================================
            document.add_heading('File ' + fileName, level=1)
            # ================================================================

            # get pixel dwell time from text file (TO DO)
            FCSinfo = getFCSinfo(file[0:-4] + '_info.txt')
            #pxdwell = 1e-3 / ReadSpeed  # s
            pxdwell = FCSinfo.dwellTime

            #            # open file
            #            # data = arrivalTimes2Data(file)
            data = file_to_count(file)
            data = data[0]
            data = addSumColumn(data)
            #
            #            # bin files for plotting time trace
            print('Binning data for plotting time trace.')
            binSize = 2000000
            binDuration = pxdwell * binSize
            Nrows = 1000000
            #            # -----
            dataB = binData(data[0:Nrows, :], binSize)
            plt.figure()
            plt.plot(np.arange(0, binDuration * len(dataB), binDuration),
                     dataB[:, -1])
            plt.xlabel('Time [s]')
            plt.ylabel('Total # of photons per ' + str(1000 * binDuration) +
                       ' ms')
            plt.xlim([0, binDuration * len(dataB)])
            plt.title(fileName[0:30] + '...')
            plt.tight_layout()
            picName = fileName[0:-4] + '_timetrace.png'
            #            plt.savefig(picName)
            #            # ================================================================
            #            document.add_heading('Time trace', level=2)
            #            document.add_picture(picName, width=Inches(4))
            #            # ================================================================
            #
            #            # plot Airy pattern
            #            print('Calculating Airy pattern.')
            #            airy = plotAiry(data)
            #            picName = fileName[0:-4] + '_airy.png'
            #            plt.savefig(picName)
            #            # ================================================================
            #            document.add_heading('Airy pattern', level=2)
            #            document.add_paragraph('Number of photons per channel over the entire measurement')
            #            document.add_picture(picName, width=Inches(4))
            #            # ================================================================
            #
            #            # calculate autocorrelations
            #            # if single element is used, only calculate autocorr of this element
            #            # else do complete calculation
            #            print('Calculating correlations.')
            # =================================================================
            document.add_heading('Correlations', level=2)
            # =================================================================
            zoom = int(1e-5 / pxdwell)
            #            if np.max(airy[0:25]) / airy[25] > 0.99:
            #                # single element used
            #                det = int(np.argmax(airy[0:25]))
            #                G = FCS2Corr(data, 1e6*pxdwell, [det])
            #                Gsingle = getattr(G, 'det'+ str(det))
            #                plotFCScorrelations(G, plotList='all')
            #                plt.xlim(left=2*pxdwell)
            #                plt.ylim(top=np.max(Gsingle[2:,1]))
            #                picName = fileName[0:-4] + '_G.png'
            #                plt.savefig(picName)
            #                # =============================================================
            #                document.add_heading('Autocorrelation single detector', level=3)
            #                document.add_picture(picName, width=Inches(4))
            #                # =============================================================
            #                plt.xlim(left=zoom*pxdwell)
            #                plt.ylim(top=np.max(Gsingle[zoom:,1]))
            #                picName = fileName[0:-4] + '_Gzoom.png'
            #                plt.savefig(picName)
            #                # =============================================================
            #                document.add_picture(picName, width=Inches(4))
            # =============================================================
            if Mtype == '2MPD':
                G = FCS2CorrSplit(data, 1e6 * pxdwell, ['2MPD'], 50, 6)
                corr2csv(G, fileName[0:-4])
                plotFCScorrelations(G, [
                    'auto1_average', 'auto2_average', 'cross12_average',
                    'cross21_average', 'cross_average'
                ])
                plt.xlim(left=2 * pxdwell)
                picName = fileName[0:-4] + '_crosscorr.png'
                plt.savefig(picName)
                # =============================================================
                document.add_heading('Correlations', level=3)
                document.add_picture(picName, width=Inches(4))
                # =============================================================
            else:
                plotList = ['central', 'sum3', 'sum5', 'chessboard', 'ullr']
                maxG1 = np.zeros([len(plotList), 1])
                maxG2 = np.zeros([len(plotList), 1])
                # G = FCS2CorrSplit(data, 1e6*pxdwell, plotList, 50, 5)
                G = FCSLoadAndCorrSplit(file, plotList, 16, 10)
                corr2csv(G, fileName[0:-4])
                plotList = [
                    'central_average', 'sum3_average', 'sum5_average',
                    'chessboard_average', 'ullr_average'
                ]
                for i in range(len(plotList)):
                    Gsingle = getattr(G, plotList[i])
                    maxG1[i] = np.max(Gsingle[2:, 1])
                    maxG2[i] = np.max(Gsingle[zoom:, 1])

                plotFCScorrelations(G,
                                    plotList=[
                                        'central_average', 'sum3_average',
                                        'sum5_average'
                                    ])
                plt.xlim(left=2 * pxdwell)
                plt.ylim(top=np.max(maxG1[0:2]))
                picName = fileName[0:-4] + '_G135.png'
                plt.savefig(picName)
                # =============================================================
                document.add_heading('Autocorrelations', level=3)
                document.add_picture(picName, width=Inches(4))
                # =============================================================

                plt.xlim(left=zoom * pxdwell)
                plt.ylim(top=np.max(maxG2[0:2]))
                picName = fileName[0:-4] + '_G135_zoom.png'
                plt.savefig(picName)
                # =============================================================
                document.add_picture(picName, width=Inches(4))
                # =============================================================

                plotFCScorrelations(G, plotList=['sum5_average'])
                plt.xlim(left=zoom * pxdwell)
                plt.ylim(top=np.max(maxG2[2]))
                picName = fileName[0:-4] + '_sum5_zoom.png'
                plt.savefig(picName)
                # =============================================================
                document.add_picture(picName, width=Inches(4))
                # =============================================================

                #plotFCScorrelations(G, plotList=['sum5', 'det-15'])
                #plt.xlim(left=zoom*pxdwell)
                #plt.ylim(top=np.max(maxG2[2:4]))
                #picName = fileName[0:-4] + '_sum5MinusHotPixel_zoom.png'
                #plt.savefig(picName)
                # =============================================================
                #document.add_paragraph('det-15 means the sum of all detector elements except for pixel 15 (hot pixel).')
                #document.add_picture(picName, width=Inches(4))
                # =============================================================

                plotFCScorrelations(
                    G, plotList=['chessboard_average', 'ullr_average'])
                plt.xlim(left=pxdwell)
                plt.ylim(top=np.max(maxG2[4:]))
                picName = fileName[0:-4] + '_crosscorr_zoom.png'
                plt.savefig(picName)
                # =============================================================
                document.add_heading('Cross correlations', level=3)
                document.add_paragraph(
                    'Chessboard means the cross-correlation between the even-numbered pixels and the odd-numbered pixels. ULLR means the cross-correlation between the top-left and the bottom-right triangle (UpperLeft-LowerRight).'
                )
                document.add_picture(picName, width=Inches(4))
                # =============================================================

    document.save('Overview_results.docx')
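
A hypothetical invocation, assuming the SPAD-FCS helpers and the python-docx package are importable; the function scans the given folder for .bin files and writes Overview_results.docx in the working directory:

FCS2docx(folderName=[], Mtype='25ch')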
Example 11
    def __init__(self):
        self.dict = {}
        self.sumsize = 0
        self.ncalls = 0

    def checkSize(self, f):
        self.ncalls += 1
        size = os.stat(f).st_size
        self.dict[os.path.basename(f)] = size
        self.sumsize += size


root = '/afs/cern.ch/atlas/project/RTT/Work/rel_4/dev/build/i686-slc4-gcc34-opt'

print('looking for files in %s' % root)
logfiles = listFiles(root, '*_log')
print('Found %d log files' % len(logfiles))

checker = Checker()

for f in logfiles:
    checker.checkSize(f)

print('Root directory = ', root)
print('\nlog file sizes:')

for s in checker.dict.items():
    print('%10s   %d' % s)

print('log files below:', root)
print('No of files:', len(checker.dict))
print('Total size (kbytes):', checker.sumsize / 1024.)