Code example #1
File: file_operator.py  Project: reflectometry/WRed
def correct_scan(self):
    try:
        if 'ScanDescr' in self.standards and self.standards['ScanDescr']['metadata']:
            # Rebuild the scan description string from its stored tokens.
            scanstr = ' '.join(self.data[0]['ScanDescr'])
            myparser = scanparser3.scanparser(scanstr)
            # get_varying() yields the lower-cased names of the varying devices.
            scanlc = myparser.get_varying()
            scan = [s for s in self.standards if s.lower() in scanlc]
            self.standards['Scan'] = Standard(metadata=True).__dict__
            self.standards['ScanRanges'] = Standard(metadata=True).__dict__
            for p in self.data:
                p['ScanRanges'] = scan
                p['Scan'] = scan
    except Exception:
        # Fall back to a QY scan when the description is missing or unparsable.
        for p in self.data:
            p['ScanRanges'] = ['QY']
            p['Scan'] = ['QY']
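
This method assumes a `scanparser3` module and a `Standard` type from the surrounding WRed project. A minimal sketch of those pieces, with shapes inferred purely from how `correct_scan` uses them (hypothetical stand-ins, not the project's actual definitions):

# Hypothetical stand-ins, inferred from usage in correct_scan above.
class Standard(object):
    def __init__(self, metadata=False):
        # correct_scan stores Standard(metadata=True).__dict__, so a plain
        # attribute container is all this sketch needs to provide.
        self.metadata = metadata

# scanparser3.scanparser(scanstr) is assumed to expose get_varying(),
# returning the lower-cased names of the devices that vary in the scan.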
Code example #2
File: readncnr5.py  Project: scattering/dataflow
        def readbt7(self,myfile):
        #get first line
                myFlag=True
                #self.metadata={}
                self.header=[]
                returnline=['']
                while myFlag:
                        tokenized=get_tokenized_line(myfile,returnline=returnline)
                        #print tokenized
                        if tokenized==[]:
                                tokenized=['']
                        if tokenized[0].lower()=="#Date".lower():
                                pass
                        if tokenized[0].lower()=="#Date".lower():
                                date_tokens=tokenized[1].split('-')
                                month=int(date_tokens[1].strip("\'"))
                                day=int(date_tokens[2].strip("\'"))
                                year=int(date_tokens[0].strip("\'"))
                                stime=tokenized[2].strip("\'")
                                stimetok=stime.split(':')
                                hour=int(stimetok[0])
                                minute=int(stimetok[1])
                                second=int(stimetok[2])
                                self.metadata['month']=int(date_tokens[1].strip("\'"))
                                self.metadata['day']=int(date_tokens[2].strip("\'"))
                                self.metadata['year']=int(date_tokens[0].strip("\'"))
                                self.metadata['start_time']=tokenized[2].strip("\'")
                        elif tokenized[0].lower()=="#Epoch".lower():
                                #timeobj=date.datatetime(year,month,day,hour,minute,second)
                                Epoch=float(tokenized[1])
                                #timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                                timeobj=datetime.datetime.fromtimestamp(Epoch)
                                #print 'timeobj ',timeobj
                                #print 'Epoch ', Epoch
                                self.metadata['epoch']=Epoch#timeobj
                                #print self.metadata
                        elif tokenized[0].lower()=="#InstrName".lower():
                                self.metadata['instrument']=tokenized[1].lower()
                        elif tokenized[0].lower()=="#ExptID".lower():
                                self.metadata['experiment_id']=tokenized[1].lower()
                        elif tokenized[0].lower()=="#Fixed".lower():
                                self.metadata['fixed_devices']=tokenized[1:]
                        elif tokenized[0].lower()=="#Filename".lower():
                                self.metadata['filename']=tokenized[1]
                                #print 'filename ', tokenized[1]
                                pattern = re.compile('^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                                match = pattern.match(tokenized[1]+'.bt7')
                                dict((a,match.group(a)+"") for a in ['base','seq','ext'])
                                #print 'filebase ',match.group('base')
                                self.metadata['filebase']=match.group('base')
                                self.metadata['fileseq_number']=match.group('seq')
                        elif tokenized[0].lower()=="#Comment".lower():
                                mycomment=''
                                for i in range(1,len(tokenized)):
                                        mycomment=mycomment+' '+tokenized[i]
                                self.metadata['comment']=mycomment
                        elif tokenized[0].lower()=="#MonoSpacing".lower():
                                self.metadata['monochromator_dspacing']=float(tokenized[1])
                        elif tokenized[0].lower()=="#AnaSpacing".lower():
                                self.metadata['analyzer_dspacing']=float(tokenized[1])
                        elif tokenized[0].lower()=="#TemperatureUnits".lower():
                                self.metadata['temperature_units']=tokenized[1]
                        elif tokenized[0].lower()=="#Orient".lower():
                                self.metadata['orient1']['h']=float(tokenized[1])
                                self.metadata['orient1']['k']=float(tokenized[2])
                                self.metadata['orient1']['l']=float(tokenized[3])
                                self.metadata['orient2']['h']=float(tokenized[4])
                                self.metadata['orient2']['k']=float(tokenized[5])
                                self.metadata['orient2']['l']=float(tokenized[6])
                        elif tokenized[0].lower()=="#Lattice".lower():
                                self.metadata['lattice']['a']=float(tokenized[1])
                                self.metadata['lattice']['b']=float(tokenized[2])
                                self.metadata['lattice']['c']=float(tokenized[3])
                                self.metadata['lattice']['alpha']=float(tokenized[4])
                                self.metadata['lattice']['beta']=float(tokenized[5])
                                self.metadata['lattice']['gamma']=float(tokenized[6])
                        elif tokenized[0].lower()=="#AnalyzerDetectorMode".lower():
                                self.metadata['analyzerdetectormode']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#Reference".lower():
                                self.metadata['count_type']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#Signal".lower():
                                self.metadata['signal']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#AnalyzerDetectorDevicesOfInterest".lower():
                                self.metadata['AnalyzerDetectorDevicesOfInterest'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerDDGroup".lower():
                                self.metadata['AnalyzerDDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerDoorDetectorGroup".lower():
                                self.metadata['AnalyzerDoorDetectorGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerSDGroup".lower():
                                self.metadata['AnalyzerSDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerPSDGroup".lower():
                                self.metadata['AnalyzerPSDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerFocusMode".lower():
                                self.metadata['analyzerfocusmode'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#MonoVertiFocus".lower():
                                self.metadata['monovertifocus'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#MonoHorizFocus".lower():
                                self.metadata['monohorizfocus'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#FixedE".lower():
                                try:
                                        self.metadata['efixed'.lower()]=tokenized[1]
                                except IndexError:
                                        pass
                                try:
                                        self.metadata['ef'.lower()]=float(tokenized[2])
                                except IndexError:
                                        pass
                        elif tokenized[0].lower()=="#ScanDescr".lower():
                                scanstr=''
                                for i in range(1,len(tokenized)):
                                        scanstr=scanstr+tokenized[i]+' '
                                self.metadata['scan_description']=scanstr
                                #print 'scanstr',scanstr
                                myparser=scanparser.scanparser(scanstr)
                                self.metadata['varying']=myparser.get_varying()
                                self.metadata['ranges']=myparser.ranges
                                #print 'parsed'
                                #if self.metadata['filebase']!='fpx':
                                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                                #CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                                #Better soln is to fix parser
                                #print self.metadata['scan_description']['range']
                        else:
                                currfield=tokenized[0].lower().strip('#')
                                self.additional_metadata[currfield]=(tokenized[1:])
                        if tokenized[0].lower()!='#Columns'.lower():
                                self.header.append(returnline[0])
                        if tokenized[0].lower()=='#Columns'.lower():
                                self.get_columnmetadatas_bt7(tokenized)
                                count =  0
                                try:
                                        lines=int(self.lines)
                                        except Exception:
                                                # Default to reading every data line when self.lines is unset or invalid.
                                                lines=N.Inf
                                while 1:
                                        lineStr = myfile.readline()
                                        if not(lineStr):
                                                break
                                        if lineStr[0] != "#":
                                                if count>=lines:
                                                        break
                                                strippedLine=lineStr.rstrip()
                                                tokenized=strippedLine.split()
                                                for i in range(len(tokenized)):
                                                        field=self.columnlist[i]
                                                        try:
                                                                        if field.lower()=='time' and not self.timestamp_flag:
                                                                        #timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                                                        #self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                                                        #timestamp_flag is True if the timestamp is already given in the file
                                                                        timedelta=datetime.timedelta(seconds=float(tokenized[i]))
                                                                        self.columndict['timestamp'].append(mktime((timeobj+timedelta).timetuple()))
                                                                        timeobj=timeobj+timedelta
                                                                update_extrema(field, float(tokenized[i]))
                                                                self.columndict[field].append(float(tokenized[i]))
                                                        except ValueError:
                                                                #update_extrema(self, field, tokenized[i]) # is it appropriate for non-number fields?
                                                                self.columndict[field].append((tokenized[i]))
                                                count=count+1
                                myFlag=False
                if len(self.columndict[self.columnlist[0]])==0:
                        self.columndict={}
                        self.columnlist=[]
                        #This is a drastic step, but if the file is empty, then no point in even recording the placeholders
                #print self.columndict['Qx']
                #print self.columnlist
                return
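
Besides the project-local `scanparser`, the reader above assumes `datetime`, `re`, `numpy` imported as `N`, and `mktime` from the `time` module. The `#Filename` branch splits a file name into a base, a trailing sequence number, and an extension with a regular expression; a standalone sketch of that pattern (the file name here is made up for illustration):

import re

# The same pattern used in the #Filename branch above.
pattern = re.compile(r'^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
match = pattern.match('escan12345' + '.bt7')  # hypothetical BT7 file name
print(match.group('base'))  # escan
print(match.group('seq'))   # 12345
print(match.group('ext'))   # .bt7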
Code example #3
File: readncnr3.py  Project: scottwedge/dataflow
    def readbt7(self, myfile):
        #get first line
        myFlag = True
        #self.metadata={}
        self.header = []
        returnline = ['']
        while myFlag:
            tokenized = get_tokenized_line(myfile, returnline=returnline)
            #print tokenized
            if tokenized == []:
                tokenized = ['']
            if tokenized[0].lower() == "#Date".lower():
                pass
            if tokenized[0].lower() == "#Date".lower():
                date_tokens = tokenized[1].split('-')
                self.metadata['timestamp'] = {}
                month = int(date_tokens[1].strip("\'"))
                day = int(date_tokens[2].strip("\'"))
                year = int(date_tokens[0].strip("\'"))
                stime = tokenized[2].strip("\'")
                stimetok = stime.split(':')
                hour = int(stimetok[0])
                minute = int(stimetok[1])
                second = int(stimetok[2])
                self.metadata['timestamp']['month'] = int(
                    date_tokens[1].strip("\'"))
                self.metadata['timestamp']['day'] = int(
                    date_tokens[2].strip("\'"))
                self.metadata['timestamp']['year'] = int(
                    date_tokens[0].strip("\'"))
                self.metadata['timestamp']['time'] = tokenized[2].strip("\'")
            elif tokenized[0].lower() == "#Epoch".lower():
                #timeobj=date.datatetime(year,month,day,hour,minute,second)
                Epoch = float(tokenized[1])
                #timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                timeobj = datetime.datetime.fromtimestamp(Epoch)
                #print 'timeobj ',timeobj
                #print 'Epoch ', Epoch
                self.metadata['timestamp']['epoch'] = Epoch  #timeobj
                #print self.metadata['timestamp']
            elif tokenized[0].lower() == "#InstrName".lower():
                self.metadata['file_info']['instrument'] = tokenized[1].lower()
            elif tokenized[0].lower() == "#ExptID".lower():
                self.metadata['file_info']['experiment_id'] = tokenized[
                    1].lower()
            elif tokenized[0].lower() == "#Fixed".lower():
                self.metadata['file_info']['fixed_devices'] = tokenized[1:]
            elif tokenized[0].lower() == "#Filename".lower():
                self.metadata['file_info']['filename'] = tokenized[1]
                #print 'filename ', tokenized[1]
                pattern = re.compile(
                    '^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                match = pattern.match(tokenized[1] + '.bt7')
                dict((a, match.group(a) + "") for a in ['base', 'seq', 'ext'])
                #print 'filebase ',match.group('base')
                self.metadata['file_info']['filebase'] = match.group('base')
                self.metadata['file_info']['fileseq_number'] = match.group(
                    'seq')
            elif tokenized[0].lower() == "#Comment".lower():
                mycomment = ''
                for i in range(1, len(tokenized)):
                    mycomment = mycomment + ' ' + tokenized[i]
                self.metadata['file_info']['comment'] = mycomment
            elif tokenized[0].lower() == "#MonoSpacing".lower():
                self.metadata['dspacing']['monochromator_dspacing'] = float(
                    tokenized[1])
            elif tokenized[0].lower() == "#AnaSpacing".lower():
                self.metadata['dspacing']['analyzer_dspacing'] = float(
                    tokenized[1])
            elif tokenized[0].lower() == "#TemperatureUnits".lower():
                self.metadata['temperature_info']['units'] = tokenized[1]
            elif tokenized[0].lower() == "#Orient".lower():
                self.metadata['orient1']['h'] = float(tokenized[1])
                self.metadata['orient1']['k'] = float(tokenized[2])
                self.metadata['orient1']['l'] = float(tokenized[3])
                self.metadata['orient2']['h'] = float(tokenized[4])
                self.metadata['orient2']['k'] = float(tokenized[5])
                self.metadata['orient2']['l'] = float(tokenized[6])
            elif tokenized[0].lower() == "#Lattice".lower():
                self.metadata['lattice']['a'] = float(tokenized[1])
                self.metadata['lattice']['b'] = float(tokenized[2])
                self.metadata['lattice']['c'] = float(tokenized[3])
                self.metadata['lattice']['alpha'] = float(tokenized[4])
                self.metadata['lattice']['beta'] = float(tokenized[5])
                self.metadata['lattice']['gamma'] = float(tokenized[6])
            elif tokenized[0].lower() == "#AnalyzerDetectorMode".lower():
                self.metadata['count_info'][
                    'analyzerdetectormode'] = tokenized[2].lower()
            elif tokenized[0].lower() == "#Reference".lower():
                self.metadata['count_info']['count_type'] = tokenized[2].lower()
            elif tokenized[0].lower() == "#Signal".lower():
                self.metadata['count_info']['signal'] = tokenized[2].lower()
            elif tokenized[0].lower() == "#AnalyzerDetectorDevicesOfInterest".lower():
                self.metadata['count_info']['AnalyzerDetectorDevicesOfInterest'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerDDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDoorDetectorGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerDoorDetectorGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerSDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerSDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerPSDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerPSDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerFocusMode".lower():
                self.metadata['count_info'][
                    'analyzerfocusmode'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoVertiFocus".lower():
                self.metadata['count_info'][
                    'monovertifocus'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoHorizFocus".lower():
                self.metadata['count_info'][
                    'monohorizfocus'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#FixedE".lower():
                try:
                    self.metadata['energy_info'][
                        'efixed'.lower()] = tokenized[1]
                except IndexError:
                    pass
                try:
                    self.metadata['energy_info']['ef'.lower()] = float(
                        tokenized[2])
                except IndexError:
                    pass
            elif tokenized[0].lower() == "#ScanDescr".lower():
                scanstr = ''
                for i in range(1, len(tokenized)):
                    scanstr = scanstr + tokenized[i] + ' '
                self.metadata['file_info']['scan_description'] = scanstr
                #print 'scanstr',scanstr
                myparser = scanparser.scanparser(scanstr)
                self.metadata['count_info']['varying'] = myparser.get_varying()
                self.metadata['count_info']['ranges'] = myparser.ranges
                #print 'parsed'
                #if self.metadata['file_info']['filebase']!='fpx':
                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                #CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                #Better soln is to fix parser
                #print self.metadata['scan_description']['range']
            else:
                currfield = tokenized[0].lower().strip('#')
                self.additional_metadata[currfield] = (tokenized[1:])
            if tokenized[0].lower() != '#Columns'.lower():
                self.header.append(returnline[0])
            if tokenized[0].lower() == '#Columns'.lower():
                self.get_columnmetadatas_bt7(tokenized)
                count = 0
                try:
                    lines = int(self.lines)
                except Exception:
                    # Default to reading every data line when self.lines is unset or invalid.
                    lines = N.Inf
                while 1:
                    lineStr = myfile.readline()
                    if not (lineStr):
                        break
                    if lineStr[0] != "#":
                        if count >= lines:
                            break
                        strippedLine = lineStr.rstrip()
                        tokenized = strippedLine.split()
                        for i in range(len(tokenized)):
                            field = self.columnlist[i]
                            try:
                                if field.lower() == 'time' and not self.timestamp_flag:
                                    #timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                    #self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                    #timestamp_flag is True if the timestamp is already given in the file
                                    timedelta = datetime.timedelta(
                                        seconds=float(tokenized[i]))
                                    self.columndict['timestamp'].append(
                                        mktime(
                                            (timeobj + timedelta).timetuple()))
                                    timeobj = timeobj + timedelta
                                self.columndict[field].append(
                                    float(tokenized[i]))
                            except ValueError:
                                self.columndict[field].append((tokenized[i]))
                        count = count + 1
                myFlag = False
        if len(self.columndict[self.columnlist[0]]) == 0:
            self.columndict = {}
            self.columnlist = []
            #This is a drastic step, but if the file is empty, then no point in even recording the placeholders
        #print self.columndict['Qx']
        #print self.columnlist
        return
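
Where the previous reader keeps most metadata flat, this version files everything under sub-dicts (`timestamp`, `file_info`, `count_info`, `dspacing`, ...) and, apart from `timestamp`, never creates them inside the method, so the constructor presumably pre-initializes them. A minimal sketch of that assumed setup, with the key names taken from the assignments above (hypothetical; the actual constructor is not shown):

# Hypothetical constructor-side setup this method appears to rely on;
# only 'timestamp' is (re)created inside readbt7 itself.
self.metadata = {
    'file_info': {},
    'count_info': {},
    'dspacing': {},
    'temperature_info': {},
    'energy_info': {},
    'orient1': {},
    'orient2': {},
    'lattice': {},
}
self.additional_metadata = {}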
Code example #4
File: readncnr3.py  Project: pjreddie/WRed
    def readbt7(self, myfile):
        # get first line
        myFlag = True
        # self.metadata={}
        self.header = []
        returnline = [""]
        while myFlag:
            tokenized = get_tokenized_line(myfile, returnline=returnline)
            # print tokenized
            if tokenized == []:
                tokenized = [""]
            if tokenized[0].lower() == "#Date".lower():
                pass
            if tokenized[0].lower() == "#Date".lower():
                date_tokens = tokenized[1].split("-")
                self.metadata["timestamp"] = {}
                month = int(date_tokens[1].strip("'"))
                day = int(date_tokens[2].strip("'"))
                year = int(date_tokens[0].strip("'"))
                stime = tokenized[2].strip("'")
                stimetok = stime.split(":")
                hour = int(stimetok[0])
                minute = int(stimetok[1])
                second = int(stimetok[2])
                self.metadata["timestamp"]["month"] = int(date_tokens[1].strip("'"))
                self.metadata["timestamp"]["day"] = int(date_tokens[2].strip("'"))
                self.metadata["timestamp"]["year"] = int(date_tokens[0].strip("'"))
                self.metadata["timestamp"]["time"] = tokenized[2].strip("'")
            elif tokenized[0].lower() == "#Epoch".lower():
                # timeobj=date.datatetime(year,month,day,hour,minute,second)
                Epoch = float(tokenized[1])
                # timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                timeobj = datetime.datetime.fromtimestamp(Epoch)
                # print 'timeobj ',timeobj
                # print 'Epoch ', Epoch
                self.metadata["timestamp"]["epoch"] = Epoch  # timeobj
                # print self.metadata['timestamp']
            elif tokenized[0].lower() == "#InstrName".lower():
                self.metadata["file_info"]["instrument"] = tokenized[1].lower()
            elif tokenized[0].lower() == "#ExptID".lower():
                self.metadata["file_info"]["experiment_id"] = tokenized[1].lower()
            elif tokenized[0].lower() == "#Fixed".lower():
                self.metadata["file_info"]["fixed_devices"] = tokenized[1:]
            elif tokenized[0].lower() == "#Filename".lower():
                self.metadata["file_info"]["filename"] = tokenized[1]
                # print 'filename ', tokenized[1]
                pattern = re.compile("^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$")
                match = pattern.match(tokenized[1] + ".bt7")
                dict((a, match.group(a) + "") for a in ["base", "seq", "ext"])
                # print 'filebase ',match.group('base')
                self.metadata["file_info"]["filebase"] = match.group("base")
                self.metadata["file_info"]["fileseq_number"] = match.group("seq")
            elif tokenized[0].lower() == "#Comment".lower():
                mycomment = ""
                for i in range(1, len(tokenized)):
                    mycomment = mycomment + " " + tokenized[i]
                self.metadata["file_info"]["comment"] = mycomment
            elif tokenized[0].lower() == "#MonoSpacing".lower():
                self.metadata["dspacing"]["monochromator_dspacing"] = float(tokenized[1])
            elif tokenized[0].lower() == "#AnaSpacing".lower():
                self.metadata["dspacing"]["analyzer_dspacing"] = float(tokenized[1])
            elif tokenized[0].lower() == "#TemperatureUnits".lower():
                self.metadata["temperature_info"]["units"] = tokenized[1]
            elif tokenized[0].lower() == "#Orient".lower():
                self.metadata["orient1"]["h"] = float(tokenized[1])
                self.metadata["orient1"]["k"] = float(tokenized[2])
                self.metadata["orient1"]["l"] = float(tokenized[3])
                self.metadata["orient2"]["h"] = float(tokenized[4])
                self.metadata["orient2"]["k"] = float(tokenized[5])
                self.metadata["orient2"]["l"] = float(tokenized[6])
            elif tokenized[0].lower() == "#Lattice".lower():
                self.metadata["lattice"]["a"] = float(tokenized[1])
                self.metadata["lattice"]["b"] = float(tokenized[2])
                self.metadata["lattice"]["c"] = float(tokenized[3])
                self.metadata["lattice"]["alpha"] = float(tokenized[4])
                self.metadata["lattice"]["beta"] = float(tokenized[5])
                self.metadata["lattice"]["gamma"] = float(tokenized[6])
            elif tokenized[0].lower() == "#AnalyzerDetectorMode".lower():
                self.metadata["count_info"]["analyzerdetectormode"] = tokenized[2].lower()
            elif tokenized[0].lower() == "#Reference".lower():
                self.metadata["count_info"]["count_type"] = tokenized[2].lower()
            elif tokenized[0].lower() == "#Signal".lower():
                self.metadata["count_info"]["signal"] = tokenized[2].lower()
            elif tokenized[0].lower() == "#AnalyzerDetectorDevicesOfInterest".lower():
                self.metadata["count_info"]["AnalyzerDetectorDevicesOfInterest".lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDDGroup".lower():
                self.metadata["count_info"]["AnalyzerDDGroup".lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDoorDetectorGroup".lower():
                self.metadata["count_info"]["AnalyzerDoorDetectorGroup".lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerSDGroup".lower():
                self.metadata["count_info"]["AnalyzerSDGroup".lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerPSDGroup".lower():
                self.metadata["count_info"]["AnalyzerPSDGroup".lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerFocusMode".lower():
                self.metadata["count_info"]["analyzerfocusmode".lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoVertiFocus".lower():
                self.metadata["count_info"]["monovertifocus".lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoHorizFocus".lower():
                self.metadata["count_info"]["monohorizfocus".lower()] = tokenized[1]
            elif tokenized[0].lower() == "#FixedE".lower():
                try:
                    self.metadata["energy_info"]["efixed".lower()] = tokenized[1]
                except IndexError:
                    pass
                try:
                    self.metadata["energy_info"]["ef".lower()] = float(tokenized[2])
                except IndexError:
                    pass
            elif tokenized[0].lower() == "#ScanDescr".lower():
                scanstr = ""
                for i in range(1, len(tokenized)):
                    scanstr = scanstr + tokenized[i] + " "
                self.metadata["file_info"]["scan_description"] = scanstr
                # print 'scanstr',scanstr
                myparser = scanparser.scanparser(scanstr)
                self.metadata["count_info"]["varying"] = myparser.get_varying()
                self.metadata["count_info"]["ranges"] = myparser.ranges
                # print 'parsed'
                # if self.metadata['file_info']['filebase']!='fpx':
                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                # CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                # Better soln is to fix parser
                # print self.metadata['scan_description']['range']
            else:
                currfield = tokenized[0].lower().strip("#")
                self.additional_metadata[currfield] = tokenized[1:]
            if tokenized[0] != "#Columns".lower():
                self.header.append(returnline[0])
            if tokenized[0] == "#Columns".lower():
                self.get_columnmetadatas_bt7(tokenized)
                count = 0
                try:
                    lines = int(self.lines)
                except Exception:
                    # Default to reading every data line when self.lines is unset or invalid.
                    lines = N.Inf
                while 1:
                    lineStr = myfile.readline()
                    if not (lineStr):
                        break
                    if lineStr[0] != "#":
                        if count >= lines:
                            break
                        strippedLine = lineStr.rstrip()
                        tokenized = strippedLine.split()
                        for i in range(len(tokenized)):
                            field = self.columnlist[i]
                            try:
                                if field.lower() == "time" and self.timestamp_flag == False:
                                    # timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                    # self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                    # timestamp_flag is True if the timestamp is already given in the file
                                    timedelta = datetime.timedelta(seconds=float(tokenized[i]))
                                    self.columndict["timestamp"].append(mktime((timeobj + timedelta).timetuple()))
                                    timeobj = timeobj + timedelta
                                self.columndict[field].append(float(tokenized[i]))
                            except ValueError:
                                self.columndict[field].append((tokenized[i]))
                        count = count + 1
                myFlag = False
        if len(self.columndict[self.columnlist[0]]) == 0:
            self.columndict = {}
            self.columnlist = []
            # This is a drastic step, but if the file is empty, then no point in even recording the placeholders
        # print self.columndict['Qx']
        # print self.columnlist
        return
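
All of these readers rebuild absolute timestamps for the `time` column by accumulating per-point count times onto the start time taken from the `#Epoch` header. That core logic can be exercised in isolation; a small sketch with made-up numbers:

import datetime
from time import mktime

# Start from the epoch given on the '#Epoch' header line.
timeobj = datetime.datetime.fromtimestamp(1200000000.0)  # example epoch

timestamps = []
for count_time in [2.0, 2.0, 2.5]:  # hypothetical per-point times in seconds
    timedelta = datetime.timedelta(seconds=count_time)
    # Same accumulation as the readers: stamp this point, then advance.
    timestamps.append(mktime((timeobj + timedelta).timetuple()))
    timeobj = timeobj + timedelta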