Exemplo n.º 1
0
        def readbt7(self,myfile):
                """Parse an open BT7 data file *myfile* into this object's state.

                Header lines (tokens starting with '#') are dispatched one at a
                time into the nested ``self.metadata`` dictionaries, and every
                header line except the '#Columns' line is also kept verbatim in
                ``self.header``.  When the '#Columns' line is seen, the column
                names are registered via ``self.get_columnmetadatas_bt7`` and the
                remaining non-'#' lines are read as whitespace-separated data
                rows into ``self.columndict``.

                NOTE(review): assumes ``get_tokenized_line`` returns the split
                tokens and passes the raw line back through the one-element
                *returnline* list -- confirm against its definition.
                """
                myFlag=True
                #self.metadata={}
                self.header=[]
                # Out-parameter: get_tokenized_line stores the raw (untokenized)
                # line in returnline[0] so it can be appended to self.header.
                returnline=['']
                while myFlag:
                        tokenized=get_tokenized_line(myfile,returnline=returnline)
                        #print tokenized
                        if tokenized==[]:
                                # Blank line: substitute a dummy token so
                                # tokenized[0] below is always safe to read.
                                tokenized=['']
                        if tokenized[0].lower()=="#Date".lower():
                                # NOTE(review): deliberate no-op; the identical
                                # 'if' immediately below does the real work, so
                                # this branch is redundant but harmless here.
                                pass
                        if tokenized[0].lower()=="#Date".lower():
                                # '#Date yyyy-mm-dd hh:mm:ss' -- fields may be
                                # wrapped in single quotes, hence the strip("'").
                                date_tokens=tokenized[1].split('-')
                                self.metadata['timestamp']={}
                                month=int(date_tokens[1].strip("\'"))
                                day=int(date_tokens[2].strip("\'"))
                                year=int(date_tokens[0].strip("\'"))
                                stime=tokenized[2].strip("\'")
                                stimetok=stime.split(':')
                                hour=int(stimetok[0])
                                minute=int(stimetok[1])
                                second=int(stimetok[2])
                                self.metadata['timestamp']['month']=int(date_tokens[1].strip("\'"))
                                self.metadata['timestamp']['day']=int(date_tokens[2].strip("\'"))
                                self.metadata['timestamp']['year']=int(date_tokens[0].strip("\'"))
                                self.metadata['timestamp']['time']=tokenized[2].strip("\'")
                        elif tokenized[0].lower()=="#Epoch".lower():
                                #timeobj=date.datatetime(year,month,day,hour,minute,second)
                                Epoch=float(tokenized[1])
                                #timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                                # timeobj is reused far below to synthesize
                                # per-row timestamps from the 'time' column.
                                timeobj=datetime.datetime.fromtimestamp(Epoch)
                                #print 'timeobj ',timeobj
                                #print 'Epoch ', Epoch
                                # NOTE(review): assumes '#Date' already created
                                # self.metadata['timestamp'] -- a file with
                                # '#Epoch' before '#Date' would raise KeyError.
                                self.metadata['timestamp']['epoch']=Epoch#timeobj
                                #print self.metadata['timestamp']
                        elif tokenized[0].lower()=="#InstrName".lower():
                                self.metadata['file_info']['instrument']=tokenized[1].lower()
                        elif tokenized[0].lower()=="#ExptID".lower():
                                self.metadata['file_info']['experiment_id']=tokenized[1].lower()
                        elif tokenized[0].lower()=="#Fixed".lower():
                                self.metadata['file_info']['fixed_devices']=tokenized[1:]
                        elif tokenized[0].lower()=="#Filename".lower():
                                self.metadata['file_info']['filename']=tokenized[1]
                                #print 'filename ', tokenized[1]
                                # Split the name into base / trailing sequence
                                # number / extension; '.bt7' is appended so the
                                # 'ext' group always matches.
                                pattern = re.compile('^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                                match = pattern.match(tokenized[1]+'.bt7')
                                # NOTE(review): the result of this dict(...) is
                                # discarded -- dead code kept for reference.
                                dict((a,match.group(a)+"") for a in ['base','seq','ext'])
                                #print 'filebase ',match.group('base')
                                self.metadata['file_info']['filebase']=match.group('base')
                                self.metadata['file_info']['fileseq_number']=match.group('seq')
                        elif tokenized[0].lower()=="#Comment".lower():
                                # Re-join the comment tokens (leading space is
                                # kept from the first concatenation).
                                mycomment=''
                                for i in range(1,len(tokenized)):
                                        mycomment=mycomment+' '+tokenized[i]
                                self.metadata['file_info']['comment']=mycomment
                        elif tokenized[0].lower()=="#MonoSpacing".lower():
                                self.metadata['dspacing']['monochromator_dspacing']=float(tokenized[1])
                        elif tokenized[0].lower()=="#AnaSpacing".lower():
                                self.metadata['dspacing']['analyzer_dspacing']=float(tokenized[1])
                        elif tokenized[0].lower()=="#TemperatureUnits".lower():
                                self.metadata['temperature_info']['units']=tokenized[1]
                        elif tokenized[0].lower()=="#Orient".lower():
                                # Two orientation vectors: (h,k,l) then (h,k,l).
                                self.metadata['orient1']['h']=float(tokenized[1])
                                self.metadata['orient1']['k']=float(tokenized[2])
                                self.metadata['orient1']['l']=float(tokenized[3])
                                self.metadata['orient2']['h']=float(tokenized[4])
                                self.metadata['orient2']['k']=float(tokenized[5])
                                self.metadata['orient2']['l']=float(tokenized[6])
                        elif tokenized[0].lower()=="#Lattice".lower():
                                # Lattice constants a,b,c and angles alpha,beta,gamma.
                                self.metadata['lattice']['a']=float(tokenized[1])
                                self.metadata['lattice']['b']=float(tokenized[2])
                                self.metadata['lattice']['c']=float(tokenized[3])
                                self.metadata['lattice']['alpha']=float(tokenized[4])
                                self.metadata['lattice']['beta']=float(tokenized[5])
                                self.metadata['lattice']['gamma']=float(tokenized[6])
                        elif tokenized[0].lower()=="#AnalyzerDetectorMode".lower():
                                # tokenized[1] is skipped here (and in the next
                                # two branches); only the 3rd token is stored.
                                self.metadata['count_info']['analyzerdetectormode']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#Reference".lower():
                                self.metadata['count_info']['count_type']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#Signal".lower():
                                self.metadata['count_info']['signal']=tokenized[2].lower()
                        elif tokenized[0].lower()=="#AnalyzerDetectorDevicesOfInterest".lower():
                                self.metadata['count_info']['AnalyzerDetectorDevicesOfInterest'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerDDGroup".lower():
                                self.metadata['count_info']['AnalyzerDDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerDoorDetectorGroup".lower():
                                self.metadata['count_info']['AnalyzerDoorDetectorGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerSDGroup".lower():
                                self.metadata['count_info']['AnalyzerSDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerPSDGroup".lower():
                                self.metadata['count_info']['AnalyzerPSDGroup'.lower()]=tokenized[1:]
                        elif tokenized[0].lower()=="#AnalyzerFocusMode".lower():
                                self.metadata['count_info']['analyzerfocusmode'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#MonoVertiFocus".lower():
                                self.metadata['count_info']['monovertifocus'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#MonoHorizFocus".lower():
                                self.metadata['count_info']['monohorizfocus'.lower()]=tokenized[1]
                        elif tokenized[0].lower()=="#FixedE".lower():
                                # Both tokens are optional; IndexError guards
                                # allow a bare '#FixedE' line.
                                try:
                                        self.metadata['energy_info']['efixed'.lower()]=tokenized[1]
                                except IndexError:
                                        pass
                                try:
                                        self.metadata['energy_info']['ef'.lower()]=float(tokenized[2])
                                except IndexError:
                                        pass
                        elif tokenized[0].lower()=="#ScanDescr".lower():
                                # Rebuild the scan description string (trailing
                                # space retained) and hand it to the scan parser.
                                scanstr=''
                                for i in range(1,len(tokenized)):
                                        scanstr=scanstr+tokenized[i]+' '
                                self.metadata['file_info']['scan_description']=scanstr
                                #print 'scanstr',scanstr
                                myparser=scanparser.scanparser(scanstr)
                                self.metadata['count_info']['varying']=myparser.get_varying()
                                self.metadata['count_info']['ranges']=myparser.ranges
                                #print 'parsed'
                                #if self.metadata['file_info']['filebase']!='fpx':
                                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                                        #CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                                        #Better soln is to fix parser
                                #print self.metadata['scan_description']['range']
                        else:
                                # Unrecognized header: store under its bare name.
                                # (The second .lower() is redundant.)
                                currfield=tokenized[0].lower().lower().strip('#')
                                self.additional_metadata[currfield]=(tokenized[1:])
                        # NOTE(review): tokenized[0] is NOT lowercased in the
                        # next two comparisons, so they only match a literal
                        # lowercase '#columns' token -- confirm this matches the
                        # actual file format / get_tokenized_line behavior.
                        if tokenized[0]!='#Columns'.lower():
                                self.header.append(returnline[0])
                        if tokenized[0]=='#Columns'.lower():
                                self.get_columnmetadatas_bt7(tokenized)
                                count =  0
                                # self.lines may be absent or non-numeric; fall
                                # back to "no limit".  NOTE(review): bare except
                                # also hides unrelated errors.
                                try:
                                        lines=int(self.lines)
                                except:
                                        lines=N.Inf
                                # Data section: read rows until EOF or the
                                # declared line count is reached.
                                while 1:
                                        lineStr = myfile.readline()
                                        if not(lineStr):
                                                break
                                        if lineStr[0] != "#":
                                                if count>=lines:
                                                        break
                                                strippedLine=lineStr.rstrip()
                                                tokenized=strippedLine.split()
                                                for i in range(len(tokenized)):
                                                        field=self.columnlist[i]
                                                        try:
                                                                if field.lower()=='time' and self.timestamp_flag==False:
                                                                        #timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                                                        #self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                                                        #timestamp_flag is True if the timestamp is already given in the file
                                                                        # Accumulate count times onto the epoch
                                                                        # time to synthesize a timestamp per row.
                                                                        timedelta=datetime.timedelta(seconds=float(tokenized[i]))
                                                                        self.columndict['timestamp'].append(mktime((timeobj+timedelta).timetuple()))
                                                                        timeobj=timeobj+timedelta
                                                                self.columndict[field].append(float(tokenized[i]))
                                                        except ValueError:
                                                                # Non-numeric cell: keep the raw string.
                                                                self.columndict[field].append((tokenized[i]))
                                                count=count+1
                                myFlag=False
                if len(self.columndict[self.columnlist[0]])==0:
                        self.columndict={}
                        self.columnlist=[]
                        #This is a drastic step, but if the file is empty, then no point in even recording the placeholders
                #print self.columndict['Qx']
                #print self.columnlist
                return
Exemplo n.º 2
0
    def readbt7(self,myfile):
        """Parse an open BT7 data file *myfile* into this object's state.

        Variant that stores header values as attributes on ``self.metadata``
        (e.g. ``self.metadata.epoch``) instead of nested dictionaries, and
        appends data rows into ``self.data``.  Every header line except the
        '#Columns' line is also kept verbatim in ``self.header``.

        NOTE(review): assumes ``get_tokenized_line`` returns the split tokens
        and passes the raw line back through the one-element *returnline*
        list -- confirm against its definition.
        """
        myFlag=True
        #self.metadata={}
        self.header=[]
        # Out-parameter: get_tokenized_line stores the raw (untokenized) line
        # in returnline[0] so it can be appended to self.header.
        returnline=['']
        while myFlag:
            tokenized=get_tokenized_line(myfile,returnline=returnline)
            #print tokenized
            if tokenized==[]:
                # Blank line: substitute a dummy token so tokenized[0] below
                # is always safe to read.
                tokenized=['']
            if tokenized[0].lower()=="#Date".lower():
                pass
            # NOTE(review): this elif tests the SAME condition as the 'if'
            # above, so the whole branch is unreachable dead code -- the date
            # fields (month/day/year/...) are never actually stored here.
            elif tokenized[0].lower()=="#Date".lower():
                #time.strptime("2007-11-03 22:33:22 EDT", "%Y-%m-%d %H:%M:%S %Z")
                date_tokens=tokenized[1].split('-')
                self.metadata['timestamp']={}
                month=int(date_tokens[1].strip("\'"))
                day=int(date_tokens[2].strip("\'"))
                year=int(date_tokens[0].strip("\'"))
                stime=tokenized[2].strip("\'")
                stimetok=stime.split(':')
                hour=int(stimetok[0])
                minute=int(stimetok[1])
                second=int(stimetok[2])
                self.metadata['timestamp']['month']=int(date_tokens[1].strip("\'"))
                self.metadata['timestamp']['day']=int(date_tokens[2].strip("\'"))
                self.metadata['timestamp']['year']=int(date_tokens[0].strip("\'"))
                self.metadata['timestamp']['time']=tokenized[2].strip("\'")
            elif tokenized[0].lower()=="#Epoch".lower():
                #timeobj=date.datatetime(year,month,day,hour,minute,second)
                Epoch=float(tokenized[1])
                #timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                # timeobj is reused far below to synthesize per-row timestamps
                # from the 'time' column.
                timeobj=datetime.datetime.fromtimestamp(Epoch)
                #print 'timeobj ',timeobj
                #print 'Epoch ', Epoch
                self.metadata.epoch=Epoch#timeobj
                #print self.metadata['timestamp']
            elif tokenized[0].lower()=="#InstrName".lower():
                self.metadata.instrument=tokenized[1].lower()
            elif tokenized[0].lower()=="#ExptID".lower():
                self.metadata.experiment_id=tokenized[1].lower()
            elif tokenized[0].lower()=="#Fixed".lower():
                self.metadata.fixed_devices=tokenized[1:]
            elif tokenized[0].lower()=="#Filename".lower():
                self.metadata.filename=tokenized[1]
                #print 'filename ', tokenized[1]
                # Split the name into base / trailing sequence number /
                # extension; '.bt7' is appended so 'ext' always matches.
                pattern = re.compile('^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                match = pattern.match(tokenized[1]+'.bt7')
                # NOTE(review): the result of this dict(...) is discarded --
                # dead code kept for reference.
                dict((a,match.group(a)+"") for a in ['base','seq','ext'])
                #print 'filebase ',match.group('base')
                self.metadata.filebase=match.group('base')
                self.metadata.fileseq_number=match.group('seq')
            elif tokenized[0].lower()=="#Comment".lower():
                # Re-join the comment tokens (leading space is kept from the
                # first concatenation).
                mycomment=''
                for i in range(1,len(tokenized)):
                    mycomment=mycomment+' '+tokenized[i]
                self.metadata.comment=mycomment
            elif tokenized[0].lower()=="#MonoSpacing".lower():
                self.metadata.monochromator_dspacing=float(tokenized[1])
            elif tokenized[0].lower()=="#AnaSpacing".lower():
                self.metadata.analyzer_dspacing=float(tokenized[1])
            elif tokenized[0].lower()=="#TemperatureUnits".lower():
                self.metadata.temperature_units=tokenized[1]
            elif tokenized[0].lower()=="#Orient".lower():
                # Two orientation vectors: (h1,k1,l1) and (h2,k2,l2), wrapped
                # in an Orientation object (declared elsewhere).
                h1=float(tokenized[1])
                k1=float(tokenized[2])
                l1=float(tokenized[3])
                h2=float(tokenized[4])
                k2=float(tokenized[5])
                l2=float(tokenized[6])
                self.metadata.orientation=Orientation([h1,k1,l1],[h2,k2,l2])
            elif tokenized[0].lower()=="#Lattice".lower():
                # Lattice constants a,b,c and angles alpha,beta,gamma, wrapped
                # in a Lattice object (declared elsewhere).
                a=float(tokenized[1])
                b=float(tokenized[2])
                c=float(tokenized[3])
                alpha=float(tokenized[4])
                beta=float(tokenized[5])
                gamma=float(tokenized[6])
                self.metadata.lattice=Lattice(a,b,c,alpha,beta,gamma)
            elif tokenized[0].lower()=="#AnalyzerDetectorMode".lower():
                # tokenized[1] is skipped here (and in the next two branches);
                # only the 3rd token is stored.
                self.metadata.analyzerdetectormode=tokenized[2].lower()
            elif tokenized[0].lower()=="#Reference".lower():
                self.metadata.count_type=tokenized[2].lower()
            elif tokenized[0].lower()=="#Signal".lower():
                self.metadata.signal=tokenized[2].lower()
            elif tokenized[0].lower()=="#AnalyzerDetectorDevicesOfInterest".lower():
                self.metadata.analyzerdetectordevicesofinterest=tokenized[1:]
            elif tokenized[0].lower()=="#AnalyzerDDGroup".lower():
                self.metadata.analyzerddgroup=tokenized[1:]
            elif tokenized[0].lower()=="#AnalyzerDoorDetectorGroup".lower():
                self.metadata.analyzerdoordetectorgroup=tokenized[1:]
            elif tokenized[0].lower()=="#AnalyzerSDGroup".lower():
                self.metadata.analyzersdgroup=tokenized[1:]
            elif tokenized[0].lower()=="#AnalyzerPSDGroup".lower():
                self.metadata.analyzerpsdgroup=tokenized[1:]
            elif tokenized[0].lower()=="#AnalyzerFocusMode".lower():
                self.metadata.analyzerfocusmode=tokenized[1]
            elif tokenized[0].lower()=="#MonoVertiFocus".lower():
                self.metadata.monovertifocus=tokenized[1]
            elif tokenized[0].lower()=="#MonoHorizFocus".lower():
                self.metadata.monohorizfocus=tokenized[1]
            elif tokenized[0].lower()=="#FixedE".lower():
                # Both tokens are optional; IndexError guards allow a bare
                # '#FixedE' line.
                try:
                    self.metadata.efixed=tokenized[1]
                except IndexError:
                    pass
                try:
                    self.metadata.ef=float(tokenized[2])
                except IndexError:
                    pass
            elif tokenized[0].lower()=="#ScanDescr".lower():
                # Rebuild the scan description string (trailing space
                # retained) and hand it to the scan parser.
                scanstr=''
                for i in range(1,len(tokenized)):
                    scanstr=scanstr+tokenized[i]+' '
                self.metadata.scan_description=scanstr
                #print 'scanstr',scanstr
                myparser=scanparser.scanparser(scanstr)
                self.metadata.varying=myparser.get_varying()
                self.metadata.ranges=myparser.ranges
                #print 'parsed'
                #if self.metadata['file_info']['filebase']!='fpx':
                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                    #CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                    #Better soln is to fix parser
                #print self.metadata['scan_description']['range']
            else:
                # Unrecognized header: store under its bare name.
                # (The second .lower() is redundant.)
                currfield=tokenized[0].lower().lower().strip('#')
                setattr(self.metadata,currfield,tokenized[1:])
            # NOTE(review): tokenized[0] is NOT lowercased in the next two
            # comparisons, so they only match a literal lowercase '#columns'
            # token -- confirm this matches the actual file format.
            if tokenized[0]!='#Columns'.lower():
                self.header.append(returnline[0])
            if tokenized[0]=='#Columns'.lower():
                self.get_columnmetadatas_bt7(tokenized)
                count =  0
                # self.lines may be absent or non-numeric; fall back to "no
                # limit".  NOTE(review): bare except also hides unrelated errors.
                try:
                    lines=int(self.lines)
                except:
                    lines=N.Inf
                # Data section: read rows until EOF or the declared line count.
                while 1:
                    lineStr = myfile.readline()
                    if not(lineStr):
                        break
                    if lineStr[0] != "#":
                        if count>=lines:
                            break
                        strippedLine=lineStr.rstrip()
                        tokenized=strippedLine.split()
                        #for field in self.data.__dict__.keys():
                        for i in range(len(tokenized)):
                            field=self.columnlist[i]
                            try:
                                if field.lower()=='time' and self.timestamp_flag==False:
                                    #timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                    #self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                    #timestamp_flag is True if the timestamp is already given in the file
                                    # Accumulate count times onto the epoch time
                                    # to synthesize a timestamp per row.
                                    timedelta=datetime.timedelta(seconds=float(tokenized[i]))
                                    self.data.timestamp.append(mktime((timeobj+timedelta).timetuple()))
                                    timeobj=timeobj+timedelta
                                self.data[field].append(float(tokenized[i]))
                            except ValueError:
                                # Non-numeric cell: keep the raw string.
                                self.data[field].append((tokenized[i]))
                        count=count+1
                myFlag=False
        return
Exemplo n.º 3
0
    def readbt7(self, myfile):
        """Parse an open BT7 data file *myfile* into this object's state.

        Header lines (tokens starting with '#') are dispatched one at a time
        into the nested ``self.metadata`` dictionaries, and every header line
        except the '#Columns' line is also kept verbatim in ``self.header``.
        When the '#Columns' line is seen, the column names are registered via
        ``self.get_columnmetadatas_bt7`` and the remaining non-'#' lines are
        read as whitespace-separated data rows into ``self.columndict``.

        NOTE(review): assumes ``get_tokenized_line`` returns the split tokens
        and passes the raw line back through the one-element *returnline*
        list -- confirm against its definition.
        """
        myFlag = True
        #self.metadata={}
        self.header = []
        # Out-parameter: get_tokenized_line stores the raw (untokenized) line
        # in returnline[0] so it can be appended to self.header below.
        returnline = ['']
        while myFlag:
            tokenized = get_tokenized_line(myfile, returnline=returnline)
            #print tokenized
            if tokenized == []:
                # Blank line: substitute a dummy token so tokenized[0] below
                # is always safe to read.
                tokenized = ['']
            if tokenized[0].lower() == "#Date".lower():
                # NOTE(review): deliberate no-op; the identical 'if'
                # immediately below does the real work, so this branch is
                # redundant but harmless here.
                pass
            if tokenized[0].lower() == "#Date".lower():
                # '#Date yyyy-mm-dd hh:mm:ss' -- fields may be wrapped in
                # single quotes, hence the strip("'").
                date_tokens = tokenized[1].split('-')
                self.metadata['timestamp'] = {}
                month = int(date_tokens[1].strip("\'"))
                day = int(date_tokens[2].strip("\'"))
                year = int(date_tokens[0].strip("\'"))
                stime = tokenized[2].strip("\'")
                stimetok = stime.split(':')
                hour = int(stimetok[0])
                minute = int(stimetok[1])
                second = int(stimetok[2])
                self.metadata['timestamp']['month'] = int(
                    date_tokens[1].strip("\'"))
                self.metadata['timestamp']['day'] = int(
                    date_tokens[2].strip("\'"))
                self.metadata['timestamp']['year'] = int(
                    date_tokens[0].strip("\'"))
                self.metadata['timestamp']['time'] = tokenized[2].strip("\'")
            elif tokenized[0].lower() == "#Epoch".lower():
                #timeobj=date.datatetime(year,month,day,hour,minute,second)
                Epoch = float(tokenized[1])
                #timeobj=mx.DateTime.DateTimeFromTicks(ticks=Epoch) #what I originally used
                # timeobj is reused far below to synthesize per-row timestamps
                # from the 'time' column.
                timeobj = datetime.datetime.fromtimestamp(Epoch)
                #print 'timeobj ',timeobj
                #print 'Epoch ', Epoch
                # NOTE(review): assumes '#Date' already created
                # self.metadata['timestamp'] -- a file with '#Epoch' before
                # '#Date' would raise KeyError here.
                self.metadata['timestamp']['epoch'] = Epoch  #timeobj
                #print self.metadata['timestamp']
            elif tokenized[0].lower() == "#InstrName".lower():
                self.metadata['file_info']['instrument'] = tokenized[1].lower()
            elif tokenized[0].lower() == "#ExptID".lower():
                self.metadata['file_info']['experiment_id'] = tokenized[
                    1].lower()
            elif tokenized[0].lower() == "#Fixed".lower():
                self.metadata['file_info']['fixed_devices'] = tokenized[1:]
            elif tokenized[0].lower() == "#Filename".lower():
                self.metadata['file_info']['filename'] = tokenized[1]
                #print 'filename ', tokenized[1]
                # Split the name into base / trailing sequence number /
                # extension; '.bt7' is appended so 'ext' always matches.
                pattern = re.compile(
                    '^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                match = pattern.match(tokenized[1] + '.bt7')
                # NOTE(review): the result of this dict(...) is discarded --
                # dead code kept for reference.
                dict((a, match.group(a) + "") for a in ['base', 'seq', 'ext'])
                #print 'filebase ',match.group('base')
                self.metadata['file_info']['filebase'] = match.group('base')
                self.metadata['file_info']['fileseq_number'] = match.group(
                    'seq')
            elif tokenized[0].lower() == "#Comment".lower():
                # Re-join the comment tokens (leading space is kept from the
                # first concatenation).
                mycomment = ''
                for i in range(1, len(tokenized)):
                    mycomment = mycomment + ' ' + tokenized[i]
                self.metadata['file_info']['comment'] = mycomment
            elif tokenized[0].lower() == "#MonoSpacing".lower():
                self.metadata['dspacing']['monochromator_dspacing'] = float(
                    tokenized[1])
            elif tokenized[0].lower() == "#AnaSpacing".lower():
                self.metadata['dspacing']['analyzer_dspacing'] = float(
                    tokenized[1])
            elif tokenized[0].lower() == "#TemperatureUnits".lower():
                self.metadata['temperature_info']['units'] = tokenized[1]
            elif tokenized[0].lower() == "#Orient".lower():
                # Two orientation vectors: (h,k,l) then (h,k,l).
                self.metadata['orient1']['h'] = float(tokenized[1])
                self.metadata['orient1']['k'] = float(tokenized[2])
                self.metadata['orient1']['l'] = float(tokenized[3])
                self.metadata['orient2']['h'] = float(tokenized[4])
                self.metadata['orient2']['k'] = float(tokenized[5])
                self.metadata['orient2']['l'] = float(tokenized[6])
            elif tokenized[0].lower() == "#Lattice".lower():
                # Lattice constants a,b,c and angles alpha,beta,gamma.
                self.metadata['lattice']['a'] = float(tokenized[1])
                self.metadata['lattice']['b'] = float(tokenized[2])
                self.metadata['lattice']['c'] = float(tokenized[3])
                self.metadata['lattice']['alpha'] = float(tokenized[4])
                self.metadata['lattice']['beta'] = float(tokenized[5])
                self.metadata['lattice']['gamma'] = float(tokenized[6])
            elif tokenized[0].lower() == "#AnalyzerDetectorMode".lower():
                # tokenized[1] is skipped here (and in the next two branches);
                # only the 3rd token is stored.
                self.metadata['count_info'][
                    'analyzerdetectormode'] = tokenized[2].lower()
            elif tokenized[0].lower() == "#Reference".lower():
                self.metadata['count_info']['count_type'] = tokenized[2].lower(
                )
            elif tokenized[0].lower() == "#Signal".lower():
                self.metadata['count_info']['signal'] = tokenized[2].lower()
            elif tokenized[0].lower(
            ) == "#AnalyzerDetectorDevicesOfInterest".lower():
                self.metadata['count_info']['AnalyzerDetectorDevicesOfInterest'
                                            .lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerDDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerDoorDetectorGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerDoorDetectorGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerSDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerSDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerPSDGroup".lower():
                self.metadata['count_info'][
                    'AnalyzerPSDGroup'.lower()] = tokenized[1:]
            elif tokenized[0].lower() == "#AnalyzerFocusMode".lower():
                self.metadata['count_info'][
                    'analyzerfocusmode'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoVertiFocus".lower():
                self.metadata['count_info'][
                    'monovertifocus'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#MonoHorizFocus".lower():
                self.metadata['count_info'][
                    'monohorizfocus'.lower()] = tokenized[1]
            elif tokenized[0].lower() == "#FixedE".lower():
                # Both tokens are optional; IndexError guards allow a bare
                # '#FixedE' line.
                try:
                    self.metadata['energy_info'][
                        'efixed'.lower()] = tokenized[1]
                except IndexError:
                    pass
                try:
                    self.metadata['energy_info']['ef'.lower()] = float(
                        tokenized[2])
                except IndexError:
                    pass
            elif tokenized[0].lower() == "#ScanDescr".lower():
                # Rebuild the scan description string (trailing space
                # retained) and hand it to the scan parser.
                scanstr = ''
                for i in range(1, len(tokenized)):
                    scanstr = scanstr + tokenized[i] + ' '
                self.metadata['file_info']['scan_description'] = scanstr
                #print 'scanstr',scanstr
                myparser = scanparser.scanparser(scanstr)
                self.metadata['count_info']['varying'] = myparser.get_varying()
                self.metadata['count_info']['ranges'] = myparser.ranges
                #print 'parsed'
                #if self.metadata['file_info']['filebase']!='fpx':
                #    self.additional_metadata['parsed_scandescription']=self.parse_scan(scanstr)

                #CAN'T SEEM TO PARSE fpx files, but if the filename is broken as in the last cycle, then how do I know?
                #Better soln is to fix parser
                #print self.metadata['scan_description']['range']
            else:
                # Unrecognized header: store under its bare name.
                # (The second .lower() is redundant.)
                currfield = tokenized[0].lower().lower().strip('#')
                self.additional_metadata[currfield] = (tokenized[1:])
            # NOTE(review): tokenized[0] is NOT lowercased in the next two
            # comparisons, so they only match a literal lowercase '#columns'
            # token -- confirm this matches the actual file format.
            if tokenized[0] != '#Columns'.lower():
                self.header.append(returnline[0])
            if tokenized[0] == '#Columns'.lower():
                self.get_columnmetadatas_bt7(tokenized)
                count = 0
                # self.lines may be absent or non-numeric; fall back to "no
                # limit".  NOTE(review): bare except also hides unrelated errors.
                try:
                    lines = int(self.lines)
                except:
                    lines = N.Inf
                # Data section: read rows until EOF or the declared line count.
                while 1:
                    lineStr = myfile.readline()
                    if not (lineStr):
                        break
                    if lineStr[0] != "#":
                        if count >= lines:
                            break
                        strippedLine = lineStr.rstrip()
                        tokenized = strippedLine.split()
                        for i in range(len(tokenized)):
                            field = self.columnlist[i]
                            try:
                                if field.lower(
                                ) == 'time' and self.timestamp_flag == False:
                                    #timedelta=mx.DateTime.DateTimeDelta(0,0,0,float(tokenized[i])) #orig
                                    #self.columndict['timestamp'].append((timeobj+timedelta).ticks()) #orig
                                    #timestamp_flag is True if the timestamp is already given in the file
                                    # Accumulate count times onto the epoch
                                    # time to synthesize a timestamp per row.
                                    timedelta = datetime.timedelta(
                                        seconds=float(tokenized[i]))
                                    self.columndict['timestamp'].append(
                                        mktime(
                                            (timeobj + timedelta).timetuple()))
                                    timeobj = timeobj + timedelta
                                self.columndict[field].append(
                                    float(tokenized[i]))
                            except ValueError:
                                # Non-numeric cell: keep the raw string.
                                self.columndict[field].append((tokenized[i]))
                        count = count + 1
                myFlag = False
        if len(self.columndict[self.columnlist[0]]) == 0:
            self.columndict = {}
            self.columnlist = []
            #This is a drastic step, but if the file is empty, then no point in even recording the placeholders
        #print self.columndict['Qx']
        #print self.columnlist
        return
Exemplo n.º 4
0
    def readbt7(self, myfile):
        """Read a BT7-format file from *myfile* into this object.

        Each '#'-prefixed header line is parsed into ``self.metadata`` and
        its raw text collected in ``self.header``.  The '#Columns' line
        triggers column setup (``get_columnmetadatas_bt7``) and the read of
        the numeric data rows that follow into ``self.data``.

        Parameters:
            myfile: open file-like object positioned at the start of the
                BT7 header.

        Returns:
            None.  Populates ``self.header``, ``self.metadata``,
            ``self.columnlist`` and ``self.data`` as side effects.

        NOTE(review): ``self.metadata`` is used both dict-style
        (``self.metadata['timestamp']``) and attribute-style
        (``self.metadata.epoch``) -- presumably a dict subclass that also
        supports attribute access; confirm against its definition.
        """
        myFlag = True
        self.header = []
        returnline = ['']
        while myFlag:
            tokenized = get_tokenized_line(myfile, returnline=returnline)
            if tokenized == []:
                tokenized = ['']
            key = tokenized[0].lower()
            if key == "#date".lower():
                # BUGFIX: this parsing previously sat in an 'elif' behind an
                # identical 'if ...: pass' branch, so it was unreachable and
                # the timestamp metadata was never filled in.
                # Line format: #Date 'YYYY-MM-DD' 'HH:MM:SS'
                date_tokens = tokenized[1].split('-')
                self.metadata['timestamp'] = {}
                self.metadata['timestamp']['month'] = int(
                    date_tokens[1].strip("\'"))
                self.metadata['timestamp']['day'] = int(
                    date_tokens[2].strip("\'"))
                self.metadata['timestamp']['year'] = int(
                    date_tokens[0].strip("\'"))
                self.metadata['timestamp']['time'] = tokenized[2].strip("\'")
            elif key == "#epoch".lower():
                # Absolute scan start time (POSIX seconds); used below to
                # reconstruct per-point timestamps from the 'time' column.
                Epoch = float(tokenized[1])
                timeobj = datetime.datetime.fromtimestamp(Epoch)
                self.metadata.epoch = Epoch
            elif key == "#instrname".lower():
                self.metadata.instrument = tokenized[1].lower()
            elif key == "#exptid".lower():
                self.metadata.experiment_id = tokenized[1].lower()
            elif key == "#fixed".lower():
                self.metadata.fixed_devices = tokenized[1:]
            elif key == "#filename".lower():
                self.metadata.filename = tokenized[1]
                # Split 'base12345.ext' into base name and sequence number;
                # '.bt7' is appended so names without an extension still
                # match the pattern.
                pattern = re.compile(
                    '^(?P<base>[^.]*?)(?P<seq>[0-9]*)(?P<ext>[.].*)?$')
                match = pattern.match(tokenized[1] + '.bt7')
                self.metadata.filebase = match.group('base')
                self.metadata.fileseq_number = match.group('seq')
            elif key == "#comment".lower():
                # Re-join the comment tokens (result keeps a leading space,
                # matching the original accumulation loop).
                self.metadata.comment = ''.join(
                    ' ' + tok for tok in tokenized[1:])
            elif key == "#monospacing".lower():
                self.metadata.monochromator_dspacing = float(tokenized[1])
            elif key == "#anaspacing".lower():
                self.metadata.analyzer_dspacing = float(tokenized[1])
            elif key == "#temperatureunits".lower():
                self.metadata.temperature_units = tokenized[1]
            elif key == "#orient".lower():
                # Two orientation vectors: (h1 k1 l1) and (h2 k2 l2).
                h1 = float(tokenized[1])
                k1 = float(tokenized[2])
                l1 = float(tokenized[3])
                h2 = float(tokenized[4])
                k2 = float(tokenized[5])
                l2 = float(tokenized[6])
                self.metadata.orientation = Orientation([h1, k1, l1],
                                                        [h2, k2, l2])
            elif key == "#lattice".lower():
                # Unit cell: a, b, c, alpha, beta, gamma.
                a = float(tokenized[1])
                b = float(tokenized[2])
                c = float(tokenized[3])
                alpha = float(tokenized[4])
                beta = float(tokenized[5])
                gamma = float(tokenized[6])
                self.metadata.lattice = Lattice(a, b, c, alpha, beta, gamma)
            elif key == "#analyzerdetectormode".lower():
                self.metadata.analyzerdetectormode = tokenized[2].lower()
            elif key == "#reference".lower():
                self.metadata.count_type = tokenized[2].lower()
            elif key == "#signal".lower():
                self.metadata.signal = tokenized[2].lower()
            elif key == "#analyzerdetectordevicesofinterest".lower():
                self.metadata.analyzerdetectordevicesofinterest = tokenized[1:]
            elif key == "#analyzerddgroup".lower():
                self.metadata.analyzerddgroup = tokenized[1:]
            elif key == "#analyzerdoordetectorgroup".lower():
                self.metadata.analyzerdoordetectorgroup = tokenized[1:]
            elif key == "#analyzersdgroup".lower():
                self.metadata.analyzersdgroup = tokenized[1:]
            elif key == "#analyzerpsdgroup".lower():
                self.metadata.analyzerpsdgroup = tokenized[1:]
            elif key == "#analyzerfocusmode".lower():
                self.metadata.analyzerfocusmode = tokenized[1]
            elif key == "#monovertifocus".lower():
                self.metadata.monovertifocus = tokenized[1]
            elif key == "#monohorizfocus".lower():
                self.metadata.monohorizfocus = tokenized[1]
            elif key == "#fixede".lower():
                # '#FixedE <device> <energy>' -- either field may be absent.
                try:
                    self.metadata.efixed = tokenized[1]
                except IndexError:
                    pass
                try:
                    self.metadata.ef = float(tokenized[2])
                except IndexError:
                    pass
            elif key == "#scandescr".lower():
                # Re-join the scan description (trailing space preserved,
                # matching the original accumulation loop) and parse it.
                scanstr = ''.join(tok + ' ' for tok in tokenized[1:])
                self.metadata.scan_description = scanstr
                myparser = scanparser.scanparser(scanstr)
                self.metadata.varying = myparser.get_varying()
                self.metadata.ranges = myparser.ranges
            else:
                # Unrecognized header tag: store it generically, keyed by
                # the tag name without the leading '#'.
                currfield = key.strip('#')
                setattr(self.metadata, currfield, tokenized[1:])
            # BUGFIX: the original compared the *raw* token against the
            # *lowercased* literal ('#Columns'.lower()), so an actual
            # '#Columns' line was never recognized and the data table was
            # never read.  Compare case-insensitively like every other
            # header tag above.
            if key != '#columns':
                self.header.append(returnline[0])
            else:
                self.get_columnmetadatas_bt7(tokenized)
                count = 0
                try:
                    lines = int(self.lines)
                except (AttributeError, TypeError, ValueError):
                    # No usable line count recorded -- read until EOF.
                    lines = float('inf')
                while 1:
                    lineStr = myfile.readline()
                    if not lineStr:
                        break
                    if lineStr[0] != "#":
                        if count >= lines:
                            break
                        row = lineStr.rstrip().split()
                        for i, cell in enumerate(row):
                            field = self.columnlist[i]
                            try:
                                if (field.lower() == 'time'
                                        and not self.timestamp_flag):
                                    # timestamp_flag is True when the file
                                    # already carries timestamps; otherwise
                                    # accumulate count times onto the epoch
                                    # (timeobj comes from the '#Epoch' line).
                                    timedelta = datetime.timedelta(
                                        seconds=float(cell))
                                    self.data.timestamp.append(
                                        mktime(
                                            (timeobj + timedelta).timetuple()))
                                    timeobj = timeobj + timedelta
                                self.data[field].append(float(cell))
                            except ValueError:
                                # Non-numeric cell: keep the raw token.
                                self.data[field].append(cell)
                        count = count + 1
                myFlag = False
        return