Example #1
def readfiles(filestrlist):
    Ilist=[]
    Ierrlist=[]
    Qlist=[]
    hkllist=[]
    thlist=[]
    mydatalist=[]
    for myfilestr in filestrlist:
        mydatareader=readncnr.datareader()
        mydata=mydatareader.readbuffer(myfilestr)
        filename=mydata.metadata['file_info']['filename']
        h=mydata.metadata['q_center']['h_center']
        k=mydata.metadata['q_center']['k_center']
        l=mydata.metadata['q_center']['l_center']
        hkl='('+str(h)+' '+str(k)+' '+str(l)+')'
        Q=np.array([h,k,l],'float64')
        print 'hkl',hkl
        print 'file', filename
        if len(Ilist)==0:
            mon0=mydata.metadata['count_info']['monitor']
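            # note: the metadata monitor read above is immediately replaced by a hard-coded reference value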
            mon0=82500

        mon=mydata.metadata['count_info']['monitor']
        I_orig=np.array(mydata.data['counts'],'Float64')
        #node.measured_data.data['counts_err']=N.array(node.measured_data.data['counts_err'],'Float64')
        I_norm=I_orig*mon0/mon
        I_norm_err=np.sqrt(I_orig)*mon0/mon
        Ilist.append(I_norm)
        Ierrlist.append(I_norm_err)
        Qlist.append(Q)
        hkllist.append(hkl)
        th=np.array(mydata.data['a3'])
        thlist.append(th)
        mydatalist.append(mydata)
    return Qlist, thlist,Ilist, Ierrlist,hkllist,mydatalist
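
A minimal usage sketch for the readfiles above (not part of the original source); it assumes numpy is imported as np, that a readncnr module providing datareader is importable under that name, and that the scan file names are placeholders.

import numpy as np
import readncnr   # assumed import name for the NCNR file reader used above

files = ['scan0001.bt7', 'scan0002.bt7']   # hypothetical scan files
Qlist, thlist, Ilist, Ierrlist, hkllist, mydatalist = readfiles(files)
for hkl, th, I in zip(hkllist, thlist, Ilist):
    # one rocking scan per file: a3 angle versus monitor-normalized counts
    print('%s: %d points, peak %.1f counts' % (hkl, len(th), I.max()))
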
Example #2
def readfiles(filestrlist):
    Ilist = []
    Ierrlist = []
    Qlist = []
    hkllist = []
    thlist = []
    mydatalist = []
    for myfilestr in filestrlist:
        mydatareader = readncnr.datareader()
        mydata = mydatareader.readbuffer(myfilestr)
        filename = mydata.metadata['file_info']['filename']
        h = mydata.metadata['q_center']['h_center']
        k = mydata.metadata['q_center']['k_center']
        l = mydata.metadata['q_center']['l_center']
        hkl = '(' + str(h) + ' ' + str(k) + ' ' + str(l) + ')'
        Q = np.array([h, k, l], 'float64')
        print 'hkl', hkl
        print 'file', filename
        if len(Ilist) == 0:
            mon0 = mydata.metadata['count_info']['monitor']
            mon0 = 82500

        mon = mydata.metadata['count_info']['monitor']
        I_orig = np.array(mydata.data['counts'], 'Float64')
        #node.measured_data.data['counts_err']=N.array(node.measured_data.data['counts_err'],'Float64')
        I_norm = I_orig * mon0 / mon
        I_norm_err = np.sqrt(I_orig) * mon0 / mon
        Ilist.append(I_norm)
        Ierrlist.append(I_norm_err)
        Qlist.append(Q)
        hkllist.append(hkl)
        th = np.array(mydata.data['a3'])
        thlist.append(th)
        mydatalist.append(mydata)
    return Qlist, thlist, Ilist, Ierrlist, hkllist, mydatalist
def readfiles(flist,tol=1e-4):
    mydatareader=readncnr.datareader()
    H=[]#N.array([])
    I=[]#N.array([])
    Ierr=[]#N.array([])
    monlist=[]
    count=0
    myfirstdata=mydatareader.readbuffer(flist[0])
    mon0=myfirstdata.metadata['count_info']['monitor']
    print 'mon0',mon0
    qtree=Qtree()
    Qtree.mon0=mon0
    #flist=flist[0:12]
    for currfile in flist:
        #print 'MAIN READ',currfile
        mydata=mydatareader.readbuffer(currfile)
        mydata.data['counts_err']=N.sqrt(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        mydata.data['counts']=N.array(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        mydata.metadata['count_info']['monitor']=mon0
        qtree.addnode(copy.deepcopy(mydata))
        #print 'readloop'
        #print 'q in loop', qtree.qlist[0].q

    for qnode in qtree.qlist:
        print qnode.q['h_center'],qnode.q['k_center'],qnode.q['l_center'],len(qnode.th),qnode.th

    #print qtree.qlist
    return qtree
Example #4
def readfiles(filestrlist):
    Ilist = []
    Ierrlist = []
    Qlist = []
    hkllist = []
    thlist = []
    mydatalist = []
    for myfilestr in filestrlist:
        mydatareader = readncnr.datareader()
        mydata = mydatareader.readbuffer(myfilestr)
        filename = mydata.metadata["file_info"]["filename"]
        h = mydata.metadata["q_center"]["h_center"]
        k = mydata.metadata["q_center"]["k_center"]
        l = mydata.metadata["q_center"]["l_center"]
        hkl = "(" + str(h) + " " + str(k) + " " + str(l) + ")"
        Q = np.array([h, k, l], "float64")
        print "hkl", hkl
        print "file", filename
        if len(Ilist) == 0:
            mon0 = mydata.metadata["count_info"]["monitor"]
            mon0 = 82500

        mon = mydata.metadata["count_info"]["monitor"]
        I_orig = np.array(mydata.data["counts"], "Float64")
        # node.measured_data.data['counts_err']=N.array(node.measured_data.data['counts_err'],'Float64')
        I_norm = I_orig * mon0 / mon
        I_norm_err = np.sqrt(I_orig) * mon0 / mon
        Ilist.append(I_norm)
        Ierrlist.append(I_norm_err)
        Qlist.append(Q)
        hkllist.append(hkl)
        th = np.array(mydata.data["a3"])
        thlist.append(th)
        mydatalist.append(mydata)
    return Qlist, thlist, Ilist, Ierrlist, hkllist, mydatalist
Example #5
def readfiles(flist, tol=1e-4):
    mydatareader = readncnr.datareader()
    H = []  #N.array([])
    I = []  #N.array([])
    Ierr = []  #N.array([])
    monlist = []
    count = 0
    myfirstdata = mydatareader.readbuffer(flist[0])
    mon0 = myfirstdata.metadata['count_info']['monitor']
    print 'mon0', mon0

    #flist=flist[0:12]
    datalist = []
    for currfile in flist:
        #print 'MAIN READ',currfile
        mydata = mydatareader.readbuffer(currfile)
        mydata.data['counts_err'] = N.sqrt(
            mydata.data['counts']
        ) * mon0 / mydata.metadata['count_info']['monitor']
        mydata.data['counts'] = N.array(
            mydata.data['counts']
        ) * mon0 / mydata.metadata['count_info']['monitor']
        datalist.append(mydata)

    #print qtree.qlist
    return datalist
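
A short sketch (not from the original source) of iterating over the datalist returned above; the file names are placeholders and readncnr is assumed to be importable.

datalist = readfiles(['scan0001.bt7', 'scan0002.bt7'])   # hypothetical files
for mydata in datalist:
    name = mydata.metadata['file_info']['filename']
    npts = len(mydata.data['counts'])
    # counts and counts_err were normalized in place to the first file's monitor
    print('%s: %d points' % (name, npts))
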
def readfiles(flist,tol=1e-4):
    mydatareader=readncnr.datareader()
    H=[]#N.array([])
    I=[]#N.array([])
    Ierr=[]#N.array([])
    monlist=[]
    count=0
    myfirstdata=mydatareader.readbuffer(flist[0])
    mon0=myfirstdata.metadata['count_info']['monitor']
    print 'mon0',mon0
    qtree=Qtree()
    Qtree.mon0=mon0
    #flist=flist[0:12]
    for currfile in flist:
        #print 'MAIN READ',currfile
        mydata=mydatareader.readbuffer(currfile)
        mydata.data['counts_err']=N.sqrt(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        mydata.data['counts']=N.array(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        mydata.metadata['count_info']['monitor']=mon0
        qtree.addnode(copy.deepcopy(mydata))
        #print 'readloop'
        #print 'q in loop', qtree.qlist[0].q

    for qnode in qtree.qlist:
        print qnode.q['h_center'],qnode.q['k_center'],qnode.q['l_center'],len(qnode.th),qnode.th

    #print qtree.qlist
    return qtree
Example #7
def read_data(myfilestr):
    mydatareader = readncnr.datareader()
    mydata = mydatareader.readbuffer(myfilestr)
    qz = N.array(mydata.data['qx'])
    I = N.array(mydata.data['detector'], 'float64')
    Ierr = N.sqrt(I)
    mon = mydata.data['monitor'][0]
    return qz, I, Ierr, mon
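
A brief sketch (not from the original source) showing how the values returned by read_data above might be normalized per monitor count; the file name is a placeholder.

qz, I, Ierr, mon = read_data('findpeak001.bt7')   # hypothetical file
I_norm = I / mon          # counts per monitor count
Ierr_norm = Ierr / mon
for x, y, e in zip(qz, I_norm, Ierr_norm):
    print('%10.4f  %12.5g +/- %.3g' % (x, y, e))
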
Example #8
def read_fpx(myfilestr, key='smplgfrot'):
    mydatareader = readncnr.datareader()
    mydata = mydatareader.readbuffer(myfilestr)
    qz = N.array(mydata.data[key])
    I = N.array(mydata.data['detector'], 'float64')
    Ierr = N.sqrt(I)
    mon = mydata.data['monitor'][0]
    return qz, I, Ierr, mon
Example #9
def read_fpx(myfilestr, key="smplgfrot"):
    mydatareader = readncnr.datareader()
    mydata = mydatareader.readbuffer(myfilestr)
    qz = N.array(mydata.data[key])
    I = N.array(mydata.data["detector"], "float64")
    Ierr = N.sqrt(I)
    mon = mydata.data["monitor"][0]
    return qz, I, Ierr, mon
Example #10
def read_data(myfilestr):
    mydatareader = readncnr.datareader()
    mydata = mydatareader.readbuffer(myfilestr)
    qz = N.array(mydata.data["qx"])
    I = N.array(mydata.data["detector"], "float64")
    Ierr = N.sqrt(I)
    mon = mydata.data["monitor"][0]
    return qz, I, Ierr, mon
def read_fpx(myfilestr,key='smplgfrot'):
    mydatareader=readncnr.datareader()
    mydata=mydatareader.readbuffer(myfilestr)
    qz=N.array(mydata.data[key])
    I=N.array(mydata.data['detector'],'float64')
    Ierr=N.sqrt(I)
    mon=mydata.data['monitor'][0]
    return qz,I,Ierr,mon
def read_data(myfilestr):
    mydatareader=readncnr.datareader()
    mydata=mydatareader.readbuffer(myfilestr)
    qz=N.array(mydata.data['qx'])
    I=N.array(mydata.data['detector'],'float64')
    Ierr=N.sqrt(I)
    mon=mydata.data['monitor'][0]
    return qz,I,Ierr,mon
Example #13
def readfiles(flist, mon0=None):
    mydatareader = readncnr.datareader()
    #Qx=N.array([])
    #Qy=N.array([])
    #Qz=N.array([])
    #tth=N.array([])
    #Counts=N.array([])
    #T=N.array([])
    tth = []
    Counts = []
    Counts_err = []
    T = []
    i = 0
    for currfile in flist:
        #print currfile
        mydata = mydatareader.readbuffer(currfile)
        #print mydata.data.keys()
        if i == 0:
            if mon0 == None:
                mon0 = mydata.metadata['count_info']['monitor']
        #a=mydata.metadata['lattice']['a']
        #b=mydata.metadata['lattice']['b']
        #c=mydata.metadata['lattice']['c']
        mon = mydata.metadata['count_info']['monitor']
        wl = 2.35916
        #c=2*wl/N.sin(N.deg2rad(N.array(tth))/2)
        #tth.append(2*wl/N.sin(N.deg2rad(N.array(mydata.data['a4'])/2)))
        tth.append(N.array(mydata.data['a4']))
        Counts.append(N.array(mydata.data['counts']) * mon0 / mon)
        Counts_err.append(N.sqrt(N.array(mydata.data['counts'])) * mon0 / mon)
        #T.append(N.array(mydata.data['temp'])) #What we probably want
        Tave = N.array(mydata.data['temp']).mean()
        T.append(N.ones(tth[i].size) * Tave)
        #Qx=N.concatenate((Qx,N.array(mydata.data['qx'])*2*pi/a))
        #Qy=N.concatenate((Qy,N.array(mydata.data['qy'])*2*pi/b))
        #Qz=N.concatenate((Qz,N.array(mydata.data['qz'])*2*pi/c))
        #tth=N.concatenate((tth,N.array(mydata.data['a4'])))
        #Counts=N.concatenate((Counts,N.array(mydata.data['counts'])*mon0/mon))
        #T=N.concatenate((T,N.array(mydata.data['temp'])))

        i = i + 1
    #xa,ya,za=prep_data2(Qx,Qy,Counts);
    #print 'xa',xa.min(),xa.max()
    #print 'qx',Qx.min(),Qx.max()
    #x,y,z=grid(Qx,Qz,Counts)
    return tth, T, Counts, Counts_err, mon0
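
As an illustration (not part of the original source), the per-scan lists returned above can be flattened into single arrays, e.g. for a counts-versus-temperature plot; the file names are placeholders.

tth, T, Counts, Counts_err, mon0 = readfiles(['th2th001.bt9', 'th2th002.bt9'])
tth_all = N.concatenate(tth)
T_all = N.concatenate(T)
Counts_all = N.concatenate(Counts)
Counts_err_all = N.concatenate(Counts_err)
print('normalized to monitor %g; %d points total' % (mon0, Counts_all.size))
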
Example #14
def readfiles(flist,mon0=None):
    mydatareader=readncnr.datareader()
    #Qx=N.array([])
    #Qy=N.array([])
    #Qz=N.array([])
    #tth=N.array([])
    #Counts=N.array([])
    #T=N.array([])
    tth=[]
    Counts=[]
    Counts_err=[]
    T=[]
    i=0
    for currfile in flist:
        #print currfile
        mydata=mydatareader.readbuffer(currfile)
        #print mydata.data.keys()
        if i==0:
            if mon0==None:
                mon0=mydata.metadata['count_info']['monitor']
        #a=mydata.metadata['lattice']['a']
        #b=mydata.metadata['lattice']['b']
        #c=mydata.metadata['lattice']['c']
        mon=mydata.metadata['count_info']['monitor']
        wl=2.35916
        #c=2*wl/N.sin(N.deg2rad(N.array(tth))/2)
        #tth.append(2*wl/N.sin(N.deg2rad(N.array(mydata.data['a4'])/2)))
        tth.append(N.array(mydata.data['a4']))
        Counts.append(N.array(mydata.data['counts'])*mon0/mon)
        Counts_err.append(N.sqrt(N.array(mydata.data['counts']))*mon0/mon)
        #T.append(N.array(mydata.data['temp'])) #What we probably want
        Tave=N.array(mydata.data['temp']).mean()
        T.append(N.ones(tth[i].size)*Tave)
        #Qx=N.concatenate((Qx,N.array(mydata.data['qx'])*2*pi/a))
        #Qy=N.concatenate((Qy,N.array(mydata.data['qy'])*2*pi/b))
        #Qz=N.concatenate((Qz,N.array(mydata.data['qz'])*2*pi/c))
        #tth=N.concatenate((tth,N.array(mydata.data['a4'])))
        #Counts=N.concatenate((Counts,N.array(mydata.data['counts'])*mon0/mon))
        #T=N.concatenate((T,N.array(mydata.data['temp'])))
        
        i=i+1
    #xa,ya,za=prep_data2(Qx,Qy,Counts);
    #print 'xa',xa.min(),xa.max()
    #print 'qx',Qx.min(),Qx.max()
        #x,y,z=grid(Qx,Qz,Counts)
    return tth,T,Counts,Counts_err,mon0
Example #15
def readfiles(flist,tol=1e-4):
    mydatareader=readncnr.datareader()
    H=[]#N.array([])
    I=[]#N.array([])
    Ierr=[]#N.array([])
    monlist=[]
    count=0
    myfirstdata=mydatareader.readbuffer(flist[0])
    mon0=myfirstdata.metadata['count_info']['monitor']
    print 'mon0',mon0

    #flist=flist[0:12]
    datalist=[]
    for currfile in flist:
        #print 'MAIN READ',currfile
        mydata=mydatareader.readbuffer(currfile)
        mydata.data['counts_err']=N.sqrt(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        mydata.data['counts']=N.array(mydata.data['counts'])*mon0/mydata.metadata['count_info']['monitor']
        datalist.append(mydata)
    
    #print qtree.qlist
    return datalist
def readfiles(flist):
    mydatareader=readncnr.datareader()
    Qx=N.array([])
    Qy=N.array([])
    Qz=N.array([])
    Counts=N.array([])
    for currfile in flist:
        #print currfile
        mydata=mydatareader.readbuffer(currfile)
        #print mydata.data.keys()
        a=mydata.metadata['lattice']['a']
        b=mydata.metadata['lattice']['b']
        c=mydata.metadata['lattice']['c']
        Qx=N.concatenate((Qx,N.array(mydata.data['qx'])*2*pi/a))
        Qy=N.concatenate((Qy,N.array(mydata.data['qy'])*2*pi/b))
        Qz=N.concatenate((Qz,N.array(mydata.data['qz'])*2*pi/c))
        Counts=N.concatenate((Counts,N.array(mydata.data['counts'])))
    #xa,ya,za=prep_data2(Qx,Qy,Counts);
    #print 'xa',xa.min(),xa.max()
    #print 'qx',Qx.min(),Qx.max()
        #x,y,z=grid(Qx,Qz,Counts)
    return Qx,Qz,Counts
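
A small sketch (not from the original source) of inspecting the concatenated arrays returned above before gridding; the file names are placeholders.

Qx, Qz, Counts = readfiles(['mesh0001.bt7', 'mesh0002.bt7'])   # hypothetical files
print('Qx range %.3f to %.3f, Qz range %.3f to %.3f, %d points'
      % (Qx.min(), Qx.max(), Qz.min(), Qz.max(), Counts.size))
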
Example #17
def readmeshfiles(mydirectory,myfilebase,myend):
    myfilebaseglob=myfilebase+'*.'+myend
    print myfilebaseglob
    flist = SU.ffind(mydirectory, shellglobs=(myfilebaseglob,))
    #SU.printr(flist)
    mydatareader=readncnr.datareader()
    temp1=N.array([])
    temp2=N.array([])
    temp3=N.array([])
    Counts1=N.array([])
    Counts2=N.array([])
    Counts3=N.array([])
    errors1=N.array([])
    errors2=N.array([])
    errors3=N.array([])
    mydata=mydatareader.readbuffer(flist[0])
    mon0=mydata.metadata['count_info']['monitor']
    for currfile in flist:
        #print currfile
        mydata=mydatareader.readbuffer(currfile)
        #print mydata.data.keys()
        #print mydata.__dict__
        #print mydata.metadata.keys()
        qcenter=mydata.metadata['q_center']
        hc,kc,lc=qcenter['h_center'],qcenter['k_center'],qcenter['l_center']
        mon=mydata.metadata['count_info']['monitor']
        curr_counts=N.array(mydata.data['counts'])
        curr_error=N.sqrt(curr_counts)*mon0/mon
        curr_counts=curr_counts*mon0/mon
        curr_temp=N.array(mydata.data['temp'])
        if hc==1.004:
            Counts1=N.concatenate((Counts1,curr_counts))
            temp1=N.concatenate((temp1,curr_temp))
            errors1=N.concatenate((errors1,curr_error))

        elif hc==1.036:
            Counts2=N.concatenate((Counts2,curr_counts))
            temp2=N.concatenate((temp2,curr_temp))
            errors2=N.concatenate((errors2,curr_error))
        elif hc==2.0:
            Counts3=N.concatenate((Counts3,curr_counts))
            temp3=N.concatenate((temp3,curr_temp))
            errors3=N.concatenate((errors3,curr_error))
        #print
        #Qx=N.concatenate((Qx,N.array(mydata.data['qx'])))
        #Qy=N.concatenate((Qy,N.array(mydata.data['qy'])))
        #Qz=N.concatenate((Qz,N.array(mydata.data['qz'])))

        #Counts=N.concatenate((Counts,N.array(mydata.data['counts'])))
    #print Qx
    #print Qy
    #print Counts
    dataset={}
    dataset['Counts1']=Counts1
    dataset['Counts2']=Counts2
    dataset['Counts3']=Counts3
    dataset['temp1']=temp1
    dataset['temp2']=temp2
    dataset['temp3']=temp3
    dataset['errors1']=errors1
    dataset['errors2']=errors2
    dataset['errors3']=errors3

    return dataset
    # dead code: this fragment follows the return above and is never reached;
    # it sketches a per-point analyzer resolution correction
    for i in range(len(data.get(Ei))):
        thetaA = N.radians(data.get(a6)[i]/2.0)
        arg = N.arcsin(N.pi/(0.69472*dA*N.sqrt(N.double(data.get(Ei)[i]))))
        norm = (Ei**1.5) / N.tan(arg)
        cotThetaA = 1/N.tan(thetaA)
        resCor = norm/(cotThetaA * (Ef**1.5))

    N.exp((ki/kf) ** 3) * (1/N.tan(thetaM)) / N.tan(thetaA)


if __name__=="__main__":
    #Currently for testing purposes only
    #coeff = establish_correction_coefficients("monitor_correction_coordinates.txt")
    #print coeff

    #metadata, data = readFile("bt7 sample data.txt")
    #print metadata
    #print data

    myfilestr=r'EscanQQ7HorNSF91831.bt7'
    mydatareader=R.datareader()
    mydata=mydatareader.readbuffer(myfilestr)
    #print mydata.data.keys()
    #print mydata.data.get('lattice')
    setup_data_structure(mydata)
    #print mydata.data
    #print mydata.metadata

if __name__=='__main__':
    myfilestr=r'C:\Ce2RhIn8\Mar10_2009\magsc035.bt9'
    #myfilestr=r'c:\bifeo3xtal\jan8_2008\9175\fpx53418.bt7'
    #myfilestr=r'c:\13165\13165\data\MagHigh56784.bt7'
    #myfilestr=r'c:\13176\data\CeOFeAs57255.bt7.out'
    mydatareader=readncnr.datareader()
    mydata=mydatareader.readbuffer(myfilestr)
#    print mydata.__dict__
#    print mydata.additional_metadata
#    print mydata.metadata
#    print mydata.metadata['file_info']['scantype']
#    print mydata.metadata['collimations']
#    print mydata.metadata['dspacing']['monochromator_dspacing']
#    print mydata.metadata['dspacing']['analyzer_dspacing']
#    print mydata.metadata['lattice']['a']
#    print mydata.metadata['lattice']['b']
#    print mydata.metadata['lattice']['c']
#    print mydata.metadata['lattice']['alpha']
#    print mydata.metadata['lattice']['beta']
#    print mydata.metadata['lattice']['gamma']
#    print mydata.metadata['motor3']['step']
Example #20
    def __init__(self, filename):
        self.data = []
        self.standards = read_standards()
        f = file(filename, 'r')
        t = []
        template = {}
        for s in self.standards:
            template[s] = [None]

        s = f.read()
        f.close()
        f = file(filename, 'r')

        if s[0] == '#':
            print 'hey'
            for lines in f:
                lines = lines.split()
                if len(lines) == 0:
                    continue
                if lines[0] == '#Columns':
                    t.append(lines[1:])
                    break
                else:
                    if lines[0][1:] in self.standards:
                        pass
                    else:
                        self.standards[lines[0][1:]] = Standard(
                            metadata=True).__dict__
                    template[lines[0][1:]] = lines[1:] or [None]
            for lines in f:
                if lines[0] == '#': break
                t.append(lines.split())
            f.close()

            for s in t[0]:
                if s in self.standards:
                    self.standards[s]['metadata'] = False
                else:
                    self.standards[s] = Standard().__dict__
                    template[s] = [None]
            for i in range(1, len(t)):
                self.data.append(copy.copy(template))
                for j in range(len(t[0])):
                    try:
                        self.data[i - 1][t[0][j]] = [float(t[i][j])]
                    except ValueError:
                        self.data[i - 1][t[0][j]] = [t[i][j]]
            self.detectors = set(['Detector', '_Detector'])
            for s in self.standards:
                if s[:8] == 'Analyzer' and s[-5:] == 'Group':
                    self.detectors = self.detectors | set(self.data[0][s])
                    for d in self.data[0][s]:
                        self.detectors.add('_' + d)
            for d in self.detectors:
                if d in self.standards and d[0] != '_':
                    if '_' + d not in self.standards:
                        self.standards['_' + d] = Standard().__dict__
                        for p in self.data:
                            p['_' + d] = p[d]

        elif ('fpx' in filename or 'fpt' in filename) and s[0] != '#':
            fields = []
            date = []
            first = True
            for lines in f:
                if first:
                    first = False
                    spl = lines.split()
                    for i in range(len(spl)):
                        try:
                            if int(spl[i]) in range(1, 7):
                                fields.append('A' + spl[i])
                        except:
                            if spl[i] == 'Intensity':
                                fields.append('Detector')
                                date = spl[i + 1:]
                                break
                    self.standards = {'Date': Standard(metadata=True).__dict__}
                    for s in fields:
                        self.standards[s] = Standard(metadata=False).__dict__
                else:
                    newdata = {}
                    spl = lines.split()
                    for i in range(len(spl)):

                        try:
                            newdata[fields[i]] = [float(spl[i])]
                        except:
                            pass
                    newdata['Date'] = date
                    self.data.append(newdata)
        else:
            reader = readncnr3.datareader()
            mydata = reader.readbuffer(filename)
            try:
                template['Collimations'] = [
                    mydata.metadata['collimations']['coll1'],
                    mydata.metadata['collimations']['coll2'],
                    mydata.metadata['collimations']['coll3'],
                    mydata.metadata['collimations']['coll4']
                ]
                self.standards['Collimations'] = Standard(
                    metadata=True).__dict__
            except:
                pass
            try:
                template['Lattice'] = [
                    mydata.metadata['lattice']['a'],
                    mydata.metadata['lattice']['b'],
                    mydata.metadata['lattice']['c'],
                    mydata.metadata['lattice']['alpha'],
                    mydata.metadata['lattice']['beta'],
                    mydata.metadata['lattice']['gamma']
                ]
            except:
                pass

            try:
                for sl in mydata.metadata['count_info']:
                    inStandards = False
                    for s in self.standards:
                        if ''.join(s.lower().split()) == ''.join(sl.split('_')):
                            inStandards = True
                            template[s] = [mydata.metadata['count_info'][s]]
                    if not inStandards:
                        s = sl.split('_')
                        for i in range(len(s)):
                            s[i] = s[i].capitalize()
                        s = ''.join(s)
                        template[s] = [mydata.metadata['count_info'][s]]
                        self.standards[s] = Standard(metadata=True).__dict__
            except:
                pass
            try:
                template['MonoSpacing'] = [
                    mydata.metadata['dspacing']['monochromator_dspacing']
                ]
                template['AnaSpacing'] = [
                    mydata.metadata['dspacing']['analyzer_dspacing']
                ]
            except:
                pass
            try:
                template['FixedE'] = [
                    mydata.metadata['energy_info']['efixed'],
                    mydata.metadata['energy_info']['ef']
                ]
            except:
                pass

            try:

                for sl in mydata.metadata['file_info']:
                    inStandards = False
                    for s in self.standards:
                        if '_'.join(s.lower().split()) == sl:
                            inStandards = True
                            template[s] = [mydata.metadata['file_info'][s]]
                    if not inStandards:
                        s = sl.split('_')
                        for i in range(len(s)):
                            s[i] = s[i].capitalize()
                        s = ' '.join(s)
                        template[s] = [mydata.metadata['file_info'][s]]
                        self.standards[s] = Standard(metadata=True).__dict__
            except:
                pass
            try:
                template['HField'] = [
                    mydata.metadata['magnetic_field']['hfield']
                ]
                self.standards['HField'] = Standard(metadata=True).__dict__
            except:
                pass
            try:
                template['AnalyzerMosaic'] = [
                    mydata.metadata['mosaic']['mosaic_analyzer']
                ]
                self.standards['AnalyzerMosaic'] = Standard(
                    metadata=True).__dict__
                template['MonochromatorMosaic'] = [
                    mydata.metadata['mosaic']['mosaic_monochromator']
                ]
                self.standards['MonochromatorMosaic'] = Standard(
                    metadata=True).__dict__
                template['SampleMosaic'] = [
                    mydata.metadata['mosaic']['mosaic_sample']
                ]
                self.standards['SampleMosaic'] = Standard(
                    metadata=True).__dict__
            except:
                pass
            try:
                template['Orient'] = [
                    mydata.metadata['orient1']['h'],
                    mydata.metadata['orient1']['k'],
                    mydata.metadata['orient1']['l'],
                    mydata.metadata['orient2']['h'],
                    mydata.metadata['orient2']['k'],
                    mydata.metadata['orient2']['l']
                ]
            except:
                pass
            areMotors = True
            motors = []
            try:
                for i in range(6):
                    motors.append([
                        mydata.metadata['motor' + str(1 + i)]['start'],
                        mydata.metadata['motor' + str(1 + i)]['step']
                    ])
            except:
                #print mydata.metadata
                areMotors = False
            try:
                for p in mydata.data:
                    pcorr = p.split('_')
                    for i in range(len(pcorr)):
                        pcorr[i] = pcorr[i].capitalize()
                    pcorr = ''.join(pcorr)

                    if pcorr not in self.standards and pcorr not in [
                            'Qx', 'Qy', 'Qz', 'Counts'
                    ]:
                        self.standards[pcorr] = Standard(
                            metadata=False).__dict__
                count = 0
                while (True):
                    print 'hey'
                    try:
                        newdata = copy.copy(template)
                        for p in mydata.data:
                            pcorr = p.split('_')
                            for i in range(len(pcorr)):
                                pcorr[i] = pcorr[i].capitalize()
                            pcorr = ''.join(pcorr)
                            if pcorr[0] == 'Q':
                                if pcorr[1] == 'x':
                                    pcorr = 'QX'
                                elif pcorr[1] == 'y':
                                    pcorr = 'QY'
                                elif pcorr[1] == 'z':
                                    pcorr = 'QZ'
                            if pcorr == 'Counts':
                                pcorr = 'Detector'
                            newdata[pcorr] = [mydata.data[p][count]]

                        if (areMotors):
                            for i in range(len(motors)):
                                newdata['A' + str(i + 1)] = [
                                    motors[i][0] + motors[i][1] * count
                                ]
                        self.data.append(newdata)
                        count += 1
                    except:
                        break

            except:
                pass
Example #21
def readmeshfiles(mydirectory, myfilebase, myend):
    myfilebaseglob = myfilebase + '*.' + myend
    print myfilebaseglob
    flist = SU.ffind(mydirectory, shellglobs=(myfilebaseglob, ))
    #SU.printr(flist)
    mydatareader = readncnr.datareader()
    temp1 = N.array([])
    temp2 = N.array([])
    temp3 = N.array([])
    Counts1 = N.array([])
    Counts2 = N.array([])
    Counts3 = N.array([])
    errors1 = N.array([])
    errors2 = N.array([])
    errors3 = N.array([])
    mydata = mydatareader.readbuffer(flist[0])
    mon0 = mydata.metadata['count_info']['monitor']
    for currfile in flist:
        #print currfile
        mydata = mydatareader.readbuffer(currfile)
        #print mydata.data.keys()
        #print mydata.__dict__
        #print mydata.metadata.keys()
        qcenter = mydata.metadata['q_center']
        hc, kc, lc = qcenter['h_center'], qcenter['k_center'], qcenter[
            'l_center']
        mon = mydata.metadata['count_info']['monitor']
        curr_counts = N.array(mydata.data['counts'])
        curr_error = N.sqrt(curr_counts) * mon0 / mon
        curr_counts = curr_counts * mon0 / mon
        curr_temp = N.array(mydata.data['temp'])
        if hc == 1.004:
            Counts1 = N.concatenate((Counts1, curr_counts))
            temp1 = N.concatenate((temp1, curr_temp))
            errors1 = N.concatenate((errors1, curr_error))

        elif hc == 1.036:
            Counts2 = N.concatenate((Counts2, curr_counts))
            temp2 = N.concatenate((temp2, curr_temp))
            errors2 = N.concatenate((errors2, curr_error))
        elif hc == 2.0:
            Counts3 = N.concatenate((Counts3, curr_counts))
            temp3 = N.concatenate((temp3, curr_temp))
            errors3 = N.concatenate((errors3, curr_error))
        #print
        #Qx=N.concatenate((Qx,N.array(mydata.data['qx'])))
        #Qy=N.concatenate((Qy,N.array(mydata.data['qy'])))
        #Qz=N.concatenate((Qz,N.array(mydata.data['qz'])))

        #Counts=N.concatenate((Counts,N.array(mydata.data['counts'])))
    #print Qx
    #print Qy
    #print Counts
    dataset = {}
    dataset['Counts1'] = Counts1
    dataset['Counts2'] = Counts2
    dataset['Counts3'] = Counts3
    dataset['temp1'] = temp1
    dataset['temp2'] = temp2
    dataset['temp3'] = temp3
    dataset['errors1'] = errors1
    dataset['errors2'] = errors2
    dataset['errors3'] = errors3

    return dataset
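
A brief sketch (assumptions only, not from the original source) of consuming the dictionary returned by readmeshfiles above; the directory, file base, and extension are placeholders.

dataset = readmeshfiles(r'c:\mydata', 'mesh', 'bt9')   # hypothetical arguments
for tag in ('1', '2', '3'):
    T = dataset['temp' + tag]
    C = dataset['Counts' + tag]
    if T.size:
        # one series per h_center selected inside readmeshfiles
        print('series %s: %d points, T %.2f to %.2f, peak %.1f counts' % (tag, T.size, T.min(), T.max(), C.max()))
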
Example #22
    def __init__(self,filename):
        self.data = []
        self.standards = read_standards()
        f = file(filename, 'r')
        t = []
        template = {}
        for s in self.standards:
            template[s] = [None]

        s = f.read()
        f.close()
        f = file(filename, 'r')

        if s[0] == '#':
            print 'hey'
            for lines in f:
                lines = lines.split()
                if len(lines) == 0:
                    continue
                if lines[0] == '#Columns':
                    t.append(lines[1:])
                    break
                else:
                    if lines[0][1:] in self.standards:
                        pass
                    else:
                        self.standards[lines[0][1:]] = Standard(metadata = True).__dict__
                    template[lines[0][1:]] = lines[1:] or [None]
            for lines in f:
                if lines[0] == '#': break
                t.append(lines.split())
            f.close()

            for s in t[0]:
                if s in self.standards:
                    self.standards[s]['metadata'] = False
                else:
                    self.standards[s] = Standard().__dict__
                    template[s] = [None]
            for i in range(1,len(t)):
                self.data.append(copy.copy(template))
                for j in range(len(t[0])):
                    try:
                        self.data[i-1][t[0][j]] = [float(t[i][j])]
                    except ValueError:
                        self.data[i-1][t[0][j]] = [t[i][j]]
            self.detectors = set(['Detector','_Detector'])
            for s in self.standards:
                if s[:8] == 'Analyzer' and s[-5:] == 'Group':
                    self.detectors = self.detectors | set(self.data[0][s])
                    for d in self.data[0][s]:
                        self.detectors.add('_' + d)
            for d in self.detectors:
                if d in self.standards and d[0] != '_':
                    if '_' + d not in self.standards:
                        self.standards['_' + d] = Standard().__dict__
                        for p in self.data:
                            p['_' + d] = p[d]


        elif ('fpx' in filename or 'fpt' in filename) and s[0] != '#':
            fields = []
            date = []
            first = True
            for lines in f:
                if first:
                    first = False
                    spl = lines.split()
                    for i in range(len(spl)):
                        try:
                            if int(spl[i]) in range(1,7):
                                fields.append('A'+spl[i])
                        except:
                            if spl[i] == 'Intensity':
                                fields.append('Detector')
                                date = spl[i+1:]
                                break
                    self.standards = {'Date': Standard(metadata = True).__dict__}
                    for s in fields:
                        self.standards[s] = Standard(metadata = False).__dict__
                else:
                    newdata = {}
                    spl = lines.split()
                    for i in range(len(spl)):

                        try:
                            newdata[fields[i]] = [float(spl[i])]
                        except:
                            pass
                    newdata['Date'] = date
                    self.data.append(newdata)
        else:
            reader = readncnr3.datareader()
            mydata = reader.readbuffer(filename)
            try:
                template['Collimations'] = [mydata.metadata['collimations']['coll1'],mydata.metadata['collimations']['coll2'],mydata.metadata['collimations']['coll3'],mydata.metadata['collimations']['coll4']]
                self.standards['Collimations'] = Standard(metadata = True).__dict__
            except:
                pass
            try:
                template['Lattice'] = [mydata.metadata['lattice']['a'],mydata.metadata['lattice']['b'],mydata.metadata['lattice']['c'],mydata.metadata['lattice']['alpha'],mydata.metadata['lattice']['beta'],mydata.metadata['lattice']['gamma']]
            except:
                pass

            try:
                for sl in mydata.metadata['count_info']:
                    inStandards = False
                    for s in self.standards:
                        if ''.join(s.lower().split()) == ''.join(sl.split('_')):
                            inStandards = True
                            template[s] = [mydata.metadata['count_info'][s]]
                    if not inStandards:
                        s = sl.split('_')
                        for i in range(len(s)):
                            s[i] = s[i].capitalize()
                        s = ''.join(s)
                        template[s] = [mydata.metadata['count_info'][s]]
                        self.standards[s] = Standard(metadata = True).__dict__
            except:
                pass
            try:
                template['MonoSpacing'] = [mydata.metadata['dspacing']['monochromator_dspacing']]
                template['AnaSpacing'] = [mydata.metadata['dspacing']['analyzer_dspacing']]
            except:
                pass
            try:
                template['FixedE'] = [mydata.metadata['energy_info']['efixed'], mydata.metadata['energy_info']['ef']]
            except:
                pass

            try:

                for sl in mydata.metadata['file_info']:
                    inStandards = False
                    for s in self.standards:
                        if '_'.join(s.lower().split()) == sl:
                            inStandards = True
                            template[s] = [mydata.metadata['file_info'][s]]
                    if not inStandards:
                        s = sl.split('_')
                        for i in range(len(s)):
                            s[i] = s[i].capitalize()
                        s = ' '.join(s)
                        template[s] = [mydata.metadata['file_info'][s]]
                        self.standards[s] = Standard(metadata = True).__dict__
            except:
                pass
            try:
                template['HField'] = [mydata.metadata['magnetic_field']['hfield']]
                self.standards['HField'] = Standard(metadata = True).__dict__
            except:
                pass
            try:
                template['AnalyzerMosaic'] = [mydata.metadata['mosaic']['mosaic_analyzer']]
                self.standards['AnalyzerMosaic'] = Standard(metadata = True).__dict__
                template['MonochromatorMosaic'] = [mydata.metadata['mosaic']['mosaic_monochromator']]
                self.standards['MonochromatorMosaic'] = Standard(metadata = True).__dict__
                template['SampleMosaic'] = [mydata.metadata['mosaic']['mosaic_sample']]
                self.standards['SampleMosaic'] = Standard(metadata = True).__dict__
            except:
                pass
            try:
                template['Orient'] = [mydata.metadata['orient1']['h'], mydata.metadata['orient1']['k'],mydata.metadata['orient1']['l'],mydata.metadata['orient2']['h'],mydata.metadata['orient2']['k'], mydata.metadata['orient2']['l']]
            except:
                pass
            areMotors = True
            motors = []
            try:
                for i in range(6):
                    motors.append([mydata.metadata['motor'+str(1+i)]['start'], mydata.metadata['motor' + str(1+i)]['step']])
            except:
                #print mydata.metadata
                areMotors = False
            try:
                for p in mydata.data:
                    pcorr = p.split('_')
                    for i in range(len(pcorr)):
                        pcorr[i] = pcorr[i].capitalize()
                    pcorr = ''.join(pcorr)

                    if pcorr not in self.standards and pcorr not in ['Qx','Qy','Qz','Counts']:
                        self.standards[pcorr] = Standard(metadata = False).__dict__
                count = 0
                while(True):
                    print 'hey'
                    try:
                        newdata = copy.copy(template)
                        for p in mydata.data:
                            pcorr = p.split('_')
                            for i in range(len(pcorr)):
                                pcorr[i] = pcorr[i].capitalize()
                            pcorr = ''.join(pcorr)
                            if pcorr[0] == 'Q':
                                if pcorr[1] == 'x':
                                    pcorr = 'QX'
                                elif pcorr[1] == 'y':
                                    pcorr = 'QY'
                                elif pcorr[1] == 'z':
                                    pcorr = 'QZ'
                            if pcorr == 'Counts':
                                pcorr = 'Detector'
                            newdata[pcorr] = [mydata.data[p][count]]

                        if(areMotors):
                            for i in range(len(motors)):
                                newdata['A' + str(i+1)] = [motors[i][0] +  motors[i][1] * count]
                        self.data.append(newdata)
                        count+=1
                    except:
                        break


            except:
                pass
