    def __init__(self, filepath, config, src=None):
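        """Wrap a single data file: resolve its path, pick the reader that
        matches the file extension and post-process the resulting Srcpar
        source. A non-existent path is treated as an in-memory ("Modified")
        source."""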
        self.src = src
        self.selected = False
        filepath = str(filepath)
        if os.path.exists(filepath):
            self.location = os.path.abspath(filepath)
            base, self.ext = os.path.splitext(filepath)
            self.filename = os.path.split(base)[1]
            if self.ext == ".xyd":
                self.type = "Gumtree xyd"
                self.src = Gumtree_xyd.Gumtree_xyd_read(filepath, config)
                True
            elif self.ext == ".hdf":
                self.type = "NeXus hdf"
                self.src = NeXus_hdf.NeXus_hdf_read(filepath, config)
                #self.MotorCorrections(config)
            elif self.ext == ".txt":
                self.type = "ASCII two column"
                self.src = AsciiTwoColumn_txt.AsciiTwoColumn_txt_read(
                    filepath, config)
            elif self.ext == ".xy":
                self.type = "Fullprof compatible"
                self.src = FullprofCompat_xy.FullprofCompat_xy_read(
                    filepath, config)
            elif self.ext == ".sim":
                self.type = "McStas 1D Detector"
                self.src = McStas_sim.McStas_sim_read(filepath, config)
            elif self.ext == ".cmds":
                self.type = "McStas Parameter Study"
                self.src = McStas_sim.McStas_sim_read(filepath,
                                                      config,
                                                      paramstudy=True)
            elif self.ext == ".pkf":
                self.type = "Scanman PeakFit"
                self.src = ScanmanPeakFit_pkf.ScanmanPeakFit_pkf_read(
                    filepath, config)

        else:
            self.location = "memory_" + filepath
            self.filename = "None"
            self.type = "Modified"
            self.ext = ""

        if self.src == None:
            return

        self.srcp = Srcpar.Srcpar(config)  #Source post process
        self.srcsplit = Srcpar.Srcpar(config)  #Source with detector split

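        #Prepare the freshly read source: compute all axes, take the channel
        #axis from the last dataset, build the summed sets and select the
        #"raw" frame of the last dataset as the current view.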
        self.src.CalcAllAxis()
        self.src.x_chan = self.src.dataset[-1].x_chan
        self.src.nchan = len(self.src.x_chan)
        self.src.CalcSumSetCommon("")
        self.src.CalcSumSet(["raw"])
        self.src.SelectFrame("raw")
        self.src.SelectDataSet(-1)

        if self.src.data2D == False:
            self.srcsplit = self.src
Example #2
    def __init__(self, ScanmanMain):
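        """Hold the data source for the Scanman main window: an empty Srcpar
        source, its post-processed and detector-split variants, the parameter
        and detector dictionaries and the list of loaded files."""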
        self.scanman = ScanmanMain
        #self.config = self.scanman.config["source"]
        self.config = self.scanman.config
        self.src = Srcpar.Srcpar(self.config)
        self.paramdict = {}
        self.detdict = {}
        self.filelist = []
        self.name = ""

        self.srcp = Srcpar.Srcpar(self.config)  #Source post process
        self.srcsplit = Srcpar.Srcpar(
            self.config)  #Source with detector split
Example #3
def McStas_sim_read(fname, config, paramstudy=False):
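    """Read a McStas 1D detector file, or a parameter-study file when
    paramstudy is True, and return the populated Srcpar source."""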

    mcStasob = McStasReader()
    mcStasob.src = Srcpar.Srcpar(config)
    if paramstudy == False:
        mcStasob.ReadFile(fname)
    else:
        mcStasob.ReadPSFile(fname)
    return mcStasob.src
    True
def AsciiTwoColumn_txt_read(fname, config):
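    """Read a two-column ASCII file: sections separated by blank lines, each
    with an optional header line naming the axis and the data type, followed
    by one x/intensity pair per line. Returns the populated Srcpar source."""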
    validaxistypes = [
        'Channel', 'Position', 'Angle', 'd-spacing', 'd', '2th', 'ch', 'mm'
    ]
    src = Srcpar.Srcpar(config)
    #src.dataset.pop()       #Remove the {0,0} dataset
    paramdictcmn = {}
    paramdict = {}
    detdict = {}

    f = open(fname, 'r')
    filecontent = f.read()
    f.close()
    scanStartIdx = 0
    datasections = filecontent[scanStartIdx:-1].split("\n\n")
    for section in datasections:
        paramdict = paramdictcmn.copy()

        #Get the intensity data
        y = np.array([])
        xaxis = np.array([])
        splittedsections = section.split("\n")
        axis, type = splittedsections[0].split()
        try:
            a = float(axis)
            axistype = "mm"
            paramdict["Type"] = "n"
        except:
            axistype = axis
            if axis not in validaxistypes:
                axistype = "Position"
                paramdict["position"] = axis

            paramdict["Type"] = type
            splittedsections = splittedsections[1:]
        for aline in splittedsections:
            x, intensity = aline.split()
            y = np.append(y, float(intensity))
            xaxis = np.append(xaxis, float(x))
        ind = np.lexsort([xaxis])  #sorted indexes

        src.AddData(y[ind], paramdict, detdict, "ASCII_2column", fname,
                    {axistype: xaxis[ind]})
        src.ylabel = paramdict["Type"]
    return src
    def RemoveBelowThreshold(self):
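        """For every selected file, keep only the points whose intensity in
        the chosen threshold dataset exceeds the y threshold, collect the
        reduced datasets in a new Srcpar source and open it as an in-memory
        file."""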
        ythreshold = int(self.ui.ythreshold_edit.text())
        thresholddataset = int(self.ui.thresholddataset_edit.text())
        currset = self.scanman.datasrc.currset

        flistidx, selectstart, selectend = self.ui.datasetsappliedframe.GetSelection(
        )

        if self.scanman.axistype == "Channel": axistype = "ch"
        elif self.scanman.axistype == "Position": axistype = "mm"
        elif self.scanman.axistype == "Angle": axistype = "2th"
        elif self.scanman.axistype == "d-spacing": axistype = "d"

        for filei in flistidx:
            self.scanman.ui.sourceGroupBox.SelectDataFile(filei)
            setend = copy.copy(selectend)
            setstart = selectstart
            if setend == -1:
                setend = len(self.scanman.datasrc.dataset)
            self.scanman.SelectData(thresholddataset, display=False)
            src = self.scanman.datasrc
            validpoints = np.where(src.y > ythreshold)[0]
            dstsrc = Srcpar.Srcpar(src.config)

            for setnr in range(setstart, setend):
                self.scanman.SelectData(setnr, display=False)
                src = self.scanman.datasrc
                fname = "threshold_reduced_" + str(
                    thresholddataset) + "_" + str(ythreshold)
                dstsrc.AddData(src.y[validpoints], src.prm, src.detprm,
                               src.origin + "_reduced", fname,
                               {axistype: src.x[validpoints]})
                True
            #scanman.Source.FileInfo
            finfo = SourceFileDEF.FileInfo(fname, self.scanman.config, dstsrc)
            self.scanman.ui.sourceGroupBox.OpenFile([fname], finfo)
            True

        #dst.AddData(y[ind],paramdict, detdict, "Gumtree_xyd", fname, {self.scanman.axistype:xaxis[ind]})

        #dst.AddData(y[ind],paramdict, detdict, "Gumtree_xyd", fname, {axistype:xaxis[ind]})

        True
    def RemoveEverynth(self):
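        """For every selected file, drop every n-th data point from the
        selected datasets, collect the reduced datasets in a new Srcpar source
        and open it as an in-memory file."""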
        nth = int(self.ui.removeevery_edit.text())
        currset = self.scanman.datasrc.currset

        flistidx, selectstart, selectend = self.ui.datasetsappliedframe.GetSelection(
        )

        if self.scanman.axistype == "Channel": axistype = "ch"
        elif self.scanman.axistype == "Position": axistype = "mm"
        elif self.scanman.axistype == "Angle": axistype = "2th"
        elif self.scanman.axistype == "d-spacing": axistype = "d"

        for filei in flistidx:
            self.scanman.ui.sourceGroupBox.SelectDataFile(filei)
            setend = copy.copy(selectend)
            setstart = selectstart
            if setend == -1:
                setend = len(self.scanman.datasrc.dataset)
            #self.scanman.SelectData(thresholddataset,display=False)
            src = self.scanman.datasrc
            #validpoints=np.where(src.y>ythreshold)[0]
            dstsrc = Srcpar.Srcpar(src.config)
            removepoints = range(nth - 1, src.nchan, nth)
            validpoints = list(range(src.nchan))  #a list, since range objects have no remove()
            for popval in removepoints:
                validpoints.remove(popval)

            for setnr in range(setstart, setend):
                self.scanman.SelectData(setnr, display=False)
                src = self.scanman.datasrc
                fname = "removed_every_" + str(nth) + "_dataset"
                dstsrc.AddData(src.y[validpoints], src.prm, src.detprm,
                               src.origin + "_removed", fname,
                               {axistype: src.x[validpoints]})
                True
            #scanman.Source.FileInfo
            finfo = SourceFileDEF.FileInfo(fname, self.scanman.config, dstsrc)
            self.scanman.ui.sourceGroupBox.OpenFile([fname], finfo)
            True

        True
Example #7
def NeXus_hdf_read(fname, config):
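    """Read a NeXus HDF file with h5py: load the detector histograms (2D where
    available, otherwise the corrected 1D data), the detector geometry, the
    monitor counts and the per-run parameters, and return the populated Srcpar
    source."""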
    src = Srcpar.Srcpar(config)
    #src.dataset.pop()       #Remove the {0,0} dataset
    paramdict = {}
    params = {}
    detdict = {}
    detdictcmn = {}
    twodim = False

    f = h5py.File(fname, "r")
    dset = f.get("/")
    #y = dset["/entry1/instrument/detector/hmm_x_corrected"].value.astype('float')
    if "/entry1/instrument/detector/hmm" in dset:
        try:
            #n = dset["/entry1/instrument/detector/hmm"].value.astype('float')
            #CDM .value is deprecated: n = dset["/entry1/instrument/detector/hmm"].value
            n = dset["/entry1/instrument/detector/hmm"][()]
            twodim = True
        except ValueError:  #array is too big, try to get the 1D dataset
            #n = dset["/entry1/instrument/detector/hmm_x_corrected"].value.astype('float')
            n = np.array(dset["/entry1/instrument/detector/hmm_x_corrected"])
            twodim = False
    elif "/entry1/instrument/detector/hmm_xy" in dset:
        #n = dset["/entry1/instrument/detector/hmm_xy"].value.astype('float')
        n = dset["/entry1/instrument/detector/hmm_xy"][()].astype('float')
        twodim = True
    elif "/entry1/instrument/detector/hmm_total_xy" in dset:
        #n = dset["/entry1/instrument/detector/hmm_total_xy"].value
        n = dset["/entry1/instrument/detector/hmm_total_xy"][()]
        twodim = True

    numruns = len(n)
    progress = ProgressBar("Loading NeXus hdf datasets...", numruns)

    datakeys = dset["/entry1/data"].keys()
    possiblescanparams = [x for x in datakeys]
    if "run_number" in datakeys:  #Scan was done with Gumtree, therefore use the title to determine the scan variables
        #title = dset["/entry1/experiment/title"].value[0]
        title = str(dset["/entry1/experiment/title"][()])
        possiblescanparams = re.findall(r"[\w']+", title)
    #else:
    #    possiblescanparams = datakeys
    if "time" in possiblescanparams: possiblescanparams.remove("time")

    True
    #x_stth = dset["/entry1/instrument/detector/x_stth"]
    try:
        #stth = dset["/entry1/sample/stth"].value
        stth = dset["/entry1/sample/stth"][()]
    except:
        stth = [90.0] * len(n)
    if "/entry1/instrument/detector/sample_to_detector_distance" in dset:
        #sampletodetector = np.float32(dset["/entry1/instrument/detector/sample_to_detector_distance"].value[0])
        sampletodetector = np.float32(
            dset["/entry1/instrument/detector/sample_to_detector_distance"][0])
    else:
        sampletodetector = np.float32(1000)
    #detwidth = np.float32(dset["/entry1/instrument/detector/active_width"].value[0])
    #detheight = np.float32(dset["/entry1/instrument/detector/active_height"].value[0])
    detwidth = np.float32(dset["/entry1/instrument/detector/active_width"][0])
    detheight = np.float32(
        dset["/entry1/instrument/detector/active_height"][0])
    if "/entry1/instrument/crystal" in dset:
        crystal = dset["/entry1/instrument/crystal"]
    else:
        crystal = dict([])
    if "/entry1/instrument/monochromator" in dset:
        monochromator = dset["/entry1/instrument/monochromator"]
        if "focus" in monochromator:
            monochromator = dset["/entry1/instrument/monochromator/focus"]
    else:
        monochromator = dict([])
    if "/entry1/instrument/slits" in dset:
        slits = dset["/entry1/instrument/slits"]
    else:
        slits = dict([])
    sample = dset["/entry1/sample"]
    #moncounts = [[s,dset["/entry1/monitor/"+s].value] for s in dset["entry1/monitor"] if "_counts" in s]
    #monrates = [[s,dset["/entry1/monitor/"+s].value] for s in dset["entry1/monitor"] if "_event_rate" in s]
    #montime = [[s,dset["/entry1/monitor/"+s].value] for s in dset["entry1/monitor"] if "_time" in s]
    moncounts = [[s, dset["/entry1/monitor/" + s][()]]
                 for s in dset["entry1/monitor"] if "_counts" in s]
    monrates = [[s, dset["/entry1/monitor/" + s][()]]
                for s in dset["entry1/monitor"] if "_event_rate" in s]
    montime = [[s, dset["/entry1/monitor/" + s][()]]
               for s in dset["entry1/monitor"] if "_time" in s]

    detdictcmn["sam_to_det"] = sampletodetector
    detdictcmn["det_xmin"] = -detwidth / 2.0
    detdictcmn["det_xmax"] = detwidth / 2.0
    detdictcmn["det_ymin"] = -detheight / 2.0
    detdictcmn["det_ymax"] = detheight / 2.0
    detdictcmn["lambda"] = config["source"]["detector"]["lambda"]

    flipit = config["source"]["hdf"]["flipxy"]

    #tic=time.time()
    for run in range(numruns):
        progress.setinfo(fname)

        params["RunNr"] = str(run + 1)

        detdict = detdictcmn.copy()
        detdict["stth"] = stth[run]
        #for itemkey in crystal.iterkeys(): DM iterkeys() deprecated
        for itemkey in crystal.keys():
            params[itemkey] = str(dset["/entry1/instrument/crystal/%s" %
                                       (itemkey)][run])
        #for itemkey in monochromator.iterkeys():  DM iterkeys() deprecated
        for itemkey in monochromator.keys():
            try:
                #params[itemkey] = str(dset["/entry1/instrument/monochromator/%s" %(itemkey)][run])
                params[itemkey] = str(dset[monochromator.name + "/" +
                                           itemkey][run])
            except:
                True
        #for itemkey in slits.iterkeys():  DM iterkeys() deprecated
        for itemkey in slits.keys():
            try:
                params[itemkey] = str(dset["/entry1/instrument/slits/%s" %
                                           (itemkey)][run])
            except:
                True
        #for itemkey in sample.iterkeys(): DM iterkeys() deprecated
        for itemkey in sample.keys():
            try:  #if type(sample[itemkey]) == h5py._hl.dataset.Dataset:       #Cannot handle groups at the moment, only Datasets
                #theval = sample[itemkey].value
                theval = sample[itemkey][()]
                ivalnr = run
                if theval.size == 1: ivalnr = 0
                params[itemkey] = str(theval[ivalnr])
                #params[itemkey]=theval[ivalnr].astype('str')
                #if sample[itemkey].value.size==1:
                #    params[itemkey]=sample[itemkey].value[0].astype('str')
                #else:
                #    params[itemkey]=sample[itemkey].value[run].astype('str')
            except:
                True
        try:
            #params["time"] = str(dset["/entry1/data/time"].value[run])
            params["time"] = str(dset["/entry1/data/time"][run])
        except:
            True
        for item, value in moncounts:
            params[str(item)] = str(value[run])
        for item, value in monrates:
            params[str(item)] = str(value[run])
        for item, value in montime:
            params[str(item)] = str(value[run])

        paramdict = params.copy()
        #frame = n[run][0]
        #for i in range(len(frame)):
        #    if i != 7:
        #        frame[i]=np.zeros(np.shape(frame[i]))

        #src.AddData(frame, paramdict, detdict, "NeXus_hdf", fname, twod=True)     #the [0] is the time channel

        if len(n.shape) == 4:  # run, time, x, y
            if flipit:
                src.AddData(np.transpose(n[run][0]),
                            paramdict,
                            detdict,
                            "NeXus_hdf",
                            fname,
                            twod=twodim)  #the [0] is the time channel
            else:
                src.AddData(n[run][0],
                            paramdict,
                            detdict,
                            "NeXus_hdf",
                            fname,
                            twod=twodim)  #the [0] is the time channel
            #src.AddData(np.transpose(n[run][0]), paramdict, detdict, "NeXus_hdf", fname, twod=twodim)     #the [0] is the time channel
        else:  # run, x, y
            src.AddData(np.asfarray(n[run]),
                        paramdict,
                        detdict,
                        "NeXus_hdf",
                        fname,
                        twod=twodim)
        True
        if progress.wasCanceled(): break
        progress.step()
    f.close()
    #toc=time.time()
    #print "Time is %f" % (toc-tic)
    src.filename = fname
    paramkeys = [x for x in params.keys()]
    commonparams = list(
        set(paramkeys).intersection(possiblescanparams)
    )  #Determine the scan variables from the title and the actual instrument parameters
    for item in commonparams:
        src.precparams[item] = 0.01
        True
    return src
def Gumtree_xyd_read(fname, config):
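    """Read a Gumtree-exported .xyd file: a header of tab-separated key=value
    parameters, then one section per scan point (starting at "# Scan
    variable") holding a two-theta/intensity/sigma table. Returns the
    populated Srcpar source."""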
    src = Srcpar.Srcpar(config)
    #src.dataset.pop()       #Remove the {0,0} dataset
    paramdictcmn = {}
    paramdict = {}
    detdictcmn = {}
    detdict = {}

    f = open(fname, 'r')
    filecontent = f.read()
    f.close()

    line1end = filecontent.find("\n")
    paramdictcmn['Nexus file'] = filecontent[19:line1end]
    scanStartIdx = filecontent.find("# Scan variable")
    cmnContent = filecontent[line1end + 1:scanStartIdx]
    cmnContentLines = cmnContent.split("\n")
    for aline in cmnContentLines:
        tabsplit = aline[2:].split("\t")
        for itemstr in tabsplit:
            eqsplit = itemstr.split("=")
            if len(eqsplit) > 1:
                paramdictcmn[eqsplit[0]] = eqsplit[1]
            True
        True
    True
    sampletodetector = paramdictcmn.pop(
        "Processed with: calculating two theta on LDS")
    paramdictcmn["cor_to_det"] = sampletodetector
    detdictcmn["det_xmin"] = float(paramdictcmn["active_width"][:-3]) / (-2.0)
    detdictcmn["det_xmax"] = float(paramdictcmn["active_width"][:-3]) / (2.0)
    detdictcmn["sam_to_det"] = float(paramdictcmn["cor_to_det"][:-3])
    detdictcmn["stth"] = float(
        paramdictcmn["stth"][:-7])  #-7 removes the 'degrees'
    detdictcmn[
        "lambda"] = 1.659  #Only valid for MPISI, should do this more cleverly later on

    datasections = filecontent[scanStartIdx:-1].split("\n\n")
    for section in datasections:
        paramdict = paramdictcmn.copy()
        detdict = detdictcmn.copy()

        #Get the Scan variable
        prmnamestart = section.find(":") + 2
        lnend = section.find("\n")
        eqsplit = section[prmnamestart:lnend].split("=")
        paramdict[eqsplit[0]] = eqsplit[1].strip()
        if eqsplit[0] == "stth": detdict["stth"] = float(eqsplit[1].strip())

        #Get the time and counts line
        varstart = lnend + 3  #skip the newline, hash and space
        lnend = section[varstart:].find("\n") + varstart
        splitted = section[varstart:lnend].split(" ")
        for itemstr in splitted:
            eqsplit = itemstr.split("=")
            if len(eqsplit) > 1:
                paramdict[eqsplit[0]] = eqsplit[1].strip()

        #Get the intensity data
        datastart = lnend + 57  # Skip the Header line
        y = np.array([])
        tth = np.array([])
        for aline in section[datastart:-1].split("\n"):
            twotheta, intensity, sigma = aline.split()
            y = np.append(y, float(intensity))
            tth = np.append(tth, float(twotheta))

        src.AddData(y, paramdict, detdict, "Gumtree_xyd", fname, {"2th": tth})
    return src
Example #9
    def ReadServer(self):
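        """Poll the histogram-memory server over HTTP: parse the text status
        and configuration pages, download the current 2D histogram, load it
        into a fresh Srcpar source and regenerate the display."""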
        tic = time.time()
        wasrunning = False
        try:
            src = Srcpar.Srcpar(self.config)

            #wasrunning = self.autotimer.is_running
            #if wasrunning: self.autotimer.stop()
            #if wasrunning: self.autotimer.halt()

            self.detdict["sam_to_det"] = float(
                self.scanman.ui.sam2det_edit.text())
            self.detdict["stth"] = float(self.scanman.ui.stth_edit.text())
            self.detdict["lambda"] = float(
                self.scanman.ui.wavelength_edit.text())
            self.detdict["det_xmin"] = float(
                self.scanman.ui.detDim_xmin_edit.text())
            self.detdict["det_xmax"] = float(
                self.scanman.ui.detDim_xmax_edit.text())

            #--------------------------------
            response = self.opener.open(self.serveraddr + 'textstatus.egi')
            histstatus = response.read().decode()  #decode bytes for the string parsing below
            response.close()

            idx1 = histstatus.find("HM-Host") + 9
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["HM-Host"] = histstatus[idx1:idx2]

            idx1 = histstatus.find("DAE_type", idx2) + 10
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["DAE_type"] = histstatus[idx1:idx2]

            idx1 = histstatus.find("DAQ:", idx2) + 5
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["DAQ_Status"] = histstatus[idx1:idx2]

            idx1 = histstatus.find("DAQ_dirname", idx2) + 13
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["DAQ_dirname"] = histstatus[idx1:idx2]
            fname = self.paramdict["DAQ_dirname"]

            idx1 = histstatus.find("acq_dataset_active_sec", idx2) + 24
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["time"] = histstatus[idx1:idx2]

            idx1 = histstatus.find("num_events_to_hfp", idx2) + 18
            idx2 = histstatus.find('\n', idx1)
            self.paramdict["events"] = histstatus[idx1:idx2]

            #--------------------------------
            response = self.opener.open(self.serveraddr + 'readconfig.egi',
                                        timeout=2)
            histstatus = response.read().decode()  #decode bytes for the string parsing below
            response.close()

            idx1 = histstatus.find("COUNT_METHOD") + 14
            idx2 = histstatus.find('"', idx1)
            self.paramdict["COUNT_METHOD"] = histstatus[idx1:idx2]
            idx1 = histstatus.find("COUNT_SIZE") + 12
            idx2 = histstatus.find('"', idx1)
            self.paramdict["COUNT_SIZE"] = histstatus[idx1:idx2]
            #print "Time is %f" % (time.time()-tic)

            #-------------------------------- 1D data read
            #r1 = self.opener.open(self.serveraddr+'readdataselectdatatype.egi?read_data_type=TOTAL_HISTOGRAM_X&read_data_format=CSV')
            #response = self.opener.open(self.serveraddr+'readdataselected.egi?read_data_uncal_cal=CALIBRATED&read_data_order_flip_x=DISABLE&read_data_order_flip_y=DISABLE&read_data_order_transpose_xy=DISABLE&start=&end=')
            #dataparsed = response.read().split(',')
            #self.src.Clear()
            #y = np.array([float(dataparsed[i]) for i in range(len(dataparsed))])
            #self.src.x_chan = np.arange(0, len(dataparsed), 1)
            #self.src.nchan = len(self.src.x_chan)
            #self.src.AddData(y,self.paramdict, self.detdict, "ANSTO Histmem", fname)

            #-----------------------------------2D data read
            #r1 = self.opener.open(self.serveraddr+'readdataselectdatatype.egi?read_data_type=TOTAL_HISTOGRAM_XY&read_data_format=CSV')
            #response = self.opener.open(self.serveraddr+'readdataselected.egi?read_data_uncal_cal=CALIBRATED&read_data_order_flip_x=DISABLE&read_data_order_flip_y=DISABLE&read_data_order_transpose_xy=DISABLE&start=&end=')
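            #The readhmdata.egi request returns the full bank-1 XYT histogram
            #as nrx*nry packed 32-bit integers, unpacked with struct below.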
            nrvals = self.nrx * self.nry
            response = self.opener.open(
                self.serveraddr + 'readhmdata.egi?bank=1&start=0&end=' +
                str(nrvals) +
                '&read_data_period_number=0&read_data_type=HISTOPERIOD_XYT')
            textdata = response.read()
            response.close()

            data = struct.unpack(str(nrvals) + 'i', textdata)
            n = np.asarray(data, dtype=float).reshape((self.nrx, self.nry))
            if self.flipxy:
                n = n.transpose()
            #tic=time.time()
            #dataparsed = textdata.split('\n')
            #n = []
            #for yline in dataparsed[:-2]:
            #    n.append(yline.split(","))
            #n=np.asarray(n,dtype=float)
            #print "Time is %f" % (time.time()-tic)
            src.AddData(n,
                        self.paramdict,
                        self.detdict,
                        "ANSTO Histmem 2D",
                        fname,
                        twod=True)
            src.crmap = copy.deepcopy(
                self.src.crmap
            )  #Use the previous geometry correction map if it exists. Saves a lot of time
            self.src = src
            self.srcp = Srcpar.Srcpar(self.config)  #Source post process
            self.srcsplit = Srcpar.Srcpar(
                self.config)  #Source with detector split

            #tic=time.time()
            self.src.CalcAllAxis()
            self.src.x_chan = self.src.dataset[-1].x_chan
            self.src.nchan = len(self.src.x_chan)
            self.src.CalcSumSetCommon("")
            self.src.CalcSumSet(["raw"])
            self.src.SelectFrame("raw")
            self.src.SelectDataSet(-1)

            self.scanman.datasrc = self.src  #We have to do this because the linking is broken when assigning different file's datasets

            if self.src.data2D == False:
                self.srcsplit = self.src

            self.scanman.Generate()
            #print "Time is %f" % (time.time()-tic)
            QtCore.QCoreApplication.instance().processEvents(
            )  #This, together with stopping and starting the timer again seems to keep the program from crashing in Qt4lib.dll

        except:
            True
def FullprofCompat_xy_read(fname, config):
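    """Read a Fullprof-compatible .xy export: blank-line separated sections
    whose header lines carry "| "-separated key:value detector and scan
    parameters, followed by an axis header and x/intensity pairs. Returns the
    populated Srcpar source."""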
    src = Srcpar.Srcpar(config)
    #src.dataset.pop()       #Remove the {0,0} dataset
    paramdictcmn = {}
    paramdict = {}
    detdict = {}
    
    f = open(fname, 'r')
    filecontent = f.read()
    f.close()
    scanStartIdx = 0
    datasections = filecontent[scanStartIdx:-1].split("\n\n")
    numruns = len(datasections)    
    progress = ProgressBar("Loading Fullprof compatible xy datasets...", numruns)
    #progress.setinfo(fname)
    for section in datasections:
        progress.setinfo(fname)
        #paramdict = paramdictcmn.copy()
        
        sectionsplitlines = section.split("\n")
        
        params = sectionsplitlines[3].split("| ")
        params.pop(0)
        detdict = {
            params[s].split(":")[0]: float(params[s].split(":")[1])
            for s in range(len(params))
        }

        params = sectionsplitlines[4].split("| ")
        params.pop(0)  #This is the empty one, should improve the exporting so that this isn't needed
        paramdict = {
            params[s].split(":")[0]: params[s].split(":")[1]
            for s in range(len(params))
        }
        if "stth" not in paramdict:
            paramdict["stth"] = str(detdict["stth"])
        True
            
        #Get the intensity data
        #y = np.array([])
        #tth = np.array([])
        axisheader = sectionsplitlines[5].split(" ")
        if axisheader[0] == "Channel" : axistype = "ch"
        elif axisheader[0] == "Position" : axistype = "mm"
        elif axisheader[0] == "Angle" : axistype = "2th"
        elif axisheader[0] == "d-spacing" : axistype = "d"
        
        datalines = sectionsplitlines[6:]
        nrpoints = len(datalines)
        y = np.array([0.0]*nrpoints)
        x = np.array([0.0]*nrpoints)
        
        #for aline in sectionsplitlines[6:]:
        
        for i in range(len(datalines)):
            #x, intensity = aline.split()
            #y = np.append(y, float(intensity))
            #mm = np.append(tth, float(x))
            xstr, intensity = datalines[i].split()
            y[i] = float(intensity)
            x[i] = float(xstr)
        
        src.AddData(y, paramdict, detdict, "FullprofCompat_xy", fname,
                    {axistype: x})
        if progress.wasCanceled(): break
        progress.step()
    
    return src
Example #11
def ScanmanPeakFit_pkf_read(fname, config):
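    """Read a Scanman peak-fit .pkf table: a tab-separated header with one
    Channel_range block per fitted peak, then one row per measurement. A
    dialog lets the user choose the scan parameters, peaks and fitted
    quantity; the rows are grouped into datasets with the last selected
    parameter as the x axis. Returns the populated Srcpar source."""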

    from Source import ScanmanPeakFit_pkfDEF
    pkfsourcegui = ScanmanPeakFit_pkfDEF.ScanmanPeakFit_pkfDEF()
    #pkfsourcegui.activateWindow()

    src = Srcpar.Srcpar(config)
    paramdictcmn = {}
    params = {}
    paramdict = {}
    detdict = {}

    #prec = 0.1             #parameter precision
    #prec = 0.0
    fixnegative = False
    fixmassive = float(2.0)

    f = open(fname, 'r')
    filecontent = f.read()
    f.close()
    scanStartIdx = 0
    lines = filecontent[scanStartIdx:-1].split("\n")
    header = lines[0].split("\t")
    i = 0
    ich = []
    for head in header:
        #if "Channel" in head:
        if "Channel_range" in head:
            ich.append(i)
        i = i + 1
    ich.append(len(header))
    varpar = {}
    for i in range(1, ich[0]):
        varpar[header[i]] = i
        True

    pkfsourcegui.setparoptions(varpar)
    pkfsourcegui.setpeakoptions([str(i) for i in range(len(ich) - 1)])

    #selectedpeaks = [0,1,2]
    #selectedpar = ["sample_x","sample_y","sample_z"]
    #selectedpar = ["sx","sy","sz"]
    #selectedpar = ["vsx","vsy","sz"]

    fitparams = {}
    secwidth = ich[1] - ich[0]
    for i in range(ich[0] + 1, ich[1]):
        fitparamcol = []
        head = header[i][:header[i].rfind("_")]
        for j in range(len(ich) - 1):
            fitparamcol.append(i + secwidth * j)
        fitparams[head] = copy(fitparamcol)

    pkfsourcegui.setdataoptions(fitparams.keys())

    if 1:
        result = pkfsourcegui.exec_()
        if pkfsourcegui.selectedfit == "": return
        selectedpar = pkfsourcegui.selectedparams
        selectedpeaks = pkfsourcegui.selectedpeaks
        selectedfit = pkfsourcegui.selectedfit  #"Counts"
        src.ylabel = selectedfit
        prec = pkfsourcegui.precision
        permutate = pkfsourcegui.permutate

    if 0:  #For testing
        selectedpar = ['sx', 'sz']
        selectedpeaks = [0]
        selectedfit = "Intensity"
        src.ylabel = "Intensity"
        prec = 0.6
        permutate = False

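    #Build one parameter ordering per selected parameter: each takes a turn as
    #the primary sort key while the last entry becomes the x axis. With
    #permutate disabled only the first ordering is used.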
    permupar = []
    for parpri in selectedpar:
        perln = [parpri]
        for parsec in selectedpar:
            if parsec != parpri:
                perln.append(parsec)
        permupar.append(perln)
    if permutate == False:
        permupar = [permupar[0]]

    data = []
    for aline in lines[1:]:
        splitted = aline.split("\t")
        temp = []
        for aval in splitted:
            try:
                temp.append(float(aval))
            except:
                temp.append(aval)

        data.append(temp)
        True
    data = np.array(data)
    datat = data.transpose()
    nrdatalns = len(data)

    for parstr in permupar:
        sortedprms = np.array([[0.0] * (1 + len(parstr))])
        for i in range(0, len(data)):
            #dset = srcp.dataset[i]
            #expanded = np.array([np.append([[i]], [float(dset.prm[st]) for st in setparams])])
            expanded = np.array([
                np.append([[i]],
                          [float(datat[varpar[st]][i]) for st in parstr])
            ])
            #expanded = np.array([np.append([[i]], [float(data[varpar[st]][i]) for st in parstr])])
            sortedprms = np.append(sortedprms, expanded, axis=0)
            True
        sortedprms = sortedprms[1:].transpose()
        precparams = []
        #for prm in setparams:  precparams = precparams + [srcp.precparams[prm]]
        #precparams = [0.4,0.4]
        precparams = [prec] * len(parstr)

        src.finalblock = np.zeros((len(sortedprms), 1))
        src.getsubblock(sortedprms, 0, precparams)

        sortedidx = [int(a) for a in src.finalblock[0][1:]]
        sorteddata = data[sortedidx]

        for pknum in selectedpeaks:
            dataln = 0
            mm = np.array([])
            y = np.array([])
            y_err = np.array([])
            paramdict = {}
            while dataln < nrdatalns:
                for ipar in range(0, len(parstr) - 1):
                    paramdict[parstr[ipar]] = str(
                        sorteddata[dataln][varpar[parstr[ipar]]])
                yval = float(sorteddata[dataln][fitparams[selectedfit][pknum]])
                if yval < 0 and fixnegative: yval = 0
                y = np.append(y, yval)
                try:
                    yval_err = float(
                        sorteddata[dataln][fitparams["StDev_" +
                                                     selectedfit][pknum]])
                    y_err = np.append(y_err, yval_err)
                except:
                    True
                mm = np.append(mm,
                               float(sorteddata[dataln][varpar[parstr[-1]]]))
                dataln = dataln + 1
                if len(parstr
                       ) == 1:  #all values will be given in a single dataset
                    for i in range(nrdatalns - 1):
                        yval = float(
                            sorteddata[dataln][fitparams[selectedfit][pknum]])
                        if yval < 0 and fixnegative: yval = 0
                        y = np.append(y, yval)
                        try:
                            yval_err = float(sorteddata[dataln][fitparams[
                                "StDev_" + selectedfit][pknum]])
                            y_err = np.append(y_err, yval_err)
                        except:
                            True
                        mm = np.append(
                            mm, float(sorteddata[dataln][varpar[parstr[-1]]]))
                        dataln = dataln + 1
                    yfix = y
                    paramdict[parstr[-1]] = " "
                    paramdict["peak_nr"] = str(pknum)
                    paramdict["position"] = parstr[-1]
                    src.AddData(yfix,
                                paramdict.copy(),
                                detdict.copy(),
                                "ScanmanPeakFit_pkf",
                                fname, {"mm": mm},
                                y_err=y_err)
                    break

                elif dataln < nrdatalns:
                    paramdictcheck = {}
                    paramdictchecknew = {}
                    for ipar in range(len(parstr) - 1):
                        paramdictcheck[parstr[ipar]] = float(
                            sorteddata[dataln - 1][varpar[parstr[ipar]]])
                        paramdictchecknew[parstr[ipar]] = float(
                            sorteddata[dataln][varpar[parstr[ipar]]])
                    maxdiff = np.max(
                        np.abs(
                            np.array(list(paramdictcheck.values())) -
                            np.array(list(paramdictchecknew.values()))))
                    if maxdiff > prec:
                        paramdict[parstr[-1]] = " "
                        yave = []
                        #yfix=np.array([y[i] if y[i]/np.average(np.delete(y,y[i]))<=fixmassive else 0 for i in range(len(y))])
                        #yfix=np.array([y[i] if y[i]<15000 else 0 for i in range(len(y))])
                        yfix = y
                        paramdict["peak_nr"] = str(pknum)
                        paramdict["position"] = parstr[-1]
                        src.AddData(yfix,
                                    paramdict.copy(),
                                    detdict.copy(),
                                    "ScanmanPeakFit_pkf",
                                    fname, {"mm": mm},
                                    y_err=y_err)
                        mm = np.array([])
                        y = np.array([])
                        y_err = np.array([])
                        paramdict = {}
                    True
                else:  #Single value
                    paramdict[parstr[-1]] = " "
                    yfix = y
                    paramdict["peak_nr"] = str(pknum)
                    paramdict["position"] = parstr[-1]
                    src.AddData(yfix,
                                paramdict.copy(),
                                detdict.copy(),
                                "ScanmanPeakFit_pkf",
                                fname, {"mm": mm},
                                y_err=y_err)
                    mm = np.array([])
                    y = np.array([])
                    y_err = np.array([])
                    paramdict = {}

            True
        True

    #datasections = filecontent[scanStartIdx:-1].split("\n\n")
    #for section in datasections:
    #    paramdict = paramdictcmn.copy()

    #Get the intensity data
    #    y = np.array([])
    #    tth = np.array([])
    #    for aline in section.split("\n"):
    #        x, intensity = aline.split()
    #        y = np.append(y, float(intensity))
    #        mm = np.append(tth, float(x))

    #    src.AddData(y,paramdict, detdict, "Gumtree_xyd", fname, {"mm":mm})
    #src.AddData(np.array([0,1,2,3,3,2,1,0]),paramdict, detdict, "ScanmanPeakFit_pkf", fname, {"mm":np.array([-10.2,-9,-8.3,-7.4,-6.9,-5.2,-4,-3.5])})

    return src
    def ExtractDetectorLines(self):
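        """Rebin each selected 2D detector frame with np.histogram2d and add
        one 1D dataset per detector row (horizontal) or column (vertical) to a
        new Srcpar source, then open it as an in-memory file."""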
        currset = self.scanman.datasrc.currset
        flistidx, selectstart, selectend = self.ui.datasetsappliedframe.GetSelection(
        )

        #if self.scanman.axistype == "Channel" : axistype = "ch"
        #elif self.scanman.axistype == "Position" : axistype = "mm"
        #elif self.scanman.axistype == "Angle" : axistype = "2th"
        #elif self.scanman.axistype == "d-spacing" : axistype = "d"
        axistype = "mm"

        if self.ui.horizontal_radio.isChecked() == True:
            horizontal = True
            detectoraxis = "detector_y"
        else:
            horizontal = False
            detectoraxis = "detector_x"

        for filei in flistidx:
            self.scanman.ui.sourceGroupBox.SelectDataFile(filei)
            xbins = int(self.ui.rebinx_edit.text())
            ybins = int(self.ui.rebiny_edit.text())
            if xbins == -1: xbins = len(self.scanman.datasrc.x)
            if ybins == -1: ybins = len(self.scanman.datasrc.y)
            setend = copy.copy(selectend)
            setstart = selectstart
            dstsrc = Srcpar.Srcpar(self.scanman.datasrc.config)

            fname = path.basename(self.scanman.datasrc.filename
                                  ) + "_" + detectoraxis + "_extracted"
            if setend == -1:
                setend = len(self.scanman.datasrc.dataset) - 1
            for setnr in range(setstart, setend + 1):
                self.scanman.SelectData(setnr, display=False)
                src = self.scanman.datasrc
                cframe = src.dataset[setnr].currframe
                if horizontal == True:
                    x = cframe.hc_mm
                    y = cframe.vc_mm
                    n = cframe.n
                else:
                    x = cframe.vc_mm
                    y = cframe.hc_mm
                    n = cframe.n.transpose()
                #xx = np.array([[a]*len(y) for a in x])
                #yy=np.array([y]*len(x))
                yy = np.array([[a] * len(x) for a in y])
                xx = np.array([x] * len(y))
                histn, xedges, yedges = np.histogram2d(x=xx.flatten(),
                                                       y=yy.flatten(),
                                                       bins=[xbins, ybins],
                                                       weights=n.flatten())
                histn = histn.transpose()
                xc = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
                yc = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
                for irow in range(len(yc)):
                    dstprm = copy.copy(src.prm)
                    dstprm[detectoraxis] = str(yc[irow])
                    dstsrc.AddData(
                        histn[irow], dstprm, src.detprm,
                        src.origin + "_" + detectoraxis + "_lines_extracted",
                        fname, {axistype: xc})
                True
            finfo = SourceFileDEF.FileInfo(fname, self.scanman.config, dstsrc)
            self.scanman.ui.sourceGroupBox.OpenFile([fname], finfo)
            True