def parseFile(self, fname):
    f = open(fname, "rb")
    d = f.read()
    f.close()
    informationHeader = parseInformationHeader(d)
    if informationHeader["Identifier"] != 66:
        raise IOError("Not an OMDAQ file")
    if informationHeader["ListMode"] != 2:
        raise IOError("Not a list mode file")
    hv = informationHeader["HeaderVersion"]
    adc_offset = self.GENERAL_SIZE + self.RUNDATA_SIZE[hv]
    adc_list = parseAdcInfo(d, hv, offset=adc_offset)
    # the offset to the events is unclear, but we know they
    # are at the end of the file, how they end and the block size
    block_size = informationHeader["ListModeBlockSize"]
    n_blocks = len(d) // block_size
    for i in range(n_blocks):
        block_end = len(d) - i * block_size
        block_start = block_end - block_size
        events = parseLmfBlock(d[block_start:block_end],
                               lmf_version=informationHeader["ListModeVersion"],
                               offset=0)
        n_events = events.shape[0]
        for idx in range(n_events):
            adc, row, col, energy = events[idx]
            #print(adc, energy, row, col)
            nChannels = int(adc_list[adc]["Calibration"][-1])
            if nChannels < 1:
                continue
            if self[adc] is None:
                self[adc] = DataObject.DataObject()
                self[adc].data = numpy.zeros((256, 256, nChannels),
                                             dtype=numpy.uint32)
                self[adc].info = {}
                self[adc].info["SourceType"] = SOURCE_TYPE
                try:
                    name = adc_list[adc]["Name"]
                    if hasattr(name, "decode"):
                        name = name.decode("utf-8").strip(chr(0))
                    self[adc].info["SourceName"] = name
                except:
                    self[adc].info["SourceName"] = adc_list[adc]["Name"]
                self[adc].info["McaCalib"] = [adc_list[adc]["Calibration"][0],
                                              adc_list[adc]["Calibration"][1],
                                              0.0]
                self[adc].info["Channel0"] = 0.0
                nSpectra = 256 * 256
                nRows = 256
                nFiles = nSpectra // nRows
                self[adc].info["Size"] = nFiles
                self[adc].info["NumberOfFiles"] = nFiles
                self[adc].info["FileIndex"] = 0
            if energy >= nChannels:
                continue
            self[adc].data[row, col, energy] += 1
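# Hedged sketch (not from PyMca): the inner loop of parseFile() above is a
# histogramming step -- each (adc, row, col, energy) list-mode event
# increments one voxel of a per-ADC (256, 256, nChannels) cube. The helper
# below is hypothetical and only illustrates that accumulation for a single
# ADC (the adc value is ignored here for simplicity).
def _demo_fill_cube(events, nChannels=1024):
    import numpy
    cube = numpy.zeros((256, 256, nChannels), dtype=numpy.uint32)
    for adc, row, col, energy in events:
        if energy >= nChannels:
            # out-of-range channels are skipped, as in parseFile()
            continue
        cube[row, col, energy] += 1
    return cube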
def newCurve(self, x, y, legend=None, info=None, replace=False,
             resetzoom=True, color=None, symbol=None, linestyle=None,
             xlabel=None, ylabel=None, yaxis=None,
             xerror=None, yerror=None, **kw):
    """
    Create and add a data object to :attr:`dataObjectsDict`
    """
    if "replot" in kw:
        _logger.warning("addCurve deprecated replot argument, "
                        "use resetzoom instead")
        resetzoom = kw["replot"] and resetzoom
    if legend is None:
        legend = "Unnamed curve 1.1"
    if xlabel is None:
        xlabel = "X"
    if ylabel is None:
        ylabel = "Y"
    if info is None:
        info = {}
    if color is not None:
        info["plot_color"] = color
    if symbol is not None:
        info["plot_symbol"] = symbol
    if linestyle is not None:
        info["plot_linestyle"] = linestyle
    if yaxis is not None:
        info["plot_yaxis"] = yaxis

    newDataObject = DataObject.DataObject()
    newDataObject.x = [x]
    newDataObject.y = [y]
    newDataObject.m = None
    newDataObject.info = copy.deepcopy(info)
    newDataObject.info['legend'] = legend
    newDataObject.info['SourceName'] = legend
    newDataObject.info['Key'] = ""
    newDataObject.info['selectiontype'] = "1D"
    newDataObject.info['LabelNames'] = [xlabel, ylabel]
    newDataObject.info['selection'] = {'x': [0], 'y': [1]}
    sel = {'SourceType': "Operation",
           'SourceName': legend,
           'Key': "",
           'legend': legend,
           'dataobject': newDataObject,
           'scanselection': True,
           'selection': {'x': [0], 'y': [1], 'm': [],
                         'cntlist': [xlabel, ylabel]},
           'selectiontype': "1D"}
    sel_list = [sel]
    if replace:
        self._replaceSelection(sel_list)
    else:
        self._addSelection(sel_list, resetzoom=resetzoom)
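# Hedged usage sketch for newCurve() above: "scanWindow" stands for an
# instance of the class defining that method (the names here are
# illustrative, not taken from PyMca). It adds a curve built from plain
# numpy arrays and lets newCurve() wrap them into a DataObject selection.
def _demo_new_curve(scanWindow):
    import numpy
    x = numpy.linspace(0.0, 10.0, 101)
    y = numpy.sin(x)
    scanWindow.newCurve(x, y,
                        legend="sin(x)",
                        xlabel="Angle",
                        ylabel="Amplitude",
                        replace=False,
                        resetzoom=True)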
def _getScanData(self, scan_key, raw=False):
    index = 0
    scan_obj = self._sourceObjectList[index].select(scan_key)
    scan_info = self.__getScanInfo(scan_key)
    scan_info["Key"] = scan_key
    scan_info["FileInfo"] = self.__getFileInfo()
    scan_type = scan_info["ScanType"]
    scan_data = None

    if scan_type & SF_SCAN:
        try:
            scan_data = numpy.transpose(scan_obj.data()).copy()
        except:
            raise IOError("SF_SCAN read failed")
    elif scan_type & SF_MESH:
        try:
            if raw:
                try:
                    scan_data = numpy.transpose(scan_obj.data()).copy()
                except:
                    raise IOError("SF_MESH read failed")
            else:
                scan_array = scan_obj.data()
                (mot1, mot2, cnts) = self.__getMeshSize(scan_array)
                scan_data = numpy.zeros((mot1, mot2, cnts), numpy.float64)
                for idx in range(mot2):
                    scan_data[:, idx, :] = numpy.transpose(
                        scan_array[:, idx * mot1:(idx + 1) * mot1]).copy()
                scan_data = numpy.transpose(scan_data).copy()
        except:
            raise IOError("SF_MESH read failed")
    elif scan_type & SF_MCA:
        try:
            scan_data = scan_obj.mca(1)
        except:
            raise IOError("SF_MCA read failed")
    elif scan_type & SF_NMCA:
        try:
            scan_data = scan_obj.mca(1)
        except:
            raise IOError("SF_NMCA read failed")

    if scan_data is not None:
        #create data object
        dataObject = DataObject.DataObject()
        #data.info = self.__getKeyInfo(key)
        dataObject.info = scan_info
        dataObject.data = scan_data
        return dataObject
    else:
        raise TypeError("getData unknown type")
def _removeMcaClicked(self):
    #remove the mca
    #dataObject = self.__mcaData0
    #send a dummy object
    dataObject = DataObject.DataObject()
    legend = self.__getLegend()
    if self.normalizeButton.isChecked():
        legend += "/"
        curves = self.mcaWidget.getAllCurves(just_legend=True)
        for curve in curves:
            if curve.startswith(legend):
                legend = curve
                break
    self.sendMcaSelection(dataObject, legend=legend, action="REMOVE")
def testStackFastFit(self):
    # TODO: this is done in PyMcaBatchTest on multiple input formats
    # so not needed here
    return
    from PyMca5.PyMcaIO import specfilewrapper as specfile
    from PyMca5.PyMcaIO import ConfigDict
    from PyMca5.PyMcaCore import DataObject
    spe = os.path.join(self.dataDir, "Steel.spe")
    cfg = os.path.join(self.dataDir, "Steel.cfg")
    sf = specfile.Specfile(spe)
    self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
    self.assertTrue(sf[0].nbmca() == 1,
                    "Spe file should contain MCA data")
    counts = sf[0].mca(1)
    channels = numpy.arange(counts.size)
    sf = None
    configuration = ConfigDict.ConfigDict()
    configuration.read(cfg)
    calibration = configuration["detector"]["zero"], \
                  configuration["detector"]["gain"], 0.0
    initialTime = configuration["concentrations"]["time"]

    # Fit MCA data with different dimensions: vector, image, stack
    for ndim in [1, 2, 3]:
        # create the data
        imgShape = tuple(range(3, 3 + ndim))
        data = numpy.tile(counts, imgShape + (1, ))
        nTimes = 3
        live_time = numpy.arange(numpy.prod(imgShape), dtype=int)
        live_time = initialTime + (live_time % nTimes) * initialTime

        # create the stack data object
        stack = DataObject.DataObject()
        stack.data = data
        stack.info = {}
        stack.info["McaCalib"] = calibration
        stack.info["McaLiveTime"] = live_time
        stack.x = [channels]

        # Test the fast XRF
        # we need to make sure we use fundamental parameters and
        # the time read from the file
        configuration["concentrations"]["usematrix"] = 0
        configuration["concentrations"]["useautotime"] = 1
        # make sure we use the SNIP background
        configuration['fit']['stripalgorithm'] = 1
        self._verifyFastFit(stack, configuration, live_time, nTimes)
def loadStack(self):
    if self._stackImageData is not None:
        #clear with a small stack
        stack = DataObject.DataObject()
        stack.data = numpy.zeros((100, 100, 100), numpy.float32)
        self.setStack(stack)

    if self.stackSelector is None:
        self.stackSelector = StackSelector.StackSelector(self)
    stack = self.stackSelector.getStack()
    if type(stack) == type([]):
        #aifira like, two stacks
        self.setStack(stack[0])
        self._slave = None
        slave = QStackWidget(master=False,
                             rgbwidget=self.rgbWidget)
        slave.setStack(stack[1])
        self.setSlave(slave)
    else:
        self.setStack(stack)
def _stackSignal(self, index=-1, load=False):
    ddict = self._lastItemDict
    filename = ddict['file']
    name = ddict['name']
    sel = {}
    sel['SourceName'] = self.data.sourceName * 1
    sel['SourceType'] = "HDF5"
    fileIndex = self.data.sourceName.index(filename)
    phynxFile = self.data._sourceObjectList[fileIndex]
    title = filename + " " + name
    sel['selection'] = {}
    sel['selection']['sourcename'] = filename
    #single dataset selection
    scanlist = None
    sel['selection']['x'] = []
    sel['selection']['y'] = [name]
    sel['selection']['m'] = []
    sel['selection']['index'] = index
    self._checkWidgetDict()
    widget = QStackWidget.QStackWidget()
    widget.setWindowTitle(title)
    widget.notifyCloseEventToWidget(self)

    #different ways to fill the stack
    if h5py.version.version < '2.0':
        useInstance = True
    else:
        useInstance = False
    groupName = posixpath.dirname(name)
    if useInstance:
        #this crashes with h5py 1.x
        #this way it is not loaded into memory unless requested
        #and cannot crash because the same instance is used
        stack = phynxFile[name]
    else:
        #create a new instance
        phynxFile = h5py.File(filename, 'r')
        stack = phynxFile[name]

    # try to find out the "energy" axis
    axesList = []
    xData = None
    try:
        group = phynxFile[groupName]
        if 'axes' in stack.attrs.keys():
            axes = stack.attrs['axes']
            if sys.version > '2.9':
                try:
                    axes = axes.decode('utf-8')
                except:
                    print("WARNING: Cannot decode axes")
            axes = axes.split(":")
            for axis in axes:
                if axis in group.keys():
                    axesList.append(posixpath.join(groupName, axis))
            if len(axesList):
                xData = phynxFile[axesList[index]].value
    except:
        # I cannot afford these Nexus specific things
        # to break the generic HDF5 functionality
        if DEBUG:
            raise
        axesList = []

    #the only problem is that, if the shape is not of type (a, b, c),
    #it will not be possible to reshape it. In that case I have to
    #actually read the values
    nDim = len(stack.shape)
    if (load) or (nDim != 3):
        stack = stack.value
    shape = stack.shape
    if index == 0:
        #Stack of images
        n = 1
        for dim in shape[:-2]:
            n = n * dim
        stack.shape = n, shape[-2], shape[-1]
        if len(axesList):
            if xData.size != n:
                xData = None
    else:
        #stack of mca
        n = 1
        for dim in shape[:-1]:
            n = n * dim
        if nDim != 3:
            stack.shape = 1, n, shape[-1]
        if len(axesList):
            if xData.size != shape[-1]:
                xData = None
        #index equal -1 should be able to handle it
        #if not, one would have to uncomment next line
        #index = 2
    actualStack = DataObject.DataObject()
    actualStack.data = stack
    if xData is not None:
        actualStack.x = [xData]
    widget.setStack(actualStack, mcaindex=index)
    wid = id(widget)
    self._lastWidgetId = wid
    self._widgetDict[wid] = widget
    widget.show()
def testStackBaseAverageAndSum(self):
    from PyMca5.PyMcaIO import specfilewrapper as specfile
    from PyMca5.PyMcaIO import ConfigDict
    from PyMca5.PyMcaCore import DataObject
    from PyMca5.PyMcaCore import StackBase
    from PyMca5.PyMcaPhysics.xrf import FastXRFLinearFit
    spe = os.path.join(self.dataDir, "Steel.spe")
    cfg = os.path.join(self.dataDir, "Steel.cfg")
    sf = specfile.Specfile(spe)
    self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
    self.assertTrue(sf[0].nbmca() == 1,
                    "Spe file should contain MCA data")
    y = counts = sf[0].mca(1)
    x = channels = numpy.arange(y.size).astype(numpy.float)
    sf = None
    configuration = ConfigDict.ConfigDict()
    configuration.read(cfg)
    calibration = configuration["detector"]["zero"], \
                  configuration["detector"]["gain"], 0.0
    initialTime = configuration["concentrations"]["time"]

    # create the data
    nRows = 5
    nColumns = 10
    nTimes = 3
    data = numpy.zeros((nRows, nColumns, counts.size), dtype=numpy.float)
    live_time = numpy.zeros((nRows * nColumns), dtype=numpy.float)

    mcaIndex = 0
    for i in range(nRows):
        for j in range(nColumns):
            data[i, j] = counts
            live_time[i * nColumns + j] = initialTime * \
                                          (1 + mcaIndex % nTimes)
            mcaIndex += 1

    # create the stack data object
    stack = DataObject.DataObject()
    stack.data = data
    stack.info = {}
    stack.info["McaCalib"] = calibration
    stack.info["McaLiveTime"] = live_time
    stack.x = [channels]

    # let's play
    sb = StackBase.StackBase()
    sb.setStack(stack)
    x, y, legend, info = sb.getStackOriginalCurve()
    readCalib = info["McaCalib"]
    readLiveTime = info["McaLiveTime"]
    self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                    (calibration[0], readCalib[0]))
    self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                    (calibration[1], readCalib[1]))
    self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                    (calibration[2], readCalib[2]))
    self.assertTrue(abs(live_time.sum() - readLiveTime) < 1.0e-5,
                    "Incorrect sum of live time data")

    mask = sb.getSelectionMask()
    if mask is None:
        mask = numpy.zeros((nRows, nColumns), dtype=numpy.uint8)
    mask[2, :] = 1
    mask[0, 0:2] = 1
    live_time.shape = mask.shape
    sb.setSelectionMask(mask)

    mcaObject = sb.calculateMcaDataObject(normalize=False)
    live_time.shape = mask.shape
    readLiveTime = mcaObject.info["McaLiveTime"]
    self.assertTrue(abs(live_time[mask > 0].sum() - readLiveTime) < 1.0e-5,
                    "Incorrect sum of masked live time data")

    mcaObject = sb.calculateMcaDataObject(normalize=True)
    live_time.shape = mask.shape
    tmpBuffer = numpy.zeros(mask.shape, dtype=numpy.int32)
    tmpBuffer[mask > 0] = 1
    nSelectedPixels = float(tmpBuffer.sum())
    readLiveTime = mcaObject.info["McaLiveTime"]
    self.assertTrue( \
        abs((live_time[mask > 0].sum() / nSelectedPixels) - readLiveTime) < 1.0e-5,
        "Incorrect average of masked live time data")
def testStackFastFit(self):
    from PyMca5.PyMcaIO import specfilewrapper as specfile
    from PyMca5.PyMcaIO import ConfigDict
    from PyMca5.PyMcaCore import DataObject
    from PyMca5.PyMcaPhysics.xrf import FastXRFLinearFit
    spe = os.path.join(self.dataDir, "Steel.spe")
    cfg = os.path.join(self.dataDir, "Steel.cfg")
    sf = specfile.Specfile(spe)
    self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
    self.assertTrue(sf[0].nbmca() == 1,
                    "Spe file should contain MCA data")
    y = counts = sf[0].mca(1)
    x = channels = numpy.arange(y.size).astype(numpy.float)
    sf = None
    configuration = ConfigDict.ConfigDict()
    configuration.read(cfg)
    calibration = configuration["detector"]["zero"], \
                  configuration["detector"]["gain"], 0.0
    initialTime = configuration["concentrations"]["time"]

    # create the data
    nRows = 5
    nColumns = 10
    nTimes = 3
    data = numpy.zeros((nRows, nColumns, counts.size), dtype=numpy.float)
    live_time = numpy.zeros((nRows * nColumns), dtype=numpy.float)

    mcaIndex = 0
    for i in range(nRows):
        for j in range(nColumns):
            data[i, j] = counts
            live_time[i * nColumns + j] = initialTime * \
                                          (1 + mcaIndex % nTimes)
            mcaIndex += 1

    # create the stack data object
    stack = DataObject.DataObject()
    stack.data = data
    stack.info = {}
    stack.info["McaCalib"] = calibration
    stack.info["McaLiveTime"] = live_time
    stack.x = [channels]

    # Test the fast XRF
    # we need to make sure we use fundamental parameters and
    # the time read from the file
    ffit = FastXRFLinearFit.FastXRFLinearFit()
    configuration["concentrations"]["usematrix"] = 0
    configuration["concentrations"]["useautotime"] = 1
    # make sure we use the SNIP background
    configuration['fit']['stripalgorithm'] = 1
    outputDict = ffit.fitMultipleSpectra(y=stack,
                                         weight=0,
                                         configuration=configuration,
                                         concentrations=True,
                                         refit=0)
    names = outputDict["names"]
    parameters = outputDict["parameters"]
    uncertainties = outputDict["uncertainties"]
    concentrations = outputDict["concentrations"]
    cCounter = 0
    for i in range(len(names)):
        name = names[i]
        if name.startswith("C(") and name.endswith(")"):
            # it is a concentrations parameter
            # verify that concentrations took into account the time
            reference = concentrations[cCounter][0, 0]
            cTime = configuration['concentrations']['time']
            values = concentrations[cCounter][:]
            values.shape = -1
            for point in range(live_time.size):
                current = values[point]
                if DEBUG:
                    print(name, point, reference, current, point % nTimes)
                if (point % nTimes) and (abs(reference) > 1.0e-10):
                    self.assertTrue(reference != current,
                        "Incorrect concentration for point %d" % point)
                corrected = current * live_time[point] / cTime
                if abs(reference) > 1.0e-10:
                    delta = 100 * abs((reference - corrected) / reference)
                    self.assertTrue(delta < 0.01,
                        "Incorrect concentration(t) for point %d" % point)
                else:
                    self.assertTrue(abs(reference - corrected) < 1.0e-5,
                        "Incorrect concentration(t) for point %d" % point)
            cCounter += 1
        else:
            if DEBUG:
                print(name, parameters[i][0, 0])
            delta = (parameters[i] - parameters[i][0, 0])
            self.assertTrue(delta.max() == 0,
                "Different fit value for parameter %s delta %f" % \
                (name, delta.max()))
            self.assertTrue(delta.min() == 0,
                "Different fit value for parameter %s delta %f" % \
                (name, delta.min()))
            delta = (uncertainties[i] - uncertainties[i][0, 0])
            self.assertTrue(delta.max() == 0,
                "Different sigma value for parameter %s delta %f" % \
                (name, delta.max()))
            self.assertTrue(delta.min() == 0,
                "Different sigma value for parameter %s delta %f" % \
                (name, delta.min()))

    # again, this time refitting the negative values
    outputDict = ffit.fitMultipleSpectra(y=stack,
                                         weight=0,
                                         configuration=configuration,
                                         concentrations=True,
                                         refit=1)
    names = outputDict["names"]
    parameters = outputDict["parameters"]
    uncertainties = outputDict["uncertainties"]
    concentrations = outputDict["concentrations"]
    cCounter = 0
    for i in range(len(names)):
        name = names[i]
        if name.startswith("C(") and name.endswith(")"):
            # it is a concentrations parameter
            # verify that concentrations took into account the time
            reference = concentrations[cCounter][0, 0]
            cTime = configuration['concentrations']['time']
            values = concentrations[cCounter][:]
            values.shape = -1
            for point in range(live_time.size):
                current = values[point]
                if DEBUG:
                    print(name, point, reference, current, point % nTimes)
                if (point % nTimes) and (abs(reference) > 1.0e-10):
                    self.assertTrue(reference != current,
                        "Incorrect concentration for point %d" % point)
                corrected = current * live_time[point] / cTime
                if abs(reference) > 1.0e-10:
                    delta = 100 * abs((reference - corrected) / reference)
                    self.assertTrue(delta < 0.01,
                        "Incorrect concentration(t) for point %d" % point)
                else:
                    self.assertTrue(abs(reference - corrected) < 1.0e-5,
                        "Incorrect concentration(t) for point %d" % point)
            cCounter += 1
        else:
            if DEBUG:
                print(name, parameters[i][0, 0])
            delta = (parameters[i] - parameters[i][0, 0])
            self.assertTrue(delta.max() == 0,
                "Different fit value for parameter %s delta %f" % \
                (name, delta.max()))
            self.assertTrue(delta.min() == 0,
                "Different fit value for parameter %s delta %f" % \
                (name, delta.min()))
            delta = (uncertainties[i] - uncertainties[i][0, 0])
            self.assertTrue(delta.max() == 0,
                "Different sigma value for parameter %s delta %f" % \
                (name, delta.max()))
            self.assertTrue(delta.min() == 0,
                "Different sigma value for parameter %s delta %f" % \
                (name, delta.min()))
def setStackDataObject(self, stack, index=None, stack_name=None):
    if hasattr(stack, "info") and hasattr(stack, "data"):
        dataObject = stack
    else:
        dataObject = DataObject.DataObject()
        dataObject.info = {}
        dataObject.data = stack
    if dataObject.data is None:
        return
    if stack_name is None:
        legend = dataObject.info.get('SourceName', "Stack")
    else:
        legend = stack_name
    if index is None:
        mcaIndex = dataObject.info.get('McaIndex', 0)
    else:
        mcaIndex = index
    shape = dataObject.data.shape
    self.dataObjectsList = [legend]
    self.dataObjectsDict = {legend: dataObject}
    self._browsingIndex = mcaIndex
    if mcaIndex == 0:
        if len(shape) == 2:
            self._nImages = 1
            self.setImageData(dataObject.data)
            self.slider.hide()
            self.name.setText(legend)
        else:
            self._nImages = 1
            for dimension in dataObject.data.shape[:-2]:
                self._nImages *= dimension
            #This is a problem for dynamic data
            #dataObject.data.shape = self._nImages, shape[-2], shape[-1]
            data = self._getImageDataFromSingleIndex(0)
            self.setImageData(data)
            self.slider.setRange(0, self._nImages - 1)
            self.slider.setValue(0)
            self.slider.show()
            self.name.setText(legend + " 0")
    elif mcaIndex in [len(shape) - 1, -1]:
        mcaIndex = -1
        self._browsingIndex = mcaIndex
        if len(shape) == 2:
            self._nImages = 1
            self.setImageData(dataObject.data)
            self.slider.hide()
            self.name.setText(legend)
        else:
            self._nImages = 1
            for dimension in dataObject.data.shape[2:]:
                self._nImages *= dimension
            #This is a problem for dynamic data
            #dataObject.data.shape = self._nImages, shape[-2], shape[-1]
            data = self._getImageDataFromSingleIndex(0)
            self.setImageData(data)
            self.slider.setRange(0, self._nImages - 1)
            self.slider.setValue(0)
            self.slider.show()
            self.name.setText(legend + " 0")
    else:
        raise ValueError("Unsupported 1D index %d" % mcaIndex)
    if self._nImages > 1:
        self.showImage(0)
    else:
        self.plotImage()
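# Hedged usage sketch for setStackDataObject() above: "browser" stands for
# an instance of the widget defining that method (an illustrative name, not
# taken from PyMca). A DataObject holding a 3D cube with McaIndex 0 is
# browsed image by image along the first axis.
def _demo_set_stack(browser):
    import numpy
    from PyMca5.PyMcaCore import DataObject
    stack = DataObject.DataObject()
    stack.info = {"McaIndex": 0}
    stack.data = numpy.zeros((10, 256, 256), dtype=numpy.float32)
    browser.setStackDataObject(stack, stack_name="Demo stack")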
def testSingleStackExport(self):
    from PyMca5 import PyMcaDataDir
    from PyMca5.PyMcaIO import specfilewrapper as specfile
    from PyMca5.PyMcaIO import ConfigDict
    from PyMca5.PyMcaCore import DataObject
    from PyMca5.PyMcaCore import StackBase
    from PyMca5.PyMcaCore import McaStackExport
    spe = os.path.join(self.dataDir, "Steel.spe")
    cfg = os.path.join(self.dataDir, "Steel.cfg")
    sf = specfile.Specfile(spe)
    self.assertTrue(len(sf) == 1, "File %s cannot be read" % spe)
    self.assertTrue(sf[0].nbmca() == 1,
                    "Spe file should contain MCA data")
    y = counts = sf[0].mca(1)
    x = channels = numpy.arange(y.size).astype(numpy.float)
    sf = None
    configuration = ConfigDict.ConfigDict()
    configuration.read(cfg)
    calibration = configuration["detector"]["zero"], \
                  configuration["detector"]["gain"], 0.0
    initialTime = configuration["concentrations"]["time"]

    # create the data
    nRows = 5
    nColumns = 10
    nTimes = 3
    data = numpy.zeros((nRows, nColumns, counts.size), dtype=numpy.float)
    live_time = numpy.zeros((nRows * nColumns), dtype=numpy.float)
    xpos = 10 + numpy.zeros((nRows * nColumns), dtype=numpy.float)
    ypos = 100 + numpy.zeros((nRows * nColumns), dtype=numpy.float)

    mcaIndex = 0
    for i in range(nRows):
        for j in range(nColumns):
            data[i, j] = counts
            live_time[i * nColumns + j] = initialTime * \
                                          (1 + mcaIndex % nTimes)
            xpos[mcaIndex] += j
            ypos[mcaIndex] += i
            mcaIndex += 1

    # create the stack data object
    stack = DataObject.DataObject()
    stack.data = data
    stack.info = {}
    stack.info["McaCalib"] = calibration
    stack.info["McaLiveTime"] = live_time
    stack.x = [channels]
    stack.info["positioners"] = {"x": xpos,
                                 "y": ypos}

    tmpDir = tempfile.gettempdir()
    self._h5File = os.path.join(tmpDir, "SteelStack.h5")
    if os.path.exists(self._h5File):
        os.remove(self._h5File)
    McaStackExport.exportStackList(stack, self._h5File)

    # read back the stack
    from PyMca5.PyMcaIO import HDF5Stack1D
    stackRead = HDF5Stack1D.HDF5Stack1D([self._h5File],
                                        {"y": "/measurement/detector_00"})

    # let's play
    sb = StackBase.StackBase()
    sb.setStack(stackRead)

    # positioners
    data = stackRead.info["positioners"]["x"]
    self.assertTrue(numpy.allclose(data, xpos),
                    "Incorrect readout of x positions")
    data = stackRead.info["positioners"]["y"]
    self.assertTrue(numpy.allclose(data, ypos),
                    "Incorrect readout of y positions")

    # calibration and live time
    x, y, legend, info = sb.getStackOriginalCurve()
    readCalib = info["McaCalib"]
    readLiveTime = info["McaLiveTime"]
    self.assertTrue(abs(readCalib[0] - calibration[0]) < 1.0e-10,
                    "Calibration zero. Expected %f got %f" % \
                    (calibration[0], readCalib[0]))
    self.assertTrue(abs(readCalib[1] - calibration[1]) < 1.0e-10,
                    "Calibration gain. Expected %f got %f" % \
                    (calibration[1], readCalib[1]))
    self.assertTrue(abs(readCalib[2] - calibration[2]) < 1.0e-10,
                    "Calibration 2nd order. Expected %f got %f" % \
                    (calibration[2], readCalib[2]))
    self.assertTrue(abs(live_time.sum() - readLiveTime) < 1.0e-5,
                    "Incorrect sum of live time data")
def _addSelection(self, selectionlist, resetzoom=True, replot=None):
    """Add curves to plot and data objects to :attr:`dataObjectsDict`
    """
    _logger.debug("_addSelection(self, selectionlist) " + str(selectionlist))
    if replot is not None:
        _logger.warning('deprecated replot argument, use resetzoom instead')
        resetzoom = replot and resetzoom
    sellist = selectionlist if isinstance(selectionlist, list) else \
              [selectionlist]

    if len(self.getAllCurves(just_legend=True)):
        activeCurve = self.getActiveCurve(just_legend=True)
    else:
        activeCurve = None
    nSelection = len(sellist)
    for selectionIndex in range(nSelection):
        sel = sellist[selectionIndex]
        key = sel['Key']
        legend = sel['legend']  # expected form sourcename + scan key
        if "scanselection" not in sel or not sel["scanselection"] or \
                sel['scanselection'] == "MCA":
            continue
        if len(key.split(".")) > 2:
            continue
        dataObject = sel['dataobject']
        # only one-dimensional selections considered
        if dataObject.info["selectiontype"] != "1D":
            continue
        # there must be something to plot
        if not hasattr(dataObject, 'y'):
            continue
        if len(dataObject.y) == 0:
            # nothing to be plot
            continue
        else:
            for i in range(len(dataObject.y)):
                if numpy.isscalar(dataObject.y[i]):
                    dataObject.y[i] = numpy.array([dataObject.y[i]])
        if not hasattr(dataObject, 'x'):
            ylen = len(dataObject.y[0])
            if ylen:
                xdata = numpy.arange(ylen).astype(numpy.float)
            else:
                #nothing to be plot
                continue
        if getattr(dataObject, 'x', None) is None:
            ylen = len(dataObject.y[0])
            if not ylen:
                # nothing to be plot
                continue
            xdata = numpy.arange(ylen).astype(numpy.float)
        elif len(dataObject.x) > 1:
            # mesh plot
            continue
        else:
            if numpy.isscalar(dataObject.x[0]):
                dataObject.x[0] = numpy.array([dataObject.x[0]])
            xdata = dataObject.x[0]
        if sel.get('SourceType') == "SPS":
            ycounter = -1
            if 'selection' not in dataObject.info:
                dataObject.info['selection'] = copy.deepcopy(sel['selection'])
            for ydata in dataObject.y:
                xlabel = None
                ylabel = None
                ycounter += 1
                # normalize ydata with monitor
                if dataObject.m is not None and len(dataObject.m[0]) > 0:
                    if len(dataObject.m[0]) != len(ydata):
                        raise ValueError(
                            "Monitor data length different than counter data")
                    index = numpy.nonzero(dataObject.m[0])[0]
                    if not len(index):
                        continue
                    xdata = numpy.take(xdata, index)
                    ydata = numpy.take(ydata, index)
                    mdata = numpy.take(dataObject.m[0], index)
                    # A priori the graph only knows about plots
                    ydata = ydata / mdata
                ylegend = 'y%d' % ycounter
                if isinstance(dataObject.info['selection'], dict):
                    if 'x' in dataObject.info['selection']:
                        # proper scan selection
                        ilabel = dataObject.info['selection']['y'][ycounter]
                        ylegend = dataObject.info['LabelNames'][ilabel]
                        ylabel = ylegend
                        if sel['selection']['x'] is not None:
                            if len(dataObject.info['selection']['x']):
                                xlabel = dataObject.info['LabelNames'] \
                                    [dataObject.info['selection']['x'][0]]
                dataObject.info["xlabel"] = xlabel
                dataObject.info["ylabel"] = ylabel
                newLegend = legend + " " + ylegend
                self.dataObjectsDict[newLegend] = dataObject
                self.addCurve(xdata, ydata, legend=newLegend,
                              info=dataObject.info,
                              xlabel=xlabel, ylabel=ylabel,
                              resetzoom=False)
                if self.scanWindowInfoWidget is not None:
                    if not self.infoDockWidget.isHidden():
                        activeLegend = self.getActiveCurve(just_legend=True)
                        if activeLegend == newLegend:
                            self.scanWindowInfoWidget.updateFromDataObject \
                                (dataObject)
                        else:
                            # TODO: better to implement scanWindowInfoWidget.clear
                            dummyDataObject = DataObject.DataObject()
                            dummyDataObject.y = [numpy.array([])]
                            dummyDataObject.x = [numpy.array([])]
                            self.scanWindowInfoWidget.updateFromDataObject(
                                dummyDataObject)
        else:
            # we have to loop for all y values
            ycounter = -1
            for ydata in dataObject.y:
                ylen = len(ydata)
                if ylen == 1 and len(xdata) > 1:
                    ydata = ydata[0] * numpy.ones(len(xdata)).astype(numpy.float)
                elif len(xdata) == 1:
                    xdata = xdata[0] * numpy.ones(ylen).astype(numpy.float)
                ycounter += 1
                newDataObject = DataObject.DataObject()
                newDataObject.info = copy.deepcopy(dataObject.info)
                if dataObject.m is not None:
                    for imon in range(len(dataObject.m)):
                        if numpy.isscalar(dataObject.m[imon]):
                            dataObject.m[imon] = \
                                numpy.array([dataObject.m[imon]])
                if dataObject.m is None:
                    mdata = numpy.ones(len(ydata)).astype(numpy.float)
                elif len(dataObject.m[0]) == len(ydata):
                    index = numpy.nonzero(dataObject.m[0])[0]
                    if not len(index):
                        continue
                    xdata = numpy.take(xdata, index)
                    ydata = numpy.take(ydata, index)
                    mdata = numpy.take(dataObject.m[0], index)
                    # A priori the graph only knows about plots
                    ydata = ydata / mdata
                elif len(dataObject.m[0]) == 1:
                    mdata = numpy.ones(len(ydata)).astype(numpy.float)
                    mdata *= dataObject.m[0][0]
                    index = numpy.nonzero(dataObject.m[0])[0]
                    if not len(index):
                        continue
                    xdata = numpy.take(xdata, index)
                    ydata = numpy.take(ydata, index)
                    mdata = numpy.take(dataObject.m[0], index)
                    # A priori the graph only knows about plots
                    ydata = ydata / mdata
                else:
                    raise ValueError(
                        "Monitor data length different than counter data")
                newDataObject.x = [xdata]
                newDataObject.y = [ydata]
                newDataObject.m = [mdata]
                newDataObject.info['selection'] = copy.deepcopy(sel['selection'])
                ylegend = 'y%d' % ycounter
                xlabel = None
                ylabel = None
                if isinstance(sel['selection'], dict) and 'x' in sel['selection']:
                    # proper scan selection
                    newDataObject.info['selection']['x'] = sel['selection']['x']
                    newDataObject.info['selection']['y'] = \
                        [sel['selection']['y'][ycounter]]
                    newDataObject.info['selection']['m'] = sel['selection']['m']
                    ilabel = newDataObject.info['selection']['y'][0]
                    ylegend = newDataObject.info['LabelNames'][ilabel]
                    ylabel = ylegend
                    if len(newDataObject.info['selection']['x']):
                        ilabel = newDataObject.info['selection']['x'][0]
                        xlabel = newDataObject.info['LabelNames'][ilabel]
                    else:
                        xlabel = "Point number"
                if ('operations' in dataObject.info) and len(dataObject.y) == 1:
                    newDataObject.info['legend'] = legend
                    symbol = 'x'
                else:
                    symbol = None
                    newDataObject.info['legend'] = legend + " " + ylegend
                newDataObject.info['selectionlegend'] = legend
                yaxis = None
                if "plot_yaxis" in dataObject.info:
                    yaxis = dataObject.info["plot_yaxis"]
                elif 'operations' in dataObject.info:
                    if dataObject.info['operations'][-1] == 'derivate':
                        yaxis = 'right'
                self.dataObjectsDict[newDataObject.info['legend']] = newDataObject
                self.addCurve(xdata, ydata,
                              legend=newDataObject.info['legend'],
                              info=newDataObject.info,
                              symbol=symbol,
                              yaxis=yaxis,
                              xlabel=xlabel,
                              ylabel=ylabel,
                              resetzoom=False)
    try:
        if activeCurve is None and self._curveList:
            self.setActiveCurve(self._curveList[0])
    finally:
        if resetzoom:
            self.resetZoom()
def _getMcaData(self, key):
    index = 0
    key_split = key.split(".")
    scan_key = key_split[0] + "." + key_split[1]
    scan_info = {}
    scan_info["Key"] = key
    scan_info["FileInfo"] = self.__getFileInfo()
    scan_obj = self._sourceObjectList[index].select(scan_key)
    scan_info.update(self.__getScanInfo(scan_key))
    scan_type = scan_info["ScanType"]
    scan_data = None
    mca_range = []  # for each dim., (name, length, values or None)

    if len(key_split) == 3:
        if scan_type & SF_NMCA or scan_type & SF_MCA:
            try:
                mca_no = int(key_split[2])
                scan_data = scan_obj.mca(mca_no)
            except:
                raise IOError("Single MCA read failed")
        if scan_data is not None:
            scan_info.update(self.__getMcaInfo(mca_no, scan_obj, scan_info))
            dataObject = DataObject.DataObject()
            dataObject.info = scan_info
            dataObject.data = scan_data
            return dataObject
    elif len(key_split) == 4:
        if scan_type == SF_SCAN + SF_NMCA:
            try:
                mca_no = (int(key_split[2]) - 1) * scan_info["NbMcaDet"] + \
                         int(key_split[3])
                scan_data = scan_obj.mca(mca_no)
            except:
                raise IOError("SF_SCAN+SF_NMCA read failed")
        elif scan_type == SF_MESH + SF_MCA:
            try:
                #scan_array = scan_obj.data()
                #(mot1, mot2, cnts) = self.__getMeshSize(scan_array)
                #mca_no = 1 + int(key_split[2]) + int(key_split[3]) * mot1
                mca_no = (int(key_split[2]) - 1) * scan_info["NbMcaDet"] + \
                         int(key_split[3])
                _logger.debug("try to read mca number = %s", mca_no)
                _logger.debug("total number of mca = %s", scan_info["NbMca"])
                scan_data = scan_obj.mca(mca_no)
            except:
                raise IOError("SF_MESH+SF_MCA read failed")
        elif scan_type & SF_NMCA or scan_type & SF_MCA:
            try:
                mca_no = (int(key_split[2]) - 1) * scan_info["NbMcaDet"] + \
                         int(key_split[3])
                scan_data = scan_obj.mca(mca_no)
            except:
                raise IOError("SF_MCA or SF_NMCA read failed")
        else:
            raise TypeError("Unknown scan type!!!!!!!!!!!!!!!!")
        if scan_data is not None:
            scan_info.update(self.__getMcaInfo(mca_no, scan_obj, scan_info))
            dataObject = DataObject.DataObject()
            dataObject.info = scan_info
            dataObject.data = scan_data
            return dataObject