Example #1
 def findPeaksAndTroughs(self, ydataset, delta, xdataset=None):
     '''returns the peaks and troughs found as two lists of (peak_position, peak_value) tuples.
     If no x data set is provided, the tuples are (peak_index, peak_value) instead'''
     
     if xdataset is not None:
         xdataset = dnp.asarray(xdataset)
     return peakdet(dnp.asarray(ydataset), delta, xdataset)
Example #2
    def findPeaksAndTroughs(self, ydataset, delta, xdataset=None):
        '''returns the peaks and troughs found as two lists of (peak_position, peak_value) tuples.
        If no x data set is provided, the tuples are (peak_index, peak_value) instead'''

        if xdataset is not None:
            xdataset = dnp.asarray(xdataset)
        return peakdet(dnp.asarray(ydataset), delta, xdataset)
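A minimal usage sketch for this wrapper, assuming finder is an instance of the (not shown) class that defines findPeaksAndTroughs; the toy trace is made up. Without an x data set the returned tuples hold indices:

    y = [0.0, 1.0, 0.2, 3.0, 0.5, 0.4, 2.0, 0.1]      # hypothetical toy trace
    peaks, troughs = finder.findPeaksAndTroughs(y, delta=0.5)
    print peaks      # peaks at indices 1, 3 and 6 with values 1.0, 3.0 and 2.0
    print troughs    # troughs at indices 2 and 5 with values 0.2 and 0.4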
Example #3
	def test_process_negative_edge(self):
		# outer = 148, left half (first half of data though!)
		xds, yds = self.getInnerAndDetDatasetForGivenOuterValue(148.0)
		x = dnp.asarray(xds.data[:(len(xds) / 2)])
		y = dnp.asarray(yds.data[:(len(yds) / 2)])
		expected = 0.000000,  0.000000, -3.896408,  0.006343,  1.699449,  0.006343
		check(TwoGaussianEdges()._process(x, y), expected, TwoGaussianEdges().labelList)
Example #4
	def test_process_positive_edge(self):
		# outer = 148, right half (second half of data though!)
		xds, yds = self.getInnerAndDetDatasetForGivenOuterValue(148.0)
		x = dnp.asarray(xds.data[(len(xds) / 2):])
		y = dnp.asarray(yds.data[(len(yds) / 2):])
		expected = -3.944682,  0.006067,  0.000000,  0.000000,  1.683015,  0.006067
		check(TwoGaussianEdges()._process(x, y), expected, TwoGaussianEdges().labelList)
Example #5
 def testAsArray(self):
     ta = np.array([1, 2])
     ata = np.asarray(ta)
     self.assertEquals(ata.dtype, np.int_)
     self.checkitems([1, 2], ata)
     ata = np.asarray(ta, np.float)
     self.assertEquals(ata.dtype, np.float_)
     self.checkitems([1, 2], ata)
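For context, asarray is conventionally a no-copy conversion when the dtype already matches; a short sketch of that behaviour with standard NumPy (the scisoftpy implementation tested above is assumed to follow the same convention):

    import numpy as np

    a = np.array([1, 2])
    assert np.asarray(a) is a                    # dtype already matches: no copy is made
    b = np.asarray(a, dtype=np.float64)          # requesting another dtype yields a new array
    assert b.dtype == np.float64 and b is not a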
Example #7
def getDatasetFromLoadedFile(loadedFile, fieldName, scanDataPointCache=None):
	'''
	Gets a dataset called fieldName from an already loaded file (see loadScanFile(scanOb))
	and returns it as a dataset
	'''

	logger.debug('Getting data for %s, from %s (with cache=%s)', fieldName, loadedFile, scanDataPointCache)

	# Check if the field name is a full local name; if so, take just the last part,
	# which should be the node name. Keep the original fieldName as it might be
	# useful later
	if '.' in fieldName:
		# with scnname.fieldname strip off scnname
		strippedFieldName = fieldName.split('.')[-1]
	else: # fieldname doesn't require stripping
		strippedFieldName = fieldName

	# If we have a scanDataPointCache use it for performance
	if scanDataPointCache:
		return dnp.asarray(scanDataPointCache.getPositionsFor(strippedFieldName))

	# Check if it's a NeXus file
	if isinstance(loadedFile, NXroot):
		# Note: Using first node returned, this might fail if there are multiple nodes with the same name!
		# Might be possible to disambiguate this using the original fieldname?
		loadedNodes = loadedFile.getnodes(strippedFieldName, group=False, data=True)
		if len(loadedNodes) == 0:
			raise KeyError("%s not found in data file" % strippedFieldName)

		# Find nodes which have a local_name
		probableNodes = [loadedNodes[_n] for _n in xrange(len(loadedNodes))
			if 'local_name' in loadedNodes[_n].attrs]
		# Use the first local_name which matches the fieldName or fall back on using the first node
		for node in probableNodes:
			if node.attrs['local_name'] == fieldName:
				lazyDataset = node
				break
		else:
			lazyDataset = loadedNodes[0]

		# Use slicing to load the whole lazy dataset into an array, i.e. a non-lazy dataset
		dataset = lazyDataset[...]

		return dataset

	elif isinstance(loadedFile, DataHolder):
		datasetList = loadedFile[strippedFieldName]

		# Convert the dataset list into an array
		dataset = dnp.asarray(datasetList)

		return dataset

	# Not a supported file type
	else:
		print "The file format is not supported"
		print loadedFile.__class__
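A hedged call sketch for the helper above; the file path and field names are made up, and whether dnp.io.load yields an NXroot tree or a DataHolder depends on the loader, but both branches are handled by the function:

    loadedFile = dnp.io.load('/tmp/scan_0001.nxs')                 # hypothetical scan file
    energies = getDatasetFromLoadedFile(loadedFile, 'dcm.energy')  # 'scn.field' style names are stripped to 'field'
    counts = getDatasetFromLoadedFile(loadedFile, 'counts')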
Example #8
 def singlePeakProcess(self, xDataSet, yDataSet):
     xarray = dnp.asarray(xDataSet)
     yarray = dnp.asarray(yDataSet)
     ymax=yarray.max()
     ymaxindex=yarray.argmax()
     #print "y max index %d" % ymaxindex
     maxpos=xarray[ymaxindex]
     basey=self.baseline(xarray, yarray, 1)
     halfmax=ymax/2+basey/2
     xcrossingvalues=dnp.crossings(yarray, halfmax, xarray)
     #print xcrossingvalues, maxpos
     if len(xcrossingvalues)>2:
         print "multiple peaks exists in the data set!, only process the highest peak."
     fwhmvalue=find_gt(xcrossingvalues, maxpos)-find_lt(xcrossingvalues,maxpos)
     return [(maxpos,ymax,basey,fwhmvalue)]
Example #9
 def singlePeakProcess(self, xDataSet, yDataSet):
     xarray = dnp.asarray(xDataSet)
     yarray = dnp.asarray(yDataSet)
     ymax = yarray.max()
     ymaxindex = yarray.argmax()
     #print "y max index %d" % ymaxindex
     maxpos = xarray[ymaxindex]
     basey = self.baseline(xarray, yarray, 1)
     halfmax = ymax / 2 + basey / 2
     xcrossingvalues = dnp.crossings(yarray, halfmax, xarray)
     #print xcrossingvalues, maxpos
     if len(xcrossingvalues) > 2:
         print "multiple peaks exists in the data set!, only process the highest peak."
     fwhmvalue = find_gt(xcrossingvalues, maxpos) - find_lt(
         xcrossingvalues, maxpos)
     return [(maxpos, ymax, basey, fwhmvalue)]
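The FWHM here is the distance between the two half-height crossings that bracket the peak position, with the half height taken midway between the peak value and the estimated baseline. A standalone sketch of the same idea on made-up Gaussian data, using plain NumPy in place of dnp.crossings:

    import numpy as np

    x = np.linspace(-5.0, 5.0, 501)
    sigma = 0.8
    y = np.exp(-x ** 2 / (2 * sigma ** 2))    # noiseless Gaussian on a zero baseline
    halfmax = (y.max() + 0.0) / 2.0           # baseline taken as 0.0 in this sketch
    above = x[y >= halfmax]                   # contiguous run of x values around the peak
    fwhm = above[-1] - above[0]
    print fwhm, 2.3548 * sigma                # both come out near 1.88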
Example #10
 def baseline(self,xdataset, ydataset, smoothness):
     '''find the baseline y value for a peak in y dataset'''
     xdataset = dnp.asarray(xdataset)
     ydataset = dnp.asarray(ydataset)
     ymaxindex=ydataset.argmax()
     #TODO
     result=dnp.gradient(ydataset,xdataset)
     #derivative(xdataset, ydataset, smoothness)
     leftresult=result[:ymaxindex]
     rightresult=result[ymaxindex+1:]
     leftminderivativeindex=dnp.abs(leftresult).argmin()
     rightminderivativeindex=dnp.abs(rightresult).argmin()
     leftbasey=ydataset[leftminderivativeindex]
     rightbasey=ydataset[rightminderivativeindex+1+leftresult.shape[0]]
     basey=(leftbasey+rightbasey)/2
     return basey
Example #11
 def baseline(self, xdataset, ydataset, smoothness):
     '''find the baseline y value for a peak in y dataset'''
     xdataset = dnp.asarray(xdataset)
     ydataset = dnp.asarray(ydataset)
     ymaxindex = ydataset.argmax()
     #TODO
     result = dnp.gradient(ydataset, xdataset)
     #derivative(xdataset, ydataset, smoothness)
     leftresult = result[:ymaxindex]
     rightresult = result[ymaxindex + 1:]
     leftminderivativeindex = dnp.abs(leftresult).argmin()
     rightminderivativeindex = dnp.abs(rightresult).argmin()
     leftbasey = ydataset[leftminderivativeindex]
     rightbasey = ydataset[rightminderivativeindex + 1 +
                           leftresult.shape[0]]
     basey = (leftbasey + rightbasey) / 2
     return basey
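The baseline heuristic picks the flattest sample (smallest |dy/dx|) on each side of the peak and averages the two y values. A standalone sketch of the same steps on made-up data, with plain NumPy standing in for dnp:

    import numpy as np

    x = np.linspace(-5.0, 5.0, 501)
    y = 0.2 + np.exp(-x ** 2 / 0.5)             # peak sitting on a flat 0.2 background
    i = y.argmax()
    g = np.gradient(y, x[1] - x[0])
    left = np.abs(g[:i]).argmin()               # flattest sample left of the peak
    right = i + 1 + np.abs(g[i + 1:]).argmin()  # flattest sample right of the peak
    basey = (y[left] + y[right]) / 2.0          # comes out close to 0.2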
Example #12
    def findBasePoints(self, xdataset, ydataset, delta, smoothness):
        xdataset = dnp.asarray(xdataset)
        ydataset = dnp.asarray(ydataset)
        peaks=self.findPeaksAndTroughs(ydataset, delta)[0]
        #print peaks
        yslices=[]
        xslices=[]
        startindex=0
        for index,value in peaks: #@UnusedVariable
            yslices.append(ydataset[startindex:index])
            xslices.append(xdataset[startindex:index])
            startindex=index+1
        yslices.append(ydataset[startindex:])
        xslices.append(xdataset[startindex:])

        bases=[]
        for xset, yset in zip(xslices, yslices):
            result=dnp.gradient(yset, xset)
            minimumderivativeindex=dnp.abs(result).argmin()
            bases.append((xset[minimumderivativeindex],yset[minimumderivativeindex]))
        #print "Base Points (position, value)   : ", bases
        return bases
Example #13
    def findBasePoints(self, xdataset, ydataset, delta, smoothness):
        xdataset = dnp.asarray(xdataset)
        ydataset = dnp.asarray(ydataset)
        peaks = self.findPeaksAndTroughs(ydataset, delta)[0]
        #print peaks
        yslices = []
        xslices = []
        startindex = 0
        for index, value in peaks:  #@UnusedVariable
            yslices.append(ydataset[startindex:index])
            xslices.append(xdataset[startindex:index])
            startindex = index + 1
        yslices.append(ydataset[startindex:])
        xslices.append(xdataset[startindex:])

        bases = []
        for xset, yset in zip(xslices, yslices):
            result = dnp.gradient(yset, xset)
            minimumderivativeindex = dnp.abs(result).argmin()
            bases.append(
                (xset[minimumderivativeindex], yset[minimumderivativeindex]))
        #print "Base Points (position, value)   : ", bases
        return bases
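findBasePoints splits the x and y data at every detected peak and keeps the flattest sample of each segment as a base point. A hedged call sketch; finder is the same hypothetical instance as in the earlier sketch, x and y are hypothetical datasets of equal length, and smoothness is accepted but unused in the body shown:

    bases = finder.findBasePoints(x, y, delta=0.5, smoothness=1)
    for xbase, ybase in bases:
        print xbase, ybase        # one (position, value) base point per inter-peak segment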
Example #14
 def getDataSet(self, fName):
     nxsTree = dnp.io.load(fName)
     dataSet = dnp.asarray(nxsTree['/entry/result/data'])
     self.dataSet = dataSet.squeeze()
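A brief note on the squeeze: /entry/result/data as written by a processing chain can carry extra length-1 axes, and squeezing drops them so only the meaningful dimensions remain. A hedged call sketch with a hypothetical owning class and file path:

    reader = ResultReader()                        # hypothetical class defining getDataSet
    reader.getDataSet('/tmp/processed_0001.nxs')   # hypothetical processed NeXus file
    print reader.dataSet.shape                     # length-1 dimensions have been removed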
Example #15
def getDatasetFromLoadedFile(loadedFile, fieldName, scanDataPointCache=None):
    '''
	Gets a dataset called fieldName from an already loaded file (see loadScanFile(scanOb))
	and returns it as a dataset
	'''

    logger.debug('Getting data for %s, from %s (with cache=%s)', fieldName,
                 loadedFile, scanDataPointCache)

    # Check if the field name is a full local name; if so, take just the last part,
    # which should be the node name. Keep the original fieldName as it might be
    # useful later
    if '.' in fieldName:
        # with scnname.fieldname strip off scnname
        strippedFieldName = fieldName.split('.')[-1]
    else:  # fieldname doesn't require stripping
        strippedFieldName = fieldName

    # If we have a scanDataPointCache use it for performance
    if scanDataPointCache:
        return dnp.asarray(
            scanDataPointCache.getPositionsFor(strippedFieldName))

    # Check if it's a NeXus file
    if isinstance(loadedFile, NXroot):
        # Note: Using first node returned, this might fail if there are multiple nodes with the same name!
        # Might be possible to disambiguate this using the original fieldname?
        loadedNodes = loadedFile.getnodes(strippedFieldName,
                                          group=False,
                                          data=True)
        if len(loadedNodes) == 0:
            raise KeyError("%s not found in data file" % strippedFieldName)

        # Find nodes which have a local_name
        probableNodes = [
            loadedNodes[_n] for _n in xrange(len(loadedNodes))
            if 'local_name' in loadedNodes[_n].attrs
        ]
        # Use the first local_name which matches the fieldName or fall back on using the first node
        for node in probableNodes:
            if node.attrs['local_name'] == fieldName:
                lazyDataset = node
                break
        else:
            lazyDataset = loadedNodes[0]

        # Use slicing to load the whole lazy dataset into an array, i.e. a non-lazy dataset
        dataset = lazyDataset[...]

        return dataset

    elif isinstance(loadedFile, DataHolder):
        datasetList = loadedFile[strippedFieldName]

        # Convert the dataset list into an array
        dataset = dnp.asarray(datasetList)

        return dataset

    # Not a supported file type
    else:
        print "The file format is not supported"
        print loadedFile.__class__
Example #17
import sys

import scisoftpy as dnp  # the conventional alias for scisoftpy used by these examples

def peakdet(v, delta, x = None):
    """
Converted from MATLAB script at http://billauer.co.il/peakdet.html
Currently returns two lists of tuples, but maybe arrays would be better
function [maxtab, mintab]=peakdet(v, delta, x)
%PEAKDET Detect peaks in a vector
% [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
% maxima and minima ("peaks") in the vector V.
% MAXTAB and MINTAB consists of two columns. Column 1
% contains indices in V, and column 2 the found values.
%
% With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
% in MAXTAB and MINTAB are replaced with the corresponding
% X-values.
%
% A point is considered a maximum peak if it has the maximal
% value, and was preceded (to the left) by a value lower by
% DELTA.
% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
% This function is released to the public domain; Any use is allowed.
"""
    maxtab = []
    mintab = []
       
    if x is None:
        x = dnp.arange(len(v))
    
    v = dnp.asarray(v)
    
    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')
    
    if not isinstance(delta, (int, float)):
        sys.exit('Input argument delta must be a number')
    
    if delta <= 0:
        sys.exit('Input argument delta must be positive')
        

    mn, mx = float('inf'), float('-inf')
    mnpos, mxpos = float('NaN'), float('NaN')
    
    lookformax = True
    
    for i in dnp.arange(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]
        
        if lookformax:
            if this < mx-delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            if this > mn+delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True

    return maxtab, mintab
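peakdet makes a single pass over the trace, tracking the running maximum and minimum and recording a peak (or trough) once the signal has retreated by more than delta from that extreme. A minimal usage sketch with made-up numbers:

    series = [1.0, 0.2, 2.5, 2.0, 2.6, 0.3, 1.5]
    maxtab, mintab = peakdet(series, 0.5)
    print maxtab    # peaks at indices 0 and 4 (values 1.0 and 2.6); the dip at index 3 is not deeper than delta, so it is ignored
    print mintab    # troughs at indices 1 and 5 (values 0.2 and 0.3)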