コード例 #1
0
ファイル: fftPlugin.py プロジェクト: cudmore/SanPy
	def rebuildModel(self):
		"""Build a synthetic spike-train recording from the UI spin boxes.

		Reads duration, frequency, amplitude and noise amplitude from the
		model spin boxes, generates a fake recording with getSpikeTrain(),
		and wraps it in a fresh sanpy.bAnalysis stored in self._ba.
		"""
		# pull the model parameters from the interface widgets
		durSeconds = self.modelSecondsSpinBox.value()
		freqHz = self.modelFrequencySpinBox.value()
		amplitude = self.modelAmpSpinBox.value()
		noiseAmplitude = self.modelNoiseAmpSpinBox.value()
		samplingHz = 10000  # 10 kHz
		t, spikeTrain, data = getSpikeTrain(numSeconds=durSeconds,
								spikeFreq=freqHz,
								fs=samplingHz,
								amp=amplitude,
								noiseAmp=noiseAmplitude)
		# (t, data) need to be one column (for bAnalysis)
		t = t.reshape(t.shape[0], -1)
		data = data.reshape(data.shape[0], -1)

		self._startSec = 0
		self._stopSec = durSeconds

		print('  model t:', t.shape)
		print('  model data:', data.shape)

		modelDict = dict(sweepX=t, sweepY=data, mode='I-Clamp')
		self._ba = sanpy.bAnalysis(fromDict=modelDict)
コード例 #2
0
ファイル: test_analysis.py プロジェクト: cudmore/SanPy
class xxx_Test_Analysis(unittest.TestCase):
    # NOTE: these class attributes are evaluated once at import time, so a
    # single bAnalysis instance is shared by every test method below.
    path = 'data/19114001.abf'
    ba = sanpy.bAnalysis(path)
    expectedNumSpikes = 102
    expectedNumErrors = 1

    def test_0_load(self):
        """File loads without error and sweep data is present and consistent."""
        logger.info('RUNNING')

        self.assertFalse(self.ba.loadError)
        self.assertIsNotNone(self.ba.sweepX)
        self.assertIsNotNone(self.ba.sweepY)
        self.assertEqual(len(self.ba.sweepX), len(self.ba.sweepY))

    def test_1_detect(self):
        """Spike detection finds the expected spike and error counts."""
        logger.info('RUNNING')
        # grab detection parameters
        dDict = sanpy.bAnalysis.getDefaultDetection()
        # detect
        self.ba.spikeDetect(dDict)

        self.assertEqual(self.ba.numSpikes,
                         self.expectedNumSpikes)  # expecting 102 spikes
        self.assertEqual(len(self.ba.dfError),
                         self.expectedNumErrors)  # expecting 1 detection error

    def test_2_stats(self):
        """Per-spike statistics have one entry per detected spike."""
        logger.info('RUNNING')
        thresholdSec = self.ba.getStat('thresholdSec')

        self.assertEqual(len(thresholdSec),
                         self.expectedNumSpikes)  # expecting 102 spikes
コード例 #3
0
def loadSanPyDir(path):
	"""
	Load every .abf file in a folder.

	Returns:
		list: list of dict, each with keys 'header' (the bAnalysis
		header dict) and 'ba' (the bAnalysis object itself).
	"""
	print('app.py loadSanPyDir() path:', path)
	retList = []
	for fileName in os.listdir(path):
		# skip hidden files and anything that is not an .abf recording
		if fileName.startswith('.'):
			continue
		if not fileName.endswith('.abf'):
			continue
		ba = sanpy.bAnalysis(os.path.join(path, fileName))
		retList.append({
			'header': ba.api_getHeader(),
			'ba': ba,
		})
	return retList
コード例 #4
0
def getFileList(path):
    """Build a summary dict for every .abf file in *path*.

    Each dict has keys 'File Name', 'kHz', 'Duration (Sec)' and
    'Number of Sweeps'. If the folder contains no .abf files, a single
    dict with empty-string values is returned so table-style callers
    always receive at least one row.

    Args:
        path (str): folder to scan.

    Returns:
        list of dict
    """
    useExtension = '.abf'

    # template row; also serves as the fallback when no files are found
    # (the original code mutated one shared dict, which is why it needed
    # the "WOW, I need this here" reset inside the loop)
    emptyRowDict = {
        'File Name': '',
        'kHz': '',
        'Duration (Sec)': '',
        'Number of Sweeps': '',
    }

    retFileList = []
    for file in os.listdir(path):
        if file.startswith('.'):
            continue
        if not file.endswith(useExtension):
            continue
        fullPath = os.path.join(path, file)

        tmp_ba = sanpy.bAnalysis(file=fullPath)

        fileDict = {
            'File Name': file,
            'kHz': tmp_ba.dataPointsPerMs,
            'Duration (Sec)': int(round(max(tmp_ba.abf.sweepX))),
            'Number of Sweeps': len(tmp_ba.sweepList),
        }
        retFileList.append(fileDict)

    if not retFileList:
        retFileList.append(emptyRowDict)
    return retFileList
コード例 #5
0
ファイル: analysisDir.py プロジェクト: cudmore/SanPy
	def getFileRow(self, path, loadData=False):
		"""
		Get dict representing one file (row in table). Loads bAnalysis to get headers.

		On load error of proper file type (abf, csv), ba.loadError==True

		Args:
			path (Str): Full path to file.
			loadData (bool): Passed through to bAnalysis.

		Return:
			(tuple): (ba, rowDict) where ba is a sanpy.bAnalysis and
			rowDict is a dict of table columns. Both are None when the
			path is missing or the file type is not recognized.
		"""
		# guard clauses: file must exist and be a known type
		if not os.path.isfile(path):
			logger.warning(f'Did not find file "{path}"')
			return None, None
		fileType = os.path.splitext(path)[1]
		if fileType not in self.theseFileTypes:
			logger.warning(f'Did not load file type "{fileType}"')
			return None, None

		# load bAnalysis
		ba = sanpy.bAnalysis(path, loadData=loadData)
		if ba.loadError:
			logger.error(f'Error loading bAnalysis file "{path}"')

		# seed every column with a per-type empty value
		# sanpyColumns can only have type in ('float', 'str')
		rowDict = {}
		for colName, colSpec in self.sanpyColumns.items():
			if colSpec['type'] == float:
				rowDict[colName] = np.nan
			else:
				# str columns and anything else default to empty string
				rowDict[colName] = ''

		# checkbox column; widget expects values in (0, 2)
		rowDict['I'] = 0 if ba.loadError else 2
		rowDict['File'] = ba.getFileName()
		rowDict['Dur(s)'] = ba.recordingDur
		rowDict['Sweeps'] = ba.numSweeps
		rowDict['kHz'] = ba.recordingFrequency
		rowDict['Mode'] = ba.recordingMode

		rowDict['dvdtThreshold'] = 20
		rowDict['mvThreshold'] = -20

		return ba, rowDict
コード例 #6
0
def test():
    """Smoke test: detect spikes in a sample recording and print the export summary."""
    path = '/home/cudmore/Sites/SanPy/data/19114001.abf'
    ba = sanpy.bAnalysis(path)
    ba.spikeDetect()

    exporter = bExport(ba)
    summaryDf = exporter.getSummary()

    logger.info('')
    print(summaryDf)
コード例 #7
0
def loadFile(name):
    """Load one .abf by name, run spike detection, and update module globals.

    Side effects: rebinds the module-level globals `ba` and `subSetOfPnts`.
    Relies on module-level `myPath`, `myThreshold` and `plotEveryPoint`.
    """
    # full path is relative to the module-level data folder
    filePath = os.path.join(myPath, name)
    global ba
    ba = sanpy.bAnalysis(filePath)
    # detect with the module-level dV/dt threshold
    dDict = ba.getDefaultDetection()
    dDict['dvdtThreshold'] = myThreshold
    ba.spikeDetect(dDict)
    # down-sampled point indices used by the plotting code
    # NOTE(review): stop is len-1, so the final sample is excluded — confirm intended
    start = 0
    stop = len(ba.abf.sweepX) - 1
    global subSetOfPnts
    subSetOfPnts = range(start, stop, plotEveryPoint)
コード例 #8
0
def main():
    """Detect spikes in a sample recording and show it in a plotRecording window."""
    import sys

    path = '/home/cudmore/Sites/SanPy/data/19114001.abf'
    ba = sanpy.bAnalysis(path)
    ba.spikeDetect()
    print(ba.numSpikes)

    # spin up a Qt application hosting the plot widget
    app = QtWidgets.QApplication([])
    recordingPlot = plotRecording(ba=ba)
    recordingPlot.show()
    sys.exit(app.exec_())
コード例 #9
0
ファイル: analysisDir.py プロジェクト: cudmore/SanPy
	def loadOneAnalysis(self, path, uuid=None, allowAutoLoad=True):
		"""
		Load one bAnalysis either from original file path or uuid of h5 file.

		If from h5, we still need to reload sweeps !!!
		They are binary and fast, saving to h5 (in this case) is slow.

		Args:
			path (str): full path to the raw recording (.abf or .tif)
			uuid (str): optional key of a saved analysis in the folder .h5 file
			allowAutoLoad (bool): when True, fall back to loading from path
				if the h5 lookup fails or no uuid is given

		Returns:
			bAnalysis or None
		"""
		logger.info(f'path:"{path}" uuid:"{uuid}" allowAutoLoad:"{allowAutoLoad}"')
		ba = None
		if uuid is not None and uuid:
			# load from h5; the store lives next to the folder database
			hdfFile = os.path.splitext(self.dbFile)[0] + '.h5'
			hdfPath = os.path.join(self.path, hdfFile)
			with pd.HDFStore(hdfPath) as hdfStore:
				try:
					dfAnalysis = hdfStore[uuid]
					ba = sanpy.bAnalysis(fromDf=dfAnalysis)



					# NOTE: raw sweeps are not stored in the h5 file, so reload them
					# from the original recording and rebuild the filtered copy.
					# (Original author flagged this as sloppy; the filter must
					# always be rebuilt after an h5 load.)
					if path.endswith('.tif'):
						# kymograph
						ba._loadTif()
					else:
						ba._loadAbf()
					ba.rebuildFiltered()


					logger.info(f'Loaded ba from h5 uuid {uuid} and now ba:{ba}')
				except (KeyError):
					# uuid not present in the store; ba stays None
					logger.error(f'Did not find uuid in h5 file, uuid:{uuid}')
		if allowAutoLoad and ba is None:
			# load from path
			ba = sanpy.bAnalysis(path)
			logger.info(f'Loaded ba from path {path} and now ba:{ba}')
		#
		return ba
コード例 #10
0
def testPlot():
    """Load the bundled sample recording, run detection and open a plotRecording plugin."""
    import os

    # resolve the sample data path relative to this file's folder
    thisFolder = os.path.split(os.path.realpath(__file__))[0]
    path = os.path.join(thisFolder, '../../../data/19114001.abf')

    ba = bAnalysis(path)
    if ba.loadError:
        print('error loading file')
        return
    ba.spikeDetect()

    # create plugin
    ap = plotRecording(ba=ba)
コード例 #11
0
ファイル: analysisDir.py プロジェクト: cudmore/SanPy
	def loadFolder(self):
		"""
		Load using cloudDict

		Lists the configured GitHub repo folder via the contents API,
		prints each .abf entry, then downloads one file into memory and
		runs spike detection on it as a smoke test.
		"""

		"""
		# use ['download_url'] to download abf file (no byte conversion)
		response[0] = {
			name : 171116sh_0018.abf
			path : data/171116sh_0018.abf
			sha : 5f3322b08d86458bf7ac8b5c12564933142ffd17
			size : 2047488
			url : https://api.github.com/repos/cudmore/SanPy/contents/data/171116sh_0018.abf?ref=master
			html_url : https://github.com/cudmore/SanPy/blob/master/data/171116sh_0018.abf
			git_url : https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17
			download_url : https://raw.githubusercontent.com/cudmore/SanPy/master/data/171116sh_0018.abf
			type : file
			_links : {'self': 'https://api.github.com/repos/cudmore/SanPy/contents/data/171116sh_0018.abf?ref=master', 'git': 'https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17', 'html': 'https://github.com/cudmore/SanPy/blob/master/data/171116sh_0018.abf'}
		}
		"""

		# build the GitHub contents-API URL for the configured repo folder
		owner = self._cloudDict['owner']
		repo_name = self._cloudDict['repo_name']
		path = self._cloudDict['path']
		url = f'https://api.github.com/repos/{owner}/{repo_name}/contents/{path}'

		# response is a list of dict
		response = requests.get(url).json()
		#print('response:', type(response))

		# dump every .abf entry for inspection
		for idx, item in enumerate(response):
			if not item['name'].endswith('.abf'):
				continue
			print(idx)
			# use item['git_url']
			for k,v in item.items():
				print('  ', k, ':', v)

		#
		# test load
		# NOTE(review): hard-coded response[1] assumes at least two entries — confirm
		download_url = response[1]['download_url']
		content = requests.get(download_url).content

		# wrap the raw bytes so bAnalysis can read them like a file
		fileLikeObject = io.BytesIO(content)
		ba = sanpy.bAnalysis(byteStream=fileLikeObject)
		#print(ba._abf)
		#print(ba.api_getHeader())
		ba.spikeDetect()
		print(ba.numSpikes)
コード例 #12
0
def parse_contents_abf(contents, filename, date):
	"""
	parses binary abf file

	Decodes a base64 data-URL upload, loads it into the module-level
	global `ba`, appends it to `baList`, and rebuilds the module-level
	`subSetOfPnts` slice used for down-sampled plotting.

	Args:
		contents (str): data-URL string ('<content_type>,<base64 payload>')
		filename (str): name of the uploaded file (used for logging only)
		date: upload timestamp (unused)

	Returns:
		dash html.Div confirming the load.
	"""
	print('app2.parse_contents_abf() filename:', filename)

	content_type, content_string = contents.split(',')

	print('  content_type:', content_type)
	decoded = base64.b64decode(content_string)
	# wrap decoded bytes so bAnalysis can treat them as a file
	fileLikeObject = io.BytesIO(decoded)

	print('  *** instantiating sanpy.bAnalysis with byte stream')
	try:
		global ba
		ba = sanpy.bAnalysis(byteStream=fileLikeObject)

		global baList
		baList.append(ba)

		# TODO: Add to file list
		# {'File Name':fileName, 'kHz':, kHz, 'Duration (Sec)':durSeconds, 'Number of Sweeps':numberOfSweeps}
		print('  todo: keep track of ba for this file and add to file list')

		# todo: get rid of this weirdness
		start = 0
		stop = len(ba.sweepX) - 1
		print('	stop:', stop)
		print('	plotEveryPoint:', plotEveryPoint)
		global subSetOfPnts
		subSetOfPnts = slice(start, stop, plotEveryPoint)
	except Exception:
		# was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate
		print('*** exception in app2.parse_contents_abf():')
		print(sys.exc_info())

	return html.Div([
		html.H5(f'Loaded file: (unknown)'),
	])
コード例 #13
0
ファイル: analysisPlot.py プロジェクト: cudmore/SanPy
def test_plot(path):
    """Load *path*, detect spikes with a dV/dt threshold of 50, and show the plots.

    Bug fix: the detection key was misspelled 'dvdThreshold' (missing 't'),
    so the intended threshold of 50 was never applied; the key used
    throughout SanPy is 'dvdtThreshold'.
    """
    print('=== test_plot() path:', path)
    ba = sanpy.bAnalysis(path)

    # detect
    dDict = sanpy.bAnalysis.getDefaultDetection()
    dDict['dvdtThreshold'] = 50  # was dDict['dvdThreshold'] (typo)
    ba.spikeDetect(dDict)

    # plot
    bp = sanpy.bAnalysisPlot(ba)

    fig = bp.plotDerivAndRaw()

    fig = bp.plotSpikes()

    plt.show()
コード例 #14
0
ファイル: sanpyPlugin.py プロジェクト: cudmore/SanPy
def test_plugin():
	"""Open the plotScatter plugin on an analyzed sample recording."""
	import sys
	from PyQt5 import QtCore, QtWidgets, QtGui
	import sanpy
	import sanpy.interface

	# create a PyQt application
	app = QtWidgets.QApplication([])

	# load and analyze sample data
	path = '/home/cudmore/Sites/SanPy/data/19114001.abf'
	ba = sanpy.bAnalysis(path)
	ba.spikeDetect()

	# open the plotScatter plugin interface on the full recording
	scatterPlugin = sanpy.interface.plugins.plotScatter(ba=ba, startStop=None)

	sys.exit(app.exec_())
コード例 #15
0
def loadOneFile(bucket, key, s3=None):
    """Download one S3 object into memory and load it as a bAnalysis.

    Args:
        bucket (str): S3 bucket name.
        key (str): object key within the bucket.
        s3: optional boto3 S3 resource; created on demand when None.

    Returns:
        sanpy.bAnalysis on success, None when the download fails.
    """
    if s3 is None:
        s3 = boto3.resource('s3')

    ba = None
    file_stream = io.BytesIO()
    try:
        s3.Object(bucket, key).download_fileobj(file_stream)
    except (boto3.exceptions.botocore.exceptions.ClientError) as e:
        # best-effort: report and return None
        print(e)
    except (boto3.exceptions.botocore.exceptions.EndpointConnectionError) as e:
        print(e)
    else:
        ba = sanpy.bAnalysis(byteStream=file_stream)
        print('  loadOneFile() loaded key:', key, 'ba:', ba)
    return ba
コード例 #16
0
def run():

    # create client assuming ~/.aws/xxx has been defined
    s3 = boto3.resource('s3')

    myBucketName = 'sanpy-data'
    keyList = fetchFileList(myBucketName, s3=s3)

    print(f'loading {len(keyList)} keys/files')
    for key in keyList:
        file_stream = io.BytesIO()
        # works with boto3.client('s3')
        #s3Client.download_fileobj(myBucketName, key, file_stream)
        s3.Object(myBucketName, key).download_fileobj(file_stream)
        ba = sanpy.bAnalysis(byteStream=file_stream)
        print('  loaded key:', key, 'ba:', ba)

    # download one file
    '''
コード例 #17
0
def test2():
    # load abf
    path = '/home/cudmore/Sites/SanPy/data/19114000.abf'
    ba = sanpy.bAnalysis(path)

    # detect
    sweepNumber = 0
    detectionClass = ba.detectionClass
    detectionClass['verbose'] = True
    ba.spikeDetect2__(sweepNumber, detectionClass)
    '''
	printSpikeNum = 4
	print(f'== printing spike {printSpikeNum}')
	ba.printSpike(printSpikeNum)
	'''

    #ba.printErrors()

    sd = ba.getSpikeDictionaries()
    '''
コード例 #18
0
ファイル: analysisDir.py プロジェクト: cudmore/SanPy
def test_hd5_2():
	"""Ad-hoc test of saving/loading analysisDir state via HDF5.

	Two manually toggled branches (edit the `if 1:` / `if 0:` literals):
	the first saves the whole folder to .h5, the second reloads every
	bAnalysis from the .h5 and re-runs detection.
	"""
	folderPath = '/home/cudmore/Sites/SanPy/data'
	if 1:
		# save analysisDir hdf
		bad = analysisDir(folderPath)
		print('bad._df:')
		print(bad._df)
		#bad.saveDatabase()  # save .csv
		bad.saveHdf()  # save ALL bAnalysis in .h5

	if 0:
		# load h5 and reconstruct a bAnalysis object
		start = time.time()
		hdfPath = '/home/cudmore/Sites/SanPy/data/sanpy_recording_db.h5'
		with pd.HDFStore(hdfPath, 'r') as hdfStore:
			for key in hdfStore.keys():

				# each store key holds one saved analysis as a DataFrame
				dfTmp = hdfStore[key]
				#path = dfTmp.iloc[0]['path']

				print('===', key)
				#if not key.startswith('/r6'):
				#	continue
				#for col in dfTmp.columns:
				#	print('  ', col, dfTmp[col])
				#print(key, type(dfTmp), dfTmp.shape)
				#print('dfTmp:')
				#print(dfTmp)


				#print(dfTmp.iloc[0]['_sweepX'])

				# load bAnalysis from a pandas DataFrame
				ba = sanpy.bAnalysis(fromDf=dfTmp)

				print(ba)

				ba.spikeDetect()  # this should reproduce exactly what was save ... It WORKS !!!

		stop = time.time()
		logger.info(f'h5 load took {round(stop-start,3)} seconds')
コード例 #19
0
ファイル: uploadpage.py プロジェクト: cudmore/SanPy
def parse_contents_abf(contents, filename, date):
	"""
	parses binary abf file

	Decodes a base64 data-URL upload and loads it into the module-level
	global `ba`.

	Args:
		contents (str): data-URL string ('<content_type>,<base64 payload>')
		filename (str): name of the uploaded file (used for logging only)
		date: upload timestamp (unused)

	Returns:
		dash html.Label confirming the load.
	"""
	print('parse_contents_abf() filename:', filename)

	content_type, content_string = contents.split(',')

	#print('  content_type:', content_type)
	decoded = base64.b64decode(content_string)
	# wrap decoded bytes so bAnalysis can treat them as a file
	fileLikeObject = io.BytesIO(decoded)

	print('  instantiating sanpy.bAnalysis with byte stream')
	try:
		global ba
		ba = sanpy.bAnalysis(byteStream=fileLikeObject)
	except Exception:
		# was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate
		print('*** exception in parse_contents_abf():')
		print(sys.exc_info())

	return html.Label(f'Loaded file: (unknown)')
コード例 #20
0
def test_save_load():
    """Round-trip a detectionClass to JSON: detect, save, wipe, reload, print."""
    # load an abf
    path = '/home/cudmore/Sites/SanPy/data/19114001.abf'
    ba = sanpy.bAnalysis(path)

    # analyze
    ba.spikeDetect()
    print(ba)

    # save into a 'sanpy_analysis' folder next to the recording
    parentPath, fileName = os.path.split(path)

    saveFolder = os.path.join(parentPath, 'sanpy_analysis')
    if not os.path.isdir(saveFolder):
        logger.info(f'making folder: {saveFolder}')
        os.mkdir(saveFolder)
    baseName = os.path.splitext(fileName)[0]
    saveFile = baseName + '_detection.json'
    savePath = os.path.join(saveFolder, saveFile)
    logger.info(f'savePath:{savePath}')

    detectionClass = ba.detectionClass

    # convert non-json-serializable enum values to their str name
    # NOTE(review): this mutates ba.detectionClass in place — confirm callers tolerate that
    defaultValue = detectionClass._dDict['detectionType']['defaultValue']
    detectionClass._dDict['detectionType']['defaultValue'] = defaultValue.name

    currentValue = detectionClass._dDict['detectionType']['currentValue']
    detectionClass._dDict['detectionType']['currentValue'] = currentValue.name

    #detectionClass.print()

    detectionClass.save(savePath)
    # wipe the dict to prove load() fully restores it
    detectionClass._dDict = None

    detectionClass.load(savePath)

    detectionClass.print()
コード例 #21
0
	def old_setFile(self, filePath, plotRaw=False):
		"""
		Load a new file when the main application switches files.

		Returns:
			bool: True on success, False when the path is missing or
			the file fails to load.
		"""
		if not os.path.isfile(filePath):
			print('bExportWidget.setFile did not find path:', filePath)
			return False

		self.filePath = filePath
		self.ba = bAnalysis(filePath)
		if self.ba.loadError:
			print('there was an error loading file', filePath)
			return False

		# keep both raw and (initially identical) down-sampled copies
		sweepX = self.ba.abf.sweepX
		sweepY = self.ba.abf.sweepY
		self.mySweepX = sweepX
		self.mySweepY = sweepY
		self.mySweepX_Downsample = sweepX
		self.mySweepY_Downsample = sweepY

		if plotRaw:
			self.plotRaw()
		return True
コード例 #22
0
            self.figure.savefig(fullSavePath)

    def center(self):
        """Move this window so it is centered on the available screen area."""
        frameRect = self.frameGeometry()
        screenCenter = QtWidgets.QDesktopWidget().availableGeometry().center()
        frameRect.moveCenter(screenCenter)
        self.move(frameRect.topLeft())


if __name__ == '__main__':
    path = '../data/19114001.abf'

    ba = bAnalysis(path)
    if ba.loadError:
        logger.error(f'Error loading file: {path}')
    else:
        app = QtWidgets.QApplication(sys.argv)
        app.aboutToQuit.connect(app.deleteLater)

        sweepX = ba.sweepX
        sweepY = ba.sweepY
        xyUnits = ('Time (sec)', 'Vm (mV)')
        type = 'vm'
        GUI = bExportWidget(sweepX,
                            sweepY,
                            path=path,
                            xyUnits=xyUnits,
                            type=type)
コード例 #23
0
            app.selectSpikeList(spikeList, doZoom=doZoom)


def test_print_classes():
    """Print every class visible in this module and in sanpy.interface.plugins."""
    print('__name__:', __name__)
    for memberName, memberObj in inspect.getmembers(sys.modules[__name__]):
        if inspect.isclass(memberObj):
            print(memberName, ':', memberObj)

    print('===')
    for memberName, memberObj in inspect.getmembers(sanpy.interface.plugins):
        if inspect.isclass(memberObj):
            print(memberName, ':', memberObj)


if __name__ == '__main__':
    #print_classes()
    #sys.exit(1)

    # build the plugin manager and list the available plugins
    bp = bPlugins()

    pluginList = bp.pluginList()
    print('pluginList:', pluginList)

    # load a sample recording and run the plotRecording plugin on it
    abfPath = '/Users/cudmore/Sites/SanPy/data/19114001.abf'
    ba = sanpy.bAnalysis(abfPath)

    bp.runPlugin('plotRecording', ba)
    #bp.runPlugin('plotRecording3', ba)
コード例 #24
0
ファイル: bKymograph.py プロジェクト: cudmore/SanPy
    def hoverEvent(self, event):
        """Update the cursor intensity label as the mouse moves over the kymograph image."""
        if event.isExit():
            return

        # integer pixel coordinates of the hover position
        col = int(event.pos().x())
        row = int(event.pos().y())

        myTif = self.ba.tifData
        intensity = myTif[row, col]  # image is indexed (row, col), i.e. flipped

        #logger.info(f'x:{col} y:{row} intensity:{intensity}')

        self.tifCursorLabel.setText(f'Cursor:{intensity}')
        self.tifCursorLabel.update()


if __name__ == '__main__':
    # kymograph (line-scan) .tif recording used for ad-hoc testing
    path = '/media/cudmore/data/rabbit-ca-transient/jan-12-2022/Control/220110n_0003.tif.frames/220110n_0003.tif'
    ba = sanpy.bAnalysis(path)
    print(ba)

    app = QtWidgets.QApplication(sys.argv)

    # show the kymograph widget for the loaded analysis
    kw = kymWidget(ba)
    kw.show()

    sys.exit(app.exec_())
コード例 #25
0
ファイル: bFileList.py プロジェクト: imbi7py/SanPy
	def __init__(self, index, path, fromDict=None):
		"""
		One file entry in the file list.

		path: (str) full path to .mp4 video file
		fromDict: (dict) construct from database dict

		Bug fix: loadWasGood was only assigned in the `fromDict is None`
		branch, so constructing from a database dict raised NameError at
		the final `if loadWasGood:` check. It is now initialized up front.
		"""
		if not os.path.isfile(path):
			print('error: bVideoFile() could not open file path:', path)
			return

		self.path = path

		videoFileName = os.path.basename(path)

		# abb 20201009
		self.dict = self._getDefaultDict()

		self.dict['file'] = videoFileName

		self.loadError = False # abb 202012

		# assume success; cleared below when the abf load fails
		loadWasGood = True

		# load abf file and grab parameters
		if fromDict is None:
			ba = sanpy.bAnalysis(file=path) # load file as abf file (may fail)
			if ba.loadError:
				print('bFileList.bVideoFile.__init__() exception, did not load file:', path)
				loadWasGood = False
				self.loadError = True
			else:
				pntsPerMS = ba.dataPointsPerMs
				numSweeps = len(ba.sweepList)
				durationSec = max(ba.abf.sweepX)
				acqDate = ba.acqDate
				acqTime = ba.acqTime

				# no analysis has been run yet; these stay None until detection
				# (original author flagged this block as a probable error — verify)
				dvdtThreshold = None
				minSpikeVm = None # abb 202012
				numSpikes = None
				analysisDate = None
				analysisTime = None

		else:
			# loading from a saved _db.json file ???
			pntsPerMS = fromDict['kHz']
			numSweeps = fromDict['numSweeps']
			durationSec = fromDict['durationSec']
			acqDate = fromDict['acqDate']
			acqTime = fromDict['acqTime']

			dvdtThreshold = fromDict['dvdtThreshold']
			minSpikeVm = fromDict['minSpikeVm'] # abb 202012
			numSpikes = fromDict['numSpikes']
			analysisDate = fromDict['analysisDate']
			analysisTime = fromDict['analysisTime']

		if loadWasGood:
			self.dict['kHz'] = pntsPerMS
			self.dict['numSweeps'] = numSweeps
			self.dict['durationSec'] = int(round(durationSec))
			self.dict['acqDate'] = acqDate
			self.dict['acqTime'] = acqTime

			self.dict['dvdtThreshold'] = dvdtThreshold
			self.dict['minSpikeVm'] = minSpikeVm # abb 202012
			self.dict['numSpikes'] = numSpikes
			self.dict['analysisDate'] = analysisDate
			self.dict['analysisTime'] = analysisTime
コード例 #26
0
def old_test_requests():
    """
	this gets all files, including

	https://api.github.com/repos/cudmore/SanPy/git/trees/master?recursive=1

    {
      "path": "data",
      "mode": "040000",
      "type": "tree",
      "sha": "8b97ef351ea95308b524b6febb2890f000b86388",
      "url": "https://api.github.com/repos/cudmore/SanPy/git/trees/8b97ef351ea95308b524b6febb2890f000b86388"
    },
    {
      "path": "data/171116sh_0018.abf",
      "mode": "100644",
      "type": "blob",
      "sha": "5f3322b08d86458bf7ac8b5c12564933142ffd17",
      "size": 2047488,
      "url": "https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17"
    },

	Then this url:
	https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17
	returns a dict d{} with

	{
	  "sha": "5f3322b08d86458bf7ac8b5c12564933142ffd17",
	  "node_id": "MDQ6QmxvYjE3MTA2NDA5Nzo1ZjMzMjJiMDhkODY0NThiZjdhYzhiNWMxMjU2NDkzMzE0MmZmZDE3",
	  "size": 2047488,
	  "url": "https://api.github.com/repos/cudmore/SanPy/git/blobs/5f3322b08d86458bf7ac8b5c12564933142ffd17",
	  "coontent": "<CONTENTS>"
	  "encoding": "base64"
	  }

	https://api.github.com/repos/:owner/:repo_name/contents/:path

	"""
    import requests
    import io

    # this works
    '''
	url = "https://github.com/cudmore/SanPy/blob/master/data/19114001.abf?raw=true"
	# Make sure the url is the raw version of the file on GitHub
	download = requests.get(url).content
	'''

    # list the repo 'data' folder via the GitHub contents API
    owner = 'cudmore'
    repo_name = 'SanPy'
    path = 'data'
    url = f'https://api.github.com/repos/{owner}/{repo_name}/contents/{path}'
    response = requests.get(url).json()
    print('response:', type(response))
    #print(response.json())
    # dump every .abf entry for inspection
    for idx, item in enumerate(response):
        if not item['name'].endswith('.abf'):
            continue
        print(idx)
        # use item['git_url']
        for k, v in item.items():
            print('  ', k, ':', v)

    #
    # grab the first file
    #gitURl = response[0]['git_url']
    '''
	print('  === gitURL:', gitURL)
	#download = requests.get(gitURl).content
	downloadRespoonse = requests.get(gitURL).json()
	print('  downloadRespoonse:', type(downloadRespoonse))
	content = downloadRespoonse['content']
	#print('  ', downloadRespoonse)
	#decoded = download.decode('utf-8')
	#print('  decoded:', type(decoded))
	'''

    # use response[0]['download_url'] to directly download file
    #gitURL = 'https://raw.githubusercontent.com/cudmore/SanPy/master/data/SAN-AP-example-Rs-change.abf'
    download_url = response[1]['download_url']
    content = requests.get(download_url).content

    #import base64
    #myBase64 = base64.b64encode(bytes(content, 'utf-8'))
    #myBase64 = base64.b64encode(bytes(content, 'base64'))
    '''
	myBase64 = base64.b64encode(bytes(content, 'utf-8'))
	print('myBase64:', type(myBase64))
	'''
    #decoded = content.decode('utf-8')
    #print(download)
    #import pyabf
    # wrap the raw bytes so bAnalysis can read them like a file
    fileLikeObject = io.BytesIO(content)
    ba = sanpy.bAnalysis(byteStream=fileLikeObject)
    print(ba._abf)
    print(ba.api_getHeader())
コード例 #27
0
def loadFromDb(path):
    """
	Load and analyze every file in a SanPy folder, driven by the folder DB.

	Reads per-file detection parameters (mvThreshold, kymograph ROI rect)
	from 'sanpy_recording_db.csv' in *path*, runs spike detection on each
	file, and returns the analyzed bAnalysis objects.

	TODO: This is managing 2x lists, one from db and other from abf/tif files in folder.

	Args:
		path (str): folder

	Returns:
		list of sanpy.bAnalysis
	"""
    dbPath = os.path.join(path, 'sanpy_recording_db.csv')
    dfFolder = pd.read_csv(dbPath)

    fileList = getFileList(path)
    baList = []  # was assigned twice before the loop; once is enough
    for idx, filePath in enumerate(fileList):
        # debug
        if 0:
            if idx == 4:
                break

        # the experimental condition is encoded two folder levels up
        parent = os.path.split(filePath)[
            0]  # corresponds to Olympus export folder
        grandparent = os.path.split(parent)[0]
        condition = os.path.split(grandparent)[1]

        fileName = os.path.split(filePath)[1]

        ba = sanpy.bAnalysis(filePath)

        # per-file detection parameters from the folder database row
        oneFile = dfFolder[dfFolder['File'] == fileName]
        mvThreshold = oneFile['mvThreshold'].values[0]
        kLeft = oneFile['kLeft'].values[0]
        kTop = oneFile['kTop'].values[0]
        kRight = oneFile['kRight'].values[0]
        kBottom = oneFile['kBottom'].values[0]
        theRect = [kLeft, kTop, kRight, kBottom]

        #print(oneFile)
        print(idx, fileName, mvThreshold, theRect)

        ba._updateTifRoi(theRect)

        detectionClass = sanpy.bDetection()  # gets default detection class
        detectionType = sanpy.bDetection.detectionTypes.mv
        detectionClass['condition'] = condition
        detectionClass[
            'detectionType'] = detectionType  # set detection type to ('dvdt', 'vm')
        detectionClass['dvdtThreshold'] = math.nan
        detectionClass['mvThreshold'] = mvThreshold
        detectionClass['peakWindow_ms'] = 500
        detectionClass['preSpikeClipWidth_ms'] = 100
        detectionClass['postSpikeClipWidth_ms'] = 1000
        detectionClass['refractory_ms'] = 700

        if fileName == '220110n_0032.tif':
            # default is 170 ms
            print("xxx", fileName, "detectionClass['refractory_ms']:",
                  detectionClass['refractory_ms'])
            detectionClass['refractory_ms'] = 1500

        # detect
        ba.spikeDetect(detectionClass=detectionClass)

        baList.append(ba)
    #
    return baList
コード例 #28
0
ファイル: fftPlugin.py プロジェクト: cudmore/SanPy
def test_fft():
	"""Ad-hoc FFT/PSD exploration on either real abf data or a synthetic
	two-frequency sine wave (toggle with the `if 0:` / `if 1:` literals).
	Filters the signal, then plots raw/filtered traces, matplotlib PSD,
	and a scipy periodogram.
	"""
	if 0:
		# abf data
		path = '/Users/cudmore/Sites/SanPy/data/fft/2020_07_07_0000.abf'
		ba = sanpy.bAnalysis(path)

		x = ba.sweepX()
		y = ba.sweepY()

		# 20-30 sec
		startSec = 16.968 # good signal
		stopSec = 31.313

		# reduce to get fft with N=1024 in excel
		dataPointsPerMs = ba.dataPointsPerMs  # 10 for 10 kHz
		startPnt = round(startSec * 1000 * dataPointsPerMs)
		stopPnt = round(stopSec * 1000 * dataPointsPerMs)

		numPnts = stopPnt-startPnt
		print(f'N={numPnts}')

		t = x[startPnt:stopPnt]
		y = y[startPnt:stopPnt]

		#y -= np.nanmean(y)

		dt = 0.0001
		fs = 1/dt
		NFFT = 512 # 2**16 #512 * 100  # The number of data points used in each block for the FFT
		medianPnts = 50  # 5 ms

		fileName = ba.getFileName()

	if 1:
		# sin wave data
		# Fixing random state for reproducibility
		np.random.seed(19680801)

		durSec = 10.24  # to get 1024 points at dt=0.01 (power of two)
		dt = 0.01
		fs = 1/dt
		nfft = 512  # The number of data points used in each block for the FFT
		medianPnts = 5  #

		t = np.arange(0, durSec, dt)
		nse = np.random.randn(len(t))
		r = np.exp(-t / 0.05)

		# convolved noise gives a correlated background
		cnse = np.convolve(nse, r) * dt
		cnse = cnse[:len(t)]

		# sum of a 1 Hz and a 7 Hz sine plus noise
		secondFre = 7
		y = 0.1 * np.sin(2 * np.pi * t) + cnse
		y += 0.1 * np.sin(secondFre * 2 * np.pi * t) + cnse

		fileName = 'fakeSin'

	#def replot_fft():

	#
	# filter
	yFiltered = scipy.ndimage.median_filter(y, medianPnts)

	# subsample down to 1024
	'''
	from scipy.interpolate import interp1d
	t2 = interp1d(np.arange(1024), t, 'linear')
	yFiltered2 = interp1d(t, yFiltered, 'linear')
	print(f'N2={len(t2)}')
	plt.plot(t2, yFiltered2)
	'''

	# save [t,y] to csv
	'''
	import pandas as pd
	tmpDf = pd.DataFrame(columns=['t', 'y'])
	tmpDf['t'] = t
	tmpDf['y'] = y
	csvPath = fileName + '.csv'
	print('saving csv:', csvPath)
	tmpDf.to_csv(csvPath, index=False)
	'''

	'''
	cutOff = 10 #20  # 20, cutOff of filter (Hz)
	order = 50 # 40  # order of filter
	sos = butter_lowpass_sos(cutOff, fs, order)
	'''

	#yFiltered = scipy.signal.sosfilt(sos, yFiltered)

	#
	#Fs = 1/dt  # The sampling frequency (samples per time unit)
	#NFFT = 512  # The number of data points used in each block for the FFT

	# plot
	fig, (ax0, ax1, ax2) = plt.subplots(3, 1)

	ax0.plot(t, y, 'k')
	ax0.plot(t, yFiltered, 'r')

	def myDetrend(x):
		# remove linear trend then the mean before the PSD
		y = plt.mlab.detrend_linear(x)
		y = plt.mlab.detrend_mean(y)
		return y

	# The power spectral density Pxx by Welch's average periodogram method
	print('  fs:', fs, 'nfft:', nfft)
	ax1.clear()
	Pxx, freqs = ax1.psd(yFiltered, NFFT=nfft, Fs=fs, detrend=myDetrend)
	ax1.set_xlim([0, 20])
	ax1.set_ylabel('PSD (dB/Hz)')
	#ax1.callbacks.connect('xlim_changed', self.on_xlims_change)

	'''
	ax1.clear()
	ax1.plot(freqs, Pxx)
	ax1.set_xlim([0, 20])
	'''

	'''
	# recompute the ax.dataLim
	ax1.relim()
	# update ax.viewLim using the new dataLim
	ax1.autoscale_view()
	plt.draw()
	'''

	'''
	ax2.plot(freqs, Pxx)
	ax2.set_xlim([0, 20])
	'''

	# report the dominant frequency from the Welch PSD
	maxPsd = np.nanmax(Pxx)
	maxPnt = np.argmax(Pxx)
	print(f'Max PSD freq is {freqs[maxPnt]} with power {maxPsd}')

	scipy_f, scipy_Pxx = scipy.signal.periodogram(yFiltered, fs)
	ax2.plot(scipy_f[1:-1], scipy_Pxx[1:-1])  # drop freq 0
	ax2.set_xlim([0, 20])

	ax2.set_xlabel('Frequency (Hz)')
	ax2.set_ylabel('scipy_Pxx')

	#
	plt.show()
コード例 #29
0
def getFileList(path, bucketName=None):
    """Scan for .abf recordings, load each as a bAnalysis, and tabulate them.

    Args:
        path (str): Local folder to scan for .abf files (ignored when
            bucketName is given).
        bucketName (str, optional): If given, list and load files from this
            AWS S3 bucket instead of the local path.

    Returns:
        tuple: (df, baList) where
            df (pd.DataFrame): One row per file with columns
                ('Type', 'File Name', 'kHz', 'Dur(s)', 'Sweeps').
            baList (list): Loaded sanpy.bAnalysis objects, same order as df.
    """
    logger.info(f'path: {path} bucketName: {bucketName}')

    baList = []
    retFileList = []
    useExtension = '.abf'

    # template row: appended when no files are found so the returned
    # DataFrame always has the expected columns
    emptyDict = {
        'Type': 'file',
        'File Name': '',
        #'path': '',
        'kHz': '',
        'Dur(s)': '',
        'Sweeps': '',
    }

    doAws = bucketName is not None
    fileList = []
    if doAws:
        #bucket = 'sanpy-data'
        s3 = sanpy.awsUtil.getConnection()
        fileList = sanpy.awsUtil.fetchFileList(bucketName, folder='.', s3=s3)
    elif os.path.isdir(path):
        fileList = os.listdir(path)
    else:
        # path is not a folder: leave fileList empty and report why
        logger.error(f'did not find folder: {path}')

    for file in fileList:
        # skip hidden files and anything that is not an .abf recording
        if file.startswith('.') or not file.endswith(useExtension):
            continue

        # build a fresh dict per row -- reusing a single dict would make
        # every DataFrame row alias the same (last) file's values
        fileDict = {
            'Type': 'file',
            'File Name': file,
            #'path': fullPath,
        }

        if doAws:
            ba = sanpy.awsUtil.loadOneFile(bucketName, file, s3=s3)
        else:
            fullPath = os.path.join(path, file)
            ba = sanpy.bAnalysis(file=fullPath)

        baList.append(ba)

        # TODO: get this from bAnalysis header
        baHeader = ba.api_getHeader()
        recording_kHz = baHeader['recording_kHz']  #ba.dataPointsPerMs
        recordingDur_sec = baHeader['recordingDur_sec']  #max(ba.abf.sweepX)

        fileDict['kHz'] = recording_kHz
        fileDict['Dur(s)'] = round(recordingDur_sec, 3)
        fileDict['Sweeps'] = len(ba.sweepList)

        retFileList.append(fileDict)

    if not retFileList:
        # no files found: keep DataFrame columns stable with one blank row
        retFileList.append(emptyDict)

    df = pd.DataFrame(retFileList)

    return df, baList
コード例 #30
0
def old_load(path):
    """Load and spike-detect all recordings under *path*.

    Detection parameters are chosen by experimental condition
    ('Control' vs 'Thapsigargin', taken from the grandparent folder name),
    with per-file overrides for a handful of known recordings.

    Args:
        path (str): Folder to scan for recordings.

    Returns:
        list: analyzed sanpy.bAnalysis objects, one per file.
    """
    fileList = getFileList(path)
    # load and analyze, some files are tweaked for their analysis
    baList = []
    for idx, file in enumerate(fileList):
        # debug: only analyze the first 4 files
        if 1:
            if idx == 4:
                break

        ba = sanpy.bAnalysis(file)

        # get condition from grandparent folder in ['Thapsigargin', 'Control']
        # file path looks like this:
        #  /media/cudmore/data/rabbit-ca-transient/Control/220110n_0049.tif.frames/220110n_0049.tif
        parent = os.path.split(file)[0]  # corresponds to Olympus export folder
        grandparent = os.path.split(parent)[0]
        condition = os.path.split(grandparent)[1]

        if condition == 'Control':
            mvThreshold = 1.4
        elif condition == 'Thapsigargin':
            mvThreshold = 1.1
        else:
            # fall back to the Control threshold; the original left
            # mvThreshold unbound here, causing a NameError further down
            print(f'XXX ERROR: Case not taken for condition: "{condition}"')
            mvThreshold = 1.4

        detectionClass = sanpy.bDetection()  # gets default detection class

        # special cases: per-file threshold / refractory overrides
        fileName = os.path.split(file)[1]
        if fileName in ('220110n_0017.tif', '220110n_0021.tif',
                        '220110n_0023.tif', '220110n_0024.tif'):
            mvThreshold = 1.2
        elif fileName == '220110n_0032.tif':
            # default refractory_ms is 170 ms
            print("xxx", fileName, "detectionClass['refractory_ms']:",
                  detectionClass['refractory_ms'])
            detectionClass['refractory_ms'] = 1500
        elif fileName == '220110n_0055.tif':
            mvThreshold = 1.05

        detectionType = sanpy.bDetection.detectionTypes.mv
        detectionClass['condition'] = condition
        detectionClass['detectionType'] = detectionType  # detect on mv, not dvdt
        detectionClass['dvdtThreshold'] = math.nan  # nan disables dV/dt detection
        detectionClass['mvThreshold'] = mvThreshold
        detectionClass['peakWindow_ms'] = 500

        ba.spikeDetect(detectionClass=detectionClass)
        baList.append(ba)

    return baList