Example #1
def saveContStimPSTH(inputXL, dataXL):
    '''
    Extracts the spike times of every trial of the experiments specified in <inputXL> and saves them into the
    excel file <dataXL>, one spike per row, together with metadata such as <Experiment ID>, <Labor State>
    and <TrialName>.
    :param inputXL: string, path of an excel file. The excel file should contain three columns with headings
    "Experiment ID", "Labor State" and "NIX File Directory".
    :param dataXL: string, path where the resulting excel file will be written.
    :return: None
    '''
    types = ['BeforeStimulus', 'DuringStimulus', 'AfterStimulus']
    typeDurs = [3 * qu.s, 1 * qu.s, 1 * qu.s]

    inputDF = pd.read_excel(inputXL)

    dataDF = pd.DataFrame()

    for ind, (expID, laborState, nixPath) in inputDF.loc[
            :, ["Experiment ID", "Labor State", "NIX File Directory"]].iterrows():
        rda = RawDataAnalyser(expID, nixPath)

        expSpikes = rda.getContSpikes(types=types, freqs=[265])

        for trialInd, trialSpikes in enumerate(expSpikes[265]):
            print("Doing {}, Trial {}".format(expID, trialInd + 1))
            trialSpikeTimeList = []
            for typeSpikeTrain in trialSpikes.values():
                temp = typeSpikeTrain.copy()
                temp.units = qu.s
                trialSpikeTimeList.append(temp.times.magnitude)
            allTrialSpikeTimes = np.concatenate(trialSpikeTimeList)

            spikeTimeUnits = qu.s
            # rebind rather than multiply in place, so the result is a Quantity
            allTrialSpikeTimes = allTrialSpikeTimes * spikeTimeUnits

            allTrialSpikeTimes -= trialSpikes["DuringStimulus"].t_start

            allTrialSpikeTimes = allTrialSpikeTimes[np.logical_and(
                -typeDurs[0] <= allTrialSpikeTimes,
                allTrialSpikeTimes <= typeDurs[1] + typeDurs[2])]

            for spTime in allTrialSpikeTimes:
                tempS = pd.Series()
                tempS[mdFN["expID"]] = expID
                tempS[mdFN["freq"]] = 265
                tempS[mdFN["laborState"]] = laborState
                tempS[mdFN["trialName"]] = "Trial{}".format(trialInd)
                tempS[mdFN["trialStart"]] = trialSpikes["DuringStimulus"].t_start
                tempS["Spike Time (s)"] = spTime

                dataDF = pd.concat([dataDF, tempS.to_frame().T],
                                   ignore_index=True)

    dataDF.to_excel(dataXL)
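
A minimal usage sketch for saveContStimPSTH; the file names are hypothetical, and the input workbook must contain the columns "Experiment ID", "Labor State" and "NIX File Directory":

# Hypothetical paths, for illustration only.
saveContStimPSTH(inputXL="experiments.xlsx", dataXL="contStimPSTH.xlsx")
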
Example #2
def saveData(inputXL, dataXL):
    '''
    Extracts the following two features from the ephys responses of DL-Int-1:
     1. firing rates in five different phases of the DL-Int-1 ephys response
     2. spike timing of the first four spikes after stimulus onset
    Features are extracted for the Experiment IDs specified in 'inputXL'. For each row of 'inputXL', a NIX file
    with the name "<Experiment ID>.h5" in the directory <NIX File Directory> is used to extract features.
    The extracted feature values are stored in the file specified by 'dataXL'.
    :param inputXL: string, path of an excel file. The excel file should contain three columns with headings
    "Experiment ID", "Labor State" and "NIX File Directory".
    :param dataXL: string, path where the resulting excel file will be written.
    :return: None
    '''

    types = ['BeforeStimulus', 'DuringStimulus', 'AfterStimulus']
    typeDurs = [3 * qu.s, 1 * qu.s, 1 * qu.s]

    inputDF = pd.read_excel(inputXL)

    dataDF = pd.DataFrame()

    for ind, (expID, laborState, nixPath) in inputDF.loc[
            :, ["Experiment ID", "Labor State", "NIX File Directory"]].iterrows():
        rda = RawDataAnalyser(expID, nixPath)

        expSpikes = rda.getContSpikes(types=types, freqs=[265])

        for trialInd, trialSpikes in enumerate(expSpikes[265]):
            print("Doing {}, Trial {}".format(expID, trialInd + 1))
            trialSpikeTimeList = []
            for typeSpikeTrain in trialSpikes.values():
                temp = typeSpikeTrain.copy()
                temp.units = qu.s
                trialSpikeTimeList.append(temp.times.magnitude)
            allTrialSpikeTimes = np.concatenate(trialSpikeTimeList)

            spikeTimeUnits = qu.s
            allTrialSpikeTimes = allTrialSpikeTimes * spikeTimeUnits

            allTrialSpikeTimes -= trialSpikes["DuringStimulus"].t_start

            allTrialSpikeTimes = allTrialSpikeTimes[np.logical_and(
                -typeDurs[0] <= allTrialSpikeTimes,
                allTrialSpikeTimes <= typeDurs[1] + typeDurs[2])]

            trialSpikeTrain = SpikeTrain(times=allTrialSpikeTimes,
                                         units=spikeTimeUnits,
                                         t_start=-typeDurs[0],
                                         t_stop=typeDurs[1] + typeDurs[2])
            tempS = pd.Series()
            tempS[mdFN["expID"]] = expID
            tempS[mdFN["freq"]] = 265
            tempS[mdFN["laborState"]] = laborState
            tempS[mdFN["trialName"]] = "Trial{}".format(trialInd)
            tempS[mdFN["trialStart"]] = trialSpikes["DuringStimulus"].t_start

            for funcName, funcNameFull in spikeFRSpikeTimesFNs.items():

                func = getattr(additionalEphysFuncs,
                               spikeFRSpikeTimesFuncs[funcName])
                tempS[funcNameFull] = func(None, trialSpikeTrain, None, None)

            dataDF = pd.concat([dataDF, tempS.to_frame().T],
                               ignore_index=True)

    dataDF.to_excel(dataXL)
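
saveData takes the same input workbook as saveContStimPSTH; a sketch with hypothetical paths:

# Hypothetical paths, for illustration only.
saveData(inputXL="experiments.xlsx", dataXL="DLInt1Features.xlsx")
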
Example #3
assert len(sys.argv) == 2, 'Improper Usage! Please use as follows:\npython {} <json param file>'.format(
    sys.argv[0])

with open(sys.argv[1]) as fle:

    pars = json.load(fle)
    NIXPath = pars['NIXPath']
    expName = pars['expName']
    catResDir = pars['catResDir']
    downSampleFactor = pars['downSampleFactor']
    type2Color = pars['type2Color']
    mplPars = pars['mplPars']

sns.set(rc=mplPars)

rda = RawDataAnalyser(expName, NIXPath)

pulseResps = rda.getPulseResps()
pulseSpikes = rda.getPulseSpikes()

if pulseResps and pulseSpikes:

    expDir = os.path.join(catResDir, expName)

    if not os.path.isdir(expDir):
        os.mkdir(expDir)

    allPPs = list(pulseResps.keys())
    print('Found these pulse Pars: {}'.format(allPPs))

    allPulseDurs = list(sorted(set([x[0] for x in allPPs])))
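
The script above reads all its parameters from a single JSON file passed on the command line. A minimal sketch of how such a file could be generated; only the key names are taken from the snippet, every value below is an illustrative assumption:

import json

pars = {
    "NIXPath": "/path/to/nix/files",   # directory containing <expName>.h5
    "expName": "140813-3Al",           # an experiment ID taken from this page
    "catResDir": "/path/to/results",   # per-experiment folders are created here
    "downSampleFactor": 10,            # assumed value
    "type2Color": {"BeforeStimulus": "b", "DuringStimulus": "r",
                   "AfterStimulus": "g"},
    "mplPars": {"font.size": 12},      # matplotlib rc overrides passed to sns.set
}
with open("params.json", "w") as fle:
    json.dump(pars, fle, indent=4)
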
Example #4
snrs = []
spikeAmpsAll = []
noiseAmpsAll = []

for expName in expNames:

    for ax in axs:
        ax.clear()

    print('Doing ' + expName)

    intervalToIgnore = None
    if expName in toIgnore:
        intervalToIgnore = toIgnore[expName]

    rda = RawDataAnalyser(expName, dirpath)

    resps = rda.getContResps(types=types)
    spikes = rda.getContSpikes(types=types)

    spikeAmps = []
    noiseAmps = []

    for freqInd, freq in enumerate(freqs):
        if freq in resps:
            for typeInd, tpe in enumerate(types):

                for respInd, respAll in enumerate(resps[freq]):

                    resp = respAll[tpe]
Example #5
    'JO terminal local neuron'
]

mdDF = parseMetaDataFile(excel, excelSheet, spike2Path)
expNames = list(map(str, mdDF.index))

expIDsByCat = getExpIDsByCategory(excel, excelSheet, categories, spike2Path)
columns = ['Category', 'Experiment ID', 'Frequency (Hz)', 'number of Trials']
data = {c: [] for c in columns}

for cat, catExps in expIDsByCat.items():
    for expName in catExps:
        if expName in processedResultsDF.index and processedResultsDF.loc[
                expName, 'Result']:
            print("Doing {} of category {}".format(expName, cat))
            rda = RawDataAnalyser(expName=expName, dirpath=NIXPath)
            freqSecNames = rda.getContStats()
            for freq, secNames in freqSecNames.items():
                data['Category'].append(cat)
                data['Experiment ID'].append(expName)
                data['Frequency (Hz)'].append(freq)
                data['number of Trials'].append(len(secNames))

rawDF = pd.DataFrame(data)
catFreqGrouped = rawDF.groupby(['Category', 'Frequency (Hz)'])
statsDF = pd.DataFrame()
statsDF['(number of Exps., number of Trials)'] = catFreqGrouped[
    'number of Trials'].agg(lambda x: (len(x), sum(x)))
statsDFUnstacked = statsDF.unstack()
statsDFTexTable = statsDFUnstacked.to_latex(
    column_format='c' + 'c' * len(statsDFUnstacked.columns))
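
For reference, the aggregation above yields one (number of Exps., number of Trials) tuple per (Category, Frequency) pair, which unstack then spreads over one column per frequency. A self-contained toy sketch of the same pattern, with made-up data:

import pandas as pd

toy = pd.DataFrame({'Category': ['A', 'A', 'B'],
                    'Frequency (Hz)': [100, 100, 265],
                    'number of Trials': [3, 5, 2]})
grouped = toy.groupby(['Category', 'Frequency (Hz)'])
# each cell: (number of experiments, total number of trials)
stats = grouped['number of Trials'].agg(lambda x: (len(x), sum(x)))
print(stats.unstack())
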
Example #6
    'Bilateral Descending N',
    'JO terminal local neuron'
]

mdDF = parseMetaDataFile(excel, excelSheet, spike2Path)
expNames = list(map(str, mdDF.index))

expIDsByCat = getExpIDsByCategory(excel, excelSheet, categories, spike2Path)
columns = ['Category', 'Exp ID', '(duration, interval) (ms)', 'number of Trials']
rawDF = pd.DataFrame()

for cat, catExps in expIDsByCat.items():
    for expName in catExps:
        if expName in processedResultsDF.index and processedResultsDF.loc[expName, 'Result']:
            print("Doing {} of category {}".format(expName, cat))
            rda = RawDataAnalyser(expName=expName, dirpath=NIXPath)
            expPulseStatsDF = rda.getPulseStats()
            expPulseStatsDF['Experiment ID'] = expName
            expPulseStatsDF['Category'] = cat
            rawDF = pd.concat([rawDF, expPulseStatsDF])


rawDF['(Pulse Interval, Pulse Duration) (ms)'] = list(
    zip(rawDF['Pulse Interval (ms)'], rawDF['Pulse Duration (ms)']))
del rawDF['Pulse Duration (ms)']
del rawDF['Pulse Interval (ms)']
catFreqGrouped = rawDF.groupby(['(Pulse Interval, Pulse Duration) (ms)', 'Category'])
statsDF = pd.DataFrame()
statsDF['(number of Exps., number of Trials)'] = \
    catFreqGrouped['Experiment ID'].agg(lambda x: (len(x.unique()), len(x)))
Example #7
    '140813-3Al',
    '140930-1Al',
    # # # '140917-1Al',
    # # # '141030-1Al',
]

fnew = [1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0]

allResps = {}
freqs = set()

for expName in expNames:

    print('Doing ' + expName)

    rda = RawDataAnalyser(expName, NIXPath)

    resps = rda.getContResps(types=['DuringStimulus'])

    allResps[expName] = resps

    # collect all stimulus frequencies seen so far
    freqs |= set(resps.keys())

avgTracesAll = [{}, {}]
traceLength = 1
Ts = 4.8e-5
nPts = int(0.9 / Ts)

for expName, expResp in allResps.items():

    avgTraces = avgTracesAll[fnew[expNames.index(expName)]]
Example #8
    intervalToIgnore = None
    if expName in toIgnore:
        intervalToIgnore = toIgnore[expName]

    def shouldBeIgnored(resp):

        if intervalToIgnore is None:
            return False
        else:
            respInIgnoreIntervals = [(x * qu.s <= resp.t_start <= y * qu.s) |
                                     (x * qu.s <= resp.t_stop <= y * qu.s)
                                     for x, y in intervalToIgnore]
            return any(respInIgnoreIntervals)

    rda = RawDataAnalyser(expName, dirpath)

    resps = rda.getContResps(types=types, freqs=[265.0])

    normedFilteredSigs = []
    normedSigs = []

    for resp in resps[265.0]:

        respNormedSigs = []
        respNormedSigsFiltered = []

        for typeInd, tpe in enumerate(types):

            temp = resp[tpe]
            if shouldBeIgnored(temp):
Example #9
def makeSpikeTotalVsFreqImages(expNamesDict, NIXPath, freqs, resDir, mplPars):
    '''
    Plots spike rates against stimulus frequency for all experiments of one
    neuron category, per experiment and pooled, and saves the figures to <resDir>.
    '''
    sns.set(style="whitegrid", rc=mplPars)

    assert len(expNamesDict) == 1, \
        "expNamesDict must be a dict of one key-value pair!"

    categoryStatsDF = pd.DataFrame()
    for expName in list(expNamesDict.values())[0]:

        print("Doing {}".format(expName))
        rda = RawDataAnalyser(dirpath=NIXPath, expName=expName)
        spikes = rda.getContSpikes(freqs=freqs)

        expStatsDF = pd.DataFrame()

        for freq, freqSpikes in spikes.items():

            tempS = pd.Series()

            for trialInd, trialSpikesDict in enumerate(freqSpikes):

                trialSpikes = trialSpikesDict["DuringStimulus"]
                tempS["Experiment ID"] = expName
                tempS["Frequency (Hz)"] = freq
                tempS["Trial Number"] = trialInd
                tempS["Neuron Category"] = expNamesDict.keys()[0]
                stimDur = trialSpikes.duration
                stimDur.units = qu.s
                tempS["Spike Rate (spikes per second)"] = trialSpikes.shape[
                    0] / stimDur.magnitude

                expStatsDF = expStatsDF.append(tempS, ignore_index=True)

        categoryStatsDF = pd.concat([categoryStatsDF, expStatsDF],
                                    ignore_index=True)

    fig, ax = plt.subplots(figsize=(7, 5.6))
    # axes-level point plot with 95% bootstrapped confidence intervals
    sns.pointplot(data=categoryStatsDF,
                  x="Frequency (Hz)",
                  y="Spike Rate (spikes per second)",
                  hue="Experiment ID",
                  ax=ax,
                  ci=95,
                  n_boot=1000)
    ax.legend(bbox_to_anchor=(1.65, 1))

    ax.set_title(list(expNamesDict.keys())[0])
    fig.tight_layout(rect=[0, 0, 0.75, 0.9])
    fig.savefig(os.path.join(resDir, "IndividualExperimentsSeparately.png"),
                dpi=300)

    fig1, ax1 = plt.subplots(figsize=(7, 5.6))
    sns.violinplot(data=categoryStatsDF,
                   x="Frequency (Hz)",
                   y="Spike Rate (spikes per second)",
                   ax=ax1,
                   scale="area",
                   scale_hue=False)
    ax1.set_title(list(expNamesDict.keys())[0])
    fig1.tight_layout()
    fig1.savefig(os.path.join(resDir, "AllExperimentsCombined.png"), dpi=300)
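
A usage sketch with hypothetical arguments; the single-key dict maps a neuron category (here 'DL-Int-1', mentioned elsewhere on this page) to its experiment IDs, and the frequency list is assumed:

# All argument values are illustrative.
makeSpikeTotalVsFreqImages(
    expNamesDict={"DL-Int-1": ["140813-3Al", "140930-1Al"]},
    NIXPath="/path/to/nix/files",
    freqs=[100, 265, 365],
    resDir="/path/to/results",
    mplPars={"font.size": 12},
)
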
Example #10
with open(sys.argv[1]) as fle:

    pars = json.load(fle)
    NIXPath = pars['NIXPath']
    expName = pars['expName']
    freqs = pars['freqs']
    catResDir = pars['catResDir']
    downSampleFactor = pars['downSampleFactor']
    type2Color = pars['type2Color']
    mplPars = pars['mplPars']

plt.rcParams.update(mplPars)

plt.ioff()

rda = RawDataAnalyser(dirpath=NIXPath, expName=expName)

resps = rda.getContResps(freqs=freqs)
spikes = rda.getContSpikes(freqs=freqs)

expDir = os.path.join(catResDir, expName)
if not os.path.exists(expDir):
    os.mkdir(expDir)

for freq, freqResps in resps.items():

    print('Doing {}'.format(freq))

    overallFig, overallAx = plt.subplots(nrows=1, ncols=1, figsize=(7, 5.6))

    freqDir = os.path.join(expDir, str(freq))
Example #11
def saveSpontActivityRates(outFile, toIgnoreFile, expNames, nixPath):
    '''
    Computes spontaneous firing rates for the experiments in <expNames> from NIX
    files under <nixPath> and saves them to the excel file <outFile>, skipping
    trials that overlap the ignore intervals given in the JSON file <toIgnoreFile>.
    '''

    with open(toIgnoreFile, 'r') as fle:
        toIgnore = json.load(fle)

    allData = pd.DataFrame()

    for expName in expNames:

        print('Doing ' + expName)

        if expName in toIgnore:
            intervalToIgnore = toIgnore[expName]
        else:
            intervalToIgnore = None

        def shouldBeIgnored(resp):

            if intervalToIgnore is None:
                return False
            else:
                respInIgnoreIntervals = [
                    (x * qu.s <= resp.t_start <= y * qu.s) |
                    (x * qu.s <= resp.t_stop <= y * qu.s)
                    for x, y in intervalToIgnore
                ]
                return any(respInIgnoreIntervals)

        rda = RawDataAnalyser(expName, nixPath)

        spikes = rda.getContSpikes(types=types)

        for freq, freqResps in spikes.items():
            print('Doing ' + str(freq) + 'Hz; All freqs ' +
                  str(list(spikes.keys())) + ' Hz')

            t_starts = []

            for stInd, sts in enumerate(freqResps):

                t_start = sts['DuringStimulus'].t_start
                print('{}/{}; trial starting at {}'.format(
                    stInd + 1, len(freqResps), t_start))

                respSpikes = []

                for typeInd, tpe in enumerate(types):

                    temp = sts[tpe]
                    if shouldBeIgnored(temp):
                        print('Trial{} {} ignored'.format(stInd + 1, tpe))
                        break

                    respSpikes.append(sts[tpe])

                if len(respSpikes) == 3:

                    t_starts.append(t_start)

                    allSpikeTimes = np.concatenate(
                        [sp.times.magnitude
                         for sp in respSpikes]) * respSpikes[0].units
                    allSpikeTimes -= t_start
                    allSpikeTimes = allSpikeTimes[np.logical_and(
                        -typeDurs[0] <= allSpikeTimes,
                        allSpikeTimes <= typeDurs[1] + typeDurs[2])]

                    if len(allSpikeTimes):
                        spikeTimesUnits = allSpikeTimes[0].units
                    else:
                        spikeTimesUnits = qu.s

                    respSpikeTrain = SpikeTrain(times=allSpikeTimes,
                                                units=spikeTimesUnits,
                                                t_start=-typeDurs[0],
                                                t_stop=typeDurs[1] +
                                                typeDurs[2])

                    temp = {
                        'expID': expName,
                        'freq': freq,
                        'laborState': expIDLaborStateMap(expName),
                        'trialName': 'Trial{:d}'.format(stInd),
                        'trialStart': t_start.magnitude
                    }
                    tempDFDict = {mdFN[k]: v for k, v in temp.items()}
                    assert len(tempDFDict) == len(mdFN), \
                        'not all metadata covered in the final data file'

                    tempDFDict[newFFN["spontFR3"]] = spontAct3Sec(
                        None, respSpikeTrain, None, None)
                    allData = pd.concat([allData, pd.DataFrame([tempDFDict])],
                                        ignore_index=True)

    allDataPivoted = allData.set_index(
        keys=[mdFN['expID'], mdFN['freq'], mdFN['trialName']])
    allDataPivoted.to_excel(outFile)
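
A usage sketch with hypothetical paths; <toIgnoreFile> is expected to be a JSON file mapping experiment IDs to lists of (start, stop) intervals in seconds, matching how shouldBeIgnored consumes it above:

# All argument values are illustrative.
saveSpontActivityRates(
    outFile="spontActivityRates.xlsx",
    toIgnoreFile="toIgnore.json",            # {"<expID>": [[t0, t1], ...]}
    expNames=["140813-3Al", "140930-1Al"],   # IDs borrowed from Example #7
    nixPath="/path/to/nix/files",
)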