示例#1
0
def _load_mcd_byname(path, name):
    """Return spike data for the single electrode named *name* from the
    .mcd file at *path* (str).

    Returns
    -------
    tuple or None
        (data, time, spike_count) where ``data`` is a 2-D array of spike
        waveforms, ``time`` the 1-D array of their timestamps and
        ``spike_count == len(time)``.  Implicitly returns ``None`` when no
        segment entity matches *name*.
    """
    fd = ns.File(path)
    segment = 3  # neuroshare entity_type code for segment (spike) entities
    for entity in fd.list_entities():
        # only record if entity is segment type
        if entity.entity_type != segment:
            continue
        channelName = entity.label[0:4] + entity.label[23:]
        if name != channelName:
            continue
        # NOTE: the original also accumulated a `count1` list that was
        # converted to an array and then never used — dead work, removed.
        data1 = []
        time1 = []
        for item_idx in range(entity.item_count):
            item_info = entity.get_data(item_idx)
            data1.append(item_info[0].tolist()[0])  # spike waveform samples
            time1.append(item_info[1])  # timestamp of this spike
        data = np.asarray(data1)
        time = np.asarray(time1)
        spike_count = len(time)
        return data, time, spike_count
示例#2
0
def _load_mcd(path):
    """Load every segment (spike) entity from the .mcd file at *path*.

    Returns three dicts keyed by channel name: raw sample values,
    timestamps, and the per-spike waveform arrays.
    """
    fd = ns.File(path)
    data = dict()  #raw recordings
    time = dict()  #time from mcd file
    count = dict()  #sample count
    segment = 3  # neuroshare entity_type code for segment entities
    for entity in fd.list_entities():
        # skip anything that is not a segment entity
        if entity.entity_type != segment:
            continue
        samples = []
        stamps = []
        waveforms = []
        for idx in range(entity.item_count):
            info = entity.get_data(idx)
            samples.extend(info[0].tolist()[0])
            stamps.append(info[1])  # change to be the actual times of sampl
            waveforms.append(info[0])
        # channel name built from the entity label
        label = entity.label[0:4] + entity.label[23:]
        # store arrays under the channel name
        data[label] = np.asarray(samples)
        time[label] = np.asarray(stamps)
        count[label] = np.asarray(waveforms)
    # return the three dictionaries
    return data, time, count
示例#3
0
def MCD_read(MCDFilePath):
    """Read all analog entities from a .mcd file.

    NOTE(review): the original paste had lost all indentation and was a
    syntax error; this restores the structure implied by the comments.

    Parameters
    ----------
    MCDFilePath : str
        Path to the .mcd file.

    Returns
    -------
    dict
        Maps channel name -> np.array of that channel's analog samples.
    """
    # import necessary libraries
    import neuroshare as ns
    import numpy as np

    # open file using the neuroshare bindings
    fd = ns.File(MCDFilePath)

    # index into fd.entities, kept in step with list_entities()
    indx = 0

    # create empty dictionary
    data = dict()

    # loop through data and find all analog entities
    for entity in fd.list_entities():
        analog = 2  # neuroshare entity_type code for analog entities
        # if entity is analog
        if entity.entity_type == analog:
            # store entity in temporary variable
            dummie = fd.entities[indx]
            # get data from temporary variable
            data1, time, count = dummie.get_data()
            # create channel names
            channelName = entity.label[0:4] + entity.label[23:]
            # store data with name in the dictionary
            data[channelName] = np.array(data1)
        # increase index — presumably for every entity so that indx keeps
        # tracking the position in fd.entities; TODO confirm against the
        # original (un-mangled) source
        indx = indx + 1

    # return dictionary
    return data
示例#4
0
    def read_mcd(self, mcd_file):
        """Open *mcd_file* with neuroshare and derive the sampling rate.

        Parameters
        ----------
        mcd_file : str
            Path to mcd file

        """
        self.data = ns.File(mcd_file)
        # sample rate is the reciprocal of the timestamp resolution
        self.sample_rate = 1 / self.data.metadata_raw['TimeStampResolution']
示例#5
0
def convert(dirfile):
    directory, filei = dirfile
    if verbose: print 'Running for ' + directory + ' ' + str(filei[-8:-4])
    fd = ns.File(mcd_locations + '/mcd/' + directory + filei)
    os.mkdir(database_path + 'numpy_database/' + directory + str(filei[-8:-4]))
    data = dict()  #raw recordings
    time = dict()  #time from mcd file
    count = dict()  #sample count
    for entity in fd.list_entities():
        segment = 3
        # only record if entity is segment type
        if entity.entity_type == segment:
            data1 = []
            time1 = []
            count1 = []
            # lists of data to attach
            # loop for items in
            for item_idx in range(entity.item_count):
                # apppend to data, time, count
                item_info = entity.get_data(item_idx)
                data1.append(item_info[0].tolist()[0])
                time1 += [item_info[1]]
            channelName = entity.label[24:]  # channel names
            #store data with name in the dictionary
            data[channelName] = np.asarray(data1)
            time[channelName] = np.asarray(time1)
            count[channelName] = len(time[channelName])
    # re-aligns the times of the spikes
    new_running_time = 0
    for name in data.keys():
        # find the last spike time of the recording
        if len(time[name]) > 0:  # otherwise no spikes
            if np.max(time[name]) > new_running_time:
                new_running_time = np.max(time[name])
            # save times and spike shapes
            np.save(
                database_path + 'numpy_database/' + directory + '/' +
                str(filei[-8:-4]) + '/time_' + name + '.npy', time[name])
            np.save(
                database_path + 'numpy_database/' + directory + '/' +
                str(filei[-8:-4]) + '/spikes_' + name + '.npy', data[name])
    return new_running_time, count
示例#6
0
def _load_mcd_subsample(path, name, frac_subsample):
    """Utility to return only from electrode with name "name" some fractional
    subsample of all electrode fires.

    Parameters
    ----------
    path : str
        Path to the .mcd file.
    name : str
        Channel name to match.
    frac_subsample : float
        Fraction (0-1) of spikes to keep, chosen at random.

    Returns
    -------
    tuple or None
        (data, time, spike_count) for the subsample; implicitly None when
        no segment entity matches *name*.
    """
    fd = ns.File(path)
    segment = 3  # neuroshare entity_type code for segment entities
    for entity in fd.list_entities():
        # only record if entity is segment type
        if entity.entity_type == segment:
            channelName = entity.label[0:4] + entity.label[23:]
            if name == channelName:
                data1 = []
                count1 = []
                time1 = []
                for item_idx in range(entity.item_count):
                    # append to data, time, count
                    item_info = entity.get_data(item_idx)
                    data1.append(item_info[0].tolist()[0])
                    time1.append(item_info[1])  # actual spike times
                    count1.append(item_info[0])
                if entity.item_count > 0:
                    # BUG FIX: the original shuffled a `deck` but then
                    # sliced the arrays in their original order, so the
                    # "subsample" was not random (and shuffling a range
                    # object fails on Python 3).  Use a random permutation
                    # of indices to select the rows instead.
                    deck = np.random.permutation(entity.item_count)
                    keep = deck[:int(frac_subsample * len(deck))]
                    data = np.asarray(data1)[keep, :]
                    time = np.asarray(time1)[keep]
                    count = np.asarray(count1)[keep]
                else:
                    # no spikes: return empty arrays
                    data = np.asarray(data1)
                    time = np.asarray(time1)
                    count = np.asarray(count1)
                spike_count = len(time)
                return data, time, spike_count
def get_spikes(chem):
    """Collect per-channel spike timestamps for every file matching *chem*.

    Returns a dict keyed by a title derived from the last three
    underscore-separated tokens of each filename; each value maps channel
    name -> list of spike timestamps.
    """
    chem_dict = dict()
    for f in get_files(chem):
        print(f)
        # title = last three '_'-separated pieces of the name before .mcd
        stem_parts = f.split(".mcd")[0].split("_")
        title = '_'.join(stem_parts[len(stem_parts) - 3:])
        fd = ns.File(f)
        chNamesList = get_chNamesList(fd)
        spikes = dict()

        # entity.get_data(i) yields the i-th spike and its waveform/shape;
        # index [1] is the spike's timestamp
        for numCh in range(0, 60):
            ent = fd.entities[numCh]
            spikes[chNamesList[numCh]] = [
                ent.get_data(sp)[1] for sp in range(0, ent.item_count)
            ]

        chem_dict[title] = spikes
    return chem_dict
示例#8
0
# -*- coding: utf-8 -*-
"""
Read and plot mcd files using neuroshare
"""

import numpy as np
import matplotlib.pyplot as plt

import neuroshare as ns

# Two candidate recordings; the second assignment overwrites the first
# (presumably one path is toggled by hand per session — TODO confirm).
# NOTE(review): `file` shadows the Python builtin of the same name.
file = ('/media/ycan/Erol1/20180712_YE_60MEA_Marmoset_eye2_21/1_fff_'
        'gauss1blink.mcd')
file = '/media/ycan/Erol1/20180802_YE_252MEA_Marmoset_eye1_421/1_fff_gauss1blink.mcd'
print(file)

fd = ns.File(file)
labels = []  # fourth whitespace-separated token of each entity label
analog = []  # True where the entity label starts with 'anlg0001'
for entity in fd.list_entities():
    #    print(entity.label, entity.entity_type)
    labelsplit = entity.label.split()
    labels.append(labelsplit[3])
    analog.append(labelsplit[0] == 'anlg0001')
analog = np.array(analog)

# Choose the MEA grid layout from the entity count.
# NOTE(review): there is no else branch — x, y and corners stay undefined
# for any other entity_count; verify this cannot happen upstream.
if fd.entity_count == 63:
    x, y = 8, 8
    corners = [0, 6, 54, 60]
elif fd.entity_count == 256:
    x, y = 16, 16
    corners = [0, 14, 238, 252]
示例#9
0
last_max = 0  # not referenced in the visible lines — presumably used below
total_counts = {}  # not referenced in the visible lines — presumably used below
for idx, directory in enumerate(subdirectories):
    # loop through the portions of the experiment
    files_in_folder = np.sort(os.listdir(mcd_locations + '/mcd/' + directory))
    files = []
    os.mkdir(mcd_locations + 'numpy_database/' + directory)
    for filei in files_in_folder:
        # skip hidden files (names starting with '.')
        if filei[0] == '.':
            pass
        else:
            files.append([directory + '/', filei])
    # NOTE(review): `idx is 0` relies on CPython small-integer caching;
    # this should be `idx == 0`.
    if idx is 0:
        # identify and save the names of the electrodes
        fd = ns.File(mcd_locations + '/mcd/' + subdirectories[0] + '/' +
                     files[0][1])
        enames = np.sort([entity.label[24:] for entity in fd.list_entities()])
        np.save(database_path + 'numpy_database/enames.npy', enames)

    # parallelize creation of the database
    with futures.ProcessPoolExecutor(max_workers=num_cpus) as executor:
        result = executor.map(convert, files)

    # correct times in the database
    durations = []
    for r in result:
        # track the times
        durations.append(r[0])
        # track the total number of spikes for each
        counts = r[1]
        # NOTE(review): the body of the loop below is missing — this
        # snippet appears truncated at this point.
        for key in counts.keys():
def MCD_read(MCDFilePath):
    """Compute per-electrode LFP band power from a .mcd file.

    Parameters
    ----------
    MCDFilePath : str
        Path to the .mcd recording.

    Returns
    -------
    pd.DataFrame
        One row per LFP channel, columns ('delta', 'theta', 'alpha',
        'beta', 'gamma'); each cell is the band power averaged over all
        half-second-stepped windows of the recording.
    """

    # open file using the neuroshare bindings
    fd = ns.File(MCDFilePath)

    # entity_type: 3 is spikes, 2 is analogue
    for i, entity in enumerate(fd.entities):
        print((i, entity.label, entity.entity_type))

    # (108, 'filt0001 0059 0048       57', 2)
    # 0059 is the number of the corresponding channel in spk, 0048 is the number in analogue
    # 57 is name of channel in matrix notation, 8*8 - 4 on corner channels
    # use matrix notation to spk to lfp

    # down-sampled traces keyed by channel name
    data = dict()

    numCh = 60
    analog1 = fd.entities[numCh]  # open analog signal entity
    print(analog1.units)  #V
    print(analog1.sample_rate)  # 25,000/second

    # get electrode names
    chNamesList = get_chNamesList(fd, LFP='True')

    fft_byE = pd.DataFrame(0,
                           index=chNamesList,
                           columns=('delta', 'theta', 'alpha', 'beta',
                                    'gamma'))

    # kept for parity with the original code; result is currently unused
    chNamesList_spikes = get_chNamesList(fd, LFP='False')

    # ---- anti-aliasing FIR low-pass (Kaiser window), designed once ----
    # (hoisted out of the channel loop: the design does not depend on the
    # channel, so redesigning it 60 times was wasted work)
    sample_rate = 25000
    nyq_rate = sample_rate / 2.0  # Nyquist rate of the raw signal
    width = 5.0 / nyq_rate  # 5 Hz transition band, relative to Nyquist
    ripple_db = 60.0  # desired stop-band attenuation in dB
    # NOTE: renamed from `beta` — the original reused that name for the
    # Kaiser design parameter, the taper parameter and the 'beta' band.
    N, kaiser_beta = kaiserord(ripple_db, width)
    cutoff_hz = 200.0  # cutoff frequency of the filter
    taps = firwin(N, cutoff_hz / nyq_rate, window=('kaiser', kaiser_beta))

    # frequency bands: (name, lowcut, highcut) in Hz
    bands = (('delta', 1, 4), ('theta', 5, 8), ('alpha', 9, 14),
             ('beta', 15, 30), ('gamma', 31, 50))

    def _butter_bandpass_filter(sig, lowcut, highcut, fs, order=3):
        # Butterworth band-pass with cut-offs normalized to Nyquist,
        # applied with a one-directional lfilter (as in the original).
        nyq = 0.5 * fs
        b, a = signal.butter(N=order, Wn=[lowcut / nyq, highcut / nyq],
                             btype='bandpass')
        return lfilter(b, a, sig)

    # entities 0-59 are spike trains; 60-119 ('filt0001...') are the LFP
    for numCh in range(60, 120):

        print("numCh is " + str(numCh))
        entity = fd.entities[numCh]  # open analog signal entity
        print(fd.entities[numCh].label, entity.entity_type)

        data1, times, count = entity.get_data()
        # count is the number of samples; times is when they occurred (s)

        # create channel names
        channelName = entity.label[0:4] + entity.label[23:]
        channelNum = channelName.split(" ")[2]

        data2 = np.array(data1)
        totalSec = math.floor(max(times))

        # remove tail partial second, then low-pass to prevent aliasing
        data3 = data2[0:(totalSec * 25000)]
        data4 = lfilter(taps, 1.0, data3)

        # down-sample to 1000 samples/sec from 25k/sec
        data[channelName] = signal.resample(data4,
                                            num=(1000 * totalSec),
                                            t=None,
                                            axis=0,
                                            window=None)

        # one row per half-second step; columns are the five bands
        fft_r = pd.DataFrame(0,
                             index=np.arange(totalSec * 2),
                             columns=('delta', 'theta', 'alpha', 'beta',
                                      'gamma'))

        fs = 1000  # rate of the down-sampled trace
        # slide along the signal in 0.5 s increments
        # NOTE(review): the original comment claimed "2 full seconds" per
        # window, but the slice below is fs samples (1 s); preserved as-is.
        for sec in np.arange(1, totalSec, 0.5):
            start_signal = int((sec - 1) * fs)
            end_signal = int(sec * fs)
            curData_temp = data[channelName][start_signal:end_signal]

            # Kaiser taper; 0.5 is the default in the matlab documentation
            w_kaiser = signal.get_window(window=('kaiser', 0.5),
                                         Nx=fs,
                                         fftbins=False)
            curData = w_kaiser * curData_temp  # element-wise taper

            # BUG FIX: the original computed the band-passed `y` for every
            # band but then passed the unfiltered `curData` to get_fft for
            # theta/alpha/beta/gamma (only delta used `y`).  All bands now
            # use the band-passed signal, and the five duplicated stanzas
            # are collapsed into one table-driven loop.
            for band_want, lowcut, highcut in bands:
                y = _butter_bandpass_filter(curData, lowcut, highcut, fs)
                fft_r.loc[sec * 2, band_want] = get_fft(y, band_want)

        # end of window loop

        # average across all windows for this electrode's row
        fft_byE.loc[channelNum, :] = fft_r.mean(axis=0)

    # end of loop through channels

    return fft_byE