Example #1
def _get_tmask_blackrock(nev_fname, tslice, sys_name="task"):
    """ Find the rows of the nev file to use for training the decoder."""

    nev_hdf_fname = nev_fname + ".hdf"
    if not os.path.isfile(nev_hdf_fname):
        # convert .nev file to hdf file using Blackrock's n2h5 utility
        subprocess.call(["n2h5", nev_fname, nev_hdf_fname])

    import h5py

    nev_hdf = h5py.File(nev_hdf_fname, "r")

    path = "channel/digital00001/digital_set"
    digital_set = nev_hdf.get(path)[:]  # read the compound dataset once; Dataset.value was removed in h5py 3.0
    ts = digital_set["TimeStamp"]
    msgs = digital_set["Value"]

    # unpack the message-type, aux-data, and raw-data fields from each event word
    msgtype = np.right_shift(np.bitwise_and(msgs, parse.msgtype_mask), 8).astype(np.uint8)
    auxdata = np.right_shift(np.bitwise_and(msgs, parse.auxdata_mask), 8 + 3).astype(np.uint8)
    rawdata = np.bitwise_and(msgs, parse.rawdata_mask)

    # data is an N x 4 matrix that will be the argument to parse.registrations()
    data = np.vstack([ts, msgtype, auxdata, rawdata]).T

    # get system registrations
    reg = parse.registrations(data)

    syskey = None

    for key, system in reg.items():
        if sys_eq(system[0], sys_name):
            syskey = key
            break

    if syskey is None:
        raise Exception("No source registration saved in the file!")

    # get the corresponding hdf rows
    rows = parse.rowbyte(data)[syskey][:, 0]

    # convert from 30 kHz neural-recording sample indices to seconds
    rows = rows / 30000.0

    lower, upper = 0 < rows, rows < rows.max() + 1
    l, u = tslice
    if l is not None:
        lower = l < rows
    if u is not None:
        upper = rows < u
    tmask = np.logical_and(lower, upper)

    return tmask, rows
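
A minimal usage sketch, assuming the module-level imports (os, subprocess, numpy as np, and the parse module) are in place; the .nev path and time slice below are hypothetical:

# Hypothetical .nev path; keep rows received between 10 s and 300 s for training
tmask, rows = _get_tmask_blackrock('/storage/blackrock/session0001.nev', (10., 300.))
train_times = rows[tmask]  # timestamps (s) of HDF rows inside the slice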
Example #2
def get_spike_counts(te_list):
    F = dict()
    for te in te_list:
        try:
            task_entry = dbfn.TaskEntry(te)
            proceed = 1
        except Exception:
            # task_entry is undefined when the constructor raises, so report te
            print('No access to te:', te)
            proceed = 0
        if proceed:
            plx = task_entry.plx
            binlen = 1/60.
            events = plx.events[:].data
            reg = parse.registrations(events)
            sys_name = 'task'
            for key, system in reg.items():
                if system[0] == sys_name:
                    syskey = key
            rows = parse.rowbyte(events)[syskey][:,0]
            #step = int(binlen/(1./60.)) 
            #interp_rows = rows[::step]
            #spike_bin_fn = psth.SpikeBin(plx.units, binlen)
            #spike_counts = np.array(list(plx.spikes.bin(interp_rows, spike_bin_fn)))

            hdf = task_entry.hdf
            #cursor = hdf.root.task[:]['cursor']
            # the 'go' cue is assumed to arrive three task messages before each 'reward'
            go_ix = np.array([hdf.root.task_msgs[it-3][1] for it, t in enumerate(hdf.root.task_msgs[:]) if
                t[0] == 'reward'])

            rew_ix = np.array([t[1] for it, t in enumerate(hdf.root.task_msgs[:]) if
                t[0] == 'reward'])

            binned_spks = []
            spike_bin_fn2 = psth.SpikeBin(plx.units, 0.1)
            for g, r in zip(go_ix, rew_ix):
                # bin spikes in 100 ms bins between each go cue and its reward
                spk = np.array(list(plx.spikes.bin(rows[g:r], spike_bin_fn2)))
                binned_spks.append(spk)

            bin_spk = np.vstack(binned_spks)
            F[te] = factor_analysis_tasks.FactorBMIBase.generate_FA_matrices(None, bin_spk=bin_spk)
    import pickle
    with open('joystick_FA.pkl', 'wb') as fh:
        pickle.dump(F, fh)
    return F
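
A usage sketch with hypothetical task-entry IDs; the result is also pickled to 'joystick_FA.pkl' as a side effect:

te_list = [4055, 4056, 4057]   # hypothetical database task-entry IDs
F = get_spike_counts(te_list)  # maps each te to its factor-analysis matrices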
Example #3
def compare_hdfs(data, reg, hdf):
    system_list = [system[0] for k, system in reg.items()]

    f, ax = plt.subplots(nrows=len(system_list))
    if len(system_list) == 1:
        ax = [ax]

    for i_s, sys_name in enumerate(system_list):
        syskey = None
        for key, system in reg.items():

            #match each system in the nev_hdf to a table in the normal hdf:
            if sys_eq(system[0], sys_name):
                syskey = key
                break

        if syskey is None:
            raise Exception('No source registration saved in the file!')

        rows = parse.rowbyte(data)[syskey][:, 0]
        timestamps = rows / 30000.  # 30 kHz sample indices -> seconds

        print(sys_name, 'rows in nev_hdf:', len(timestamps),
              'rows in hdf:', len(hdf.get_node('/' + sys_name)))

        tab = hdf.get_node('/' + sys_name)
        if sys_name == 'brainamp':
            ts = np.squeeze(tab[:]['chan1']['ts_arrival'])
        else:
            ts = np.squeeze(tab[:]['ts'])

        ax[i_s].plot(np.diff(timestamps), label='.nev')
        ax[i_s].plot(np.diff(ts), label='.hdf')
        #ax[i_s].plot(timestamps-timestamps[0], label='.nev')
        #ax[i_s].plot(ts-ts[0], label='.hdf')

        ax[i_s].set_title(sys_name)
        ax[i_s].legend()

    plt.tight_layout()
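
A sketch of how compare_hdfs might be driven, rebuilding the Blackrock-style event matrix exactly as in Example #1; the file paths are hypothetical:

import h5py
import tables

# rebuild the N x 4 (ts, msgtype, auxdata, rawdata) matrix as in Example #1
nev_hdf = h5py.File('/storage/blackrock/session0001.nev.hdf', 'r')  # hypothetical path
digital_set = nev_hdf.get('channel/digital00001/digital_set')[:]
ts = digital_set['TimeStamp']
msgs = digital_set['Value']
msgtype = np.right_shift(np.bitwise_and(msgs, parse.msgtype_mask), 8).astype(np.uint8)
auxdata = np.right_shift(np.bitwise_and(msgs, parse.auxdata_mask), 8 + 3).astype(np.uint8)
rawdata = np.bitwise_and(msgs, parse.rawdata_mask)
data = np.vstack([ts, msgtype, auxdata, rawdata]).T

reg = parse.registrations(data)
hdf = tables.open_file('/storage/rawdata/hdf/session0001.hdf')      # hypothetical path
compare_hdfs(data, reg, hdf)
plt.show()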
Example #5
def shar_priv_hist(te_list, task_type, F, step=6):
    binlen = step*(1/60.)
    xbins = np.linspace(-14., 14., 50)
    ybins = np.linspace(-14., 14., 50)
    mat = {}
    for te in te_list:
        
        if te in F:
            FA_kwargs = F[te]

            task_name = task_type[te]
            
            #Neural activity and cursor activity: 
            task_entry = dbfn.TaskEntry(te)
            hdf = task_entry.hdf
            plx = task_entry.plx

            cursor = hdf.root.task[:]['cursor']
            cursor = cursor[::step]

            events = plx.events[:].data
            reg = parse.registrations(events)
            sys_name = 'task'
            for key, system in reg.items():
                if system[0] == sys_name:
                    syskey = key
            rows = parse.rowbyte(events)[syskey][:,0]
            interp_rows = rows[::step]
            spike_bin_fn = psth.SpikeBin(plx.units, binlen)
            spike_counts = np.array(list(plx.spikes.bin(interp_rows, spike_bin_fn)))

            dig_x = np.digitize(cursor[:, 0], xbins)
            dig_y = np.digitize(cursor[:, 2], ybins)

            for i, (x, y) in enumerate(zip(dig_x, dig_y)):
                if i < spike_counts.shape[0]:
                    sc = spike_counts[i, :].reshape(-1, 1)
                    dmn = sc - FA_kwargs['fa_mu']
                    main_shar = (FA_kwargs['fa_main_shared'] * dmn)
                    main_priv = dmn - main_shar

                    # accumulate norms per (x-bin, y-bin, signal, task): s=shared, p=private, f=full
                    mat.setdefault((x, y, 's', task_name), []).append(np.linalg.norm(main_shar))
                    mat.setdefault((x, y, 'p', task_name), []).append(np.linalg.norm(main_priv))
                    mat.setdefault((x, y, 'f', task_name), []).append(np.linalg.norm(dmn))

    task_type_list = np.array(list(task_type.values()))
    f, ax = plt.subplots(nrows=len(np.unique(task_type_list)), ncols=3)
    for it, t in enumerate(np.unique(task_type_list)):
        for iss, sig in enumerate(['s', 'p', 'f']):
            X = np.zeros((len(xbins), len(ybins)))
            for ix in range(len(xbins)):
                for iy in range(len(ybins)):
                    try:
                        X[ix, iy] = np.mean(mat[ix, iy, sig, t])
                    except KeyError:
                        # no samples fell in this spatial bin
                        pass
            c = ax[it, iss].pcolormesh(X, vmin=0, vmax=15)
            print(np.max(X))
            ax[it, iss].set_title(t+' '+sig)
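
A usage sketch chaining this with get_spike_counts from Example #2; the task-entry IDs and task labels are hypothetical:

te_list = [4055, 4056]                        # hypothetical task entries
task_type = {4055: 'joystick', 4056: 'bmi'}   # hypothetical task labels
F = get_spike_counts(te_list)                 # factor-analysis matrices per entry
shar_priv_hist(te_list, task_type, F, step=6)
plt.show()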
Example #6
def _get_tmask_plexon(plx, tslice, sys_name="task"):
    """
    Find the rows of the plx file to use for training the decoder

    Parameters
    ----------
    plx : plexfile instance
        The plexon file to sync
    tslice : list of length 2
        Specify the start and end time to examine the file, in seconds
    sys_name : string, optional
        The "system" being synchronized. When the task is running, each data source 
        (i.e., each HDF table) is allowed to be asynchronous and thus is independently 
        synchronized with the neural recording system.

    Returns
    -------
    tmask: np.ndarray of shape (N, ) of booleans
        Specifies which entries of "rows" (see below) are within the time bounds
    rows: np.ndarray of shape (N, ) of integers
        The times at which rows of the specified HDF table were received in the neural recording box
    """
    # Open plx file
    from plexon import plexfile

    if isinstance(plx, str):
        plx = plexfile.openFile(plx)

    # Get the list of all the systems registered in the neural data file
    events = plx.events[:].data
    reg = parse.registrations(events)

    if len(reg.keys()) > 0:
        # find the key for the specified system data
        syskey = None
        for key, system in reg.items():
            if sys_eq(system[0], sys_name):
                syskey = key
                break

        if syskey is None:
            print(reg.items())
            raise Exception("riglib.bmi.train._get_tmask: Training data source not found in neural data file!")
    elif len(reg.keys()) == 0:
        # try to find how many systems' rowbytes were in the HDF file
        rowbyte_data = parse.rowbyte(events)
        if len(rowbyte_data.keys()) == 1:
            print("No systems registered, but exactly one system has rowbyte data; using it instead of raising an error")
            syskey = list(rowbyte_data.keys())[0]
        else:
            raise Exception("No systems registered and it is ambiguous which system to use for training!")

    # get the corresponding hdf rows
    rows = parse.rowbyte(events)[syskey][:, 0]

    # Determine which rows are within the time bounds
    lower, upper = 0 < rows, rows < rows.max() + 1
    l, u = tslice
    if l is not None:
        lower = l < rows
    if u is not None:
        upper = rows < u
    tmask = np.logical_and(lower, upper)
    return tmask, rows
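
A minimal sketch of the calling convention documented above; the .plx path and time slice are hypothetical:

# keep only HDF rows received in the first 600 s of the recording
tmask, rows = _get_tmask_plexon('/storage/plexon/session0001.plx', (None, 600.))
train_times = rows[tmask]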
Example #7
def load_session(session_name, hdf_only=False, system='sdh', dbname='default'):
    '''
    Load all files associated with a recording session and extract timestamps.

    Parameters
    ----------
    session_name : string
        The name of the session of interest without file extension.

    Returns
    -------
    files_ok : bool
        Whether the 'task' system registration was found in the plx file.
    plx : plexon file
        The loaded plexon file.
    hdf : hdf file
        The loaded hdf5 file.
    ts_func : function
        A function that translates plexon timestamps to hdf row indices or vice
        versa for this session.

        Parameters:
        input_times : list of either plx timestamps (floats) or hdf timestamps
        (ints) to translate
        output_type : string ['hdf', 'plx'] specifying which type the output
        should be (should NOT be the same as the input type)

        Returns:
        output : list of either plx or hdf timestamps corresponding to the input

    If hdf_only is True, only hdf is returned.
    '''
    if system == 'sdh' and dbname == 'default':
        plx_path = '/storage/bmi3d/plexon/'
        hdf_path = '/storage/bmi3d/rawdata/hdf/'
    elif system == 'sdh' and dbname == 'exorig':
        plx_path = '/storage/exorig/plexon/'
        hdf_path = '/storage/exorig/rawdata/hdf/'
    elif system in ['arc', 'nucleus']:
        plx_path = '/storage/plexon/'
        hdf_path = '/storage/rawdata/hdf/'
    elif system in ['arc_backup']:
        plx_path = '/backup/exorig/plexon/'
        hdf_path = '/backup/exorig/rawdata/hdf/'
    hdf = tables.open_file(hdf_path + session_name + '.hdf')
    
    if not hdf_only:
        plx = plexfile.openFile(plx_path + session_name + '.plx')
    
        def sys_eq(sys1, sys2):
            return sys1 in [sys2, sys2[1:]]

        events = plx.events[:].data
        # get system registrations
        reg = parse.registrations(events)
        syskey = None

        # find the key for the task data
        for key, system in reg.items():
            if sys_eq(system[0], 'task'):
                syskey = key
                break

        if syskey is None:
            print('NO SYSKEY Error')
            files_ok = False
            plx = ts_func = 0

        else:
            ts = parse.rowbyte(events)[syskey]

            # Use the checksum column in ts to make sure the hdf has the right number of rows.
            if len(hdf.root.task) < len(ts):
                ts = ts[1:]

            # The second column of ts is a frame counter mod 256; if it ever
            # disagrees with a simple incrementing index, frames were dropped.
            if not np.all(np.arange(len(ts)) % 256 == ts[:, 1]):
                print("Dropped frames detected!")

            files_ok = True
            if len(ts) < len(hdf.root.task):
                print("Warning! Frames missing at end of plx file. Plx recording may have been stopped early.")

            ts = ts[:, 0]

            # Define a function to translate plx timestamps to hdf and vice versa for
            # this session.
            def ts_func(input_times, output_type):
                # 'plx': input hdf row indices -> plexon timestamps (s)
                # 'hdf': input plexon timestamps (s) -> nearest hdf row indices
                if output_type == 'plx':
                    if len(input_times) > len(ts):
                        input_times = input_times[:len(ts)]
                    output = [ts[time] for time in input_times]

                elif output_type == 'hdf':
                    output = [np.searchsorted(ts, time) for time in input_times]

                return np.array(output)

        # Check for previously saved binned spike file, save one if doesn't exist
        #filename = binned_spikes_path+session_name
        #if not os.path.isfile(filename+'.npz'):
        #    save_binned_spike_data(plx, hdf, ts_func, filename)

        return files_ok, plx, hdf, ts_func
    else:
        return hdf
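
A usage sketch for the returned translation function; the session name below is hypothetical:

files_ok, plx, hdf, ts_func = load_session('test20240101_01')  # hypothetical session
if files_ok:
    plx_times = ts_func([0, 1, 2], 'plx')    # plexon timestamps (s) of the first three task rows
    hdf_rows = ts_func([10.5, 20.0], 'hdf')  # hdf row indices nearest these plexon times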