Example #1
import os

def cache_plx(plxfile):
    """
    Create cache for plexon file
    """
    # The database modules require DJANGO_SETTINGS_MODULE to be set before import
    os.environ['DJANGO_SETTINGS_MODULE'] = 'db.settings'
    from .tracker import dbq
    from . import namelist
    from .tracker import models
    from . import dbfunctions as dbfn
    from .json_param import Parameters
    from .tasktrack import Track
    from .tracker.models import TaskEntry, Feature, Sequence, Task, Generator, Subject, DataFile, System, Decoder

    from plexon import plexfile
    # Opening the file creates the cache as a side effect; the handle is discarded
    plexfile.openFile(str(plxfile))
Example #2
import numpy as np
from plexon import plexfile

def test_continuous_edges():
    # filename is a module-level fixture naming the .plx file under test
    plx = plexfile.openFile(filename)
    lfp = plx.lfp[:10]
    data = lfp.data
    time = lfp.time
    # Slicing by time should match slicing by sample index; the LFP here is
    # evidently sampled at 1 kHz, so samples 1000:2000 line up with seconds 1:2
    assert np.allclose(data[1000:2000], plx.lfp[1:2].data)
    assert np.allclose(data[1000:2000], plx.lfp[time[1000]:time[2000]].data)
    # The end of a time slice is exclusive; nudging it just past the sample
    # boundary pulls in one more sample
    assert np.allclose(data[1000:2001], plx.lfp[time[1000]:time[2000]+.00001].data)
    assert np.allclose(data[480:1080], plx.lfp[time[480]:time[1080]].data)
    assert np.allclose(data[479:1080], plx.lfp[time[479]:time[1080]].data)
    assert np.allclose(data[479:1079], plx.lfp[time[479]:time[1079]].data)
    assert np.allclose(data[480:1079], plx.lfp[time[480]:time[1079]].data)
Example #4
    def plx(self):
        '''
        Return a reference to the opened plx file recorded during this TaskEntry
        '''
        try:
            self._plx
        except AttributeError:
            from plexon import plexfile
            self._plx = plexfile.openFile(str(self.plx_filename))

            # parse out events
            from riglib.dio import parse
            self.strobe_data = parse.parse_data(self._plx.events[:].data)
        return self._plx
Example #5
def _get_neural_features_plx(
    files, binlen, extractor_fn, extractor_kwargs, tslice=None, units=None, source="task", strobe_rate=60.0
):
    """
    Extract the neural features used to train the decoder

    Parameters
    ----------
    files: dict
        keys of the dictionary are file-types (e.g., hdf, plexon, etc.), values are file names
    binlen: float
        Specifies the temporal resolution of the feature extraction
    extractor_fn: callable
        Function must have the call signature
        neural_features, units, extractor_kwargs = extractor_fn(files, neurows, binlen, units, extractor_kwargs)
    extractor_kwargs: dictionary
        Additional keyword arguments to the extractor_fn (specific to each feature extractor)
    tslice: list of length 2, optional
        Start and end times (in seconds) of the data to use; by default, the whole file is used
    units: np.ndarray, optional
        Units to extract features from; by default, all units found in the plx file are used
    source: string, optional
        Name of the HDF table synchronized against the neural recording (default "task")

    Returns
    -------
    neural_features: np.ndarray of shape (n_features, n_timepoints)
        Values of each feature to be used in training the decoder parameters
    units: np.ndarray of shape (N, -1)
        Specifies identity of each neural feature
    extractor_kwargs: dictionary
        Keyword arguments used to construct the feature extractor used online
    """

    hdf = tables.openFile(files["hdf"])

    plx_fname = str(files["plexon"])
    from plexon import plexfile

    try:
        plx = plexfile.openFile(plx_fname)
    except IOError:
        raise Exception("Could not open .plx file: %s" % plx_fname)

    # Use all of the units if none are specified
    if units is None:
        units = np.array(plx.units).astype(np.int32)

    tmask, rows = _get_tmask_plexon(plx, tslice, sys_name=source)
    neurows = rows[tmask]

    neural_features, units, extractor_kwargs = extractor_fn(files, neurows, binlen, units, extractor_kwargs)

    return neural_features, units, extractor_kwargs
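
# Hedged usage sketch (not part of the original example): the paths are
# illustrative and sim_extractor stands in for any extractor_fn with the
# call signature documented above.
files = {"hdf": "/storage/rawdata/hdf/cart20130911_09.hdf",
         "plexon": "/storage/plexon/cart20130911_09.plx"}
neural_features, units, extractor_kwargs = _get_neural_features_plx(
    files, binlen=0.1, extractor_fn=sim_extractor, extractor_kwargs=dict(),
    tslice=[10.0, 250.0]
)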
Example #7
def probabilisticRewardTask_PSTH_WithChanMapping(hdf_filename, filename, block_num):
    """
    This method computes the PSTH for all sorted single-unit/multi-unit data using the plx files and txt files
    containing the associated channel numbers represented in the plx files. This is to be used when all 96/64 channels
    are not represented in the plx files. Assumes the channel numbers are stored in the .txt file of the same name with
    channel numbers delimited with commas.
    """
    # Define file paths and names
    plx_filename1_prefix = "Offline_eNe1"
    plx_filename2_prefix = "Offline_eNe2"
    TDT_tank = "/home/srsummerson/storage/tdt/" + filename
    # TDT_tank = '/backup/subnetsrig/storage/tdt/'+filename
    hdf_location = "/storage/rawdata/hdf/" + hdf_filename

    # Unpack behavioral data
    hdf = tables.openFile(hdf_location)

    # Task states
    state = hdf.root.task_msgs[:]["msg"]
    state_time = hdf.root.task_msgs[:]["time"]
    # Target information: high-value target= targetH, low-value target= targetL
    targetH = hdf.root.task[:]["targetH"]
    targetL = hdf.root.task[:]["targetL"]
    # Reward schedules for each target
    reward_scheduleH = hdf.root.task[:]["reward_scheduleH"]
    reward_scheduleL = hdf.root.task[:]["reward_scheduleL"]
    # Trial type: instructed (1) or free-choice (2) trial
    trial_type = hdf.root.task[:]["target_index"]
    cursor = hdf.root.task[:]["cursor"]

    ind_wait_states = np.ravel(np.nonzero(state == "wait"))
    ind_check_reward_states = np.ravel(np.nonzero(state == "check_reward"))
    # Only look at targets/holds for successful trials: the target state occurs
    # 3 entries before the check_reward state, the center hold 4 entries before
    ind_target_states = ind_check_reward_states - 3
    ind_hold_center_states = ind_check_reward_states - 4
    num_successful_trials = ind_check_reward_states.size
    target_times = state_time[ind_target_states]
    center_hold_times = state_time[ind_hold_center_states]
    # creates vector the same size as the state vectors for comparison: instructed (1) or free-choice (2)
    instructed_or_freechoice = trial_type[state_time[ind_target_states]]
    # creates vector of the same size as the state vectors for comparison: (0) = small reward, (1) = large reward
    rewarded_reward_scheduleH = reward_scheduleH[state_time[ind_target_states]]
    rewarded_reward_scheduleL = reward_scheduleL[state_time[ind_target_states]]
    num_free_choice_trials = sum(instructed_or_freechoice) - num_successful_trials
    # matrix of size num_successful_trials x 3: (position_offset, reward_prob, left/right)
    targetH_info = targetH[state_time[ind_target_states]]
    targetL_info = targetL[state_time[ind_target_states]]

    # target1/trial1 cover the first 100 trials; target3/trial3 cover trials from 200 onward
    target1 = np.zeros(100)
    target3 = np.zeros(ind_check_reward_states.size - 200)
    trial1 = np.zeros(target1.size)
    trial3 = np.zeros(target3.size)
    stim_trials = np.zeros(target3.size)

    # Initialize variables used in the performance computation

    neural_data_center_hold_times = np.zeros(len(center_hold_times))

    # Load syncing data for hdf file and TDT recording
    hdf_times = dict()
    mat_filename = filename + "_b" + str(block_num) + "_syncHDF.mat"
    sp.io.loadmat("/home/srsummerson/storage/syncHDF/" + mat_filename, hdf_times)

    print "Loaded sync data."

    hdf_rows = np.ravel(hdf_times["row_number"])
    hdf_rows = [val for val in hdf_rows]  # turn into a list so that the index method can be used later
    dio_tdt_sample = np.ravel(hdf_times["tdt_samplenumber"])
    dio_freq = np.ravel(hdf_times["tdt_dio_samplerate"])
    dio_recording_start = hdf_times["tdt_recording_start"]  # starting sample value
    dio_tstart = dio_recording_start / dio_freq  # starting time in seconds

    # Find corresponding timestamps for neural data from behavioral time points

    for i, time in enumerate(center_hold_times):
        hdf_index = np.argmin(np.abs(np.array(hdf_rows) - time))
        neural_data_center_hold_times[i] = dio_tdt_sample[hdf_index] / dio_freq

    """
	Find target choices and trial type across the blocks.
	"""
    for i in range(0, 100):
        target_state1 = state[ind_check_reward_states[i] - 2]
        trial1[i] = instructed_or_freechoice[i]
        if target_state1 == "hold_targetL":
            target1[i] = 1
        else:
            target1[i] = 2
    for i in range(200, num_successful_trials):
        target_state3 = state[ind_check_reward_states[i] - 2]
        trial3[i - 200] = instructed_or_freechoice[i]
        if target_state3 == "hold_targetL":
            target3[i - 200] = 1
        else:
            target3[i - 200] = 2

    # Compute PSTH for units over all trials
    window_before = 2  # PSTH time window before alignment point in seconds
    window_after = 3  # PSTH time window after alignment point in seconds
    binsize = 100  # spike bin size in ms

    # Get behavior data for computing PSTH for units over trials (free-choice and instructed) where the LV target was selected
    target_state = state[ind_check_reward_states - 2]
    choose_lv = np.ravel(np.nonzero(target_state == "hold_targetL"))
    neural_choose_lv = neural_data_center_hold_times[choose_lv]
    # Get behavior data for computing PSTH for units over trials (free-choice and instructed) where the HV target was selected
    choose_hv = np.ravel(np.nonzero(target_state == "hold_targetH"))
    neural_choose_hv = neural_data_center_hold_times[choose_hv]

    total_units = 0

    print "Getting spike data."
    plx_location1 = TDT_tank + "/" + "Block-" + str(block_num) + "/"
    plx_location2 = TDT_tank + "/" + "Block-" + str(block_num) + "/"
    eNe1_channs = loadtxt(plx_location1 + plx_filename1_prefix + ".txt", delimiter=",")
    eNe2_channs = loadtxt(plx_location2 + plx_filename2_prefix + ".txt", delimiter=",")
    plx_location1 = plx_location1 + plx_filename1_prefix + ".plx"
    plx_location2 = plx_location2 + plx_filename2_prefix + ".plx"

    plx1 = plexfile.openFile(plx_location1)
    spike_file1 = plx1.spikes[:].data
    spike_file1 = remap_spike_channels(spike_file1, eNe1_channs)

    plx2 = plexfile.openFile(plx_location2)
    spike_file2 = plx2.spikes[:].data
    spike_file2 = remap_spike_channels(spike_file2, eNe2_channs)

    all_channs = np.append(eNe1_channs, eNe2_channs + 96)

    print "Computing PSTHs."
    psth_all_trials, smooth_psth_all_trials, labels_all_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times, window_before, window_after, binsize
    )
    psth_lv_trials, smooth_psth_lv_trials, labels_lv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_lv], window_before, window_after, binsize
    )
    psth_hv_trials, smooth_psth_hv_trials, labels_hv_trials = computePSTH(
        spike_file1, spike_file2, neural_data_center_hold_times[choose_hv], window_before, window_after, binsize
    )

    psth_time_window = np.arange(-window_before, window_after - float(binsize) / 1000, float(binsize) / 1000)

    # Plot PSTHs all together
    print "Plotting."
    cmap_all = mpl.cm.brg
    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_all_trials.keys()[i]
        plt.plot(
            psth_time_window,
            psth_all_trials[unit_name],
            color=cmap_all(i / float(len(psth_all_trials))),
            label=unit_name,
        )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("PSTH")
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_PSTH-CenterHold.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_all_trials.keys()[i]
        if np.max(smooth_psth_all_trials[unit_name]) > 10:
            plt.plot(
                psth_time_window,
                smooth_psth_all_trials[unit_name],
                color=cmap_all(i / float(len(psth_all_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_lv_trials.keys()[i]
        if np.max(smooth_psth_lv_trials[unit_name]) > 20:
            plt.plot(
                psth_time_window,
                smooth_psth_lv_trials[unit_name],
                color=cmap_all(i / float(len(psth_lv_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH for Trials with LV Target Selection")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold-LV.svg"
    )

    plt.figure()
    for i in range(len(all_channs)):
        unit_name = psth_hv_trials.keys()[i]
        if np.max(smooth_psth_hv_trials[unit_name]) > 20:
            plt.plot(
                psth_time_window,
                smooth_psth_hv_trials[unit_name],
                color=cmap_all(i / float(len(psth_hv_trials))),
                label=unit_name,
            )
    plt.xlabel("Time (s)")
    plt.ylabel("spks/s")
    plt.title("Smooth PSTH for Trials with HV Target Selection")
    plt.legend()
    plt.savefig(
        "/home/srsummerson/code/analysis/Mario_Performance_figs/"
        + filename
        + "_b"
        + str(block_num)
        + "_SmoothPSTH-CenterHold-HV.svg"
    )

    plt.close("all")
    hdf.close()
    return
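
# Hedged usage sketch (not part of the original example): the file names are
# borrowed from Example #13 below and are illustrative only.
probabilisticRewardTask_PSTH_WithChanMapping('mari20160418_04_te2002.hdf', 'Mario20160418', 1)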
Example #8
import numpy as np
import tables
from plexon import plexfile

def running_mean(x, N):
    cumsum = np.cumsum(np.insert(x, 0, 0))
    return (cumsum[N:] - cumsum[:-N]) / float(N)
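
# Illustrative sanity check (not from the original script): running_mean is a
# length-N sliding average computed with the cumulative-sum trick above.
assert np.allclose(running_mean(np.array([1., 2., 3., 4., 5.]), 3), [2., 3., 4.])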

# Set up code for particular day and block
hdf_filename = 'mari20160128_04_te1327.hdf'
filename = 'Mario20160128'
plx_filename = 'Offline_eNe1.plx'
TDT_tank = '/home/srsummerson/storage/tdt/'+filename
hdf_location = '/storage/rawdata/hdf/'+hdf_filename
plx_location = '/home/srsummerson/storage/tdt/'+filename+'/'+plx_filename
#hdf_location = hdf_filename
block_num = 1

plx = plexfile.openFile(plx_location)
spike_file = plx.spikes[:].data

# Load behavior data
## self.stress_trial =1 for stress trial, 0 for regular trial
hdf = tables.openFile(hdf_location)

state = hdf.root.task_msgs[:]['msg']
state_time = hdf.root.task_msgs[:]['time']
#trial_type = hdf.root.task[:]['target_index']
stress_type = hdf.root.task[:]['stress_trial']

  
ind_wait_states = np.ravel(np.nonzero(state == 'wait'))   # total number of unique trials
#ind_center_states = np.ravel(np.nonzero(state == 'center'))   
ind_target_states = np.ravel(np.nonzero(state == 'target')) # total number of trials (includes repeats if trial was incomplete)
Example #9
    def __init__(self, fname, tslice=slice(None, None)):
        self.plx = plexfile.openFile(fname)
        self.spike_waveforms = self.plx.spikes[tslice].waveforms
        self.spike_timestamps = self.plx.spikes[tslice].data
Example #10
from plexon import plexfile
import numpy as np
plx = plexfile.openFile("/home/james/Downloads/cart20121106_04.plx")
from riglib.nidaq import parse
ts = parse.rowbyte(plx.events[:].data)[0]
print("binning...")
bins = np.array(list(plx.spikes.bin(ts[:, 0])))
print("done!")
Example #11
def cache_plx(plxfile):
    """
    Create cache for plexon file
    """
    from plexon import plexfile
    # Opening the file creates the cache as a side effect; the handle is discarded
    plexfile.openFile(str(plxfile))
Example #12
def _get_tmask_plexon(plx, tslice, sys_name="task"):
    """
    Find the rows of the plx file to use for training the decoder

    Parameters
    ----------
    plx : plexfile instance
        The plexon file to sync
    tslice : list of length 2
        Specify the start and end time to examine the file, in seconds
    sys_name : string, optional
        The "system" being synchronized. When the task is running, each data source 
        (i.e., each HDF table) is allowed to be asynchronous and thus is independently 
        synchronized with the neural recording system.

    Returns
    -------
    tmask: np.ndarray of shape (N, ) of booleans
        Specifies which entries of "rows" (see below) are within the time bounds
    rows: np.ndarray of shape (N, ) of floats
        The times at which rows of the specified HDF table were received in the neural recording box
    """
    # Open plx file
    from plexon import plexfile

    if isinstance(plx, (str, unicode)):
        plx = plexfile.openFile(plx)

    # Get the list of all the systems registered in the neural data file
    events = plx.events[:].data
    reg = parse.registrations(events)

    if len(reg.keys()) > 0:
        # find the key for the specified system data
        syskey = None
        for key, system in reg.items():
            if sys_eq(system[0], sys_name):
                syskey = key
                break

        if syskey is None:
            print reg.items()
            raise Exception("riglib.bmi.train._get_tmask: Training data source not found in neural data file!")
    elif len(reg.keys()) == 0:
        # try to find how many systems' rowbytes were in the HDF file
        rowbyte_data = parse.rowbyte(events)
        if len(rowbyte_data.keys()) == 1:
            print "No systems registered, but only one system registered with rowbytes! Using it anyway instead of throwing an error"
            syskey = rowbyte_data.keys()[0]
        else:
            raise Exception("No systems registered and I don't know which sys to use to train!")

    # get the corresponding hdf rows
    rows = parse.rowbyte(events)[syskey][:, 0]

    # Determine which rows are within the time bounds
    lower, upper = 0 < rows, rows < rows.max() + 1
    l, u = tslice
    if l is not None:
        lower = l < rows
    if u is not None:
        upper = rows < u
    tmask = np.logical_and(lower, upper)
    return tmask, rows
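
# Hedged usage sketch (not part of the original example; path illustrative):
# keep only the HDF rows received between t = 10 s and t = 250 s, as done in
# _get_neural_features_plx above.
tmask, rows = _get_tmask_plexon('/storage/plexon/cart20130911_09.plx', [10.0, 250.0], sys_name='task')
neurows = rows[tmask]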
Example #13
hdf_filename = 'mari20160418_04_te2002.hdf'
filename = 'Mario20160418'
plx_filename1 = 'Offline_eNe1.plx'
plx_filename2 = 'Offline_eNe2.plx'
TDT_tank = '/home/srsummerson/storage/tdt/'+filename
hdf_location = '/storage/rawdata/hdf/'+hdf_filename


#hdf_location = hdf_filename
block_num = 1

plx_location1 = '/home/srsummerson/storage/tdt/'+filename+'/'+'Block-'+ str(block_num) + '/'+plx_filename1
plx_location2 = '/home/srsummerson/storage/tdt/'+filename+'/'+'Block-'+ str(block_num) + '/'+plx_filename2

# Get spike data
plx1 = plexfile.openFile(plx_location1)
spike_file1 = plx1.spikes[:].data
plx2 = plexfile.openFile(plx_location2)
spike_file2 = plx2.spikes[:].data

# Load behavior data
## self.stress_trial =1 for stress trial, 0 for regular trial
state_time, ind_center_states, ind_check_reward_states, all_instructed_or_freechoice, all_stress_or_not, successful_stress_or_not, trial_success, target, reward = FreeChoiceBehavior_withStressTrials(hdf_location)

# Total number of trials
num_trials = ind_center_states.size
total_states = state_time.size

# Fraction of stress trials that were successful
tot_successful_stress = np.logical_and(trial_success, all_stress_or_not)
successful_stress_trials = float(np.sum(tot_successful_stress)) / np.sum(all_stress_or_not)
Example #14
    def to_json(self):
        '''
        Create a JSON dictionary of the metadata associated with this block for display in the web interface
        '''
        print "starting TaskEntry.to_json()"
        from json_param import Parameters

        # Run the metaclass constructor for the experiment used. If this can be avoided, it would help to break some of the cross-package software dependencies,
        # making it easier to analyze data without installing software for the entire rig

        Exp = self.task.get(self.feats.all())        
        state = 'completed' if self.pk is not None else "new"

        js = dict(task=self.task.id, state=state, subject=self.subject.id, notes=self.notes)
        js['feats'] = dict([(f.id, f.name) for f in self.feats.all()])
        js['params'] = self.task.params(self.feats.all(), values=self.task_params)

        if len(js['params'])!=len(self.task_params):
            print 'param lengths: JS:', len(js['params']), 'Task: ', len(self.task_params)

        # Supply sequence generators which are declared to be compatible with the selected task class
        exp_generators = dict() 
        if hasattr(Exp, 'sequence_generators'):
            for seqgen_name in Exp.sequence_generators:
                try:
                    g = Generator.objects.using(self._state.db).get(name=seqgen_name)
                    exp_generators[g.id] = seqgen_name
                except ObjectDoesNotExist:
                    print "missing generator %s" % seqgen_name
        js['generators'] = exp_generators

        ## Add the sequence, used when the block gets copied
        print "getting the sequence, if any"
        if issubclass(self.task.get(), experiment.Sequence):
            js['sequence'] = {self.sequence.id:self.sequence.to_json()}

        datafiles = DataFile.objects.using(self._state.db).filter(entry=self.id)

        ## Add data files linked to this task entry to the web interface. 
        try:
            backup_root = config.backup_root['root']
        except (AttributeError, KeyError):
            backup_root = '/None'
        
        js['datafiles'] = dict()
        system_names = set(d.system.name for d in datafiles)
        for name in system_names:
            js['datafiles'][name] = [d.get_path() + ' (backup available: %s)' % d.is_backed_up(backup_root) for d in datafiles if d.system.name == name]

        js['datafiles']['sequence'] = issubclass(Exp, experiment.Sequence) and len(self.sequence.sequence) > 0
        
        # Parse the "report" data and put it into the JS response
        js['report'] = self.offline_report()

        if config.recording_sys['make'] == 'plexon':
            try:
                from plexon import plexfile # keep this import here so that only plexon rigs need the plexfile module installed
                plexon = System.objects.using(self._state.db).get(name='plexon')
                df = DataFile.objects.using(self._state.db).get(entry=self.id, system=plexon)

                _neuralinfo = dict(is_seed=Exp.is_bmi_seed)
                if Exp.is_bmi_seed:
                    plx = plexfile.openFile(str(df.get_path()), load=False)
                    path, name = os.path.split(df.get_path())
                    name, ext = os.path.splitext(name)

                    _neuralinfo['length'] = plx.length
                    _neuralinfo['units'] = plx.units
                    _neuralinfo['name'] = name

                js['bmi'] = dict(_neuralinfo=_neuralinfo)
            except MemoryError:
                print "Memory error opening plexon file!"
                js['bmi'] = dict(_neuralinfo=None)
            except (ObjectDoesNotExist, AssertionError, IOError):
                print "No plexon file found"
                js['bmi'] = dict(_neuralinfo=None)
        
        elif config.recording_sys['make'] == 'blackrock':
            try:
                print 'skipping .nev conversion'
                js['bmi'] = dict(_neuralinfo=None)
                
                # length, units = parse_blackrock_file(self.nev_file, self.nsx_files, self)
                
                # js['bmi'] = dict(_neuralinfo=dict(
                #     length=length, 
                #     units=units,
                #     name=name,
                #     is_seed=int(Exp.is_bmi_seed),
                #     ))
                      
            except (ObjectDoesNotExist, AssertionError, IOError):
                print "No blackrock files found"
                js['bmi'] = dict(_neuralinfo=None)
            except:
                import traceback
                traceback.print_exc()
                js['bmi'] = dict(_neuralinfo=None)
        elif config.recording_sys['make'] == 'TDT':
            print 'This code does not yet know how to open TDT files!'
            js['bmi'] = dict(_neuralinfo=None)
            #raise NotImplementedError("This code does not yet know how to open TDT files!")
        else:
            raise Exception('Unrecognized recording_system!')


        for dec in Decoder.objects.using(self._state.db).filter(entry=self.id):
            js['bmi'][dec.name] = dec.to_json()

        # include paths to any plots associated with this task entry, if offline
        files = os.popen('find /storage/plots/ -name %s*.png' % self.id)
        plot_files = dict()
        for f in files:
            fname = f.rstrip()
            keyname = os.path.splitext(os.path.basename(fname))[0][len(str(self.id)):]
            plot_files[keyname] = os.path.join('/static', fname)

        js['plot_files'] = plot_files
        js['flagged_for_backup'] = self.backup
        js['visible'] = self.visible
        print "TaskEntry.to_json finished!"
        return js
Example #15
#!/usr/bin/python
"""
Determine how well the binning method is working
"""
import os
import numpy as np
import tables
from plexon import plexfile

hdf_data_basename = 'cart20130911_09.hdf'
hdf_data_file = os.path.join('/storage/rawdata/hdf', hdf_data_basename)

plx_data_basename = 'cart20130911_09.plx'
plx_data_file = os.path.join('/storage/plexon', plx_data_basename)

# open the files
plx = plexfile.openFile(plx_data_file)
hdf = tables.openFile(hdf_data_file)
Example #16
# split vis-tact stims
file_list = [
    'cart20130126_01', 'cart20130128_01', 'cart20130129_01', 'cart20130130_01'
]
order_list = [0, 1, 1, 0]

#combined vis-tact stims
# file_list = ['cart20130123_02','cart20130124_01','cart20130125_01']
# order_list = [2,2,2]

# Note: plx_path, hdf_path, seq_path, tap_times and match_taps are defined
# elsewhere in the original script
for count, session in enumerate(file_list):

    #load files for session
    print count, "loading file"
    sys.stdout.flush()
    plx = plexfile.openFile(plx_path + session + '.plx')
    hdf = tables.openFile(hdf_path + session + '.hdf')
    seq = cPickle.load(open(seq_path + session[:-3] + '.pkl'))

    # Find times of stimuli
    print count, "finding stim times"
    sys.stdout.flush()
    stimtimes = tap_times(plx, min_duration=60)
    stimstart = stimtimes[0][0]

    # Get list of numdisplay state begin times
    #(exclude anything past 1 hour for damaged data files)
    # and match each to the corresponding stimulus occurrence
    states = parse.messages(plx.events[:3590].data)
    disptimes = [s[0] for s in states if s[1] == 'numdisplay']
    output, seq2 = match_taps(seq[:len(disptimes)], disptimes, stimtimes)
Example #17
def load_session(session_name, hdf_only=False, system='sdh', dbname='default'):
    '''
    Load all files associated with a recording session and extract timestamps.

    Parameters
    ----------
    session_name : string
        The name of the session of interest without file extension.

    Returns
    -------
    files_ok : boolean
        Whether the plx events could be synchronized with the hdf table.
    plx : plexon file
        The loaded plexon file.
    hdf : hdf file
        The loaded hdf5 file.
    ts_func : function
        A function that translates plexon timestamps to hdf row indices or vice
        versa for this session.

        Parameters:
        input_times : list of either plx timestamps (floats) or hdf timestamps
        (ints) to translate
        output_type : string ['hdf', 'plx'] specifying which type the output
        should be (should NOT be the same as the input type)

        Returns:
        output : list of either plx or hdf timestamps corresponding to input

    If hdf_only is True, only the hdf file is returned.
    '''
    # Note: string comparisons use == rather than 'is'; identity comparison of
    # strings is implementation-dependent and unreliable
    if system == 'sdh' and dbname == 'default':
        plx_path = '/storage/bmi3d/plexon/'
        hdf_path = '/storage/bmi3d/rawdata/hdf/'
    elif system == 'sdh' and dbname == 'exorig':
        plx_path = '/storage/exorig/plexon/'
        hdf_path = '/storage/exorig/rawdata/hdf/'
    elif system in ['arc', 'nucleus']:
        plx_path = '/storage/plexon/'
        hdf_path = '/storage/rawdata/hdf/'
    elif system in ['arc_backup']:
        plx_path = '/backup/exorig/plexon/'
        hdf_path = '/backup/exorig/rawdata/hdf/'
    else:
        raise ValueError('Unrecognized system/dbname combination: %s/%s' % (system, dbname))
    hdf = tables.openFile(hdf_path + session_name + '.hdf')
    
    if not hdf_only:
        plx = plexfile.openFile(plx_path + session_name + '.plx')
    
        def sys_eq(sys1, sys2):
            return sys1 in [sys2, sys2[1:]]

        events = plx.events[:].data
        # get system registrations
        reg = parse.registrations(events)
        syskey = None

        # find the key for the task data
        for key, system in reg.items():
            if sys_eq(system[0], 'task'):
                syskey = key
                break

        if syskey is None: 
            print 'NO SYSKEY Error'
            files_ok = False
            plx = ts_func = 0
        
        else:
            ts = parse.rowbyte(events)[syskey] 

            # Use the checksum column in ts to make sure there are the right number of rows in hdf.
            if len(hdf.root.task) < len(ts):
                ts = ts[1:]

            # Column 1 of ts is a frame counter mod 256; a mismatch with a
            # sequential count indicates dropped frames
            files_ok = True
            if not np.all(np.arange(len(ts)) % 256 == ts[:, 1]):
                print "Dropped frames detected!"
            if len(ts) < len(hdf.root.task):
                print "Warning! Frames missing at end of plx file. Plx recording may have been stopped early."

            ts = ts[:,0]

            # Define a function to translate plx timestamps to hdf and vice versa for
            # this session.
            def ts_func(input_times, output_type):

                if output_type == 'plx':
                    if len(input_times)>len(ts):
                        input_times = input_times[:len(ts)]
                    output = [ts[time] for time in input_times]

                if output_type == 'hdf':
                    output = [np.searchsorted(ts, time) for time in input_times]

                return np.array(output)

        # Check for previously saved binned spike file, save one if doesn't exist
        #filename = binned_spikes_path+session_name
        #if not os.path.isfile(filename+'.npz'):
        #    save_binned_spike_data(plx, hdf, ts_func, filename)

        return files_ok, plx, hdf, ts_func
    else:
        return hdf
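
# Hedged usage sketch (not part of the original example; session name illustrative):
# translate timestamps between the plx and hdf files using the returned ts_func.
files_ok, plx, hdf, ts_func = load_session('cart20130911_09', system='arc')
if files_ok:
    hdf_rows = ts_func([10.0, 20.0], 'hdf')   # plx times (s) -> hdf row indices
    plx_times = ts_func([100, 200], 'plx')    # hdf row indices -> plx times (s)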