# Methods excerpted from a Neo NWBIO writer class. Assumed context, not shown
# in the original snippets: numpy as np, neo's AnalogSignal and
# IrregularlySampledSignal classes, the nwb_utils module from the NWB 1.x API,
# and the helpers time_in_seconds() and _decompose_unit() (sketched below).
    def _write_signal(self, signal, epoch, i):
        # for now, we assume that all signals should go in /acquisition
        # this could be modified using Neo annotations
        signal_name = signal.name or "signal{0}".format(i)
        ts_name = "{0}_{1}".format(signal.segment.name, signal_name)
        ts = self._file.make_group("<MultiChannelTimeSeries>", ts_name, path="/acquisition/timeseries")
        conversion, base_unit = _decompose_unit(signal.dimensionality)
        attributes = {"conversion": conversion,
                      "unit": base_unit,
                      "resolution": float('nan')}
        if isinstance(signal, AnalogSignal):
            ts.set_dataset("starting_time", time_in_seconds(signal.t_start),
                           attrs={"rate": float(signal.sampling_rate.rescale("Hz"))})
        elif isinstance(signal, IrregularlySampledSignal):
            ts.set_dataset("timestamps", signal.times.rescale('second').magnitude)
        else:
            raise TypeError("signal has type {0}, should be AnalogSignal or IrregularlySampledSignal".format(signal.__class__.__name__))
        ts.set_dataset("data", signal.magnitude,
                       attrs=attributes)
        ts.set_dataset("num_samples", signal.shape[0])  # this is supposed to be created automatically, but is not
        ts.set_attr("source", signal.name or "unknown")
        ts.set_attr("description", signal.description or "")

        nwb_utils.add_epoch_ts(epoch,
                               time_in_seconds(signal.segment.t_start),
                               time_in_seconds(signal.segment.t_stop),
                               signal_name,
                               ts)

    def _write_event(self, event, nwb_epoch, i):
        event_name = event.name or "event{0}".format(i)
        ts_name = "{0}_{1}".format(event.segment.name, event_name)
        ts = self._file.make_group("<AnnotationSeries>", ts_name, path="/acquisition/timeseries")
        ts.set_dataset("timestamps", event.times.rescale('second').magnitude)
        ts.set_dataset("data", event.labels)
        ts.set_dataset("num_samples", event.size)   # this is supposed to be created automatically, but is not
        ts.set_attr("source", event.name or "unknown")
        ts.set_attr("description", event.description or "")
        nwb_utils.add_epoch_ts(nwb_epoch,
                               time_in_seconds(event.segment.t_start),
                               time_in_seconds(event.segment.t_stop),
                               event_name,
                               ts)

    def _write_neo_epoch(self, neo_epoch, nwb_epoch, i):
        neo_epoch_name = neo_epoch.name or "intervalseries{0}".format(i)
        ts_name = "{0}_{1}".format(neo_epoch.segment.name, neo_epoch_name)
        ts = self._file.make_group("<AnnotatedIntervalSeries>", ts_name,
                                   path="/acquisition/timeseries")
        ts.set_dataset("timestamps", neo_epoch.times.rescale('second').magnitude)
        ts.set_dataset("durations", neo_epoch.durations.rescale('second').magnitude)
        ts.set_dataset("data", neo_epoch.labels)
        #ts.set_dataset("num_samples", neo_epoch.size)   # this is supposed to be created automatically, but is not
        ts.set_attr("source", neo_epoch.name or "unknown")
        ts.set_attr("description", neo_epoch.description or "")
        nwb_utils.add_epoch_ts(nwb_epoch,
                               time_in_seconds(neo_epoch.segment.t_start),
                               time_in_seconds(neo_epoch.segment.t_stop),
                               neo_epoch_name,
                               ts)
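
Neither _decompose_unit nor time_in_seconds is defined in these snippets. Below is a
minimal sketch of what they plausibly do, assuming the quantities package and simple
single-term units such as mV (the real helpers may handle more cases):

import quantities as pq

def _decompose_unit(unit_or_dim):
    # return (conversion, base_unit_name) such that
    # data * conversion == values expressed in base_unit_name;
    # accept either a Quantity (signal.units) or a Dimensionality
    dim = getattr(unit_or_dim, "dimensionality", unit_or_dim)
    unit, power = list(dim.items())[0]  # assume a single term, e.g. (mV, 1)
    simplified = unit.simplified        # e.g. 0.001 V for mV
    base_name = list(simplified.dimensionality.keys())[0].name  # e.g. 'volt'
    return float(simplified.magnitude) ** power, base_name

def time_in_seconds(t):
    # convert a quantities time, e.g. 15.0 * pq.ms, to a plain float in seconds
    return float(t.rescale("second").magnitude)

# example: _decompose_unit((1.5 * pq.mV).dimensionality) -> (0.001, 'volt')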
Example #4
    def _write_signal(self, signal, epoch, i):
        # for now, we assume that all signals should go in /acquisition
        # this could be modified using Neo annotations
        signal_name = signal.name or "signal{0}".format(i)
        ts_name = "{0}_{1}".format(signal.segment.name, signal_name)
        ts = self._file.make_group("<MultiChannelTimeSeries>",
                                   ts_name,
                                   path="/acquisition/timeseries")
        conversion, base_unit = _decompose_unit(signal.units)
        attributes = {
            "conversion": conversion,
            "unit": base_unit,
            "resolution": float('nan')
        }
        if isinstance(signal, AnalogSignal):
            sampling_rate = signal.sampling_rate.rescale("Hz")
            # The following line is a temporary hack. IOs should not modify the objects
            # being written, but NWB only allows Hz, and the common IO tests
            # require units to be the same on write and read.
            # the proper solution is probably to add an option to the common IO tests
            # to allow different units
            signal.sampling_rate = sampling_rate
            ts.set_dataset("starting_time",
                           time_in_seconds(signal.t_start),
                           attrs={"rate": float(sampling_rate)})
        elif isinstance(signal, IrregularlySampledSignal):
            ts.set_dataset("timestamps",
                           signal.times.rescale('second').magnitude)
        else:
            raise TypeError(
                "signal has type {0}, should be AnalogSignal or IrregularlySampledSignal"
                .format(signal.__class__.__name__))
        ts.set_dataset(
            "data",
            signal.magnitude,
            dtype=np.float64,  #signal.dtype,
            attrs=attributes)
        ts.set_dataset(
            "num_samples", signal.shape[0]
        )  # this is supposed to be created automatically, but is not
        #ts.set_dataset("num_channels", signal.shape[1])
        ts.set_attr("source", signal.name or "unknown")
        ts.set_attr("description", signal.description or "")

        nwb_utils.add_epoch_ts(epoch, time_in_seconds(signal.segment.t_start),
                               time_in_seconds(signal.segment.t_stop),
                               signal_name, ts)
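
For context on the hack above: NWB 1.x only stores the rate attribute of
starting_time in hertz, hence the rescale before writing. A small quantities
demonstration (values illustrative):

import quantities as pq

rate = (0.5 * pq.ms) ** -1         # a sampling rate expressed as 1/interval
print(rate)                        # -> 2.0 1/ms (a quantities Quantity)
print(float(rate.rescale("Hz")))   # -> 2000.0, the value written to the file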
Example #5
    def _write_event(self, event, nwb_epoch, i):
        event_name = event.name or "event{0}".format(i)
        ts_name = "{0}_{1}".format(event.segment.name, event_name)
        ts = self._file.make_group("<AnnotationSeries>",
                                   ts_name,
                                   path="/acquisition/timeseries")
        ts.set_dataset("timestamps", event.times.rescale('second').magnitude)
        ts.set_dataset("data", event.labels)
        ts.set_dataset(
            "num_samples", event.size
        )  # this is supposed to be created automatically, but is not
        ts.set_attr("source", event.name or "unknown")
        ts.set_attr("description", event.description or "")
        nwb_utils.add_epoch_ts(nwb_epoch,
                               time_in_seconds(event.segment.t_start),
                               time_in_seconds(event.segment.t_stop),
                               event_name, ts)
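
A minimal Neo object that _write_event would accept, assuming neo and quantities
(names and values here are illustrative):

import numpy as np
import quantities as pq
from neo.core import Event, Segment

ev = Event(times=np.array([0.1, 0.9]) * pq.s,
           labels=np.array(["cue_on", "cue_off"], dtype="U"),
           name="cues")
ev.segment = Segment(name="trial01")
# stored as an <AnnotationSeries> named "trial01_cues", with
# "timestamps" = [0.1, 0.9] and "data" = the label strings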
Example #6
    def _write_neo_epoch(self, neo_epoch, nwb_epoch, i):
        neo_epoch_name = neo_epoch.name or "intervalseries{0}".format(i)
        ts_name = "{0}_{1}".format(neo_epoch.segment.name, neo_epoch_name)
        ts = self._file.make_group("<AnnotatedIntervalSeries>",
                                   ts_name,
                                   path="/acquisition/timeseries")
        ts.set_dataset("timestamps",
                       neo_epoch.times.rescale('second').magnitude)
        ts.set_dataset("durations",
                       neo_epoch.durations.rescale('second').magnitude)
        ts.set_dataset("data", neo_epoch.labels)
        #ts.set_dataset("num_samples", neo_epoch.size)   # this is supposed to be created automatically, but is not
        ts.set_attr("source", neo_epoch.name or "unknown")
        ts.set_attr("description", neo_epoch.description or "")
        nwb_utils.add_epoch_ts(nwb_epoch,
                               time_in_seconds(neo_epoch.segment.t_start),
                               time_in_seconds(neo_epoch.segment.t_stop),
                               neo_epoch_name, ts)
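
Similarly, _write_neo_epoch expects a Neo Epoch, whose per-interval durations
become the extra "durations" dataset (illustrative values again):

import numpy as np
import quantities as pq
from neo.core import Epoch

ep = Epoch(times=np.array([0.0, 1.0]) * pq.s,
           durations=np.array([0.5, 0.5]) * pq.s,
           labels=np.array(["baseline", "stim"], dtype="U"),
           name="phases")
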
    def _write_spiketrains(self, spiketrains, segment):
        mod = self._file.make_group("<Module>", "Units", abort=False)
        #mod.set_custom_dataset('description', 'Spike times and waveforms')
        # create interfaces
        #spk_waves_iface = mod.make_group("EventWaveform")
        #spk_waves_iface.set_attr("source", "Data as reported in Nuo's file")
        spiketrain_group = mod.make_group("UnitTimes", abort=False)
        spiketrain_group.set_attr("source", "block {0}".format(segment.block.name))
        fmt = 'unit_{{0:0{0}d}}_{1}'.format(len(str(len(spiketrains))), segment.name)
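
The nested format call on the last line builds a zero-padded unit-name template.
A worked example with stand-in values:

spiketrains = [None] * 12   # stand-in for 12 spike trains
segment_name = "seg0"       # stand-in for segment.name
fmt = 'unit_{{0:0{0}d}}_{1}'.format(len(str(len(spiketrains))), segment_name)
print(fmt)            # -> unit_{0:02d}_seg0
print(fmt.format(3))  # -> unit_03_seg0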
Example #10
data = np.linspace(1., 100.0, 6*4*1000).reshape(24,1000)
times = np.linspace(0.0, 60.*2., 1000) 

# create an instance of TrajectorySeries.  Name of group will be "hand_position";
# it will be stored in /acquisition/timeseries

nts = f.make_group("<TrajectorySeries>", "hand_position", path="/acquisition/timeseries",
    attrs={"source": "source of data for my new timeseries"} )
nts.set_dataset("data", data, attrs={"conversion": 1.0, "resolution": 0.001, 
    "unit": "meter and radian; see definition of dimension trajectories in format specification"})
nts.set_dataset("timestamps", times)

# specify meaning of variables
reference_frame = ("Meaning of measurement values in array data, (e.g. sensor s1, s2, s3, s4; "
    "x, y, z, pitch, roll, yaw) should be described here")
nts.set_dataset("reference_frame", reference_frame)

# Add in sample epochs to specify the trials
trial_times = [ [0.5, 1.5], [2.5, 3.0], [3.5, 4.0]]

for trial_num in range(len(trial_times)):
    trial_name = "Trial_%03i" % (trial_num+1)
    start_time, stop_time = trial_times[trial_num]
    ep = utils.create_epoch(f, trial_name, start_time, stop_time)
    utils.add_epoch_ts(ep, start_time, stop_time, "hand_position", nts)


# All done.  Close the file
f.close()
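
Under the NWB 1.x convention used above, stored data multiplied by the
"conversion" attribute gives values in "unit", and "resolution" is the smallest
meaningful difference (NaN if unknown). A reader-side sketch with h5py:

import h5py

with h5py.File("example.nwb", "r") as f:        # illustrative file name
    ds = f["/acquisition/timeseries/hand_position/data"]
    values = ds[...] * ds.attrs["conversion"]   # now in units of ds.attrs["unit"]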

Example #11
def create_trials(orig_h5, nuo):
    trial_id = orig_h5["trialIds/trialIds"].value
    trial_t = orig_h5["trialStartTimes/trialStartTimes"].value
    good_trials = orig_h5['trialPropertiesHash/value/4/4'].value
    ignore_ivals_start = [time for (time, good_trial) in zip(trial_t, good_trials) if good_trial == 0]
    # trial stop isn't stored. assume that it's twice the duration of other
    #   trials -- padding on the high side shouldn't matter
    ival = (trial_t[-1] - trial_t[0]) / (len(trial_t) - 1)
    trial_t = np.append(trial_t, trial_t[-1] + 2*ival)
    ignore_ivals_stop = [time for (time, good_trial) in zip(trial_t[1:], good_trials) if good_trial == 0]
    ignore_intervals = [ignore_ivals_start, ignore_ivals_stop]
    # for i in range(10):  # version with only ten epochs, to reduce run time
    for i in range(len(trial_id)):  # use: in range(5): to reduce run time
        tid = trial_id[i]
        trial = "Trial_%d%d%d" % (int(tid/100), int(tid/10)%10, tid%10)
        # print trial # DEBUG
        start = trial_t[i]
        stop = trial_t[i+1]
        epoch = ut.create_epoch(nuo, trial, start, stop)
        tags = []
        if good_trials[i] == 1:
            tags.append("Good trial")
        else:
            tags.append("Non-performing")
        for j in range(len(epoch_tags[trial])):
            tags.append(epoch_tags[trial][j])
        epoch.set_dataset("tags", tags)
        # keep with tradition and create a units field, even if it's empty
        if trial not in epoch_units:
            units = []
        else:
            units = epoch_units[trial]
        epoch.set_custom_dataset("units", units)
        # raw data path
        raw_path = "descrHash/value/%d" % (trial_id[i])
        # try:
        raw_file = parse_h5_obj(orig_h5[raw_path])[0]
        if len(raw_file) == 1:
            raw_file = 'na'
        else:
            raw_file = str(raw_file)
        # except KeyError:
        #         raw_path = "descrHash/value/%d/" %(trial_id[i])
        #         try:
        #             raw_file_1 = parse_h5_obj(orig_h5[raw_path + "/1"])[0]
        #         except IndexError:
        #             raw_file_1 = ''
        #         try:
        #             raw_file_2 = parse_h5_obj(orig_h5[raw_path + "/2"])[0]
        #         except IndexError:
        #             raw_file_2 = ''
        #         raw_file = str(raw_file_1) + " and " + str(raw_file_2)
        #     except IndexError:
        #         raw_file = ''
#        epoch.set_dataset("description", "Raw Voltage trace data files used to acuqire spike times data: " + raw_file + "\n\
# ignore intervals: mark start and stop times of bad trials when mice are not performing")
        epoch.set_dataset("description", "Raw Voltage trace data files used to acuqire spike times data: " + raw_file)
        #epoch.set_ignore_intervals(ignore_intervals)
        # collect behavioral data
        ts = "/stimulus/presentation/auditory_cue"
        ut.add_epoch_ts(epoch, start, stop, "auditory_cue", ts)
        ts = "/stimulus/presentation/pole_in"
        ut.add_epoch_ts(epoch, start, stop, "pole_in", ts)
        ts = "/stimulus/presentation/pole_out"
        ut.add_epoch_ts(epoch, start, stop, "pole_out", ts)
        ts = "/acquisition/timeseries/lick_trace"
        ut.add_epoch_ts(epoch, start, stop,"lick_trace", ts)
        ts = "/stimulus/presentation/aom_input_trace"
        ut.add_epoch_ts(epoch, start, stop,"aom_input_trace", ts)
        ts = "/stimulus/presentation/simple_optogentic_stimuli"
        #ts = "/stimulus/presentation/laser_power"
# DEBUG -- don't add this right now -- too many samples make building file take too long
        #epoch.add_timeseries("laser_power", ts)
        ut.add_epoch_ts(epoch, start, stop, "simple_optogentic_stimuli", ts)
Example #13
def create_trials(orig_h5, simon):
    trial_id = orig_h5["trialIds/trialIds"].value
    trial_t = orig_h5["trialStartTimes/trialStartTimes"].value * 0.001
    # trial stop isn't stored. assume that it's twice the duration of other
    #   trials -- padding on the high side shouldn't matter
    ival = (trial_t[-1] - trial_t[0]) / (len(trial_t) - 1)
    trial_t = np.append(trial_t, trial_t[-1] + 2*ival)
    for i in range(len(trial_id)):
        tid = trial_id[i]
        trial = "Trial_%d%d%d" % (int(tid/100), int(tid/10)%10, tid%10)
        start = trial_t[i]
        stop = trial_t[i+1]
        #- epoch = simon.create_epoch(trial, start, stop)
        epoch = simon.make_group("<epoch_X>", trial)
        epoch.set_dataset("start_time", start)
        epoch.set_dataset("stop_time", stop)
        # pole_pos_path = "trialPropertiesHash/value/3/3"
        #         pole_pos = str(orig_h5[pole_pos_path].value[i])
        #         epoch.description = ("Stimulus position - in Zaber motor steps (approximately, 10,000 = 1 mm): " + pole_pos)
        if trial in epoch_roi_list:
            epoch.set_custom_dataset("ROIs", epoch_roi_list[trial])
            epoch.set_custom_dataset("ROI_planes", epoch_roi_planes[trial])
        tags = []
        if trial in epoch_trial_types:
            for j in range(len(epoch_trial_types[trial])):
                #- epoch.add_tag(epoch_trial_types[trial][j])
                tags.append(epoch_trial_types[trial][j])
        epoch.set_dataset("tags", tags)
        epoch.set_dataset("description", "Data that belong to " + trial)
        ts = "/processing/Licks/BehavioralEvents/lick_left"
        #- epoch.add_timeseries("lick_left", ts)
        ut.add_epoch_ts(epoch, start, stop, "lick_left", ts)
        ts = "/processing/Licks/BehavioralEvents/lick_right"
        ut.add_epoch_ts(epoch, start, stop, "lick_right", ts)
        ts = "/stimulus/presentation/water_left"
        ut.add_epoch_ts(epoch, start, stop, "water_left", ts)
        ts = "/stimulus/presentation/water_right"
        ut.add_epoch_ts(epoch, start, stop, "water_right", ts)
        ts = "/stimulus/presentation/pole_accessible"
        ut.add_epoch_ts(epoch, start, stop, "pole_accessible", ts)
        ts = "/processing/Whisker/BehavioralEpochs/pole_touch_protract"
        ut.add_epoch_ts(epoch, start, stop, "pole_touch_protract", ts)
        ts = "/processing/Whisker/BehavioralEpochs/pole_touch_retract"
        ut.add_epoch_ts(epoch, start, stop, "pole_touch_retract", ts)
        ts = "/stimulus/presentation/auditory_cue"
        ut.add_epoch_ts(epoch, start, stop, "auditory_cue", ts)
        ts = "/processing/Whisker/BehavioralTimeSeries/whisker_angle"
        ut.add_epoch_ts(epoch, start, stop, "whisker_angle", ts)
        ts = "/processing/Whisker/BehavioralTimeSeries/whisker_curve"
        ut.add_epoch_ts(epoch, start, stop, "whisker_curve", ts)
Example #14
        file_name, dataset_name = fetch_stimulus_link(seed, x, y, dx, dy)
        file_name_base = file_name[len(OUTPUT_DIR):]  # strip OUTPUT_DIR from front of name
        #- img.set_data_as_remote_link(file_name, dataset_name)
        link_str = "extlink:%s,%s" % (file_name_base, dataset_name)
        img.set_dataset("data",
                        link_str)  # special string, causes creation of external link
        img.set_dataset("bits_per_pixel", 8)
        img.set_dataset("format", "raw")
        img.set_dataset("dimension", [x / dx, y / dy])
        img.set_attr("description",
                     "type = " + str(type_s) + "; seed = " + str(seed))
        img.set_attr(
            "comments",
            "Based on ran1.bin. Pixel values are 255 for light, 0 for dark")
        # create epoch
        stim_end = timestamps[-1] + 1
        epoch = ut.create_epoch(bfile, "stim_%d" % (i + 1), stim_offset[i],
                                stim_end)
        stim_start = stim_offset[i]
        ts_path = "/stimulus/presentation/" + img.name
        ut.add_epoch_ts(epoch, stim_start, stim_end, "stimulus", ts_path)

    # create module 'Cells' for the spikes
    mod_name = "Cells"
    mod = bfile.make_group("<Module>", mod_name)
    mod.set_attr("description",
                 "Spike times for the individual cells and stimuli")
    mod.set_attr("source", "Data as reported in the original file")
    # create interfaces
    spk_times_iface = mod.make_group("UnitTimes")
    spk_times_iface.set_attr("source",
                             "Data as reported in the original crcns file")
    # determine number of cells
    spikes_mat = mfile["spikes"]
    num_cells = spikes_mat.shape[0]
    num_stims = spikes_mat.shape[1]
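
The snippet is truncated before the per-cell loop body. A hedged sketch of how
it might continue, following the NWB 1.x UnitTimes layout of one <unit_N> group
per cell with "times" and "unit_description" datasets (the names and the
indexing into spikes_mat below are assumptions):

    for i in range(num_cells):
        unit_name = "cell_%d" % (i + 1)               # illustrative unit name
        grp = spk_times_iface.make_group("<unit_N>", unit_name)
        grp.set_dataset("times", spikes_mat[i][0])    # spike times, first stimulus
        grp.set_dataset("unit_description", unit_name)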