Example #1
    def readdata(self, start=None, stop=None):

        if start is None:
            start = self.starttime

        if stop is None:
            stop = self.endtime

        if not (self._expand_time and self._unwrap_data):  # slice on records
            tslice = self.timeslice(start, stop, extra=0)
            return self.data.default[tslice]

        # read one extra record on each side, then trim to the exact window
        tslice = self.timeslice(start, stop, extra=1)
        data = self.data.default[tslice]

        seg = Segment([start, stop])

        if self._field_groups['default'] == ['time']:
            # contains() returns (mask, counts, first indices); keep the mask
            valid = seg.contains(data)[0]
            data = data[valid]
        elif isinstance(data, dict) and 'time' in data:
            valid = seg.contains(data['time'])[0]
            data['time'] = data['time'][valid]

            if 'signal' in data:
                data['signal'] = data['signal'][valid]

        return data
Example #2
def test_segment_difference():
    s1 = Segment([[0, 10], [20, 30], [50, 100]])
    s2 = Segment([[4, 6], [15, 25], [40, 200]])
    s1.difference(s2)
    np.testing.assert_equal(
        s1._data,
        np.array([[0, 4], [6, 10], [15, 20], [25, 30], [40, 50], [100, 200]]))
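The expected array above shows that `difference` is in fact a symmetric difference: it keeps the regions covered by exactly one operand (Example #15 below confirms this via the `^` operator). Below is a standalone boundary-sweep sketch of those set semantics; the library method mutates `s1` in place, so this helper is only an illustration, not the library's implementation.

import numpy as np

def interval_symmetric_difference(a, b):
    # Each boundary nudges a coverage count: +1 at a start, -1 at a stop.
    events = sorted([(s, +1) for s, _ in a] + [(e, -1) for _, e in a] +
                    [(s, +1) for s, _ in b] + [(e, -1) for _, e in b])
    pieces, count, prev = [], 0, None
    for x, delta in events:
        if count == 1 and prev is not None and x > prev:
            pieces.append([prev, x])  # exactly one operand covered [prev, x)
        count += delta
        prev = x
    # merge pieces that touch (a region can be emitted in several parts)
    merged = []
    for piece in pieces:
        if merged and merged[-1][1] == piece[0]:
            merged[-1][1] = piece[1]
        else:
            merged.append(piece)
    return np.array(merged)

# Reproduces the expected result above:
# interval_symmetric_difference([[0, 10], [20, 30], [50, 100]],
#                               [[4, 6], [15, 25], [40, 200]])
# -> [[0, 4], [6, 10], [15, 20], [25, 30], [40, 50], [100, 200]]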
Example #3
def read(self) -> ThresholdSweep:
    sweep = ThresholdSweep()
    with self.open_file_for_read() as file:
        for group in file.values():
            te = ThresholdEvaluation(
                threshold=group.attrs["threshold"],
                first_detections=group["first_detections"][:],
                correct_detections=group["correct_detections"][:],
                incorrect_detections=group["incorrect_detections"][:],
                detected_reference_segs=Segment(
                    group["detected_reference_segs"][:], check=False),
                undetected_reference_segs=Segment(
                    group["undetected_reference_segs"][:], check=False),
            )
            sweep.add_threshold_evaluation(te)
    return sweep
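For context, here is a hypothetical counterpart writer for this reader, assuming an h5py-style file object. The per-group layout (a "threshold" attribute plus these five datasets) mirrors what `read` expects; the names `write`, `open_file_for_write`, and `sweep.threshold_evaluations` are assumptions, not necessarily the project's API.

def write(self, sweep: ThresholdSweep):
    with self.open_file_for_write() as file:
        for i, te in enumerate(sweep.threshold_evaluations):
            group = file.create_group(str(i))  # one group per evaluation
            group.attrs["threshold"] = te.threshold
            group["first_detections"] = te.first_detections
            group["correct_detections"] = te.correct_detections
            group["incorrect_detections"] = te.incorrect_detections
            group["detected_reference_segs"] = te.detected_reference_segs._data
            group["undetected_reference_segs"] = (
                te.undetected_reference_segs._data)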
Example #4
def add_segments(ax: Axes, segments: Segment, alpha=0.3, **kwargs):
    """ Draw vertical bands on the plot, one for each segment. """
    visible_segs = segments.intersection(ax.get_xlim())
    plot_segments(visible_segs,
                  axes=ax,
                  fullheight=True,
                  alpha=alpha,
                  **kwargs)
Example #5
def _contains_any_detection(self, time_range: TimeRange) -> bool:
    seg = Segment(time_range)
    for sweep in self.threshold_sweeps:
        te = sweep.at_max_F1()
        if (_contains_at_least_one(seg, te.correct_detections)
                or _contains_at_least_one(seg, te.incorrect_detections)):
            return True
    return False
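`_contains_at_least_one` is not shown on this page; given the `SegmentEventIntersection` API used in Examples #6 and #16, it could be as small as the following sketch (an assumption, not the project's actual helper):

def _contains_at_least_one(seg: Segment, events: ndarray) -> bool:
    # true if any event falls inside any of the segments
    return SegmentEventIntersection(seg, events).num_events_in_seg.sum() > 0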
Example #6
File: threshold.py  Project: tfiers/sharp
def evaluate_threshold(
    envelope: Signal,
    threshold: float,
    lockout_time: float,
    reference_segs: Segment,
) -> ThresholdEvaluation:
    """
    Evaluate the output of a detector for some threshold and lockout time.
    
    Calculates detections. These are the events where `envelope` crosses
    `threshold` (with a minimum distance of `lockout_time` between detections).
    Classifies detections into {correct, incorrect}, and reference segments
    into {detected, not_detected}. For each detected segment, finds the first
    event that intersected with it.

    :param envelope:
    :param threshold:
    :param lockout_time: Duration after a detection during which no other
                detections can be made. In seconds.
    :param reference_segs:  The (start, stop) tuples that indicate baseline
                "true" SWR segments. In seconds.
    :return:  Initalized ThresholdEvaluation object.
    """
    log.info(f"Evaluating threshold {threshold:.3g}")
    eval_segs = Segment(reference_segs._data -
                        [config.eval_start_extension, 0])
    detections = calc_detections(envelope, threshold, lockout_time)
    intersection = SegmentEventIntersection(eval_segs, detections)
    detection_is_correct = intersection.event_is_in_seg
    reference_seg_is_detected = intersection.num_events_in_seg > 0
    if intersection.first_event_in_seg.size > 0:
        first_detections = detections[intersection.first_event_in_seg]
    else:
        first_detections = array([])
    return ThresholdEvaluation(
        threshold=threshold,
        first_detections=first_detections,
        correct_detections=detections[detection_is_correct],
        incorrect_detections=detections[~detection_is_correct],
        detected_reference_segs=reference_segs[reference_seg_is_detected],
        undetected_reference_segs=reference_segs[~reference_seg_is_detected],
        detections=detections,
        detection_is_correct=detection_is_correct,
        reference_seg_is_detected=reference_seg_is_detected,
    )
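`calc_detections` is referenced but not defined here. Below is a minimal sketch of what the docstring describes (upward threshold crossings, with later crossings suppressed for `lockout_time` seconds), assuming `envelope` behaves like a 1-D array with a sampling-frequency attribute `fs`; both are assumptions, not the project's actual implementation.

import numpy as np

def calc_detections(envelope, threshold, lockout_time):
    values = np.asarray(envelope)
    # sample indices where the envelope crosses the threshold from below
    crossings = np.nonzero((values[:-1] < threshold)
                           & (values[1:] >= threshold))[0] + 1
    times = crossings / envelope.fs
    # enforce the lockout: keep a crossing only if it falls at least
    # `lockout_time` seconds after the last kept detection
    detections = []
    last = -np.inf
    for t in times:
        if t - last >= lockout_time:
            detections.append(t)
            last = t
    return np.array(detections)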
Example #7
def read(self) -> List[Segment]:
    return [Segment(array) for array in super().read()]
Example #8
def test_segment_intersection_2():
    s1 = Segment([[0, 10], [20, 30], [50, 100]])
    s2 = Segment([[4, 6], [15, 25], [40, 200]])
    s3 = Segment([[8, 20], [35, 60], [150, 180]])
    nose.tools.assert_equal(s1 & s2 & s3, s1.intersection(s2, s3))
Example #9
def getTestSet(behavior,
               ephys,
               config,
               event=None,
               replay=False,
               rm_no_spk=True,
               count_bins_each_event=True):
    """
    get test set from run/sleep epoch based on configuration

    Args:
        behavior - behavior data during run epoch
        ephys - spike data of run/sleep epoch
        config - configuration object
        event - replay events (required when replay is True)
        replay - whether to test on replay data or run data
                 run: False
                 replay: True
    returns:
        test_binned - test segments (binned)
        event_bins - bin indices belonging to each event (replay data), or
                     the number of test bins if count_bins_each_event is False
        n_spikes_all - number of spikes in each bin
        true_behavior - interpolated position at bin centers (run data only)
    """
    if replay:
        test = seg(event['postNREMevent'])
        bin_size_test = config.bin_size_sleep
    else:
        run = seg.fromlogical(behavior["speed"] > config.run_speed,
                              x=behavior["time"])
        run = run[run.duration > config.min_run_duration]
        # split segments into independent bins
        bin_size_test = config.bin_size_run
        run_binned = run.split(size=bin_size_test)
        # drop the leading fraction of data that is not to be used
        r_data_not_to_use = config.r_data_not_to_use
        first_bin_idx = int(r_data_not_to_use * len(run_binned))
        used_data = run_binned[first_bin_idx:]
        # the bins after the training fraction form the test set
        r_train = config.r_train
        n_train = int(r_train * len(used_data))
        test = used_data[n_train:]

    print("###################################################################################")
    print("Test set:")
    testing_time = np.sum(test.duration)
    print("testing_time={} s".format(testing_time))
    test_binned = test.split(size=bin_size_test)
    print("binsize={}, test bins = {}".format(bin_size_test, len(test_binned)))

    # find number of spikes within test datasets
    n_spikes = []
    sum_tt = []
    mean_tt = []
    no_spike_bin_idx = []
    spike_bin_idx = []
    n_spikes_all = np.zeros(len(test_binned))
    max_bin = 0
    max_spike = 0

    for i, key in enumerate(ephys):
        tt = ephys[key]
        n_spikes.append(test_binned.contains(tt['spike_times'])[1])
        n_spikes_all = n_spikes_all + n_spikes[i]
        sum_tt.append(sum(n_spikes[i]))
        mean_tt.append(np.mean(n_spikes[i]))
    print "get spike count done"
    n_spikes_bin = []
    for i in range(len(n_spikes_all)):
        n_spikes_bin.append(
            np.asarray([n_spikes[j][i] for j in range(len(ephys))],
                       dtype=np.int32))
        if n_spikes_all[i] > max_spike:
            max_spike = n_spikes_all[i]
            max_bin = i
    print "get spike count per bin done"
    if rm_no_spk:
        for j, n in enumerate(n_spikes_all):
            if n == 0:
                no_spike_bin_idx.append(j)
            else:
                spike_bin_idx.append(j)
        if len(no_spike_bin_idx) > 0:
            test_binned = test_binned[spike_bin_idx]
        print "{} no spike bins removed".format(len(no_spike_bin_idx))

    testing_time = np.sum(test_binned.duration)
    print("testing_time={} s".format(testing_time))
    print("")

    if count_bins_each_event:
        # get number of bins in each event
        if replay:
            event_bins = [[] for _ in test]  # independent lists per event
            for j, evnt_bin in enumerate(test_binned):
                for i, env in enumerate(test):
                    if env[0] <= evnt_bin[0] and env[1] >= evnt_bin[1]:
                        event_bins[i] = event_bins[i] + [j]
                        break
            #spio.savemat('event_bins.mat',{'event_bins':event_bins})
            true_behavior = []
        else:
            event_bins = []
            true_behavior = interpolate.interp1d(
                behavior["time"], behavior["linear_position"],
                kind='linear', axis=0)(test_binned.center)
        print("get #bins in each event done")
    else:
        event_bins = len(test_binned)
        true_behavior = []

    return test_binned, event_bins, n_spikes_all, true_behavior
Example #10
def test_segment_equal():
    s1 = Segment([[1, 2], [3, 4], [10, 20]])
    s2 = Segment([[1, 2], [3, 4], [10, 20]])
    nose.tools.assert_equal(s1, s2)
Example #11
def test_segment_construct_empty():
    s = Segment([])
    nose.tools.assert_is_instance(s, Segment)
Example #12
def test_segment_construct_from_array():
    s = Segment(np.ones((10, 2)))
    nose.tools.assert_is_instance(s, Segment)
Example #13
def test_segment_construct_from_nested_list():
    s = Segment([[1, 2], [3, 4], [5, 6]])
    nose.tools.assert_is_instance(s, Segment)
Example #14
def test_segment_construct_from_list():
    s = Segment([1, 2])
    nose.tools.assert_is_instance(s, Segment)
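Taken together, Examples #11 to #14 document the constructor's accepted input shapes; presumably each of these is normalized to an (n, 2) array of (start, stop) rows:

import numpy as np

Segment([])                        # no segments
Segment([1, 2])                    # a single (start, stop) pair
Segment([[1, 2], [3, 4], [5, 6]])  # a nested list of pairs
Segment(np.ones((10, 2)))          # an (n, 2) ndarray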
Example #15
def test_segment_difference_2():
    s1 = Segment([[0, 10], [20, 30], [50, 100]])
    s2 = Segment([[4, 6], [15, 25], [40, 200]])
    s3 = Segment([[8, 20], [35, 60], [150, 180]])
    nose.tools.assert_equal(s1 ^ s2 ^ s3, s1.difference(s2, s3))
Example #16
def _get_visible_events(ax, events):
    view = Segment(ax.get_xlim())
    intersection = SegmentEventIntersection(view, events)
    return events[intersection.event_is_in_seg]
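Hypothetical usage of the helper above, assuming `events` is a 1-D array of event times and `ax` a Matplotlib Axes; only events inside the current x-limits get drawn:

visible = _get_visible_events(ax, events)
ax.eventplot(visible)  # one vertical tick per visible event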
Example #17
def test_segment_equal_2():
    # comparing with a non-Segment should be unequal, not raise
    nose.tools.assert_false(Segment([[1, 2], [10, 20]]) == 'a')
Example #18
def getTrainSet(behavior, ephys, config):
    """
    get train set from run epoch based on configuration

    Args:
        config - configuration object
        behavior - behavior data during run epoch
        ephys - spike data of run epoch
    returns:
        train - train segments
        training_time - total time of the training dataset
    """
    print "###################################################################################"
    print "Train set:"
    # select run segments
    print "run speed={}, min run duration={}, binsize={}".format(
        config.run_speed, config.min_run_duration, config.bin_size_run)
    run = seg.fromlogical(behavior["speed"] > config.run_speed,
                          x=behavior["time"])
    run = run[run.duration > config.min_run_duration]

    # split segments into independent bins
    bin_size_test = config.bin_size_run
    run_binned = run.split(size=bin_size_test)
    r_data_not_to_use = config.r_data_not_to_use
    first_bin_idx = int(r_data_not_to_use * len(run_binned))
    used_data = run_binned[first_bin_idx:]
    r_train = config.r_train
    n_train = int(r_train * len(used_data))
    train = used_data[0:n_train]

    total_time = ephys[u'TT1']['spike_times'][-1] - ephys[u'TT1'][
        'spike_times'][0]
    print "total recording time = {} min, start time = {} s, end time = {} s".format(
        total_time / 60, ephys[u'TT1']['spike_times'][0],
        ephys[u'TT1']['spike_times'][-1])
    training_time = np.sum(train.duration)
    print "training_time={} s".format(training_time)

    # find number of spikes within train datasets
    n_spikes = []
    sum_tt = []
    no_spike_bin_idx = []
    spike_bin_idx = []
    n_spikes_all = np.zeros(len(train))
    for i, key in enumerate(ephys):
        tt = ephys[key]
        n_spikes.append(train.contains(tt['spike_times'])[1])
        n_spikes_all = n_spikes_all + n_spikes[i]
        sum_tt.append(sum(n_spikes[i]))
    # remove bins without spike
    for j, n in enumerate(n_spikes_all):
        if n == 0:
            no_spike_bin_idx.append(j)
        else:
            spike_bin_idx.append(j)
    if len(no_spike_bin_idx) > 0:
        train = train[spike_bin_idx]

    print "number spikes for training:"
    print np.sum(sum_tt)
    print ""
    return train, training_time
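Note that getTrainSet and getTestSet (Example #9) split the same binned run data complementarily: after dropping the leading `r_data_not_to_use` fraction of bins, the first `r_train` fraction is the train set and the remainder is the test set. Hypothetical combined usage:

train, training_time = getTrainSet(behavior, ephys, config)
test_binned, event_bins, n_spikes_all, true_behavior = getTestSet(
    behavior, ephys, config)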
Example #19
def test_segment_exclusive_2():
    s1 = Segment([[0, 10], [20, 30], [50, 100]])
    s2 = Segment([[4, 6], [15, 25], [40, 200]])
    s3 = Segment([[8, 20], [35, 60], [150, 180]])
    nose.tools.assert_equal(s1 & ~s2 & ~s3, s1.exclusive(s2, s3))
Example #20
def test_segment_intersection():
    s1 = Segment([[0, 10], [20, 30], [50, 100]])
    s2 = Segment([[4, 6], [15, 25], [40, 200]])
    s1.intersection(s2)
    np.testing.assert_equal(s1._data, np.array([[4, 6], [20, 25], [50, 100]]))
Example #21
def __init__(self, segs: Segment, events: ndarray):
    """ Segments are assumed sorted. """
    self._isinseg, self._ninseg, self._contains = segs.contains(events)
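This constructor pins down the return signature of `Segment.contains`: a per-event boolean mask, a per-segment event count, and per-segment first-event indices. Below is a sketch of how that triple could be computed with `np.searchsorted`, assuming sorted, non-overlapping segments and an inclusive stop boundary; it illustrates the observed signature and is not necessarily the library's implementation.

import numpy as np

def contains(segs, events):
    """segs: (n, 2) array of sorted [start, stop] rows; events: 1-D array."""
    starts, stops = segs[:, 0], segs[:, 1]
    # for each event, the index of the last segment starting at or before it
    idx = np.searchsorted(starts, events, side="right") - 1
    in_seg = (idx >= 0) & (events <= stops[np.clip(idx, 0, None)])
    n_in_seg = np.zeros(len(segs), dtype=int)
    first_in_seg = np.full(len(segs), -1)  # -1 marks an empty segment
    for ev_i in np.nonzero(in_seg)[0]:
        seg_i = idx[ev_i]
        n_in_seg[seg_i] += 1
        if first_in_seg[seg_i] == -1:
            first_in_seg[seg_i] = ev_i
    return in_seg, n_in_seg, first_in_seg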