Example #1
def no_singularity_filter(a_unit,
                          starts,
                          len_samples,
                          bin_size=10,
                          s_f=30000,
                          history_bins=1):
    """
    :param starts: list or np array of starting points
    :param len_samples: length in samples of the 'trial'
    :param a_unit: one Unit objects (as in units.py)
    :param bin_size: size of the bin for the spike count
    :param history_bins: number of bins previous to starting points to include
    :return: True if the unit has at leas one spike in the raster
    """
    bin_size_samples = int(bin_size * s_f / 1000.)
    # trim the trial to a whole number of bins
    len_bin = int(len_samples / bin_size_samples)
    len_ms = int(len_bin * bin_size)
    history_samples = history_bins * bin_size_samples
    span_ms = len_ms + bin_size * history_bins

    raster = a_unit.get_raster(starts - history_samples,
                               span_ms,
                               span_is_ms=True,
                               return_ms=True)

    sparse_raster = bp.col_binned(bp.sparse_raster(raster), bin_size)

    return sparse_raster.any()
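
Every example in this section converts the bin size from milliseconds to samples with `bin_size * s_f / 1000` and then counts spikes per bin with `bp.col_binned`. For readers without the local `bp` module, here is a minimal, self-contained sketch of what that binning step presumably does; `col_binned_sketch` and its drop-the-remainder behavior are assumptions, not the real `bp` API:

import numpy as np


def col_binned_sketch(arr, bin_size):
    # hypothetical stand-in for bp.col_binned: sum every `bin_size`
    # consecutive columns, dropping trailing columns that don't fill a bin
    n_rows, n_cols = arr.shape
    n_bins = n_cols // bin_size
    trimmed = arr[:, :n_bins * bin_size]
    return trimmed.reshape(n_rows, n_bins, bin_size).sum(axis=2)


# the ms-to-samples arithmetic used above: 10 ms bins at 30 kHz
s_f, bin_size = 30000, 10
bin_size_samples = int(bin_size * s_f / 1000.)  # 300 samples per bin

raster = (np.random.rand(5, 3000) < 0.01).astype(int)  # 5 trials, 3000 ms
binned = col_binned_sketch(raster, bin_size)
print(binned.shape)  # (5, 300): 5 trials x 300 bins of 10 ms each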
Example #2
def linear_fit(channels,
               thresholds,
               kwd_file,
               trial_starts,
               trial_recs,
               par_stream,
               bin_size=10,
               history_bins=15,
               s_f=30000):
    logger.info('Fitting a kernel')
    bin_size_samples = int(bin_size * s_f / 1000.)
    len_samples = par_stream.shape[0]
    # bin the parameter stream and express it as a per-sample average
    model_pars = bp.col_binned(np.array([par_stream]),
                               bin_size_samples) / bin_size_samples

    s_v, chans = support_vector_from_raw(trial_starts,
                                         trial_recs,
                                         len_samples,
                                         channels,
                                         thresholds,
                                         kwd_file,
                                         bin_size=bin_size,
                                         s_f=s_f,
                                         history_bins=history_bins + 1,
                                         no_silent=True)

    target = np.tile(model_pars,
                     trial_starts.size).reshape(trial_starts.size, -1)
    logger.info('sv shape {0}, target shape {1}'.format(
        s_v.shape, target.shape))
    return ld.fit_kernel(s_v, target, history_bins + 1), chans
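
`ld.fit_kernel` is not shown here; given the shapes used above (a support vector carrying `history_bins + 1` lags of history and a target of binned parameter values per trial), a plausible reading is an ordinary least-squares fit of a per-unit linear kernel over the lagged bins. The sketch below is that assumption made concrete, not the actual `ld` implementation:

import numpy as np


def fit_kernel_sketch(s_v, target, n_lags):
    # hypothetical stand-in for ld.fit_kernel; assumes
    # s_v: [n_units, n_bins + n_lags - 1, n_trials], target: [n_trials, n_bins]
    n_units, _, n_trials = s_v.shape
    n_bins = target.shape[1]
    rows, ys = [], []
    for t in range(n_trials):
        for b in range(n_bins):
            # one row per (trial, bin): every unit over the n_lags history bins
            rows.append(s_v[:, b:b + n_lags, t].ravel())
            ys.append(target[t, b])
    X, y = np.array(rows), np.array(ys)
    kernel, *_ = np.linalg.lstsq(X, y, rcond=None)  # min ||X k - y||^2
    return kernel.reshape(n_units, n_lags)


s_v = np.random.poisson(1.0, size=(4, 20 + 14, 8))  # 4 units, 15 lags, 8 trials
target = np.random.rand(8, 20)
print(fit_kernel_sketch(s_v, target, n_lags=15).shape)  # (4, 15)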
Example #3
def support_vector_from_raw(starts,
                            recs,
                            len_samples,
                            channels,
                            thresholds,
                            kwd_file,
                            bin_size=10,
                            s_f=30000,
                            history_bins=1,
                            no_silent=True):
    bin_size_samples = int(bin_size * s_f / 1000.)
    len_bin = int(len_samples / bin_size_samples)
    len_ms = int(len_bin * bin_size)
    history_samples = history_bins * bin_size_samples
    span_ms = len_ms + bin_size * history_bins
    span_samples = int(span_ms * s_f / 1000.)

    logger.info('Creating support vector {0} chans, {1} trials'.format(
        channels.size, starts.size))
    # logger.info('span_ms = {}'.format(span_ms))
    # logger.info('{} units'.format(len(all_units)))

    all_frames = collect_frames(starts - history_samples, span_samples, s_f,
                                kwd_file, recs, channels)

    filter_band = [500, 10000]
    filter_pars = sp.make_butter_bandpass(s_f, filter_band[0], filter_band[1])
    # band-pass filter every frame in place
    for fr in all_frames:
        fr.apply_filter(sp.apply_butter_bandpass, filter_pars)
    all_spk_arr = collect_all_spk_arr(all_frames, thresholds)

    rst_sv = np.stack([
        bp.col_binned(all_spk_arr[t, :, :].T, bin_size_samples)
        for t in range(all_spk_arr.shape[0])
    ], axis=2)

    if no_silent:
        good_chans = ~find_silent(rst_sv)
    else:
        # boolean mask, so both branches index (and return) the same way
        good_chans = np.ones(channels.size, dtype=bool)

    return rst_sv[good_chans, :, :], good_chans
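
The filter-then-threshold step above depends on the local `sp` helpers and `collect_all_spk_arr`. As a self-contained illustration of the same idea, the sketch below band-passes a raw trace in the 500-10000 Hz band used above and marks negative threshold crossings with `scipy.signal`; the function name and the crossing convention are assumptions:

import numpy as np
from scipy import signal


def detect_spikes_sketch(raw, threshold, s_f=30000, band=(500, 10000)):
    # 4th-order Butterworth band-pass (zero-phase), then flag the first
    # sample of every excursion below -|threshold|
    sos = signal.butter(4, band, btype='band', fs=s_f, output='sos')
    filtered = signal.sosfiltfilt(sos, raw)
    below = filtered < -abs(threshold)
    onsets = below & ~np.roll(below, 1)
    onsets[0] = below[0]
    return onsets.astype(int)


raw = np.random.randn(30000)  # 1 s of noise at 30 kHz
print(detect_spikes_sketch(raw, threshold=3.0).sum(), 'threshold crossings')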
Example #4
def test_fit(channels,
             thresholds,
             kwd_file,
             starts,
             recs,
             trials_fit,
             trials_test,
             par_stream,
             bin_size=10,
             history_bins=15,
             s_f=30000,
             nonlinear_fun=lambda x: x):
    fitted_kernel, fitted_chans = linear_fit(channels,
                                             thresholds,
                                             kwd_file,
                                             starts[trials_fit],
                                             recs[trials_fit],
                                             par_stream,
                                             bin_size=bin_size,
                                             history_bins=history_bins,
                                             s_f=s_f)

    par_predict = linear_predict(channels[fitted_chans],
                                 thresholds[fitted_chans],
                                 kwd_file,
                                 starts[trials_test],
                                 recs[trials_test],
                                 par_stream.size,
                                 fitted_kernel,
                                 bin_size=bin_size,
                                 history_bins=history_bins,
                                 s_f=s_f,
                                 no_silent=False)

    bin_size_samples = int(bin_size * s_f / 1000.)
    binned_pars = bp.col_binned(np.array([par_stream]),
                                bin_size_samples) / bin_size_samples
    target = np.tile(binned_pars,
                     trials_test.size).reshape(trials_test.size, -1)

    kernel_predict = nonlinear_fun(par_predict)
    # sanity check: more trials should go to fitting than to testing
    assert trials_fit.size > trials_test.size
    residue = np.linalg.norm(kernel_predict - target) / np.linalg.norm(target)
    return fitted_kernel, kernel_predict, residue, target, fitted_chans
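
The score returned by `test_fit` is the relative L2 error of the prediction: 0 means a perfect fit, and values near 1 mean the prediction is no better than predicting all zeros. A tiny worked example:

import numpy as np

target = np.array([1.0, 2.0, 3.0, 4.0])
predicted = np.array([1.1, 1.9, 3.2, 3.8])
# same metric as in test_fit: ||prediction - target|| / ||target||
residue = np.linalg.norm(predicted - target) / np.linalg.norm(target)
print(residue)  # ~0.058, i.e. about 5.8% relative error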
Example #5
def support_vector(starts,
                   len_samples,
                   all_units,
                   bin_size=10,
                   s_f=30000,
                   history_bins=1,
                   no_silent=True):
    """
    :param starts: list or np array of starting points
    :param len_samples: length in samples of the 'trial'
    :param all_units: list of Unit objects (as in units.py)
    :param bin_size: size of the bin for the spike count
    :param history_bins: number of bins previous to starting points to include
    :param no_silent: exclude units that don't spike (to prevent singular support arrays)
    :return: np array [n_bins, n_units, n_trials] (compatible with other features sup vecs)
    """
    bin_size_samples = int(bin_size * s_f / 1000.)
    len_bin = int(len_samples / bin_size_samples)
    len_ms = int(len_bin * bin_size)

    history_samples = history_bins * bin_size_samples

    span_ms = len_ms + bin_size * history_bins
    # logger.info('span_ms = {}'.format(span_ms))
    sup_vec = []
    sup_vec_units = []
    # logger.info('{} units'.format(len(all_units)))
    for i, a_unit in enumerate(all_units):
        raster = a_unit.get_raster(starts - history_samples,
                                   span_ms,
                                   span_is_ms=True,
                                   return_ms=True)
        sparse_raster = bp.col_binned(bp.sparse_raster(raster), bin_size)

        if no_silent and not sparse_raster.any():
            logger.warning('Skipping silent unit')
        else:
            sup_vec.append(sparse_raster.T)
            sup_vec_units.append(a_unit)
    # logger.info('sparse raster shape = {}'.format(sparse_raster.shape))
    return np.stack(sup_vec, axis=0), sup_vec_units
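
The `no_silent` filtering in this loop (and in Example #3) exists because a unit with no spikes contributes an all-zero row to the support array, which makes the downstream least-squares problem rank-deficient. A minimal, self-contained version of that exclusion on synthetic data:

import numpy as np

sup = np.random.poisson(0.5, size=(6, 40, 10))  # [n_units, n_bins, n_trials]
sup[2] = 0                                      # unit 2 never spikes
active = sup.any(axis=(1, 2))                   # False only for silent units
print(active)             # [ True  True False  True  True  True]
print(sup[active].shape)  # (5, 40, 10)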