Example no. 1
0
def bootstrap_pd_stats(b, cov, neural_data, model, ndim=3):
    '''
    Bootstrap preferred-direction statistics from fitted coefficients.

    Parameters
    ----------
    b : ndarray
      coefficients
    cov : ndarray
      covariance matrix of the coefficients
    neural_data : ndarray
      counts (GLM) or rates (OLS); trials containing NaN are excluded
      from the independent-sample count
    model : string
      specification of fit model; presence of 'd' selects the
      displacement coefficients (else 'v' for velocity), presence of
      'X' indicates binned (time-varying) coefficients
    ndim : int, optional
      dimensionality of the direction space (default 3)

    Returns
    -------
    pd : ndarray
      preferred directions
    ca : ndarray
      confidence angle of preferred direction
    kd : ndarray
      modulation depths (bootstrap mean)
    kdse : ndarray
      standard errors of modulation depths
    '''
    # directional coefficient key: displacement if present, else velocity
    d = 'd' if 'd' in model else 'v'
    # number of independent samples from original data;
    # compressing along the last axis gives the number of samples per
    # bin, which is correct since bootstrapping is done independently
    # for each bin
    nsamp = np.sum(~np.isnan(neural_data.sum(axis=1)))

    # bootstrap a distribution of b values
    # using mean, covariance matrix from GLM (/ OLS).
    bootb = np.random.multivariate_normal(b, cov, (nsamp,))
    bootdict = fit.unpack_many_coefficients(bootb, model, ndim=ndim)
    if 'X' in model:
        # binned model: shape (nsamp, nbin, ndim)
        bootpd = unitvec(bootdict[d], axis=2)
    else:
        # single estimate: shape (nsamp, ndim)
        bootpd = unitvec(bootdict[d], axis=1)

    # get mean pd to narrow kappa estimation
    bdict = fit.unpack_coefficients(b, model, ndim=ndim)
    if 'X' in model:
        nbin = bdict[d].shape[0]
        pd = unitvec(bdict[d], axis=1)
        pdca = np.zeros((nbin))
        for i in range(nbin):
            # estimate kappa of the Fisher distribution of the
            # bootstrapped PDs for this bin
            k = ss.estimate_kappa(bootpd[:, i], mu=pd[i])
            # estimate ca (preferred direction Confidence Angle)
            pdca[i] = ss.measure_percentile_angle_ex_kappa(k)

        # modulation depth of each bootstrap sample, shape (nsamp, nbin)
        bootkd = norm(bootdict[d], axis=2)
        # BUGFIX: previously the whole (nsamp, nbin) bootstrap array was
        # returned as kd; take the bootstrap mean so kd is one depth per
        # bin, matching both the docstring and the non-binned branch,
        # and shape-compatible with kdse
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    else:
        nbin = 1
        pd = unitvec(bdict[d])
        k = ss.estimate_kappa(bootpd, mu=pd)
        pdca = ss.measure_percentile_angle_ex_kappa(k)
        bootkd = norm(bootdict[d], axis=1)
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    return pd, pdca, kd, kdse
Example no. 2
0
def calc_all_pd_uncerts(count, bds, bsed, ndim=3):
    '''
    Calculate uncertainties in PD estimates from directional regression
    coefficients and their standard errors.

    Parameters
    ----------
    count : array_like
      spike counts (PSTHs), shape (n_trials, n_bins)
    bds : ndarray
      coefficients from regression, shape (n_calcs, n_coefs)
    bsed : ndarray
      standard errors of b coefficients, shape (n_calcs, n_coefs)
    ndim : int, optional
      dimensionality of the preferred directions (default 3)

    Returns
    -------
    pd : array_like
      preferred directions, shape (n_windows, 3)
    k : array_like
      modulation depths of pds, shape (n_windows)
    k_se : array_like
      standard deviations of modulation depths
    theta_percentile : array_like
      95% percentile angle of pd, shape (n_windows)

    Notes
    -----
    Number of samples should be calculated from number of non-zero elements
    in regression. With the moving PD calculations, time-changing coefficients
    are regressed using 0 values many times, but these aren't really
    observations. Should be okay as it is - i.e. number of non-nan trials,
    since each non-nan trial contributes one time point to each estimate of PD

    2011-Feb-01 - I think this should just be non-nan elements, since I have now
    switched to using GLM and counts rather than rates, and zero-counts are just
    one observation of the Poisson process: implementation remains the same.
    '''
    warn(DeprecationWarning("Doesn't calculate using covariance matrix. "
                            "Use ___ instead."))
    # isinstance instead of `type(x) == ...` so ndarray subclasses
    # (e.g. masked arrays) are accepted
    assert isinstance(bds, np.ndarray)
    assert isinstance(bsed, np.ndarray)

    # independent samples = trials with no NaN in any bin
    n_samp = np.sum(~np.isnan(count.sum(axis=1)))
    n_calcs = bds.shape[0]

    pd = np.empty((n_calcs, ndim))
    k = np.empty((n_calcs))
    k_se = np.empty((n_calcs))       # std err of modulation depth
    kappa = np.empty((n_calcs))      # spread of pd Fisher dist
    R = np.empty((n_calcs))          # resultants of pd Fisher dist
    theta_pc = np.empty((n_calcs))   # 95% percentile angle

    for i in range(n_calcs):
        # calc pds from regression coefficients:
        # modulation depth is the vector norm, pd the unit vector
        b_d = bds[i]
        k[i] = norm(b_d)
        pd[i] = b_d / k[i]
        k_se[i], kappa[i], R[i] = \
            old_bootstrap_pd_stats(b_d, bsed[i], k[i], n_samp)
        theta_pc[i] = ss.measure_percentile_angle_ex_kappa(kappa[i])
    return pd, k, k_se, theta_pc