Example #1
import numpy as np

# glm_any_model and unpack_coefficients are assumed to be project-level
# helpers (GLM fitting and coefficient unpacking) imported elsewhere.

def get_sort_by_pd(bnd):
    '''
    Parameters
    ----------
    bnd : cell.BinnedData instance
      contains PSTHs (n, ...) and positions (n, ...) from construct_PSTHs;
      its `tasks` attribute, shape (n, 6), holds the start and target
      positions of the movements

    Returns
    -------
    ndarray, shape (ndset, n)
      indices that sort the PSTH tasks by angle from each dataset's PD
    '''
    #    bnd.ensure_flat_inplace()
    tasks = bnd.tasks
    starts, stops = tasks[:,:3], tasks[:,3:]
    dirs = stops - starts
    # normalize so the dot product with the unit-length PDs below is a cosine
    dirs = dirs / np.linalg.norm(dirs, axis=1)[:, np.newaxis]
    ndset = bnd.count.shape[2]    
    pds = np.zeros((ndset, 3))    

    for i in range(ndset):
        count, pos, time = bnd.get_for_glm(i)
        model = 'kd'
        b, cov = glm_any_model(count, pos, time, model=model)
        bdict = unpack_coefficients(b, model=model)
        pds[i] = bdict['d'] / np.linalg.norm(bdict['d'])
        
    # clip guards against round-off pushing the cosine outside [-1, 1]
    angs = np.arccos(np.clip(np.dot(pds, dirs.T), -1.0, 1.0))
    return np.argsort(angs, axis=-1)
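A minimal usage sketch, assuming a cell.BinnedData instance `bnd` built elsewhere (e.g. via construct_PSTHs); the variable names are illustrative only:

# hypothetical usage -- `bnd` is assumed to come from the project's
# construct_PSTHs pipeline
sort_idx = get_sort_by_pd(bnd)            # shape (ndset, n)
# tasks of the first dataset, ordered by angle from that dataset's PD
ordered_tasks = bnd.tasks[sort_idx[0]]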
Example #2
import numpy as np

# fit, ss (spherical statistics), unitvec and norm are assumed to be
# project-level helpers imported elsewhere in the original module.

def bootstrap_pd_stats(b, cov, neural_data, model, ndim=3):
    '''
    Parameters
    ----------
    b : ndarray
      coefficients
    cov : ndarray
      covariance matrix
    neural_data : ndarray
      counts (GLM) or rates (OLS)
    model : str
      specification of the fitted model
    ndim : int, optional
      number of spatial dimensions of the data (default 3)

    Returns
    -------
    pd : ndarray
      preferred directions
    ca : ndarray
      confidence angle of preferred direction
    kd : ndarray
      modulation depths
    kdse : ndarray
      standard errors of modulation depths
    '''
    # coefficient key for the directional term ('d' or 'v', depending on model)
    d = 'd' if 'd' in model else 'v'
    # number of independent samples from the original data;
    # summing along the last axis gives the number of samples per bin,
    # which is correct since bootstrapping is done independently for each bin
    nsamp = np.sum(~np.isnan(neural_data.sum(axis=1)))
    
    # bootstrap a distribution of b values
    # using mean, covariance matrix from GLM (/ OLS).
    bootb = np.random.multivariate_normal(b, cov, (nsamp,))
    bootdict = fit.unpack_many_coefficients(bootb, model, ndim=ndim)
    if 'X' in model:
        bootpd = unitvec(bootdict[d], axis=2)
        # has shape nsamp, nbin, ndim
    else:
        bootpd = unitvec(bootdict[d], axis=1)
        # has shape nsamp, ndim

    # get mean pd to narrow kappa estimation
    bdict = fit.unpack_coefficients(b, model, ndim=ndim)
    if 'X' in model:
        nbin = bdict[d].shape[0]
        pd = unitvec(bdict[d], axis=1)
        pdca = np.zeros((nbin))
        for i in range(nbin):
            # estimate kappa
            k = ss.estimate_kappa(bootpd[:,i], mu=pd[i])
            # estimate ca (preferred direction Confidence Angle)
            pdca[i] = ss.measure_percentile_angle_ex_kappa(k)
            
        # modulation depths (averaged over bootstrap samples, as in the
        # else branch below) and their bootstrap standard errors
        bootkd = norm(bootdict[d], axis=2)
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    else:
        nbin = 1
        pd = unitvec(bdict[d])
        k = ss.estimate_kappa(bootpd, mu=pd)
        pdca = ss.measure_percentile_angle_ex_kappa(k)
        bootkd = norm(bootdict[d], axis=1)
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    return pd, pdca, kd, kdse
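A minimal usage sketch, assuming `b` and `cov` come from an earlier fit such as the glm_any_model call in Example #1; the count array and model string are illustrative assumptions:

# hypothetical inputs -- coefficients and covariance from a prior GLM fit
b, cov = glm_any_model(count, pos, time, model='kd')
pd, pdca, kd, kdse = bootstrap_pd_stats(b, cov, count, model='kd')
print(pd, pdca, kd, kdse)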