def make_head_axis(self, left_tragus, right_tragus, nasion):
    '''Build a head-fixed coordinate frame from three landmarks and
    return it as a quaternion.
    '''
    origin = (left_tragus + right_tragus) / 2.
    x = vg.unitvec(right_tragus - origin)  # make right +x
    yk = vg.unitvec(nasion - origin)       # make anterior +y
    zk = vg.unitvec(np.cross(x, yk))
    y = vg.unitvec(np.cross(zk, x))
    z = np.cross(x, y)                     # make superior +z
    return tf.DCM2quat(np.array([x, y, z]))
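# A minimal numpy-only sketch of the same frame construction, for reference:
# `_unit` stands in for vg.unitvec, and the landmark coordinates (metres)
# are made up for illustration.
def _demo_head_axis():
    def _unit(v):
        return v / np.sqrt(np.sum(v ** 2))
    left_tragus = np.array([-0.07, 0., 0.])
    right_tragus = np.array([0.07, 0., 0.])
    nasion = np.array([0., 0.09, 0.02])
    origin = (left_tragus + right_tragus) / 2.
    x = _unit(right_tragus - origin)   # right is +x
    yk = _unit(nasion - origin)        # approximately anterior
    zk = _unit(np.cross(x, yk))        # orthogonal to both x and yk
    y = _unit(np.cross(zk, x))         # re-orthogonalized anterior +y
    z = np.cross(x, y)                 # superior +z
    dcm = np.array([x, y, z])          # rows are the head axes
    assert np.allclose(np.dot(dcm, dcm.T), np.eye(3))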
def get_projected_speed(self, tno):
    '''Calculate speed of trial number `tno`, projected along task direction.

    Parameters
    ----------
    tno : int
        index of trial to calculate

    Returns
    -------
    proj : ndarray
        projected speed values, in m/s

    Notes
    -----
    Trials have to be processed one at a time because `positions` has
    a different number of elements in each trial.
    '''
    trial = self.positions[tno]
    pos = trial[:, 0:3]
    time = trial[:, 3]
    vel = kin.get_vel(pos, time, tax=0, spax=-1)
    task_dir = unitvec(self.TargetPos[tno] - self.StartPos[tno])
    proj = np.dot(vel, task_dir)
    return proj
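# Hedged sketch of the projection step above, using np.gradient in place of
# kin.get_vel; the positions, times, and task direction are made up.
def _demo_projected_speed():
    pos = np.array([[0.00, 0.00, 0.0],
                    [0.10, 0.00, 0.0],
                    [0.25, 0.05, 0.0]])     # m, shape (ntime, 3)
    time = np.array([0.0, 0.1, 0.2])        # s
    vel = np.gradient(pos, time, axis=0)    # m/s, stand-in for kin.get_vel
    task_dir = np.array([1., 0., 0.])       # unit vector from start to target
    proj = np.dot(vel, task_dir)            # signed speed along task, m/s
    return proj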
def old_bootstrap_pd_stats(b, b_s, k, n_samp=int(1e3)):
    '''Calculate, by bootstrapping, statistics of preferred directions.

    Parameters
    ----------
    b : array_like, shape (3,)
        linear regression coefficients
    b_s : array_like, shape (3,)
        standard errors of linear regression coefficients
    k : float
        modulation depth
    n_samp : int
        number of samples to use for bootstrap

    Returns
    -------
    k_s : float
        standard error of k, the modulation depth
    kappa : float
        dispersion factor of the bootstrapped preferred directions
    R : float
        length factor of the pd distribution

    Notes
    -----
    1. Angles of preferred direction cones are determined as follows.
    2. Regression coefficients (`b`) are recovered from pds and
       modulation depths.
    3. Random populations of regression coefficients are constructed
       from distributions with the same mean and standard deviation as
       estimated from the regression procedure, with the same number
       of samples as the original data.
    4. Preferred directions are calculated from these coefficient
       values (i.e. dividing by modulation depth).
    '''
    warn(DeprecationWarning("Doesn't calculate using covariance matrix. "
                            "Use ___ instead."))
    assert isinstance(n_samp, (int, np.integer))
    assert isinstance(k, (float, np.floating))
    assert b.shape == b_s.shape == (3,)

    # generate n_samp samples from normal populations
    # (mean: b_k, sd: err_k); will have shape (n_samp, 3)
    b_rnd = np.random.standard_normal(size=(n_samp, 3))
    b_rnd *= b_s
    b_rnd += b
    pd = unitvec(b)
    ks = norm(b_rnd, axis=1)
    k_s = np.std(ks, ddof=1)
    pds_rnd = b_rnd / ks[..., np.newaxis]
    kappa = ss.estimate_kappa(pds_rnd, mu=pd)
    R, S = ss.calc_R(pds_rnd)
    return k_s, kappa, R
def _map_data(data, task, pd, resolution=100):
    '''Map task-indexed data onto a (theta, phi) grid oriented by `pd`.

    Parameters
    ----------
    data : ndarray
        shape (ntime, ntask)
    task : ndarray
        shape (ntask, 6)
    pd : ndarray
        shape (3,)
    resolution : int, optional
        number of grid points along each angular axis, defaults to 100

    Returns
    -------
    mapped_data : ndarray
        shape (ntime, resolution, resolution)

    Notes
    -----
    Needs to incorporate pd so that it lies at center of plot.
    '''
    if data.shape[-1] != task.shape[0]:
        raise ValueError('last axis of data (len %d) must be same length as\n'
                         'first axis of task (len %d)' %
                         (data.shape[-1], task.shape[0]))

    # generate theta, phi grid,
    # i.e. map x,y grid to nearest of 26 targets
    thetas = np.linspace(0., np.pi, resolution)
    phis = np.linspace(0., 2 * np.pi, resolution)
    theta_grid = np.tile(thetas[:, None], (1, resolution))
    phi_grid = np.tile(phis[None, :], (resolution, 1))

    # convert polar to cartesian co-ordinates
    tp_grid = np.concatenate((theta_grid[None], phi_grid[None]), axis=0)
    xyz_grid = pol2cart(tp_grid, axis=0)

    # calculate direction of each task
    start = task[:, :3]
    stop = task[:, 3:]
    direction_task = unitvec(stop - start, axis=-1)

    # rotate task directions until pd points towards (0, 0, 1)
    rotated_toz = rotate_targets(direction_task, cart2pol(pd))
    # now rotate again to point pd towards (0, 1, 0)
    rotated_toy = rotate_targets(rotated_toz, np.array([np.pi / 2., 0]))

    # calculate angle between each grid square and each direction
    angles = np.arccos(np.tensordot(rotated_toy, xyz_grid, [1, 0]))
    # get index of closest direction for each grid square,
    # i.e. argmin along 0th axis
    nearest = np.argmin(angles, axis=0)
    mapped_data = data[..., nearest]
    return mapped_data
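# Hypothetical usage sketch for `_map_data`: 26 made-up tasks with start and
# stop points packed as columns of `task`, 10 time bins of random data, and
# a pd tilted off the z axis. Shapes follow the docstring above; this
# assumes the module's pol2cart/cart2pol/rotate_targets helpers are present.
def _demo_map_data():
    rng = np.random.RandomState(0)
    ntask, ntime, res = 26, 10, 50
    start = np.zeros((ntask, 3))
    stop = rng.standard_normal((ntask, 3))       # random target offsets
    task = np.hstack((start, stop))              # shape (ntask, 6)
    data = rng.standard_normal((ntime, ntask))   # e.g. firing rates
    pd = np.array([1., 1., 1.]) / np.sqrt(3.)
    mapped = _map_data(data, task, pd, resolution=res)
    assert mapped.shape == (ntime, res, res)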
def test_mean_deviation():
    # test simple case - all valid angles
    B = np.array([[ 0.26097211, -1.76208686,  0.46576422],
                  [-1.67251254,  1.0682131 , -0.12919601],
                  [-0.17027653,  0.66272588, -0.59754741]])
    pd = unitvec(B, axis=1)
    angles = np.zeros((B.shape[0],))
    angles[0] = np.arccos(np.dot(pd[0], pd[1]))
    angles[1] = np.arccos(np.dot(pd[0], pd[2]))
    angles[2] = np.arccos(np.dot(pd[1], pd[2]))
    np.testing.assert_almost_equal(np.mean(angles), mean_deviation(pd))
def get_dir(pos, tax=-2, spax=-1):
    '''Get instantaneous direction.

    Parameters
    ----------
    pos : array_like
        positions
    tax : int, optional
        time axis, defaults to -2
    spax : int, optional
        spatial axis, defaults to -1

    Returns
    -------
    dir : ndarray
        unit vectors of instantaneous direction
    '''
    dp = np.diff(pos, axis=tax)
    if (spax == -1) or (spax == len(pos.shape) - 1):
        # spatial axis is last: use the fast flatten-to-2d path
        return unitvec_f2d(dp)
    return unitvec(dp, axis=spax)
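# Minimal usage sketch for `get_dir` on a single made-up trajectory of
# shape (ntime, 3); each output row is a unit vector between samples.
def _demo_get_dir():
    pos = np.array([[0., 0., 0.],
                    [1., 0., 0.],
                    [1., 2., 0.]])
    d = get_dir(pos)                 # shape (ntime - 1, 3)
    assert np.allclose(np.sqrt(np.sum(d ** 2, axis=-1)), 1.)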
def test_unitvec_f2d():
    a = np.array([[ 0.50654606,  0.05050327],
                  [ 0.06780228, -0.10952565],
                  [ 0.12116112, -0.14544285],
                  [-0.0588865 , -0.14017103],
                  [ 0.1167503 , -0.26414753],
                  [-0.09625524,  0.07777135],
                  [ 0.32561687,  0.08549398],
                  [-0.16084578, -0.0788045 ],
                  [ 0.37862188, -0.05553404],
                  [-0.06879143, -0.15628546]])
    uvf = unitvec_f2d(a)
    uv = unitvec(a, axis=1)
    np.testing.assert_array_equal(uv, uvf)
def test_unitvec():
    a = np.array([[ 0.50654606,  0.05050327],
                  [ 0.06780228, -0.10952565],
                  [ 0.12116112, -0.14544285],
                  [-0.0588865 , -0.14017103],
                  [ 0.1167503 , -0.26414753],
                  [-0.09625524,  0.07777135],
                  [ 0.32561687,  0.08549398],
                  [-0.16084578, -0.0788045 ],
                  [ 0.37862188, -0.05553404],
                  [-0.06879143, -0.15628546]])
    uv = unitvec(a, axis=1)
    norms = np.sqrt(np.sum(a ** 2, axis=1))
    other = a / norms[..., None]
    np.testing.assert_array_equal(uv, other)
def bootstrap_pd_stats(b, cov, neural_data, model, ndim=3):
    '''Bootstrap statistics of preferred directions from fitted
    coefficients and their covariance matrix.

    Parameters
    ----------
    b : ndarray
        coefficients
    cov : ndarray
        covariance matrix
    neural_data : ndarray
        counts (GLM) or rates (OLS)
    model : string
        specification of fit model
    ndim : int, optional
        number of spatial dimensions, defaults to 3

    Returns
    -------
    pd : ndarray
        preferred directions
    pdca : ndarray
        confidence angles of preferred directions
    kd : ndarray
        modulation depths
    kdse : ndarray
        standard errors of modulation depths
    '''
    d = 'd' if 'd' in model else 'v'

    # number of independent samples from original data;
    # compressing along the last axis gives the number of samples per
    # bin, which is correct since bootstrapping is done independently
    # for each bin
    nsamp = np.sum(~np.isnan(neural_data.sum(axis=1)))

    # bootstrap a distribution of b values,
    # using mean and covariance matrix from the GLM (/ OLS)
    bootb = np.random.multivariate_normal(b, cov, (nsamp,))
    bootdict = fit.unpack_many_coefficients(bootb, model, ndim=ndim)
    if 'X' in model:
        bootpd = unitvec(bootdict[d], axis=2)  # shape (nsamp, nbin, ndim)
    else:
        bootpd = unitvec(bootdict[d], axis=1)  # shape (nsamp, ndim)

    # get mean pd to narrow kappa estimation
    bdict = fit.unpack_coefficients(b, model, ndim=ndim)
    if 'X' in model:
        nbin = bdict[d].shape[0]
        pd = unitvec(bdict[d], axis=1)
        pdca = np.zeros((nbin,))
        for i in range(nbin):
            # estimate kappa
            k = ss.estimate_kappa(bootpd[:, i], mu=pd[i])
            # estimate ca (preferred direction Confidence Angle)
            pdca[i] = ss.measure_percentile_angle_ex_kappa(k)
        # calculate mean and standard error of the bootstrapped
        # modulation depths, per bin
        bootkd = norm(bootdict[d], axis=2)
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    else:
        nbin = 1
        pd = unitvec(bdict[d])
        k = ss.estimate_kappa(bootpd, mu=pd)
        pdca = ss.measure_percentile_angle_ex_kappa(k)
        bootkd = norm(bootdict[d], axis=1)
        kd = np.mean(bootkd, axis=0)
        kdse = np.std(bootkd, axis=0, ddof=1)
    return pd, pdca, kd, kdse
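# Hedged sketch of the core resampling step in `bootstrap_pd_stats`: draw
# coefficient vectors from N(b, cov), normalize to unit preferred
# directions, and summarize their spread. The values are made up, and the
# kappa / confidence-angle estimation is left to the ss module.
def _demo_bootstrap_pds():
    rng = np.random.RandomState(0)
    b = np.array([2.0, 1.0, 0.5])        # fitted direction coefficients
    cov = 0.01 * np.eye(3)               # their covariance matrix
    bootb = rng.multivariate_normal(b, cov, 1000)  # shape (1000, 3)
    kd = np.sqrt(np.sum(bootb ** 2, axis=1))       # modulation depths
    bootpd = bootb / kd[:, None]                   # unit preferred directions
    kdse = np.std(kd, ddof=1)            # se of modulation depth
    return bootpd, kd.mean(), kdse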