Example #1
def circfve(r_true, r_pred, lo=0, hi=2 * np.pi):
    """Calculates the fraction of variance explained (FVE) for circular data

    Assumes circular data are in the range [lo, hi].
    Uses SciPy's circular stats functions.

    Parameters
    ----------
    r_true : array_like
        Circular data (ground-truth)
    r_pred : array_like
        Circular data (predicted)
    lo : float or int, optional
        Low boundary for circular variance range.  Default is 0.
    hi : float or int, optional
        High boundary for circular variance range.  Default is ``2*pi``.
    """
    r_true = np.asarray(r_true)
    r_pred = np.asarray(r_pred)
    r_mu_true = spst.circmean(r_true, low=lo, high=hi)
    var_err = spst.circvar(angle_diff(r_true, r_pred, lo=lo, hi=hi),
                           low=lo,
                           high=hi)
    var_tot = spst.circvar(angle_diff(r_true, r_mu_true, lo=lo, hi=hi),
                           low=lo,
                           high=hi)
    if np.isclose(var_err, 0) and np.isclose(var_tot, 0):
        return 1
    if np.isclose(var_tot, 0):
        return 0
    return (1 - var_err / var_tot)
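The snippet assumes an `angle_diff` helper that is not shown. Below is a minimal sketch of a compatible helper plus a quick smoke test; the helper body and the sample data are assumptions, not part of the original example:

import numpy as np
import scipy.stats as spst

def angle_diff(a, b, lo=0, hi=2 * np.pi):
    # Signed circular difference a - b, wrapped to [-(hi-lo)/2, +(hi-lo)/2):
    period = hi - lo
    d = np.asarray(a) - np.asarray(b)
    return (d + period / 2) % period - period / 2

r_true = np.array([0.1, 1.2, 3.0, 5.9])
print(circfve(r_true, r_true))              # 1.0: perfect prediction
print(circfve(r_true, np.roll(r_true, 1)))  # < 1.0: shuffled prediction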
Example #2
    def calc_shape_loss(self, y, y_pred, suffix=''):
        """Calculate the shape loss"""
        if not isinstance(y, pd.core.frame.DataFrame):
            raise TypeError("'y' must be a pandas DataFrame, not %s" % type(y))
        if not isinstance(y_pred, pd.core.frame.DataFrame):
            raise TypeError(("'y_pred' must be a pandas DataFrame, "
                             "not %s" % type(y)))
        if not isinstance(suffix, six.string_types):
            raise TypeError("'suffix' must be a string, not %s" % type(suffix))

        # `y` and `y_pred` must have the same index, otherwise subtraction
        # produces nan:
        assert np.allclose(y_pred.index, y.index)

        cols = ['area', 'orientation', 'eccentricity']
        loss = np.zeros(len(cols))
        for i, col in enumerate(cols):
            yt = np.array(y.loc[:, col], dtype=float)
            yp = np.array(y_pred.loc[:, col + suffix], dtype=float)
            if col == 'orientation':
                # Use circular error:
                err = np.abs(utils.angle_diff(yt, np.nan_to_num(yp)))
                # err = np.abs(yt - np.nan_to_num(yp))
                # err = np.where(err > np.pi / 2, np.pi - err, err)
                # Use circular variance in `ss_tot`, which divides by len(yt).
                # Therefore, we also need to divide `ss_res` by len(yt), which
                # is the same as taking the mean instead of the sum.
                ss_res = np.mean(err**2)
                # ss_tot = np.sum((yt - np.mean(yt)) ** 2)
                ss_tot = spst.circvar(yt, low=-np.pi / 2, high=np.pi / 2)
                # loss = 1 - R², which simplifies to ss_res / ss_tot:
                ll = ss_res / (ss_tot + 1e-12)
            else:
                ll = 1 - sklm.r2_score(yt, np.nan_to_num(yp))
            loss[i] = 2 if np.isnan(ll) else ll
        return np.sum(loss)
Example #3
def circular_stats(ep, spikes, position):
    circ_stats = pd.DataFrame(index=spikes.keys(),
                              columns=['circ_mean', 'circ_var'])
    for i in spikes.keys():
        circ_stats.loc[i, 'circ_mean'] = circmean(position['ry'].realign(
            spikes[i].restrict(ep)))
        circ_stats.loc[i, 'circ_var'] = circvar(position['ry'].realign(
            spikes[i].restrict(ep)))
    return circ_stats
Example #4
def computeCircularStats(epochs, spikes, position, names):
    circ_mean = pd.DataFrame(index=spikes.keys(), columns=names)
    circ_var = pd.DataFrame(index=spikes.keys(), columns=names)
    for n, ep in zip(names, epochs):
        for k in spikes:
            circ_mean.loc[k, n] = circmean(
                position.realign(spikes[k].restrict(ep)))
            circ_var.loc[k, n] = circvar(
                position.realign(spikes[k].restrict(ep)))
    return circ_mean, circ_var
Example #5
        def get_joint_variance(skeleton, index1, index2, index3):
            angles = []

            for i in range(skeleton.shape[0]):
                x1, y1 = skeleton[i, index1 * 3], skeleton[i, index1 * 3 + 1]
                x2, y2 = skeleton[i, index2 * 3], skeleton[i, index2 * 3 + 1]
                x3, y3 = skeleton[i, index3 * 3], skeleton[i, index3 * 3 + 1]
                angle = joint_angle(np.array([x1, y1]), np.array([x2, y2]), np.array([x3, y3]))
                angles.append(angle)

            variance = circvar(angles, low=0, high=360)
            return variance
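`joint_angle` is not defined in this snippet; since its result is fed to `circvar(angles, low=0, high=360)`, it presumably returns degrees. One hypothetical implementation consistent with that contract (an assumption, not the original helper):

import numpy as np

def joint_angle(p1, p2, p3):
    # Hypothetical: angle at joint p2 between segments p2->p1 and p2->p3,
    # returned in degrees in [0, 360) to match circvar(low=0, high=360).
    v1 = p1 - p2
    v2 = p3 - p2
    ang = np.degrees(np.arctan2(v2[1], v2[0]) - np.arctan2(v1[1], v1[0]))
    return ang % 360.0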
Example #6
    def test_circfuncs(self):
        x = np.array([355, 5, 2, 359, 10, 350])
        M = stats.circmean(x, high=360)
        Mval = 0.167690146
        assert_allclose(M, Mval, rtol=1e-7)

        V = stats.circvar(x, high=360)
        Vval = 42.51955609
        assert_allclose(V, Vval, rtol=1e-7)

        S = stats.circstd(x, high=360)
        Sval = 6.520702116
        assert_allclose(S, Sval, rtol=1e-7)
Example #7
    def test_circfuncs_small(self):
        x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
        M1 = x.mean()
        M2 = stats.circmean(x, high=360)
        assert_allclose(M2, M1, rtol=1e-5)

        V1 = x.var()
        V2 = stats.circvar(x, high=360)
        assert_allclose(V2, V1, rtol=1e-4)

        S1 = x.std()
        S2 = stats.circstd(x, high=360)
        assert_allclose(S2, S1, rtol=1e-4)
Example #8
def test_circstats():
    x = np.array([355, 5, 2, 359, 10, 350])
    M = stats.circmean(x, high=360)
    Mval = 0.167690146
    assert_allclose(M, Mval, rtol=1e-7)

    V = stats.circvar(x, high=360)
    Vval = 42.51955609
    assert_allclose(V, Vval, rtol=1e-7)

    S = stats.circstd(x, high=360)
    Sval = 6.520702116
    assert_allclose(S, Sval, rtol=1e-7)
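A note on these expected values: 6.520702116² ≈ 42.51955609, so the tests above encode the older SciPy convention in which circvar equals circstd squared (−2·ln R, rescaled to the low/high range). Newer SciPy releases (1.9+, if memory serves) redefine circvar as 1 − R, a unitless value in [0, 1], so these exact numbers only reproduce on older versions. A quick arithmetic check:

import numpy as np

Sval = 6.520702116   # expected circstd from the tests above
Vval = 42.51955609   # expected circvar from the tests above
assert np.isclose(Sval ** 2, Vval, rtol=1e-6)  # old circvar == circstd**2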
Example #9
def test_circstats_small():
    x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
    M1 = x.mean()
    M2 = stats.circmean(x, high=360)
    assert_allclose(M2, M1, rtol=1e-5)

    V1 = x.var()
    V2 = stats.circvar(x, high=360)
    assert_allclose(V2, V1, rtol=1e-4)

    S1 = x.std()
    S2 = stats.circstd(x, high=360)
    assert_allclose(S2, S1, rtol=1e-4)
Example #10
def calc_stats(df, which_place, which_picture):
    df_slice = np.array(
        df['number_response'][(df['place'] == which_place)
                              & (df['step1_picture'] == which_picture)])
    df_slice = np.deg2rad(df_slice)
    mean_result = np.round(
        np.rad2deg(stats.circmean(df_slice, low=-np.pi, high=np.pi)), 1)
    var_result = np.round(
        np.rad2deg(stats.circvar(df_slice, low=-np.pi, high=np.pi)), 1)
    std_result = np.round(
        np.rad2deg(stats.circstd(df_slice, low=-np.pi, high=np.pi)), 1)
    print(
        f'{which_place} ({which_picture}): {mean_result} (Mean); {var_result} (Var); {std_result} (STD)'
    )
Example #11
def circ_r2_score(y_true, y_pred):
    """Calculate circular R² (the coefficient of determination)

    The best possible score is 1.0, lower values are worse.

    .. versionadded:: 0.7

    Parameters
    ----------
    y_true : array-like
        Ground truth (correct) target values.
    y_pred : array-like
        Estimated target values.

    Returns
    -------
    z : float
        The R² score

    Notes
    -----
    *  If the ground-truth data has zero variance, R² will be zero.
    *  This is not a symmetric function.

    """
    y_true = np.asarray(y_true)
    y_pred = np.asarray(y_pred)
    if y_true.size != y_pred.size:
        raise ValueError('"y_true" (%d) and "y_pred" (%d) must have the same '
                         'size.' % (y_true.size, y_pred.size))
    if y_true.size < 2:
        raise ValueError('Need at least two data points.')
    # Difference between two angles in [-pi/2, pi/2]:
    err = delta_angle(y_true, y_pred)
    # Use circular variance in `ss_tot`, which divides by len(y_true).
    # Therefore, we also need to divide `ss_res` by len(y_true), which
    # is the same as taking the mean instead of the sum:
    ss_res = np.mean(err**2, dtype=np.float32)
    ss_tot = np.asarray(circvar(y_true, low=-np.pi / 2, high=np.pi / 2),
                        dtype=np.float32)
    if np.isclose(ss_tot, 0):
        return 0.0  # zero variance in the ground-truth data
    return 1 - ss_res / ss_tot
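The function relies on a `delta_angle` helper whose contract is stated in the comment (a difference wrapped to [-pi/2, pi/2]) but whose body is not shown. A minimal sketch of a compatible helper and a sanity check; both are assumptions rather than the original code:

import numpy as np
from scipy.stats import circvar

def delta_angle(a, b):
    # Assumed helper: signed pi-periodic difference wrapped to [-pi/2, pi/2):
    return (np.asarray(a) - np.asarray(b) + np.pi / 2) % np.pi - np.pi / 2

y_true = np.array([-0.6, 0.1, 0.4, 1.2])    # orientations in [-pi/2, pi/2]
print(circ_r2_score(y_true, y_true))         # 1.0: perfect prediction
print(circ_r2_score(y_true, y_true + 0.1))   # slightly below 1.0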
Example #12
def test_circvar_axis():
    x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]])

    V1 = stats.circvar(x, high=360)
    V2 = stats.circvar(x.ravel(), high=360)
    assert_allclose(V1, V2, rtol=1e-11)

    V1 = stats.circvar(x, high=360, axis=1)
    V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
    assert_allclose(V1, V2, rtol=1e-11)

    V1 = stats.circvar(x, high=360, axis=0)
    V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
    assert_allclose(V1, V2, rtol=1e-11)
Example #13
    def analyze(self, atype, periodic=True):
        """Analyzes the data held in numpy vector"""
        if not periodic:
            self.mean = self.values.mean()
            self.stdev = self.values.std()
            self.var = self.values.var()
        else:
            p_high = 180
            if atype == "angle":
                p_low = 0
            else:
                p_low = -180
            self.mean = circmean(self.values, low=p_low, high=p_high)
            self.stdev = circstd(self.values, low=p_low, high=p_high)
            self.var = circvar(self.values, low=p_low, high=p_high)

        # Analyze normality
        # Note: this will be broken for distributions that go over a period
        # To be fixed
        self.anderson = stats.anderson(self.values, dist='norm')
Example #14
def test_circvar_axis():
    x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349],
                  [357, 9, 8, 358, 4, 356]])

    V1 = stats.circvar(x, high=360)
    V2 = stats.circvar(x.ravel(), high=360)
    assert_allclose(V1, V2, rtol=1e-11)

    V1 = stats.circvar(x, high=360, axis=1)
    V2 = [stats.circvar(x[i], high=360) for i in range(x.shape[0])]
    assert_allclose(V1, V2, rtol=1e-11)

    V1 = stats.circvar(x, high=360, axis=0)
    V2 = [stats.circvar(x[:, i], high=360) for i in range(x.shape[1])]
    assert_allclose(V1, V2, rtol=1e-11)
Example #15
def calc_stats(sets,DD=False):
    """Returns mean, standard deviation and variance of data in sets. If DD is 
    true then the circular equivalents are calculated. TO BE USED WITH CAUTION

    Parameters
    ----------
    

    sets: iterable set of data
    DD: boolean
    
    Returns
    ----------
    

    means: np.array
    var: np.array
    stds: np.array
        
    """
       
    means = np.array([])
    var = np.array([])
    stds = np.array([])
    for data in sets:
        mask = ~np.isnan(data)
        data = data[mask]
        if DD:
            u_east = np.nanmean(np.sin(data * np.pi/180))
            u_north = np.nanmean(np.cos(data * np.pi/180))
            # vector-mean direction in degrees, wrapped to [0, 360):
            tmp = np.arctan2(u_east, u_north) * 180/np.pi
            means = np.append(means, tmp % 360)
            # the data are in degrees, so pass the range explicitly:
            var = np.append(var, sc.circvar(data, low=0, high=360))
            stds = np.append(stds, sc.circstd(data, low=0, high=360))
        else:
            means = np.append(means,np.mean(data))
            var = np.append(var,np.var(data))
            stds = np.append(stds,np.std(data))
        
    return means,var,stds
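The DD branch builds the mean direction from averaged sine and cosine components, which is the same vector-mean construction scipy.stats.circmean uses internally. A quick equivalence check on wind-direction-style data in degrees (the sample data are made up):

import numpy as np
from scipy.stats import circmean

data = np.array([350., 5., 10.])   # directions straddling north
u_east = np.mean(np.sin(np.deg2rad(data)))
u_north = np.mean(np.cos(np.deg2rad(data)))
manual_mean = np.rad2deg(np.arctan2(u_east, u_north)) % 360
assert np.isclose(manual_mean, circmean(data, low=0, high=360))
print(manual_mean)   # ~1.68, not the naive np.mean() of ~121.7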
Example #16
 def test_empty(self):
     assert_(np.isnan(stats.circmean([])))
     assert_(np.isnan(stats.circstd([])))
     assert_(np.isnan(stats.circvar([])))
Example #17
 def test_circfuncs_array_like(self):
     x = [355, 5, 2, 359, 10, 350]
     assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
     assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
     assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
Example #19
def find_torsion_circular_variance(array_of_torsions):
    torsion_array = array_of_torsions
    torsion_variance = circvar(torsion_array)
    return torsion_variance
Example #20
    def variance_for_movements_timeslices(self, movements: list, C: list,
                                          M: int):
        """Combine the equations above to find the stop-bar passing time
        (tk), bin it into half-hour time segments, fill in missing time-seg
        groups, work out the tkhat function, and compute the coefficient of
        variation. This is done for all three series: forward, backward,
        and timeslices.

        Parameters
        ----------
        movements : list of pd.DataFrame
            The movements to process.
        C : list
            The c values to iterate through.
        M : int
            How many timeslices to look forward and backward.

        Returns
        -------
        tuple
            (weights, list_of_cvalue_Dfs, list_of_cvalue_Dfs_for_sf,
            list_of_cvalue_Dfs_for_sb)
        """

        M = M + 1

        list_of_cvalue_Dfs = []
        list_of_cvalue_Dfs_for_sf = []
        list_of_cvalue_Dfs_for_sb = []

        for c in C:
            movement = []
            movement_sf = []
            movement_sb = []
            example_size = []

            for m in movements:

                example_size.append(len(m))

                m = self.stop_line_passing_time_v2(m)  # adds tk values
                # m["time_to_DT"] = pd.to_datetime(
                #    m["t1"], unit="ms"
                # )  # adds date and time
                # m = m.sort_values(by="t1_to_DT")
                m["time_seg"] = [self.sort_time(i) for i in m.iterrows()
                                 ]  # adds time seg numbers

                padded_timeslices_grouped, un_grouped = self.pad_empty_timeslices_TS(
                    m
                )  # fills in empty time seg (1-48)  returns a groupby object for timeslices of the day

                # =============================================================================
                timeslice_variations = []
                forward_series_vars_for_TS = []
                backward_series_vars_for_TS = []

                # for timeslices of the day
                for timeslice in padded_timeslices_grouped:  # groupby object (tuple x 48)

                    forward_series = []
                    backward_series = []

                    TSn = timeslice[0]
                    TS_trajectories = timeslice[1]

                    tk_hat = self.tkhat_function(TS_trajectories,
                                                 c)  # tk values for each c

                    if len(tk_hat) >= 2:  # won't work if N < 2
                        var = circvar(tk_hat, high=c)
                    else:
                        var = 0

                    timeslice_variations.append(
                        var)  # all of the variances for timeslice

                    # # =============================================================================  FORWARD SERIES
                    for i in range(1, M):

                        forward_list = un_grouped[
                            (un_grouped["time_seg"] >= TSn)
                            & (un_grouped["time_seg"] <= TSn + i)]
                        forward_series.append(
                            forward_list
                        )  # for each timeslice we now have a forward series of dfs

                    forward_series_vars = []
                    for TS in forward_series:  # for each df in that forward series (for each timeslice)

                        tk_hat = self.tkhat_function(TS,
                                                     c)  # tk values for each c
                        if len(tk_hat) >= 2:  # won't work if N < 2
                            sf_var = self.coefficient_of_variation(tk_hat)
                        else:
                            sf_var = 0

                        forward_series_vars.append(sf_var)
                    forward_series_vars_for_TS.append(forward_series_vars)

                    # # =============================================================================  BACKWARD SERIES

                    for i in range(1, M):

                        backward_list = un_grouped[
                            (un_grouped["time_seg"] >= TSn - i)
                            & (un_grouped["time_seg"] <= TSn)]
                        backward_series.append(
                            backward_list
                        )  # for each timeslice we now have a backward series of dfs

                    backward_series_vars = []
                    for TS in backward_series:  # for each df in that backward series (for each timeslice)

                        tk_hat = self.tkhat_function(TS,
                                                     c)  # tk values for each c
                        if len(tk_hat) >= 2:  # won't work if N < 2
                            sb_var = self.coefficient_of_variation(tk_hat)
                        else:
                            sb_var = 0

                        backward_series_vars.append(sb_var)
                    backward_series_vars_for_TS.append(backward_series_vars)

                movement_sb.append(backward_series_vars_for_TS)
                df_for_sb = pd.DataFrame(movement_sb).fillna(0)

                movement_sf.append(forward_series_vars_for_TS)
                df_for_sf = pd.DataFrame(movement_sf).fillna(0)
                # # =============================================================================
                movement.append(
                    timeslice_variations
                )  # all of the TIMESLICE variances for each movement
                df_for_C = pd.DataFrame(movement).fillna(0)
            list_of_cvalue_Dfs.append(
                df_for_C)  # list of c-value dfs for timeslice
            list_of_cvalue_Dfs_for_sf.append(df_for_sf)
            list_of_cvalue_Dfs_for_sb.append(df_for_sb)

        # this gives us the weighting percentages for each movement
        sum_of_movements = sum(example_size)

        weights = []

        for m in movements:
            w = self.movement_weighting(m, sum_of_movements)
            weights.append(w)

        return (
            weights,
            list_of_cvalue_Dfs,
            list_of_cvalue_Dfs_for_sf,
            list_of_cvalue_Dfs_for_sb,
        )
Example #21
    def Mstep(self, data):
        # data - list of observationTuples
        # Note: height, bearing are after taking command for dt
        # ==== Action model update =====
        mu_cmds = np.zeros((self.cmd_size, self.n_action)) # averaging cosines and sines
        n_cmds = np.zeros((self.cmd_size, ))
        for i, data_t in enumerate(data): # Note: t = i+1
            ht, bear, bid, cmd, dt, _ = data_t
            mu_alpha_t_1, sigma_alpha_t_1 = self.alphas[i]
            mu_delta_t, sigma_delta_t = self.deltas[i]
            mu_alpha_delta = np.concatenate((mu_alpha_t_1, mu_delta_t))
            sigma_alpha_delta = np.zeros((self.n_state*2, self.n_state*2))
            sigma_alpha_delta[0:self.n_state, 0:self.n_state] = sigma_alpha_t_1
            sigma_alpha_delta[-self.n_state:, -self.n_state:] = sigma_delta_t
            sigma_cmd = self.action_varn_model

            # D(s_t, s_t_1) = L*st_t_1 + m
            # L is Jacobian of D at mu_alpha_delta
            # m = D(mu_alpha_delta) - L*mu_alpha_delta
            P = np.array([[ -1,  0,  0, 1, 0, 0],
                          [  0, -1,  0, 0, 1, 0],
                          [  0,  0, -1, 0, 0, 1]])
            D = lambda s: rotMat(-s[2]).dot(np.dot(P, s))
            L = np.zeros((self.n_action, self.n_state*2))
            L[:, 0:self.n_state] = -rotMat(-mu_alpha_delta[2])
            L[:, self.n_state:2*self.n_state] = rotMat(-mu_alpha_delta[2])
            tmp = (lambda t: np.array([[-np.sin(t),  np.cos(t)],
                                       [-np.cos(t), -np.sin(t)]]))(mu_alpha_delta[2])
            L[0:2, 2] = tmp.dot(np.dot(P, mu_alpha_delta)[0:2])
            m = D(mu_alpha_delta) - L.dot(mu_alpha_delta)

            Inv = np.linalg.inv(sigma_cmd + L.dot(sigma_alpha_delta).dot(L.T))
            diff = L.dot(mu_alpha_delta) + m - self.action_mean_model.get(cmd)
            diff[2] = _norm_angle(diff[2])
            new_cmd = sigma_cmd.dot(Inv).dot(diff)
            mu_cmds[cmd, :] += new_cmd
            n_cmds[cmd] = n_cmds[cmd] + 1

        mu_cmds = mu_cmds / (n_cmds.reshape((-1,1)) + 1e-8)
        mu_cmds[n_cmds == 0] = 0

        #with np.printoptions(formatter={'float': '{: 10.3f}'.format}):
        #    print('mu_cmds update : \n{}'.format(np.column_stack([self.action_mean_model.cmd_mus, mu_cmds])))
        mu_cmds += self.action_mean_model.cmd_mus

        # mu_cmds = np.concatenate([mu_cmds[:, :2], np.arctan2(mu_cmds[:, 3], mu_cmds[:, 2])[:, np.newaxis]], axis=1)

        self.action_mean_model.update(mu_cmds)

        # ==== Sensor model update ==== 
        regressX = []
        regressY = []
        for i, data_t in enumerate(data):
            ht, bear, bid, cmd, dt, _ = data_t
            mu_gamma_t, sigma_gamma_t = self.gammas[i+1]
            if ht is not None:
                s_t = np.random.multivariate_normal(mu_gamma_t, sigma_gamma_t)
                dist_t = Lnorm(s_t[:2] - self.bpos[bid][:2])
                ang_t = self.hx_beacon(self.bpos[bid])(s_t)[1]
                regressX.append([dist_t, ang_t])
                regressY.append([ht, bear])
        regressX = np.array(regressX)
        regressY = np.array(regressY)
        self.sensor_mean_model.fit(regressX[:, 0], regressY[:, 0]) # update the regression coefs.
        sigma_1_sqr = np.mean((self.sensor_mean_model.predict(regressX[:, 0]) - regressY[:, 0])**2)
        sigma_2_sqr = np.mean(_norm_angle_vec(regressX[:, 1] - regressY[:, 1])**2)
        sigma_2_sqr_ = circvar(regressX[:, 1] - regressY[:, 1], high=math.pi, low=-math.pi)
        # print('ours: {}, scipy: {}'.format(sigma_2_sqr, sigma_2_sqr_))
        self.sensor_varn_model = np.diag([sigma_1_sqr, sigma_2_sqr])
        self.prior_mean_model  = np.copy(self.gammas[0][0])
        self.prior_varn_model  = np.copy(self.gammas[0][1])
        print('sigma_1 : {:.3f}, sigma_2 : {:.3f}'.format(math.sqrt(self.sensor_varn_model[0, 0]), math.sqrt(self.sensor_varn_model[1, 1])))
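The commented-out print compares the hand-rolled mean of squared normalized angle errors (`sigma_2_sqr`) with scipy's circvar over the same residuals (`sigma_2_sqr_`). For small, zero-centered residuals the two agree closely under the pre-1.9 circvar definition (−2·ln R approximates the mean squared deviation); on SciPy >= 1.9, circvar returns 1 − R instead. A standalone sketch of that comparison with made-up residuals:

import numpy as np
from scipy.stats import circvar

rng = np.random.default_rng(0)
resid = rng.normal(0.0, 0.05, size=1000)   # small angular residuals (rad)
ours = np.mean(resid ** 2)                 # hand-rolled variance estimate
theirs = circvar(resid, high=np.pi, low=-np.pi)
print(ours, theirs)   # close on pre-1.9 SciPy; 1 - R (about half) on >= 1.9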
Example #22
# for running on an interactive cluster node
cellprofiler_path = "/g/almf/software/CP2C/CellProfiler11047/"
import sys

sys.path.append(cellprofiler_path)
import cellprofiler.cpmath.cpmorphology as morph
import Image

import numpy as np
from scipy import stats
from scipy.ndimage import convolve

x = np.array([0, 10, 20, 10, 20, 350]) / 360.0 * 2.0 * np.pi
stats.circvar(x)
stats.circmean(x)

radius = 3
image = np.ones((10, 10))

kernel = np.ones((radius, radius)) / radius ** 2
image_sin_mean = convolve(np.sin(image), kernel, mode="constant")
image_cos_mean = convolve(np.cos(image), kernel, mode="constant")
image_var = 1 - (image_sin_mean ** 2 + image_cos_mean ** 2)


import cellprofiler.cpmath.filter as filter
Example #23
import numpy as np
# step 3: calculate mu_i for T, lat, long
#plt.hist(dat_f['leave_long'])
#plt.hist(dat_f['leave_lat'])
plt.hist(train['leave_hour'])
plt.show()
#miu_lat = dat_f['leave_lat'].mean()
#miu_long = dat_f['leave_long'].mean()
###################
# try using scipy.stats to calculate mean and var

prob_table = []
for i in np.sort(train['y_i'].unique()):
    prob_T_yi = [0] * 6
    data_list = train[train['y_i'] == i].leave_tz
    dat_mean = circmean(data_list, high=5, low=0)
    dat_var = circvar(data_list, high=5, low=0)
    #print(dat_mean,dat_var)
    if (abs(dat_var) <= 0.01):
        hour = train[train.y_i == i].iloc[0, 0]
        prob_T_yi[hour] = 1
        #print(i,data_list,str(dat_var == 0))
    elif (dat_var > 0):
        for hour in range(6):
            if (abs(hour - dat_mean) <= 3):
                distance = abs(hour - dat_mean)
            else:
                distance = 6 - abs(hour - dat_mean)
            prob_T_yi[hour] = 1 / (math.sqrt(
                2 * math.pi * dat_var)) * math.exp(-distance**2 /
                                                   (2 * abs(dat_var)))
            #print(i,dat_mean,dat_var,distance,prob_T_yi)
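The branch above computes the shortest distance between hours on a circle of period 6 (use 6 − |Δ| once |Δ| exceeds 3). A small hypothetical helper expressing the same rule, for clarity (not part of the original snippet):

def circ_dist(a, b, period=6):
    # Shortest distance between a and b on a circle of the given period;
    # mirrors the `abs(hour - dat_mean) <= 3` branch above for period 6.
    d = abs(a - b) % period
    return min(d, period - d)

assert circ_dist(5, 0) == 1   # wrapping: 5 -> 0 is one step, not five
assert circ_dist(2, 4) == 2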
Example #24
def pos_make_df(data_pos, box_size_cm, timebase_pos, time_stamps_sessions_pos,
                loop_n, divider_n):
    data_pos_df = pd.DataFrame(data_pos)
    data_pos_df['time'] = np.array(data_pos_df['frame_counter'],
                                   dtype=float) / float(timebase_pos)  # in sec
    data_pos_df.set_index('time',
                          drop=True,
                          append=False,
                          inplace=True,
                          verify_integrity=False)

    # find amount of invalid tracking
    x1_fail = np.sum(data_pos_df.x1.values == 1023) / float(len(data_pos_df))
    x2_fail = np.sum(data_pos_df.x2.values == 1023) / float(len(data_pos_df))
    y1_fail = np.sum(data_pos_df.y1.values == 1023) / float(len(data_pos_df))
    y2_fail = np.sum(data_pos_df.y2.values == 1023) / float(len(data_pos_df))

    # get rid of 1023 values ...
    data_pos_df['x1'].replace(to_replace=1023,
                              inplace=True,
                              method='ffill',
                              axis=None)  # ffill first
    data_pos_df['x1'].replace(
        to_replace=1023, inplace=True, method='bfill',
        axis=None)  # then do bfill to get rid of 1023s at the end

    data_pos_df['x2'].replace(to_replace=1023,
                              inplace=True,
                              method='ffill',
                              axis=None)
    data_pos_df['x2'].replace(to_replace=1023,
                              inplace=True,
                              method='bfill',
                              axis=None)

    data_pos_df['y1'].replace(to_replace=1023,
                              inplace=True,
                              method='ffill',
                              axis=None)
    data_pos_df['y1'].replace(to_replace=1023,
                              inplace=True,
                              method='bfill',
                              axis=None)

    data_pos_df['y2'].replace(to_replace=1023,
                              inplace=True,
                              method='ffill',
                              axis=None)
    data_pos_df['y2'].replace(to_replace=1023,
                              inplace=True,
                              method='bfill',
                              axis=None)

    # get ratio (px to cm) ...
    # do the following calculations only on first session (base session)
    idx_start = int(time_stamps_sessions_pos[0])
    idx_stop = int(
        time_stamps_sessions_pos[1])  # take first session (base session)
    if np.diff(data_pos_df['frame_counter'].
               values[idx_stop - int(timebase_pos):idx_stop]).sum() == 0:
        #sys.stdout.write('Shortening position data for {} frames (nonsense)'.format(timebase_pos))
        idx_stop -= int(timebase_pos)
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]

    deltax1 = np.max(first_session['x1']) - np.min(first_session['x1'])
    deltay1 = np.max(first_session['y1']) - np.min(first_session['y1'])
    deltax2 = np.max(first_session['x2']) - np.min(first_session['x2'])
    deltay2 = np.max(first_session['y2']) - np.min(first_session['y2'])
    px_to_cm = box_size_cm / np.mean([deltax1, deltay1, deltax2, deltay2
                                      ])  # assuming square arena
    #print('1 px = {} cm (assuming {} cm square box)'.format(px_to_cm,box_size_cm))

    # find correct LED ...
    x_art_all = np.zeros((loop_n, divider_n))
    y_art_all = np.zeros((loop_n, divider_n))
    # between the two LEDs try to find the center point as the point of minimum movement
    for i in range(loop_n):  # first loop_n position samples
        counter_divider = 0
        for divider in np.linspace(-1.5, 1.5, divider_n):
            art_point_x = divider * abs((first_session['x2'].values[i] -
                                         first_session['x1'].values[i]))
            art_point_y = divider * abs((first_session['y2'].values[i] -
                                         first_session['y1'].values[i]))

            if first_session['x1'].values[i] <= first_session['x2'].values[i]:
                x_art = first_session['x1'].values[i] + art_point_x
            if first_session['x1'].values[i] > first_session['x2'].values[i]:
                x_art = first_session['x1'].values[i] - art_point_x
            if first_session['y1'].values[i] <= first_session['y2'].values[i]:
                y_art = first_session['y1'].values[i] + art_point_y
            if first_session['y1'].values[i] > first_session['y2'].values[i]:
                y_art = first_session['y1'].values[i] - art_point_y
            x_art_all[i, counter_divider] = x_art
            y_art_all[i, counter_divider] = y_art

            counter_divider = counter_divider + 1

    dist_art_all = np.zeros((loop_n - 1, divider_n))
    for divider in range(divider_n):
        dist_art_all[:, divider] = np.sqrt(
            np.square(np.diff(x_art_all[:, divider])) +
            np.square(np.diff(y_art_all[:, divider])))

    total_dist_art = np.cumsum(dist_art_all, axis=0)[-1, :]
    fraction = np.linspace(-1.5, 1.5, divider_n)[np.argmin(total_dist_art)]

    if (fraction > 0.5):
        if (x1_fail < 0.3) and (y1_fail < 0.3):
            data_pos_df['correct_x'] = data_pos_df['x1']
            data_pos_df['correct_y'] = data_pos_df['y1']
        else:
            data_pos_df['correct_x'] = data_pos_df['x2']
            data_pos_df['correct_y'] = data_pos_df['y2']
    else:
        if (x2_fail < 0.3) and (y2_fail < 0.3):
            data_pos_df['correct_x'] = data_pos_df['x2']
            data_pos_df['correct_y'] = data_pos_df['y2']
        else:
            data_pos_df['correct_x'] = data_pos_df['x1']
            data_pos_df['correct_y'] = data_pos_df['y1']

    # smooth positions ...
    cols = ['x1', 'x2', 'y1', 'y2', 'correct_x', 'correct_y']
    for col in cols:
        #data_pos_df[col+'_inter'] = savgol_filter(data_pos_df[col], 25, 4) # Savitzky golay
        data_pos_df[col + '_inter'] = gaussian_filter1d(
            data_pos_df[col], 2,
            mode='nearest')  # smoothed position with sigma = 2

    # Get speed ...
    dist = np.sqrt(
        np.square(np.diff(data_pos_df['correct_x_inter'])) +
        np.square(np.diff(data_pos_df['correct_y_inter'])))
    time_diff = np.diff(data_pos_df.index)
    time_diff[time_diff == 0] = np.inf
    speed = np.hstack((0, dist * px_to_cm / time_diff))  # cm/s
    speed_filtered = gaussian_filter1d(speed,
                                       1)  # smoothed speed with sigma = 1
    data_pos_df['speed'] = speed
    data_pos_df['speed_filtered'] = speed_filtered

    #######################################################################################################################
    # correction of arena and head direction offset

    # correct rotation of arena if it is not perfectly positioned at 90 degrees to camera
    # renew first_session data (do calculations only on base session)
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]

    center_x = int((np.max(first_session['correct_x_inter']) -
                    np.min(first_session['correct_x_inter'])))
    center_y = int((np.max(first_session['correct_y_inter']) -
                    np.min(first_session['correct_y_inter'])))
    center = (center_x, center_y)

    first_session_coords = np.array(np.column_stack(
        (first_session['correct_x_inter'], first_session['correct_y_inter'])),
                                    dtype=int)
    angle = cv2.minAreaRect(first_session_coords)[-1]
    if np.abs(angle) > 45:
        angle = 90 + angle
    sys.stdout.write(
        'Detected an arena rotation angle of {:.2f} degrees.\n'.format(angle))
    M = cv2.getRotationMatrix2D(center, angle, 1)
    # rotation matrix is applied in the form:
    #M00x + M01y + M02
    #M10x + M11y + M12

    keys_to_correct = [['x1', 'y1'], ['x2', 'y2'], ['x1_inter', 'y1_inter'],
                       ['x2_inter', 'y2_inter'], ['correct_x', 'correct_y'],
                       ['correct_x_inter', 'correct_y_inter']]

    for pair in keys_to_correct:
        correct_xs, correct_ys = apply_rotation(data_pos_df, pair[0], pair[1],
                                                M)
        #sys.stdout.write('Corrected {} and {}.\n'.format(pair[0],pair[1]))
        # write corrected coordinates to dataframe
        data_pos_df[pair[0]] = correct_xs
        data_pos_df[pair[1]] = correct_ys

    # Correct head direction / LED offset:
    # Get LED direction ...
    diff_x_led = data_pos_df['x2_inter'] - data_pos_df['x1_inter']
    diff_y_led = data_pos_df['y2_inter'] - data_pos_df['y1_inter']
    led_angle = np.array([
        math.atan2(dx, dy) for dx, dy in zip(diff_x_led, diff_y_led)
    ])
    led_angle = (led_angle + 2 * np.pi) % (2 * np.pi)
    data_pos_df['led_angle'] = led_angle

    # Get moving direction ...
    diff_x_move = np.diff(data_pos_df['correct_x_inter'])
    diff_y_move = np.diff(data_pos_df['correct_y_inter'])
    mov_angle = np.array([
        math.atan2(dx, dy) for dx, dy in zip(diff_x_move, diff_y_move)
    ])
    mov_angle = np.hstack((mov_angle, 0))
    mov_angle = (mov_angle + 2 * np.pi) % (2 * np.pi)
    data_pos_df['mov_angle'] = mov_angle

    # Calculate head direction / LED offset
    # ... renew first_session df:
    # to calculate only over first session
    first_session = data_pos_df.iloc[idx_start:idx_stop, :]
    mov_angle_first = first_session['mov_angle'][
        first_session['speed'] >
        20].values  # filter at 20 cm/s speed (that's quite random)
    led_angle_first = first_session['led_angle'][
        first_session['speed'] > 20].values
    diff_mov_led = mov_angle_first - led_angle_first
    diff_mov_led[diff_mov_led < 0] = 2 * np.pi + diff_mov_led[diff_mov_led < 0]
    diff_mov_led[diff_mov_led > 2 *
                 np.pi] = diff_mov_led[diff_mov_led > 2 * np.pi] - 2 * np.pi

    head_offset = circmean(diff_mov_led)
    head_offset_var = circvar(diff_mov_led)
    sys.stdout.write(
        'Head angle offset: {:.2f} degrees | Variance: {:.2f}\n'.format(
            math.degrees(head_offset), head_offset_var))
    if head_offset_var > 1:
        sys.stdout.write(
            'Head angle offset variance > 1: This is not accurate.\n')

    # ... and correct LED angle:
    #led_angle_corr = [led_angle - head_offset if head_offset < 0 else led_angle + head_offset][0]
    led_angle_corr = led_angle + head_offset
    led_angle_corr[
        led_angle_corr < 0] = 2 * np.pi + led_angle_corr[led_angle_corr < 0]
    led_angle_corr[
        led_angle_corr > 2 *
        np.pi] = led_angle_corr[led_angle_corr > 2 * np.pi] - 2 * np.pi

    data_pos_df['head_angle'] = led_angle_corr

    # There is a problem here: pandas has trouble reading this because of a
    # little-endian compiler issue when adding the angle vector to the DataFrame.
    # Values can still be read, though.
    return data_pos_df, px_to_cm, head_offset, head_offset_var
Example #25
import glob
import numpy as np

from scipy.stats import circmean, circvar
fns = sorted(glob.glob("./*.txt"))

fp1 = open('bonds.dat', 'w')
fp2 = open('bend.dat', 'w')
fp3 = open('tor.dat', 'w')
for f in fns:
    types = f.split('/')[1].split('.')[0]
    d = np.loadtxt(f)
    if (len(types.split('-')) == 4):
        fp3.write("%s %f %f\n" % (types, circmean(d, low=-180., high=180.),
                                  np.sqrt(circvar(d, low=-180., high=180.))))
    elif (len(types.split('-')) == 3):
        fp2.write("%s %f %f\n" % (types, np.mean(d), np.std(d)))
    else:
        fp1.write("%s %f %f\n" % (types, np.mean(d) * 0.1, np.std(d) * 0.1))

fp1.close()
fp2.close()
fp3.close()
Example #26
 def test_circfuncs_array_like(self):
     x = [355, 5, 2, 359, 10, 350]
     assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
     assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
     assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
Example #27
        bars = ax1.bar(edges[:-1],hist,width=width,color='#737373',edgecolor=edgecolors)
        #ax1.yaxis.set_major_locator(pl.MaxNLocator(3))
        #ax1.yaxis.set_major_locator(pl.LinearLocator(3))
        max_hist=np.max(hist)
        tick_locations=[int(max_hist/2.),max_hist]
        print(tick_locations)
        ax1.yaxis.set_major_locator(pl.FixedLocator(tick_locations))
        xpos=-1.5*max_hist

        #draw_straight_axis(xpos,ax1.yaxis.get_ticklocs(),ax1)
        #ax1.yaxis.set_major_formatter(ticks)
        ax1.yaxis.set_major_formatter(ticker.FixedFormatter(['',str(max_hist)]))
        ax1.set_rlabel_position(90)
        ax1.xaxis.set_major_formatter(ticks)
        mean_angle=circmean(data['Corrected_Kino_Angle'])
        std_angle=np.sqrt(circvar(data['Corrected_Kino_Angle']))

        ymin,ymax=ax1.get_ylim()
        ax1.plot([mean_angle,mean_angle],[0,ymax],'b')
        std_angles=np.linspace(mean_angle-old_div(std_angle,2),mean_angle+old_div(std_angle,2),11)
        std_radii=np.ones(11)*2.*max_hist/3.
        #ax1.plot([mean_angle-std_angle/2,mean_angle+std_angle/2],[2.*max_hist/3.,2.*max_hist/3.],'r')
        ax1.plot(std_angles,std_radii,'r')
        ax1.set_ylim([0,max_hist])

        pl.savefig('kinos_histogram_'+filename+'_'+sheet+extension)
        pl.close(fig2)
#===========================================================================
        # Plot histogram of bundle orientations
        hist,edges=np.histogram(data['Corrected_Bundle_Angle'],density=False,bins=bins,range=[0,2.*np.pi])
        print('Bundle angles, number of bins: ',len(hist))
Example #28
def get_framework_skew_var(skew_list):
    skew_variance = circvar(skew_list)
    return skew_variance
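One caveat worth noting: get_framework_skew_var leans on circvar's defaults (low=0, high=2*pi), so skew_list must already be in radians; values in degrees need the range spelled out. A usage sketch with made-up data:

import numpy as np
from scipy.stats import circvar

skews_rad = np.deg2rad([350.0, 5.0, 10.0])
print(get_framework_skew_var(skews_rad))             # radians: defaults are fine
print(circvar([350.0, 5.0, 10.0], low=0, high=360))  # degrees: set the range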