Example #1
def test_circ():

    h = np.loadtxt('../data/h.txt')
    # h = h*2
    m, s = norm.fit(h)
    print('default mean {} std {}'.format(m, s))
    c_m = circmean(h)
    c_s = circstd(h)
    c_mh = circmean(h, high=180)
    c_sh = circstd(h, high=180)
    print('default circ mean {} std {}'.format(circmean(h), circstd(h)))
    print('mean {} std {}'.format(circmean(h, high=180), circstd(h, high=180)))
    xmin, xmax = (0, 180)
    x = np.linspace(xmin, xmax, 180)
    y = norm.pdf(x, m, s)
    y_circ = norm.pdf(x, c_m, c_s)
    y_circh = norm.pdf(x, c_mh, c_sh)
    y_flip = norm.pdf(x, 174.23, 2.64)
    fl = plt.plot(x, y_flip, label='With flip ')
    l = plt.plot(x, y, label="Default mean")
    c = plt.plot(x, y_circ, label='Circular mean')
    ch = plt.plot(x, y_circh, label='Circular with limit')
    v = plt.hist(h, density=True, label='Hue')
    plt.legend(loc='upper right')
    # plt.legend((l), ('mean'))
    # plt.legend((l,c,ch), ('Default mean', 'Circuled', 'Circlued with high 180'))
    plt.show()
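A minimal sketch (synthetic hue values, not the ../data/h.txt file used above) of why the circular statistics with high=180 behave differently from norm.fit near the wrap-around:

import numpy as np
from scipy.stats import circmean, circstd

hues = np.array([176.0, 178.0, 179.5, 0.5, 2.0, 4.0])    # hypothetical hue angles in degrees
print(np.mean(hues), np.std(hues))                        # pulled toward the middle of the range
print(circmean(hues, high=180), circstd(hues, high=180))  # clustered near 0/180, small spread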
Example #2
def spherical_uncertainty(popt, pcov):
    # Convert cartesian standard deviations to spherical standard
    # deviations with Monte Carlo method.

    n_samples = 1000000
    vs = np.random.multivariate_normal(popt, pcov, n_samples)[:, 0:3]
    sph = spherical_fast(vs)
    return (np.std(sph[:, 0]), circstd(sph[:, 1]), circstd(sph[:, 2]))
Example #3
    def test_circstd_nan(self):
        """ Test custom circular std with NaN."""

        ref_std = scistats.circstd(self.test_angles, **self.circ_kwargs)
        ref_nan = scistats.circstd(self.test_nan, **self.circ_kwargs)
        test_nan = pystats.nan_circstd(self.test_nan, **self.circ_kwargs)

        assert np.isnan(ref_nan)
        assert ref_std == test_nan
Example #4
    def test_circstd_nan(self):
        """ Test custom circular std with NaN."""
        from scipy import stats

        ref_std = stats.circstd(self.test_angles, **self.circ_kwargs)
        ref_nan = stats.circstd(self.test_nan, **self.circ_kwargs)
        test_nan = pysat.utils.nan_circstd(self.test_nan, **self.circ_kwargs)

        assert np.isnan(ref_nan)
        assert ref_std == test_nan
Example #5
    def test_circstd_nan(self):
        """ Test custom circular std with NaN."""
        from scipy import stats

        ref_std = stats.circstd(self.test_angles, **self.circ_kwargs)
        ref_nan = stats.circstd(self.test_nan, **self.circ_kwargs)
        test_nan = pysat.utils.nan_circstd(self.test_nan, **self.circ_kwargs)

        assert np.isnan(ref_nan)
        assert ref_std == test_nan
Example #6
def _mc_error(x, batches=5, circular=False):
    """Calculate the simulation standard error, accounting for non-independent samples.

    The trace is divided into batches, and the standard deviation of the batch
    means is calculated.

    Parameters
    ----------
    x : Numpy array
        An array containing MCMC samples
    batches : integer
        Number of batches
    circular : bool
        Whether to compute the error taking into account `x` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).

    Returns
    -------
    mc_error : float
        Simulation standard error
    """
    if x.ndim > 1:

        dims = np.shape(x)
        trace = np.transpose([t.ravel() for t in x])

        return np.reshape([_mc_error(t, batches, circular) for t in trace], dims[1:])

    else:
        if batches == 1:
            if circular:
                std = st.circstd(x, high=np.pi, low=-np.pi)
            else:
                std = np.std(x)
            return std / np.sqrt(len(x))

        try:
            batched_traces = np.resize(x, (batches, int(len(x) / batches)))
        except ValueError:
            # If batches do not divide evenly, trim excess samples
            resid = len(x) % batches
            new_shape = (batches, (len(x) - resid) // batches)
            batched_traces = np.resize(x[:-resid], new_shape)

        if circular:
            means = st.circmean(batched_traces, high=np.pi, low=-np.pi, axis=1)
            std = st.circstd(means, high=np.pi, low=-np.pi)
        else:
            means = np.mean(batched_traces, 1)
            std = np.std(means)

        return std / np.sqrt(batches)
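A hedged usage sketch for the batch-means estimator above, assuming numpy is imported as np as in the snippet; the AR(1) chain is a made-up stand-in for MCMC output:

import numpy as np

rng = np.random.default_rng(0)
x = np.zeros(10000)
for i in range(1, len(x)):                 # build a mildly autocorrelated chain
    x[i] = 0.9 * x[i - 1] + rng.normal()

naive_se = np.std(x) / np.sqrt(len(x))     # ignores autocorrelation
batch_se = _mc_error(x, batches=20)        # typically larger than naive_se for correlated draws
print(naive_se, batch_se)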
Example #7
def _mc_error(ary, batches=5, circular=False):
    """Calculate the simulation standard error, accounting for non-independent samples.

    The trace is divided into batches, and the standard deviation of the batch
    means is calculated.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples
    batches : integer
        Number of batches
    circular : bool
        Whether to compute the error taking into account `ary` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).

    Returns
    -------
    mc_error : float
        Simulation standard error
    """
    if ary.ndim > 1:

        dims = np.shape(ary)
        trace = np.transpose([t.ravel() for t in ary])

        return np.reshape([_mc_error(t, batches, circular) for t in trace], dims[1:])

    else:
        if _not_valid(ary, check_shape=False):
            return np.nan
        if batches == 1:
            if circular:
                std = stats.circstd(ary, high=np.pi, low=-np.pi)
            else:
                std = np.std(ary)
            return std / np.sqrt(len(ary))

        batched_traces = np.resize(ary, (batches, int(len(ary) / batches)))

        if circular:
            means = stats.circmean(batched_traces,
                                   high=np.pi,
                                   low=-np.pi,
                                   axis=1)
            std = stats.circstd(means, high=np.pi, low=-np.pi)
        else:
            means = np.mean(batched_traces, 1)
            std = np.std(means)

        return std / np.sqrt(batches)
Example #8
    def time_circ_std(self):
        try:
            data = np.random.randn(10000, 1000)
            import numba

            def _circfunc(samples, high, low):
                samples = np.asarray(samples)
                if samples.size == 0:
                    return np.nan, np.nan
                return samples, _angle(samples, low, high, np.pi)

            @numba.vectorize(nopython=True)
            def _angle(samples, low, high, pi=np.pi):
                ang = (samples - low) * 2.0 * pi / (high - low)
                return ang

            def _circular_standard_deviation(samples,
                                             high=2 * np.pi,
                                             low=0,
                                             axis=None):
                pi = np.pi
                samples, ang = _circfunc(samples, high, low)
                S = np.sin(ang).mean(axis=axis)
                C = np.cos(ang).mean(axis=axis)
                R = np.hypot(S, C)
                return ((high - low) / 2.0 / pi) * np.sqrt(-2 * np.log(R))

            return _circular_standard_deviation(data)
        except ImportError:
            data = np.random.randn(10000, 1000)
            return circstd(data)
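A minimal, numba-free sanity check that the closed form used above, ((high - low) / (2*pi)) * sqrt(-2*log(R)), agrees with scipy.stats.circstd (expected to print True with default settings):

import numpy as np
from scipy.stats import circstd

samples = np.random.default_rng(1).uniform(0, 2 * np.pi, size=1000)
S = np.sin(samples).mean()                 # low=0, high=2*pi, so no rescaling needed
C = np.cos(samples).mean()
R = np.hypot(S, C)
manual = np.sqrt(-2 * np.log(R))           # (high - low) / (2*pi) == 1 here
print(np.isclose(manual, circstd(samples)))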
Example #9
def get_average_objects(clusters, kind):
    """Create the average object out of a sequence of clusters.

    Parameters
    ----------
    clusters : sequence of pandas.DataFrames
        table with rows of markings (fans or blotches) to be averaged
    kind : {'fan', 'blotch}
        Switch to control the circularity for the average angle calculation.

    Returns
    -------
    Generator providing single row pandas.DataFrames with the average values
    """
    logger.debug("Averaging clusters.")
    for cluster_df in clusters:
        # first filter for outliers more than 1 std away
        # for
        # reduced = df[df.apply(lambda x: np.abs(x - x.mean()) / x.std() < 1).all(axis=1)]
        logger.debug("Averaging %i objects.", len(cluster_df))
        logger.debug("x.mean: %f", cluster_df.x.mean())
        logger.debug("y.mean: %f", cluster_df.y.mean())
        meandata = cluster_df.mean()
        # this determines the upper limit for circular mean
        high = 180 if kind == 'blotch' else 360
        meandata.angle = circmean(cluster_df.angle, high=high)
        meandata['angle_std'] = circstd(cluster_df.angle, high=high)
        meandata['n_votes'] = len(cluster_df)
        yield meandata.to_frame().T
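A small illustration (hypothetical angles, not marking data) of the `high` switch above: blotches are axial, so 179 deg and 1 deg describe nearly the same orientation, whereas fans are directional:

import numpy as np
from scipy.stats import circmean

angles = np.array([179.0, 1.0])
print(circmean(angles, high=180))   # ~0: treated as the same axis (blotch case)
print(circmean(angles, high=360))   # ~90: treated as two distinct directions (fan case)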
Example #10
    def calculate(self, cellNum, onLand):
        """
        Calculate the required statistics (mean, variance,
        autocorrelation and regularized anomaly coefficient) for the
        given cell.

        :param int cellNum: The cell number to process.
        :param boolean onLand: If ``True``, then the cell is (mostly
                               or entirely) over land. If ``False``,
                               the cell is over water.

        :returns: mean, standard deviation, autocorrelation, residual
                  correlation and the minimum parameter value.
        """

        p = self.extractParameter(cellNum, onLand)

        if self.angular:
            mu = circmean(np.radians(p))
            sig = circstd(np.radians(p))
        else:
            mu = np.mean(p)
            sig = np.std(p)

        # Calculate the autocorrelations:
        alphas = np.correlate(p, p, 'full')
        n = len(p)

        # Grab only the lag-one autocorrelation coeff.
        alpha = alphas[n] / alphas.max()
        alpha = acf(p)[-1]
        phi = np.sqrt(1 - alpha**2)
        mn = min(p)

        return mu, sig, alpha, phi, mn
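A minimal sketch (toy data) of the lag-one autocorrelation that `acf(p)[-1]` is assumed to supply above, together with the residual term phi:

import numpy as np

rng = np.random.default_rng(2)
p = np.cumsum(rng.normal(size=500))          # strongly autocorrelated toy series
alpha = np.corrcoef(p[:-1], p[1:])[0, 1]     # lag-one autocorrelation coefficient
phi = np.sqrt(1 - alpha ** 2)                # residual term as in calculate() above
print(alpha, phi)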
Example #11
def test_circular_standard_deviation_1d(data):
    high = 8
    low = 4
    assert np.allclose(
        _circular_standard_deviation(data, high=high, low=low),
        circstd(data, high=high, low=low),
    )
Example #12
    def test_circstd(self):
        """ Test custom circular std."""

        ref_std = scistats.circstd(self.test_angles, **self.circ_kwargs)
        test_std = pystats.nan_circstd(self.test_angles, **self.circ_kwargs)

        assert ref_std == test_std
Example #13
    def custom_augmentation_HSV(self, image):
        # Assumes input image is in RGB color space, and returns image in RGB space
        H_Scale = 1.0
        S_Scale = 1.0

        HSV_image = skimage.color.rgb2hsv(image)
        H = HSV_image[:, :, 0]
        H_rad = H * (2 * math.pi) - math.pi
        S = HSV_image[:, :, 1]
        V = HSV_image[:, :, 2]

        mean_H_rad = circmean(H_rad)
        std_H_rad = circstd(H_rad)
        mean_S = np.mean(S, axis=(0, 1))
        std_S = np.std(S, axis=(0, 1))

        H_rad_centered = np.angle(np.exp(1j * (H_rad - mean_H_rad)))
        H_rad_centered_augmented = H_rad_centered + np.random.normal(loc=0, scale=(H_Scale * std_H_rad))
        H_rad_augmented = np.angle(np.exp(1j * (H_rad_centered_augmented + mean_H_rad)))
        H_augmented = np.divide(H_rad_augmented + math.pi, 2 * math.pi)

        S_centered = S - mean_S
        S_centered_augmented = S_centered + np.random.normal(loc=0, scale=(S_Scale * std_S))
        S_augmented = S_centered_augmented + mean_S

        image_perturbed_HSV = np.empty(image.shape)
        image_perturbed_HSV[:, :, 0] = H_augmented
        image_perturbed_HSV[:, :, 1] = S_augmented
        image_perturbed_HSV[:, :, 2] = V

        image_perturbed = skimage.color.hsv2rgb(image_perturbed_HSV)
        image_perturbed = np.rint(np.clip(image_perturbed, 0, 255)).astype('uint8')
        image_perturbed = (image_perturbed - 193.09203) / (56.450138 + 1e-7)
        return image_perturbed
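A small demo of the wrapping trick used above: np.angle(np.exp(1j * theta)) maps any angle back into (-pi, pi], which keeps the perturbed hue on the circle:

import numpy as np

theta = np.array([3.5, -3.5, 7.0])      # angles that have drifted outside (-pi, pi]
print(np.angle(np.exp(1j * theta)))     # each value wrapped back into (-pi, pi]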
Example #14
    def test_circstd(self):
        """ Test custom circular std."""
        from scipy import stats

        ref_std = stats.circstd(self.test_angles, **self.circ_kwargs)
        test_std = pysat.utils.nan_circstd(self.test_angles, **self.circ_kwargs)
        ans1 = ref_std == test_std

        assert ans1
Example #15
    def particles_in_tollerance() -> bool:
        particles = particle_filter.get_particles()
        pose = particle_filter.get_current_pose()

        return (position_difference(pose, seed_pose) < pos_tol
                and angular_difference(pose, seed_pose) < heading_tol
                and np.std(particles[:, 0]) < pos_std_dev * 1.1
                and np.std(particles[:, 1]) < pos_std_dev * 1.1
                and circstd(particles[:, 2]) < heading_std_dev * 1.1)
Example #16
    def test_circstd(self):
        """ Test custom circular std."""
        from scipy import stats

        ref_std = stats.circstd(self.test_angles, **self.circ_kwargs)
        test_std = pysat.utils.nan_circstd(self.test_angles,
                                           **self.circ_kwargs)
        ans1 = ref_std == test_std

        assert ans1
Example #17
def HessianAnalysis(inmap,
                    pxksz=3,
                    mode='reflect',
                    nruns=1,
                    s_inmap=None,
                    mask=None):

    if np.logical_or(s_inmap is None, nruns < 2):

        output = HessianAnalysisLITE(inmap, pxksz=pxksz, mode=mode)
        return {
            'lplus': output['lplus'],
            's_lplus': np.nan,
            'lminus': output['lminus'],
            's_lminus': np.nan,
            'theta': output['theta'],
            's_theta': np.nan,
            'sima': output['sima']
        }

    else:

        sz = np.shape(inmap)
        arrlplus = np.zeros([nruns, sz[0], sz[1]])
        arrlminus = np.zeros([nruns, sz[0], sz[1]])
        arrtheta = np.zeros([nruns, sz[0], sz[1]])

        for i in range(0, nruns):

            inmaprand = np.random.normal(loc=inmap, scale=s_inmap + 0. * inmap)
            output = HessianAnalysisLITE(inmaprand, pxksz=pxksz, mode=mode)

            arrlplus[i] = output['lplus']
            arrlminus[i] = output['lminus']
            arrtheta[i] = output['theta']
            sima = output['sima']

        theta = circmean(arrtheta, axis=0, low=-np.pi / 2, high=np.pi / 2)
        s_theta = circstd(arrtheta, axis=0, low=-np.pi / 2, high=np.pi / 2)
        lminus = np.mean(arrlminus, axis=0)
        s_lminus = np.std(arrlminus, axis=0)
        lplus = np.mean(arrlplus, axis=0)
        s_lplus = np.std(arrlplus, axis=0)

        return {
            'lplus': lplus,
            's_lplus': s_lplus,
            'lminus': lminus,
            's_lminus': s_lminus,
            'theta': theta,
            's_theta': s_theta,
            'sima': sima
        }
Example #18
    def ClassicalDCF_calculation(self, correction_factor=1):
        """
        Bfield calculation for the classical DCF.

        Everything is calculated in CGS units.
        """
        mean_density = np.mean(self.density_data * (2.3 * 1.67e-24))
        velocity_dispersion = np.mean(self.velocity_data * 1e5)
        sigma_pol = stats.circstd(self.polarization_data, high=np.pi, low=0)

        return correction_factor * np.sqrt(
            4 * np.pi * mean_density) * (velocity_dispersion / sigma_pol)
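A hedged numeric sketch of the classical DCF estimate used above, B ~ correction_factor * sqrt(4*pi*rho) * sigma_v / sigma_theta in CGS units, with made-up inputs rather than the self.* data:

import numpy as np

rho = 1e4 * 2.3 * 1.67e-24            # g/cm^3, hypothetical mean density (n ~ 1e4 cm^-3)
sigma_v = 0.5 * 1e5                   # cm/s, hypothetical velocity dispersion (0.5 km/s)
sigma_theta = np.deg2rad(10.0)        # rad, hypothetical polarization-angle dispersion
B = np.sqrt(4 * np.pi * rho) * sigma_v / sigma_theta   # correction_factor = 1
print(B * 1e6, 'microgauss')          # around a couple hundred microgauss for these numbers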
Example #19
    def back_angle(df, CL):
        """add 0,1 column to find shovel's back position 
        find the mean and standard deviation of upper_body_position for shovel's front side
        find confidence interval for shovel's front side
        find the indexes at which showel swings back and returns forward
        :param: ES_Swg_Res_Cnt , ES_Sys_RHM_SAO_State, confidence level
        :return: dataframe, time of swinging back and, time of returning forward
        """
        df['upper_body_position'] = df.ES_Swg_Res_Cnt * 360 / 8192
        c0 = df.ES_Sys_RHM_SAO_State == 9.0
        c1 = df.ES_Sys_RHM_SAO_State == 5.0
        c2 = df.ES_Sys_RHM_SAO_State == 6.0
        #c3=df.ES_Sys_RHM_SAO_State ==30.0
        c4 = df.ES_Sys_RHM_SAO_State == 10.0
        #c5=df.ES_Sys_RHM_SAO_State ==8.0
        c6 = df.ES_Sys_RHM_SAO_State == 7.0
        c7 = df.ES_Sys_RHM_SAO_State == 11.0
        #dd=df[c0|c1|c2|c3|c4|c5|c6|c7]
        #dd=df[c0|c1|c2|c4|c6|c7]
        dd = df[c0 | c1 | c2]
        mu = circmean(dd['upper_body_position'], 360, 0)
        std = circstd(dd['upper_body_position'], 360, 0)
        conf_int = stats.norm.interval(CL, loc=mu, scale=std)
        f = conf_int[0]
        l = conf_int[1]

        if l > 360:
            l = l - 360
        if f < 0:
            f = 360 + f
            print(f)

        m1 = min(f, l)
        m2 = max(f, l)
        print(m1)
        print(m2)
        if (mu > m1) and (mu < m2):
            df['back_angle'] = np.where(
                np.logical_or(df['upper_body_position'] < m1,
                              df['upper_body_position'] > m2), 1, 0)
        else:
            df['back_angle'] = np.where(
                np.logical_and(df['upper_body_position'] < m2,
                               df['upper_body_position'] > m1), 1, 0)

        df['intoback'] = pd.DataFrame(df.back_angle.transpose().diff())
        df1 = df.copy()
        df1.reset_index(inplace=True, drop=False)
        goInside = df1.index[df1.intoback == 1]
        goOutside = df1.index[df1.intoback == -1]

        return df, goInside, goOutside
Example #20
    def np_hist(self, h, s, v, l, a, b):

        h_mean = circmean(h, high=180)
        h_sigma = circstd(h, high=180)

        s_mean, s_sigma = norm.fit(s)
        v_mean, v_sigma = norm.fit(v)

        l_mean, l_sigma = norm.fit(l)
        u_mean, u_sigma = norm.fit(a)
        lv_mean, lv_sigma = norm.fit(b)
        return (h_mean, h_sigma), (s_mean, s_sigma), (v_mean, v_sigma), (
            l_mean, l_sigma), (u_mean, u_sigma), (lv_mean, lv_sigma)
Example #21
    def test_circfuncs(self):
        x = np.array([355,5,2,359,10,350])
        M = stats.circmean(x, high=360)
        Mval = 0.167690146
        assert_allclose(M, Mval, rtol=1e-7)

        V = stats.circvar(x, high=360)
        Vval = 42.51955609
        assert_allclose(V, Vval, rtol=1e-7)

        S = stats.circstd(x, high=360)
        Sval = 6.520702116
        assert_allclose(S, Sval, rtol=1e-7)
Example #22
    def test_circfuncs_small(self):
        x = np.array([20,21,22,18,19,20.5,19.2])
        M1 = x.mean()
        M2 = stats.circmean(x, high=360)
        assert_allclose(M2, M1, rtol=1e-5)

        V1 = x.var()
        V2 = stats.circvar(x, high=360)
        assert_allclose(V2, V1, rtol=1e-4)

        S1 = x.std()
        S2 = stats.circstd(x, high=360)
        assert_allclose(S2, S1, rtol=1e-4)
Example #23
def test_circstats():
    x = np.array([355, 5, 2, 359, 10, 350])
    M = stats.circmean(x, high=360)
    Mval = 0.167690146
    assert_allclose(M, Mval, rtol=1e-7)

    V = stats.circvar(x, high=360)
    Vval = 42.51955609
    assert_allclose(V, Vval, rtol=1e-7)

    S = stats.circstd(x, high=360)
    Sval = 6.520702116
    assert_allclose(S, Sval, rtol=1e-7)
Example #24
def test_circstats_small():
    x = np.array([20, 21, 22, 18, 19, 20.5, 19.2])
    M1 = x.mean()
    M2 = stats.circmean(x, high=360)
    assert_allclose(M2, M1, rtol=1e-5)

    V1 = x.var()
    V2 = stats.circvar(x, high=360)
    assert_allclose(V2, V1, rtol=1e-4)

    S1 = x.std()
    S2 = stats.circstd(x, high=360)
    assert_allclose(S2, S1, rtol=1e-4)
Example #25
def calc_stats(df, which_place, which_picture):
    df_slice = np.array(
        df['number_response'][(df['place'] == which_place)
                              & (df['step1_picture'] == which_picture)])
    df_slice = np.deg2rad(df_slice)
    mean_result = np.round(
        np.rad2deg(stats.circmean(df_slice, low=-np.pi, high=np.pi)), 1)
    var_result = np.round(
        np.rad2deg(stats.circvar(df_slice, low=-np.pi, high=np.pi)), 1)
    std_result = np.round(
        np.rad2deg(stats.circstd(df_slice, low=-np.pi, high=np.pi)), 1)
    print(
        f'{which_place} ({which_picture}): {mean_result} (Mean); {var_result} (Var); {std_result} (STD)'
    )
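A minimal check (synthetic responses) of the degree-to-radian round trip in calc_stats: values near +/-180 degrees average near 180, not near the arithmetic mean:

import numpy as np
from scipy import stats

resp_deg = np.array([178.0, -179.0, 177.0])
resp_rad = np.deg2rad(resp_deg)
mean_deg = np.rad2deg(stats.circmean(resp_rad, low=-np.pi, high=np.pi))
print(np.round(mean_deg, 1))   # ~178.7, while the plain mean of resp_deg is ~58.7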
Example #26
def anglePlotter(angleDB, numbClasses, activation):
    groupedAngleDB = angleDB.groupby('class')
    classesByIncreasingAngles = groupedAngleDB.apply(lambda group: (np.degrees(st.circmean(group['angle'])))).sort_values().index
    # assert len(set(angleDB['class'])) == numbClasses
    numbCols = 3 if numbClasses > 2 else 2
    numbRows = (numbClasses - 1) // numbCols + 1
    fig = plt.figure()
    for classID, classColor in enumerate(classesByIncreasingAngles):
        group = groupedAngleDB.get_group(classColor)
        meanAngle, angleSTD = np.degrees(st.circmean(group['angle'])), np.degrees(st.circstd(group['angle']))
        ax = fig.add_subplot(numbRows, numbCols, classID+1)
        ax.hist(np.degrees(group['angle']), 8, density=True, histtype='bar', rwidth=0.8, color=classColor, edgecolor='k')
        ax.axvline(meanAngle, ls='--', c='k'); ax.set_yticks([])
        ax.set_title(r'$%.1f \pm %.1f$' % (meanAngle, angleSTD))
    fig.tight_layout(); plt.savefig(os.path.join('plotDir/', str(numbClasses), activation + '.angles.png'))
Example #27
def HOG_PRSlite(phi, weights=None):
    # Calculates the projected Rayleigh statistic of the distributions of angles phi.
    #
    # INPUTS
    # phi      - angles between -pi/2 and pi/2
    # weights  - statistical weights
    #
    # OUTPUTS
    # Zx       - value of the projected Rayleigh statistic
    # s_Zx     - uncertainty of Zx
    # meanphi  - circular mean of the input angles
    # (returned as a dict together with Z, s_Z, stdphi, and the mean resultant vector mrv)

    if weights is None:
        weights = np.ones_like(phi)

    angles = phi  #2.*phi

    circX = np.sum(weights * np.cos(angles)) / np.sum(weights)
    circY = np.sum(weights * np.sin(angles)) / np.sum(weights)
    mrv = np.sqrt(circX**2 + circY**2)

    Zx = np.sum(weights * np.cos(angles)) / np.sqrt(np.sum(weights) / 2.)
    #Zx=np.sum(np.cos(angles))/np.sqrt(np.size(angles)/2.)
    temp = np.sum(np.cos(angles) * np.cos(angles))
    s_Zx = np.sqrt((2. * temp - Zx * Zx) / np.size(angles))

    Zy = np.sum(weights * np.sin(angles)) / np.sqrt(np.sum(weights) / 2.)
    #Zy=np.sum(np.sin(angles))/np.sqrt(np.size(angles)/2.)
    temp = np.sum(np.sin(angles) * np.sin(angles))
    s_Zy = np.sqrt((2. * temp - Zy * Zy) / np.size(angles))

    Z = np.sqrt(Zx**2 + Zy**2)
    s_Z = np.sqrt(s_Zx**2 + s_Zy**2)

    meanphi = circmean(angles, low=-np.pi, high=np.pi)  #0.5*np.arctan2(Zy, Zx)
    stdphi = circstd(angles, low=-np.pi, high=np.pi)

    #import pdb; pdb.set_trace()
    #return Zx, s_Zx, meanPhi
    return {
        'Z': Z,
        's_Z': s_Z,
        'Zx': Zx,
        's_Zx': s_Zx,
        'meanphi': meanphi,
        'stdphi': stdphi,
        'mrv': mrv
    }
Example #28
    def do_iteration(self):
        mfq = np.nan
        mphases_ = []
        max_std_phases = 1.0
        its = 0
        while max_std_phases > self.stdp_limit and its < self.its_limit:
            out = self.run_sim()
            phase_dur, fl_phase_dur, ex_phase_dur, phases = self.calc_phase(
                self.time_vec, out, self.phase_diffs)
            if len(phase_dur) > 10:
                mphases_ = circmean(phases[-5:, :], 1.0, 0.0, 0)
                max_std_phases = np.max(circstd(phases[-5:, :], 1.0, 0.0, 0))
                mfq = 1.0 / np.nanmean(phase_dur[-5:])
            its += 1
        gaits = self.classify_gait_simple((ex_phase_dur / phase_dur)[-5:],
                                          phases[-5:, :])
        return (mfq, mphases_, gaits[-1:])
Example #29
    def compute_values(self, I_stream, Q_stream, phase_groundtruth):
        '''
        Compute phase error and noise
        '''
        # The original basic datapath applied to data
        measured_phase = -np.arctan2(Q_stream, I_stream)  # compute phase

        # Filter noise in phase images
        filtered_phase = []
        for image in measured_phase:
            filtered_phase.append(ndimage.gaussian_filter(image, 4))
        filtered_phase = np.asarray(filtered_phase)

        # Compute noise and error
        phase_error = np.mean(filtered_phase - phase_groundtruth, axis=0)
        phase_noise = circstd(filtered_phase, axis=0)
        return phase_error, phase_noise
Example #30
def doy_std(series):
    """
    Circular standard deviation of a pandas.Timestamp series, computed as day of year.

    Parameters
    ----------
    series: pandas.Series of pandas.Timestamp
        Series to analyse.

    Returns
    -------
    float:
        Standard deviation in units of days.
    """
    # Exclude NaT entries before computing the statistic
    series = series.loc[(~series.isnull())]

    return ss.circstd(series.apply(lambda x: x.dayofyear), low=1, high=365)
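A hedged usage sketch for doy_std: timestamps straddling the New Year boundary should give a spread of a few days, not something near half a year:

import pandas as pd

dates = pd.Series(pd.to_datetime(['2020-12-28', '2020-12-30', '2021-01-02', '2021-01-04']))
print(doy_std(dates))   # a few days, thanks to the circular treatment of day of year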
Example #31
def AngleAnalyzer_finegrid(sma, ang, loc, mass, snap):
    '''
    For particular perturber parameters, simulation, and snapshot, returns the circular
    spread of the disk angles over the base simulations.
    Parameters:
    sma: semi-major axis of perturber
    ang: inclination of perturber
    loc: base directory of perturber simulations
    mass: mass of perturber
    snap: snapshot out of the 500 orbits

    Returns: circular standard deviations (in degrees) for the disk over the base simulations'''
    redirs = directories(sma, ang, loc)[0]
    os.chdir(redirs[mass])
    bases = get_bases()
    circstds = []
    for i in range(len(bases)):
        circstds.append(stats.circstd(AngleAnalyzer_data_finegrid(sma, ang, loc, mass, snap, bases[i])))
    circstds = np.array(circstds) * (180 / np.pi)
    return circstds
Example #32
def plot_phase_information(ax, peak_pos, obs_phases, sim_phases):
    """ Plots the phase information for the detected peaks

    Parameters
    ----------
    For a description of the parameters see 'plot_panel_row'.

    """
    sim_mean_phases = circmean(sim_phases, axis=0)
    sim_phase_stds = circstd(sim_phases, axis=0)
    sim_phase_ranges = np.array(
        (sim_mean_phases - sim_phase_stds, sim_mean_phases + sim_phase_stds)).T
    lower_freq, upper_freq = ax.get_xlim()
    for freq, obs_phase, sim_phase, sim_phase_range in zip(
            peak_pos, obs_phases, sim_mean_phases, sim_phase_ranges):
        if lower_freq < freq < upper_freq:
            plot_phase_clock(get_theta(obs_phase), sim_phase, sim_phase_range,
                             freq, ax)
    return ax
Example #33
def serial_bias(prevcurr, error, window, step):
    xxx = np.arange(-np.pi, np.pi, step)
    m_err = []
    std_err = []
    for t in xxx:
        idx = (prevcurr >= t - window / 2) & (prevcurr < t + window / 2)
        if t - window / 2 < -np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr > np.pi -
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        if t + window / 2 > np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr < -np.pi +
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        m_err.append(sps.circmean(error[idx], low=-np.pi, high=np.pi))
        std_err.append(sps.circstd(error[idx]) / np.sqrt(np.sum(idx)))
    return np.array(m_err), np.array(std_err)
Example #34
def test_circstd_axis():
    x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]])

    S1 = stats.circstd(x, high=360)
    S2 = stats.circstd(x.ravel(), high=360)
    assert_allclose(S1, S2, rtol=1e-11)

    S1 = stats.circstd(x, high=360, axis=1)
    S2 = [stats.circstd(x[i], high=360) for i in range(x.shape[0])]
    assert_allclose(S1, S2, rtol=1e-11)

    S1 = stats.circstd(x, high=360, axis=0)
    S2 = [stats.circstd(x[:, i], high=360) for i in range(x.shape[1])]
    assert_allclose(S1, S2, rtol=1e-11)
Example #35
    def analyze(self, atype, periodic=True):
        """Analyzes the data held in numpy vector"""
        if not periodic:
            self.mean = self.values.mean()
            self.stdev = self.values.std()
            self.var = self.values.var()
        else:
            p_high = 180
            if atype == "angle":
                p_low = 0
            else:
                p_low = -180
            self.mean = circmean(self.values, low=p_low, high=p_high)
            self.stdev = circstd(self.values, low=p_low, high=p_high)
            self.var = circvar(self.values, low=p_low, high=p_high)

        # Analyze normality
        # Note: this will be broken for distributions that go over a period
        # To be fixed
        self.anderson = stats.anderson(self.values, dist='norm')
Example #36
def Scipy_CircStd(angleDict, low, high):
    angles = np.array(ExpandToIndividualParticles(angleDict))
    return stats.circstd(angles, low=low, high=high)
Example #37
    def test_empty(self):
        assert_(np.isnan(stats.circmean([])))
        assert_(np.isnan(stats.circstd([])))
        assert_(np.isnan(stats.circvar([])))
Example #38
    def test_circfuncs_array_like(self):
        x = [355, 5, 2, 359, 10, 350]
        assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
        assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
        assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
Example #39
def analyze_color(rgb_img, mask, hist_plot_type=None):
    """Analyze the color properties of an image object
    Inputs:
    rgb_img          = RGB image data
    mask             = Binary mask made from selected contours
    hist_plot_type   = 'None', 'all', 'rgb','lab' or 'hsv'
    
    Returns:
    analysis_image   = histogram output
    
    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param hist_plot_type: str
    :return analysis_images: list
    """

    params.device += 1

    if len(np.shape(rgb_img)) < 3:
        fatal_error("rgb_img must be an RGB image")

    # Mask the input image
    masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
    # Extract the blue, green, and red channels
    b, g, r = cv2.split(masked)
    # Convert the BGR image to LAB
    lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
    # Extract the lightness, green-magenta, and blue-yellow channels
    l, m, y = cv2.split(lab)
    # Convert the BGR image to HSV
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    # Extract the hue, saturation, and value channels
    h, s, v = cv2.split(hsv)

    # Color channel dictionary
    channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}

    # Histogram plot types
    hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
                  "RGB": ("b", "g", "r"),
                  "LAB": ("l", "m", "y"),
                  "HSV": ("h", "s", "v")}

    if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
        fatal_error("The histogram plot type was " + str(hist_plot_type) +
                    ', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
    # Store histograms, plotting colors, and plotting labels
    histograms = {
        "b": {"label": "blue", "graph_color": "blue",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
        "g": {"label": "green", "graph_color": "forestgreen",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
        "r": {"label": "red", "graph_color": "red",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
        "l": {"label": "lightness", "graph_color": "dimgray",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
        "m": {"label": "green-magenta", "graph_color": "magenta",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
        "y": {"label": "blue-yellow", "graph_color": "yellow",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
        "h": {"label": "hue", "graph_color": "blueviolet",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
        "s": {"label": "saturation", "graph_color": "cyan",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
        "v": {"label": "value", "graph_color": "orange",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
    }

    # Create list of bin labels for 8-bit data
    binval = np.arange(0, 256)
    bin_values = [l for l in binval]

    analysis_images = []
    # Create a dataframe of bin labels and histogram data
    dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
                            'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
                            'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
                            'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
                            'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})

    # Make the histogram figure using plotnine
    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB':
            df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blue', 'green', 'red'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'LAB':
            df_lab = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['lightness', 'green-magenta', 'blue-yellow'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['yellow', 'magenta', 'dimgray'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'HSV':
            df_hsv = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['hue', 'saturation', 'value'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blueviolet', 'cyan', 'orange'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'ALL':
            s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
                           'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
            color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
                              'dimgray', 'red', 'cyan', 'orange']
            df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(color_channels)
                        )
            analysis_images.append(hist_fig)

    # Hue values of zero are red but are also the value for pixels where hue is undefined
    # The hue value of a pixel will be undefined when the color values are saturated
    # Therefore, hue values of zero are excluded from the calculations below

    # Calculate the median hue value
    # The median is rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_median = np.median(h[np.where(h > 0)]) * 2

    # Calculate the circular mean and standard deviation of the encoded hue values
    # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
    hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2

    # Store into lists instead for pipeline and print_results
    # stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}

    # Plot or print the histogram
    if hist_plot_type is not None:
        if params.debug == 'print':
            hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
        elif params.debug == 'plot':
            print(hist_fig)

    # Store into global measurements
    # RGB signal values are in an unsigned 8-bit scale of 0-255
    rgb_values = [i for i in range(0, 256)]
    # Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
    hue_values = [i * 2 + 1 for i in range(0, 180)]
    # Percentage values on a 0-100 scale (lightness, saturation, and value)
    percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
    # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
    diverging_values = [i for i in range(-128, 128)]
    # outputs.measurements['color_data'] = {
    #     'histograms': {
    #         'blue': {'signal_values': rgb_values, 'frequency': histograms["b"]["hist"]},
    #         'green': {'signal_values': rgb_values, 'frequency': histograms["g"]["hist"]},
    #         'red': {'signal_values': rgb_values, 'frequency': histograms["r"]["hist"]},
    #         'lightness': {'signal_values': percent_values, 'frequency': histograms["l"]["hist"]},
    #         'green-magenta': {'signal_values': diverging_values, 'frequency': histograms["m"]["hist"]},
    #         'blue-yellow': {'signal_values': diverging_values, 'frequency': histograms["y"]["hist"]},
    #         'hue': {'signal_values': hue_values, 'frequency': histograms["h"]["hist"]},
    #         'saturation': {'signal_values': percent_values, 'frequency': histograms["s"]["hist"]},
    #         'value': {'signal_values': percent_values, 'frequency': histograms["v"]["hist"]}
    #     },
    #     'color_features': {
    #         'hue_circular_mean': hue_circular_mean,
    #         'hue_circular_std': hue_circular_std,
    #         'hue_median': hue_median
    #     }
    # }
    outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["b"]["hist"], label=rgb_values)
    outputs.add_observation(variable='green_frequencies', trait='green frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["g"]["hist"], label=rgb_values)
    outputs.add_observation(variable='red_frequencies', trait='red frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["r"]["hist"], label=rgb_values)
    outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["l"]["hist"], label=percent_values)
    outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["m"]["hist"], label=diverging_values)
    outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["y"]["hist"], label=diverging_values)
    outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["h"]["hist"], label=hue_values)
    outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["s"]["hist"], label=percent_values)
    outputs.add_observation(variable='value_frequencies', trait='value frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["v"]["hist"], label=percent_values)
    outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_mean, label='degrees')
    outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_std, label='degrees')
    outputs.add_observation(variable='hue_median', trait='hue median',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_median, label='degrees')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
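A small, self-contained demo (synthetic hue values, not image data) of the encoded-hue statistics computed above: OpenCV stores hue on 0-179, and multiplying the circular mean and standard deviation by 2 rescales them to degrees:

import numpy as np
from scipy import stats

h_enc = np.array([2, 3, 177, 178])                  # reds on either side of the wrap
print(stats.circmean(h_enc, high=179, low=0) * 2)   # near 0/360 degrees
print(stats.circstd(h_enc, high=179, low=0) * 2)    # small spread, in degrees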