def Scipy_CircMean(angleDict, low=0, high=0):
	anglesFullList = np.array(ExpandToIndividualParticles(angleDict))
	if low == 0 and high == 0:
		# fall back to scipy's default range when no bounds are given
		mean = stats.circmean(anglesFullList)
	else:
		mean = stats.circmean(anglesFullList, high=high, low=low)
	return mean
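A minimal usage sketch of the underlying scipy call (the angle values below are made up; ExpandToIndividualParticles is assumed to flatten the dictionary into one angle per particle):

import numpy as np
from scipy import stats

angles_deg = np.array([350.0, 5.0, 10.0, 355.0])
# circmean wraps across the 0/360 boundary, giving ~0 degrees,
# whereas the plain arithmetic mean of these values would be 180.
print(stats.circmean(angles_deg, low=0, high=360))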
Example #2
def circ_corr(alpha1, alpha2):
    """Helper to compute the circular correlation."""
    alpha1_bar = stats.circmean(alpha1)
    alpha2_bar = stats.circmean(alpha2)
    num = np.sum(np.sin(alpha1 - alpha1_bar) * np.sin(alpha2 - alpha2_bar))
    den = np.sqrt(np.sum(np.sin(alpha1 - alpha1_bar) ** 2) * np.sum(np.sin(alpha2 - alpha2_bar) ** 2))
    rho = num / den
    return rho
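A hedged usage sketch for the helper above (synthetic data; strongly correlated angles give rho close to 1):

import numpy as np
from scipy import stats  # circ_corr above relies on stats.circmean

rng = np.random.default_rng(0)
alpha1 = rng.uniform(0, 2 * np.pi, size=100)
alpha2 = (alpha1 + rng.normal(0, 0.3, size=100)) % (2 * np.pi)
print(circ_corr(alpha1, alpha2))  # expect a value close to 1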
Example #3
    def test_circmean_nan(self):
        """ Test custom circular mean with NaN."""
        from scipy import stats

        ref_mean = stats.circmean(self.test_angles, **self.circ_kwargs)
        ref_nan = stats.circmean(self.test_nan, **self.circ_kwargs)
        test_nan = pysat.utils.nan_circmean(self.test_nan, **self.circ_kwargs)

        assert np.isnan(ref_nan)
        assert ref_mean == test_nan
Example #4
def get_average_objects(clusters, kind):
    """Create the average object out of a sequence of clusters.

    Parameters
    ----------
    clusters : sequence of pandas.DataFrames
        table with rows of markings (fans or blotches) to be averaged
    kind : {'fan', 'blotch'}
        Switch to control the circularity for the average angle calculation.

    Returns
    -------
    Generator providing single row pandas.DataFrames with the average values
    """
    logger.debug("Averaging clusters.")
    for cluster_df in clusters:
        # A possible outlier filter (dropping rows more than 1 std away) is left disabled:
        # reduced = df[df.apply(lambda x: np.abs(x - x.mean()) / x.std() < 1).all(axis=1)]
        logger.debug("Averaging %i objects.", len(cluster_df))
        logger.debug("x.mean: %f", cluster_df.x.mean())
        logger.debug("y.mean: %f", cluster_df.y.mean())
        meandata = cluster_df.mean()
        # this determines the upper limit for circular mean
        high = 180 if kind == 'blotch' else 360
        meandata.angle = circmean(cluster_df.angle, high=high)
        meandata['angle_std'] = circstd(cluster_df.angle, high=high)
        meandata['n_votes'] = len(cluster_df)
        yield meandata.to_frame().T
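A small sketch of the underlying scipy calls (hypothetical blotch angles in degrees; blotches are treated as 180-degree periodic, fans as 360-degree periodic):

import numpy as np
from scipy.stats import circmean, circstd

blotch_angles = np.array([175.0, 2.0, 178.0, 5.0])
# With high=180 the values cluster around the 0/180 wrap point,
# so the circular mean is ~0 rather than the arithmetic mean of 90.
print(circmean(blotch_angles, high=180), circstd(blotch_angles, high=180))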
Example #5
    def test_circmean(self):
        """ Test custom circular mean."""
        from scipy import stats

        ref_mean = stats.circmean(self.test_angles, **self.circ_kwargs)
        test_mean = pysat.utils.nan_circmean(self.test_angles,
                                             **self.circ_kwargs)
        ans1 = ref_mean == test_mean

        assert ans1
Example #6
def get_average_object(df, kind):
    "Create the average object out of a cluster of data."
    # A possible outlier filter (dropping rows more than 1 std away) is left disabled:
    # reduced = df[df.apply(lambda x: np.abs(x - x.mean()) / x.std() < 1).all(axis=1)]
    meandata = df.mean()
    # this determines the upper limit for circular mean
    high = 180 if kind == 'blotch' else 360
    avg = circmean(df.angle, high=high)
    meandata.angle = avg
    return meandata
Example #7
    def test_circfuncs_small(self):
        x = np.array([20,21,22,18,19,20.5,19.2])
        M1 = x.mean()
        M2 = stats.circmean(x, high=360)
        assert_allclose(M2, M1, rtol=1e-5)

        V1 = x.var()
        V2 = stats.circvar(x, high=360)
        assert_allclose(V2, V1, rtol=1e-4)

        S1 = x.std()
        S2 = stats.circstd(x, high=360)
        assert_allclose(S2, S1, rtol=1e-4)
Example #8
    def test_circfuncs(self):
        x = np.array([355,5,2,359,10,350])
        M = stats.circmean(x, high=360)
        Mval = 0.167690146
        assert_allclose(M, Mval, rtol=1e-7)

        V = stats.circvar(x, high=360)
        Vval = 42.51955609
        assert_allclose(V, Vval, rtol=1e-7)

        S = stats.circstd(x, high=360)
        Sval = 6.520702116
        assert_allclose(S, Sval, rtol=1e-7)
Example #9
def mean_longitude(longitudes):
    """
    Compute sample mean longitude, assuming longitude in degrees from -180 to
    180.

    >>> lons = (-170.5, -178.3, 166)
    >>> np.mean(lons)  # doctest: +SKIP
    -60.933
    >>> mean_longitude(lons)  # doctest: +ELLIPSIS
    179.08509...

    :type longitudes: :class:`~numpy.ndarray` (or list, ..)
    :param longitudes: Geographical longitude values ranging from -180 to 180
        in degrees.
    """
    return circmean(np.array(longitudes), low=-180, high=180)
Example #10
def test_circmean_axis():
    x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349], [357, 9, 8, 358, 4, 356]])
    M1 = stats.circmean(x, high=360)
    M2 = stats.circmean(x.ravel(), high=360)
    assert_allclose(M1, M2, rtol=1e-14)

    M1 = stats.circmean(x, high=360, axis=1)
    M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
    assert_allclose(M1, M2, rtol=1e-14)

    M1 = stats.circmean(x, high=360, axis=0)
    M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
    assert_allclose(M1, M2, rtol=1e-14)
Example #11
def smoother_blockCircMean(dt,y,output_dates,date_incr,mode='l'):
    means = []
    if isinstance(y,list):
        y = np.array(y)
    # convert to radians
    y = np.radians(y)
    for i in range(len(output_dates)):
        # check if more than 50% of values are valid
        # if so compute mean
        if mode == 'l':
            idx = find_included_times(dt,\
                                  sdate = output_dates[i] - \
                                          timedelta(hours=date_incr),\
                                  edate = output_dates[i], twin=0)
        elif mode == 'c':
            idx = find_included_times(dt,\
                                  sdate = output_dates[i] - \
                                          timedelta(hours=date_incr/2.),\
                                  edate = output_dates[i] +
                                          timedelta(hours=date_incr/2.),\
                                  twin=0)
        elif mode == 'r':
            idx = find_included_times(dt,\
                                  sdate = output_dates[i],
                                  edate = output_dates[i] + \
                                          timedelta(hours=date_incr),\
                                  twin=0)
        block = y[idx]
        numerator = len(block[np.isnan(block)])
        denominator = len(block)
        if denominator == 0:
            ratio = 1
        else:
            ratio = numerator / float(denominator)
        if ratio < 0.5:
            #means.append(circmean(block[~np.isnan(block)]))
            means.append(circmean(block,nan_policy='omit'))
        else:
            means.append(np.nan)
    means = np.array(means)
    # convert back to degrees
    means = np.degrees(means)
    return means
Example #12
def _balloon_color_data(tri, data, itype):
    """Return the data array that is to be mapped to the colormap of the
    balloon.

    Parameters
    ----------
    tri : Triangulation
        The matplotlib triangulation for the sphere
    data : ndarray, double, complex double
        The data array
    itype : 'magnitude', 'phase', 'amplitude'
        Whether to plot magnitude levels or the phase.

    Returns
    -------
    color_data : ndarray, double
        The data array for the colormap.
    vmin : double
        The minimum of the color data

    vmax : double
        The maximum of the color data


    """
    if itype == 'phase':
        cdata = np.mod(np.angle(data), 2 * np.pi)
        vmin = 0
        vmax = 2 * np.pi
        colors = circmean(cdata[tri.triangles], axis=1)
    elif itype == 'magnitude':
        cdata = np.abs(data)
        vmin = np.min(cdata)
        vmax = np.max(cdata)
        colors = np.mean(cdata[tri.triangles], axis=1)
    elif itype == 'amplitude':
        vmin = np.min(data)
        vmax = np.max(data)
        colors = np.mean(data[tri.triangles], axis=1)
    else:
        raise ValueError("Invalid type of data mapping.")

    return colors, vmin, vmax
Example #13
def plot_phase_information(ax, peak_pos, obs_phases, sim_phases):
    """ Plots the phase information for the detected peaks

    Parameters
    ----------
    For a description of the parameters see 'plot_panel_row'.

    """
    sim_mean_phases = circmean(sim_phases, axis=0)
    sim_phase_stds = circstd(sim_phases, axis=0)
    sim_phase_ranges = np.array(
        (sim_mean_phases - sim_phase_stds, sim_mean_phases + sim_phase_stds)).T
    lower_freq, upper_freq = ax.get_xlim()
    for freq, obs_phase, sim_phase, sim_phase_range in zip(
            peak_pos, obs_phases, sim_mean_phases, sim_phase_ranges):
        if lower_freq < freq < upper_freq:
            plot_phase_clock(get_theta(obs_phase), sim_phase, sim_phase_range,
                             freq, ax)
    return ax
Example #14
def serial_bias(prevcurr, error, window, step):
    xxx = np.arange(-np.pi, np.pi, step)
    m_err = []
    std_err = []
    for t in xxx:
        idx = (prevcurr >= t - window / 2) & (prevcurr < t + window / 2)
        if t - window / 2 < -np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr > np.pi -
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        if t + window / 2 > np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr < -np.pi +
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        m_err.append(sps.circmean(error[idx], low=-np.pi, high=np.pi))
        std_err.append(sps.circstd(error[idx]) / np.sqrt(np.sum(idx)))
    return np.array(m_err), np.array(std_err)
Example #15
def mean_longitude(longitudes):
    """
    Compute sample mean longitude, assuming longitude in degrees from -180 to
    180.

    >>> lons = (-170.5, -178.3, 166)
    >>> np.mean(lons)  # doctest: +SKIP
    -60.933
    >>> mean_longitude(lons)  # doctest: +ELLIPSIS
    179.08509...

    :type longitudes: :class:`~numpy.ndarray` (or list, ..)
    :param longitudes: Geographical longitude values ranging from -180 to 180
        in degrees.
    """
    from scipy.stats import circmean
    mean_longitude = circmean(np.array(longitudes), low=-180, high=180)
    mean_longitude = _normalize_longitude(mean_longitude)
    return mean_longitude
def AngleAnalyzer_finegrid(sma,ang,loc,mass,snap):
    '''
    Function which gets the mean ie values over all four simulations for a particular set of perturber parameters.
    Parameters:
    sma: semi-major axis of perturber
    ang: inclination of perturber
    loc: base directory of perturber simulations
    mass: mass of perturber
    snap: Snapshot out of the 500 orbits
    
    Returns: The circular mean of ie for stars in the disk for the four simulations'''
    redirs=directories(sma,ang,loc)[0]
    os.chdir(redirs[mass])
    bases=get_bases()
    circmeans=[]
    for i in range(len(bases)):
        circmeans.append(stats.circmean(AngleAnalyzer_data_finegrid(sma,ang,loc,mass,snap,bases[i])))
    circmeans=np.array(circmeans)*(180/pi)
    return circmeans
Example #17
def test_circmean_axis():
    x = np.array([[355, 5, 2, 359, 10, 350], [351, 7, 4, 352, 9, 349],
                  [357, 9, 8, 358, 4, 356]])
    M1 = stats.circmean(x, high=360)
    M2 = stats.circmean(x.ravel(), high=360)
    assert_allclose(M1, M2, rtol=1e-14)

    M1 = stats.circmean(x, high=360, axis=1)
    M2 = [stats.circmean(x[i], high=360) for i in range(x.shape[0])]
    assert_allclose(M1, M2, rtol=1e-14)

    M1 = stats.circmean(x, high=360, axis=0)
    M2 = [stats.circmean(x[:, i], high=360) for i in range(x.shape[1])]
    assert_allclose(M1, M2, rtol=1e-14)
Example #18
    def analyze(self, atype, periodic=True):
        """Analyzes the data held in numpy vector"""
        if not periodic:
            self.mean = self.values.mean()
            self.stdev = self.values.std()
            self.var = self.values.var()
        else:
            p_high = 180
            if atype == "angle":
                p_low = 0
            else:
                p_low = -180
            self.mean = circmean(self.values, low=p_low, high=p_high)
            self.stdev = circstd(self.values, low=p_low, high=p_high)
            self.var = circvar(self.values, low=p_low, high=p_high)

        # Analyze normality
        # Note: this will be broken for distributions that go over a period
        # To be fixed
        self.anderson = stats.anderson(self.values, dist='norm')
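For illustration, a sketch of the periodic branch using scipy directly (made-up dihedral-like values in the [-180, 180] range):

import numpy as np
from scipy.stats import circmean, circstd, circvar

values = np.array([-178.0, 179.0, -175.0, 176.0])
low, high = -180, 180
# The circular mean lands near +/-180; a plain average would give ~0.5.
print(circmean(values, low=low, high=high))
print(circstd(values, low=low, high=high))
print(circvar(values, low=low, high=high))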
Example #19
def mean_longitude(longitudes):
    """
    Compute sample mean longitude, assuming longitude in degrees from -180 to
    180.

    >>> lons = (-170.5, -178.3, 166)
    >>> np.mean(lons)  # doctest: +SKIP
    -60.933
    >>> mean_longitude(lons)  # doctest: +ELLIPSIS
    179.08509...

    :type longitudes: :class:`~numpy.ndarray` (or list, ..)
    :param longitudes: Geographical longitude values ranging from -180 to 180
        in degrees.
    """
    mean_longitude = circmean(np.array(longitudes), low=-180, high=180)
    while mean_longitude < -180:
        mean_longitude += 360
    while mean_longitude > 180:
        mean_longitude -= 360
    return mean_longitude
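An equivalent closed-form normalization (a sketch, not from the source; note it maps exactly 180 to -180):

def _wrap_longitude(lon):
    # ((lon + 180) % 360) - 180 folds any angle into [-180, 180)
    return ((lon + 180.0) % 360.0) - 180.0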
Example #20
    def update_panel_heading(self, t, panel_jump, gain_yaw, open_loop,
                             open_loop_value):
        if open_loop == 1:
            self.panel_heading = open_loop_value
        else:  #if it is closed-loop
            self.panel_heading = (self.panel_heading + self.velheading *
                                  gain_yaw + panel_jump) % 360
            # closed loop: new heading = previous heading + (change in animal heading * yaw gain) + panel jump, wrapped to [0, 360)

        self.time_list.append(self.time)
        self.panel_heading_list.append(self.panel_heading)
        if self.count >= 2:
            self.goal_heading = 360 * circmean(
                [x * 2 * np.pi / 360
                 for x in self.panel_heading_list]) / 2 / np.pi
        self.count += 1

        # Cull old data points
        while (t - self.time_list[0]) > self.time_window:
            self.time_list.pop(0)
            self.panel_heading_list.pop(0)
Example #21
def folded_bias(prevcurr, error, window, step):
    xxx = np.arange(-np.pi, np.pi, step)
    t_err = []
    err = []
    for t in xxx:
        idx = (prevcurr >= t - window / 2) & (prevcurr < t + window / 2)
        if t - window / 2 < -np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr > np.pi -
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        if t + window / 2 > np.pi:
            idx = (prevcurr >= t - window / 2) & (
                prevcurr < t + window / 2) | (prevcurr < -np.pi +
                                              (window / 2 -
                                               (np.pi - np.abs(t))))
        t_err.append(list(error[idx]))
    for t in reversed(range(int(len(xxx) / 2))):
        err.append([x * -1 for x in t_err[t]] + t_err[-t - 1])
    m_err = [sps.circmean(x, low=-np.pi, high=np.pi) for x in err]
    se_err = [sps.circstd(x) / np.sqrt(len(x)) for x in err]
    return np.array(m_err), np.array(se_err)
Example #22
def mean_time(times, kind='time'):
    """Compute the circular mean of a collection of times of day.

    Args:
        times: Times to average; converted internally to angles on a 24-hour circle.
        kind (str, optional): Output format, one of 'time', 'angles', 'datetime',
            or 'timedelta'. Defaults to 'time'.

    Returns:
        The circular mean expressed in the requested format.
    """
    day = 24 * 60**2
    angles = times_to_angles(times, day)
    mean_angle = circmean(angles, high=360)
    if kind == 'time':
        return dt.time(
            *angles_to_time(mean_angle))  #np.timedelta64(sd_seconds,'s')
    if kind == 'angles':
        return mean_angle
    if kind == 'datetime':
        return dt.datetime(2021, 1, 1, *angles_to_time(mean_angle))
    if kind == 'timedelta':
        return dt.timedelta(seconds=(mean_angle * day) / 360)
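The times_to_angles/angles_to_time helpers are not shown here; a self-contained sketch of the same idea, assuming times given as seconds since midnight:

import numpy as np
from scipy.stats import circmean

day = 24 * 60 ** 2
# 23:30 and 00:30 as seconds since midnight; the circular mean is midnight,
# not 12:00 as a naive average of the two values would suggest.
seconds = np.array([23.5 * 3600, 0.5 * 3600])
print(circmean(seconds, high=day) / 3600)  # ~0.0 hours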
	def test_BasicSimpleMeanStartEdgeCrossing(self): # goes on the left side from origin, up to 349 degrees
		mymean =  round(StaCircStats.Sta_CircWMean({6:1,344:3}, low=0, high=360),4)
		scimean = round(stats.circmean(samples=np.array([6,344,344, 344]), high=360, low=0),4)
		self.assertEqual(scimean, mymean)
Example #24
def circ_corrcc(x, y, tail='two-sided', correction_uniform=False):
    """Correlation coefficient between two circular variables.

    Parameters
    ----------
    x : np.array
        First circular variable (expressed in radians)
    y : np.array
        Second circular variable (expressed in radians)
    tail : string
        Specify whether to return 'one-sided' or 'two-sided' p-value.
    correction_uniform : bool
        Use correction for uniform marginals

    Returns
    -------
    r : float
        Correlation coefficient
    pval : float
        Uncorrected p-value

    Notes
    -----
    Adapted from the CircStats MATLAB toolbox (Berens 2009).

    Use the :py:func:`numpy.deg2rad` function to convert angles from degrees
    to radians.

    Please note that NaN are automatically removed.

    If the ``correction_uniform`` is True, an alternative equation from
    Jammalamadaka & Sengupta (2001, p. 177) is used.
    If the marginal distribution of ``x`` or ``y`` is uniform, the mean is
    not well defined, which leads to wrong estimates of the circular
    correlation. The alternative equation corrects for this by choosing the
    means in a way that maximizes the positive or negative correlation.

    References
    ----------
    .. [1] Berens, P. (2009). CircStat: A MATLAB Toolbox for Circular
           Statistics. Journal of Statistical Software, Articles, 31(10), 1–21.
           https://doi.org/10.18637/jss.v031.i10

    .. [2] Jammalamadaka, S. R., & Sengupta, A. (2001). Topics in circular
           statistics (Vol. 5). world scientific.

    Examples
    --------
    Compute the r and p-value of two circular variables

    >>> from pingouin import circ_corrcc
    >>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
    >>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
    >>> r, pval = circ_corrcc(x, y)
    >>> print(r, pval)
    0.942 0.06579836070349088

    With the correction for uniform marginals

    >>> r, pval = circ_corrcc(x, y, correction_uniform=True)
    >>> print(r, pval)
    0.547 0.28585306869206784
    """
    from scipy.stats import norm
    x = np.asarray(x)
    y = np.asarray(y)

    # Check size
    if x.size != y.size:
        raise ValueError('x and y must have the same length.')

    # Remove NA
    x, y = remove_na(x, y, paired=True)
    n = x.size

    # Compute correlation coefficient
    x_sin = np.sin(x - circmean(x))
    y_sin = np.sin(y - circmean(y))

    if not correction_uniform:
        # Similar to np.corrcoef(x_sin, y_sin)[0][1]
        r = np.sum(x_sin * y_sin) / np.sqrt(
            np.sum(x_sin**2) * np.sum(y_sin**2))
    else:
        r_minus = np.abs(np.sum(np.exp((x - y) * 1j)))
        r_plus = np.abs(np.sum(np.exp((x + y) * 1j)))
        denom = 2 * np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2))
        r = (r_minus - r_plus) / denom

    # Compute T- and p-values
    tval = np.sqrt((n * (x_sin**2).mean() *
                    (y_sin**2).mean()) / np.mean(x_sin**2 * y_sin**2)) * r

    # Approximately distributed as a standard normal
    pval = 2 * norm.sf(abs(tval))
    pval = pval / 2 if tail == 'one-sided' else pval
    return np.round(r, 3), pval
Example #25
def analyze_color(rgb_img, mask, hist_plot_type=None):
    """Analyze the color properties of an image object
    Inputs:
    rgb_img          = RGB image data
    mask             = Binary mask made from selected contours
    hist_plot_type   = None, 'all', 'rgb', 'lab', or 'hsv'

    Returns:
    analysis_images  = list of histogram output figures
    
    :param rgb_img: numpy.ndarray
    :param mask: numpy.ndarray
    :param hist_plot_type: str
    :return analysis_images: list
    """

    params.device += 1

    if len(np.shape(rgb_img)) < 3:
        fatal_error("rgb_img must be an RGB image")

    # Mask the input image
    masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
    # Extract the blue, green, and red channels
    b, g, r = cv2.split(masked)
    # Convert the BGR image to LAB
    lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
    # Extract the lightness, green-magenta, and blue-yellow channels
    l, m, y = cv2.split(lab)
    # Convert the BGR image to HSV
    hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
    # Extract the hue, saturation, and value channels
    h, s, v = cv2.split(hsv)

    # Color channel dictionary
    channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}

    # Histogram plot types
    hist_types = {"ALL": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
                  "RGB": ("b", "g", "r"),
                  "LAB": ("l", "m", "y"),
                  "HSV": ("h", "s", "v")}

    if hist_plot_type is not None and hist_plot_type.upper() not in hist_types:
        fatal_error("The histogram plot type was " + str(hist_plot_type) +
                    ', but can only be one of the following: None, "all", "rgb", "lab", or "hsv"!')
    # Store histograms, plotting colors, and plotting labels
    histograms = {
        "b": {"label": "blue", "graph_color": "blue",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["b"]], [0], mask, [256], [0, 255])]},
        "g": {"label": "green", "graph_color": "forestgreen",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["g"]], [0], mask, [256], [0, 255])]},
        "r": {"label": "red", "graph_color": "red",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["r"]], [0], mask, [256], [0, 255])]},
        "l": {"label": "lightness", "graph_color": "dimgray",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["l"]], [0], mask, [256], [0, 255])]},
        "m": {"label": "green-magenta", "graph_color": "magenta",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["m"]], [0], mask, [256], [0, 255])]},
        "y": {"label": "blue-yellow", "graph_color": "yellow",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["y"]], [0], mask, [256], [0, 255])]},
        "h": {"label": "hue", "graph_color": "blueviolet",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["h"]], [0], mask, [256], [0, 255])]},
        "s": {"label": "saturation", "graph_color": "cyan",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["s"]], [0], mask, [256], [0, 255])]},
        "v": {"label": "value", "graph_color": "orange",
              "hist": [float(l[0]) for l in cv2.calcHist([channels["v"]], [0], mask, [256], [0, 255])]}
    }

    # Create list of bin labels for 8-bit data
    binval = np.arange(0, 256)
    bin_values = [l for l in binval]

    analysis_images = []
    # Create a dataframe of bin labels and histogram data
    dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
                            'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
                            'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
                            'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
                            'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})

    # Make the histogram figure using plotnine
    if hist_plot_type is not None:
        if hist_plot_type.upper() == 'RGB':
            df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_rgb, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blue', 'green', 'red'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'LAB':
            df_lab = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['lightness', 'green-magenta', 'blue-yellow'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_lab, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['yellow', 'magenta', 'dimgray'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'HSV':
            df_hsv = pd.melt(dataset, id_vars=['bins'],
                             value_vars=['hue', 'saturation', 'value'],
                             var_name='Color Channel', value_name='Pixels')
            hist_fig = (ggplot(df_hsv, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(['blueviolet', 'cyan', 'orange'])
                        )
            analysis_images.append(hist_fig)

        elif hist_plot_type.upper() == 'ALL':
            s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
                           'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
            color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
                              'dimgray', 'red', 'cyan', 'orange']
            df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='Color Channel',
                             value_name='Pixels')
            hist_fig = (ggplot(df_all, aes(x='bins', y='Pixels', color='Color Channel'))
                        + geom_line()
                        + scale_x_continuous(breaks=list(range(0, 256, 25)))
                        + scale_color_manual(color_channels)
                        )
            analysis_images.append(hist_fig)

    # Hue values of zero are red but are also the value for pixels where hue is undefined
    # The hue value of a pixel will be undefined when the color values are saturated
    # Therefore, hue values of zero are excluded from the calculations below

    # Calculate the median hue value
    # The median is rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_median = np.median(h[np.where(h > 0)]) * 2

    # Calculate the circular mean and standard deviation of the encoded hue values
    # The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
    hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
    hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2

    # Store into lists instead for pipeline and print_results
    # stats_dict = {'mean': circular_mean, 'std' : circular_std, 'median': median}

    # Plot or print the histogram
    if hist_plot_type is not None:
        if params.debug == 'print':
            hist_fig.save(os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
        elif params.debug == 'plot':
            print(hist_fig)

    # Store into global measurements
    # RGB signal values are in an unsigned 8-bit scale of 0-255
    rgb_values = [i for i in range(0, 256)]
    # Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
    hue_values = [i * 2 + 1 for i in range(0, 180)]
    # Percentage values on a 0-100 scale (lightness, saturation, and value)
    percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
    # Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
    diverging_values = [i for i in range(-128, 128)]
    # outputs.measurements['color_data'] = {
    #     'histograms': {
    #         'blue': {'signal_values': rgb_values, 'frequency': histograms["b"]["hist"]},
    #         'green': {'signal_values': rgb_values, 'frequency': histograms["g"]["hist"]},
    #         'red': {'signal_values': rgb_values, 'frequency': histograms["r"]["hist"]},
    #         'lightness': {'signal_values': percent_values, 'frequency': histograms["l"]["hist"]},
    #         'green-magenta': {'signal_values': diverging_values, 'frequency': histograms["m"]["hist"]},
    #         'blue-yellow': {'signal_values': diverging_values, 'frequency': histograms["y"]["hist"]},
    #         'hue': {'signal_values': hue_values, 'frequency': histograms["h"]["hist"]},
    #         'saturation': {'signal_values': percent_values, 'frequency': histograms["s"]["hist"]},
    #         'value': {'signal_values': percent_values, 'frequency': histograms["v"]["hist"]}
    #     },
    #     'color_features': {
    #         'hue_circular_mean': hue_circular_mean,
    #         'hue_circular_std': hue_circular_std,
    #         'hue_median': hue_median
    #     }
    # }
    outputs.add_observation(variable='blue_frequencies', trait='blue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["b"]["hist"], label=rgb_values)
    outputs.add_observation(variable='green_frequencies', trait='green frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["g"]["hist"], label=rgb_values)
    outputs.add_observation(variable='red_frequencies', trait='red frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["r"]["hist"], label=rgb_values)
    outputs.add_observation(variable='lightness_frequencies', trait='lightness frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["l"]["hist"], label=percent_values)
    outputs.add_observation(variable='green-magenta_frequencies', trait='green-magenta frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["m"]["hist"], label=diverging_values)
    outputs.add_observation(variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["y"]["hist"], label=diverging_values)
    outputs.add_observation(variable='hue_frequencies', trait='hue frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["h"]["hist"], label=hue_values)
    outputs.add_observation(variable='saturation_frequencies', trait='saturation frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["s"]["hist"], label=percent_values)
    outputs.add_observation(variable='value_frequencies', trait='value frequencies',
                            method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
                            value=histograms["v"]["hist"], label=percent_values)
    outputs.add_observation(variable='hue_circular_mean', trait='hue circular mean',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_mean, label='degrees')
    outputs.add_observation(variable='hue_circular_std', trait='hue circular standard deviation',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_circular_std, label='degrees')
    outputs.add_observation(variable='hue_median', trait='hue median',
                            method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
                            value=hue_median, label='degrees')

    # Store images
    outputs.images.append(analysis_images)

    return analysis_images
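A minimal sketch of the hue statistics in isolation (synthetic hue values in OpenCV's 0-179 encoding; zero hues are treated as undefined, as above):

import numpy as np
from scipy import stats

h = np.array([178.0, 1.0, 177.0, 2.0, 0.0])  # hypothetical hue channel values
nonzero = h[h > 0]
hue_circular_mean = stats.circmean(nonzero, high=179, low=0) * 2  # rescale to 0-359 degrees
hue_circular_std = stats.circstd(nonzero, high=179, low=0) * 2
print(hue_circular_mean, hue_circular_std)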
Example #26
        edgecolors=['k' for i in hist]
        bars = ax1.bar(edges[:-1],hist,width=width,color='#737373',edgecolor=edgecolors)
        #ax1.yaxis.set_major_locator(pl.MaxNLocator(3))
        #ax1.yaxis.set_major_locator(pl.LinearLocator(3))
        max_hist=np.max(hist)
        tick_locations=[int(max_hist/2.),max_hist]
        print(tick_locations)
        ax1.yaxis.set_major_locator(pl.FixedLocator(tick_locations))
        xpos=-1.5*max_hist

        #draw_straight_axis(xpos,ax1.yaxis.get_ticklocs(),ax1)
        #ax1.yaxis.set_major_formatter(ticks)
        ax1.yaxis.set_major_formatter(ticker.FixedFormatter(['',str(max_hist)]))
        ax1.set_rlabel_position(90)
        ax1.xaxis.set_major_formatter(ticks)
        mean_angle=circmean(data['Corrected_Kino_Angle'])
        std_angle=np.sqrt(circvar(data['Corrected_Kino_Angle']))

        ymin,ymax=ax1.get_ylim()
        ax1.plot([mean_angle,mean_angle],[0,ymax],'b')
        std_angles=np.linspace(mean_angle-old_div(std_angle,2),mean_angle+old_div(std_angle,2),11)
        std_radii=np.ones(11)*2.*max_hist/3.
        #ax1.plot([mean_angle-std_angle/2,mean_angle+std_angle/2],[2.*max_hist/3.,2.*max_hist/3.],'r')
        ax1.plot(std_angles,std_radii,'r')
        ax1.set_ylim([0,max_hist])

        pl.savefig('kinos_histogram_'+filename+'_'+sheet+extension)
        pl.close(fig2)
#===========================================================================
        # Plot histogram of bundle orientations
        hist,edges=np.histogram(data['Corrected_Bundle_Angle'],density=False,bins=bins,range=[0,2.*np.pi])
Example #27
def hpd(ary, credible_interval=0.94, circular=False):
    """
    Calculate highest posterior density (HPD) of array for given credible_interval.

    The HPD is the minimum width Bayesian credible interval (BCI). This implementation works only
    for unimodal distributions.

    Parameters
    ----------
    ary : Numpy array
        An array containing posterior samples
    credible_interval : float, optional
        Credible interval to compute. Defaults to 0.94.
    circular : bool, optional
        Whether to compute the hpd taking into account `x` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).

    Returns
    -------
    np.ndarray
        lower and upper value of the interval.

    Examples
    --------
    Calculate the hpd of a Normal random variable:

    .. ipython::

        In [1]: import arviz as az
           ...: import numpy as np
           ...: data = np.random.normal(size=2000)
           ...: az.hpd(data, credible_interval=.68)
    """
    if ary.ndim > 1:
        hpd_array = np.array(
            [hpd(row, credible_interval=credible_interval, circular=circular) for row in ary.T]
        )
        return hpd_array
    # Make a copy of trace
    ary = ary.copy()
    n = len(ary)

    if circular:
        mean = st.circmean(ary, high=np.pi, low=-np.pi)
        ary = ary - mean
        ary = np.arctan2(np.sin(ary), np.cos(ary))

    ary = np.sort(ary)
    interval_idx_inc = int(np.floor(credible_interval * n))
    n_intervals = n - interval_idx_inc
    interval_width = ary[interval_idx_inc:] - ary[:n_intervals]

    if len(interval_width) == 0:
        raise ValueError(
            "Too few elements for interval calculation. "
            "Check that credible_interval meets condition 0 =< credible_interval < 1"
        )

    min_idx = np.argmin(interval_width)
    hdi_min = ary[min_idx]
    hdi_max = ary[min_idx + interval_idx_inc]

    if circular:
        hdi_min = hdi_min + mean
        hdi_max = hdi_max + mean
        hdi_min = np.arctan2(np.sin(hdi_min), np.cos(hdi_min))
        hdi_max = np.arctan2(np.sin(hdi_max), np.cos(hdi_max))

    return np.array([hdi_min, hdi_max])
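A hedged usage sketch for the circular branch (von Mises samples straddling the -pi/pi boundary; imports match the st/np names the function above assumes):

import numpy as np
from scipy import stats as st

samples = st.vonmises.rvs(kappa=10, loc=np.pi, size=2000)
samples = np.arctan2(np.sin(samples), np.cos(samples))  # wrap into [-pi, pi]
print(hpd(samples, credible_interval=0.94, circular=True))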
Example #28
 def test_empty(self):
     assert_(np.isnan(stats.circmean([])))
     assert_(np.isnan(stats.circstd([])))
     assert_(np.isnan(stats.circvar([])))
Example #30
def hpd(x, credible_interval=0.94, transform=lambda x: x, circular=False):
    """
    Calculate highest posterior density (HPD) of array for given credible_interval.

    The HPD is the minimum width Bayesian credible interval (BCI). This implementation works only
    for unimodal distributions.

    Parameters
    ----------
    x : Numpy array
        An array containing posterior samples
    credible_interval : float, optional
        Credible interval to plot. Defaults to 0.94.
    transform : callable
        Function to transform data (defaults to identity)
    circular : bool, optional
        Whether to compute the error taking into account `x` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).

    Returns
    -------
    np.ndarray
        lower and upper value of the interval.
    """
    if x.ndim > 1:
        return np.array([
            hpd(row,
                credible_interval=credible_interval,
                transform=transform,
                circular=circular) for row in x.T
        ])
    # Make a copy of trace
    x = transform(x.copy())
    len_x = len(x)

    if circular:
        mean = st.circmean(x, high=np.pi, low=-np.pi)
        x = x - mean
        x = np.arctan2(np.sin(x), np.cos(x))

    x = np.sort(x)
    interval_idx_inc = int(np.floor(credible_interval * len_x))
    n_intervals = len_x - interval_idx_inc
    interval_width = x[interval_idx_inc:] - x[:n_intervals]

    if len(interval_width) == 0:
        raise ValueError(
            "Too few elements for interval calculation. "
            "Check that credible_interval meets condition 0 =< credible_interval < 1"
        )

    min_idx = np.argmin(interval_width)
    hdi_min = x[min_idx]
    hdi_max = x[min_idx + interval_idx_inc]

    if circular:
        hdi_min = hdi_min + mean
        hdi_max = hdi_max + mean
        hdi_min = np.arctan2(np.sin(hdi_min), np.cos(hdi_min))
        hdi_max = np.arctan2(np.sin(hdi_max), np.cos(hdi_max))

    return np.array([hdi_min, hdi_max])
Example #31
# for running on an interactive cluster node
cellprofiler_path = "/g/almf/software/CP2C/CellProfiler11047/"
import sys

sys.path.append(cellprofiler_path)
import cellprofiler.cpmath.cpmorphology as morph
import Image

import numpy as np
from scipy import stats
from scipy.ndimage import convolve

x = np.array([0, 10, 20, 10, 20, 350]) / 360.0 * 2.0 * np.pi
stats.circvar(x)
stats.circmean(x)

import numpy as np
from scipy import stats
from scipy.ndimage import convolve

radius = 3
image = np.ones((10, 10))

kernel = np.ones((radius, radius)) / radius ** 2
image_sin_mean = convolve(np.sin(image), kernel, mode="constant")
image_cos_mean = convolve(np.cos(image), kernel, mode="constant")
image_var = 1 - (image_sin_mean ** 2 + image_cos_mean ** 2)


import cellprofiler.cpmath.filter as filter
import numpy as np
Example #32
def mean_time(x): return pd.Series(circmean(x, high=360), name='mean')
def std_time(x): return pd.Series(circstd(x, high=360), name='std')
def showModelDynamics(modeldata):
    """ Model Dynamics for particular competition """

    columns = 4
    f1, ax_array = plt.subplots(1, columns, figsize=(8, 2), sharey=True)
    f1.subplots_adjust(hspace=.3, wspace=.3, left=0.1, right=0.9)
    ''' colormap '''
    cmap = plt.cm.jet
    cNorm = colors.Normalize(vmin=np.min(modeldata.competition),
                             vmax=np.max(modeldata.competition))
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
    low_col = scalarMap.to_rgba(np.min(modeldata.competition))
    high_col = scalarMap.to_rgba(np.max(modeldata.competition))

    for i, ax in enumerate(ax_array):
        if i == 0:
            ax.set_ylabel('value')
        else:
            ax.tick_params(labelleft='off')
        run = int(i * (max(modeldata.Run) + 1) / (columns - 0.9))
        competition = modeldata[modeldata.Run == run].competition.iloc[0]
        competition_data = modeldata[modeldata.competition == competition]

        xVals = competition_data.Step.unique()
        proxy_mean = competition_data.groupby(['Step'
                                               ])['mean_proxy_value'].mean()
        proxy_std = competition_data.groupby(['Step'
                                              ])['mean_proxy_value'].std()
        # ax.scatter(competition_data.Step,
        #           competition_data.mean_proxy_value,
        #           alpha=0.2, c=high_col, marker=".", label='_nolegend_')
        ax.plot(xVals, proxy_mean, c=high_col, label='proxy')
        ax.fill_between(xVals,
                        proxy_mean - proxy_std,
                        proxy_mean + proxy_std,
                        alpha=0.2,
                        color=high_col)
        goal_mean = competition_data.groupby(['Step'
                                              ])['mean_goal_value'].mean()
        goal_std = competition_data.groupby(['Step'])['mean_goal_value'].std()
        # ax.scatter(competition_data.Step,
        #           competition_data.mean_goal_value,
        #           alpha=0.2, c=low_col, marker=".", label='_nolegend_')
        ax.plot(xVals, goal_mean, c=low_col, label='goal')
        ax.fill_between(xVals,
                        goal_mean - goal_std,
                        goal_mean + goal_std,
                        alpha=0.2,
                        color=low_col)
        ax.set_xlabel('step')
        # ax.set_title('c{:.1f} r{:02d}'.format(competition, run))
        ax.set_ylim([
            np.min([
                modeldata.mean_proxy_value.min(),
                modeldata.mean_goal_value.min()
            ]),
            np.max([
                modeldata.mean_proxy_value.max(),
                modeldata.mean_goal_value.max()
            ])
        ])
        if i == 0:
            ax.legend()

    f1.savefig('Dynamics.pdf')

    f2, ax_array = plt.subplots(1, columns, figsize=(8, 1.2), sharey=True)
    f2.subplots_adjust(hspace=.3, wspace=.3, left=0.1, right=0.9)
    for i, ax in enumerate(ax_array):
        if i == 0:
            ax.set_ylabel('mean practice (°)')
        else:
            ax.tick_params(labelleft='off')
        run = int(i * (max(modeldata.Run) + 1) / (columns - 0.9))
        competition = modeldata[modeldata.Run == run].competition.iloc[0]
        #        print(competition/np.pi * 180)
        competition_data = modeldata[modeldata.competition == competition]
        xVals = competition_data.Step.unique()
        yVals_raw = competition_data.groupby(['Step'])['mean_practice']
        practice_mean = np.empty(np.size(xVals))
        practice_std = np.empty(np.size(xVals))
        i = 0
        for name, group in yVals_raw:
            practice_mean[i] = stats.circmean(group, -np.pi, np.pi)
            practice_std[i] = stats.circstd(group, -np.pi, np.pi)
            i += 1

        practice_mean = practice_mean / np.pi * 180
        practice_std = practice_std / np.pi * 180
        #        ax.scatter(competition_data.Step,
        #                   competition_data.mean_practice,
        #                   alpha=0.2, c="k", marker=".")
        ax.plot(xVals, practice_mean, c='k')
        ax.fill_between(xVals,
                        practice_mean - practice_std,
                        practice_mean + practice_std,
                        alpha=0.2,
                        color="k")
        ax.plot([0, np.max(xVals)], [0, 0], c='grey', ls='--', lw=0.5)
        ax.set_xlabel('step')
        # ax.set_ylim([0,1])

    f2.savefig('PracticeDynamics.pdf')
	def test_FloatValuesBasicVerificationSmaller(self): # 343.5 instead of 344, so the weighted mean should be slightly smaller than the scipy reference
		mymean =  round(StaCircStats.Sta_CircWMean({6:1,343.5:3}, low=0, high=360),4)
		scimean = round(stats.circmean(samples=np.array([6,344,344, 344]), high=360, low=0),4)
		self.assertGreater(scimean,mymean)
            start_index = anemometer_analyzer.find_nearest(
                sec_since_release_sublist, start_time)
            end_time = start_time + bin_duration
            end_index = anemometer_analyzer.find_nearest(
                sec_since_release_sublist, end_time)

            if bin_count < total_bin_number:
                direction_slice = direction_sublist[start_index:end_index]
                speed_slice = speed_sublist[start_index:
                                            end_index]  #still in m/s
            else:
                print('this last bin is not a full %d seconds in length ' %
                      (bin_duration))
                binned_wind_dir.append(
                    circmean(direction_sublist[start_index:-1],
                             high=360,
                             low=0))
                binned_windspeeds.append(np.mean(
                    speed_sublist[start_index:-1]))
                break
            binned_wind_dir.append(circmean(direction_slice, high=360, low=0))
            binned_windspeeds.append(np.mean(speed_slice))  # still in m/s
            start_time = end_time
            bin_count += 1

        # now calculating the angle of the path from the release site to the trap in question

        trap_number = d[experiment_date]['trap_num']
        trap_lettering_list = [
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'
        ]
	def test_BasicSimpleMean2(self):
		mymean = StaCircStats.Sta_CircWMean({30:1,50:3}, low=0, high=360)
		scimean = stats.circmean(samples=np.array([30,50,50, 50]), high=360, low=0)
		self.assertEqual(round(scimean, 4) , round(mymean,4 ))
Example #37
 def test_circfuncs_array_like(self):
     x = [355,5,2,359,10,350]
     assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
     assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
     assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
Example #38
def main():
    # parse args --------------------------------------------------------------
    args = _parse_args()

    # set device and data format ----------------------------------------------
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
        args.devices = ''
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.devices
    if not args.devices or args.model == 'mobilenet_v2':
        # note: tensorflow supports b01c pooling on cpu only
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    # load model --------------------------------------------------------------
    model = load_network(args.model,
                         args.weights_filepath,
                         args.input_type,
                         args.input_height,
                         args.input_width,
                         args.output_type,
                         sampling=args.n_samples > 1,
                         n_classes=args.n_classes,
                         mobilenet_v2_alpha=args.mobilenet_v2_alpha)

    # parse for image files ---------------------------------------------------
    # note: we do not search for mask files, but derive masks from either the
    # depth or rgb image during preprocessing
    DEPTH_SUFFIX = '_Depth.pgm'
    RGB_SUFFIX = '_RGB.png'
    MASK_SUFFIX = '_Mask.png'
    # get filepaths
    mask_filepaths = get_files_by_extension(args.image_or_image_basepath,
                                            extension=MASK_SUFFIX.lower(),
                                            flat_structure=True,
                                            recursive=True,
                                            follow_links=True)

    if args.input_type in [INPUT_DEPTH, INPUT_DEPTH_AND_RGB]:
        depth_filepaths = get_files_by_extension(
            args.image_or_image_basepath,
            extension=DEPTH_SUFFIX.lower(),
            flat_structure=True,
            recursive=True,
            follow_links=True)
        assert len(depth_filepaths) == len(mask_filepaths)
        filepaths = list(zip(depth_filepaths, mask_filepaths))
        assert all(
            depth_fp.replace(DEPTH_SUFFIX, '') == mask_fp.replace(
                MASK_SUFFIX, '') for depth_fp, mask_fp in filepaths)

    if args.input_type in [INPUT_RGB, INPUT_DEPTH_AND_RGB]:
        rgb_filepaths = get_files_by_extension(args.image_or_image_basepath,
                                               extension=RGB_SUFFIX.lower(),
                                               flat_structure=True,
                                               recursive=True,
                                               follow_links=True)
        assert len(rgb_filepaths) == len(mask_filepaths)
        filepaths = list(zip(rgb_filepaths, mask_filepaths))
        assert all(
            rgb_fp.replace(RGB_SUFFIX, '') == mask_fp.replace(MASK_SUFFIX, '')
            for rgb_fp, mask_fp in filepaths)

    if args.input_type == INPUT_DEPTH_AND_RGB:
        filepaths = list(zip(depth_filepaths, rgb_filepaths, mask_filepaths))

    # define preprocessing function -------------------------------------------
    def load_and_preprocess(inputs):
        # unpack inputs
        if args.input_type == INPUT_DEPTH_AND_RGB:
            depth_filepath, rgb_filepath, mask_filepath = inputs
        elif args.input_type == INPUT_DEPTH:
            depth_filepath, mask_filepath = inputs
        else:
            rgb_filepath, mask_filepath = inputs

        # pack shape
        shape = (args.input_height, args.input_width)

        # load mask
        mask = img_utils.load(mask_filepath)
        mask_resized = pre.resize_mask(mask, shape)
        mask_resized = mask_resized > 0

        # prepare depth input
        if args.input_type in [INPUT_DEPTH, INPUT_DEPTH_AND_RGB]:
            # load
            depth = img_utils.load(depth_filepath)

            # create mask
            # mask = depth > 0
            # mask_resized = pre.resize_mask(mask.astype('uint8')*255, shape) > 0

            # mask (redundant, since mask is derived from depth image)
            # depth = pre.mask_img(depth, mask)

            # resize
            depth = pre.resize_depth_img(depth, shape)

            # 01 -> 01c
            depth = depth[..., None]

            # preprocess
            depth = pre.preprocess_img(
                depth,
                mask=mask_resized,
                scale01=args.input_preprocessing == 'scale01',
                standardize=args.input_preprocessing == 'standardize',
                zero_mean=True,
                unit_variance=True)

            # convert to correct data format
            if K.image_data_format() == 'channels_last':
                axes = 'b01c'
            else:
                axes = 'bc01'
            depth = img_utils.dimshuffle(depth, '01c', axes)

            # repeat if sampling is enabled
            if args.n_samples > 1:
                depth = np.repeat(depth, args.n_samples, axis=0)

        # prepare rgb input
        if args.input_type in [INPUT_RGB, INPUT_DEPTH_AND_RGB]:
            # load
            rgb = img_utils.load(rgb_filepath)

            # create mask
            # if args.input_type == INPUT_RGB:
            #     # derive mask from rgb image
            #     mask = rgb > 0
            #     mask_resized = pre.resize_mask(mask.astype('uint8')*255,
            #                                    shape) > 0
            # else:
            #     # mask rgb image using mask derived from depth image
            #    rgb = pre.mask_img(rgb, mask)

            # resize
            rgb = pre.resize_depth_img(rgb, shape)

            # preprocess
            rgb = pre.preprocess_img(
                rgb,
                mask=mask_resized,
                scale01=args.input_preprocessing == 'scale01',
                standardize=args.input_preprocessing == 'standardize',
                zero_mean=True,
                unit_variance=True)

            # convert to correct data format
            if K.image_data_format() == 'channels_last':
                axes = 'b01c'
            else:
                axes = 'bc01'
            rgb = img_utils.dimshuffle(rgb, '01c', axes)

            # repeat if sampling is enabled
            if args.n_samples > 1:
                rgb = np.repeat(rgb, args.n_samples, axis=0)

        # return preprocessed images
        if args.input_type == INPUT_DEPTH_AND_RGB:
            return depth, rgb
        elif args.input_type == INPUT_DEPTH:
            return depth,
        else:
            return rgb,

    # define postprocessing function ------------------------------------------
    def postprocess(output):
        if args.output_type == OUTPUT_BITERNION:
            return post.biternion2deg(output)
        elif args.output_type == OUTPUT_REGRESSION:
            return post.rad2deg(output)
        else:
            return post.class2deg(np.argmax(output, axis=-1), args.n_classes)

    # process files -----------------------------------------------------------
    len_cnt = len(str(len(filepaths)))
    plt.ion()
    fig = plt.figure(figsize=(8, 6))
    for i, inputs in enumerate(filepaths):
        print("[{:0{}d}/{:0{}d}]: {}".format(i + 1, len_cnt, len(filepaths),
                                             len_cnt, inputs))
        # load and preprocess inputs
        nw_inputs = load_and_preprocess(inputs)

        # predict
        nw_output = model.predict(nw_inputs, batch_size=args.n_samples)

        # postprocess output
        output = postprocess(nw_output)

        # visualize inputs and predicted angle
        plt.clf()
        # visualize inputs
        for j, inp in enumerate(nw_inputs):
            # first element of input batch
            img = inp[0]

            # convert to 01c
            if K.image_data_format() == 'channels_last':
                axes = '01c'
            else:
                axes = 'c01'
            img = img_utils.dimshuffle(img, axes, '01c')

            # inverse preprocessing
            img = pre.preprocess_img_inverse(
                img,
                scale01=args.input_preprocessing == 'scale01',
                standardize=args.input_preprocessing == 'standardize',
                zero_mean=True,
                unit_variance=True)
            # show
            ax = fig.add_subplot(1, len(nw_inputs) + 1, j + 1)
            if img.shape[-1] == 1:
                ax.imshow(img[:, :, 0],
                          cmap='gray',
                          vmin=img[img != 0].min(),
                          vmax=img.max())
            else:
                ax.imshow(img)
            ax.axis('off')

        # visualize output
        ax = fig.add_subplot(1,
                             len(nw_inputs) + 1,
                             len(nw_inputs) + 1,
                             polar=True)
        ax.set_theta_zero_location('S', offset=0)
        ax.hist(np.deg2rad(output),
                width=np.deg2rad(2),
                density=True,
                alpha=0.5 if args.n_samples > 1 else 1.0,
                color='#1f77b4')
        if args.n_samples > 1:
            mean_rad = circmean(np.deg2rad(output))
            std_rad = circstd(np.deg2rad(output))
            x = np.deg2rad(np.linspace(0, 360, 360))
            pdf_values = norm.pdf(x, mean_rad, std_rad)
            ax.plot(x, pdf_values, color='#1f77b4', zorder=2, linewidth=2)
            ax.fill(x, pdf_values, color='#1f77b4', zorder=2, alpha=0.3)
        ax.set_yscale('symlog')
        ax.set_ylim([0, 20])

        plt.tight_layout()
        # plt.savefig(f'./img{i}.png', bbox_inches='tight', dpi=75)
        plt.pause(0.0005)
Example #39
 def test_circfuncs_array_like(self):
     x = [355, 5, 2, 359, 10, 350]
     assert_allclose(stats.circmean(x, high=360), 0.167690146, rtol=1e-7)
     assert_allclose(stats.circvar(x, high=360), 42.51955609, rtol=1e-7)
     assert_allclose(stats.circstd(x, high=360), 6.520702116, rtol=1e-7)
Example #40
def test_circular_mean_scipy(mean):
    """Test our `_circular_mean()` function gives same result than Scipy version."""
    rvs = st.vonmises.rvs(loc=mean, kappa=1, size=1000)
    mean_az = _circular_mean(rvs)
    mean_sp = st.circmean(rvs, low=-np.pi, high=np.pi)
    np.testing.assert_almost_equal(mean_az, mean_sp)
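# A minimal sketch of the unit-vector averaging that a circular mean such as
# `_circular_mean` above is expected to perform (an illustration, not the
# implementation under test): average the points on the unit circle and take
# the angle of the resultant, which lands in (-pi, pi].
import numpy as np


def _circular_mean_sketch(angles):
    angles = np.asarray(angles)
    return np.arctan2(np.sin(angles).mean(), np.cos(angles).mean())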
Exemplo n.º 41
0
def _init_(directory, start, end, levels):
    print('#### Save merra to npy .... ####')
    # Containers filled below: per-day file lists and per-grid-point time series
    fileList_high, fileList_low, dayStrings = [], [], []
    dict_timeseries_heights = {}
    dict_timeseries_wind = {}
    dict_timeseries_features = {}
    years = [start[0]]
    months = range(start[1], end[1] + 1)
    days = range(start[2], end[2] + 1)
    # get all time stamps available
    for year in years:
        for month in months:
            for day in days:
                year_str = str(year)
                month_str = str(month).zfill(2)
                day_str = str(day).zfill(2)
                suffix = year_str + month_str + day_str
                filename_high = directory + "MERRA2_400.tavg3_3d_asm_Nv." + suffix + ".nc4"
                if (os.path.exists(filename_high)):
                    fileList_high.append(filename_high)
                    dayStrings.append(suffix)

                    # We also require to have the corresponding file for lowest
                    filename_low = directory + "MERRA2_400.tavg1_2d_slv_Nx." + suffix + ".nc4"
                    if not os.path.exists(filename_low):
                        print(
                            "WARNING: Cannot find corresponding lower level",
                            filename_low)
                    else:
                        fileList_low.append(filename_low)

    ndays = len(fileList_high)
    print("Found", len(fileList_high),
          "matching files in date order for high altitudes")
    print("Parsing data into time series")
    print(dayStrings)

    for dayString in dayStrings:
        nc_f = directory + 'MERRA2_400.tavg3_3d_asm_Nv.' + dayString + '.nc4'
        nc_f_low = directory + 'MERRA2_400.tavg1_2d_slv_Nx.' + dayString + '.nc4'
        nc_fid = Dataset(nc_f, 'r')
        nc_fid_low = Dataset(nc_f_low, 'r')

    latlonPairs = []
    for lat in nc_fid.variables['lat'][:]:
        for lon in nc_fid.variables['lon'][:]:
            latlonPairs.append([lat, lon])

    latlonPairs_numpy = []
    for lat in range(len(nc_fid.variables['lat'][:])):
        for lon in range(len(nc_fid.variables['lon'][:])):
            latlonPairs_numpy.append([lat, lon])
    nlev = len(levels)

    # prepare the arrays filled with zeros
    for (index_lat, index_lon) in latlonPairs:
        timeseries_heights = np.zeros((nlev + 3, 8 * ndays))
        timeseries_heights[nlev] = 50 * np.ones(
            (8 * ndays))  # 50 metres (fixed)
        timeseries_heights[nlev + 1] = 10 * np.ones(
            (8 * ndays))  # 10 metres (fixed)
        timeseries_heights[nlev + 2] = 2 * np.ones(
            (8 * ndays))  # 2  metres (fixed)
        timeseries_wind = np.zeros((nlev + 3, 8 * ndays), dtype='2float32')
        timeseries_features = np.zeros((nlev + 3, 8 * ndays), dtype='2float32')

        dict_timeseries_heights[index_lat, index_lon] = timeseries_heights
        dict_timeseries_wind[index_lat, index_lon] = timeseries_wind
        dict_timeseries_features[index_lat, index_lon] = timeseries_features

    # Running through days and save the data in the dicts
    # Creating the features directly from the wind speed.
    day = 0
    for dayString in dayStrings:
        nc_f = directory + 'MERRA2_400.tavg3_3d_asm_Nv.' + dayString + '.nc4'
        nc_f_low = directory + 'MERRA2_400.tavg1_2d_slv_Nx.' + dayString + '.nc4'

        nc_fid = Dataset(nc_f, 'r')
        nc_fid_low = Dataset(nc_f_low, 'r')

        print(nc_f, "day", day + 1)

        for (index_lat, index_lon), (numpy_lat,
                                     numpy_lon) in zip(latlonPairs,
                                                       latlonPairs_numpy):
            for time in range(8):  # 8 3-hour intervals
                # get the low levels
                timeindex = time * 3
                level = len(levels)  # At 50m: Call this "Level 73"
                eastwinds = nc_fid_low.variables['U50M'][timeindex:timeindex +
                                                         3, numpy_lat,
                                                         numpy_lon]
                northwinds = nc_fid_low.variables['V50M'][timeindex:timeindex +
                                                          3, numpy_lat,
                                                          numpy_lon]
                eastwind_mean = np.mean(eastwinds)
                northwind_mean = np.mean(northwinds)
                timeseries_wind[level, day * 8 +
                                time] = [eastwind_mean, northwind_mean]
                features = from_wind_to_features(eastwind_mean, northwind_mean)
                timeseries_features[level, day * 8 +
                                    time] = [features[0], features[1]]

                level = len(levels) + 1  # At 10m: Call this "Level 74"
                eastwinds = nc_fid_low.variables['U10M'][timeindex:timeindex +
                                                         3, numpy_lat,
                                                         numpy_lon]
                northwinds = nc_fid_low.variables['V10M'][timeindex:timeindex +
                                                          3, numpy_lat,
                                                          numpy_lon]
                eastwind_mean = np.mean(eastwinds)
                northwind_mean = np.mean(northwinds)
                timeseries_wind[level, day * 8 +
                                time] = [eastwind_mean, northwind_mean]
                features = from_wind_to_features(eastwind_mean, northwind_mean)
                timeseries_features[level, day * 8 +
                                    time] = [features[0], features[1]]

                level = len(levels) + 2  # At 2m: Call this "Level 75"
                eastwinds = nc_fid_low.variables['U2M'][timeindex:timeindex +
                                                        3, numpy_lat,
                                                        numpy_lon]
                northwinds = nc_fid_low.variables['V2M'][timeindex:timeindex +
                                                         3, numpy_lat,
                                                         numpy_lon]
                eastwind_mean = np.mean(eastwinds)
                northwind_mean = np.mean(northwinds)
                timeseries_wind[level, day * 8 +
                                time] = [eastwind_mean, northwind_mean]
                features = from_wind_to_features(eastwind_mean, northwind_mean)
                timeseries_features[level, day * 8 +
                                    time] = [features[0], features[1]]

                for level in range(nlev):  # 72 pressure levels
                    timeseries_heights[
                        level, day * 8 + time] = nc_fid.variables['H'][
                            time, level, numpy_lat,
                            numpy_lon] - nc_fid.variables['PHIS'][
                                time, numpy_lat,
                                numpy_lon] / 9.81  # height above sealevel - height of surface/9.81 = actual height (source: NASA email)
                    eastwind = nc_fid.variables['U'][time, level, numpy_lat,
                                                     numpy_lon]  # eastwind
                    northwind = nc_fid.variables['V'][time, level, numpy_lat,
                                                      numpy_lon]  # northwind
                    timeseries_wind[level,
                                    day * 8 + time] = [eastwind, northwind]
                    features = from_wind_to_features(eastwind, northwind)
                    timeseries_features[level, day * 8 +
                                        time] = [features[0], features[1]]

                circular_mean = circmean(timeseries_features[:, day * 8 + time,
                                                             1])
                for level in range(len(timeseries_features)):
                    timeseries_features[level, day * 8 + time,
                                        1] = compute_angular_distance(
                                            circular_mean,
                                            timeseries_features[level,
                                                                day * 8 + time,
                                                                1])

            dict_timeseries_heights[index_lat,
                                    index_lon][:, day * 8:day * 8 +
                                               8] = timeseries_heights[:, day *
                                                                       8:day *
                                                                       8 + 8]
            dict_timeseries_wind[index_lat,
                                 index_lon][:, day * 8:day * 8 +
                                            8] = timeseries_wind[:, day *
                                                                 8:day * 8 + 8]
            dict_timeseries_features[
                index_lat,
                index_lon][:, day * 8:day * 8 +
                           8] = timeseries_features[:, day * 8:day * 8 + 8]

        day = day + 1

    # Save to npy:
    start = dayStrings[0]
    stop = dayStrings[-1]

    for (index_lat, index_lon) in latlonPairs:
        prefix = str(format(index_lat, '.2f')) + '_' + str(
            format(index_lon, '.2f')) + '_' + start + '_' + stop

        np.save(directory + prefix + '_winds',
                dict_timeseries_wind[index_lat, index_lon][4:, :, :])
        np.save(directory + prefix + '_heights',
                dict_timeseries_heights[index_lat, index_lon][4:, :])
        np.save(directory + prefix + '_features',
                dict_timeseries_features[index_lat, index_lon][4:, :, :])
    return prefix
Exemplo n.º 42
0
def circ_corrcc(x, y, tail='two-sided'):
    """Correlation coefficient between two circular variables.

    Parameters
    ----------
    x : np.array
        First circular variable (expressed in radians)
    y : np.array
        Second circular variable (expressed in radians)
    tail : string
        Specify whether to return 'one-sided' or 'two-sided' p-value.

    Returns
    -------
    r : float
        Correlation coefficient
    pval : float
        Uncorrected p-value

    Notes
    -----
    Adapted from the CircStats MATLAB toolbox (Berens 2009).

    Use the np.deg2rad function to convert angles from degrees to radians.

    Please note that NaN are automatically removed.

    Examples
    --------
    Compute the r and p-value of two circular variables

    >>> from pingouin import circ_corrcc
    >>> x = [0.785, 1.570, 3.141, 3.839, 5.934]
    >>> y = [0.593, 1.291, 2.879, 3.892, 6.108]
    >>> r, pval = circ_corrcc(x, y)
    >>> print(r, pval)
    0.942 0.06579836070349088
    """
    from scipy.stats import norm
    x = np.asarray(x)
    y = np.asarray(y)

    # Check size
    if x.size != y.size:
        raise ValueError('x and y must have the same length.')

    # Remove NA
    x, y = remove_na(x, y, paired=True)
    n = x.size

    # Compute correlation coefficient
    x_sin = np.sin(x - circmean(x))
    y_sin = np.sin(y - circmean(y))
    # Similar to np.corrcoef(x_sin, y_sin)[0][1]
    r = np.sum(x_sin * y_sin) / np.sqrt(np.sum(x_sin**2) * np.sum(y_sin**2))

    # Compute T- and p-values
    tval = np.sqrt((n * (x_sin**2).mean() *
                    (y_sin**2).mean()) / np.mean(x_sin**2 * y_sin**2)) * r

    # Approximately distributed as a standard normal
    pval = 2 * norm.sf(abs(tval))
    pval = pval / 2 if tail == 'one-sided' else pval
    return np.round(r, 3), pval
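# Illustrative call with angles measured in degrees (hypothetical values),
# converted to radians first as the Notes section above recommends; assumes
# circ_corrcc and numpy are importable as in the surrounding module:
# x_deg = [45.0, 90.0, 180.0, 220.0, 340.0]
# y_deg = [34.0, 74.0, 165.0, 223.0, 350.0]
# r, pval = circ_corrcc(np.deg2rad(x_deg), np.deg2rad(y_deg))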
Exemplo n.º 43
0
import glob
import numpy as np

from scipy.stats import circmean, circvar
fns = sorted(glob.glob("./*.txt"))

fp1 = open('bonds.dat', 'w')
fp2 = open('bend.dat', 'w')
fp3 = open('tor.dat', 'w')
for f in fns:
    types = f.split('/')[1].split('.')[0]
    d = np.loadtxt(f)
    if (len(types.split('-')) == 4):
        # four atom types -> torsion angle: use circular statistics on [-180, 180)
        fp3.write("%s %f %f\n" % (types, circmean(d, low=-180., high=180.),
                                  np.sqrt(circvar(d, low=-180., high=180.))))
    elif (len(types.split('-')) == 3):
        # three atom types -> bond angle
        fp2.write("%s %f %f\n" % (types, np.mean(d), np.std(d)))
    else:
        # two atom types -> bond length (values scaled by 0.1)
        fp1.write("%s %f %f\n" % (types, np.mean(d) * 0.1, np.std(d) * 0.1))
fp1.close()
fp2.close()
fp3.close()
Exemplo n.º 44
0
def pinhole(pixel_i, pixel_j, pixel_width=5, pixel_height=5,
            source_aperture=50.8, sample_aperture=12.7,
            source_distance=8500, detector_distance=4000,
            beamstop=50.8,
            wavelength=8, wavelength_resolution=0.12, aligned_wavelength=None,
            N=5000, phi_mask=7.1,
            Iq=None):

    PI, PJ = np.meshgrid(pixel_i, pixel_j)

    # ===== Generate a population of neutrons at the sample position =====
    Rsource = source_aperture/2
    Rsample = sample_aperture/2
    Rbeamstop = beamstop/2
    Dsource = source_distance
    Ddetector = detector_distance
    delta_el, delta_y, delta_p = aperture_alignment(
        wavelength, aligned_wavelength, Dsource, Ddetector)
    s_az, s_el, s_L, s_x, s_y = neutrons_on_sample(
        Rsource, Rsample, Rbeamstop, Dsource, Ddetector,
        wavelength, wavelength_resolution, aligned_wavelength,
        N)

    # ==== Compute image on detector without sample ====
    #
    #d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)

    ### filter through beam stop
    ##idx = (d_x**2 + (d_y-delta_y)**2 < Rbeamstop**2)
    ##s_az, s_el, s_L, s_x, s_y = [w[idx] for w in (s_az, s_el, s_L, s_x, s_y)]
    ##d_az, d_el, d_L, d_x, d_y = [w[idx] for w in (d_az, d_el, d_L, d_x, d_y)]

    #plot(d_x/pixel_width, d_y/pixel_height, "G: neutron detector pixel"); return

    # ==== Scatter off sample ====
    mode = None
    #mode = 'sum'
    #mode = 'scatter'
    if mode == 'sum' and Iq is not None:
        # For each pixel, compute the scattering angle between the neutron
        # on a direct path to the detector vs the pixel center, and compute
        # I(q) based on that.  Seems to underestimate the dQ/Q resolution
        # for the pixels, so don't use this without figuring out what's wrong.
        raise NotImplementedError("experimental code; see source")
        # pixel centers relative to beam center
        cx, cy = PI*pixel_width, PJ*pixel_height
        pixel_r = sqrt(cx**2 + cy**2)
        pixel_theta = arctan2(pixel_r, Ddetector)/2
        #pixel_phi = arctan2(cy, cx)
        pixel_nominal_q = 4*pi * sin(pixel_theta)/wavelength
        # find neutron position on the detector without scattering
        d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)
        # find scattering angle from each neutron to each pixel
        r = sqrt(((d_x-s_x)[:, None] - cx.flatten()[None, :])**2
                 + ((d_y-s_y)[:, None] - (cy+delta_p).flatten()[None, :])**2)
        theta = arctan2(r, Ddetector)/2
        # find q value for each neutron at each pixel
        q = 4*pi*sin(theta)/d_L[:, None]
        # accumulate scattering patterns across all neutrons
        I = Iq(q)
        pixel_Iq = np.sum(I, axis=0).reshape(PI.shape)
        pixel_dIq = pixel_Iq/sqrt(len(s_x))
        pixel_q = np.mean(q, axis=0).reshape(PI.shape)
        pixel_dq = np.std(q, axis=0, ddof=1).reshape(PI.shape)
        #print("pixel_Iq", pixel_q.shape, pixel_Iq.shape)

    if mode == 'scatter' and Iq is not None:
        # For each neutron figure out the relative probability of the neutron
        # arriving in each individual pixel, then choose one to add it to.
        # The result is way off, probably because it doesn't include the
        # probability that the neutron goes to none of the pixels.
        raise NotImplementedError("experimental code; see source")
        # pixel centers relative to beam center
        cx, cy = PI*pixel_width, PJ*pixel_height
        pixel_r = sqrt(cx**2 + cy**2)
        pixel_theta = arctan2(pixel_r, Ddetector)/2
        #pixel_phi = arctan2(cy, cx)
        pixel_q = 4*pi * sin(pixel_theta)/wavelength

        # find neutron position on the detector without scattering
        d_az, d_el, d_L, d_x, d_y = ballistics(s_az, s_el, s_L, s_x, s_y, Ddetector)
        # find scattering angle from each neutron to each pixel

        # For each neutron generate the probability distribution corresponding
        # to the various pixels that the neutron might land in and pick one.
        counts = np.zeros(pixel_q.size, 'i')
        counts_q = np.zeros(pixel_q.size, 'd')
        for xk, yk, Lk in zip(d_x-s_x, d_y-s_y, d_L):
            r = sqrt((xk - cx)**2 + (yk-delta_p - cy)**2)
            theta = arctan2(r, Ddetector)/2
            # find q value for each neutron at each pixel
            q = (4*pi*sin(theta)/Lk).flatten()
            # accumulate scattering patterns across all neutrons
            invcdf = np.cumsum(Iq(q))
            U = np.random.uniform(0, invcdf[-1])
            index = np.searchsorted(invcdf, U)
            counts[index] += 1
            counts_q[index] += q[index]
        counts_q /= counts + (counts==0)
        counts.reshape(cx.shape)
        counts_q.reshape(cx.shape)

    stats = []
    current_j = 1000001 # arbitrary unlikely number
    for p_i, p_j in zip(PI.flat, PJ.flat):
        if current_j != p_j:
            print("pixel j=%d"%p_j)
            current_j = p_j

        ## Generate a new set of points on the sample for each pixel
        #s_az, s_el, s_L, s_x, s_y = neutrons_on_sample(
        #    Rsource, Rsample, Rbeamstop, Dsource, Ddetector,
        #    wavelength, wavelength_resolution, aligned_wavelength,
        #    N)

        # ==== Compute scattering theta, phi for pixel ====
        # find random point in pixel i, j to scatter to
        xl, xu = (p_i-0.5)*pixel_width, (p_i+0.5)*pixel_width
        yl, yu = delta_p+(p_j-0.5)*pixel_height, delta_p+(p_j+0.5)*pixel_height
        p_x, p_y = rand(len(s_x))*(xu-xl)+xl, rand(len(s_x))*(yu-yl)+yl
        #plot(px, py, "px,py pixel locations"); return

        # find the scattering angle necessary to reach point P on the detector
        q_az = arctan2(p_x-s_x, np.ones_like(s_x)*Ddetector)
        q_el = throwing_angle(to_velocity(s_L), 0.001*Ddetector/cos(q_az),
                              0.001*(p_y-s_y))
        #q_theta = arccos(sin(s_el)*sin(q_el) + cos(s_el)*cos(q_el)*cos(q_az-s_az))
        #q_theta_2 = arctan2(sqrt((d_x-p_x)**2+(d_y-p_y)**2)), Ddetector)
        #q_phi = arctan2(q_el, q_az)

        # Note that q scattering calculations look at positions on the detector
        # assuming neutrons travel in a straight line, and not the positions
        # according to ballistics.  The ballistics are taken into account by the
        # choice of initial angle such that the neutron will hit the target
        # position.  The scattering function operates solely on incident and
        # scattered angle with no hint of gravity, and so the resolution
        # function which mixes the scattering theory must reflect this.
        q_theta, q_phi = nominal_q(s_x, s_y, s_az, s_el, q_az, q_el, Ddetector)
        q = 4*pi*sin(q_theta)/s_L
        #return

        # filter through beam stop, corrected for gravity alignment
        #print(Rbeamstop**2, xu**2 + (yu-delta_p)**2, xl**2 + (yl-delta_p)**2)
        idx = (p_x**2 + (p_y-delta_p)**2 > Rbeamstop**2)
        q_theta, q_phi, q = [w[idx] for w in (q_theta, q_phi, q)]

        # ==== calculate stats ====
        cx, cy = p_i*pixel_width, p_j*pixel_height
        theta_nominal = arctan2(sqrt(cx**2+cy**2), Ddetector)/2
        phi_nominal = arctan2(cy, cx)
        q_nominal = 4*pi*sin(theta_nominal)/wavelength
        qperp_nominal = 0

        # Approximate q_perp as arc length between nominal phi and actual phi
        # at radius q.
        qperp = q*(q_phi-phi_nominal)

        if len(q) > 1:
            theta_mean, theta_std = np.mean(q_theta), np.std(q_theta)
            phi_mean, phi_std = circmean(q_phi, low=-pi, high=pi), circstd(q_phi, low=-pi, high=pi)
            q_mean, q_std = np.mean(q), np.std(q)
            qperp_mean, qperp_std = np.mean(qperp), np.std(qperp)
            # weight each neutron by the sample scattering
            I = np.sum(Iq(q))/len(q) if Iq is not None else 0
            dI = I/sqrt(len(q))
        else:
            print("no q values for (%d, %d)"%(p_i, p_j))
            theta_mean, theta_std = theta_nominal, 0.
            phi_mean, phi_std = phi_nominal, 0.
            q_mean, q_std = q_nominal, 0.
            qperp_mean, qperp_std = qperp_nominal, 0.
            I, dI = 0., 0.
        stats.append([
            theta_nominal, theta_mean, theta_std,
            phi_nominal, phi_mean, phi_std,
            q_nominal, q_mean, q_std,
            qperp_nominal, qperp_mean, qperp_std,
            I, dI,
            ])

    config = "src-ap:%.1fcm samp-ap:%.1fcm src-dist:%.1fm det-dist:%.1fm L:%.1fA" % (
        source_aperture/10, sample_aperture/10,
        Dsource/1000, Ddetector/1000, wavelength)
    if len(stats) == 0:
        pass  # No samples fell in detector region
    elif len(stats) == 1:
        # print stats
        pixel_config = "%s pixel:%d,%d (%dX%d mm^2)" %(
            config, p_i, p_j, pixel_width, pixel_height)
        print(pixel_config)
        #print("  nominal lambda: %.4f  actual lambda: %.4f +/- %.4f (Ang)"
        #      % (wavelength, np.mean(s_L), np.std(s_L)))
        print("  nominal 1/lambda: %.4f  actual 1/lambda: %.4f +/- %.4f (1/Ang)"
              % (1./wavelength, np.mean(1./s_L), np.std(1./s_L)))
        print("  nominal theta: %.4f  actual theta: %.4f +/- %.4f (degrees)"
              % (degrees(theta_nominal), degrees(theta_mean), degrees(theta_std)))
        #print("  nominal phi: %.4f  actual phi: %.4f +/- %.4f (degrees)"
        #      % (degrees(phi_nominal), degrees(phi_mean), degrees(phi_std)))
        print("  nominal q: %.4f  actual q: %.4f +/- %.4f (1/Ang)"
              % (q_nominal, q_mean, q_std))

        #plt.hist(degrees(q_az), bins=50); plt.title("G: scattered rotation"); plt.figure()
        #plt.hist(degrees(q_el), bins=50); plt.title("G: scattered elevation"); plt.figure()
        #plt.hist(degrees(q_theta), bins=50); plt.title("G: Q theta"); plt.figure()
        #plt.hist(q, bins=50, density=True); plt.title("G: Q"); plt.figure()

        # plot resolution
        qual = "for pixel %d,%d"%(p_i, p_j)
        #plot_angles(q_theta, q_phi); plt.figure()
        plot_q(q, q_phi, "Q %s"%qual, plot_phi=True)
        #plot_q(q, q_phi, "Q %s"%qual, plot_phi=False)
        #plot_q(np.log10(q), q_phi, "Q %s"%qual, plot_phi=False)
        #plot_qperp(q, qperp, "Q %s"%qual)
        plt.suptitle(pixel_config)
    elif len(pixel_i) == 1 or len(pixel_j) == 1:
        stats = np.array(stats)
        plt.suptitle(config)
        plt.subplot(221)
        plt.plot(stats[:, 6], degrees(stats[:, 2]), '.')
        plt.grid(True)
        plt.xlabel(r'$Q (1/A)$')
        plt.ylabel(r'$\Delta\theta (\degree)$')
        plt.subplot(222)
        plt.plot(stats[:, 6], degrees(stats[:, 5]), '.')
        plt.grid(True)
        plt.xlabel(r'$Q (1/A)$')
        plt.ylabel(r'$\Delta\phi (\degree)$')
        if Iq is not None:
            q, dq, I, dI = stats[:, 7], stats[:, 8], stats[:, 12], stats[:, 13]
            plt.subplot(223)
            plt.plot(q, 100*dq/q, '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q/Q (\%)$')
            plt.subplot(224)
            plt.errorbar(q, I, dI, fmt='.')
            plt.xscale('log')
            plt.yscale('log')
            if mode == 'sum':
                pixel_r, pixel_q, pixel_Iq, pixel_dIq, pixel_dq = (
                    v.flatten() for v in (pixel_r, pixel_q, pixel_Iq, pixel_dIq, pixel_dq)
                    )
                mask = pixel_r >= Rbeamstop
                #plt.loglog(pixel_q[mask], pixel_Iq[mask], '.')
                plt.loglog(pixel_q, pixel_Iq, '.')
                np.savetxt("res_sum.dat", np.array([pixel_q, pixel_Iq, pixel_dIq, pixel_dq]).T)
            if mode == 'scatter':
                qp, Ip = pixel_q.flatten(), counts.flatten()
                qp = counts_q.flatten()
                mask = (pixel_r.flatten() >= Rbeamstop) & (qp > 0)
                qp, Ip = qp[mask], Ip[mask]
                plt.loglog(qp, Ip, '.')
                coeff = np.polyfit(log(qp), log(Ip), 1)
                plt.loglog(qp, exp(np.polyval(coeff, log(qp))), '-')
                print("fit to line", coeff)
            if False:  # add fit to line slope (for power law and fractal)
                coeff = np.polyfit(log(q[1:-1]), log(I[1:-1]), 1)
                plt.loglog(q, exp(np.polyval(coeff, log(q))), '-')
                print("fit to line", coeff)
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$I (1/cm)$')
            np.savetxt("res_Iq.dat", np.array([q, I, dI, dq]).T)
            if 1:
                plt.figure()
                plt.plot(stats[:, 6], stats[:, 7], '.')
                plt.xlabel("Q nominal")
                plt.ylabel("Q mean")
        else:
            plt.subplot(223)
            plt.plot(stats[:, 6], stats[:, 8], '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q_\parallel (1/A)$')
            plt.subplot(224)
            plt.plot(stats[:, 6], stats[:, 11], '.')
            plt.grid(True)
            plt.xlabel(r'$Q (1/A)$')
            plt.ylabel(r'$\Delta Q_\perp (1/A)$')
    else:
        stats = np.array(stats)
        plt.suptitle(config)
        plt.subplot(131)
        data, title = degrees(stats[:, 2]), r"$\Delta\theta$"
        data = np.ma.array(data, mask=(stats[:, 2] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI**2 + PJ**2 < phi_mask**2)
        #data = np.ma.array(data, mask=mask)
        #data, title = stats[:, 1]-stats[:, 0], r"$\theta - \hat\theta$"
        #data = np.clip(stats[:, 1]-stats[:, 0], 0, 0.02)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
        plt.subplot(132)
        data, title = degrees(stats[:, 5]), r"$\Delta\phi$"
        #data, title = stats[:, 4]-stats[:, 3], r"$\phi - \hat\phi$"
        data = np.ma.array(data, mask=(stats[:, 5] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI < phi_mask) & (abs(PJ) < phi_mask)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
        plt.subplot(133)
        #data, title = stats[:, 8], r"$\Delta q$"
        data, title = stats[:, 8]/stats[:, 6], r"$\Delta q/q$"
        data = np.ma.array(data, mask=(stats[:, 8] == 0))
        data = data.reshape(len(pixel_i), len(pixel_j))
        #mask = (PI**2+PJ**2 < phi_mask**2)
        #data = np.ma.array(data, mask=mask)
        #data, title = stats[:, 7]-stats[:, 6], r"$q - \hat q$"
        #data = np.clip(stats[:, 7]-stats[:, 6], 0, 0.0005)
        plt.pcolormesh(pixel_i, pixel_j, data)
        plt.grid(True)
        plt.axis('equal')
        plt.title(title)
        plt.colorbar()
Exemplo n.º 45
0
def _mc_error(ary, batches=5, circular=False):
    """Calculate the simulation standard error, accounting for non-independent samples.

    The trace is divided into batches, and the standard deviation of the batch
    means is calculated.

    Parameters
    ----------
    ary : Numpy array
        An array containing MCMC samples
    batches : integer
        Number of batches
    circular : bool
        Whether to compute the error taking into account `ary` is a circular variable
        (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).

    Returns
    -------
    mc_error : float
        Simulation standard error
    """
    _numba_flag = Numba.numba_flag
    if ary.ndim > 1:

        dims = np.shape(ary)
        trace = np.transpose([t.ravel() for t in ary])

        return np.reshape([_mc_error(t, batches, circular) for t in trace], dims[1:])

    else:
        if _not_valid(ary, check_shape=False):
            return np.nan
        if batches == 1:
            if circular:
                if _numba_flag:
                    std = _circular_standard_deviation(ary,
                                                       high=np.pi,
                                                       low=-np.pi)
                else:
                    std = stats.circstd(ary, high=np.pi, low=-np.pi)
            else:
                if _numba_flag:
                    std = float(_sqrt(svar(ary), np.zeros(1)))
                else:
                    std = np.std(ary)
            return std / np.sqrt(len(ary))

        batched_traces = np.resize(ary, (batches, int(len(ary) / batches)))

        if circular:
            means = stats.circmean(batched_traces,
                                   high=np.pi,
                                   low=-np.pi,
                                   axis=1)
            if _numba_flag:
                std = _circular_standard_deviation(means,
                                                   high=np.pi,
                                                   low=-np.pi)
            else:
                std = stats.circstd(means, high=np.pi, low=-np.pi)
        else:
            means = np.mean(batched_traces, 1)
            if _numba_flag:
                std = _sqrt(svar(means), np.zeros(1))
            else:
                std = np.std(means)

        return std / np.sqrt(batches)
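# A self-contained sketch of the batch-means idea described in the docstring,
# using scipy.stats directly for the circular case (illustration only; the
# _mc_error above additionally handles NaNs, multidimensional arrays and the
# Numba fast path). The sample data here are made up for demonstration.
import numpy as np
from scipy import stats

samples = np.random.vonmises(0.0, 2.0, size=4000)  # circular samples in [-pi, pi]
batches = 20
batch_means = stats.circmean(samples.reshape(batches, -1),
                             high=np.pi, low=-np.pi, axis=1)
mc_err = stats.circstd(batch_means, high=np.pi, low=-np.pi) / np.sqrt(batches)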
Exemplo n.º 46
0
    def get_tilts_planarfit(self, config):
        """Compute tilt angles in all windows using planar fit method

        Parameters
        ----------
        config : OrderedDict
            Dictionary with parsed configuration file.

        Returns
        -------
        pandas.DataFrame
            tilt angles for each window.

        """
        wind_names = ["wind_speed_u", "wind_speed_v", "wind_speed_w"]
        mot3d_names = ["acceleration_x", "acceleration_y",
                       "acceleration_z", "rate_x", "rate_y", "rate_z"]

        # Iterate over the list of tuples (key=window time stamp, file-list)
        msg_suffix = " tilt angles via planar fit for window %s"
        for (k, l) in self.win_files:
            logger.info("Begin" + msg_suffix, k)
            ec_list = []
            ec_ok_files = []
            # Iterate over the file-list
            for ec_file in l:
                logger.info("Begin preparing %s", osp.basename(ec_file))
                # Get a file name prefix to be shared by the output files
                # from this period.  Note iname is THE SAME AS THE INDEX IN
                # prep_flags
                iname = osp.basename(ec_file)
                # Try to prepare file; set flags, and populate the list of
                # OK files and summaries. Continue if preparation fails.
                try:
                    ec_prep, prep_flags = prepare_period(ec_file, config)
                except (NavigationError, SonicError) as err:
                    self.prep_flags.loc[iname, "failed_prep_flag"] = True
                    for flagname, flag in err.flags.items():
                        self.prep_flags.loc[iname, flagname] = flag
                        self.tilts.loc[k, "n" + flagname] += int(flag)
                    logger.info("Skip %s", ec_file)
                    continue
                else:
                    for flagname, flag in prep_flags.items():
                        self.prep_flags.loc[iname, flagname] = flag
                        self.tilts.loc[k, "n" + flagname] += int(flag)
                ec_list.append(pd.DataFrame(ec_prep.mean()).transpose())
                ec_ok_files.append(ec_file)
                logger.info("End preparing %s", osp.basename(ec_file))

            l = ec_ok_files
            n_ok = len(ec_list)
            self.tilts.loc[k, "nfiles_ok"] = n_ok
            if n_ok < 3:
                logger.info("Aborting window without enough adequate files %s",
                            k)
                for ec_file in l:
                    self.tilts.loc[k, "failed_tilt_flag"] = True
                continue
            else:
                ec_win = pd.concat(ec_list, keys=ec_ok_files)
                logger.info("Combining %s periods in window %s", n_ok, k)
                # Extract means from each period file for planar fitting
                wnd = ec_win.loc[:, wind_names]
                mot3d = ec_win.loc[:, mot3d_names[0:3]]
                # Tilt angles for motion sensor
                mot3d_pfit = planarfit(mot3d.values)
                self.tilts.loc[k, "phi_motion"] = mot3d_pfit.phi
                self.tilts.loc[k, "theta_motion"] = mot3d_pfit.theta
                # Tilt angles for sonic anemometer
                sonic_pfit = planarfit(wnd.values)
                self.tilts.loc[k, "phi_sonic"] = sonic_pfit.phi
                self.tilts.loc[k, "theta_sonic"] = sonic_pfit.theta
                wind_direction_mean = circmean(ec_win["wind_direction"].values,
                                               high=np.degrees(2 * np.pi))
                self.tilts.loc[k, "wind_direction"] = wind_direction_mean
            logger.info("End" + msg_suffix, k)
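# Quick illustration (not part of the class above) of why the wind direction is
# averaged with circmean(high=360) rather than np.mean: directions straddling
# north, e.g. 350 and 10 degrees, should average to roughly 0/360, not 180.
# >>> from scipy.stats import circmean
# >>> circmean([350.0, 10.0], high=360.0)   # ~0.0 (equivalently ~360.0)
# >>> np.mean([350.0, 10.0])                # 180.0 -- wrong for directions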
Exemplo n.º 47
0
    def Histogram(self, binsize=5.0, showfit='yes'):
        # Method to create a histogram from a vector catalog

        # Initialization
        # binsize: Bin size for the histogram in degrees.
        # showfit: Parameter to show the Gaussian fit on the final plot.
        #          Should be 'yes' or 'no'.

        print()
        print('========================================================')
        print('Constructing a trusty histogram of polarization angles')
        print('========================================================')
        print()

        print('Calculating mean and standard deviation of the catalog')
        print()

        # Conversion from degrees to radians
        conv_rad = math.pi / 180.0
        conv_deg = conv_rad**-1.0

        # Calculating the regular mean
        norm_mean = np.mean(self.O)
        norm_std = np.std(self.O)

        print('Mean: ' + str(norm_mean))
        print('Standard deviation: ' + str(norm_std))
        print()

        # Calculating the circular mean and circular standard deviation
        # assuming boundaries of -90 to 90 degrees
        circ_mean = conv_deg * stats.circmean(
            conv_rad * self.O, low=-math.pi / 2.0, high=math.pi / 2.0)
        circ_std = conv_deg * stats.circstd(
            conv_rad * self.O, low=-math.pi / 2.0, high=math.pi / 2.0)

        print('Circular mean: ' + str(circ_mean))
        print('Circular standard deviation: ' + str(circ_std))
        print()

        # Calculating the number of bins by rounding to lowest integer
        number_bins = math.floor(180.0 / (1.0 * binsize))
        eff_binsize = 180.0 / number_bins

        print('Number of bins: ' + str(number_bins))
        print('Bin size used: ' + str(eff_binsize))
        print()

        # Creating the histogram arrays
        histo_bins = np.zeros(number_bins)
        histo_values = np.zeros(number_bins)
        # Identifying the values of the bins
        for i in range(0, number_bins):
            histo_bins[i] = (i + 0.5) * eff_binsize - 90.0
        # Counting the number of vectors per bin - BRUTE FORCE VERSION
        # CONSIDER USING NUMPY HISTOGRAM FUNCTION INSTEAD
        for i in range(0, number_bins):
            # Limits of the bin
            min_limit = histo_bins[i] - 0.5 * eff_binsize
            max_limit = histo_bins[i] + 0.5 * eff_binsize
            # Checking every vector if they should be in the bin
            for j in range(0, self.size):
                if (self.O[j] >= min_limit) and (self.O[j] < max_limit):
                    histo_values[i] = histo_values[i] + 1
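        # A numpy-based alternative to the brute-force counting above, as the
        # note suggests (sketch only, left commented out so the behaviour of
        # this method is unchanged):
        # edges = np.linspace(-90.0, 90.0, number_bins + 1)
        # histo_values_np, _ = np.histogram(self.O, bins=edges)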

        # Find array value closest to circular mean
        abs_array = np.abs(histo_bins - circ_mean)
        smallest_diff = abs_array.argmin()
        # Calculating shift needed to center the histogram on circular mean
        shift = math.ceil(number_bins / 2.0) - smallest_diff
        # Rolling the histogram
        print('Using the circular mean to center the histogram')
        print('Rolling histogram by ' + str(shift) + ' elements')
        print()
        histo_bins_rolled = np.roll(histo_bins, shift)
        histo_values_rolled = np.roll(histo_values, shift)

        # Fixing values of rolled bins so array stays sorted
        if shift > 0:
            for i in range(0, shift):
                histo_bins_rolled[i] = histo_bins_rolled[i] - 180.0
        if shift < 0:
            for i in range(number_bins + shift, number_bins):
                histo_bins_rolled[i] = histo_bins_rolled[i] + 180.0

        # Attempting Gaussian fit to the data
        # Defining the Gaussian function, stolen from the internet
        # https://lmfit.github.io/lmfit-py/model.html
        def gaussian(x, amp, cen, wid):
            y = amp * np.exp(-(x - cen)**2 / (2 * wid**2))
            return y

        print('Fitting a Gaussian profile to the histogram')
        print()

        # Calling the lmfit package to model the 'gaussian' function
        gaussian_model = Model(gaussian)
        gaussian_fit = gaussian_model.fit(histo_values_rolled,
                                          x=histo_bins_rolled,
                                          amp=np.mean(histo_values_rolled),
                                          cen=circ_mean,
                                          wid=circ_std)
        # Creating the array containing the parameters
        fit_params = np.empty([3, 2])
        # Amplitude
        fit_params[0, 0] = gaussian_fit.params['amp'].value
        fit_params[0, 1] = gaussian_fit.params['amp'].stderr
        # Mean
        fit_params[1, 0] = gaussian_fit.params['cen'].value
        fit_params[1, 1] = gaussian_fit.params['cen'].stderr
        # Standard deviation
        fit_params[2, 0] = gaussian_fit.params['wid'].value
        fit_params[2, 1] = gaussian_fit.params['wid'].stderr

        print('Goodness of fit')
        print('Number of iterations: ' + str(gaussian_fit.nfev))
        print('Reduced chi-squared: ' + str(gaussian_fit.redchi))
        print()

        print('Fitted parameters')
        print('Amplitude:          ' + str(gaussian_fit.params['amp'].value))
        print('                  ± ' + str(gaussian_fit.params['amp'].stderr))
        print()
        print('Mean:               ' + str(gaussian_fit.params['cen'].value))
        print('                  ± ' + str(gaussian_fit.params['cen'].stderr))
        print()
        print('Standard deviation: ' + str(gaussian_fit.params['wid'].value))
        print('                  ± ' + str(gaussian_fit.params['wid'].stderr))
        print()

        print('Plotting a dapper histogram')
        print()

        # Plotting the histogram
        histo_plot = plt.figure(figsize=(6, 3))  # Creating the figure object
        plt.bar(histo_bins_rolled,
                histo_values_rolled,
                width=eff_binsize,
                fill=False)  # Creating the bar plot
        plt.xlabel('Polarization Angle (Degree)')
        plt.ylabel('Number')
        plt.tight_layout()  # Using all available space in the plot window
        # Setting range
        xmin = histo_bins_rolled[0] - eff_binsize / 2.0
        xmax = histo_bins_rolled[number_bins - 1] + eff_binsize / 2.0
        plt.xlim(xmin, xmax)
        # Modifying the axes directly
        axes = plt.gca()  # Calling the axes object of the figure
        axes.xaxis.set_ticks_position('both')  # Adding ticks to each side
        axes.yaxis.set_ticks_position('both')  # Adding ticks to each side
        # Plotting the best fit Gaussian
        # Creating x values
        gaussian_bins = np.arange(xmin, xmax, 0.1)
        # Calculating the y values
        gaussian_values = gaussian(gaussian_bins, fit_params[0, 0],
                                   fit_params[1, 0], fit_params[2, 0])
        # Plotting the Gaussian fit
        if showfit == 'yes':
            plt.plot(gaussian_bins, gaussian_values, 'k')

        print('Returning the histogram and the fit parameters')
        print()
        print('Stay classy!')
        # Returning figure and fits parameters
        return histo_plot, fit_params
def showModel(modeldata):
    """Model visualizations (particular timestep)."""

    visualizeStep = max(modeldata.Step.unique())
    f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, figsize=(8, 2))
    f.subplots_adjust(hspace=.3, wspace=.6, left=0.1, right=0.9)
    plt.suptitle('Model average, Step ' + str(visualizeStep))

    modeldata = modeldata[modeldata.Step == visualizeStep]
    ''' colormap '''
    cmap = plt.cm.jet
    cNorm = colors.Normalize(vmin=np.min(modeldata.competition),
                             vmax=np.max(modeldata.competition))
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cmap)
    low_col = scalarMap.to_rgba(min(modeldata.competition))
    high_col = scalarMap.to_rgba(max(modeldata.competition))
    ''' compute means & std '''
    xVals = modeldata.competition.unique()
    Gm = modeldata.groupby(['competition'])['mean_goal_value'].mean()
    Gstd = modeldata.groupby(['competition'])['mean_goal_value'].std()
    Pm = modeldata.groupby(['competition'])['mean_proxy_value'].mean()
    Pstd = modeldata.groupby(['competition'])['mean_proxy_value'].std()

    ax1.scatter(modeldata.competition,
                modeldata.mean_goal_value,
                alpha=0.5,
                c=low_col,
                marker=".",
                label='_nolegend_')
    ax1.scatter(modeldata.competition,
                modeldata.mean_proxy_value,
                alpha=0.5,
                c=high_col,
                marker=".",
                label='_nolegend_')
    ax1.plot(xVals, Pm, color=high_col, label='proxy')
    ax1.fill_between(xVals, Pm - Pstd, Pm + Pstd, color=high_col, alpha=0.2)
    ax1.plot(xVals, Gm, color=low_col, label='goal')
    ax1.fill_between(xVals, Gm - Gstd, Gm + Gstd, color=low_col, alpha=0.2)

    ax1.legend()
    ax1.set_ylabel('mean value')
    ax1.set_xlabel('competition')

    utility_mean = modeldata.groupby(['competition'])['mean_utility'].mean()
    utility_std = modeldata.groupby(['competition'])['mean_utility'].std()
    ax2.scatter(modeldata.competition,
                modeldata.mean_utility,
                alpha=0.2,
                c='g',
                marker=".",
                label='iterations')
    ax2.plot(xVals, utility_mean, c='g')
    ax2.fill_between(xVals,
                     utility_mean - utility_std,
                     utility_mean + utility_std,
                     alpha=0.2,
                     color='g')
    ax2.set_ylabel('mean utility', color='g')
    ax2.set_xlabel('competition')

    groups_m = modeldata.groupby(['competition'])['mean_practice']
    practice_mean = np.zeros(len(list(variable_parameters.values())[0]))
    i = 0
    for name, group in groups_m:
        practice_mean[i] = stats.circmean(group, low=-np.pi, high=np.pi)
        i += 1
    groups_std = modeldata.groupby(['competition'])['mean_practice']
    practice_std = np.zeros(len(list(variable_parameters.values())[0]))
    i = 0
    for name, group in groups_std:
        practice_std[i] = stats.circstd(group, low=-np.pi, high=np.pi)
        i += 1
    practice_mean = practice_mean / np.pi * 180
    practice_std = practice_std / np.pi * 180
    ax2a = ax2.twinx()
    ax2a.scatter(modeldata.competition,
                 modeldata.mean_practice / np.pi * 180,
                 alpha=0.2,
                 c='k',
                 marker=".",
                 label='iterations')
    ax2a.plot(xVals, practice_mean, c='k')
    ax2a.fill_between(xVals,
                      practice_mean - practice_std,
                      practice_mean + practice_std,
                      alpha=0.2,
                      color='k')
    ax2a.set_ylim([0, max(modeldata.mean_practice / np.pi * 180)])
    ax2a.set_ylabel('mean practice (°)')
    ''' vector plot '''
    G_oc_m = modeldata.groupby(['competition'])['mean_goal_oc'].mean()
    ax3.set_ylabel('goal (oc)')
    ax3.set_xlabel('proxy')

    for i in range(len(xVals)):
        colorVal = scalarMap.to_rgba(xVals[i])
        ax3.arrow(0, 0, Pm.values[i], G_oc_m.values[i], color=colorVal)

    ax3.set_ylim([0, np.max(Pm) + 1])
    ax3.set_xlim([0, np.max(Pm) + 1])
    ax3.set_aspect('equal')
    cbar_ax = f.add_axes([0.92, 0.15, 0.01, 0.7])
    mpl.colorbar.ColorbarBase(cbar_ax,
                              cmap=cmap,
                              norm=cNorm,
                              orientation='vertical',
                              label='competition')

    f.savefig('Model.pdf')