Code example #1
def Roff_FR_before_REM (X, H, dt):

    pre_REM = []
    period = int(60 / dt)
    time_vec = np.linspace(-period*dt, (period*dt)/2, int(1.5*period + 1))

    for i in range(len(H[0]) - 1):
        if H[0][i] != 1 and H[0][i+1] == 1:
            if i - period < 0 or (i + (period/2) + 1) > len(H[0]):
                continue
            pre_REM.append(X[i - period: i + int(period/2) + 1, 1])

    # average across REM transitions at each timepoint
    Roff_avg_FRs = np.mean(np.array(pre_REM), axis=0)

    plt.figure()
    plt.plot(time_vec, Roff_avg_FRs)
    plt.vlines(0, min(Roff_avg_FRs), max(Roff_avg_FRs), linestyles='dashed', color='gray')
    plt.xlabel('Time (t=0 corresponds to timepoint just before REM) (s)')
    plt.ylabel('Average REM-Off Firing Rate (Hz)')
    plt.title('REM-off Firing Rate Before and During REM Sleep')
    plt.show()
    
    return Roff_avg_FRs, time_vec
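A quick usage sketch (not from the original source): it assumes X is a (timesteps, populations) firing-rate array with the REM-off population in column 1, and H is a (1, timesteps) hypnogram in which REM is coded as 1.

import numpy as np

dt = 0.5                                  # timestep in seconds (assumed)
n = 20000
X = np.abs(np.random.randn(n, 3))         # synthetic firing rates; column 1 = REM-off
H = np.zeros((1, n))
H[0, 5000:6000] = 1                       # two synthetic REM episodes
H[0, 12000:13500] = 1

rates, t = Roff_FR_before_REM(X, H, dt)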
Code example #2
def Effect_size_plot(Stats,
                     ab_threshold=0,
                     p_threshold=0.05,
                     effect='effect',
                     p_value_method='we.eBH'):

    sig = (Stats[p_value_method] < p_threshold
           )  # &(Stats['rab.all']>1)&(Stats[effect].abs()>1)

    assert effect == 'effect', 'plot is made for effect size'
    abundance_FC_plot(Stats, sig.index[sig])

    ax = plt.gca()
    plt.vlines(ab_threshold, *ax.get_ylim(), linestyles='dashed')

    if 'BH' in p_value_method:
        Pvalue_method_name = "P_{BH}"
    else:
        Pvalue_method_name = 'P'

    ax.text(0.7,
            0.1,
            f"${Pvalue_method_name} < {p_threshold}$",
            transform=ax.transAxes)

    sig = sig & (Stats['rab.all'] > ab_threshold)

    sig = Stats.loc[sig].sort_values(effect, ascending=False).index

    return sig
Code example #3
def bifurcated_staged_decision_scores(test_score_staged, title_text, save_text,
                                      Y_test, W_test):
    plt.figure(figsize=(12, 8))
    for i, j in zip([0, 1, 4, 9, 15, 19], range(6)):
        plt.subplot(2, 3, j + 1)
        plt.hist(test_score_staged[i][Y_test == -1],
                 bins=100,
                 weights=W_test[Y_test == -1],
                 density=True,
                 histtype='stepfilled',
                 alpha=0.5)
        plt.hist(test_score_staged[i][Y_test == 1],
                 bins=100,
                 weights=W_test[Y_test == 1],
                 density=True,
                 color='r',
                 histtype='stepfilled',
                 alpha=0.5)
        cutoff = np.percentile(test_score_staged[i], 85)
        plt.vlines(x=cutoff, ymin=0, ymax=3.2, color='r')
        plt.xticks(fontsize='small')
        plt.yticks(fontsize='small')
        plt.ylim(0, 3.2)
        plt.title('stage:' + str(i + 1), fontsize='small')
    plt.suptitle('Boosting ' + title_text + ' - staged discriminant score',
                 fontsize='small')
    plt.savefig('../Graphs/' + save_text + '_staged_bi.png')
Code example #4
def plotDataFrame(show, filename, dataframe, axes=None, vlines=None, vlineColors=None, title=None):
    """plots puck properties

    :param show: show the plot created
    :param filename: save the plot to filename
    :param dataframe: dataframe with layers as columns and elementIds as index
    :param axes: matplotlib axes object
    :param hlines: x-coordinates with a vertical line to draw
    """
    if axes is None:
        fig = plt.figure()
        ax = fig.gca()
    else:
        ax = axes

    if title:
        ax.set_title(title)
    dataframe.plot(ax=ax)
    legendKwargs = {'bbox_to_anchor':(1.05, 1), 'loc':'upper left'} if axes is None else {'loc':'lower left'}
    ax.legend(**legendKwargs)

    if vlines is not None:
        if vlineColors is None:
            vlineColors = 'black'
        plt.vlines(vlines, dataframe.min().min(), dataframe.max().max(), colors=vlineColors, linestyles='dashed')

    if axes is None:
        plt.subplots_adjust(right=0.75, left=0.08)
        if filename:
            plt.savefig(filename)
        if show:
            plt.show()
        plt.close(fig)
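A toy call for illustration; the column and index names here are made up.

import numpy as np
import pandas as pd

elementIds = np.arange(50)
df = pd.DataFrame({'layer_1': np.sin(elementIds / 5.),
                   'layer_2': np.cos(elementIds / 5.)},
                  index=elementIds)

plotDataFrame(show=False, filename='puck.png', dataframe=df,
              vlines=[10, 30], vlineColors=['black', 'red'],
              title='puck properties (toy data)')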
Code example #5
    def probability(self):
        x = np.array([i for i in range(self.r_low, self.r_up + 1)])
        n = len(self.sample)
        y = np.array([len(list(filter(lambda xi: xi == i, self.sample))) / n for i in x])
        plt.ylim(0.0, 1.0)
        plt.plot(x, y, 'ro')
        plt.vlines(x, 0, ymax=y, colors='r', lw=1, alpha=0.5)
Code example #6
File: utils.py Project: ygidtu/pysashimi
def add_additional_background(region: SpliceRegion):
    if region.sites is None:
        return
    _, _, y1, y2 = pylab.axis()

    for site, color in region.sites.items():
        try:
            pylab.vlines(x=site,
                         ymin=y1,
                         ymax=y2,
                         color=color,
                         linestyles="dashed",
                         lw=0.5)
        except IndexError as err:
            logger.warning("Indicator line is out of bound: " + str(err))

    if region.focus is not None:
        for left, right in region.focus.items():
            try:
                fill_x = [left, right, right, left]

                _, _, y1, y2 = pylab.axis()
                fill_y = [y1, y1, y2, y2]

                pylab.fill(fill_x, fill_y, alpha=0.1, color='grey')
            except IndexError as err:
                logger.warning("focus region is out of bound: " + str(err))
Code example #7
def histogram_corrmat(corr_mat_lin, log=True, GAN="GAN", fig=None, label=""):
    if fig is None:
        fig = plt.figure(figsize=[4, 3])
    else:
        plt.figure(num=fig.number)
    plt.hist(corr_mat_lin.flatten()[~np.isnan(corr_mat_lin.flatten())],
             60,
             density=True,
             alpha=0.7,
             label=label)
    corr_mean = np.nanmean(corr_mat_lin)
    corr_medi = np.nanmedian(corr_mat_lin)
    _, YMAX = plt.ylim()
    plt.vlines(corr_mean, 0, YMAX, linestyles="dashed", color="black")
    plt.vlines(corr_medi, 0, YMAX, linestyles="dashed", color="red")
    plt.xlabel("corr(log(V_iH_jV_i), log(Lambda_j))"
               if log else "corr(V_iH_jV_i, Lambda_j)")
    plt.ylabel("density")
    # fig is guaranteed to be set at this point; keep any existing title
    origtitle = fig.axes[0].get_title()
    plt.title(
        origtitle +
        "Histogram of Non-Diag Correlation\n %s on %s scale\n mean %.3f median %.3f"
        % (GAN, "log" if log else "lin", corr_mean, corr_medi))
    plt.legend()
    # plt.show()
    return fig
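A call with fabricated data, just to show the expected input: a square correlation matrix with NaNs on the diagonal.

import numpy as np

corr = np.random.uniform(-1, 1, size=(64, 64))
np.fill_diagonal(corr, np.nan)            # keep only the off-diagonal entries
fig = histogram_corrmat(corr, log=True, GAN="toyGAN", label="toy")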
Code example #8
def plot_step(width, tstep, height, base, data):
    plt.vlines(x=tstep, ymin=base, ymax=height, colors='red')
    plt.vlines(x=tstep + width, ymin=base, ymax=height, colors='red')
    plt.hlines(y=base, xmin=np.min(data), xmax=tstep, colors='red')
    plt.hlines(y=height, xmin=tstep, xmax=tstep + width, colors='red')
    plt.hlines(y=base, xmin=tstep + width, xmax=np.max(data), colors='red')
    return
Code example #9
    def plot_correlogram(self, forecast_error):
        """
        Plots the correlogram for a given forecast.

            forecast_error (numpy array): Forecast error for Holt-Winters
        """

        # Autocorrelation
        error_mean_delta = forecast_error - np.mean(forecast_error)
        total_error = np.dot(error_mean_delta, error_mean_delta)
        lags = np.zeros(self.window)

        for i in range(1, self.window + 1):
            lags[i - 1] = np.dot(error_mean_delta[i:],
                                 error_mean_delta[:-i]) / total_error

        plt.vlines(range(1, self.window + 1), 0, lags)
        plt.hlines(0, 0, self.window + 2)

        # Significance level for autocorrelation
        ac_bound = 2. / np.sqrt(self.n)
        plt.hlines(ac_bound,
                   0,
                   self.window + 2,
                   color="k",
                   linestyles="dashed")
        plt.hlines(-ac_bound,
                   0,
                   self.window + 2,
                   color="k",
                   linestyles="dashed")
        plt.xlabel("Lag")
        plt.ylabel("Autocorrelation")
        plt.title("Correlogram for Forecast Error")
        plt.show()
Code example #10
File: utils.py Project: stamate/pdkit
def plot_walk_turn_segments(data, window=[1, 1, 1]):
    c, pk, p = cluster_walk_turn(data, window=window)

    contour_heights = data[pk] - p

    colors = [['red', 'green'][i] for i in c]
    plt.plot(data)
    plt.scatter(pk, data[pk], color=colors)
    plt.vlines(x=pk, ymin=contour_heights, ymax=data[pk], color=colors)
Code example #11
def ztest_cdf_sum(null_values, mut_rates, observed_values, plot=False,
                  plot_title='CDF sum', show_plot=True):
    """
    Use the sum of the cdf values
    The central limit theorem to get the normal distribution limit of the Monte Carlo test
    For tied values, using the average of the cdf values.
    :param null_values: List or numpy array of the scores for every mutation in the null model
    :param mut_rates: List or numpy array of the mutation rates for every mutation in the null model
    :param observed_values: List or numpy array of the scores for the observed mutations.
    :param plot: If True, will plot a histogram of the null distribution and show the observed value.
    :param plot_title: Title for the plot. Defaults to 'CDF sum'.
    :param show_plot: If True, call plt.show() once the plot is built.
    :return: Dictionary
    """
    num_obs = len(observed_values)
    sorted_exp_values, sorted_mut_rates = sort_multiple_arrays_using_one(null_values, mut_rates)

    # Reduce to unique values. May reduce length by a lot for discrete distributions
    # Method from https://stackoverflow.com/a/43094244
    sorted_mut_rates = np.array([np.sum(m) for m in np.split(sorted_mut_rates,
                                                             np.cumsum(np.unique(sorted_exp_values,
                                                                                 return_counts=True)[1]))[:-1]])
    sorted_exp_values = np.unique(sorted_exp_values)

    weights = sorted_mut_rates / sorted_mut_rates.sum()
    # For repeated values, use the mid-point of the cdf jump.
    # Will centre the null results on 0.5, even in skewed discrete cases.
    cumsum = np.cumsum(weights) - weights / 2

    cdf_var = get_cdf_var(cumsum, weights)

    observed_cumsum = cumsum[np.searchsorted(sorted_exp_values, observed_values)]

    obs_metric = observed_cumsum.sum()

    # Parameters for the normal distribution
    loc = num_obs * 0.5
    scale = np.sqrt(num_obs) * np.sqrt(cdf_var)

    if plot:
        x = np.linspace(*norm.interval(0.9999, loc=loc, scale=scale), 1000)
        plt.plot(x, norm.pdf(x, loc=loc, scale=scale), 'r--', label='Expected')
        ylim = plt.gca().get_ylim()
        plt.vlines(obs_metric, 0, ylim[1], label='Observed', color='k')
        plt.ylim([0, ylim[1]])
        plt.title(plot_title)
        plt.xlabel("CDF sum")
        plt.ylabel('Frequency')
        plt.legend()
        if show_plot:
            plt.show()

    pvalue = z_pvalue(obs_metric, loc=loc, scale=scale)

    results = {
        'pvalue': pvalue, 'cdf_mean': obs_metric / num_obs
    }
    return results
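A usage sketch with synthetic inputs; it assumes the helpers referenced above (sort_multiple_arrays_using_one, get_cdf_var, z_pvalue and scipy's norm) are available from the same module.

import numpy as np

rng = np.random.RandomState(0)
null_values = rng.randint(0, 20, size=5000)       # discrete scores with many ties
mut_rates = rng.uniform(0.5, 1.5, size=5000)      # one rate per null mutation
observed_values = rng.randint(5, 20, size=40)

res = ztest_cdf_sum(null_values, mut_rates, observed_values,
                    plot=True, plot_title='CDF sum (toy data)', show_plot=False)
print(res['pvalue'], res['cdf_mean'])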
Code example #12
def plot_tangent(function, target_x, x):
    katamuki = numeric_diff(function, target_x)   # katamuki = slope at target_x
    target_y = function(target_x)
    seppen = target_y - katamuki * target_x       # seppen = y-intercept of the tangent
    y = katamuki * x + seppen

    ## dot and lines
    plt.plot(target_x, target_y, marker='.')
    plt.vlines(target_x, -1, target_y, "m", linestyle=":")
    plt.hlines(target_y, 0, target_x, "m", linestyle=":")
    plt.plot(x, y)
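numeric_diff is defined elsewhere in the source; a minimal central-difference sketch (an assumption, not the original implementation) plus a call:

import numpy as np
import matplotlib.pyplot as plt

def numeric_diff(f, x, h=1e-4):
    # central-difference approximation of f'(x)
    return (f(x + h) - f(x - h)) / (2 * h)

f = lambda v: 0.01 * v**2 + 0.1 * v
xs = np.arange(0.0, 20.0, 0.1)
plt.plot(xs, f(xs))
plot_tangent(f, 5.0, xs)
plt.show()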
Code example #13
def closest_local_max(signal, point):
    local_maxs = np.where((signal > np.roll(signal, 1)) & (signal > np.roll(signal, -1)))[0]
    local_max = local_maxs[np.argmin(np.abs(local_maxs - point))]

    if False:
        plt.figure()
        plt.plot(signal)
        plt.plot(local_max, signal[local_max], 'o')
        plt.vlines([point], signal.min(), signal.max())
        plt.show()

    return local_max
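Example call with a synthetic signal (illustrative only):

import numpy as np

signal = np.sin(np.linspace(0, 6 * np.pi, 300))   # peaks near samples 25, 125, 224
print(closest_local_max(signal, 100))             # nearest peak to sample 100 (~125)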
Code example #14
def cell_edge_width(im, features):
    h, w = im.shape
    h2 = h // 2

    # look for frequency content at the frequency of the finger period
    if False:
        mid = im[h2 - 50:h2 + 50, :]
        period = int(round(features['finger_period']))
        period_avg = np.empty((period, im.shape[1]), np.float32)
        for offset in range(period):
            period_avg[offset, :] = mid[offset::period, :].mean(axis=0)
        col_var = period_avg.max(axis=0) - period_avg.min(axis=0)
    else:
        im_peaks = im[features['_peak_row_nums'], :]
        im_fingers = im[features['_finger_row_nums'][:-1], :]
        diff = (im_peaks - im_fingers)
        # col_var = diff.mean(axis=0)
        col_var = np.median(diff, axis=0)

        if False:
            view = ImageViewer(im_fingers)
            ImageViewer(im_peaks)
            ImageViewer(diff)
            view.show()

    col_var -= col_var.min()
    col_var /= col_var.max()
    interior = np.where(col_var > parameters.CELL_EDGE_STD_THRESH)[0]
    left, right = interior[[0, -1]]

    features['_col_var'] = col_var
    if features['_alg_mode'] == 'multi cell':
        # since one side might be impure (= low intensity & low variation) select the
        #  smaller of the two estimates
        edge_width = max(1, min(left, w - right))
        left, right = edge_width, w - edge_width
        features['cell_edge_left'] = left
        features['cell_edge_right'] = right
        features['cell_edge_tb'] = edge_width
    else:
        features['cell_edge_left'] = max(left, 1)
        features['cell_edge_right'] = min(w - 1, right)
        features['cell_edge_tb'] = ((w - right) + left) // 2

    if False:
        print(left, (w - right))
        # print features['cell_edge_width']
        plt.figure()
        plt.plot(col_var)
        plt.vlines([left, right], 0, col_var.max())
        view = ImageViewer(im)
        view.show()
        sys.exit()
Code example #15
def monte_carlo_test(null_values, mut_rates, observed_values, metric_function, num_draws, plot=False,
                     num_plot_bins=100, plot_title=None, testing_random_seed=None, show_plot=True, rerr=1e-7):
    """
    Use a chosen metric e.g. np.median, np.mean, np.sum etc for the Monte Carlo test.
    :param null_values: List or numpy array of the scores for every mutation in the null model
    :param mut_rates: List or numpy array of the mutation rates for every mutation in the null model
    :param observed_values: List or numpy array of the scores for the observed mutations.
    :param metric_function: Function that takes an array of floats as an input and returns a number. For example,
    numpy.mean or numpy.median
    :param num_draws: The number of random draws to use to build the null distribution of values.
    :param plot: If True, will plot a histogram of the null distribution and show the observed value.
    :param num_plot_bins: Number of bins to use for the histogram if plot=True
    :param plot_title: Title for the histogram if plot=True
    :param testing_random_seed: Int. If this is set, it will reset the numpy random seed before every time the test
    is run.
    :param show_plot: If True, call plt.show() once the plot is built.
    :param rerr:  Relative compensation for errors in summing of floating points. The values from the draws will
    be compared to the observed value * (1±rerr) (the more conservative case for each tail).
    :return: Dictionary
    """
    if testing_random_seed is not None:
        np.random.seed(testing_random_seed)

    if plot_title is None:
        plot_title = 'Monte Carlo Test - {}'.format(metric_function.__name__)

    num_obs = len(observed_values)
    obs_metric = metric_function(observed_values)
    samples = get_samples_from_mutational_spectrum(null_values, mut_rates, num_obs, num_draws)
    mc_metrics = np.sort(metric_function(samples, axis=1))
    if plot:
        bins = np.linspace(min(min(mc_metrics), obs_metric), max(max(mc_metrics), obs_metric), num_plot_bins)
        plt.hist(mc_metrics, bins=bins)
        ylim = plt.gca().get_ylim()
        plt.vlines(obs_metric, 0, ylim[1], color='k')
        plt.ylim(ylim)
        plt.title(plot_title)
        plt.xlabel(metric_function.__name__)
        plt.ylabel('Frequency')
        if show_plot:
            plt.show()

    pvalue, num_smaller_or_equal, num_larger_or_equal = monte_carlo_p_value(num_draws, mc_metrics,
                                                                            obs_metric, rerr)

    results = {
        'observed': obs_metric,
        'null_mean': np.mean(mc_metrics),
        'null_median': np.median(mc_metrics),
        'pvalue': pvalue,
        'num_smaller_or_equal': num_smaller_or_equal,
        'num_larger_or_equal': num_larger_or_equal
    }
    return results
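get_samples_from_mutational_spectrum and monte_carlo_p_value are module helpers that are not shown here; a plausible sketch of the sampler, assuming the mutation rates act as normalised sampling weights:

import numpy as np

def get_samples_from_mutational_spectrum(null_values, mut_rates, num_obs, num_draws):
    # draw num_draws pseudo-observations of size num_obs, weighting each
    # null score by its (normalised) mutation rate
    null_values = np.asarray(null_values)
    p = np.asarray(mut_rates, dtype=float)
    idx = np.random.choice(len(null_values), size=(num_draws, num_obs), p=p / p.sum())
    return null_values[idx]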
Code example #16
        def plot_correlation(self, other_interface, lines=False, bounds=None):
            """ Plot this interface's view on the underlying product vs
            another interface's view.

            Each vulnerability will be represented as a point, with the x coordinate
            representing the likelihood that this interface will discover the
            vulnerability in the next round, and the y coordinate representing
            the likelihood that the other interface will discover it this round.

            This plot is helpful for understanding the discovery correlation that
            arises with learning.

            Parameters
            ----------
            other_interface: Interface object
               The interface object must be built upon the same underlying 'product'
               as the present interface

            lines: True/False
               If true, will overlay horizontal and vertical lines representing the
               mean discovery profile for each actor, and label them with the mean
               value.

            bounds: None or float
               If float will set the x and y limits to this value.

            TODO
            ----
            It might be a good idea to make the bounds an x/y tuple if we want to
            initialize the interfaces with different `max_area` values.

            """
            plt.plot(self.circles.area, other_interface.circles.area, '.', alpha=.1)

            if bounds is None:
                window_size = max(self.circles.area.max(), other_interface.circles.area.max())
            else:
                window_size = bounds

            if lines:
                y_mean = np.mean(other_interface.circles.area)
                x_mean = np.mean(self.circles.area)
                plt.hlines(y_mean, 0, window_size)
                plt.text(window_size, y_mean, 'mean=%f'%y_mean, ha='right', va='bottom')
                plt.vlines(x_mean, 0, window_size)
                plt.text(x_mean, window_size, 'mean=%f'%x_mean, rotation=90, ha='right', va='top')

            plt.xlim(0, window_size)
            plt.ylim(0, window_size)
            plt.box(False)
            plt.xlabel(self.name + ' likelihood of discovery', fontsize=14)
            plt.ylabel(other_interface.name + ' likelihood of discovery', fontsize=14)
Code example #17
File: problem_set1.py Project: wingillis/neuralData
def plot_spikes(time, voltage, APTimes, titlestr):
    """
    plot_spikes takes four arguments - the recording time array, the voltage
    array, the time of the detected action potentials, and the title of your
    plot.  The function creates a labeled plot showing the raw voltage signal
    and indicating the location of detected spikes with red tick marks (|)
    """
    plt.figure()
    plt.plot(time, voltage)
    plt.vlines(APTimes, 500, 520, color='r')
    plt.xlabel('Time (s)')
    plt.ylabel('Voltage (uV)')  # units assumed from the course dataset
    plt.title(titlestr)

    plt.show()
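A synthetic call for illustration; the hardcoded tick heights (500-520) assume the voltage units of the course dataset.

import numpy as np

t = np.linspace(0, 1, 10000)              # 1 s of recording
v = 20 * np.random.randn(t.size)          # synthetic voltage trace
plot_spikes(t, v, [0.2, 0.45, 0.8], 'Synthetic recording')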
Code example #18
File: feature_analysis.py Project: SRHerzog/ut
def plot_feat_ranges(X, **kwargs):
    if isinstance(X[0], tuple) and len(X[0]) == 2:
        x_mins, x_maxs = zip(*X)
        n_feats = len(x_mins)
    else:
        X = array(X)
        x_mins = X.min(axis=0)
        x_maxs = X.max(axis=0)
        n_feats = X.shape[1]
    plt.figure(figsize=kwargs.pop('figsize', (12, 5)))
    plt.vlines(range(n_feats), x_mins, x_maxs, **kwargs)
    if n_feats < 40:
        plt.xticks(range(n_feats))
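Toy usage (the snippet relies on a module-level "from numpy import array"):

import numpy as np

X = np.random.randn(200, 10) * np.arange(1, 11)    # features with growing spread
plot_feat_ranges(X, color='steelblue')
plot_feat_ranges([(0., 1.), (2., 5.), (-1., 0.)])  # explicit (min, max) pairs also work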
Code example #19
File: plots.py Project: peri-source/peri
def sample_compare(N, samples, truestate, burn=0):
    h = samples[burn:]
    strue = truestate

    mu = h.mean(axis=0)
    std = h.std(axis=0)
    pl.figure(figsize=(20,4))
    pl.errorbar(range(len(mu)), (mu-strue), yerr=5*std/np.sqrt(h.shape[0]),
            fmt='.', lw=0.15, alpha=0.5)
    pl.vlines([0,3*N-0.5, 4*N-0.5], -1, 1, linestyle='dashed', lw=4, alpha=0.5)
    pl.hlines(0, 0, len(mu), linestyle='dashed', lw=5, alpha=0.5)
    pl.xlim(0, len(mu))
    pl.ylim(-0.02, 0.02)
    pl.show()
Code example #20
File: plot_util.py Project: artemyk/chordsentiment
def wordshift_plot(topvals, xpadding=0.0, textopts={}):

    kw = {
        'color': topvals.cols.values.tolist()
    } if 'cols' in topvals.columns else {}
    plt.barh(range(len(topvals)),
             topvals.ws.values,
             edgecolor='None',
             height=.4,
             **kw)

    plt.vlines(0, -20, 20, color='k', lw=LINEWIDTH)
    xmin, xmax = None, None
    invTrans = plt.gca().transData.inverted()
    for y, x in enumerate(topvals.ws):
        cword = topvals.index.values[y]
        ckw = textopts.copy()
        if 'cols' in topvals.columns:
            ckw['color'] = topvals.iloc[y].cols

        textobj = plt.text(x,
                           y - 0.1,
                           ' ' + cword + ' ',
                           ha='left' if x > 0 else 'right',
                           va='center',
                           **ckw)
        plt.draw()
        we = textobj.get_window_extent()
        cxmax, _ = invTrans.transform((we.xmax, we.ymax))
        if xmax is None or xmax < cxmax:
            xmax = cxmax
        cxmin, _ = invTrans.transform((we.xmin, we.ymin))
        if xmin is None or xmin > cxmin:
            xmin = cxmin
    plt.xlim([xmin - xpadding, xmax + xpadding])
    #plt.xlim([xmin*1.1-xpadding, xmax*1.1+xpadding])
    plt.ylim([-0.5, len(topvals)])
    plt.yticks([])
    #plt.gca().spines['top'].setp('color', 'k')
    plt.gca().spines['left'].set_visible(False)
    plt.gca().spines['right'].set_visible(False)
    plt.locator_params(axis='x', nbins=5)
Code example #21
def analyse_module(features):
    im = np.ascontiguousarray(features["_im_ratio_cropped"])
    h, w = im.shape
    # mask out rows and columns
    border = 20
    border_cols = features['_divisions_cols'] - features['_divisions_cols'][0]
    for c in border_cols:
        im[:, max(c - border, 0):min(c + border + 1, w)] = 0
    border_rows = features['_divisions_rows'] - features['_divisions_rows'][0]
    for r in border_rows:
        im[max(r - border, 0):min(r + border + 1, h), :] = 0

    # scale so the max is around 2**15
    scale = ((2**15) / im.max())
    im *= scale

    f = {}
    hist = ip.histogram_percentiles(im, f, skip_zero=True)
    hist = hist[:f['hist_percentile_99.9']]
    hist_norm = hist / hist.max()
    lower = np.where(hist_norm > 0.02)[0][0]
    upper = 2 * f['hist_peak'] - lower
    high_vals = (hist[upper:].sum() / float(hist.sum()))
    features['module_bright_area_fraction'] = high_vals

    if False:
        print "%s: %0.01f%%" % (features['fn'], high_vals)
        ip.print_metrics(f)
        plt.figure()
        plt.xlabel("PL/EL ratio")
        plt.ylabel("Count")
        plt.title("Above threshold: %0.02f%%" % high_vals)
        xs = np.arange(len(hist)) / float(scale)
        plt.plot(xs, hist)
        plt.vlines([upper / float(scale)], ymin=0, ymax=hist.max())
        if False:
            plt.savefig(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_1.png'))
            im = features["_im_ratio_cropped"]
            im[im > f['hist_percentile_99']] = f['hist_percentile_99']
            ip.save_image(
                os.path.join(r"C:\Users\Neil\Desktop\M1\hist",
                             features['fn'] + '_0.png'), im)
        else:
            plt.show()
            view = ImageViewer(im[::3, ::3])
            view.show()
        sys.exit()
Code example #22
File: my.py Project: otosense/oplot
def vlines(x,
           ymin=0,
           ymax=None,
           marker='o',
           marker_kwargs=None,
           colors='k',
           linestyles='solid',
           label='',
           data=None,
           **kwargs):
    if ymax is None:
        # single-argument form: treat x as the line heights, indexed 0..n-1
        ymax = x
        x = arange(len(ymax))

    if marker is not None:
        if marker_kwargs is None:
            marker_kwargs = {}
        plt.plot(x, ymax, marker, **marker_kwargs)

    return plt.vlines(x,
                      ymin=ymin,
                      ymax=ymax,
                      colors=colors,
                      linestyles=linestyles,
                      label=label,
                      data=data,
                      **kwargs)
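The single-argument form treats x as the line heights; for example (illustrative):

import numpy as np

heights = np.array([3, 1, 4, 1, 5])
vlines(heights)                                    # stems at x = 0..4 with 'o' tip markers
vlines([0.5, 2.5], ymin=-1, ymax=1, marker=None, colors='r')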
Code example #23
def find_cuts(cropped, mode, features):
    w = cropped.shape[1]
    bt_profile = np.median(cropped, axis=0)
    bt_profile = ndimage.gaussian_filter1d(bt_profile, 2)

    if mode == "SP":
        bt_profile /= bt_profile.max()
        bottom_thresh = parameters.CUTTING_THRESH_BOTTOM_SP
        top_thresh = parameters.CUTTING_THRESH_TOP_SP
        features['_sp_profile'] = bt_profile
    elif mode == "plir":
        bottom_thresh = parameters.CUTTING_THRESH_BOTTOM_PLIR
        top_thresh = parameters.CUTTING_THRESH_TOP_PLIR
        features['_plir_profile'] = bt_profile
    else:
        raise ValueError("unknown cutting mode: %s" % mode)

    peak_pos = np.argmax(bt_profile)
    first_half = bt_profile[:peak_pos][::-1]
    locs = np.where(first_half < bottom_thresh)[0]
    if len(locs) == 0:
        bottom_cut = 0
    else:
        bottom_cut = len(first_half) - locs[0] - 1

    second_half = bt_profile[peak_pos:]
    locs = np.where(second_half < top_thresh)[0]
    if len(locs) == 0:
        top_cut = w - 1
    else:
        top_cut = peak_pos + locs[0]

    if features['marker_loc'] > 0:
        b = features['marker_loc'] - bottom_cut
        t = top_cut - features['marker_loc']
    else:
        b, t = w - bottom_cut - 1, w - top_cut - 1

    if False:
        print(mode)
        print(features['marker_loc'], b, t)
        plt.figure()
        plt.plot(bt_profile)
        plt.vlines([bottom_cut, top_cut], 0, bt_profile.max())
        plt.show()

    return b, t
Code example #24
def vlines_ranges(X, aggr=('min', 'median', 'max'), axis=0, **kwargs):
    """vlines plot statistics of X matrix data"""
    if isinstance(aggr, int):
        if aggr == 2:
            aggr = ('min', 'max')
        elif aggr == 3:
            aggr = ('min', 'median', 'max')
    assert len(aggr) >= 2, "aggr must have at least 2 elements"

    lo_val = getattr(np, aggr[0])(X, axis=axis)
    hi_val = getattr(np, aggr[-1])(X, axis=axis)
    x = np.arange(len(lo_val))
    plt.vlines(x, ymin=lo_val, ymax=hi_val, **kwargs)
    if len(aggr) > 2:
        markers = 'oxsd'
        for i, a in enumerate(aggr[1:-1]):
            plt.plot(x, getattr(np, a)(X, axis=axis), markers[i], **kwargs)
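Example calls (illustrative):

import numpy as np

X = np.random.randn(100, 8)
vlines_ranges(X)                                   # min/median/max per column
vlines_ranges(X, aggr=('min', 'mean', 'max'), color='gray')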
Code example #25
def plot_psd_score(filename):
    
    ds = xr.open_dataset(filename)
    
    resolved_scale = find_wavelength_05_crossing(filename)
    
    plt.figure(figsize=(10, 5))
    ax = plt.subplot(121)
    ax.invert_xaxis()
    plt.plot((1./ds.wavenumber), ds.psd_ref, label='reference', color='k')
    plt.plot((1./ds.wavenumber), ds.psd_study, label='reconstruction', color='lime')
    plt.xlabel('wavelength [km]')
    plt.ylabel('Power Spectral Density [m$^{2}$/cy/km]')
    plt.xscale('log')
    plt.yscale('log')
    plt.legend(loc='best')
    plt.grid(which='both')
    
    ax = plt.subplot(122)
    ax.invert_xaxis()
    plt.plot((1./ds.wavenumber), (1. - ds.psd_diff/ds.psd_ref), color='k', lw=2)
    plt.xlabel('wavelength [km]')
    plt.ylabel('PSD Score [1. - PSD$_{err}$/PSD$_{ref}$]')
    plt.xscale('log')
    plt.hlines(y=0.5, 
              xmin=np.ma.min(np.ma.masked_invalid(1./ds.wavenumber)), 
              xmax=np.ma.max(np.ma.masked_invalid(1./ds.wavenumber)),
              color='r',
              lw=0.5,
              ls='--')
    plt.vlines(x=resolved_scale, ymin=0, ymax=1, lw=0.5, color='g')
    ax.fill_betweenx((1. - ds.psd_diff/ds.psd_ref), 
                     resolved_scale, 
                     np.ma.max(np.ma.masked_invalid(1./ds.wavenumber)),
                     color='green',
                     alpha=0.3, 
                     label=f'resolved scales \n $\\lambda$ > {int(resolved_scale)}km')
    plt.legend(loc='best')
    plt.grid(which='both')
    
    logging.info(' ')
    logging.info(f'  Minimum spatial scale resolved = {int(resolved_scale)}km')
    
    plt.show()
    
    return resolved_scale
Code example #26
def plot_volcano(logFC, p_val, sample_name, saveName, logFC_thresh):
    fig = pl.figure()
    ## To plot and save
    pl.scatter(logFC[(p_val > 0.05) | (abs(logFC) < logFC_thresh)],
               -np.log10(p_val[(p_val > 0.05) | (abs(logFC) < logFC_thresh)]),
               color='blue',
               alpha=0.5)
    pl.scatter(logFC[(p_val < 0.05) & (abs(logFC) > logFC_thresh)],
               -np.log10(p_val[(p_val < 0.05) & (abs(logFC) > logFC_thresh)]),
               color='red')
    pl.hlines(-np.log10(0.05), min(logFC), max(logFC))
    pl.vlines(-logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.vlines(logFC_thresh, min(-np.log10(p_val)), max(-np.log10(p_val)))
    pl.xlim(-3, 3)
    pl.xlabel('Log Fold Change')
    pl.ylabel('-log10(p-value)')
    pl.title(sample_name)
    pl.savefig(saveName)
    pl.close(fig)
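A call with fabricated values, just to show the expected inputs (numpy arrays of matching length):

import numpy as np

rng = np.random.RandomState(1)
logFC = rng.randn(1000)
p_val = rng.uniform(1e-6, 1.0, size=1000)
plot_volcano(logFC, p_val, 'sampleA', 'volcano_sampleA.png', logFC_thresh=1.0)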
Code example #27
def histogram_corrmat(corr_mat_lin, log=True, GAN="GAN"):
    fig = plt.figure(figsize=[4, 3])
    plt.hist(corr_mat_lin.flatten()[~np.isnan(corr_mat_lin.flatten())],
             60,
             density=True)
    corr_mean = np.nanmean(corr_mat_lin)
    corr_medi = np.nanmedian(corr_mat_lin)
    _, YMAX = plt.ylim()
    plt.vlines(corr_mean, 0, YMAX, linestyles="dashed", color="black")
    plt.vlines(corr_medi, 0, YMAX, linestyles="dashed", color="red")
    plt.xlabel("corr(log(V_iH_jV_i), log(Lambda_j))"
               if log else "corr(V_iH_jV_i, Lambda_j)")
    plt.ylabel("density")
    plt.title(
        "Histogram of Non-Diag Correlation\n %s on %s scale\n mean %.3f median %.3f"
        % (GAN, "log" if log else "lin", corr_mean, corr_medi))
    plt.show()
    return fig
Code example #28
File: wave.py Project: nblago/kpy
def showspec(npyfile):
    s = np.load(npyfile)[0]
    
    hwl = np.array([3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) / 10.
    tll = np.array([6875, 7610]) / 10.
    
    #for si in s['sky_spaxel_ids_A'][0]['spectra']:
    for si in s['spectra']:
        plt.plot(s['nm'], si)
        
    mmax = np.max(s['spectra'])
    mmin = np.min(s['spectra'])
    
    for w in hwl:
        plt.vlines(w, mmin, mmax)
        
    for w in tll:
        plt.vlines(w, mmin, mmax, 'b')
    plt.show()
Code example #29
File: wave.py Project: rswalters/kpy
def showspec(npyfile):
    s = np.load(npyfile)[0]

    hwl = np.array([3970.07, 4101.76, 4340.47, 4861.33, 6562.80]) / 10.
    tll = np.array([6875, 7610]) / 10.

    #for si in s['sky_spaxel_ids_A'][0]['spectra']:
    for si in s['spectra']:
        plt.plot(s['nm'], si)

    mmax = np.max(s['spectra'])
    mmin = np.min(s['spectra'])

    for w in hwl:
        plt.vlines(w, mmin, mmax)

    for w in tll:
        plt.vlines(w, mmin, mmax, 'b')
    plt.show()
Code example #30
def show_relu(x, y, v, with_min=False):
    v_scaled = scale(v)
    plt.imshow(v_scaled, interpolation='bicubic')

    x_min, y_min, v_min, v_max = get_mins_max(x, y, v)
    nx = x.shape[0]
    cen = nx/2.0
    vs_min = v_scaled.min()
    vs_max = v_scaled.max()
    levels = np.linspace(vs_min, vs_max, 300)
    plt.contour(v_scaled, levels)

    plt.vlines(cen, 0, nx, color='gray')
    plt.hlines(cen, 0, nx, color='gray')

    if with_min:
        plt.vlines(cen + x_min, 0, nx, color='red', linestyle='--')
        plt.hlines(cen + y_min, 0, nx, color='red', linestyle='--')
    plt.draw()
Code example #31
File: util.py Project: mackelab/poisson-gpfa
def raster(event_times_list, color='k'):
    """
    Creates a raster plot

    Parameters
    ----------
    event_times_list : iterable
                       a list of event time iterables
    color : string
            color of vlines

    Returns
    -------
    ax : an axis containing the raster plot
    """
    ax = plt.gca()
    for ith, trial in enumerate(event_times_list):
        plt.vlines(trial, ith + .5, ith + 1.5, color=color)
    plt.ylim(.5, len(event_times_list) + .5)
    return ax
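Example with random spike trains (illustrative):

import numpy as np
import matplotlib.pyplot as plt

spike_trains = [np.sort(np.random.uniform(0, 10, size=30)) for _ in range(8)]
ax = raster(spike_trains, color='k')
ax.set_xlabel('time (s)')
plt.show()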
Code example #32
File: xiaozou.py Project: GitLeftZhou/learnPython
def ACF_PACF(ts, lag=20):
    lag_acf = acf(ts, nlags=lag)
    lag_pacf = pacf(ts, nlags=lag, method='ols')
    # Plot the ACF:
    plt.subplot(121)
    plt.vlines(range(len(lag_acf)), [0], lag_acf, linewidth=5.0)
    plt.plot(lag_acf)
    plt.axhline(y=0, linestyle=':', color='blue')
    plt.axhline(y=-1.96 / np.sqrt(len(ts)), linestyle='--', color='red')
    plt.axhline(y=1.96 / np.sqrt(len(ts)), linestyle='--', color='red')
    plt.title('Autocorrelation Function')
    # Plot the PACF:
    plt.subplot(122)
    plt.vlines(range(len(lag_pacf)), [0], lag_pacf, linewidth=5.0)
    plt.plot(lag_pacf)
    plt.axhline(y=0, linestyle=':', color='blue')
    plt.axhline(y=-1.96 / np.sqrt(len(ts)), linestyle='--', color='red')
    plt.axhline(y=1.96 / np.sqrt(len(ts)), linestyle='--', color='red')
    plt.title('Partial Autocorrelation Function')
    plt.tight_layout()
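Usage sketch, assuming the module imports acf and pacf from statsmodels:

import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf, pacf

ts = np.cumsum(np.random.randn(500))   # a random walk has a slowly decaying ACF
ACF_PACF(ts, lag=20)
plt.show()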
Code example #33
File: util.py Project: peterjiz/poisson-gpfa
def raster(event_times_list, color='k'):
    """
    Creates a raster plot

    Parameters
    ----------
    event_times_list : iterable
                       a list of event time iterables
    color : string
            color of vlines

    Returns
    -------
    ax : an axis containing the raster plot
    """
    ax = plt.gca()
    for ith, trial in enumerate(event_times_list):
        plt.vlines(trial, ith + .5, ith + 1.5, color=color)
    plt.ylim(.5, len(event_times_list) + .5)
    return ax
Code example #34
File: myelin_map_funcs.py Project: atsuch/myelin_map
def plot_mask_dist(t1_fn, t2_fn, eye_mask_fn, temp_bone_mask_fn, stat=None):
    # default to 'mode' so the titles below ('Eye ' + stat) don't fail on None
    if stat is None:
        stat = 'mode'

    t1 = nb.load(t1_fn).get_data().ravel()
    t2 = nb.load(t2_fn).get_data().ravel()

    eye_mask = nb.load(eye_mask_fn).get_data().ravel()
    temp_mask = nb.load(temp_bone_mask_fn).get_data().ravel()

    if stat == 'mean':
        t1_eye_stat = np.mean(t1[eye_mask])
        t1_temp_stat = np.mean(t1[temp_mask])
        t2_eye_stat = np.mean(t2[eye_mask])
        t2_temp_stat = np.mean(t2[temp_mask])

    elif stat == 'median':
        t1_eye_stat = np.median(t1[eye_mask])
        t1_temp_stat = np.median(t1[temp_mask])
        t2_eye_stat = np.median(t2[eye_mask])
        t2_temp_stat = np.median(t2[temp_mask])

    elif stat == 'mode':
        t1_eye_stat = stats.mode(t1[eye_mask][t1[eye_mask] > 0])[0][0]
        t1_temp_stat = stats.mode(t1[temp_mask][t1[temp_mask] > 0])[0][0]
        t2_eye_stat = stats.mode(t2[eye_mask][t2[eye_mask] > 0])[0][0]
        t2_temp_stat = stats.mode(t2[temp_mask][t2[temp_mask] > 0])[0][0]

    fig = plt.figure(figsize=[15, 10])
    plt.subplot(2, 1, 1)
    plt.title('Eye ' + stat)
    sns.distplot(t1[eye_mask], label='T1')
    ymin, ymax = fig.gca().axes.get_ybound()
    plt.vlines(x=t1_eye_stat, ymin=ymin, ymax=ymax)
    sns.distplot(t2[eye_mask], label='T2')
    plt.vlines(x=t2_eye_stat, ymin=ymin, ymax=ymax)
    plt.legend()
    plt.show()

    fig = plt.figure(figsize=[15, 10])
    plt.subplot(2, 1, 2)
    plt.title('Temporal Bone ' + stat)
    sns.distplot(t1[temp_mask], label='T1')
    ymin, ymax = fig.gca().axes.get_ybound()
    plt.vlines(x=t1_temp_stat, ymin=ymin, ymax=ymax)
    sns.distplot(t2[temp_mask], label='T2')
    plt.vlines(x=t2_temp_stat, ymin=ymin, ymax=ymax)
    plt.legend()
    plt.show()

    print('T1 eye: {}\nT2 eye: {}\nT1 temp: {}\nT2 temp: {}'.format(
        t1_eye_stat, t2_eye_stat, t1_temp_stat, t2_temp_stat))
Code example #35
File: nbinom.py Project: dvav/dgeclust
    def plot_clusters(self, fig=None, npoints=100):
        """Plot LFC clusters"""

        # data
        beta = self.beta[self.iact]
        occ = self.occ[self.iact]
        x = np.linspace(beta.min()-1, beta.max()+1, npoints)
        y = np.exp(st.normalln(x, self.m0, 1 / np.sqrt(self.t0)))

        # plot
        fig = pl.figure() if fig is None else fig
        pl.figure(fig.number)

        pl.plot(x, y)
        pl.axvline(0, linestyle='--', color='k')
        pl.vlines(beta[1:], [0], occ[1:] / occ[1:].sum(), color='r')

        pl.grid()
        pl.xlabel('LFC')
        pl.ylabel('density')
        pl.legend(['LFC prior', 'null cluster', 'non-null clusters'], loc=0)

        pl.tight_layout()
Code example #36
File: views.py Project: JasonGodlove/RehabTasker
def plot_hist(past_users,prediction):
    #Plots the histogram and prediction     
    sns.set(style="white")
    
    fig = plt.figure()
    sbin = np.array(range(12)) + .5
    sns.distplot(past_users,bins=sbin,color='B',norm_hist=False)
    '''
    c = sns.color_palette('Spectral', 12)#"RdYlGn",'Spectral'
    c.reverse() #so red is on far right
    for i in range(12):
        sns.distplot(past_users,bins=sbin[i:],color=c[i],kde=False,norm_hist=False)
    '''
    
    #plt.hist(past_users,bins=np.array(range(12))+.5)
    axis = plt.axis()
    plt.axis([.5,11.5,axis[2],axis[3]])
    plt.vlines(prediction,axis[2],axis[3],linewidth=4,color='k')
    plt.yticks(list(drange(0, axis[3], .1)), fontsize=16)
    plt.xticks(range(1, 12), fontsize=16)
    plt.xlabel('# of Days to Complete the Task', fontsize=16)
    plt.ylabel('Proportion of Users', fontsize=16)
    
    return fig
Code example #37
File: window_selection.py Project: seancug/LASIF
def plot_windows(data_trace, synthetic_trace, windows, dominant_period,
                 filename=None, debug=False):
    """
    Helper function plotting the picked windows in some variants. Useful for
    debugging and checking what's actually going on.

    If using the debug option, please use the same data_trace and
    synthetic_trace as you used for the select_windows() function. They will
    be augmented with certain values used for the debugging plots.

    :param data_trace: The data trace.
    :type data_trace: obspy.core.trace.Trace
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: obspy.core.trace.Trace
    :param windows: The windows, as returned by select_windows()
    :type windows: list
    :param dominant_period: The dominant period of the data. Used for the
        tapering.
    :type dominant_period: float
    :param filename: If given, a file will be written. Otherwise the plot
        will be shown.
    :type filename: str
    :param debug: Toggle plotting debugging information. Optional. Defaults
        to False.
    :type debug: bool
    """
    import matplotlib.pylab as plt
    from obspy.signal.invsim import cosTaper

    plt.figure(figsize=(16, 10))
    plt.subplots_adjust(hspace=0.3)

    npts = synthetic_trace.stats.npts

    # Plot the raw data.
    time_array = np.linspace(0, (npts - 1) * synthetic_trace.stats.delta, npts)
    plt.subplot(411)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    plt.xlim(0, time_array[-1])
    plt.title("Raw data")

    # Plot the chosen windows.
    bottom = np.ones(npts) * -10.0
    top = np.ones(npts) * 10.0
    for left_idx, right_idx in windows:
        top[left_idx: right_idx + 1] = -10.0
    plt.subplot(412)
    plt.plot(time_array, data_trace.data, color="black", label="data")
    plt.plot(time_array, synthetic_trace.data, color="red",
             label="synthetics")
    ymin, ymax = plt.ylim()
    plt.fill_between(time_array, bottom, top, color="red", alpha=0.5)
    plt.xlim(0, time_array[-1])
    plt.ylim(ymin, ymax)
    plt.title("Chosen windows")

    # Plot the tapered data.
    final_data = np.zeros(npts)
    final_data_scaled = np.zeros(npts)
    synth_data = np.zeros(npts)
    synth_data_scaled = np.zeros(npts)

    for left_idx, right_idx in windows:
        right_idx += 1
        length = right_idx - left_idx

        # Setup the taper.
        p = (dominant_period / synthetic_trace.stats.delta / length) / 2.0
        if p >= 0.5:
            p = 0.49
        elif p < 0.1:
            p = 0.1
        taper = cosTaper(length, p=p)

        data_window = taper * data_trace.data[left_idx: right_idx].copy()
        synth_window = taper * synthetic_trace.data[left_idx: right_idx].copy()

        data_window_scaled = data_window / data_window.ptp() * 2.0
        synth_window_scaled = synth_window / synth_window.ptp() * 2.0

        final_data[left_idx: right_idx] = data_window
        synth_data[left_idx: right_idx] = synth_window
        final_data_scaled[left_idx: right_idx] = data_window_scaled
        synth_data_scaled[left_idx: right_idx] = synth_window_scaled

    plt.subplot(413)
    plt.plot(time_array, final_data, color="black")
    plt.plot(time_array, synth_data, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows")

    plt.subplot(414)
    plt.plot(time_array, final_data_scaled, color="black")
    plt.plot(time_array, synth_data_scaled, color="red")
    plt.xlim(0, time_array[-1])
    plt.title("Tapered windows, scaled to same amplitude")

    if debug:
        first_valid_index = data_trace.stats.first_valid_index * \
            synthetic_trace.stats.delta
        noise_level = data_trace.stats.noise_level

        data_p, data_t, data_e = find_local_extrema(
            data_trace.data, start_index=first_valid_index)
        synth_p, synth_t, synth_e = find_local_extrema(
            synthetic_trace.data, start_index=first_valid_index)

        for _i in range(1, 3):
            plt.subplot(4, 1, _i)
            ymin, ymax = plt.ylim()
            xmin, xmax = plt.xlim()
            plt.vlines(first_valid_index, ymin, ymax, color="green",
                       label="Theoretical First Arrival")
            plt.hlines(noise_level, xmin, xmax, color="0.5",
                       label="Noise Level", linestyles="--")
            plt.hlines(-noise_level, xmin, xmax, color="0.5", linestyles="--")

            plt.hlines(noise_level * 5, xmin, xmax, color="0.8",
                       label="Minimal acceptable amplitude", linestyles="--")
            plt.hlines(-noise_level * 5, xmin, xmax, color="0.8",
                       linestyles="--")
            if _i == 2:
                plt.scatter(time_array[data_e], data_trace.data[data_e],
                            color="black", s=10)
                plt.scatter(time_array[synth_e], synthetic_trace.data[synth_e],
                            color="red", s=10)
            plt.ylim(ymin, ymax)
            plt.xlim(xmin, xmax)

        plt.subplot(411)
        plt.legend(prop={"size": "small"})

    plt.suptitle(data_trace.id)

    if filename:
        plt.savefig(filename)
    else:
        plt.show()
Code example #38
File: gauss_model.py Project: ekta1224/GALEXFlares
    mu = 50

    plt.figure(figsize=(8,12))    
    data = photons(start, stop, rate, sigma, amp, mu)
    
    plt.subplot(411)
    sigmas = np.arange(-19.5, 20.5, 0.1) 
    lls_sig = [] 
    for sig in sigmas:
        ll = gauss_lnlike(start, stop, rate, sig, amp, mu, data)
        lls_sig.append(ll)
    plt.plot(sigmas, lls_sig)
    ymin = np.sort(lls_sig)[np.isfinite(np.sort(lls_sig))][0]
    ymax = np.sort(lls_sig)[np.isfinite(np.sort(lls_sig))][-1]
    print(ymin, ymax)
    plt.vlines(x=sigma, ymin=ymin, ymax=ymax, colors='red', linestyle='--')
    plt.xlabel(r'$\sigma$')
    plt.ylabel('likelihood')

    plt.subplot(412)
    lls_mu = []
    mus = np.arange(0.0001,100, .5)
    for m in mus:
        ll = gauss_lnlike(start, stop, rate, sigma, amp, m, data)
        lls_mu.append(ll)
    ymin = np.sort(lls_mu)[np.isfinite(np.sort(lls_mu))][0]
    ymax = np.sort(lls_mu)[np.isfinite(np.sort(lls_mu))][-1]
    print(ymin, ymax)
    plt.plot(mus, lls_mu)
    plt.vlines(x=mu, ymin=ymin, ymax=ymax, colors='red', linestyle='--')
    plt.xlabel(r'$\mu$')
Code example #39
def plot_ice_cover_eb_simple(
        ice_cover, energy_balance, observed_ice, date, temp, snotot, filename):
    """

    :param ice_cover:
    :param energy_balance:
    :param observed_ice:
    :param date:
    :param temp:
    :param snotot:
    :param filename:
    :return:

    Note: http://matplotlib.org/mpl_examples/color/named_colors.png
    """

    fsize = (16, 16)
    plt.figure(figsize=fsize)
    #fig = pplt.figure(figsize=fsize)
    plt.clf()


    ############## First subplot
    plt.subplot2grid((5, 1), (0, 0), rowspan=2)

    # depending on how many days are in the plot, the line weight of the modelled data should be adjusted
    modelledLineWeight = 1100/len(ice_cover)

    # don't need to keep the column coordinates, but then again, why not..? Useful for debugging
    allColumnCoordinates = []

    # plot total snow depth on land
    plb.plot(date, snotot, "gray")

    plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover)))

    # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit.
    lowest_point = 0.

    # Plot ice_cover
    for ic in ice_cover:

        # some idea of progress on the plotting
        if ic.date.day == 1:
            print((ic.date).strftime('%Y%m%d'))

        # make data for plotting. [icelayers.. [fro, too, icetype]].
        columncoordinates = []
        too = -ic.water_line  # water line is on xaxis

        for i in range(len(ic.column)-1, -1, -1):
            layer = ic.column[i]
            fro = too
            too = too + layer.height
            columncoordinates.append([fro, too, layer.type])

            if fro < lowest_point:
                lowest_point = fro

            # add coordinates to a vline plot
            plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type))

        allColumnCoordinates.append(columncoordinates)


    # plot observed ice columns
    for ic in observed_ice:

        if len(ic.column) == 0:
            height = 0.05
            plb.vlines(ic.date, -height, height, lw=4, color='white')
            plb.vlines(ic.date, -height, height, lw=2, color='red')
        else:
            # some idea of progress on the plotting
            print("Plotting observations.")

            # make data for plotting. [ice layers.. [fro, too, icetype]].
            too = -ic.water_line  # water line is on xaxis

            for i in range(len(ic.column)-1, -1, -1):
                layer = ic.column[i]
                fro = too
                too = too + layer.height

                if fro < lowest_point:
                    lowest_point = fro

                padding = 0.
                padding_color = 'white'
                # outline the observations in orange if I have modelled the ice height after observation.
                if ic.metadata.get('IceHeightAfter') == 'Modeled':
                    padding_color = 'orange'
                # add coordinates to a vline plot
                plb.vlines(ic.date, fro-padding, too+padding, lw=6, color=padding_color)
                plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour())

    # the limits of the left side y-axis are defined relative to the lowest point in the ice cover
    # and the highest point of the observed snow cover.
    plb.ylim(lowest_point*1.1, max(snotot)*1.05)

    # Plot temperatures on a separate y axis
    plb.twinx()
    temp_pluss = []
    temp_minus = []

    for i in range(0, len(temp), 1):
        if temp[i] >= 0:
            temp_pluss.append(temp[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(temp[i])
            temp_pluss.append(np.nan)

    plb.plot(date, temp, "black")
    plb.plot(date, temp_pluss, "red")
    plb.plot(date, temp_minus, "blue")
    plb.ylim(-4*(max(temp)-min(temp)), max(temp))


    ########################################

    temp_atm = []
    temp_surf = []
    atm_minus_surf = []
    itterations = []
    EB = []
    S = []
    L = []
    H = []
    LE = []
    T = []
    R = []
    G = []
    s_inn = []
    albedo = []
    SC = []
    R_i = []
    stability_correction = []
    CC = []
    SM = []


    if energy_balance[0].date > date[0]:
        i = 0
        while energy_balance[0].date > date[i]:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            T.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)
            i += 1

    for eb in energy_balance:
        if eb.EB is None:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            T.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)

        else:
            temp_atm.append(eb.temp_atm)
            temp_surf.append(eb.temp_surface)
            atm_minus_surf.append(eb.temp_atm-eb.temp_surface)
            itterations.append(eb.iterations)
            EB.append(eb.EB)
            S.append(eb.S)
            L.append(eb.L_a+eb.L_t)
            H.append(eb.H)
            LE.append(eb.LE)
            T.append(eb.H+eb.LE)
            R.append(eb.R)
            G.append(eb.G)
            s_inn.append(eb.s_inn)
            albedo.append(eb.albedo)
            SC.append(eb.SC)
            R_i.append(eb.R_i)
            stability_correction.append(eb.stability_correction)
            CC.append(eb.CC)
            SM.append(eb.SM)


    #############################
    plt.subplot2grid((5, 1), (2, 0), rowspan=3)


    plb.plot(date, SM, "red", lw=2)
    plb.plot(date, SC, "blue", lw=2)
    plb.plot(date, [0.]*len(date), "white", lw=2)
    #plb.plot(date, H, "blue")
    #plb.plot(date, LE, "navy")
    #plb.plot(date, T, "blue")
    plb.plot(date, R, "black")
    #plb.plot(date, G, "crimson")
    #plb.plot(date, L, "green", lw=1)
    #plb.plot(date, S, "gold", lw=1)
    #plb.plot(date, s_inn, "gold", lw=1)

    #plb.plot(date, CC, "pink", lw=1)
    #plb.plot(date, EB, "black")

    plb.ylim(-5000, 5000)
    plb.xlim(date[0], date[-1])
     #fig.tight_layout()
    plb.ylabel("Q [kJ/m2/24hrs]")


    plb.savefig(filename)
Code example #40
File: step_model.py Project: ekta1224/GALEXFlares
    print(len(fakedata))
    fakedata = np.array(fakedata)
    plt.subplot(411)
    lls = []
    widths = np.arange(1, 25, .1)

    for w in widths:
        ll = ln_like(w, tstep, fakedata)
        lls.append(ll)

    plt.xlabel('step width')
    plt.ylabel('likelihood')
    plt.plot(widths, lls)
    ymin = np.sort(lls)[np.isfinite(np.sort(lls))][0]
    ymax = np.sort(lls)[np.isfinite(np.sort(lls))][-1]
    plt.vlines(x=width, ymin=ymin, ymax=ymax, colors='red', linestyle='--')
    plt.subplot(412)
    lls = []
    bs = []
    heights = []
    tsteps = np.arange(0,100,.1)

    for t in tsteps:
        ll = ln_like(width, t, fakedata)
        lls.append(ll)

        b = get_hb(width, t, fakedata)[1]
        bs.append(b)

        height = get_hb(width, t, fakedata)[0]
        heights.append(height)
Code example #41
plt.axis('equal')
Magnification = numpy.arange(0, 1.01, 0.01)
for FStop in [0.5, 0.8, 1, 1.2, 1.4, 2]:
    plt.plot(Magnification, Magnification / (2 * FStop * (1 + Magnification)),
             label='f/' + str('%0.2f' % FStop))
plt.plot(Magnification,
         Magnification / (2 * options.FStop * (1 + Magnification)), 'g--',
         linewidth=5, label='f/' + str('%0.2f' % options.FStop))
plt.legend(loc='upper left')
plt.hlines(NumericalApertureAverage, 0, 1)
plt.text(0.618, NumericalApertureAverage, 'NA flat panel')
plt.hlines(NumericalApertureDetermined, 0, 1)
plt.text(0.618, NumericalApertureDetermined, 'simulated NA of our lens')
plt.hlines(NumericalApertureJBAG, 0, 1)
plt.text(0.618, NumericalApertureJBAG, 'NA JBAG (?)')
plt.vlines(1 / Demagnification, 0, 1, 'g', '--')
plt.text(1 / Demagnification + 0.25, 0.8, 'Our calculated\nDemagnification: ' +
         str(Demagnification) + 'x=' + str(round(1 / Demagnification, 3)))

plt.title('NA')
plt.xlabel('Magnification')
plt.ylabel('NA')
plt.xlim([0, 1])

# Plot X-ray spectra
plt.subplot(235)
# http://stackoverflow.com/a/11249430/323100
Spectra = [
    (os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_040kV.txt')),
    (os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_046kV.txt')),
    (os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_053kV.txt')),
Code example #42
re_z = re.compile(r'power_21cm_z(\d+\.\d+)\.dat')
kpl_pos = ks[k0+1:]
for filename in glob.glob('lidz_mcquinn_k3pk/*7.3*dat'):
    print('Reading', filename)
    d = n.array([list(map(float, L.split())) for L in open(filename).readlines()])
    ks, pk = d[:,0], d[:,1]
    z_file = float(re_z.match(os.path.basename(filename)).groups()[0])
    z = C.pspec.f2z(.151)
    k3pk = ks**3 / (2*n.pi**2) * pk
    p.subplot(122)
    p.plot(ks, k3pk * mean_temp(z)**2, 'm-')
    
tau_h = 100 + 15. #in ns
k_h = C.pspec.dk_deta(C.pspec.f2z(.151)) * tau_h
p.subplot(121)
p.vlines(k_h, -1e7, 1e8, linestyles='--', linewidth=1.5)
p.vlines(-k_h, -1e7, 1e8, linestyles='--', linewidth=1.5)
#p.gca().set_yscale('log', nonposy='clip')
p.xlabel(r'$k_\parallel\ [h\ {\rm Mpc}^{-1}]$', fontsize='large')
p.ylabel(r'$P(k)[\ {\rm mK}^2\ (h^{-1}\ {\rm Mpc})^3]$',fontsize='large')
p.ylim(-.6e7,1.75e7) #original
#p.ylim(1e5,5e16)
p.grid()


p.subplot(122)
#if ONLY_POS_K: p.plot([.5], [248**2], 'mv', label='GMRT2013')
#else: p.plot([-.5, .5], [248**2, 248**2], 'mv', label='GMRT2013')
p.vlines(k_h, -1e7, 1e7, linestyles='--', linewidth=1.5)
#theoretical_ks = n.linspace(.058,.5, 100)
#theor_errs = 1441090 * n.array(theoretical_ks)**3  / (2*n.pi**2)
Code example #43
File: aistool.py Project: irbdavid/mex
    def update(self):
        """ This redraws the various axes """
        plt.sca(self.ig_ax)
        plt.cla()

        if debug:
            print('DEBUG: Plotting ionogram...')

        alpha = 0.5
        self.current_ionogram.interpolate_frequencies() # does nothing if not required
        self.current_ionogram.plot(ax=self.ig_ax, colorbar=False,
            vmin=self.vmin, vmax=self.vmax,
            color='white', verbose=debug,
            overplot_digitization=True,alpha=alpha,errors=False,
            overplot_model=False, overplot_expected_ne_max=True)
        if debug:
            print('DEBUG: ... done')
        plt.colorbar(cax=self.cbar_ax, orientation='horizontal',
            ticks=mpl.ticker.MultipleLocator())
        plt.sca(self.cbar_ax)
        plt.xlabel(r'spec. dens. / $V^2m^{-2}Hz^{-1}$')
        plt.sca(self.ig_ax)

        # Plasma and cyclotron lines
        if len(self.selected_plasma_lines) > 0:
            extent = plt.ylim()
            for v in self.selected_plasma_lines:
                plt.vlines(v, extent[0], extent[1], 'red',alpha=alpha)

        if len(self.selected_cyclotron_lines) > 0:
            extent = plt.xlim()
            for v in self.selected_cyclotron_lines:
                plt.hlines(v, extent[0], extent[1], 'red',alpha=alpha)

        f = self.current_ionogram.digitization.morphology_fp_local
        if np.isfinite(f):
            plt.vlines(
                np.arange(1., 5.) * f / 1E6, plt.ylim()[0],
                plt.ylim()[1],
                color='red', lw=1.,alpha=alpha)

        # If current digitization is invertible, do it and plot it
        if self.current_ionogram.digitization:
            if debug:
                print('DEBUG: Inverting, computing model...')

            d = self.current_ionogram.digitization
            plt.sca(self.ne_ax)
            plt.cla()
            if d.is_invertible():
                winning = d.invert()
                if winning & np.all(d.density > 0.) & np.all(d.altitude > 0.):
                    plt.plot(d.density, d.altitude, color='k')
            plt.xlim(5.E1, 5E5)
            plt.ylim(0,499)
            alt = np.arange(0., 499., 5.)
            if self.current_ionogram.sza < 89.9:
                plt.plot(self.ionospheric_model(alt,
                        np.deg2rad(self.current_ionogram.sza)), alt, color='green')
            plt.grid()
            plt.xscale('log')
            plt.xlabel(r'$n_e / cm^{-3}$')
            plt.ylabel('alt. / km')
            fname = self.digitization_db.filename
            if len(fname) > 30: fname = fname[:10] + '...' + fname[-20:]
            plt.title('Database: ' + fname)

        if debug:
            print('DEBUG: Plotting timeseries....')

        # Timeseries integrated bar
        plt.sca(self.tser_ax)
        plt.cla()
        plt.imshow(self.tser_arr[::-1,:], vmin=self.vmin, vmax=self.vmax,
            interpolation='Nearest', extent=self.extent, origin='upper',aspect='auto')
        plt.xlim(self.extent[0], self.extent[1])
        plt.ylim(self.extent[2], self.extent[3])
        plt.ylim(0., 5.5)
        plt.vlines(self.current_ionogram.time,
            self.extent[2], self.extent[3], self.stored_color)
        plt.hlines(self.timeseries_frequency, self.extent[0],  self.extent[1],
            self.stored_color, 'dashed')
        plt.ylabel('f / MHz')

        # Frequency bar
        plt.sca(self.freq_ax)
        plt.cla()
        freq_extent = (self.extent[0], self.extent[1],
            ais.ais_max_delay*1E3, ais.ais_min_delay*1E3)
        inx = 1.0E6 * (self.current_ionogram.frequencies.shape[0] *
            self.timeseries_frequency) /\
            (self.current_ionogram.frequencies[-1] - self.current_ionogram.frequencies[0])

        self._freq_bar_data = self.tser_arr_all[:,int(inx),:]
        plt.imshow(self.tser_arr_all[:,int(inx),:], vmin=self.vmin, vmax=self.vmax,
            interpolation='Nearest', extent=freq_extent, origin='upper',aspect='auto')
        plt.xlim(freq_extent[0], freq_extent[1])
        plt.ylim(freq_extent[2], freq_extent[3])
        plt.vlines(self.current_ionogram.time,
            freq_extent[2],freq_extent[3], self.stored_color)
        plt.ylabel(r'$\tau_D / ms$')

        title = "AISTool v%s, Orbit = %d, Ionogram=%s " % (__version__,
            self.orbit, celsius.spiceet_to_utcstr(self.current_ionogram.time,
            fmt='C'))

        if self.browsing:
            title += '[Browsing] '
        if self.minimum_interaction_mode:
            title += '[Quick] '
        if self._digitization_saved == False:
            title += 'UNSAVED '
        if self.get_status() is not None:
            title += '[Status = %s] ' % self.get_status()

        pos, sza = mex.mso_r_lat_lon_position(float(self.current_ionogram.time),
            sza=True)

        title += '\nMSO: Altitude = %.1f km, Elevation = %.1f, Azimuth = %.1f deg, SZA = %.1f' % (pos[0] - mex.mars_mean_radius_km, mex.modpos(pos[1]), mex.modpos(pos[2]), sza)

        pos = mex.iau_pgr_alt_lat_lon_position(float(self.current_ionogram.time))
        title += '\nIAU: Altitude = %.1f km, Latitude = %.1f, Longitude = %.1f deg' % (
            pos[0], pos[1], mex.modpos(pos[2]))

        plt.sca(self.tser_ax)
        plt.title(title)

        # Message history:
        if len(self._messages):
            txt = ''
            for i, s in enumerate(self._messages):
                txt += str(i + self._message_counter) + ': ' + s + '\n'
            plt.annotate(txt, (0.05, 0.995), xycoords='figure fraction',
                fontsize=8, horizontalalignment='left', verticalalignment='top')

        # Axis formatters need redoing after each cla()
        nf = mpl.ticker.NullFormatter

        loc_f = celsius.SpiceetLocator()
        loc_t = celsius.SpiceetLocator()
        self.freq_ax.xaxis.set_major_formatter(celsius.SpiceetFormatter(loc_f))
        self.tser_ax.xaxis.set_major_formatter(nf())

        self.freq_ax.xaxis.set_major_locator(loc_f)
        self.tser_ax.xaxis.set_major_locator(loc_t)
        if debug:
            print('DEBUG: drawing...')

        self.figure.canvas.draw()
        return self
Code example #44
0
# generating Poisson-like spike data (independent Bernoulli draws per time bin)
num_neurons=10
max_time=100
spike_array=(np.random.random([num_neurons,max_time]) <0.1)
neurons,time=np.where(spike_array==1)


# In[4]:

# plotting rasters in two ways
plt.figure(figsize=(20, 10))
plt.subplot(211)
plt.imshow(spike_array,aspect=2,interpolation='none',cmap='Greys')
plt.subplot(212)
plt.vlines(time,neurons,neurons+1)
plt.xlabel('time')
plt.ylabel('neuron')
plt.show()


# In the cell below, make the raster plots for the data in the array allSpikes (same format as spike_array above)

# In[ ]:
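# One possible solution, as a minimal sketch: it assumes allSpikes is a
# binary (neurons x time) array with the same format as spike_array above.
allNeurons, allTimes = np.where(allSpikes == 1)
plt.figure(figsize=(20, 5))
plt.vlines(allTimes, allNeurons, allNeurons + 1)
plt.xlabel('time')
plt.ylabel('neuron')
plt.show()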




# ### Let's make a histogram of average spiking rate

# In[5]:
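# A minimal sketch of the histogram (assumes spike_array from above; the
# average rate per neuron is expressed in spikes per time bin).
avg_rates = spike_array.mean(axis=1)
plt.hist(avg_rates, bins=10)
plt.xlabel('average spiking rate (spikes per bin)')
plt.ylabel('number of neurons')
plt.show()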
Code example #45
0
File: aspera_mf.py Project: irbdavid/mex
#             old_ax = plt.gca()
#             plt.colorbar(cax=celsius.make_colorbar_cax(), ticks=[0,1,2], cmap=cmap)
#             plt.ylabel(r'log$_{10}$ Counts')
#             plt.sca(old_ax)
#
#
#
#






if __name__ == '__main__':
    plt.close('all')
    fig, ax = plt.subplots(2,1, sharex=True)

    start = mex.orbits[8000].periapsis - 2.5 * 3600
    finish = mex.orbits[8000].periapsis + 2.5 * 3600
    verbose = True

    plt.set_cmap(plt.cm.Spectral_r)
    plt.sca(ax[0])
    plot_aspera_els(start, finish, verbose=verbose)
    plt.sca(ax[1])
    plot_aspera_ima(start, finish, verbose=verbose)
    plt.vlines((start, finish), *plt.ylim())
    plt.xlim(start - 3600., finish + 3600.)
    plt.show()
Code example #46
0
point_coord = [a0.shape[0]//2, a0.shape[1]//2]  # integer indices (Python 3 division)
steps = ffs[:,point_coord[0], point_coord[1] ]
dsteps = np.hstack((steps,steps))
N=np.linspace(1,ffs.shape[0]*2,ffs.shape[0]*2)
pp.plot(N,dsteps, 'bo--',label="measured")

Np=np.linspace(0.5,ffs.shape[0]+0.5,ffs.shape[0]*100)
Np2=np.linspace(0.5,ffs.shape[0]*2+0.5,ffs.shape[0]*2*100)
a0c = coeff[0,0,point_coord[0],point_coord[1]]
a1c = coeff[0,1,point_coord[0],point_coord[1]]
phc = coeff[1,1,point_coord[0],point_coord[1]]
curve1 = a0c + a1c * np.sin(2*np.pi*(Np-1)/(ffs.shape[0]) + phc + np.pi/2)
curve = np.hstack((curve1,curve1))
pp.plot(Np2,curve, 'r-',label="fitted")
pp.hlines(coeff[0,0,point_coord[0],point_coord[1]],0, ffs.shape[0]*2, 'k','--')
pp.vlines(ffs.shape[0]+0.5, coeff[0,0,point_coord[0],point_coord[1]]-coeff[0,1,point_coord[0],point_coord[1]],coeff[0,0,point_coord[0],point_coord[1]]+coeff[0,1,point_coord[0],point_coord[1]],'k','--')
pp.title('stepping curve of pixel (' + str(point_coord[0]) + ',' + str(point_coord[1]) + '). The curve is displayed twice.')
pp.ylabel('intensity [adu.]')
pp.xlabel('phase steps')
pp.legend()
pp.grid()

pp.show()


if process_sample:
    datm = np.zeros((19,800,770))
    for i in range(19):
        datm[i] = e17.io.h5read(filepath+"paximage_ct_4809%02d.h5"%(i+27))["raw_data"]
    
    
Code example #47
0
File: find_delay.py Project: jaycedowell/aipy
        pl.figure(figcnt)
        
        nplots = len(DD[pol].keys())
        rows = Nant
        cols = Nant
        bls = sorted(DD[pol].keys())

        for plno,bl in enumerate(bls):
            #Make the plots
            i,j = np.argwhere(AntNos==bl[0]).squeeze(),np.argwhere(AntNos==bl[1]).squeeze()
            plindex = i*Nant + j + 1  # subplot indices are 1-based
            
            ax = pl.subplot(rows,cols,plindex)
           
            pl.vlines(D_ij[pol][bl]/bl_len[bl],0,1.1,color='k')
            pl.vlines((Tau[pol][i]-Tau[pol][j])/bl_len[bl],0,1.1,color='r')
            pl.vlines((-1,1),0,1.1,linestyles='dotted',color='k')
            pl.plot(delays/bl_len[bl],DD[pol][bl],'b')
           
            pl.xlim([-1.5,1.5])
            pl.ylim([0,1.1])
            pl.yticks([])
            pl.xticks([])

            if i == 0: pl.text(0.5,1.2,str(AntNos[j]),transform=ax.transAxes)
            if j == Nant-1: pl.text(1.2,0.5,str(AntNos[i]),transform=ax.transAxes)
    
        #Give a sample plot
        pl.subplot(337)
        bl = bls[0]
Code example #48
0
# Scale-average between El Nino periods of 2--8 years
avg = np.logical_and(scale >= 2, scale < 8)
Cdelta = 0.776  # this is for the MORLET wavelet
scale_avg = scale[:, np.newaxis].dot(np.ones(n)[np.newaxis, :])  # expand scale --> (J+1)x(N) array
scale_avg = power / scale_avg  # [Eqn(24)]
scale_avg = variance * dj * dt / Cdelta * sum(scale_avg[avg, :])  # [Eqn(24)]
scaleavg_signif = wave_signif(variance, dt=dt, scale=scale, sigtest=2, lag1=lag1, dof=([2, 7.9]), mother=mother)

#------------------------------------------------------ Plotting

#--- Plot time series
plt.figure(figsize=(18, 9))
plt.subplot(211)
plt.plot(time, sst1)
plt.vlines(tsunami, -0.8, 0.8, lw=2)
plt.xlim(xlim[:])
plt.ylim(-0.35,0.35)
plt.xlabel('Time (seconds)')
plt.ylabel('TEC (TECU)')
plt.title('a) AINP TEC values, PRN=4')
plt.grid()
# plt.hold(False)  # hold() was deprecated and removed in modern matplotlib

# --- Plot 2--8 yr scale-average time series
#plt.subplot(222)
#plt.plot(time, scale_avg)
#plt.xlim(xlim[:])
#plt.xlabel('Time (year)')
#plt.ylabel('Avg variance (degC^2)')
#plt.title('d) 2-8 yr Scale-average Time Series')
Code example #49
0
def select_windows(data_trace, synthetic_trace, event_latitude,
                   event_longitude, event_depth_in_km,
                   station_latitude, station_longitude, minimum_period,
                   maximum_period,
                   min_cc=0.10, max_noise=0.10, max_noise_window=0.4,
                   min_velocity=2.4, threshold_shift=0.30,
                   threshold_correlation=0.75, min_length_period=1.5,
                   min_peaks_troughs=2, max_energy_ratio=10.0,
                   min_envelope_similarity=0.2,
                   verbose=False, plot=False):
    """
    Window selection algorithm for picking windows suitable for misfit
    calculation based on phase differences.

    Returns a list of windows which might be empty due to various reasons.

    This function is really long and does a lot of things. For a more
    detailed description, please see the LASIF paper.

    :param data_trace: The data trace.
    :type data_trace: :class:`~obspy.core.trace.Trace`
    :param synthetic_trace: The synthetic trace.
    :type synthetic_trace: :class:`~obspy.core.trace.Trace`
    :param event_latitude: The event latitude.
    :type event_latitude: float
    :param event_longitude: The event longitude.
    :type event_longitude: float
    :param event_depth_in_km: The event depth in km.
    :type event_depth_in_km: float
    :param station_latitude: The station latitude.
    :type station_latitude: float
    :param station_longitude: The station longitude.
    :type station_longitude: float
    :param minimum_period: The minimum period of the data in seconds.
    :type minimum_period: float
    :param maximum_period: The maximum period of the data in seconds.
    :type maximum_period: float
    :param min_cc: Minimum normalised correlation coefficient of the
        complete traces.
    :type min_cc: float
    :param max_noise: Maximum relative noise level for the whole trace.
        Measured from maximum amplitudes before and after the first arrival.
    :type max_noise: float
    :param max_noise_window: Maximum relative noise level for individual
        windows.
    :type max_noise_window: float
    :param min_velocity: All arrivals later than those corresponding to the
        threshold velocity [km/s] will be excluded.
    :type min_velocity: float
    :param threshold_shift: Maximum allowable time shift within a window,
        as a fraction of the minimum period.
    :type threshold_shift: float
    :param threshold_correlation: Minimum normalised correlation coefficient
        within a window.
    :type threshold_correlation: float
    :param min_length_period: Minimum length of the time windows relative to
        the minimum period.
    :type min_length_period: float
    :param min_peaks_troughs: Minimum number of extrema in an individual
        time window (excluding the edges).
    :type min_peaks_troughs: float
    :param max_energy_ratio: Maximum energy ratio between data and
        synthetics within a time window. Don't make this too small!
    :type max_energy_ratio: float
    :param min_envelope_similarity: The minimum similarity of the envelopes of
        both data and synthetics. This essentially assures that the
        amplitudes of data and synthetics can not diverge too much within a
        window. It is a bit like the inverse of the ratio of both envelopes
        so a value of 0.2 makes sure neither amplitude can be more than 5
        times larger than the other.
    :type min_envelope_similarity: float
    :param verbose: No output by default.
    :type verbose: bool
    :param plot: Create a plot of the algorithm while it does its work.
    :type plot: bool
    """
    # Shortcuts to frequently accessed variables.
    data_starttime = data_trace.stats.starttime
    data_delta = data_trace.stats.delta
    dt = data_trace.stats.delta
    npts = data_trace.stats.npts
    synth = synthetic_trace.data
    data = data_trace.data
    times = data_trace.times()

    # Fill cache if necessary.
    if not TAUPY_MODEL_CACHE:
        from obspy.taup import TauPyModel  # NOQA
        TAUPY_MODEL_CACHE["model"] = TauPyModel("AK135")
    model = TAUPY_MODEL_CACHE["model"]

    # -------------------------------------------------------------------------
    # Geographical calculations and the time of the first arrival.
    # -------------------------------------------------------------------------
    dist_in_deg = geodetics.locations2degrees(station_latitude,
                                              station_longitude,
                                              event_latitude, event_longitude)
    dist_in_km = geodetics.calcVincentyInverse(
        station_latitude, station_longitude, event_latitude,
        event_longitude)[0] / 1000.0

    # Get only a couple of P phases which should be the first arrival
    # for every epicentral distance. It's quite a bit faster than calculating
    # the arrival times for every phase.
    # Assumes the first sample is the centroid time of the event.
    tts = model.get_travel_times(source_depth_in_km=event_depth_in_km,
                                 distance_in_degree=dist_in_deg,
                                 phase_list=["ttp"])
    # Sort just as a safety measure.
    tts = sorted(tts, key=lambda x: x.time)
    first_tt_arrival = tts[0].time

    # -------------------------------------------------------------------------
    # Window settings
    # -------------------------------------------------------------------------
    # Number of samples in the sliding window. Currently, the length of the
    # window is set to a multiple of the dominant period of the synthetics.
    # Make sure it is an odd number, just to have a trivial midpoint
    # definition; one sample does not matter much in any case.
    window_length = int(round(float(2 * minimum_period) / dt))
    if not window_length % 2:
        window_length += 1

    # Use a Hanning window. No particular reason for it but it's a well-behaved
    # window and has nice spectral properties.
    taper = np.hanning(window_length)

    # =========================================================================
    # check if whole seismograms are sufficiently correlated and estimate
    # noise level
    # =========================================================================

    # Overall Correlation coefficient.
    norm = np.sqrt(np.sum(data ** 2)) * np.sqrt(np.sum(synth ** 2))
    cc = np.sum(data * synth) / norm
    if verbose:
        _log_window_selection(data_trace.id,
                              "Correlation Coefficient: %.4f" % cc)

    # Estimate noise level from waveforms prior to the first arrival.
    idx_end = int(np.ceil((first_tt_arrival - 0.5 * minimum_period) / dt))
    idx_end = max(10, idx_end)
    idx_start = int(np.ceil((first_tt_arrival - 2.5 * minimum_period) / dt))
    idx_start = max(10, idx_start)

    if idx_start >= idx_end:
        idx_start = max(0, idx_end - 10)

    abs_data = np.abs(data)
    noise_absolute = abs_data[idx_start:idx_end].max()
    noise_relative = noise_absolute / abs_data.max()

    if verbose:
        _log_window_selection(data_trace.id,
                              "Absolute Noise Level: %e" % noise_absolute)
        _log_window_selection(data_trace.id,
                              "Relative Noise Level: %e" % noise_relative)

    # Basic global rejection criteria.
    accept_traces = True
    if (cc < min_cc) and (noise_relative > max_noise / 3.0):
        msg = "Correlation %.4f is below threshold of %.4f" % (cc, min_cc)
        if verbose:
            _log_window_selection(data_trace.id, msg)
        accept_traces = msg

    if noise_relative > max_noise:
        msg = "Noise level %.3f is above threshold of %.3f" % (
            noise_relative, max_noise)
        if verbose:
            _log_window_selection(
                data_trace.id, msg)
        accept_traces = msg

    # Calculate the envelope of both data and synthetics. This is to make sure
    # that the amplitude of both is not too different over time and is
    # used as another selector. Only calculated if the trace is generally
    # accepted as it is fairly slow.
    if accept_traces is True:
        data_env = obspy.signal.filter.envelope(data)
        synth_env = obspy.signal.filter.envelope(synth)

    # -------------------------------------------------------------------------
    # Initial Plot setup.
    # -------------------------------------------------------------------------
    # All the plot calls are interleaved. I realize this is really ugly but
    # the alternative would be to either have two functions (one with plots,
    # one without) or split the plotting function in various subfunctions,
    # neither of which are acceptable in my opinion. The impact on
    # performance is minimal if plotting is turned off: all imports are lazy
    # and a couple of conditionals are cheap.
    if plot:
        import matplotlib.pylab as plt  # NOQA
        import matplotlib.patheffects as PathEffects  # NOQA

        if accept_traces is True:
            plt.figure(figsize=(18, 12))
            plt.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.95,
                                wspace=None, hspace=0.0)
            grid = (31, 1)

            # Axes showing the data.
            data_plot = plt.subplot2grid(grid, (0, 0), rowspan=8)
        else:
            # Only show one axes if the traces are not accepted.
            plt.figure(figsize=(18, 3))

        # Plot envelopes if needed.
        if accept_traces is True:
            plt.plot(times, data_env, color="black", alpha=0.5, lw=0.4,
                     label="data envelope")
            plt.plot(synthetic_trace.times(), synth_env, color="#e41a1c",
                     alpha=0.4, lw=0.5, label="synthetics envelope")

        plt.plot(times, data, color="black", label="data", lw=1.5)
        plt.plot(synthetic_trace.times(), synth, color="#e41a1c",
                 label="synthetics",  lw=1.5)

        # Symmetric around y axis.
        middle = data.mean()
        d_max, d_min = data.max(), data.min()
        r = max(d_max - middle, middle - d_min) * 1.1
        ylim = (middle - r, middle + r)
        xlim = (times[0], times[-1])
        plt.ylim(*ylim)
        plt.xlim(*xlim)

        offset = (xlim[1] - xlim[0]) * 0.005
        plt.vlines(first_tt_arrival, ylim[0], ylim[1], colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival + offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "first arrival", verticalalignment="top",
                 horizontalalignment="left", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.vlines(first_tt_arrival - minimum_period / 2.0, ylim[0], ylim[1],
                   colors="#ff7f00", lw=2)
        plt.text(first_tt_arrival - minimum_period / 2.0 - offset,
                 ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                 "first arrival - min period / 2", verticalalignment="bottom",
                 horizontalalignment="right", color="#ee6e00",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        for velocity in [6, 5, 4, 3, min_velocity]:
            tt = dist_in_km / velocity
            plt.vlines(tt, ylim[0], ylim[1], colors="gray", lw=2)
            if velocity == min_velocity:
                hal = "right"
                o_s = -1.0 * offset
            else:
                hal = "left"
                o_s = offset
            plt.text(tt + o_s, ylim[0] + (ylim[1] - ylim[0]) * 0.02,
                     str(velocity) + " km/s", verticalalignment="bottom",
                     horizontalalignment=hal, color="0.15")
        plt.vlines(dist_in_km / min_velocity + minimum_period / 2.0,
                   ylim[0], ylim[1], colors="gray", lw=2)
        plt.text(dist_in_km / min_velocity + minimum_period / 2.0 - offset,
                 ylim[1] - (ylim[1] - ylim[0]) * 0.02,
                 "min surface velocity + min period / 2",
                 verticalalignment="top",
                 horizontalalignment="right", color="0.15", path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])

        plt.hlines(noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.hlines(-noise_absolute, xlim[0], xlim[1], linestyle="--",
                   color="gray")
        plt.text(offset, noise_absolute + (ylim[1] - ylim[0]) * 0.01,
                 "noise level", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")
        plt.gca().xaxis.set_ticklabels([])

        # Plot the basic global information.
        ax = plt.gca()
        txt = (
            "Total CC Coeff: %.4f\nAbsolute Noise: %e\nRelative Noise: %.3f"
            % (cc, noise_absolute, noise_relative))
        ax.text(0.01, 0.95, txt, transform=ax.transAxes,
                fontdict=dict(fontsize="small", ha='left', va='top'),
                bbox=dict(boxstyle="round", fc="w", alpha=0.8))
        plt.suptitle("Channel %s" % data_trace.id, fontsize="larger")

    # Show plot and return if not accepted.
        if accept_traces is not True:
            txt = "Rejected: %s" % (accept_traces)
            ax.text(0.99, 0.95, txt, transform=ax.transAxes,
                    fontdict=dict(fontsize="small", ha='right', va='top'),
                    bbox=dict(boxstyle="round", fc="red", alpha=1.0))
            plt.show()
    if accept_traces is not True:
        return []

    # Initialise masked arrays. The mask will be set to True where no
    # windows are chosen.
    time_windows = np.ma.ones(npts)
    time_windows.mask = False
    if plot:
        old_time_windows = time_windows.copy()

    # Elimination Stage 1: Eliminate everything half a period before or
    # after the minimum and maximum travel times, respectively.
    min_idx = int((first_tt_arrival - (minimum_period / 2.0)) / dt)
    max_idx = int(math.ceil((
        dist_in_km / min_velocity + minimum_period / 2.0) / dt))
    time_windows.mask[:min_idx + 1] = True
    time_windows.mask[max_idx:] = True
    if plot:
        plt.subplot2grid(grid, (8, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TRAVELTIME ELIMINATION")
        old_time_windows = time_windows.copy()

    # -------------------------------------------------------------------------
    # Compute sliding time shifts and correlation coefficients for time
    # frames that passed the traveltime elimination stage.
    # -------------------------------------------------------------------------
    # Allocate arrays to collect the time dependent values.
    sliding_time_shift = np.ma.zeros(npts, dtype="float32")
    sliding_time_shift.mask = True
    max_cc_coeff = np.ma.zeros(npts, dtype="float32")
    max_cc_coeff.mask = True

    for start_idx, end_idx, midpoint_idx in _window_generator(npts,
                                                              window_length):
        if not min_idx < midpoint_idx < max_idx:
            continue

        # Slice windows. Create a copy to be able to taper without affecting
        # the original time series.
        data_window = data[start_idx: end_idx].copy() * taper
        synthetic_window = \
            synth[start_idx: end_idx].copy() * taper

        # Elimination Stage 2: Skip windows that have essentially no energy
        # to avoid instabilities. No windows can be picked in these.
        if synthetic_window.ptp() < synth.ptp() * 0.001:
            time_windows.mask[midpoint_idx] = True
            continue

        # Calculate the time shift. Here this is defined as the shift of the
        # synthetics relative to the data. So a value of 2, for instance, means
        # that the synthetics are 2 timesteps later than the data.
        cc = np.correlate(data_window, synthetic_window, mode="full")

        time_shift = cc.argmax() - window_length + 1
        # Express the time shift in fraction of the minimum period.
        sliding_time_shift[midpoint_idx] = (time_shift * dt) / minimum_period

        # Normalized cross correlation.
        max_cc_value = cc.max() / np.sqrt((synthetic_window ** 2).sum() *
                                          (data_window ** 2).sum())
        max_cc_coeff[midpoint_idx] = max_cc_value

    if plot:
        plt.subplot2grid(grid, (9, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="NO ENERGY IN CC WINDOW")
        # Axes with the CC coeffs
        plt.subplot2grid(grid, (15, 0), rowspan=4)
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.hlines(-threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.hlines(threshold_shift, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, -threshold_shift - (2) * 0.03,
                 "threshold", verticalalignment="top",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, sliding_time_shift, color="#377eb8",
                 label="Time shift in fraction of minimum period", lw=1.5)
        ylim = plt.ylim()
        plt.yticks([-0.75, 0, 0.75])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.ylim(ylim[0], ylim[1] + ylim[1] - ylim[0])
        plt.ylim(-1.0, 1.0)
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

        plt.subplot2grid(grid, (10, 0), rowspan=4)
        plt.hlines(threshold_correlation, xlim[0], xlim[1], color="0.15",
                   linestyle="--")
        plt.hlines(1, xlim[0], xlim[1], color="lightgray")
        plt.hlines(0, xlim[0], xlim[1], color="lightgray")
        plt.text(5, threshold_correlation + (1.4) * 0.01,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                     PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, max_cc_coeff, color="#4daf4a",
                 label="Maximum CC coefficient", lw=1.5)
        plt.ylim(-0.2, 1.2)
        plt.yticks([0, 0.5, 1])
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # Elimination Stage 3: Mark all areas where the normalized cross
    # correlation coefficient is under threshold_correlation as negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[max_cc_coeff < threshold_correlation] = True
    if plot:
        plt.subplot2grid(grid, (14, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="CORRELATION COEFF THRESHOLD ELIMINATION")

    # Elimination Stage 4: Mark everything with an absolute travel time
    # shift of more than # threshold_shift times the dominant period as
    # negative
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[np.ma.abs(sliding_time_shift) > threshold_shift] = True
    if plot:
        plt.subplot2grid(grid, (19, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT THRESHOLD ELIMINATION")

    # Elimination Stage 5: Mark the area around every "travel time shift
    # jump" (based on the traveltime time difference) negative. The width of
    # the area is currently chosen to be a tenth of a dominant period to
    # each side.
    if plot:
        old_time_windows = time_windows.copy()
    sample_buffer = int(np.ceil(minimum_period / dt * 0.1))
    indices = np.ma.where(np.ma.abs(np.ma.diff(sliding_time_shift)) > 0.1)[0]
    for index in indices:
        time_windows.mask[index - sample_buffer: index + sample_buffer] = True
    if plot:
        plt.subplot2grid(grid, (20, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="TIME SHIFT JUMPS ELIMINATION")

    # Clip both to avoid large numbers by division.
    stacked = np.vstack([
        np.ma.clip(synth_env, synth_env.max() * min_envelope_similarity * 0.5,
                   synth_env.max()),
        np.ma.clip(data_env, data_env.max() * min_envelope_similarity * 0.5,
                   data_env.max())])
    # Ratio.
    ratio = stacked.min(axis=0) / stacked.max(axis=0)

    # Elimination Stage 6: Make sure the amplitudes of both don't vary too
    # much.
    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[ratio < min_envelope_similarity] = True
    if plot:
        plt.subplot2grid(grid, (25, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="ENVELOPE AMPLITUDE SIMILARITY ELIMINATION")

    if plot:
        plt.subplot2grid(grid, (21, 0), rowspan=4)
        plt.hlines(min_envelope_similarity, xlim[0], xlim[1], color="gray",
                   linestyle="--")
        plt.text(5, min_envelope_similarity + (2) * 0.03,
                 "threshold", verticalalignment="bottom",
                 horizontalalignment="left", color="0.15",
                 path_effects=[
                 PathEffects.withStroke(linewidth=3, foreground="white")])
        plt.plot(times, ratio, color="#9B59B6",
                 label="Envelope amplitude similarity", lw=1.5)
        plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0])
        plt.ylim(0.05, 1.05)
        plt.xticks([300, 600, 900, 1200, 1500, 1800])
        plt.xlim(xlim)
        plt.gca().xaxis.set_ticklabels([])
        plt.legend(loc="lower right", fancybox=True, framealpha=0.5,
                   fontsize="small")

    # First minimum window length elimination stage. This is cheap and if
    # not done it can easily destabilize the peak-and-trough marching stage
    # which would then have to deal with way more edge cases.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (26, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 1")

    # -------------------------------------------------------------------------
    # Peak and trough marching algorithm
    # -------------------------------------------------------------------------
    final_windows = []
    for i in flatnotmasked_contiguous(time_windows):
        # Cut respective windows.
        window_npts = i.stop - i.start
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]

        # Find extrema in the data and the synthetics.
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)

        window_mask = np.ones(window_npts, dtype="bool")

        closest_peaks = find_closest(data_p, synth_p)
        diffs = np.diff(closest_peaks)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_p[idx - 1]
            else:
                start = 0
            if idx < (len(synth_p) - 1):
                end = synth_p[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        closest_troughs = find_closest(data_t, synth_t)
        diffs = np.diff(closest_troughs)

        for idx in np.where(diffs == 1)[0]:
            if idx > 0:
                start = synth_t[idx - 1]
            else:
                start = 0
            if idx < (len(synth_t) - 1):
                end = synth_t[idx + 1]
            else:
                end = -1
            window_mask[start: end] = False

        window_mask = np.ma.masked_array(window_mask,
                                         mask=window_mask)

        if window_mask.mask.all():
            continue

        for j in flatnotmasked_contiguous(window_mask):
            final_windows.append((i.start + j.start, i.start + j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False
    if plot:
        plt.subplot2grid(grid, (27, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK AND TROUGH MARCHING ELIMINATION")

    # Loop through all the time windows, remove windows not satisfying the
    # minimum number of peaks and troughs per window. Acts mainly as a
    # safety guard.
    old_time_windows = time_windows.copy()
    for i in flatnotmasked_contiguous(old_time_windows):
        synthetic_window = synth[i.start: i.stop]
        data_window = data[i.start: i.stop]
        data_p, data_t = find_local_extrema(data_window)
        synth_p, synth_t = find_local_extrema(synthetic_window)
        if np.min([len(synth_p), len(synth_t), len(data_p), len(data_t)]) < \
                min_peaks_troughs:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (28, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="PEAK/TROUGH COUNT ELIMINATION")

    # Second minimum window length elimination stage.
    if plot:
        old_time_windows = time_windows.copy()
    min_length = \
        min(minimum_period / dt * min_length_period, maximum_period / dt)
    for i in flatnotmasked_contiguous(time_windows):
        # Step 7: Throw away all windows with a length of less than
        # min_length_period times the dominant period.
        if (i.stop - i.start) < min_length:
            time_windows.mask[i.start: i.stop] = True
    if plot:
        plt.subplot2grid(grid, (29, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="MINIMUM WINDOW LENGTH ELIMINATION 2")

    # Final step, eliminating windows with little energy.
    final_windows = []
    for j in flatnotmasked_contiguous(time_windows):
        # Again assert a certain minimal length.
        if (j.stop - j.start) < min_length:
            continue

        # Compare the energy in the data window and the synthetic window.
        data_energy = (data[j.start: j.stop] ** 2).sum()
        synth_energy = (synth[j.start: j.stop] ** 2).sum()
        energies = sorted([data_energy, synth_energy])
        if energies[1] > max_energy_ratio * energies[0]:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to energy ratio between "
                    "data and synthetics.")
            continue

        # Check that amplitudes in the data are above the noise. Skip the
        # window otherwise.
        if noise_absolute / data[j.start: j.stop].ptp() > \
                max_noise_window:
            if verbose:
                _log_window_selection(
                    data_trace.id,
                    "Deselecting window due to having no amplitude above the "
                    "signal to noise ratio.")
            continue
        final_windows.append((j.start, j.stop))

    if plot:
        old_time_windows = time_windows.copy()
    time_windows.mask[:] = True
    for start, stop in final_windows:
        time_windows.mask[start:stop] = False

    if plot:
        plt.subplot2grid(grid, (30, 0), rowspan=1)
        _plot_mask(time_windows, old_time_windows,
                   name="LITTLE ENERGY ELIMINATION")

    if verbose:
        _log_window_selection(
            data_trace.id,
            "Done, Selected %i window(s)" % len(final_windows))

    # Final step is to convert the index value windows to actual times.
    windows = []
    for start, stop in final_windows:
        start = data_starttime + start * data_delta
        stop = data_starttime + stop * data_delta
        windows.append((start, stop))

    if plot:
        # Plot the final windows to the data axes.
        import matplotlib.transforms as mtransforms  # NOQA
        ax = data_plot
        trans = mtransforms.blended_transform_factory(ax.transData,
                                                      ax.transAxes)
        for start, stop in final_windows:
            ax.fill_between([start * data_delta, stop * data_delta], 0, 1,
                            facecolor="#CDDC39", alpha=0.5, transform=trans)

        plt.show()

    return windows
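
# A minimal usage sketch (not from the source): the file names and the
# event/station values below are illustrative assumptions.
if __name__ == "__main__":
    import obspy
    data_tr = obspy.read("data.mseed")[0]    # hypothetical data file
    synth_tr = obspy.read("synth.mseed")[0]  # hypothetical synthetic file
    picked = select_windows(
        data_tr, synth_tr,
        event_latitude=0.0, event_longitude=30.0, event_depth_in_km=10.0,
        station_latitude=45.0, station_longitude=10.0,
        minimum_period=60.0, maximum_period=120.0,
        verbose=True)
    print("Selected windows:", picked)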
Code example #50
0
#create second subplot object
ax2 = plt.subplot(2, 1, 2)  # (num rows, num columns, subplot position)
#apply letter label: coordinates in subplot object space (x,y) where (0,0) = bottom left
#also make sure transform uses the correct subplot object
plt.text(0.07,0.92,'(b)',horizontalalignment='center',verticalalignment='center',transform=ax2.transAxes)
#plot curve
plt.plot(x,z,color='black',linewidth=1.5,label='Cos')
#set labels, labels sizes, ticks, ticks sizes
plt.xlabel('Time [s]',fontsize=9)
plt.ylabel('Power [arb]',fontsize=9)
plt.xticks(fontsize=9)
plt.yticks(fontsize=9)
plt.xlim(0,20)
plt.ylim(-1.5,1.5)
#Add vertical lines (x position, ymin, ymax)
plt.vlines(5,-1.5,1.5,color='gray',linestyle='dashed',linewidth=2.0)
plt.vlines(15,-0.75,0.75,color='gray',linestyle='dashed',linewidth=2.0)

#saving the plot
#for the paper draft, it's best to use png. When we actually submit a paper
#we'll need to save the plot as a .eps file instead (see the sketch below).
savefile='Figure2_2subplots.png'
plt.savefig(savefile,dpi=300,facecolor='w',edgecolor='k')
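
#when we do submit, the same savefig call with an .eps filename writes vector
#output; a minimal sketch (this filename is illustrative, not from the source):
eps_savefile = 'Figure2_2subplots.eps'
plt.savefig(eps_savefile, facecolor='w', edgecolor='k')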

############################################
###Three or more Subplots (same x-axis)#####
############################################
#I've found using a figsize of (3.5,8) for higher numbers of subplots
#is usually sufficient.

######################################################
Code example #51
0
def plot_ice_cover_eb(
        ice_cover, energy_balance, observed_ice, date, temp, snotot, filename, prec=None, wind=None, clouds=None):
    """

    :param ice_cover:
    :param energy_balance:
    :param observed_ice:
    :param date:
    :param temp:
    :param snotot:
    :param filename:
    :param prec:
    :param wind:
    :param clouds:
    :return:

    Note: http://matplotlib.org/mpl_examples/color/named_colors.png
    """

    fsize = (16, 16)
    plt.figure(figsize=fsize)
    #fig = pplt.figure(figsize=fsize)
    plt.clf()


    ############## First subplot
    plt.subplot2grid((11, 1), (0, 0), rowspan=2)

    # depending on how many days are in the plot, the line weight of the modelled data should be adjusted
    modelledLineWeight = 1100/len(ice_cover)

    # don't need to keep the column coordinates, but then again, why not? Useful for debugging
    allColumnCoordinates = []

    # plot total snow depth on land
    plb.plot(date, snotot, "gray")

    plb.title('{0} - {1} days plotted.'.format(filename, len(ice_cover)))

    # a variable for the lowest point on the ice_cover. It is used for setting the lower left y-limit.
    lowest_point = 0.

    # Plot ice_cover
    for ic in ice_cover:

        # some idea of progress on the plotting
        if ic.date.day == 1:
            print((ic.date).strftime('%Y%m%d'))

        # make data for plotting. [icelayers.. [fro, too, icetype]].
        columncoordinates = []
        too = -ic.water_line  # water line is on xaxis

        for i in range(len(ic.column)-1, -1, -1):
            layer = ic.column[i]
            fro = too
            too = too + layer.height
            columncoordinates.append([fro, too, layer.type])

            if fro < lowest_point:
                lowest_point = fro

            # add coordinates to a vline plot
            plb.vlines(ic.date, fro, too, lw=modelledLineWeight, color=layer.get_colour()) #ic.getColour(layer.type))

        allColumnCoordinates.append(columncoordinates)


    # plot observed ice columns
    for ic in observed_ice:

        if len(ic.column) == 0:
            height = 0.05
            plb.vlines(ic.date, -height, height, lw=4, color='white')
            plb.vlines(ic.date, -height, height, lw=2, color='red')
        else:
            # some idea of progress on the plotting
            print("Plotting observations.")

            # make data for plotting. [ice layers.. [fro, too, icetype]].
            too = -ic.water_line  # water line is on xaxis

            for i in range(len(ic.column)-1, -1, -1):
                layer = ic.column[i]
                fro = too
                too = too + layer.height

                if fro < lowest_point:
                    lowest_point = fro

                padding = 0.
                padding_color = 'white'
                # outline the observations in orange if I have modelled the ice height after observation.
                if ic.metadata.get('IceHeightAfter') == 'Modeled':
                    padding_color = 'orange'
                # add coordinates to a vline plot
                plb.vlines(ic.date, fro-padding, too+padding, lw=6, color=padding_color)
                plb.vlines(ic.date, fro, too, lw=4, color=layer.get_colour())

    # the limits of the left side y-axis are defined relative to the lowest point in the ice cover
    # and the highest point of the observed snow cover.
    plb.ylim(lowest_point*1.1, max(snotot)*1.05)

    # Plot temperatures on a separate y axis
    plb.twinx()
    temp_pluss = []
    temp_minus = []

    for i in range(0, len(temp), 1):
        if temp[i] >= 0:
            temp_pluss.append(temp[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(temp[i])
            temp_pluss.append(np.nan)

    plb.plot(date, temp, "black")
    plb.plot(date, temp_pluss, "red")
    plb.plot(date, temp_minus, "blue")
    plb.ylim(-4*(max(temp)-min(temp)), max(temp))


    ########################################

    temp_atm = []
    temp_surf = []
    atm_minus_surf = []
    itterations = []
    EB = []
    S = []
    L = []
    H = []
    LE = []
    R = []
    G = []
    s_inn = []
    albedo = []
    SC = []
    R_i = []
    stability_correction = []
    CC = []
    SM = []


    if energy_balance[0].date > date[0]:
        i = 0
        while energy_balance[0].date > date[i]:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)
            i += 1

    for eb in energy_balance:
        if eb.EB is None:
            temp_atm.append(np.nan)
            temp_surf.append(np.nan)
            atm_minus_surf.append(np.nan)
            itterations.append(np.nan)
            EB.append(np.nan)
            S.append(np.nan)
            L.append(np.nan)
            H.append(np.nan)
            LE.append(np.nan)
            R.append(np.nan)
            G.append(np.nan)
            s_inn.append(np.nan)
            albedo.append(np.nan)
            SC.append(np.nan)
            R_i.append(np.nan)
            stability_correction.append(np.nan)
            CC.append(np.nan)
            SM.append(np.nan)

        else:
            temp_atm.append(eb.temp_atm)
            temp_surf.append(eb.temp_surface)
            atm_minus_surf.append(eb.temp_atm-eb.temp_surface)
            itterations.append(eb.iterations)
            EB.append(eb.EB)
            S.append(eb.S)
            L.append(eb.L_a+eb.L_t)
            H.append(eb.H)
            LE.append(eb.LE)
            R.append(eb.R)
            G.append(eb.G)
            s_inn.append(eb.s_inn)
            albedo.append(eb.albedo)
            SC.append(eb.SC)
            R_i.append(eb.R_i)
            stability_correction.append(eb.stability_correction)
            CC.append(eb.CC)
            SM.append(eb.SM)


    ############### Second sub plot ##########################
    plt.subplot2grid((11, 1), (2, 0), rowspan=1)
    plb.bar(date, itterations, label="Iterations for T_sfc", color="gray")
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylabel("#")
    # l = plb.legend()
    # l.set_zorder(20)


    ############## CC, wind and prec ##########################
    plt.subplot2grid((11, 1), (3, 0), rowspan=1)

    # plot precipitation
    prec_mm = [p*1000. for p in prec]
    plb.bar(date, prec_mm, width=1, lw=0.5, label="Precipitation", color="deepskyblue", zorder=10)
    plb.ylabel("RR [mm]")
    plb.xlim(date[0], date[-1])
    plb.ylim(0, max(prec_mm)*1.1)
    plb.xticks([])

    # plot cloud cover (NaN values are flagged in pink; note that the original
    # comparison "clouds[i] == np.nan" is always False, so np.isnan is needed)
    for i in range(0, len(clouds) - 1, 1):
        if np.isnan(clouds[i]):
            plb.hlines(0, date[i], date[i + 1], lw=190, color="pink")
        else:
            plb.hlines(0, date[i], date[i + 1], lw=190, color=str(-(clouds[i] - 1.)))

    plb.twinx()
    plb.plot(date, wind, color="greenyellow", label="Wind 2m", lw=2, zorder=15)
    plb.ylabel("FFM [m/s]")



    ############ Temp diff sfc and atm #############################
    plt.subplot2grid((11, 1), (4, 0), rowspan=2)

    plb.plot(date, temp_atm, "black", zorder=5)
    plb.plot(date, temp, "blue", zorder=10)
    plb.plot(date, temp_surf, "green")
    area = np.minimum(temp_atm, temp_surf)

    plb.fill_between(date, temp_atm, area, color='red') #, alpha='0.5')
    plb.fill_between(date, temp_surf, area, color='blue') #, alpha='0.5')
    plb.ylim(-50, 20)
    plb.ylabel("[C]")


    # this plots temperature on separate right side axis
    plb.twinx()

    temp_pluss = []
    temp_minus = []
    for i in range(0, len(atm_minus_surf), 1):
        if atm_minus_surf[i] >= 0:
            temp_pluss.append(atm_minus_surf[i])
            temp_minus.append(np.nan)
        else:
            temp_minus.append(atm_minus_surf[i])
            temp_pluss.append(np.nan)
    plb.plot(date, atm_minus_surf, "black",  lw=2)
    plb.plot(date, temp_pluss, "red",  lw=2)
    plb.plot(date, temp_minus, "blue",  lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylim(-1, 15)
    plb.ylabel("atm minus surf [C]")


    ################# Richardson no and stability correction of turbulent fluxes #######################
    plt.subplot2grid((11, 1), (6, 0), rowspan=1)

    plb.plot(date, R_i, color="blue", label="Richardson no.", lw=1, zorder=15)
    plb.ylabel("R_i (b) []")

    plb.twinx()

    stable = []
    unstable = []
    for i in range(0, len(R_i), 1):
        if R_i[i] > 0:
            stable.append(stability_correction[i])
            unstable.append(np.nan)
        elif R_i[i] < 0:
            unstable.append(stability_correction[i])
            stable.append(np.nan)
        else:
            unstable.append(np.nan)
            stable.append(np.nan)

    plb.plot(date, stability_correction, "black",  lw=2)
    plb.plot(date, stable, "green",  lw=2)
    plb.plot(date, unstable, "red",  lw=2)
    plb.xlim(date[0], date[-1])
    plb.xticks([])
    plb.ylabel("stable(g) unstable(r) []")



    ############# Energy terms and albedo ################
    plt.subplot2grid((11, 1), (7, 0), rowspan=4)


    # plot surface albedo
    for i in range(0, len(albedo) - 1, 1):
        if albedo[i] > 0.:
            plb.hlines(-11000, date[i], date[i + 1], lw=25, color=str(albedo[i]))
        elif np.isnan(clouds[i]):
            plb.hlines(-11000, date[i], date[i + 1], lw=25, color="1.0")


    plb.plot(date, SM, "red", lw=3)
    plb.plot(date, SC, "blue", lw=3)
    plb.plot(date, [0.]*len(date), "white", lw=2)
    plb.plot(date, H, "blue")
    plb.plot(date, LE, "navy")
    plb.plot(date, R, "turquoise")
    plb.plot(date, G, "crimson")
    plb.plot(date, L, "green", lw=1)
    plb.plot(date, S, "gold", lw=1)
    #plb.plot(date, s_inn, "gold", lw=1)
    plb.plot(date, CC, "pink", lw=1)
    plb.plot(date, EB, "black")

    plb.ylim(-12000, 13000)
    plb.xlim(date[0], date[-1])
     #fig.tight_layout()
    plb.ylabel("Q [kJ/m2/24hrs]")


    plb.savefig(filename)
Code example #52
0
all_strokes = []
stroke_lengths = []
for stroke_id in stroke_ids:
     new_stroke = data[data['Stroke_ID'] == stroke_id]
     all_strokes.append(new_stroke)
     stroke_lengths.append(len(new_stroke))

print("mean, median, stdev, variance of stroke lengths")
avg = statistics.mean(stroke_lengths)
print(avg)
print(statistics.median(stroke_lengths))
print(statistics.stdev(stroke_lengths))
print(statistics.variance(stroke_lengths))
plt.hist(stroke_lengths, bins = 50, color = 'b')
plt.vlines(avg, 0, 500, 'r')
plt.title("stroke lengths, mean = " + str(avg))
plt.savefig("figs/stroke_lengths.pdf")

for param in params:

     print(param)
     #print("labels are: " + str(unique_labels))

     # print(str(len(all_strokes)) + " strokes")

     print("getting first " + str(sample_size) + " strokes")

     # randomly sample from all strokes
#     strokes_sample = []
     strokes_sample = random.sample(all_strokes, sample_size)
Code example #53
0

## example 2
import matplotlib.pylab as plt
%pylab

figsize(12.5,6)
from lifelines.plotting import plot_lifetimes
from numpy.random import uniform, exponential
N = 25
current_time = 10
actual_lifetimes = np.array([[exponential(12), exponential(2)][uniform()<0.5] for i in range(N)])
observed_lifetimes = np.minimum(actual_lifetimes,current_time)
observed= actual_lifetimes < current_time
plt.xlim(0,25)
plt.vlines(10,0,30,lw=2, linestyles="--")
plt.xlabel('time')
plt.title('Births and deaths of our population, at $t=10$')
plot_lifetimes(observed_lifetimes, censorship=observed)
print "Observed lifetimes at time %d:\n"% (current_time), observed_lifetimes

?plot_lifetimes

import patsy






Code example #54
0
    if Camera == 'iPhone':
        PickPoint = [[1500, 1000]]
    elif Camera[:6] == 'tiscam':
        # Select middle of image...
        PickPoint = [[ImageHeight / 2, ImageWidth / 2]]
    elif Camera == 'Elphel':
        PickPoint = [[ImageHeight / 2, ImageWidth / 2]]
plt.title('Original image')
Horizon = int(PickPoint[0][1])
Vertigo = int(PickPoint[0][0])
if SelectStartPointManually:
    print('You selected horizontal line', Horizon, 'and vertical line', Vertigo)
else:
    print('I selected horizontal line', Horizon, 'and vertical line', Vertigo)
plt.hlines(Horizon, 0, ImageHeight, 'r')
plt.vlines(Vertigo, 0, ImageWidth, 'b')
plt.draw()
plt.subplot(223)
HorizontalProfile = Image[Horizon, :]
plt.plot(HorizontalProfile, 'r')
plt.title('Horizontal Profile')
# plt.xlim(0, ImageHeight)
# plt.ylim(0, 256)
plt.subplot(222)
VerticalProfile = Image[:, Vertigo]
plt.plot(VerticalProfile, range(ImageWidth), 'b')
# plt.xlim(0, 256)
# plt.ylim(0, ImageWidth)
plt.title('Vertical Profile')
plt.draw()
Code example #55
0
File: plot_pk_k3pk_zsa_2.py Project: jsdillon/capo
re_z = re.compile(r"power_21cm_z(\d+\.\d+)\.dat")
kpl_pos = ks[k0 + 1 :]
for filename in glob.glob("lidz_mcquinn_k3pk/*7.3*dat"):
    print "Reading", filename
    d = n.array([map(float, L.split()) for L in open(filename).readlines()])
    ks, pk = d[:, 0], d[:, 1]
    z_file = float(re_z.match(os.path.basename(filename)).groups()[0])
    z = C.pspec.f2z(0.151)
    k3pk = ks ** 3 / (2 * n.pi ** 2) * pk
    p.subplot(122)
    p.plot(ks, k3pk * mean_temp(z) ** 2, "m-")

tau_h = 100 + 15.0  # in ns
k_h = C.pspec.dk_deta(C.pspec.f2z(0.151)) * tau_h
p.subplot(121)
p.vlines(k_h, -1e7, 1e8, linestyles="--", linewidth=1.5)
p.vlines(-k_h, -1e7, 1e8, linestyles="--", linewidth=1.5)
# p.gca().set_yscale('log', nonposy='clip')
p.xlabel(r"$k_\parallel\ [h\ {\rm Mpc}^{-1}]$", fontsize="large")
p.ylabel(r"$P(k)[\ {\rm mK}^2\ (h^{-1}\ {\rm Mpc})^3]$", fontsize="large")
p.ylim(-0.6e7, 1.75e7)
# p.ylim(1e5,5e16)
p.grid()


p.subplot(122)
# if ONLY_POS_K: p.plot([.5], [248**2], 'mv', label='GMRT2013')
# else: p.plot([-.5, .5], [248**2, 248**2], 'mv', label='GMRT2013')
p.vlines(k_h, -1e7, 1e7, linestyles="--", linewidth=1.5)
# theoretical_ks = n.linspace(.058,.5, 100)
# theor_errs = 1441090 * n.array(theoretical_ks)**3  / (2*n.pi**2)
Code example #56
0
File: plot_pk_k3pk_zsa_2.py Project: jsdillon/capo
def posterior(kpl, pk, err, pkfold=None, errfold=None, f0=0.151, umag=16.0, theo_noise=None):
    import scipy.interpolate as interp

    k0 = n.abs(kpl).argmin()
    kpl = kpl[k0:]
    z = C.pspec.f2z(f0)
    kpr = C.pspec.dk_du(z) * umag
    k = n.sqrt(kpl ** 2 + kpr ** 2)
    if pkfold is None:
        print "Folding for posterior"
        pkfold = pk[k0:].copy()
        errfold = err[k0:].copy()
        pkpos, errpos = pk[k0 + 1 :].copy(), err[k0 + 1 :].copy()
        pkneg, errneg = pk[k0 - 1 : 0 : -1].copy(), err[k0 - 1 : 0 : -1].copy()
        pkfold[1:] = (pkpos / errpos ** 2 + pkneg / errneg ** 2) / (1.0 / errpos ** 2 + 1.0 / errneg ** 2)
        errfold[1:] = n.sqrt(1.0 / (1.0 / errpos ** 2 + 1.0 / errneg ** 2))

    # ind = n.logical_and(kpl>.2, kpl<.5)
    ind = n.logical_and(k > 0.15, k < 0.5)
    # ind = n.logical_and(kpl>.12, kpl<.5)
    # print kpl,pk.real,err
    k = k[ind]
    pkfold = pkfold[ind]
    errfold = errfold[ind]
    # if not theo_noise is None:
    #    theo_noise=theo_noise[ind]
    #    if True:
    if False:
        # remove k=.345 point
        w = n.floor(k * 100) != 34
        k = k[w]
        pkfold = pkfold[w]
        errfold = errfold[w]
    pk = k ** 3 * pkfold / (2 * n.pi ** 2)
    err = k ** 3 * errfold / (2 * n.pi ** 2)
    err_omit = err.copy()
    err_omit[3] = 1e10  # give no weight to this point
    # s = n.logspace(1,3.5,100)
    s = n.linspace(-5000, 5000, 10000)
    #    print s
    data = []
    data_omit = []
    for _k, _pk, _err in zip(k, pk, err):
        print(_k, _pk.real, _err)
    #    print '%6.3f    %9.5f     9.5f'%(_k, _pk.real, _err)
    for ss in s:
        data.append(n.exp(-0.5 * n.sum((pk.real - ss) ** 2 / err ** 2)))
        data_omit.append(n.exp(-0.5 * n.sum((pk.real - ss) ** 2 / err_omit ** 2)))
    #    print data[-1]
    data = n.array(data)
    data_omit = n.array(data_omit)
    # print data
    # print s
    # data/=n.sum(data)
    data /= n.max(data)
    data_omit /= n.max(data_omit)
    p.figure(5, figsize=(6.5, 5.5))
    p.plot(s, data, "k", linewidth=2)
    #    p.plot(s, data_omit, 'k--', linewidth=1)
    # use a spline interpolator to get the 1 and 2 sigma limits.
    # spline = interp.interp1d(data,s)
    # print spline
    # print max(data), min(data)
    # print spline(.68), spline(.95)
    # p.plot(spline(n.linspace(.0,1,100)),'o')
    #    p.plot(s, n.exp(-.5)*n.ones_like(s))
    #    p.plot(s, n.exp(-.5*2**2)*n.ones_like(s))
    data_c = n.cumsum(data)
    data_omit_c = n.cumsum(data_omit)
    data_c /= data_c[-1]
    data_omit_c /= data_omit_c[-1]
    mean = s[n.argmax(data)]
    s1lo, s1hi = s[data_c < 0.1586][-1], s[data_c > 1 - 0.1586][0]
    s2lo, s2hi = s[data_c < 0.0227][-1], s[data_c > 1 - 0.0227][0]
    print "Posterior: Mean, (1siglo,1sighi), (2siglo,2sighi)"
    print "Posterior:", mean, (s1lo, s1hi), (s2lo, s2hi)
    mean_o = s[n.argmax(data_omit)]
    s1lo_o, s1hi_o = s[data_omit_c < 0.1586][-1], s[data_omit_c > 1 - 0.1586][0]
    s2lo_o, s2hi_o = s[data_omit_c < 0.0227][-1], s[data_omit_c > 1 - 0.0227][0]
    print "Posterior (omit):", mean_o, (s1lo_o, s1hi_o), (s2lo_o, s2hi_o)
    # sig1 = []
    # sig2 = []
    # s1 = data[maxarg:] - n.exp(-.5)
    # sig1.append(n.floor(n.median(n.where(n.abs(s1)<.01)))+maxarg)
    # s1 = data[:maxarg] - n.exp(-.5)
    # sig1.append(n.floor(n.median(n.where(n.abs(s1)<.01))))

    # s2 = data[maxarg:] - n.exp(-.5*2**2)
    # sig2.append(n.floor(n.median(n.where(n.abs(s2)<.01)))+maxarg)
    # s2 = data[:maxarg] - n.exp(-.5*2**2)
    # sig2.append(n.floor(n.median(n.where(n.abs(s2)<.01))))

    # p.vlines(s[sig1[-1]],0,1,color=(0,107/255.,164/255.), linewidth=2)
    p.vlines(s1lo, 0, 1, color=(0, 107 / 255.0, 164 / 255.0), linewidth=2)
    p.vlines(s1hi, 0, 1, color=(0, 107 / 255.0, 164 / 255.0), linewidth=2)
    # p.vlines(s1lo_o,0,1,color=(0,107/255.,164/255.), linestyle='--', linewidth=2)
    # p.vlines(s1hi_o,0,1,color=(0,107/255.,164/255.), linestyle='--', linewidth=2)

    # p.vlines(s[sig2[-1]],0,1,color=(0,107/255.,164/255.), linewidth=2)
    # 2-sigma limits (the dashed, commented-out variants are for data_omit)
    p.vlines(s2lo, 0, 1, color=(1, 128 / 255.0, 14 / 255.0), linewidth=2)
    p.vlines(s2hi, 0, 1, color=(1, 128 / 255.0, 14 / 255.0), linewidth=2)
    # p.vlines(s2lo_o,0,1,color=(1,128/255.,14/255.), linestyle='--', linewidth=2)
    # p.vlines(s2hi_o,0,1,color=(1,128/255.,14/255.), linestyle='--', linewidth=2)
    if theo_noise is not None:
        # inverse-variance average of the theoretical noise levels
        s2l_theo = n.sqrt(1.0 / n.mean(1.0 / theo_noise ** 2))
        p.vlines(s2l_theo ** 2, 0, 1, color="black", linewidth=2)
        print ("Noise level: {0:0>5.3f} mK^2".format(s2l_theo ** 2))
    p.xlabel(r"$k^3/2\pi^2\ P(k)\ [{\rm mK}^2]$", fontsize="large")
    p.ylabel("Posterior Distribution", fontsize="large")
    p.xlim(0, 700)
    if s2lo > 700:
        p.xlim(0, 2000)
    p.grid(1)
    p.subplots_adjust(left=0.15, top=0.95, bottom=0.15, right=0.95)
    p.savefig("posterior.png")
    f = open("posterior.txt", "w")
    f.write("Posterior: Mean,\t(1siglo,1sighi),\t(2siglo,2sighi)\n")
    f.write("Posterior: {0:.4f},\t({1:.4f},{2:.4f}),\t({3:.4f},{4:.4f})\n".format(mean, s1lo, s1hi, s2lo, s2hi))
    f.write(
        "Posterior (omit): {0:.4f}, ({1:.4f},{2:.4f}),\t({3:.4f},{4:.4f})\n".format(
            mean_o, s1lo_o, s1hi_o, s2lo_o, s2hi_o
        )
    )
    f.write("Noise level: {0:0>5.3f} mk^2\n".format(s2l_theo ** 2))
    f.close()
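
A minimal usage sketch (not part of the source file): the arrays below are
placeholders, and numpy as n, pylab as p, and capo as C must be importable
at module level as in the original script.

kpl = 0.025 * (n.arange(42) - 21)          # k_parallel bins in h Mpc^-1; one extra
                                           # negative bin so the folded halves line up
pk = n.random.normal(0.0, 1e6, kpl.size)   # placeholder band powers, mK^2 (h^-1 Mpc)^3
err = 2e6 * n.ones_like(kpl)               # placeholder 1-sigma errors
posterior(kpl, pk, err)                    # plots figure 5, writes posterior.png/.txt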
Code example #57
0
File: MTF-SNR-CNR.py    Project: habi/GlobalDiagnostiX
    if options.ROI[0][0] > options.ROI[1][0]:
        plt.title('Select top left, then bottom right!')
        options.ROI = plt.ginput(2)
        if options.ROI[0][0] > options.ROI[1][0]:
            plt.title('TOP LEFT, BOTTOM RIGHT!')
            options.ROI = plt.ginput(2)

if options.ROI[0][0] > 0:
    plt.subplot(211)
    plt.imshow(Image)
    plt.title('Original')
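    # Draw the selected ROI as a red rectangle (two horizontal and two
    # vertical line segments) on top of the original image.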
    plt.hlines(options.ROI[0][1], options.ROI[0][0], options.ROI[1][0], 'r',
               linewidth=3)
    plt.hlines(options.ROI[1][1], options.ROI[0][0], options.ROI[1][0], 'r',
               linewidth=3)
    plt.vlines(options.ROI[0][0], options.ROI[0][1], options.ROI[1][1], 'r',
               linewidth=3)
    plt.vlines(options.ROI[1][0], options.ROI[0][1], options.ROI[1][1], 'r',
               linewidth=3)
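    # Crop to the ROI; note the numpy index order: rows (y) first, then
    # columns (x), so ginput's (x, y) pairs are swapped here.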
    Image = Image[options.ROI[0][1]:options.ROI[1][1],
                  options.ROI[0][0]:options.ROI[1][0]]
    plt.subplot(212)
    plt.imshow(Image)
    plt.title('ROI: ' +
              str(int(numpy.round(options.ROI[0][0]))) + ':' +
              str(int(numpy.round(options.ROI[0][1]))) + ' to ' +
              str(int(numpy.round(options.ROI[1][0]))) + ':' +
              str(int(numpy.round(options.ROI[1][1]))))
plt.draw()

# Plot horizontal line
SmoothingStep = 50