Example #1
import matplotlib.pyplot as P

def main(args):

  genes = get_genes_with_features(args['file'])
  for key, featurearray in genes.items():
    cluster, branch = key.split()
    length = int(featurearray[0][0])
    x = [e + 1 for e in range(length + 1)]
    y1 = [0] * (length + 1)
    y2 = [0] * (length + 1)
    for feature in featurearray:
      length, pos, aa, prob = feature[0:4]
      if prob > 0.95: y1[pos] = prob
      else: y2[pos] = prob

    P.bar(x, y1, color='#000000', edgecolor='#000000')
    P.bar(x, y2, color='#bbbbbb', edgecolor='#bbbbbb')
    P.ylim(0, 1)
    P.xlim(0, length)
    P.xlabel("position in the ungapped alignment [aa]")
    P.ylabel(r'$P (\omega > 1)$')
    P.title(cluster + " (branch " + branch + ")")

    # axhline's xmin/xmax are axes fractions in [0, 1], not data coordinates
    P.axhline(y=.95, linestyle=":", color="k")
    P.savefig(cluster + "." + branch + ".png", format="png")
    P.close()
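The helper get_genes_with_features is not shown on this page; a minimal stand-in, assuming a whitespace-delimited file with one cluster/branch/length/position/aa/probability row per feature, could look like this:

from collections import defaultdict

def get_genes_with_features(path):
    # Hypothetical parser: maps "cluster branch" keys to lists of
    # (length, pos, aa, prob) tuples, matching how main() unpacks them.
    genes = defaultdict(list)
    with open(path) as fh:
        for line in fh:
            cluster, branch, length, pos, aa, prob = line.split()
            genes[cluster + " " + branch].append(
                (int(length), int(pos), aa, float(prob)))
    return dict(genes)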
Example #2
def show_plot(X, y, n_neighbors=10, h=0.2):
    # Create color maps
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#FFAAAA', '#AAFFAA', '#AAAAFF','#AAAAFF'])
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000','#FF0000',])

    for weights in ['uniform', 'distance']:
        # create an instance of a k-nearest-neighbours classifier and fit the data
        clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors, weights=weights)
        clf.fit(X, y)

        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                             np.arange(y_min, y_max, h))
        Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

        # Put the result into a color plot
        Z = Z.reshape(xx.shape)
        plt.figure()
        plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

        # Plot also the training points
        plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
        plt.xlim(xx.min(), xx.max())
        plt.ylim(yy.min(), yy.max())
        plt.title("3-Class classification (k = %i, weights = '%s')"
                  % (n_neighbors, weights))

    plt.show()
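show_plot expects neighbors, np, plt, and ListedColormap to already be in scope; a quick call on synthetic data (the blob parameters here are purely illustrative) might be:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors
from sklearn.datasets import make_blobs

# Three well-separated 2-D clusters; labels 0..2 index into the color maps above.
X, y = make_blobs(n_samples=150, centers=3, random_state=0)
show_plot(X, y, n_neighbors=15, h=0.2)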
Example #3
def plotAlphas(datasetNames, sampleSizes, foldsSet, cvScalings, sampleMethods, fileNameSuffix):
    """
    Plot the variation in the error with alpha for penalisation.
    """
    for i, datasetName in enumerate(datasetNames):
        #plt.figure(i)

        for k in range(len(sampleMethods)):
            outfileName = outputDir + datasetName + sampleMethods[k] + fileNameSuffix + ".npz"
            data = numpy.load(outfileName)

            errors = data["arr_0"]
            meanMeasures = numpy.mean(errors, 0)

            foldInd = 4

            # use a distinct loop variable so the dataset index i is not clobbered
            for j in range(sampleSizes.shape[0]):
                plt.plot(cvScalings, meanMeasures[j, foldInd, 2:8], next(linecycler), label="m="+str(sampleSizes[j]))

            plt.xlabel("Alpha")
            plt.ylabel('Error')
            xmin, xmax = cvScalings[0], cvScalings[-1]
            plt.xlim((xmin, xmax))

            plt.legend(loc="upper left")
    plt.show()
Example #4
def exec_transmissions():
    IP, IP_AP, files = parser_reduce()
    plt.figure("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    ENS_TEMPS_, TRANSMISSION_ = transmissions(files)
    plt.plot(ENS_TEMPS_, TRANSMISSION_, "r.", label="Transmissions: ")

    # sort the addresses numerically before converting them back to strings
    lot = sorted(map(inet_aton, IP))
    iplist1 = list(map(inet_ntoa, lot))

    for i in iplist1: # draw the annotations; an address of length 9 vs. 8 tells us how tall the plot window must be
        if len(i) == 9:
            maxim_ = i[-2:] # used for the height of the plot window
            plt.annotate('   Machine: ' + i, horizontalalignment='left', xy=(1, float(i[-2:])), xytext=(1, float(i[-2:]) - 0.4), arrowprops=dict(facecolor='black', shrink=0.05),)
        else:
            maxim_ = i[-1:] # used for the height of the plot window
            plt.annotate('   Machine: ' + i, horizontalalignment='left', xy=(1, float(i[7])), xytext=(1, float(i[7]) - 0.4), arrowprops=dict(facecolor='black', shrink=0.05),)
    for i in IP_AP: # access point (special case)
        if i[-2:]:
            plt.annotate('   access point: ' + i, xy=(1, float(i[7])), xytext=(1, float(i[7]) - 0.4), arrowprops=dict(facecolor='black', shrink=0.05),)

    plt.ylim(0, float(maxim_) + 1) # this is what the sort above was for
    plt.xlim(1, 1.1)
    plt.legend(loc='best', prop={'size': 10})
    plt.xlabel('Time (s)')
    plt.ylabel('IP of transmitting machines')
    plt.grid(True)
    plt.title("GRAPHE_D'EVOLUTION_DES_TRANSMISSIONS")
    plt.show()
Example #5
def scree_plot(pca_obj, fname=None): 
    '''
    Scree plot for variance & cumulative variance by component from PCA. 

    Arguments: 
        - pca_obj: a fitted sklearn PCA instance
        - fname: path to write plot to file

    Output: 
        - scree plot 
    '''   
    components = pca_obj.n_components_
    variance = pca_obj.explained_variance_ratio_
    plt.figure()
    plt.plot(np.arange(1, components + 1), np.cumsum(variance), label='Cumulative Variance')
    plt.plot(np.arange(1, components + 1), variance, label='Variance')
    plt.xlim([0.8, components]); plt.ylim([0.0, 1.01])
    plt.xlabel('No. Components', labelpad=11); plt.ylabel('Variance Explained', labelpad=11)
    plt.legend(loc='best') 
    plt.tight_layout() 
    if fname is not None:
        plt.savefig(fname)
        plt.close() 
    else:
        plt.show() 
    return 
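A quick way to exercise scree_plot, assuming scikit-learn is available (the data here is random and purely illustrative):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
pca = PCA(n_components=5).fit(rng.rand(100, 10))
scree_plot(pca)                     # display interactively
scree_plot(pca, fname='scree.png')  # or write to disk and close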
Example #6
def plotErrorBars(dict_to_plot, x_lim, y_lim, xlabel, y_label, title, out_file, margin=[0.05, 0.05], loc=2):

    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(y_label)

    if y_lim is None:
        y_lim = [1 * float("Inf"), -1 * float("Inf")]

    max_val_seen_y = y_lim[1] - margin[1]
    min_val_seen_y = y_lim[0] + margin[1]
    print(min_val_seen_y, max_val_seen_y)
    max_val_seen_x = x_lim[1] - margin[0]
    min_val_seen_x = x_lim[0] + margin[0]
    handles = []
    for k in dict_to_plot:
        means, stds, x_vals = dict_to_plot[k]

        min_val_seen_y = min(min(np.array(means) - np.array(stds)), min_val_seen_y)
        max_val_seen_y = max(max(np.array(means) + np.array(stds)), max_val_seen_y)

        min_val_seen_x = min(min(x_vals), min_val_seen_x)
        max_val_seen_x = max(max(x_vals), max_val_seen_x)

        handle = plt.errorbar(x_vals, means, yerr=stds)
        handles.append(handle)
        print(max_val_seen_y)
    plt.xlim([min_val_seen_x - margin[0], max_val_seen_x + margin[0]])
    plt.ylim([min_val_seen_y - margin[1], max_val_seen_y + margin[1]])
    plt.legend(handles, dict_to_plot.keys(), loc=loc)
    plt.savefig(out_file)
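plotErrorBars expects each dict value to be a (means, stds, x_vals) triple; a usage sketch with made-up numbers:

x = list(range(1, 6))
curves = {
    'method A': ([0.9, 0.8, 0.7, 0.65, 0.6], [0.05] * 5, x),
    'method B': ([0.8, 0.7, 0.6, 0.55, 0.5], [0.04] * 5, x),
}
plotErrorBars(curves, x_lim=[1, 5], y_lim=None, xlabel='m',
              y_label='error', title='demo', out_file='errbars.png')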
Example #7
def plot_scenario(strategies, names, scenario_id=1):
    probabilities = get_scenario(scenario_id)

    plt.figure(figsize=(6, 4.5))

    ax = plt.subplot(111)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.spines["right"].set_visible(False)
    ax.spines["left"].set_visible(False)

    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

    plt.yticks(fontsize=14)
    plt.xticks(fontsize=14)
    plt.xlim((0, 1300))

    # Remove the tick marks; they are unnecessary with the tick lines we just plotted.
    plt.tick_params(axis="both", which="both", bottom=True, top=False,
                    labelbottom=True, left=False, right=False, labelleft=True)

    for rank, (strategy, name) in enumerate(zip(strategies, names)):
        plot_strategy(probabilities, strategy, name, rank)

    plt.title("Bandits: " + str(probabilities), fontweight='bold')
    plt.xlabel('Number of Trials', fontsize=14)
    plt.ylabel('Cumulative Regret', fontsize=14)
    plt.legend(names)
    plt.show()
Example #8
def scatter(x, y, equal=False, xlabel=None, ylabel=None, xinvert=False, yinvert=False):
    """
    Plot a scatter with simple formatting options
    """
    plt.scatter(x, y, 200, color=[0.3, 0.3, 0.3], edgecolors="white", linewidth=1, zorder=2)
    sns.despine()
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if equal:
        plt.gca().set_aspect("equal")
        plt.plot([0, max([x.max(), y.max()])], [0, max([x.max(), y.max()])], color=[0.6, 0.6, 0.6], zorder=1)
        bmin = min([x.min(), y.min()])
        bmax = max([x.max(), y.max()])
        rng = abs(bmax - bmin)
        plt.xlim([bmin - rng * 0.05, bmax + rng * 0.05])
        plt.ylim([bmin - rng * 0.05, bmax + rng * 0.05])
    else:
        xrng = abs(x.max() - x.min())
        yrng = abs(y.max() - y.min())
        plt.xlim([x.min() - xrng * 0.05, x.max() + xrng * 0.05])
        plt.ylim([y.min() - yrng * 0.05, y.max() + yrng * 0.05])
    if xinvert:
        plt.gca().invert_xaxis()
    if yinvert:
        plt.gca().invert_yaxis()
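A sketch of calling scatter on random data (seaborn must be installed for sns.despine):

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

rng = np.random.RandomState(1)
x = rng.rand(20) * 10
y = x + rng.randn(20)
scatter(x, y, equal=True, xlabel='observed', ylabel='predicted')
plt.show()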
Example #9
File: LVQ.py Project: jayshonzs/ESL
def draw(data, classes, model, resolution=100):
    mycm = mpl.cm.get_cmap('Paired')
    
    one_min, one_max = data[:, 0].min()-0.1, data[:, 0].max()+0.1
    two_min, two_max = data[:, 1].min()-0.1, data[:, 1].max()+0.1
    xx1, xx2 = np.meshgrid(np.arange(one_min, one_max, (one_max-one_min)/resolution),
                     np.arange(two_min, two_max, (two_max-two_min)/resolution))
    
    inputs = np.c_[xx1.ravel(), xx2.ravel()]
    z = []
    for i in range(len(inputs)):
        z.append(predict(model, inputs[i])[0])
    result = np.array(z).reshape(xx1.shape)
    
    plt.contourf(xx1, xx2, result, cmap=mycm)
    plt.scatter(data[:, 0], data[:, 1], s=50, c=classes, cmap=mycm)
    
    # prototype class labels: five prototypes per class
    t = np.repeat(np.arange(3), 5)
    plt.scatter(model[:, 0], model[:, 1], s=150, c=t, cmap=mycm)
    
    plt.xlim([0, 10])
    plt.ylim([0, 10])
    
    plt.show()
Example #10
def _plot_histogram(self, data, number_of_devices=1,
        preamp_timeout=1253):
    if number_of_devices == 0:
        return
    data = np.array(data)
    plt.figure(3)
    plt.ioff()
    plt.get_current_fig_manager().window.wm_geometry("800x550+700+25")
    plt.clf()
    if number_of_devices == 1:
        plt.hist(data[0,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
            color='b')
    elif number_of_devices == 2:
        plt.hist(data[0,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
            color='r', label='JPM A')
        plt.hist(data[1,:], bins=preamp_timeout, range=(1, preamp_timeout-1),
            color='b', label='JPM B')
        plt.legend()
    elif number_of_devices > 2:
        raise Exception('Histogram plotting for more than two '
            'devices is not implemented.')
    plt.xlabel('Timing Information [Preamp Time Counts]')
    plt.ylabel('Counts')
    plt.xlim(0, preamp_timeout)
    plt.draw()
    plt.pause(0.05)
Example #11
def tuning(x, y, err=None, smooth=None, ylabel=None, pal=None):
    """
    Plot a tuning curve
    """
    if smooth is not None:
        xs, ys = smoothfit(x, y, smooth)
        plt.plot(xs, ys, linewidth=4, color="black", zorder=1)
    else:
        ys = asarray([0])
    if pal is None:
        pal = sns.color_palette("husl", n_colors=len(x) + 6)
        pal = pal[2 : 2 + len(x)][::-1]
    plt.scatter(x, y, s=300, linewidth=0, color=pal, zorder=2)
    if err is not None:
        plt.errorbar(x, y, yerr=err, linestyle="None", ecolor="black", zorder=1)
    plt.xlabel("Wall distance (mm)")
    plt.ylabel(ylabel)
    plt.xlim([-2.5, 32.5])
    if err is not None:
        err_tmp = asarray(err, dtype=float).copy()  # copy so the caller's array is not mutated
        err_tmp[isnan(err_tmp)] = 0
        rng = max([nanmax(ys), nanmax(y + err_tmp)])
    else:
        rng = max([nanmax(ys), nanmax(y)])
    plt.ylim([0 - rng * 0.1, rng + rng * 0.1])
    plt.yticks(linspace(0, rng, 3))
    plt.xticks(range(0, 40, 10))
    sns.despine()
    return rng
Example #12
def predicted_probabilities(y_true, y_pred, n_groups=30):
    """Plots the distribution of predicted probabilities.

    Parameters
    ----------
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].
    n_groups : int, optional
        The number of groups to create. The default value is 30.

    Notes
    -----
    .. plot:: pyplots/predicted_probabilities.py
    """
    plt.hist(y_pred, n_groups)
    plt.xlim([0, 1])
    plt.xlabel('Predicted Probability')
    plt.ylabel('Count')

    title = 'Distribution of Predicted Probabilities (n = {})'
    plt.title(title.format(len(y_pred)))

    plt.tight_layout()
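A minimal call, with synthetic probabilities standing in for real model output:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, 500)
y_pred = rng.beta(2, 2, 500)  # synthetic scores in [0, 1]
predicted_probabilities(y_true, y_pred)
plt.show()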
Example #13
def roc_plot(y_true, y_pred):
    """Plots a receiver operating characteristic.

    Parameters
    ----------
    y_true : array_like
        Observed labels, either 0 or 1.
    y_pred : array_like
        Predicted probabilities, floats on [0, 1].

    Notes
    -----
    .. plot:: pyplots/roc_plot.py

    References
    ----------
    .. [1] Pedregosa, F. et al. "Scikit-learn: Machine Learning in Python."
       *Journal of Machine Learning Research* 12 (2011): 2825–2830.
    .. [2] scikit-learn developers. "Receiver operating characteristic (ROC)."
       Last modified August 2013.
       http://scikit-learn.org/stable/auto_examples/plot_roc.html.
    """
    fpr, tpr, __ = roc_curve(y_true, y_pred)
    roc_auc = auc(fpr, tpr)

    plt.plot(fpr, tpr, label='ROC curve (area = {:0.2f})'.format(roc_auc))
    plt.plot([0, 1], [0, 1], 'k--')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic')
    plt.legend(loc='lower right')
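roc_plot relies on roc_curve and auc from sklearn.metrics; a sketch on synthetic labels and scores:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc

rng = np.random.RandomState(0)
y_true = rng.randint(0, 2, 200)
y_pred = np.clip(0.6 * y_true + 0.6 * rng.rand(200), 0, 1)  # scores correlated with the labels
roc_plot(y_true, y_pred)
plt.show()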
Example #14
def entries_histogram(turnstile_weather):
    '''
    Before we perform any analysis, it might be useful to take a
    look at the data we're hoping to analyze. More specifically, let's
    examine the hourly entries in our NYC subway data and determine what
    distribution the data follows. This data is stored in a dataframe
    called turnstile_weather under the ['ENTRIESn_hourly'] column.

    Why don't you plot two histograms on the same axes, showing hourly
    entries when raining vs. when not raining. Here's an example of how
    to plot histograms with pandas and matplotlib:
    turnstile_weather['column_to_graph'].hist()

    Your histogram may look similar to the following graph:
    http://i.imgur.com/9TrkKal.png

    You can read a bit about using matplotlib and pandas to plot
    histograms:
    http://pandas.pydata.org/pandas-docs/stable/visualization.html#histograms

    You can look at the information contained within the turnstile weather data at the link below:
    https://www.dropbox.com/s/meyki2wl9xfa7yk/turnstile_data_master_with_weather.csv
    '''
    plt.figure()
    (turnstile_weather[turnstile_weather.rain==0].ENTRIESn_hourly).hist(bins=175) # histogram of hourly entries when it is not raining
    (turnstile_weather[turnstile_weather.rain==1].ENTRIESn_hourly).hist(bins=175) # histogram of hourly entries when it is raining
    plt.ylim(0, 45000)
    plt.xlim(0, 6000)
    return plt
Example #15
def plot_wav_fft(wav_filename, desc=None):
    plt.clf()
    plt.figure(num=None, figsize=(6, 4))
    sample_rate, X = scipy.io.wavfile.read(wav_filename)
    spectrum = np.fft.fft(X)
    freq = np.fft.fftfreq(len(X), 1.0 / sample_rate)

    plt.subplot(211)
    num_samples = 200  # an integer, so it can be used as a slice bound
    plt.xlim(0, num_samples / sample_rate)
    plt.xlabel("time [s]")
    plt.title(desc or wav_filename)
    plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples])
    plt.grid(True)

    plt.subplot(212)
    plt.xlim(0, 5000)
    plt.xlabel("frequency [Hz]")
    plt.xticks(np.arange(5) * 1000)
    if desc:
        desc = desc.strip()
        fft_desc = desc[0].lower() + desc[1:]
    else:
        fft_desc = wav_filename
    plt.title("FFT of %s" % fft_desc)
    plt.plot(freq, abs(spectrum), linewidth=5)
    plt.grid(True)

    plt.tight_layout()

    rel_filename = os.path.split(wav_filename)[1]
    plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0],
                bbox_inches='tight')
Example #16
def make_fish(zoom=False):
    plt.close(1)
    plt.figure(1, figsize=(6, 4))
    plt.plot(plot_limits['pitch'], plot_limits['rolldev'], '-g', lw=3)
    plt.plot(plot_limits['pitch'], -plot_limits['rolldev'], '-g', lw=3)
    plt.plot(pitch.midvals, roll.midvals, '.b', ms=1, alpha=0.7)

    p, r = make_ellipse()  # pitch, off nominal roll
    plt.plot(p, r, '-c', lw=2)

    gf = -0.08  # Fudge on pitch value for illustrative purposes
    plt.plot(greta['pitch'] + gf, -greta['roll'], '.r', ms=1, alpha=0.7)
    plt.plot(greta['pitch'][-1] + gf, -greta['roll'][-1], 'xr', ms=10, mew=2)

    if zoom:
        plt.xlim(46.3, 56.1)
        plt.ylim(4.1, 7.3)
    else:
        plt.ylim(-22, 22)
        plt.xlim(40, 180)
    plt.xlabel('Sun pitch angle (deg)')
    plt.ylabel('Sun off-nominal roll angle (deg)')
    plt.title('Mission off-nominal roll vs. pitch (5 minute samples)')
    plt.grid()
    plt.tight_layout()
    plt.savefig('fish{}.png'.format('_zoom' if zoom else ''))
Example #17
def make_entity_plot(filename, title, fixed_noip, fixed_ip, dynamic_noip, dynamic_ip):
    plt.figure(figsize=(12,5))

    plt.title("Settings comparison - " + title)
    
    plt.xlabel('Time (ms)', fontsize=12)
    plt.xlim([0,62000])

    x = 0
    barwidth = 0.5
    bargroupspacing = 1.5

    fixed_noip_mean,fixed_noip_conf = conf_stats(fixed_noip)
    fixed_ip_mean,fixed_ip_conf = conf_stats(fixed_ip)
    dynamic_noip_mean,dynamic_noip_conf = conf_stats(dynamic_noip)
    dynamic_ip_mean,dynamic_ip_conf = conf_stats(dynamic_ip)

    values = [fixed_noip_mean,fixed_ip_mean,dynamic_noip_mean, dynamic_ip_mean]
    errs = [fixed_noip_conf,fixed_ip_conf,dynamic_noip_conf, dynamic_ip_conf]

    y_pos = numpy.arange(len(values))
    plt.barh(y_pos, values, xerr=errs, align='center', color=['r', 'b', 'r', 'b'],  ecolor='black', alpha=0.7)
    plt.yticks(y_pos, ["Fixed | no I.P.", "Fixed | I.P.", "Dynamic | no I.P.", "Dynamic | I.P."])
    plt.savefig(output_file(filename))
    plt.clf()
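conf_stats and output_file are not defined on this page; hypothetical stand-ins, assuming conf_stats returns a mean and the half-width of a normal-approximation 95% confidence interval:

import numpy

def conf_stats(samples):
    # mean and 1.96 * standard error, a common 95% CI half-width
    samples = numpy.asarray(samples, dtype=float)
    mean = samples.mean()
    half_width = 1.96 * samples.std(ddof=1) / numpy.sqrt(len(samples))
    return mean, half_width

def output_file(filename):
    # the source project may prepend an output directory here
    return filename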
Example #18
def acf_plot_raw(type, date, lag=8, window=24, days=30):
  lag, window, days = int(lag), int(window), int(days)
  date = datetime.strptime(date, "%Y%m%d")

  target = int(buckify(window, date))

  cc = CrashCounts(type, window)
  t, counts = cc.crash_counts()

  target = t.index(target)
  counts = counts[target-days:target+1]
  t = [i - 0.02 for i in range(1, lag)]
  a = acf(counts, lag)
  plt.title("Global crash count sample ACF plot")
  plt.xlabel("Lag")
  plt.ylabel("ACF")
  plt.bar(t, a, width=0.04)

  err = 2 / sqrt(days)
  plt.plot([0, lag+1], [err, err], "r--")
  plt.plot([0, lag+1], [-err, -err], "r--")
  plt.plot([0, lag+1], [0, 0], "k-")

  plt.xlim(0, lag)
  plt.show()
Example #19
def build_plot(profilerResults):
    # Calculate each value.
    x = []
    mean = []
    std = []
    for t in range(profilerResults.getLookBack()*-1, profilerResults.getLookForward()+1):
        x.append(t)
        values = np.asarray(profilerResults.getValues(t))
        mean.append(values.mean())
        std.append(values.std())

    # Cleanup
    plt.clf()
    # Plot a line with the mean cumulative returns.
    plt.plot(x, mean, color='#0000FF')

    # Error bars starting on the first lookforward period.
    lookBack = profilerResults.getLookBack()
    firstLookForward = lookBack+1
    plt.errorbar(
        x=x[firstLookForward:], y=mean[firstLookForward:], yerr=std[firstLookForward:],
        capsize=3,
        ecolor='#AAAAFF', alpha=0.5
    )

    # Horizontal line at the level of the first cumulative return.
    # (axhline's xmin/xmax are axes fractions in [0, 1], so let it span the full width.)
    plt.axhline(
        y=mean[lookBack],
        color='#000000'
    )

    plt.xlim(profilerResults.getLookBack()*-1-0.5, profilerResults.getLookForward()+0.5)
    plt.xlabel('Time')
    plt.ylabel('Cumulative returns')
Example #20
def plot_fidelity_lorentzian(constants):
	"""
		Plots the Fidelity vs FSS curve with and without decoherence.
	"""

	qd = QuantumDot(constants.xtau, constants.xxtau, constants.ptau, constants.FSS, constants.crosstau)

	fss = np.linspace(-10., 10., 500)*1e-6

	qd.crosstau = 0.
	no_decoherence = np.array([qd.ideal_fidelity_lorentzian(f)[0] for f in fss])

	qd.crosstau = 1.
	with_decoherence = np.array([qd.ideal_fidelity_lorentzian(f)[0] for f in fss])

	fss = fss/1e-6
	decoherence = qd.ideal_fidelity_lorentzian(1e-6)[1]

	plt.figure(figsize = (16./1.3, 9./1.3))
	plt.plot(fss, no_decoherence, 'r--', fss, with_decoherence, 'b--')

	plt.xlim([-10, 10]) ; plt.ylim([0.45, 1])
	plt.xlabel('Fine structure splitting $eV$') ; plt.ylabel('Fidelity')
	plt.xticks(np.linspace(-10, 10, 11))
	plt.legend(['No decoherence', 'With $1^{st}$ coherence: ' + '{:.3g}'.format(decoherence)])
	plt.show()
Example #21
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # plot class samples
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=markers[idx], label=cl)

    # Highlight test samples
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        plt.scatter(X_test[:, 0],
                    X_test[:, 1],
                    facecolors='none',
                    edgecolors='black',
                    alpha=1.0,
                    linewidths=1,
                    marker='o',
                    s=55, label='test set')
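A usage sketch for plot_decision_regions on two iris features, with a classifier swapped in purely for illustration:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, y = iris.data[:, [0, 2]], iris.target  # two features give a 2-D surface
clf = LogisticRegression(max_iter=1000).fit(X, y)
plot_decision_regions(X, y, classifier=clf)
plt.legend(loc='upper left')
plt.show()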
Example #22
def plot_stack_candidates(tweets, cands, interval, start = 0, \
  end = MAX_TIME // 60, tic_inc = 120, save_to = None):
  '''
  Plots stackplot for the candidates in list cands over the time interval
  ''' 

  period = range(start, end, interval)
  percent_dict = tweets.mention_minute_percent(cands, interval, period)

  y = [] 
  fig = plt.figure(figsize = (FIGWIDTH, FIGHEIGHT))
  legends = [] 
  for candidate in percent_dict:
    y.append(percent_dict[candidate]) 
    legends.append(CANDIDATE_NAMES[candidate])
  polys = plt.stackplot(period, y)

  plt.title("Percentage of Mentions per {} minutes before, during, \
    and after debate".format(interval))
  plt.xlabel("Time")
  plt.ylabel("Fraction of Mentions")
  plt.legend(polys, legends)

  ticks_range = range(start, end, tic_inc)
  labels = list(map(lambda x: str(x - start) + " min", ticks_range))
  plt.xticks(ticks_range, labels, rotation = 'vertical')
  plt.xlim( (start, end) )
  plt.ylim( (0.0, 1.0))
  
  if save_to: 
    fig.savefig(save_to)
  plt.show()
Example #23
def plot_dens_par_comp_single_obs(obs, pars, comps, ax = None, legend = False, loc = 2, vline = None, xlim = None):
    """Density plot of results from both partitions and compositions with value from a single observation.
    
    """
    if not ax:
        fig = plt.figure(figsize = (3.5, 3.5))
        ax = plt.subplot(111)
    
    full_values = list(pars) + list(comps) + list([obs])
    min_plot = 0.9 * min(full_values)
    max_plot = 1.1 * max(full_values)
    xs = np.linspace(min_plot, max_plot, 200)
    cov_factor = 0.2
    dens_par = comp_dens(pars, cov_factor)
    dens_comp = comp_dens(comps, cov_factor)
    par_plot, = plt.plot(xs, dens_par(xs), c = '#228B22', linewidth=2)
    comp_plot, = plt.plot(xs, dens_comp(xs), c = '#CD69C9', linewidth=2)
    ymax = 1.1 * max([max(dens_par(xs)), max(dens_comp(xs))])
    plt.plot((obs, obs), (0, ymax), 'k-', linewidth = 2)
    if legend:
        plt.legend([par_plot, comp_plot], ['Partitions', 'Compositions'], loc = loc, prop = {'size': 10})
    ax.tick_params(axis = 'both', which = 'major', labelsize = 8)
    if xlim is not None:
        plt.xlim(xlim)
    else:
        plt.xlim((0.9 * min(full_values), 1.1 * max(full_values)))
    return ax
Example #24
def plot_dens(obs, expc, obs_type, ax = None, legend = False, loc = 2, vline = None, xlim = None):
    """Plot the density of observed and expected values, with spatial and temporal observations 
    
    distinguished by color.
    
    """
    if not ax:
        fig = plt.figure(figsize = (3.5, 3.5))
        ax = plt.subplot(111)
    
    obs_spatial = [obs[i] for i in range(len(obs)) if obs_type[i] == 'spatial']
    obs_temporal = [obs[i] for i in range(len(obs)) if obs_type[i] == 'temporal']
    full_values = list(obs) + list(expc)
    min_plot = 0.9 * min(full_values)
    max_plot = 1.1 * max(full_values)
    xs = np.linspace(min_plot, max_plot, 200)
    cov_factor = 0.2
    dens_obs_spatial = comp_dens(obs_spatial, cov_factor)
    dens_obs_temporal = comp_dens(obs_temporal, cov_factor)
    dens_expc = comp_dens(expc, cov_factor)
    spat, = plt.plot(xs, dens_obs_spatial(xs), c = '#EE4000', linewidth=2)
    temp, = plt.plot(xs, dens_obs_temporal(xs), c = '#1C86EE', linewidth=2)
    feas, = plt.plot(xs, dens_expc(xs), 'k-', linewidth=2)
    if vline is not None:
        ymax = 1.1 * max([max(dens_obs_spatial(xs)), max(dens_obs_temporal(xs)), max(dens_expc(xs))])
        plt.plot((vline, vline), (0, ymax), 'k--')
    if legend:
        plt.legend([spat, temp, feas], ['Spatial', 'Temporal', 'Feasible Set'], loc = loc, prop = {'size': 8})
    ax.tick_params(axis = 'both', which = 'major', labelsize = 6)
    if xlim is not None:
        plt.xlim(xlim)
    return ax
Example #25
def plot_dens_par_comp(obs, pars, comps, ax = None, legend = False, loc = 2, vline = None, xlim = None):
    """Density plot of the spatial and temporal data pooled together, and results from both partitions and compositions.
    
    """
    if not ax:
        fig = plt.figure(figsize = (3.5, 3.5))
        ax = plt.subplot(111)
    
    full_values = list(obs) + list(pars) + list(comps)
    min_plot = 0.9 * min(full_values)
    max_plot = 1.1 * max(full_values)
    xs = np.linspace(min_plot, max_plot, 200)
    cov_factor = 0.2
    dens_obs = comp_dens(obs, cov_factor)
    dens_par = comp_dens(pars, cov_factor)
    dens_comp = comp_dens(comps, cov_factor)
    obs_plot, = plt.plot(xs, dens_obs(xs), 'k-', linewidth=2)
    par_plot, = plt.plot(xs, dens_par(xs), c = '#228B22', linewidth=2)
    comp_plot, = plt.plot(xs, dens_comp(xs), c = '#CD69C9', linewidth=2)
    if vline is not None:
        ymax = 1.1 * max([max(dens_obs(xs)), max(dens_par(xs)), max(dens_comp(xs))])
        plt.plot((vline, vline), (0, ymax), 'k--')
    if legend:
        plt.legend([obs_plot, par_plot, comp_plot], ['Empirical', 'Partitions', 'Compositions'], loc = loc, prop = {'size': 8})
    ax.tick_params(axis = 'both', which = 'major', labelsize = 6)
    if xlim is not None:
        plt.xlim(xlim)
    return ax
Example #26
def plot_convergence():

    data = np.loadtxt("smooth-error.out")

    nx = data[:,0]
    aerr = data[:,1]

    ax = plt.subplot(111)
    ax.set_xscale('log')
    ax.set_yscale('log')

    plt.scatter(nx, aerr, marker="x", color="r")
    plt.plot(nx, aerr[0]*(nx[0]/nx)**2, "--", color="k")

    plt.xlabel("number of zones")
    plt.ylabel("L2 norm of abs error")

    plt.title(r"convergence for smooth advection problem", fontsize=11)

    f = plt.gcf()
    f.set_size_inches(5.0,5.0)

    plt.xlim(8,256)

    plt.savefig("smooth_converge.eps", bbox_inches="tight")
Example #27
def plot_obs_expc_new(obs, expc, expc_upper, expc_lower, analysis, log, ax = None):
    """Modified version of the obs-expc plot suggested by R2. The points are separated by whether their CIs are above, below,

    or overlapping the empirical value.
    Input:
    obs - list of observed values
    expc - list of mean simulated values for the corresponding observed values
    expc_upper - list of the 97.5% quantile of the simulated values
    expc_lower - list of the 2.5% quantile of the simulated values
    analysis - whether it is partitions or compositions
    log - whether the y axis is to be transformed. If True, expc/obs is plotted. If False, expc - obs is plotted.
    ax - whether the plot is generated on a given figure, or a new plot object is to be created

    """
    obs, expc, expc_upper, expc_lower = list(obs), list(expc), list(expc_upper), list(expc_lower)
    if not ax:
        fig = plt.figure(figsize = (3.5, 3.5))
        ax = plt.subplot(111)
    
    ind_above = [i for i in range(len(obs)) if expc_lower[i] > obs[i]]
    ind_below = [i for i in range(len(obs)) if expc_upper[i] < obs[i]]
    ind_overlap = [i for i in range(len(obs)) if expc_lower[i] <= obs[i] <= expc_upper[i]]
    
    if log:
        expc_standardize = [expc[i] / obs[i] for i in range(len(obs))]
        expc_upper_standardize = [expc_upper[i] / obs[i] for i in range(len(obs))]
        expc_lower_standardize = [expc_lower[i] / obs[i] for i in range(len(obs))]
        axis_min = 0.9 * min([expc_lower_standardize[i] for i in range(len(expc_lower_standardize)) if expc_lower_standardize[i] != 0])
        axis_max = 1.5 * max(expc_upper_standardize)
    else:
        expc_standardize = [expc[i] - obs[i] for i in range(len(obs))]
        expc_upper_standardize = [expc_upper[i] - obs[i] for i in range(len(obs))]
        expc_lower_standardize = [expc_lower[i] - obs[i] for i in range(len(obs))]
        axis_min = 1.1 * min(expc_lower_standardize)
        axis_max = 1.1 * max(expc_upper_standardize)
   
    if analysis == 'partition': col = '#228B22'
    else: col = '#CD69C9'
    ind_full = [] 
    for index in [ind_below, ind_overlap, ind_above]:
        expc_standardize_ind = [expc_standardize[i] for i in index]
        sort_ind_ind = sorted(range(len(expc_standardize_ind)), key = lambda i: expc_standardize_ind[i])
        sorted_index = [index[i] for i in sort_ind_ind]
        ind_full.extend(sorted_index)

    xaxis_max = len(ind_full)
    for i, ind in enumerate(ind_full):
        plt.plot([i, i],[expc_lower_standardize[ind], expc_upper_standardize[ind]], '-', c = col, linewidth = 0.4)
    plt.scatter(range(len(ind_full)), [expc_standardize[i] for i in ind_full], c = col,  edgecolors='none', s = 8)    
    if log: 
        plt.plot([0, xaxis_max + 1], [1, 1], 'k-', linewidth = 1.5)
        ax.set_yscale('log')
    else: plt.plot([0, xaxis_max + 1], [0, 0], 'k-', linewidth = 1.5)
    plt.plot([len(ind_below) - 0.5, len(ind_below) - 0.5], [axis_min, axis_max], 'k--')
    plt.plot([len(ind_below) + len(ind_overlap) - 0.5, len(ind_below) + len(ind_overlap) - 0.5], [axis_min, axis_max], 'k--')
    plt.xlim(0, xaxis_max)
    plt.ylim(axis_min, axis_max)
    plt.tick_params(axis = 'y', which = 'major', labelsize = 8, labelleft = True)
    plt.tick_params(axis = 'x', which = 'major', top = False, bottom = False, labelbottom = False)
    return ax
Example #28
def plot_per_min_debate(tweets, cands, interval, \
  start = DEBATE_START // 60, end = DEBATE_END // 60, tic_inc = 15, save_to = None):
  '''
  Plots data from beg of debate to end. For Task 4a. 
  Note: start and end should be in minutes, not seconds
  '''

  fig = plt.figure(figsize = (FIGWIDTH, FIGHEIGHT))

  period = range(start, end, interval)
  c_dict = tweets.get_candidate_mentions_per_minute(cands, interval, period)

  for candidate in c_dict:
    plt.plot(period, c_dict[candidate], label = CANDIDATE_NAMES[candidate])

  if interval == 1: 
    plt.title("Mentions per Minute During Debate")
  else: 
    plt.title("Mentions per {} minutes before, during, and after debate".\
      format(interval))
  plt.xlabel("Time")
  plt.ylabel("Number of Tweets")
  plt.legend()

  ticks_range = range(start, end, tic_inc)
  labels = list(map(lambda x: str(x - start) + " min", ticks_range))
  plt.xticks(ticks_range, labels, rotation = 'vertical')
  plt.xlim( (start, end) )
  
  if save_to: 
    fig.savefig(save_to)
  plt.show()
Example #29
def plt_data():
    t = [[0,1], [1,0], [1, 1], [0, 0]]
    t2 = [1, 1, 1, 0]
    X = np.array(t)
    Y = np.array(t2)

    h = .02  # step size in the mesh

    logreg = linear_model.LogisticRegression(C=1e5)

    # we create an instance of LogisticRegression and fit the data.
    logreg.fit(X, Y)
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1, figsize=(4, 3))
    plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())

    plt.show()
Example #30
def plotFFT(self):
	# Generates plot of the FFT output. To view, run plotFFT.py in a separate terminal
	figure1 = plt.figure(num= None, figsize=(12,12), dpi=80, facecolor='w', edgecolor='w')
	plot1 = figure1.add_subplot(111)
	line1, = plot1.plot( np.arange(0,512,0.5), np.zeros(1024), 'g-')
	plt.xlabel('freq (MHz)',fontsize = 12)
	plt.ylabel('Amplitude',fontsize = 12)
	plt.title('Pre-mixer FFT',fontsize = 12)
	plt.xticks(np.arange(0,512,50))
	plt.xlim((0,512))
	plt.grid()
	plt.show(block = False)
	count = 0
	stop = 1.0e6
	while(count < stop):
		overflow = np.fromstring(self.fpga.read('overflow', 4), dtype = '>B')
		print(overflow)
		self.fpga.write_int('fft_snap_ctrl',0)
		self.fpga.write_int('fft_snap_ctrl',1)
		fft_snap = (np.fromstring(self.fpga.read('fft_snap_bram',(2**9)*8),dtype='>i2')).astype('float')
		I0 = fft_snap[0::4]
		Q0 = fft_snap[1::4]
		I1 = fft_snap[2::4]
		Q1 = fft_snap[3::4]
		mag0 = np.sqrt(I0**2 + Q0**2)
		mag1 = np.sqrt(I1**2 + Q1**2)
		fft_mags = np.hstack(list(zip(mag0, mag1)))  # interleave the two channels
		plt.ylim((0,np.max(fft_mags) + 300.))
		line1.set_ydata((fft_mags))
		plt.draw()
		count += 1
Example #31
    plt.figure('Gaussian Process Regression')
    plt.suptitle('Gaussian Process Regression')
    plt.subplot(2, 1, 1)
    plt.plot(t_list,
             prior_samples,
             c=proj_cmap['grey'],
             linewidth=0.3,
             alpha=0.3)
    plt.plot(t_list,
             np.mean(prior_samples, axis=1),
             c=proj_cmap['blue'],
             linewidth=2.0,
             alpha=0.6)
    plt.legend(['Prior'], loc='upper right', fontsize=8)
    plt.xlim([np.min(t_list), np.max(t_list)])
    plt.xticks(fontsize=6)
    plt.yticks(fontsize=6)

    # Draw posterior samples.
    t_obs = [1.1, 1.0, 4.0, 6.0, 7.0, 7.1]
    x_obs = [1.0, 1.0, 0.5, 1.0, 2.0, 2.0]
    obs_n = len(x_obs)

    # %% Compute the prior and posterior distribution parameters; 1 denotes observed values, 2 denotes unknown values.
    # Prior parameter mu.
    mu_1 = np.zeros_like(t_obs).reshape(-1, 1)
    mu_2 = np.zeros_like(t_list).reshape(-1, 1)

    # Prior parameter Sigma.
    t_total = np.hstack((np.array(t_obs), t_list))
Example #32
    deform_floor_env = (gap == 'deform_floor_env')
    soft_floor_env = (gap == 'soft_floor_env')
    low_power_env = (gap == 'low_power_env')
    emf_power_env = (gap == 'emf_power_env')
    joint_gap_env = (gap == 'joint_gap_env')
    max_tar_vel = 20

    sess = Sim2Real2DGridSearch(gap=gap, env_name=env_name,true_scales=true_scales, policy=policy,trials=trials,N=N,processes=processes,  control_mode=control_mode,max_tar_vel=20.0,
        soft_floor_env=soft_floor_env,deform_floor_env=deform_floor_env,low_power_env=low_power_env,emf_power_env=emf_power_env,
        random_IC =random_IC, init_noise = init_noise, obs_noise=obs_noise, act_noise=act_noise, shrink_IC_dist=shrink_IC_dist, seeded_IC=seeded_IC, joint_gap_env=joint_gap_env)
    xlist, ylist = sess.Search(bounds=np.array([0.5, 1.5]),x_samples=None)

    import matplotlib.pyplot as plt
    plt.figure()
    plt.scatter(xlist[:, 0], xlist[:, 2], c=ylist)#, vmin=0.0, vmax=10000.0)
    plt.xlim([0.5, 1.5])
    plt.ylim([0.5, 1.5])
    plt.colorbar()
    plt.xlabel('Front leg scale')
    plt.ylabel('Back leg scale')
    plt.scatter(true_scales[0],true_scales[1],c='r')
    plt.show()
    #import matplotlib
    #matplotlib.use('TkAgg')
    #import matplotlib.pyplot as plt
    bp()
    #ind = np.argmax(ylist)
    #plt.figure()
    #plt.plot(xlist,ylist)
    #plt.plot(ind,ylist[ind],'ro')
    #plt.show()
Example #33
ax1.plot(Outputs[144:],
         color="blue",
         linestyle="-",
         linewidth=1.5,
         label="Measurements")
ax1.plot(y_pred_dep,
         color="green",
         linestyle="--",
         linewidth=1.5,
         label="Proposed model")

plt.legend(loc='upper right')
plt.xticks(fontsize=8, fontweight='normal')
plt.yticks(fontsize=8, fontweight='normal')
plt.xlabel('Time (Month)', fontsize=10)
plt.ylabel('Water table depth (m)', fontsize=10)
plt.xlim(0, 25)
plt.savefig('results.png', format='png')
plt.show()

##### Loading Model #####
model = torch.load('checkpoints/LSTM_FC.pth')
model.eval()
y_pred_dep_ = model(X_test_dep_std).detach().numpy()
y_pred_dep = ss_y_dep.inverse_transform(y_pred_dep_[0, 144:])

print('the value of R-squared of Evaporation is ',
      r2_score(Outputs[144:], y_pred_dep))
print('the value of Root mean squared error of Evaporation is ',
      rmse(Outputs[144:], y_pred_dep))
Example #34
def mse_gauss_func(x, t, w):
	# mean squared error of the Gaussian-basis model on data (x, t)
	y = gauss_func(w, x)
	mse = np.mean((y - t)**2)
	return mse

def fit_gauss_func(x, t, m):
	mu = np.linspace(5, 30, m)
	s = mu[1] - mu[0]
	n = x.shape[0]
	psi = np.ones((n, m+1))
	for j in range(m):
		psi[:, j] = gauss(x, mu[j], s)
	psi_T = np.transpose(psi)

	b = np.linalg.inv(psi_T.dot(psi))
	c = b.dot(psi_T)
	w = c.dot(t)
	return w

def show_gauss_func(w):
	xb = np.linspace(X_min, X_max, 100)
	y = gauss_func(w, xb)
	plt.plot(xb, y, c=[.5, .5, .5], lw=4)

M = 4
W = fit_gauss_func(X, T, M)
show_gauss_func(W)
plt.plot(X, T, marker='o', linestyle='None', color='cornflowerblue', markeredgecolor='black')
plt.xlim(X_min, X_max)
plt.grid(True)
mse = mse_gauss_func(X, T, W)
print('W=' + str(np.round(W, 1)))
print('SD={0: .2f} cm'.format(np.sqrt(mse)))
plt.show()
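gauss and gauss_func are defined elsewhere in the source script (along with the data X, T and the range X_min, X_max); plausible stand-ins consistent with fit_gauss_func above, where w holds m basis weights plus a bias w[m], would be:

def gauss(x, mu, s):
	# Gaussian basis function centered at mu with width s
	return np.exp(-(x - mu)**2 / (2 * s**2))

def gauss_func(w, x):
	# linear model over m Gaussian bases plus a constant term w[m]
	m = len(w) - 1
	mu = np.linspace(5, 30, m)
	s = mu[1] - mu[0]
	y = np.zeros_like(x, dtype=float)
	for j in range(m):
		y = y + w[j] * gauss(x, mu[j], s)
	return y + w[m]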
Example #35
         np.max(abs(res.sgnl_max_temporal)**2), 'k', label = 'sgnl', linewidth = 2)
p = res.get_idlr(x)
line_idlr_t, =ax1.plot((p.T_mks - tc)* 1e15,   abs(p.AT)**2 /
         np.max(abs(res.idlr_max_temporal)**2), 'r', label = 'idlr', linewidth = 2)
ax1.set_xlabel("Time (fs)")
ax1.set_ylabel("Normalized Power")
plt.legend()
ax1.set_xlim(-3000*pump_pulse_length,3000*pump_pulse_length)
ax1.set_ylim(0, 1)

ax2 = plt.subplot(212)
line_idlr_f, = plt.plot(p.wl_mks*1.0e9, 
         abs(p.AW)**2 / np.max(res.idlr_max_field**2),
         label = 'idler', linewidth = 2)        
plt.ylim(0, 1) 
plt.xlim(2700, 3900)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Normalized Power (arb.)")        
axins = inset_axes(ax2, width = '20%',height = '10%', loc = 1)
line_inset, = axins.plot(1000*np.array(res.zs[0:0]),idlr_power_series[0:0] )
axins.set_ylim(0, 1.1*max(idlr_power_series))
axins.set_xlim(res.zs[0], 1000.0*res.zs[-1])
axins.set_xticks([])
axins.set_yticks([])    

def update(ctr):
    x = ctr % res.n_saves
    p = res.get_pump(x)
    ic = np.argmax(abs(p.AT)**2)
    tc = p.T_mks[ic]
    x_ax = (p.T_mks - tc)* 1e15
Example #36
    # Histogram
    ang_hist, bin_edges = np.histogram(np.ravel(angles[isfour, :]),
                                       bins=bins,
                                       density=True)
    #plt.plot(bins[:44]+dbin/2,ang_hist,'.-',color='r')
    #plt.plot(bins[:44]+dbin/2,ang_hist,'.-',color=vmap(w),label=vList[w])

    ## angles
    #plt.xlabel('time')
    #plt.ylabel('angle')
    #plt.legend(loc=3,ncol=2)
    ## correlations
    plt.xlabel('time')
    plt.ylabel('C(t)')
    #plt.legend(loc=3,ncol=2)
    plt.xlim(0, 50000)
    plt.ylim(-0.5, 1.05)
    ## Histograms
    plt.xlabel('angle')
    plt.ylabel('P(angle)')

    plt.title('Alignment ' + J)
    #plt.title('Velocity ' + str(v))
    w += 1

#plt.figure(figsize=(10,7),linewidth=2.0)
#for r in range(len(RList)):
#plt.errorbar(vval,avtot[r,:],yerr=davtot[r,:],color=Rmap(r),marker='o',label='R=' + RList[r])
#plt.plot(vval,109.47*vval/vval,'k--')
#plt.xlim(0,2)
#plt.ylim(0,140)
Example #37
# Grab the image channels, initialize the tuple of colors
# and the figure
chans = cv2.split(image)
colors = ("b", "g", "r")
plt.figure()
plt.title("'Flattened' Color Histogram")
plt.xlabel("Bins")
plt.ylabel("# of Pixels")

# Loop over the image channels
for (chan, color) in zip(chans, colors):
    # Create a histogram for the current channel and plot it
    hist = cv2.calcHist([chan], [0], None, [256], [0, 256])
    plt.plot(hist, color=color)
    plt.xlim([0, 256])

# Let's move on to 2D histograms -- I am reducing the
# number of bins in the histogram from 256 to 32 so we
# can better visualize the results
fig = plt.figure()

# Plot a 2D color histogram for green and blue
ax = fig.add_subplot(131)
hist = cv2.calcHist([chans[1], chans[0]], [0, 1], None, [32, 32],
                    [0, 256, 0, 256])
p = ax.imshow(hist, interpolation="nearest")
ax.set_title("2D Color Histogram for G and B")
plt.colorbar(p)

# Plot a 2D color histogram for green and red
Example #38
import numpy as np
import matplotlib.pyplot as plt

n=6

X = np.arange(n)
Y1 = (1 - X / float(n)) * np.random.uniform(0.5, 1.0, n)

plt.bar(X, +Y1)
plt.xlim(-.5, n)
plt.xticks(())
plt.ylim(-1.25, 1.25)
plt.yticks(())

plt.show()
Example #39
def main():
	AllData = {}
	nstart = True
	y_score_Avg_PerTile = []
	y_score_PcS_PerTile = []
	y_ref_PerTile = []

	unique_labels = []
	with open(FLAGS.labels_names, "r") as f:
		for line in f:
			line = line.replace('\r','\n')
			line = line.split('\n')
			for eachline in line:
				if len(eachline)>0:
					unique_labels.append(eachline)
	TotNbTiles = 0
	if ', ' in FLAGS.file_stats:
		file1 = FLAGS.file_stats.split(', ')[0]
		file2 = FLAGS.file_stats.split(', ')[1]
		corr = ''
		with open(file1) as f:
			for line in f:
				#print(line)
				basename = line.split()[0]
				AllData[basename] = {}				
				tmp_out = line.split('[')[1]
				tmp_out = tmp_out.split(']')[0]
				AllData[basename]['Labelvec'] = [float(x) for x in tmp_out.split(',')]

				tmp_out = line.split('Percent_Selected:')[1]
				PcSel = tmp_out.split('Average_Probability:')[0].split()
				AvgPrb = tmp_out.split('Average_Probability:')[1].split()
				tmp = 0
				AllData[basename]['Percent_Selected'] = {}
				for eachlabel in unique_labels:
					AllData[basename]['Percent_Selected'][eachlabel] = float(PcSel[tmp])
					tmp += 1
				tmp = 0
				AllData[basename]['Avg_Prob'] = {}
				for eachlabel in unique_labels:
					AllData[basename]['Avg_Prob'][eachlabel] = float(AvgPrb[tmp])
					tmp += 1
				#print(line)
				#print(basename)
				#print(AllData[basename])

		with open(file2) as f:
			for line in f:
				#print(line)
				basename = line.split()[0]


				tmp_out = line.split('Percent_Selected:')[1]
				PcSel = tmp_out.split('Average_Probability:')[0].split()
				AvgPrb = tmp_out.split('Average_Probability:')[1].split()
				if basename in AllData.keys():
					tmp = 0
					for eachlabel in unique_labels:
						AllData[basename]['Percent_Selected'][eachlabel] = float(PcSel[tmp]) +AllData[basename]['Percent_Selected'][eachlabel]
						AllData[basename]['Percent_Selected'][eachlabel] = AllData[basename]['Percent_Selected'][eachlabel] / 2.0
						tmp += 1
					tmp = 0
					for eachlabel in unique_labels:
						AllData[basename]['Avg_Prob'][eachlabel] = float(AvgPrb[tmp]) + AllData[basename]['Avg_Prob'][eachlabel]
						AllData[basename]['Avg_Prob'][eachlabel] = AllData[basename]['Avg_Prob'][eachlabel] / 2.0
						tmp += 1					
				else:
					AllData[basename] = {}
					tmp_out = line.split('[')[1]
					tmp_out = tmp_out.split(']')[0]
					AllData[basename]['Labelvec'] = [float(x) for x in tmp_out.split(',')]
					tmp = 0
					AllData[basename]['Percent_Selected'] = {}
					for eachlabel in unique_labels:
						AllData[basename]['Percent_Selected'][eachlabel] = float(PcSel[tmp])
						tmp += 1
					tmp = 0
					AllData[basename]['Avg_Prob'] = {}
					for eachlabel in unique_labels:
						AllData[basename]['Avg_Prob'][eachlabel] = float(AvgPrb[tmp])
						tmp += 1

				#print(line)
				#print(basename)
				#print(AllData[basename])


		y_score = []
		y_score_PcSelect = []
		y_ref = []
		n_classes = len(unique_labels)
		output = open(os.path.join(FLAGS.output_dir, 'out2_perSlideStats_avg.txt'),'w')
		for basename in AllData.keys():
			#print(basename)
			#print(AllData[basename])
			output.write("%s\ttrue_label: %s\t" % (basename, AllData[basename]['Labelvec']) )
			tmp_prob = []
			output.write("Percent_Selected: ")
			for eachlabel in unique_labels:
				tmp_prob.append(AllData[basename]['Percent_Selected'][eachlabel])
				output.write("%f\t" % (AllData[basename]['Percent_Selected'][eachlabel]) )
			y_score_PcSelect.append(tmp_prob)
			tmp_prob = []
			output.write("Average_Probability: ")
			for eachlabel in unique_labels: 
				tmp_prob.append(AllData[basename]['Avg_Prob'][eachlabel])
				output.write("%f\t" % (AllData[basename]['Avg_Prob'][eachlabel]) )
			output.write("\n")
			y_score.append(tmp_prob)
			y_ref.append(AllData[basename]['Labelvec'])
		output.close()
		print("y_score")
		print(y_score)
		print("y_ref")
		print(y_ref)
		print("y_score_PcSelect")
		print(y_score_PcSelect)

	else:
		with open(FLAGS.file_stats) as f:
			for line in f:
				print(line)
				if line.find('.dat') != -1:
					filename = line.split('.dat')[0]
				elif line.find('.jpeg') != -1:
					filename = line.split('.jpeg')[0]
				elif line.find('.net2048') != -1:
					filename = line.split('.net2048')[0]
				else:
					continue
				basename = '_'.join(filename.split('_')[:-2])
				#print("basename")
				#print(basename)
				#print("filename")
				#print(filename)

				# Check if tile should be considered for ROC (classified as LUAD) or not (Normal or LUSC)
				corr = ''
				analyze = True
				if os.path.isfile(FLAGS.ref_stats):
					corr = 'corrected_'
					with open(FLAGS.ref_stats) as fstat2:
						for line2 in fstat2:
							if filename in line2:
								print("Found:")
								print(line2)
								if "False" in line2:
									analyze = False
								print(analyze)
								break
				if analyze == False:
					print("continue")
					continue
				TotNbTiles += 1

				ExpectedProb = line.split('[')[1]
				ExpectedProb = ExpectedProb.split(']')[0]
				ExpectedProb = ExpectedProb.split()
				try: # mutations format
					IncProb = line.split('[')[2]
					IncProb = IncProb.split(']')[0]
					IncProb = IncProb.split()
				except:
					IncProb = []
					minProb_Indx = 1
					maxProb_Indx = 1
					minProb_Val = 2
					maxProb_Val = 0
					for kL in range(len(ExpectedProb)):
						if kL ==0:
							#IncProb.append(float(ExpectedProb[0]))
							IncProb.append(0)
						else:
							IncProb.append(float(ExpectedProb[kL]) / (1-float(ExpectedProb[0])))
							if IncProb[kL] < minProb_Val:
								minProb_Val = IncProb[kL]
								minProb_Indx = kL
							if IncProb[kL] >= maxProb_Val:
								maxProb_Val = IncProb[kL]
								maxProb_Indx = kL


					for kL in range(len(ExpectedProb)):
						ExpectedProb[kL] = 0
					try:
						True_Label = int(line.split('labels:')[1])
					except:
						# old filename format - assuming 2 classes only
						True_Label = line.split()[1]
						if True_Label == 'True':
							True_Label = maxProb_Indx
						else:
							True_Label = minProb_Indx

					print("True label: %d " % True_Label)
					ExpectedProb[True_Label] = 1
					'''
					IncProb = [0, 0, 0]
					IncProb[0] = float(ExpectedProb[0])
					IncProb[1] = float(ExpectedProb[1]) / (1-float(ExpectedProb[0]))
					IncProb[2] = float(ExpectedProb[2]) / (1-float(ExpectedProb[0]))
					tmp = [IncProb[1], IncProb[2]]
					ExpectedProb = [0, 0, 0]
					if 'True' in line:
						ExpectedProb[IncProb.index(max(tmp))] = 1
					else:
						ExpectedProb[IncProb.index(min(tmp))] = 1
					'''


				true_label = []
				for iprob in ExpectedProb:
					true_label.append(float(iprob))
				true_label.pop(0)
				OutProb = []
				for iprob in IncProb:
					OutProb.append(float(iprob))
				OutProb.pop(0)
				print(true_label)
				print(OutProb)

				tmp_prob_avg = []
				tmp_prob_pcs = []
				if basename in AllData:
					AllData[basename]['NbTiles'] += 1
					for eachlabel in range(len(OutProb)):
						AllData[basename]['Probs'][eachlabel] = AllData[basename]['Probs'][eachlabel] + OutProb[eachlabel]
						#if OutProb[eachlabel] >= 0.5:
						if OutProb[eachlabel] == max(OutProb):
							AllData[basename]['Nb_Selected'][eachlabel] = AllData[basename]['Nb_Selected'][eachlabel] + 1.0
				else:
					AllData[basename] = {}
					AllData[basename]['NbTiles'] = 1
					AllData[basename]['Labelvec'] = true_label
					AllData[basename]['Nb_Selected'] = {}
					AllData[basename]['Probs'] = {}
					for eachlabel in range(len(OutProb)):
						AllData[basename]['Nb_Selected'][eachlabel] = 0.0
						#AllData[basename]['LabelIndx_'+unique_labels(eachlabel)] = true_label(eachlabel)
						AllData[basename]['Probs'][eachlabel] = OutProb[eachlabel]
						#if OutProb[eachlabel] >= 0.5:
						#print(eachlabel)
						#print(OutProb[eachlabel])
						#print(max(OutProb[eachlabel]))
						if OutProb[eachlabel] == max(OutProb):
							AllData[basename]['Nb_Selected'][eachlabel] = 1.0
					nstart = False

				for eachlabel in range(len(OutProb)):
					tmp_prob_avg.append(OutProb[eachlabel])
					if OutProb[eachlabel] == max(OutProb):
						tmp_prob_pcs.append(1.)
					else:
						tmp_prob_pcs.append(0.)
					
				y_score_Avg_PerTile.append(tmp_prob_avg)
				y_score_PcS_PerTile.append(tmp_prob_pcs)
				y_ref_PerTile.append(AllData[basename]['Labelvec'])



		print("%d tiles used for the ROC curves" % TotNbTiles)
		output = open(os.path.join(FLAGS.output_dir, corr + 'out2_perSlideStats.txt'),'w')
		y_score = []
		y_score_PcSelect = []
		y_ref = []
		n_classes = len(unique_labels)
		print(unique_labels)
		print(AllData)
		for basename in AllData.keys():
			output.write("%s\ttrue_label: %s\t" % (basename, AllData[basename]['Labelvec']) )
			tmp_prob = []
			AllData[basename]['Percent_Selected'] = {}
			output.write("Percent_Selected: ")
			for eachlabel in range(len(unique_labels)):
				print(eachlabel)
				print(float(AllData[basename]['NbTiles']))
				print(AllData[basename]['Nb_Selected'][eachlabel])
				AllData[basename]['Percent_Selected'][eachlabel] = AllData[basename]['Nb_Selected'][eachlabel] / float(AllData[basename]['NbTiles'])
				tmp_prob.append(AllData[basename]['Percent_Selected'][eachlabel])
				output.write("%f\t" % (AllData[basename]['Percent_Selected'][eachlabel]) )
			y_score_PcSelect.append(tmp_prob)

			AllData[basename]['Avg_Prob'] = {}
			tmp_prob = []
			output.write("Average_Probability: ")
			for eachlabel in range(len(AllData[basename]['Probs'])): 
				AllData[basename]['Avg_Prob'][eachlabel] = AllData[basename]['Probs'][eachlabel] / float(AllData[basename]['NbTiles'])
				tmp_prob.append(AllData[basename]['Avg_Prob'][eachlabel])
				output.write("%f\t" % (AllData[basename]['Avg_Prob'][eachlabel]) )
			output.write("\n")
			y_score.append(tmp_prob)
			y_ref.append(AllData[basename]['Labelvec'])


		output.close()

	## Compute ROC per tile
	y_score_Avg_PerTile = np.array(y_score_Avg_PerTile)
	y_score_PcS_PerTile = np.array(y_score_PcS_PerTile)
	y_ref_PerTile = np.array(y_ref_PerTile)
	fpr = dict()
	tpr = dict()
	thresholds = dict()
	opt_thresh = dict()
	roc_auc = dict()
	fpr_PcSel = dict()
	tpr_PcSel = dict()
	roc_auc_PcSel = dict()
	print("n_classes")
	print(n_classes)

	for i in range(n_classes):
		print(y_ref_PerTile[:, i], y_score_Avg_PerTile[:, i], y_score_PcS_PerTile[:, i])
		fpr[i], tpr[i], thresholds[i] = roc_curve(y_ref_PerTile[:, i], y_score_Avg_PerTile[:, i])
		roc_auc[i] = auc(fpr[i], tpr[i])

		fpr_PcSel[i], tpr_PcSel[i], _ = roc_curve(y_ref_PerTile[:, i], y_score_PcS_PerTile[:, i])
		roc_auc_PcSel[i] = auc(fpr_PcSel[i], tpr_PcSel[i])
		euc_dist = []
		try:
			for jj in range(len(fpr[i])):
				euc_dist.append( euclidean_distances([[0, 1]], [[fpr[i][jj], tpr[i][jj]]]) )
			opt_thresh[i] = thresholds[i][euc_dist.index(min(euc_dist))]
		except:
			opt_thresh[i] = 0

	# Compute micro-average ROC curve and ROC area
	fpr["micro"], tpr["micro"], _ = roc_curve(y_ref_PerTile.ravel(), y_score_Avg_PerTile.ravel())
	roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

	fpr_PcSel["micro"], tpr_PcSel["micro"], thresholds["micro"] = roc_curve(y_ref_PerTile.ravel(), y_score_PcS_PerTile.ravel())
	roc_auc_PcSel["micro"] = auc(fpr_PcSel["micro"], tpr_PcSel["micro"])
	euc_dist = []
	for jj in range(len(fpr_PcSel["micro"])):
		euc_dist.append( euclidean_distances([[0, 1]], [[fpr_PcSel["micro"][jj], tpr_PcSel["micro"][jj]]]) )
	print(min(euc_dist))
	print(euc_dist.index(min(euc_dist)))
	print(thresholds["micro"])
	opt_thresh["micro"] = thresholds["micro"][euc_dist.index(min(euc_dist))]


	## Compute macro-average ROC curve and ROC area
	# First aggregate all false positive rates
	all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
	all_fpr_PcSel = np.unique(np.concatenate([fpr_PcSel[i] for i in range(n_classes)]))

	# Then interpolate all ROC curves at this points
	mean_tpr = np.zeros_like(all_fpr)
	for i in range(n_classes):
		mean_tpr += interp(all_fpr, fpr[i], tpr[i])

	mean_tpr_PcSel = np.zeros_like(all_fpr_PcSel)
	for i in range(n_classes):
		mean_tpr_PcSel += interp(all_fpr_PcSel, fpr_PcSel[i], tpr_PcSel[i])

	# Finally average it and compute AUC
	mean_tpr /= n_classes
	fpr["macro"] = all_fpr
	tpr["macro"] = mean_tpr
	roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

	mean_tpr_PcSel /= n_classes
	fpr_PcSel["macro"] = all_fpr_PcSel
	tpr_PcSel["macro"] = mean_tpr_PcSel
	roc_auc_PcSel["macro"] = auc(fpr_PcSel["macro"], tpr_PcSel["macro"])


	# save data
	print("******* FP / TP for average probabilitys")
	print(fpr)
	print(tpr)
	for i in range(n_classes):
		output = open(os.path.join(FLAGS.output_dir, corr + 'out1_perTile_roc_data_AvPb_c' + str(i+1)+ 'auc_' + str("%.4f" % roc_auc[i]) + '_t' + str("%.3f" % opt_thresh[i]) + '.txt'),'w')
		for kk in range(len(tpr[i])):
			output.write("%f\t%f\n" % (fpr[i][kk], tpr[i][kk]) )
		output.close()

	output = open(os.path.join(FLAGS.output_dir, corr + 'out1_perTile_roc_data_AvPb_macro_auc_' + str("%.4f" % roc_auc["macro"]) + '.txt'),'w')
	for kk in range(len(tpr["macro"])):
		output.write("%f\t%f\n" % (fpr["macro"][kk], tpr["macro"][kk]) )
	output.close()

	output = open(os.path.join(FLAGS.output_dir, corr + 'out1_perTile_roc_data_AvPb_micro_auc_' + str("%.4f" % roc_auc["micro"]) + '_t' + str("%.3f" % opt_thresh["micro"]) + '.txt'),'w')
	for kk in range(len(tpr["micro"])):
		output.write("%f\t%f\n" % (fpr["micro"][kk], tpr["micro"][kk]) )
	output.close()

	print("******* FP / TP for percent selected")
	print(fpr_PcSel)
	print(tpr_PcSel)
	for i in range(n_classes):
		output = open(os.path.join(FLAGS.output_dir, corr + 'out1_perTile_roc_data_PcSel_c' + str(i+1)+ 'auc_' + str("%.4f" % roc_auc_PcSel[i]) + '.txt'),'w')
		for kk in range(len(tpr_PcSel[i])):
			output.write("%f\t%f\n" % (fpr_PcSel[i][kk], tpr_PcSel[i][kk]) )
		output.close()


	output = open(os.path.join(FLAGS.output_dir, corr+ 'out1_perTile_roc_data_PcSel_macro_auc_' + str("%.4f" % roc_auc_PcSel["macro"]) + '.txt'),'w')
	for kk in range(len(tpr_PcSel["macro"])):
		output.write("%f\t%f\n" % (fpr_PcSel["macro"][kk], tpr_PcSel["macro"][kk]) )
	output.close()

	output = open(os.path.join(FLAGS.output_dir, corr+ 'out1_perTile_roc_data_PcSel_micro_auc_' + str("%.4f" % roc_auc_PcSel["micro"]) + '.txt'),'w')
	for kk in range(len(tpr_PcSel["micro"])):
		output.write("%f\t%f\n" % (fpr_PcSel["micro"][kk], tpr_PcSel["micro"][kk]) )
	output.close()
	## Compute ROC per slide
	y_score = np.array(y_score)
	y_score_PcSelect = np.array(y_score_PcSelect)
	y_ref = np.array(y_ref)
	
	# Compute ROC curve and ROC area for each class
	fpr = dict()
	tpr = dict()
	thresholds = dict()
	opt_thresh = dict()
	roc_auc = dict()
	fpr_PcSel = dict()
	tpr_PcSel = dict()
	roc_auc_PcSel = dict()
	print("n_classes")
	print(n_classes)

	for i in range(n_classes):
		print(y_ref[:, i], y_score[:, i])
		fpr[i], tpr[i], thresholds[i] = roc_curve(y_ref[:, i], y_score[:, i])
		roc_auc[i] = auc(fpr[i], tpr[i])

		fpr_PcSel[i], tpr_PcSel[i], _ = roc_curve(y_ref[:, i], y_score_PcSelect[:, i])
		roc_auc_PcSel[i] = auc(fpr_PcSel[i], tpr_PcSel[i])
		euc_dist = []
		try:
			# distance from each ROC point to the ideal corner (0, 1);
			# euclidean_distances expects 2-D arrays
			for jj in range(len(fpr[i])):
				euc_dist.append( euclidean_distances([[0, 1]], [[fpr[i][jj], tpr[i][jj]]]) )
			opt_thresh[i] = thresholds[i][euc_dist.index(min(euc_dist))]
		except Exception:
			opt_thresh[i] = 0
	# Compute micro-average ROC curve and ROC area
	fpr["micro"], tpr["micro"], _ = roc_curve(y_ref.ravel(), y_score.ravel())
	roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])

	fpr_PcSel["micro"], tpr_PcSel["micro"], thresholds["micro"] = roc_curve(y_ref.ravel(), y_score_PcSelect.ravel())
	roc_auc_PcSel["micro"] = auc(fpr_PcSel["micro"], tpr_PcSel["micro"])
	euc_dist = []
	for jj in range(len(fpr_PcSel["micro"])):
		euc_dist.append( euclidean_distances([[0, 1]], [[fpr_PcSel["micro"][jj], tpr_PcSel["micro"][jj]]]) )
	print(min(euc_dist))
	print(euc_dist.index(min(euc_dist)))
	print(thresholds["micro"])
	opt_thresh["micro"] = thresholds["micro"][euc_dist.index(min(euc_dist))]


	## Compute macro-average ROC curve and ROC area
	# First aggregate all false positive rates
	all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
	all_fpr_PcSel = np.unique(np.concatenate([fpr_PcSel[i] for i in range(n_classes)]))

	# Then interpolate all ROC curves at these points
	mean_tpr = np.zeros_like(all_fpr)
	for i in range(n_classes):
	    mean_tpr += interp(all_fpr, fpr[i], tpr[i])

	mean_tpr_PcSel = np.zeros_like(all_fpr_PcSel)
	for i in range(n_classes):
	    mean_tpr_PcSel += interp(all_fpr_PcSel, fpr_PcSel[i], tpr_PcSel[i])

	# Finally average it and compute AUC
	mean_tpr /= n_classes
	fpr["macro"] = all_fpr
	tpr["macro"] = mean_tpr
	roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

	mean_tpr_PcSel /= n_classes
	fpr_PcSel["macro"] = all_fpr_PcSel
	tpr_PcSel["macro"] = mean_tpr_PcSel
	roc_auc_PcSel["macro"] = auc(fpr_PcSel["macro"], tpr_PcSel["macro"])


	# save data
	print("******* FP / TP for average probabilitys")
	print(fpr)
	print(tpr)
	for i in range(n_classes):
		output = open(os.path.join(FLAGS.output_dir, corr + 'out2_roc_data_AvPb_c' + str(i+1)+ 'auc_' + str("%.4f" % roc_auc[i]) + '_t' + str("%.3f" % opt_thresh[i]) + '.txt'),'w')
		for kk in range(len(tpr[i])):
			output.write("%f\t%f\n" % (fpr[i][kk], tpr[i][kk]) )
		output.close()

	output = open(os.path.join(FLAGS.output_dir, corr + 'out2_roc_data_AvPb_macro_auc_' + str("%.4f" % roc_auc["macro"]) + '.txt'),'w')
	for kk in range(len(tpr["macro"])):
		output.write("%f\t%f\n" % (fpr["macro"][kk], tpr["macro"][kk]) )
	output.close()

	output = open(os.path.join(FLAGS.output_dir, corr + 'out2_roc_data_AvPb_micro_auc_' + str("%.4f" % roc_auc["micro"]) + '_t' + str("%.3f" % opt_thresh["micro"]) + '.txt'),'w')
	for kk in range(len(tpr["micro"])):
		output.write("%f\t%f\n" % (fpr["micro"][kk], tpr["micro"][kk]) )
	output.close()

	print("******* FP / TP for percent selected")
	print(fpr_PcSel)
	print(tpr_PcSel)
	for i in range(n_classes):
		output = open(os.path.join(FLAGS.output_dir, corr + 'out2_roc_data_PcSel_c' + str(i+1)+ 'auc_' + str("%.4f" % roc_auc_PcSel[i]) + '.txt'),'w')
		for kk in range(len(tpr_PcSel[i])):
			output.write("%f\t%f\n" % (fpr_PcSel[i][kk], tpr_PcSel[i][kk]) )
		output.close()


	output = open(os.path.join(FLAGS.output_dir, corr+ 'out2_roc_data_PcSel_macro_auc_' + str("%.4f" % roc_auc_PcSel["macro"]) + '.txt'),'w')
	for kk in range(len(tpr_PcSel["macro"])):
		output.write("%f\t%f\n" % (fpr_PcSel["macro"][kk], tpr_PcSel["macro"][kk]) )
	output.close()

	output = open(os.path.join(FLAGS.output_dir, corr+ 'out2_roc_data_PcSel_micro_auc_' + str("%.4f" % roc_auc_PcSel["micro"]) + '.txt'),'w')
	for kk in range(len(tpr_PcSel["micro"])):
		output.write("%f\t%f\n" % (fpr_PcSel["micro"][kk], tpr_PcSel["micro"][kk]) )
	output.close()

	# Plot all ROC curves
	plt.figure()
	plt.plot(fpr["micro"], tpr["micro"],
		 label='micro-average ROC curve (area = {0:0.2f})'
		       ''.format(roc_auc["micro"]),
		 color='deeppink', linestyle=':', linewidth=4)

	plt.plot(fpr["macro"], tpr["macro"],
		 label='macro-average ROC curve (area = {0:0.2f})'
		       ''.format(roc_auc["macro"]),
		 color='navy', linestyle=':', linewidth=4)
	lw = 2
	colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
	for i, color in zip(range(n_classes), colors):
		plt.plot(fpr[i], tpr[i], color=color, lw=lw,
		     label='ROC curve of class {0} (area = {1:0.2f})' 
		     ''.format(i, roc_auc[i]))

	plt.plot([0, 1], [0, 1], 'k--', lw=lw)
	plt.xlim([0.0, 1.0])
	plt.ylim([0.0, 1.05])
	plt.xlabel('False Positive Rate')
	plt.ylabel('True Positive Rate')
	plt.title("Some extension of Receiver operating characteristic to multi-class (Aggregation by averaging tiles'probabilities)")
	plt.legend(loc="lower right")
	plt.show()


	plt.figure()
	plt.plot(fpr_PcSel["micro"], tpr_PcSel["micro"],
		 label='micro-average ROC curve (area = {0:0.2f})'
		       ''.format(roc_auc_PcSel["micro"]),
		 color='deeppink', linestyle=':', linewidth=4)
	plt.plot(fpr_PcSel["macro"], tpr_PcSel["macro"],
		 label='macro-average ROC curve (area = {0:0.2f})'  
		       ''.format(roc_auc_PcSel["macro"]),
		 color='navy', linestyle=':', linewidth=4)
	lw = 2
	colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
	for i, color in zip(range(n_classes), colors):
		plt.plot(fpr_PcSel[i], tpr_PcSel[i], color=color, lw=lw,
		     label='ROC curve of class {0} (area = {1:0.2f})'  
		     ''.format(i, roc_auc_PcSel[i]))
	plt.plot([0, 1], [0, 1], 'k--', lw=lw)
	plt.xlim([0.0, 1.0])
	plt.ylim([0.0, 1.05])
	plt.xlabel('False Positive Rate')
	plt.ylabel('True Positive Rate')
	plt.title('Some extension of Receiver operating characteristic to multi-class (Aggregation by percentage of TP tiles)')
	plt.legend(loc="lower right")
	plt.show()
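The nearest-corner threshold search above is repeated for each curve. A minimal vectorized sketch of the same idea (hypothetical helper name), assuming the inputs are suitable for sklearn.metrics.roc_curve:

import numpy as np
from sklearn.metrics import roc_curve

def optimal_threshold(y_true, y_score):
    # threshold whose ROC point lies closest (Euclidean) to the ideal corner (0, 1)
    fpr, tpr, thresholds = roc_curve(y_true, y_score)
    dist = np.sqrt(fpr ** 2 + (1.0 - tpr) ** 2)
    return thresholds[np.argmin(dist)]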
Example #40
    for j in range(1, J):
        unew[j] = uold[j] - 2 * defac * (Fuhalf[j] - Fuhalf[j - 1])
        nnew[j] = nold[j] - 2 * defac * (Fnhalf[j] - Fnhalf[j - 1])
    # For boundary grid points use forward (x = 0) and backward (x = 1)
    # differencing. Apply boundary conditions.
    unew[0] = u0  # boundary condition
    unew[J] = u1  # boundary condition
    nnew[0] = nold[0] - 2 * defac * (Fnhalf[0] - Fn[0])  # forward difference
    nnew[J] = nold[J] - 2 * defac * (Fn[J] - Fnhalf[J - 1])  # backward difference
    # Update arrays with new values
    uold = np.copy(unew)
    nold = np.copy(nnew)
    line[0].set_ydata(nold)
    ax.set_title('Time = {0} s'.format(t))
    plt.draw()
    # Track the wave at timesteps we are interested in.
    if any(abs(t - stimes) < eps):
        savetimes.append(nold)
    t += tdel  # update time to next timestep

################################## PLOT ##################################
plt.figure()
for i in range(len(savetimes)):
    plt.plot(x, savetimes[i], label='t = {0} s'.format(stimes[i]))
plt.xlim(min(x), max(x))
plt.legend(loc='best')
plt.xlabel('$x [m]$', fontsize=20)
plt.ylabel(r'$\eta$', fontsize=20)
plt.title('Evolution of a shallow water wave over time using the TSLW scheme')
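The interior update above is the second (full) step of a two-step Lax-Wendroff scheme. A self-contained sketch of the same two-step idea for the 1-D advection equation u_t + c*u_x = 0 with periodic boundaries (all names hypothetical):

import numpy as np

def lax_wendroff_step(u, c, dx, dt):
    # step 1: provisional half-step values at the cell interfaces (j+1/2)
    f = c * u
    u_half = 0.5 * (u + np.roll(u, -1)) - dt / (2 * dx) * (np.roll(f, -1) - f)
    # step 2: full update from the half-step fluxes
    f_half = c * u_half
    return u - dt / dx * (f_half - np.roll(f_half, 1))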
Example #41
ax[0].errorbar(
    hy_tgas_coords.cartesian.x.value,
    hy_tgas.dpmra/hy_tgas.parallax*4.74,
    # yerr=hy_tgas.vra_error,
    ls='None', marker='.', ms=1, elinewidth=.5);

ax[1].errorbar(
    hy_dr2_coords.cartesian.x.value,
    hy_dr2.dpmdec/hy_dr2.parallax*4.74,
    ls='None',
    yerr=hy_dr2.vdec_error, marker='.', ms=1, elinewidth=.5);
ax[1].errorbar(
    hy_tgas_coords.cartesian.x.value,
    hy_tgas.dpmdec/hy_tgas.parallax*4.74,
    ls='None',
    yerr=hy_tgas.vdec_error, marker='.', ms=1, elinewidth=.5);
plt.xlim(4,32);
plt.ylim(-5.5,5.5)
ax[0].axhline(0, lw=1, c='gray');
ax[1].axhline(0, lw=1, c='gray');
ax[0].set_xlabel("ICRS $X$")
ax[1].set_xlabel("ICRS $X$")
ax[0].set_title("R.A.")
ax[1].set_title("Decl.")
ax[0].set_ylabel(r'$\mu$ [mas/yr]');

# %% Residual velocities vx cartesian coordinates
fig, ax = plt.subplots(1, 2, figsize=(10,4), sharex=True, sharey=True)
ax[0].errorbar(
    hy_dr2_coords.cartesian.y.value,
    hy_dr2.dpmra/hy_dr2.parallax*4.74,
    yerr=hy_dr2.vra_error,
    ls='None', marker='.', ms=1, elinewidth=.5);
Example #42
        plt.plot(localcam[:, 0],
                 localcam[:, 1],
                 '-o',
                 label="trajectory at frame " + str(frame_i + 2))
        camerastart += currentslidingwindowsize
        if currentslidingwindowsize < slidingwindowsize:
            currentslidingwindowsize += 1
        else:
            camerastart_gt += 1
        xtot = np.concatenate([localcam[:, 0], camera_gt_x, [xmin, xmax]])
        ytot = np.concatenate([localcam[:, 1], camera_gt_y, [ymin, ymax]])
        xmin = np.min(xtot)
        xmax = np.max(xtot)
        ymin = np.min(ytot)
        ymax = np.max(ytot)
        plt.xlim(xmin - 1, xmax + 1)
        plt.ylim(ymin - 1, ymax + 1)

        title = set + "_" + classifier + "_camera_trajectory"
        plt.legend(loc='upper center',
                   bbox_to_anchor=(0.5, -0.05),
                   fancybox=True,
                   shadow=True,
                   ncol=2)
        plt.title(title)
        framestring = str(frame_i)
        plt.savefig(framefilepath + "/" + title + (str(frame_i)).zfill(3),
                    dpi=300)
        plt.clf()
        print "saved frame " + str(frame_i) + " in: " + title
Example #43
def main():
    ################### Read mu of MCMC (rvd2) ################################
    with h5py.File(control_mcmc, 'r') as f:
        muControl = f['mu'][...]
        locControl = f['loc'][...]
    with h5py.File(case_mcmc, 'r') as f:
        muCase = f['mu'][...]
        locCase = f['loc'][...]
    idx = []
    for pos in position:
        idx.append(pos)
    # index once after the loop instead of on every iteration
    muControl1 = muControl[idx]
    muCase1 = muCase[idx]
    #N = 2000
    #(muZ,_,_) = rvd27.sample_post_diff(muCase1, muControl1, N) # sample Z

    ## plot histogram
    num_bins = 25
    for i in range(len(position)):
        fig = plt.figure(figsize=(12, 8))

        ########### Plot mu of MCMC (rvd2) vs Variational (rvd3) ##################
        # density=True: the integral of the histogram will sum to 1.
        plt.hist(muCase1[i, :].T,
                 num_bins,
                 density=True,
                 facecolor='r',
                 alpha=0.5,
                 label='Case (MCMC)')
        plt.hist(muControl1[i, :].T,
                 num_bins,
                 density=True,
                 facecolor='k',
                 alpha=0.5,
                 label='Control (MCMC)')

        ############# Plot mu of Variational (rvd3) ################################
        caseR, caseN, casephi, caseq, loc, refb = rvd3.load_model(case_var)
        casegam = caseq['gam']
        a = casegam[position, 0]
        b = casegam[position, 1]
        cov_case = int(np.median(caseN))
        x_case = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b), 100)
        plt.plot(x_case,
                 beta.pdf(x_case, a, b),
                 'r--',
                 lw=4,
                 alpha=1.0,
                 label="Case (Variational)")
        r_case = beta.rvs(a, b, size=2000)
        plt.hist(r_case,
                 num_bins,
                 density=True,
                 histtype='stepfilled',
                 alpha=0.2,
                 facecolor='r')

        controlR, controlN, controlphi, controlq, _, _ = rvd3.load_model(
            control_var)
        controlgam = controlq['gam']
        a = controlgam[position, 0]
        b = controlgam[position, 1]
        x_control = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b),
                                100)
        plt.plot(x_control,
                 beta.pdf(x_control, a, b),
                 'k--',
                 lw=4,
                 alpha=1.0,
                 label='Control (Variational)')
        r_control = beta.rvs(a, b, size=2000)
        plt.hist(r_control,
                 num_bins,
                 density=True,
                 histtype='stepfilled',
                 alpha=0.2,
                 facecolor='k')
        plt.xlim(0, 0.012)
        plt.legend(loc='best', frameon=False)
        plt.xlabel(r'$\hat{\mu} = \mu-\mu_0$', fontsize=20)
        plt.xticks(rotation=25)
        plt.title(r'$\hat{\mu}$ at position %s when median depth is %d' %
                  ((position[i] + 1), cov_case),
                  fontsize=18)
        plt.tight_layout()
        plt.savefig('position_%s_%d_mcmc_vs_var.png' %
                    ((position[i] + 1), cov_case))
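The overlay above draws a Beta(a, b) variational density over an MCMC histogram. A minimal self-contained version of that overlay with hypothetical parameter values:

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import beta

a, b = 2.0, 300.0                    # hypothetical Beta (gam) parameters
samples = beta.rvs(a, b, size=2000)  # stand-in for MCMC draws
x = np.linspace(beta.ppf(0.001, a, b), beta.ppf(0.999, a, b), 100)
plt.hist(samples, 25, density=True, alpha=0.5, facecolor='k')
plt.plot(x, beta.pdf(x, a, b), 'r--', lw=2)
plt.show()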
Example #44
def plot_tritium(dictionary, headers_to_plot, headers, scales = [], toggleLimits = 1, toggleErrors = 1):
    '''
    Function used to plot data against limit and errors from the Tritium.
    
    Inputs:
        dictionary -- Dictionary object returned from file_to_variables(f)
        
        headers_to_plot -- Header names in dictionary to plot over limits and errors. Can also pass an array of data to plot instead if it's
        the same size of arrays in the dictionary
        
        headers -- Python list returned from file_to_variables(f)
        
        scales -- Values to scale each data header by. 1 scalar value per 2 headers. If no scale factors are passed, no scaling is applied
        
        toggleLimits -- 1 to display limits in the plot, 0 to not
        
        toggleErrors -- 1 to display errors, 0 to not
        
    Example function call:
        plot_tritium(dict, ['Runtime','MotorVelocity'], headers, [-1])
        
        In this case, we are plotting Runtime on the X, MotorVelocity on the Y. We also scale
        MotorVelocity by -1 to make the values positive. Since we don't specify limits or errors, they default to 1.
        Dict and headers are output variables from file_to_variables(f)
        
    plot_tritium also offers the ability to plot data not in the dictionary. To do so, build your own array and
    pass it in the "headers_to_plot" input. The feature expects an x array and a y array nested inside a single
    array.
    '''
    
    limits = []
    errors = []
    limit_arrays = []    
    arrays_to_plot = []
    
    for header in headers_to_plot:
        if type(header).__module__ == np.__name__:
            arrays_to_plot.append(header)
        else:
            arrays_to_plot.append(dictionary[header])    

    i = 0        
    for scale in scales:
        arrays_to_plot[i+1] = arrays_to_plot[i+1] * scale
        i += 2
    
    for label in headers:
        if label.find('limit') >= 0:
            limits.append(label)
        elif label.find('error') >= 0:
            errors.append(label)
    
    errors.remove('error_watchdog')
    if toggleLimits == 1:
        for limit in limits:
            limit_arrays.append(dictionary[limit])
            
    if toggleErrors == 1:
        for error in errors:
            limit_arrays.append(dictionary[error])

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)    
    '''ax2 = ax.twiny()'''
    
    maxX, minX, maxY, minY = 0, 100000000, 0, 100000000
    totalArrays = np.shape(arrays_to_plot)[0]
    for i in range(0, totalArrays, 2):
        if maxX < np.amax(arrays_to_plot[i]):
            maxX = max(arrays_to_plot[i]);
        if minX > np.amin(arrays_to_plot[i]):
            minX = min(arrays_to_plot[i]);
        if maxY < np.amax(arrays_to_plot[i+1]):
            maxY = max(arrays_to_plot[i+1]);
        if minY > np.amin(arrays_to_plot[i+1]):
            minY = min(arrays_to_plot[i+1]);

    lastErrorState = 0
    red,blue,green = 0,0,0

    w,h = 100, 50000
    
    data = np.zeros( (w,h,3), dtype=np.uint8)
    startOfColor, x = 0, 0

    customArtist = []
    '''If this isn't enough colors for future proofing, good luck'''
    colors = ['#00FF00','#0000FF','#FF0000','#01FFFE','#FFA6FE','#006401',
                  '#010067','#95003A','#007DB5','#FF00F6','#FFEEE8','#774D00',
                  '#90FB92','#0076FF','#D5FF00','#FF937E','#6A826C','#FF029D',
                  '#FE8900','#7A4782','#7E2DD2','#85A900','#FF0056','#A42400',
                  '#00AE7E','#683D3B','#BDC6FF','#263400','#BDD393','#00B917',
                  '#9E008E','#001544','#C28C9F','#FF74A3','#01D0FF','#004754',
                  '#E56FFE','#788231','#0E4CA1','#91D0CB','#BE9970','#968AE8',
                  '#BB8800','#43002C','#DEFF74','#00FFC6','#FFE502','#620E00',
                  '#008F9C','#98FF52','#7544B1','#B500FF','#00FF78','#FF6E41',
                  '#005F39','#6B6882','#5FAD4E','#A75740','#A5FFD2','#FFB167',
                  '#009BFF','#E85EBE']
    
    currentErrorState = 0
    redTone,blueTone,greenTone = 0,0,0
    data[:,:]= 255,255,255
    for i in range(np.shape(limit_arrays)[0]):
        if currentErrorState == 1:
            data[:, int(startOfColor):int(x)] = [redTone,greenTone,blueTone]
        
        redTone = int(colors[i][1:3], 16)
        greenTone = int(colors[i][3:5], 16)
        blueTone = int(colors[i][5:7], 16)
        startOfColor = 0
        x, counter = 0, 0
        lastErrorState = 0
        while counter < limit_arrays[i].size-1:
            currentErrorState = limit_arrays[i][counter]
            if currentErrorState == 1 and lastErrorState == 0:
                startOfColor = x

            if lastErrorState == 1 and currentErrorState == 0:
                data[:, int(startOfColor):int(x - 1)] = [redTone,greenTone,blueTone]

            lastErrorState = currentErrorState

            x += 50000.0/limit_arrays[i].size
            counter += 1
            
        customArtist.append(plt.Line2D((0,1),(0,0), color=colors[i], marker='None', alpha=1, linewidth=6))
    
    if currentErrorState == 1:
        data[:, int(startOfColor):int(x + 2)] = [redTone,greenTone,blueTone]


    # scipy.misc.imsave/imread were removed from SciPy; imageio (assumed imported)
    # provides the equivalent imwrite/imread functions
    imageio.imwrite('outfile.jpg', data)
    img = imageio.imread('outfile.jpg')


    markers = itertools.cycle((',', '+', '.', 'o', '*'))
    colors = itertools.cycle(('b','g','r','c','m','y','k'))
    for i in range(0, totalArrays-1, 2):
        ax.plot(arrays_to_plot[i],arrays_to_plot[i+1], color=next(colors), marker=next(markers), label = headers_to_plot[i+1])
    
    plt.ylim([minY,maxY])
    plt.xlim([minX,maxX])
    '''
    ax2.set_xticks(dictionary['Runtime'])
    ax2.set_xlabel("Runtime")
    '''
    plt.imshow(img, aspect='auto', zorder=0, extent=[minX, maxX, minY, maxY], alpha=1)

    handles, labels = ax.get_legend_handles_labels()
    display = (0,1,2)

    if toggleLimits == 0 and toggleErrors == 0:
        ax.legend([handle for i,handle in enumerate(handles) if i in display]+
          customArtist,
          [label for i,label in enumerate(labels) if i in display],numpoints=1)
    elif toggleLimits == 0:
        ax.legend([handle for i,handle in enumerate(handles) if i in display]+
          customArtist,
          [label for i,label in enumerate(labels) if i in display]+
          errors,numpoints=1)
    elif toggleErrors == 0:
        ax.legend([handle for i,handle in enumerate(handles) if i in display]+
          customArtist,
          [label for i,label in enumerate(labels) if i in display]+
          limits,numpoints=1)
    else:
        ax.legend([handle for i,handle in enumerate(handles) if i in display]+
          customArtist,
          [label for i,label in enumerate(labels) if i in display]+
          limits+errors,numpoints=1)
    
    plt.show()
    
    
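A minimal sketch of the background-band idea used above (hypothetical data): boolean error states are rendered as a colored image stretched behind the line plot with imshow(extent=...).

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 500)
y = np.sin(t)
error_on = (t > 4) & (t < 6)            # hypothetical error interval

img = np.full((1, t.size, 3), 255, dtype=np.uint8)
img[0, error_on] = (255, 200, 200)      # light red band where the error is active

plt.imshow(img, aspect='auto', zorder=0,
           extent=[t.min(), t.max(), y.min(), y.max()])
plt.plot(t, y, 'b', zorder=1)
plt.show()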
Example #45
def plot_lag_all(home,project_name,cata_name,sta_name,filter_slope,ref_OT="2018-05-04T22:32:54.650",coast_path=''):
    import matplotlib
    matplotlib.use('pdf') #instead using interactive backend
    import matplotlib.pyplot as plt
    import seaborn as sns
    
    from repeq import data_proc
    from repeq.EQreloc import get_lonlat
    
    import pandas as pd
    '''
        read home/project_name/output/Template_match/Measure_lag/measure_lag_all.npy and plot slope of shift measurements
        cata_name: catalog name
        sta_name: station table created from data_proc.make_sta_table
        filter_slope: parameter for measuring slope
        filter_slope = {
        'diff_t':60,    #minimum dt between template and detected_OT (note the definition is different from filter_detc used by data_proc.bulk_cut_dailydata)
        'aligned_CC':0.7,
        'measured_CC':0.5,
        'max_shift':0.5, #drop the shift larger than this number (very large shift due to cycle slip)
        'min_length':0.7, #length of the available time series pass the above criteria (0~1)
        'cal_range':[5,10], # time range for slope calculation
        }
    '''
    sns.set()
    sns.set_palette('husl',n_colors=10)

    # load coast data if given
    if coast_path:
        coast=np.genfromtxt(coast_path)

    # load catalog
    df = data_proc.cat2pd(home+'/'+project_name+'/catalog/'+cata_name)

    # load station table in home/project_nam/stations/stations.txt
    sta_table = pd.read_table(home+'/'+project_name+'/stations/'+sta_name,header=None,names=['stlon','stlat','stelev','stname'],sep=' ')

    #load all lag measurements (this is a huge file, make sure memory fit)
    lag_all = np.load(home+'/'+project_name+'/output/Template_match/Measure_lag/'+'measure_lag_all.npy',allow_pickle=True)
    lag_all = lag_all.item()

    print('*****Add depth filter: skip all events shallower than 2 km*****')

    sav_slope = {} #with sta as key
    for ik in lag_all.keys():
        temp_OT = lag_all[ik]['template_OT']
        #find the corresponding eqinfo
        tmp_df = df[(df.Date==temp_OT.split('T')[0]) & (df.Time==temp_OT.split('T')[1] )  ]
        if tmp_df.iloc[0].Depth < 2.0:
            continue


        #loop all the detections
        for detc_OT in lag_all[ik]['detc_OT'].keys():
            if np.abs(UTCDateTime(temp_OT)-UTCDateTime(detc_OT))<filter_slope['diff_t']:
                continue #dont want the detection too close to template(basically itself)

            #loop every stations
            for sta in lag_all[ik]['detc_OT'][detc_OT].keys():
                #in each lag_all[ik]['detc_OT'][detc_OT][sta] there are "time","shift","CCC" keys
                time = lag_all[ik]['detc_OT'][detc_OT][sta]['time']
                shift = lag_all[ik]['detc_OT'][detc_OT][sta]['shift']
                CCC = lag_all[ik]['detc_OT'][detc_OT][sta]['CCC']

                #find the alignedCC (time closest to zero)
                zeroidx = np.where(np.abs(time) == np.min(np.abs(time)))[0][0]
                if CCC[zeroidx]<filter_slope['aligned_CC']:
                    continue #alignment is not robust
                        
                #take the time,shift and fit by a slope
				idx = np.where((CCC>=filter_slope['measured_CC']) & (shift<filter_slope['max_shift']) & (time>=filter_slope['cal_range'][0]) & (time<=filter_slope['cal_range'][1]) )[0] #shift cannot be too large, otherwise it is a cycle slip
                idx_t = np.where((time>=filter_slope['cal_range'][0]) & (time<=filter_slope['cal_range'][1]))[0]
                #idx = np.where((CCC>=filter_slope['measured_CC']) & (shift<filter_slope['max_shift']) & (time>=0) )[0] #shift cannt be too large otherwise is cycle slip
                #if len(idx) >= (len(shift)*filter_slope['min_length']):
                if len(idx) >= (len(idx_t)*filter_slope['min_length']):
                    #plt.plot(time[idx],shift[idx])
                    #plt.show()
                    #80% data pass threshold, then calculate slope
                    M = data_proc.cal_slope(time[idx],shift[idx])
                    #sav_slope.append(M[1]) #M[0] is intercept, M[1] is slope
                    G = np.hstack([np.ones([len(idx),1]),time[idx].reshape(-1,1)])
                    yhat = np.dot(G,M.reshape(-1,1))
                    fit_std = np.std(yhat-shift[idx]) #standard deviation of misfit
                    #sav_std.append(fit_std)
                    #get the reference time WRS to template_OT
                    ref_tempT = (UTCDateTime(detc_OT)-UTCDateTime(temp_OT))/86400.0 #relative days from template
                    #sav_reftime.append(ref_tempT)
                    #create new sta key if its not there
                    if not (sta in sav_slope):
                        sav_slope[sta] = {temp_OT:{'slope':[],'std':[],'ref_time':[],'ID':ik}}
                    #template_OT as new key
                    if not (lag_all[ik]['template_OT'] in sav_slope[sta]):
                        sav_slope[sta][temp_OT] = {'slope':[],'std':[],'ref_time':[],'ID':ik}
                    #appending data
                    sav_slope[sta][temp_OT]['slope'] = np.hstack([sav_slope[sta][temp_OT]['slope'],M[1]]) #appending data as array
                    sav_slope[sta][temp_OT]['std'] = np.hstack([sav_slope[sta][temp_OT]['std'],fit_std]) #
                    sav_slope[sta][temp_OT]['ref_time'] = np.hstack([sav_slope[sta][temp_OT]['ref_time'],ref_tempT])


    ref_OT = UTCDateTime(ref_OT)
    #=========plot station result============
    for sta in sav_slope.keys():
        plt.figure(figsize=(8.5,4.5))
        plt.subplot(1,2,1)
        #loop templates
        n_meas = 0 #n-measurements
        sav_tmplon = [] #save template lon
        sav_tmplat = [] #template lat
        sav_h = []
        for temp in sav_slope[sta].keys():
            #for each template, all the measurements at this station
            time = sav_slope[sta][temp]['ref_time'] #0 is the template time
            slope = sav_slope[sta][temp]['slope']
            stdn = sav_slope[sta][temp]['std']
            #add templat itself in the data
            time = np.hstack([time,0])
            slope = np.hstack([slope,0])
            stdn = np.hstack([stdn,0])
            #sort
            sor_idx = np.argsort(time)
            time = time[sor_idx]
            slope = slope[sor_idx]
            stdn = stdn[sor_idx]
            dt_main = (UTCDateTime(temp)-ref_OT)/86400.0 #set t at ref_OT = 0
            time += dt_main
            #check if slope measurement across the mainshock
            if not ((time.min()<0) & (time.max()>0)):
                continue
            #not plot short sequence
            if len(time)< 5:
                continue
            #if (time.max()<0):
            #    continue
            #plt.errorbar(time,slope+n_meas,stdn)
            scale_slope = np.std(slope)
            #h = plt.plot(time,slope+n_meas*0.01,'.-') #the old scaling
            h = plt.plot(time,slope/scale_slope+n_meas*3,'.-')  #normalize by their std
            sav_h.append(h[0])
            '''
            #old scaling
            plt.plot(dt_main,0+n_meas*0.01,'v',markerfacecolor=[1,0,0],markeredgecolor=[0,0,0]) #template triangle mark
            plt.plot([-10,10],[n_meas*0.01,n_meas*0.01],'k--',linewidth=0.5)
            '''
            plt.plot(dt_main,0+n_meas*3,'v',markerfacecolor=[1,0,0],markeredgecolor=[0,0,0]) #template triangle mark
            plt.plot([-10,10],[n_meas*3,n_meas*3],'k--',linewidth=0.5)
            #save template information (loc)
            sav_tmplon.append(df.iloc[ int(sav_slope[sta][temp]['ID']) ].Lon)
            sav_tmplat.append(df.iloc[ int(sav_slope[sta][temp]['ID']) ].Lat)
            n_meas += 1
        if n_meas == 0:
            plt.close()
            continue
        #plt.plot([0,0],[-0.01,n_meas*0.01],'r',linewidth=0.5)
        plt.plot([0,0],[-3,n_meas*3],'r',linewidth=0.5)
        plt.xlim([-5,5])
        #plt.ylim([-0.01,n_meas*0.01])
        plt.ylim([-3,n_meas*3])
        plt.yticks([],[])
        plt.xlabel('Day relative to mainshock',fontsize=15,labelpad=0)
        plt.title(sta,fontsize=15)
        plt.grid(False)
        #another subplot plot map
        plt.subplot(1,2,2)
        for itmp in range(len(sav_tmplon)):
            plt.plot(sav_tmplon[itmp],sav_tmplat[itmp],'o',color=sav_h[itmp].get_color(),markeredgecolor=[0,0,0],mew=0.8,alpha=0.9)
        if coast_path:
            plt.plot(coast[:,0],coast[:,1],'k-')
        #get station lon,lat
        stlon,stlat = get_lonlat(sta_table,[sta])
        plt.plot(stlon,stlat,'^',markersize=10,color=[0,1,0],markeredgecolor=[0,0,1],mew=1)
        print('***manually plot mainshock loc, set xlim and ylim')
        plt.plot(-154.9996667,19.3181667,'*',markerfacecolor=[1,0,0],markersize=14,markeredgecolor=[0,0,0],mew=1,alpha=0.9)
        plt.xlim([-155.85,-154.74])
        plt.ylim([18.86,19.88])
        plt.xticks(rotation=30,fontsize=10)
        plt.savefig(home+'/'+project_name+'/output/Template_match/Figs/'+'slopeSummary_%s.png'%(sta))
        plt.close()
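The measurement loop above fits an intercept and slope to (time, shift) pairs and keeps the slope and the misfit standard deviation. A standalone sketch of that fit in plain numpy (data_proc.cal_slope is assumed to do the equivalent):

import numpy as np

def fit_slope(t, shift):
    # least-squares line fit; returns (intercept, slope) and misfit std
    G = np.column_stack([np.ones_like(t), t])      # design matrix [1, t]
    M, *_ = np.linalg.lstsq(G, shift, rcond=None)  # M = [intercept, slope]
    resid = shift - G @ M
    return M, np.std(resid)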
Example #46
# yc2 = g_mean[1] - np.sqrt(fn**2 -(xc - g_mean[0])**2)
#
# plt.plot(xc, yc1, 'y')
# plt.plot(xc, yc2, 'y')


ticks = np.arange(0, 700, 100)
plt.scatter(sheep[:, 0], sheep[:, 1], c='g', label='sheep')
plt.scatter(shepherd[0], shepherd[1], c='r', marker='p', label='shepherd')
plt.scatter(sheep[idx][0], sheep[idx][1], c='b', label='target sheep')
plt.scatter(g_mean[0], g_mean[1], marker='^', c='orange', label='center')
line = np.array([[450, 0], [450, 150], [600, 150]])

plt.plot(line[:, 0], line[:, 1], 'c')
plt.text(455, 50, 'destination', fontsize=15)
plt.text(455, 20, 'region', fontsize=15)
plt.text(10, 570, 'collecting', fontsize=15)
plt.text(10, 20, 'time steps: {}/554'.format(step), fontsize=15)
plt.xticks(ticks)
plt.yticks(ticks)
plt.xlim(0, 600)
plt.ylim(0, 600)

plt.xlabel("X Position")
plt.ylabel("Y Position")
plt.legend(loc='upper right')
# plt.grid()
plt.show()

fig.savefig("E:\\我的坚果云\\latex\\doubleDistSum\\pics\\stepAngle{}x.pdf".format(step), dpi=600, format='pdf')
Example #47
def plot_accNumber(home,project_name,cata_name,filter_detc,min_inter,time1,time2):
    #plot accumulated number of EQ in catalog v.s. detections
    '''
        min_inter: minimum inter event time (s)
        time1,time2: plot data between the range
    '''
    import glob
    from repeq import data_proc
    from obspy import UTCDateTime
    import datetime
    import matplotlib
    matplotlib.use('pdf') #instead using interactive backend
    import matplotlib.pyplot as plt
    '''
    filter_detc = {
        'min_stan':9, #number of non-zero CC measurements
        'min_CC':0.5, #min mean(CC) value
        'diff_t':60, #time difference between events should larger than this
    }
    '''
    #load catalog and get their time
    df = data_proc.cat2pd(home+'/'+project_name+'/catalog/'+cata_name)
    template_time = [UTCDateTime(df.Date[i]+'T'+df.Time[i]) for i in range(len(df))]
    template_time = np.array(template_time)

    #load detections and get their time
    detcs = glob.glob(home+'/'+project_name+'/'+'output/Template_match/Detections/'+'Detected_tmp_*.npy')
    detcs.sort()
    detc_time = []
    for detc_path in detcs:
        detc = np.load(detc_path,allow_pickle=True)
        detc = detc.item()
        detc = data_proc.clean_detc(detc,filter_detc)
        detc_time += detc.keys()

    detc_time.sort()
    detc_time = np.array(detc_time)
    detc_time = [UTCDateTime(i) for i in detc_time]

    #set min-interevent time to remove redundant data
    clean_template_time = data_proc.clean_events_time(template_time,min_time=min_inter)
    clean_detc_time = data_proc.clean_events_time(detc_time,min_time=min_inter)

    t_temp, accnum_temp = data_proc.cal_accum(clean_template_time,time1,time2,dt=3600)
    t_detc, accnum_detc = data_proc.cal_accum(clean_detc_time,time1,time2,dt=3600)

    main_OT = UTCDateTime("2018-05-04T22:32:54.650Z").datetime #mainshock OT
    #convert UTCDateTime to datetime for plotting
    t_temp = [i.datetime for i in t_temp]
    t_detc = [i.datetime for i in t_detc]
    plt.figure(figsize=(10,4.5))
    plt.plot(t_temp,accnum_temp,'k')
    plt.plot(t_detc,accnum_detc,'r')
    print('***manually add something in plot function***')
    plt.plot([main_OT,main_OT],[0,np.max(accnum_detc)],'r--')
    plt.ylim([0,np.max(accnum_detc)])
    plt.xlim([UTCDateTime(time1).datetime,UTCDateTime(time2).datetime])
    plt.xlabel('Date',fontsize=14)
    plt.ylabel('Accumulated number',fontsize=14)
    plt.savefig(home+'/'+project_name+'/'+'output/Template_match/Detections/'+'detections.png')
    plt.close()
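A minimal sketch of what data_proc.cal_accum is assumed to compute above: the accumulated event count at each time step between time1 and time2 (hypothetical helper, event times as seconds).

import numpy as np

def accumulated_count(event_times, t1, t2, dt=3600.0):
    grid = np.arange(t1, t2 + dt, dt)
    events = np.sort(np.asarray(event_times))
    # number of events at or before each grid time
    return grid, np.searchsorted(events, grid, side='right')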
Example #48
    y=(caption_inv+1)/float(len(captions)+1)

    #Plot function
    def timelines(y, xstart, xstop,color='b'):
        """Plot timelines at y from xstart to xstop with given color."""
        plt.hlines(y,xstart,xstop,color,lw=4)
        plt.vlines(xstart, y+0.03,y-0.03,color,lw=2)
        plt.vlines(xstop, y+0.03,y-0.03,color,lw=2)

    #Plot synced timelines in red
    timelines(y[is_sync],start[is_sync],stop[is_sync],'r')
    #Plot unsynced timelines in black
    timelines(y[not_sync],start[not_sync],stop[not_sync],'k')

    #Setup the plot
    ax=plt.gca()
    #ax.xaxis_date()
    #myFmt = DateFormatter('%H:%M:%S')
    #ax.xaxis.set_major_formatter(myFmt)
    #ax.xaxis.set_major_locator(SecondLocator(0,interval=20))

    #To adjust the xlimits a timedelta is needed.
    delta=(stop.max()-start.min())/10

    plt.yticks(y[unique_idx],captions)
    plt.ylim(0,1)
    plt.xlim(start.min()-delta, stop.max()+delta)
    plt.xlabel('Time')

plt.show()
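A minimal usage sketch of the timelines pattern above with hypothetical start/stop arrays:

import numpy as np
import matplotlib.pyplot as plt

start = np.array([0.0, 2.0, 5.0])
stop = np.array([1.5, 4.0, 9.0])
y = np.array([0.25, 0.5, 0.75])

plt.hlines(y, start, stop, 'b', lw=4)             # timeline bodies
plt.vlines(start, y + 0.03, y - 0.03, 'b', lw=2)  # start caps
plt.vlines(stop, y + 0.03, y - 0.03, 'b', lw=2)   # end caps
plt.ylim(0, 1)
plt.show()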
Example #49
x1, x2 = np.meshgrid(
    np.arange(start=x_set[:, 0].min() - 1,
              stop=x_set[:, 0].max() + 1,
              step=0.01),
    np.arange(start=x_set[:, 1].min() - 1,
              stop=x_set[:, 1].max() + 1,
              step=0.01))  # set up the grid of pixel points

#To draw the separator (straight line)
plt.contourf(x1,
             x2,
             classifier.predict(np.array([x1.ravel(),
                                          x2.ravel()]).T).reshape(x1.shape),
             alpha=0.75,
             cmap=ListedColormap(('red', 'green')))  #if 0 (red) if 1 (green)
#now color the scattered points either red or green: iterate over the classes
#and plot each class with its own color (1 -> green, 0 -> red)
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0],
                x_set[y_set == j, 1],
                c=ListedColormap(('red', 'green'))(i),
                label=j)

#now label the axes
plt.title('K-NN(Test Set)')
plt.xlabel('Age')
plt.ylabel('Salary')
plt.legend()
plt.show()
Example #50
def plot_detc_tcs(daily_cut,template,filter_detc,outname):
    '''
        daily_cut: cutted daily data from the data_proc.cut_dailydata
        template: template .ms data in waveforms_template
        filter_detc: filter dictionary before plot
        outname: output name
    '''
    import obspy
    import numpy as np
    import matplotlib
    matplotlib.use('pdf') #instead using interactive backend
    import matplotlib.pyplot as plt
    from obspy import UTCDateTime
    from repeq import data_proc
    if type(template)==str:
        temp = obspy.read(template)
    else:
        temp = template
    if type(daily_cut)==str:
        daily_cut = np.load(daily_cut,allow_pickle=True)
        daily_cut = daily_cut.item()
    #apply filter
    daily_cut = data_proc.clean_data_cut(daily_cut,filter_detc)
    if len(daily_cut['detc_tcs'].keys())==0:
        return 1 #nothing left, just return
    OT_temp = UTCDateTime(daily_cut['OT_template']) #origin time for template
    for ik in daily_cut['detc_tcs'].keys():
        D = daily_cut['detc_tcs'][ik]
        phase = daily_cut['phase'][ik] # assume D and phase have the same order
        OT_D = UTCDateTime(ik) #origin time of Detection (cut from daily data)
        XLIM=[]
        #create figure based on how many traces
        print('Ntraces=',len(D))
        #if 0<len(D)<=20:
        #    fig = plt.figure(figsize=(8.5,5.5))
        #elif 20<len(D)<=30:
        #    fig = plt.figure(figsize=(8.5,6.5))
        #elif 30<len(D):
        fig = plt.figure(figsize=(8.5,8.5)) #all with the same size
        for ista in range(len(D)):
            net = D[ista].stats.network
            sta = D[ista].stats.station
            channel = D[ista].stats.channel
            location = D[ista].stats.location
            PS = phase[ista] #'P' or 'S'
            selected_temp = temp.select(network=net,station=sta,channel=channel,location=location)
            selected_temp = selected_temp.copy()
            #in most cases this returns a single trace, but a station may have both P and S
            if len(selected_temp)!=1:
                t1 = selected_temp[0].stats.starttime
                t2 = selected_temp[1].stats.starttime
                print('phase=',PS)
                if t2-t1>0:
                    if PS=='P':
                        selected_temp = obspy.Stream(selected_temp[0])
                        print('return first one')
                    elif PS=='S':
                        selected_temp = obspy.Stream(selected_temp[1])
                        print('return second one')
                else:
                    if PS=='P':
                        selected_temp = obspy.Stream(selected_temp[1])
                    elif PS=='S':
                        selected_temp = obspy.Stream(selected_temp[0])
                print('multiple data selected, return data based on basic PS wave assumption') #have to check this!
                #continue #!!!!!!!! deal with this later!!!!!!!!!!
            #dealing with time
            T_D = D[ista].times()
            T_temp = selected_temp[0].times() #length should only be 1, unless P/S in same data
            #Time relative to origin, so that at origin is zero
            dt_D = D[ista].stats.starttime-OT_D
            T_D = T_D+dt_D
            dt_temp = selected_temp[0].stats.starttime-OT_temp
            T_temp = T_temp+dt_temp
            #normalize data
            data_D = D[ista].data/np.max(D[ista].data)
            #data_D = D[ista].data/np.max(selected_temp[0].data) #normalize the data based on template amplitude, not daily data amplitude
            data_temp = selected_temp[0].data/np.max(selected_temp[0].data)
            #data_temp = selected_temp[0].data/np.max(D[ista].data)
            #plot both tcs
            plt.plot(T_D,data_D+ista*1.5,'k')
            if PS=='P':
                plt.plot(T_temp,data_temp+ista*1.5,'r')
            else:
                plt.plot(T_temp,data_temp+ista*1.5,'b')
            #get xlim bound
            if ista==0:
                XLIM.append(T_temp[0]-1)
        XLIM.append(T_temp[-1]+1)
        YLIM = [-1,ista*1.5+1]
        YLIM = [YLIM[0],YLIM[1]+0.08*(YLIM[1]-YLIM[0]) ]
        #add text
        props = dict(boxstyle='round', facecolor='white', alpha=0.5)
        text_xloc = (XLIM[1]-XLIM[0])*0.04+XLIM[0]
        text_yloc = (YLIM[1]-YLIM[0])*0.86+YLIM[0]
        text_yloc_temp = (YLIM[1]-YLIM[0])*0.94+YLIM[0]
        plt.text(text_xloc,text_yloc,ik,fontsize=12,bbox=props)
        plt.text(text_xloc,text_yloc_temp,OT_temp.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-4],fontsize=12,color=[1,0,0],bbox=props)
        plt.xlabel('Origin time (s)',fontsize=15,labelpad=0)
        plt.xticks(fontsize=12)
        plt.yticks([],[])
        #add title
        plt.title('CC=%.2f'%(daily_cut['meanCC'][ik]))
        ax1 = plt.gca()
        ax1.tick_params(pad=1) #make axis closer
        plt.xlim(XLIM)
        plt.ylim(YLIM)
        #savName = template.split('_')[-1].split('.')[0] #this is the template ID
        if outname:
            print('save fig:',outname+ik+'.png')
            plt.savefig(outname+ik.replace(':','')+'.png',dpi=300)
        #plt.show()
        plt.close()
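A minimal sketch of the trace-stacking trick above (hypothetical data): each trace is normalized to unit amplitude and offset vertically by 1.5 * i.

import numpy as np
import matplotlib.pyplot as plt

t = np.linspace(0, 10, 1000)
traces = [np.sin(2 * np.pi * f * t) * np.exp(-t) for f in (0.5, 1.0, 2.0)]

for i, tr in enumerate(traces):
    plt.plot(t, tr / np.max(np.abs(tr)) + i * 1.5, 'k')
plt.yticks([], [])
plt.show()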
Example #51
def FancyPlotTotals(FracAnalyzed,
                    Fractures=True,
                    Circles=True,
                    FigureNumber=1):
    """Plots the point_number_density 

		Parameters
		-----------
		FracAnalyzed : list or single instance of FracAnalysisPoly/Point object
			Can be a list of different FracAnalysisPoint and/or FracAnalysisPoly
			objects. These objects must have the same cell_size and angle_bins

		Fractures : Boolean
			True by default
			Includes the 'raw' polylines and points in the plot

		Circles : Boolean
			False by default
			Includes circles around each Rose diagram, divided into sectors by
			angle bin. The circles indicate 25, 50, 75 and 100 % proportion.

		FigureNumber : int
			Number of figure created

		Returns
		--------
		Another fancy plot.

		-----------------------------------------------------------------
	"""

    if type(FracAnalyzed) != list:
        FracAnalyzed = [FracAnalyzed]

    fig = plt.figure(FigureNumber)
    fig.clf()
    ax = fig.add_subplot(111)

    minx = min([min(temp.X) for temp in FracAnalyzed])
    maxx = max([max(temp.X) for temp in FracAnalyzed])

    miny = min([min(temp.Y) for temp in FracAnalyzed])
    maxy = max([max(temp.Y) for temp in FracAnalyzed])

    cell_size = FracAnalyzed[0].cell_size
    plt.xlim([minx, maxx + (cell_size * 1000)])
    plt.ylim([miny, maxy + (cell_size * 1000)])
    plt.grid(True)

    ############################################################################
    # Drawing Fractures
    if Fractures == True:
        for classs in FracAnalyzed:

            if classs.__name__ == "FracAnalysisPoly":
                sf = shapefile.Reader(classs.address)
                shapes = sf.shapes()

                for i in shapes[::]:
                    a = np.array(i.points)
                    plt.plot(a[:, 0], a[:, 1], zorder=100)

            if classs.__name__ == "FracAnalysisPoint":
                sf = shapefile.Reader(classs.address)

                data = list(sf.records())
                x_input = [x[0] for x in data]
                y_input = [y[1] for y in data]

                plt.scatter(x_input, y_input, zorder=100)

    #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
    # Drawing the Rose plot
    for classs in FracAnalyzed:

        pointsx, pointsy = [], []
        sf = shapefile.Reader(classs.address)
        shapes = sf.shapes()

        for i in shapes[::]:
            a = np.array(i.points)
            pointsx.extend(a[:, 0])
            pointsy.extend(a[:, 1])

        X = classs.X
        X_max, X_min = max(X), min(X)
        X_width = X_max - X_min

        Y = classs.Y
        Y_max, Y_min = max(Y), min(Y)
        Y_width = Y_max - Y_min

        cell_size = min([X_width, Y_width]) / 1000.0
        startx, starty = np.mean(pointsx), np.mean(pointsy)

        Z = np.sum(classs.N, axis=0)

        radius = cell_size / 2.
        a = Z / np.sum(Z)
        L = np.sqrt(a) * radius * 1000.
        increment = 180. / len(L)
        start = 0
        patches = []

        for i in L:
            end = start + increment
            wedgez = Wedge((startx, starty), i, 90 - end, 90 - start)
            patches.append(wedgez)

            wedgez = Wedge((startx, starty), i, (270 - end), (270 - start))
            patches.append(wedgez)
            start = start + increment

        p = PatchCollection(patches, alpha=0.5, color="black", zorder=1000)
        ax.add_collection(p)

        #-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-
        # Add Circles
        if Circles == True:
            # Circles around rose plot
            for i in (.25, .5, .75, 1):
                circle1 = plt.Circle((startx, starty),
                                     np.sqrt(i) * 1000.0 * radius,
                                     color="black",
                                     fill=False,
                                     lw=1.5,
                                     alpha=0.5)
                fig.gca().add_artist(circle1)

            my_angle = 0.

            # Spokes in these circles
            for i in range(len(L) * 2):
                plt.plot([
                    startx, startx +
                    (1000 * radius *
                     np.sin(np.deg2rad(90 - 180 - 90 - my_angle)))
                ], [
                    starty, starty +
                    (1000. * radius * np.sin(np.deg2rad(90 - my_angle)))
                ],
                         color="k",
                         lw=1.5,
                         alpha=0.5)
                my_angle += increment

    plt.show()
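A minimal sketch of the rose-petal scaling used above: wedge radius is proportional to sqrt(count fraction), so wedge area is proportional to the count in each angle bin (all values hypothetical).

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection

counts = np.array([5., 12., 20., 8.])        # counts per angle bin over 0-180 deg
L = np.sqrt(counts / counts.sum())           # petal lengths for a unit radius
increment = 180.0 / len(L)

patches, start = [], 0.0
for r in L:
    end = start + increment
    patches.append(Wedge((0, 0), r, 90 - end, 90 - start))    # northern half
    patches.append(Wedge((0, 0), r, 270 - end, 270 - start))  # mirrored southern half
    start = end

fig, ax = plt.subplots()
ax.add_collection(PatchCollection(patches, alpha=0.5, color='black'))
ax.set_xlim(-1.2, 1.2); ax.set_ylim(-1.2, 1.2); ax.set_aspect('equal')
plt.show()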
Example #52
    return new_cmap

cmap = plt.get_cmap('Reds')
new_cmap = truncate_colormap(cmap, 0., 0.8)
    
f, axarr = plt.subplots(1,len(beta_list),figsize=(9,3))
for i,beta in enumerate(beta_list):
        
        axarr[i].imshow(P_list[i],cmap=new_cmap)
        axarr[i].imshow(T,cmap=plt.get_cmap('binary'),alpha=0.7)
        axarr[i].yaxis.set_ticks([])
        axarr[i].xaxis.set_ticks([])
        axarr[i].set_title(r'$\beta$ = '+str(beta), fontsize=20)
        
plt.show()

## This is for the convergence
print('This is to show the convergence behavior.')
from cycler import cycler
plt.rc('axes', prop_cycle=(cycler('color', ['darkred', 'r', 'salmon']) ))

for i,opt1 in enumerate(epsilon_list):
    plt.semilogy(np.asarray(loss_list[i])-ground_truth,label=r'IPOT $\beta$ = '+str(epsilon_list[i]))

plt.ylabel('$|W-W_{LP}|$',fontsize=20)
plt.xlabel('# iteration',fontsize=20)
plt.xlim([-100,num_proximal+100])

plt.legend(fontsize=15,loc=1, bbox_to_anchor=(1.55, 1.))
plt.show()
Example #53
#     high = max(np.ravel(RawR_cam[i]).max(), np.ravel(RawR_fit[i]).max())
#     low = min(np.ravel(RawR_cam[i]).min(), np.ravel(RawR_fit[i]).min())
#     plt.ylim((low, high))
#     plt.xlim((low, high))
#     path='C:/Users/76774/Desktop/test/image_4_nobound3/'+str(i)+'.jpg';
#     plt.savefig(path, dpi=600);
#     plt.show()
ax = plt.scatter(np.ravel(RawR_fit), np.ravel(RawR_cam), s=2, marker='*')
font2 = {'family' : 'Times New Roman',
'weight' : 'normal',
'size'   : 20,
}
plt.tick_params(labelsize=20)
plt.xlabel('Fitted R',font2)
plt.ylabel('Actual R',font2)
ss=[]
i=0
while i<140:
    ss.append(i)
    i=i+1
plt.plot(ss, ss, 'r--', linewidth=2)
# plt.legend(['Diagnal fitted line', 'Data'])
ax.figure.set_size_inches(8, 8)
high = max(np.ravel(RawR_cam).max(), np.ravel(RawR_fit).max())
low = min(np.ravel(RawR_cam).min(), np.ravel(RawR_fit).min())
plt.ylim((low, high))
plt.xlim((low, high))
plt.title('unimodal_ves',font2)
path='./result/image/ves.jpg'
plt.savefig(path,dpi=600)
plt.show()
Example #54
## compute velocity components downstream in lab frame
u2x = u2 * np.cos(theta)
u2y = u2 * np.sin(theta)

## create plots

# plot pressure-deflection polar
plt.figure(1)
plt.plot(180 * theta / np.pi, P2 / 1e6, 'k:', linewidth=2)
plt.title('Shock Polar Air, free-stream speed ' + str(round(U, 5)) + ' m/s',
          fontsize=12)
plt.xlabel('deflection angle (deg)', fontsize=12)
plt.ylabel('pressure (MPa)', fontsize=12)
plt.tick_params(labelsize=12)
plt.xlim(xmin=0)
plt.ylim(ymin=0)

# plot pressure-deflection polar
plt.figure(2)
plt.plot(180 * theta / np.pi, 180 * beta / np.pi, 'k:', linewidth=2)
plt.title('Shock Polar Air, free-stream speed ' + str(round(U, 5)) + ' m/s',
          fontsize=12)
plt.xlabel('deflection angle (deg)', fontsize=12)
plt.ylabel('wave angle (deg)', fontsize=12)
plt.tick_params(labelsize=12)
plt.xlim(xmin=0)
plt.ylim(0, 90)

# plot velocity polar
plt.figure(3)
Example #55
def plot_bar_dloss_across_subjs(
    dlosses,
    elosses=None,
    ix_datas=None,
    subj_parad_bis: Iterable[Tuple[str, str, bool]] = None,
    axs: Union[plt2.GridAxes, plt2.AxesArray] = None,
    vmax=None,
    add_scale=True,
    base=10.,
):
    """

    :param dlosses: [ix_data]
    :param ix_datas:
    :param axs:
    :param subj_parad_bis: [('subj', 'parad', is_bimanual), ...]
    :return: axs
    """

    if subj_parad_bis is None:
        subj_parad_bis = subj_parad_bis0
    if vmax is None:
        vmax = np.amax(np.abs(dlosses))

    # order: eye S1-S3, hand by ID, paired uni-bimanual
    subjs, parads, bis = zip(*subj_parad_bis)
    subjs = np.array(
        ['ID0' + v[-1] if v[:2] == 'ID' and len(v) == 3 else v for v in subjs])
    parads = np.array(parads)
    bis = np.array(bis)

    is_eye = parads == 'RT'
    is_bin = parads == 'binary'
    ix = np.arange(len(subjs))

    def filt_sort(filt):
        ind = [int(subj[1:]) for subj in subjs[filt]]
        return ix[filt][np.argsort(ind)]

    ix = np.concatenate([
        filt_sort(is_eye & ~is_bin),
        np.stack([
            filt_sort(~is_eye & ~bis & ~is_bin),
            filt_sort(~is_eye & bis & ~is_bin)
        ], -1).flatten('C'),
        filt_sort(is_bin)
    ])
    subjs = subjs[ix]
    parads = parads[ix]
    bis = bis[ix]
    is_eye = is_eye[ix]
    dlosses = dlosses[ix]
    subj_parad_bis = subj_parad_bis[ix]

    n_eye = int(np.sum(is_eye))
    n_hand = int(np.sum(~is_eye))

    y = np.empty([n_eye + n_hand])
    y[is_eye] = 1.5 + np.arange(n_eye)
    y[~is_eye] = n_eye - 1 + 1.5 + np.cumsum([1.5, 1.] * (n_hand // 2))
    y_max = np.amax(y) + 1.5

    if axs is None:
        axs = plt2.GridAxes(nrows=1,
                            ncols=1,
                            heights=y_max * 0.2,
                            widths=2,
                            left=1.5,
                            right=0.25,
                            bottom=0.85)
    ax = axs[0, 0]
    plt.sca(ax)

    m = dlosses
    if elosses is None:
        e = np.zeros_like(m)
    else:
        e = elosses

    for y1, m1, e1, parad1, bi1 in zip(y, m, e, parads, bis):
        plt.barh(y1,
                 m1,
                 xerr=e1,
                 color=colors_parad[(parad1, '%s' % bi1)],
                 edgecolor='None')

    if add_scale:
        dy = y[1] - y[0]

    axvline_dcost()

    x_lim = [-vmax * 1.2, vmax * 1.2]
    for ix_big in range(len(y)):
        if np.abs(m[ix_big]) > vmax:
            for i_sign, sign in enumerate([1, -1]):
                plt2.patch_wave(
                    y[ix_big],
                    x_lim[i_sign] * 1.01,
                    ax=ax,
                    color='w',
                    wave_margin=0.15,
                    wave_amplitude=sign * 0.025,
                )

    plt.xlim(x_lim)
    xticks_serial_vs_parallel(vmax, base)
    subj_parad_bi_str = get_subj_parad_bi_str(subj_parad_bis)
    plt.yticks(y, subj_parad_bi_str)
    plt2.detach_axis('y', y[0], y[-1])
    plt2.detach_axis('x', -vmax, vmax)
    plt.ylim([y_max - 1, 1.])

    return axs
Example #56
def FancyPlot(FracAnalyzed,
              Rose=True,
              Fractures=True,
              Patches=False,
              Circles=False,
              SquareNumbers=False,
              Title="",
              FigureNumber=1):
    """Plots the point_number_density 

		Parameters
		-----------
		FracAnalyzed : list or single instance of FracAnalysisPoly/Point object
			Can be a list of different FracAnalysisPoint and/or FracAnalysisPoly
			objects. These objects must have the same cell_size and angle_bins

		Rose : Boolean
			True by default
			Includes rose plots for each square

		Fractures : Boolean
			True by default
			Includes the 'raw' polylines and points in the plot

		Patches : "Number", "Length", "NumberAnisotropy" or False
			False by default
			Includes squares which indicate length density, number density, or 
			number anisotropy with colorbar

		Circles : Boolean
			False by default
			Includes circles around each Rose diagram, divided into sectors by
			angle bin. The circles indicate 25, 50, 75 and 100 % proportion.

		SquareNumbers : Boolean
			False by default
			Includes numbers in each square.

		Title: string
			Blank by default
			Title of the plot
		Returns
		--------
		A fancy plot.

		-----------------------------------------------------------------
	"""
    if type(FracAnalyzed) != list:
        FracAnalyzed = [FracAnalyzed]

    cell_size = FracAnalyzed[0].cell_size

    fig = plt.figure(FigureNumber)
    fig.clf()
    ax = fig.add_subplot(111)

    minx = min([min(temp.X) for temp in FracAnalyzed])
    maxx = max([max(temp.X) for temp in FracAnalyzed])

    miny = min([min(temp.Y) for temp in FracAnalyzed])
    maxy = max([max(temp.Y) for temp in FracAnalyzed])

    plt.xlim([minx, maxx + (cell_size * 1000)])
    plt.ylim([miny, maxy + (cell_size * 1000)])
    plt.grid(True)

    ############################################################################
    # Drawing Fractures
    if Fractures == True:
        for classs in FracAnalyzed:

            if classs.__name__ == "FracAnalysisPoly":
                sf = shapefile.Reader(classs.address)
                shapes = sf.shapes()

                for i in shapes[::]:
                    a = np.array(i.points)
                    plt.plot(a[:, 0], a[:, 1], zorder=2)

            if classs.__name__ == "FracAnalysisPoint":
                sf = shapefile.Reader(classs.address)

                data = list(sf.records())
                x_input = [x[0] for x in data]
                y_input = [y[1] for y in data]

                plt.scatter(x_input,
                            y_input,
                            zorder=2,
                            color="white",
                            edgecolor="black")

    ############################################################################
    # Drawing Patches Squares
    cmapp = plt.cm.jet

    #--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
    # Number Density
    if Patches == "Number":
        max_value = max([np.max(classs.N_total) for classs in FracAnalyzed])

        for classs in FracAnalyzed:
            X = classs.X
            Y = classs.Y
            ZZ = classs.N_total

            s = plt.scatter(X + (500 * cell_size),
                            Y + (500 * cell_size),
                            c=ZZ,
                            cmap=cmapp,
                            s=1)

            for x, y, c, number in zip(X, Y, ZZ, range(len(ZZ))):
                ax.add_artist(
                    plt.Rectangle(xy=(x, y),
                                  color=cmapp(c / max_value),
                                  width=cell_size * 1000,
                                  height=cell_size * 1000,
                                  alpha=0.8))

        cbar = plt.colorbar(s)
        cbar.set_label(
            "# of Fractures per\n   {} km squared".format(cell_size),
            fontsize=16,
            rotation=90)
        plt.clim(0, max_value)

    #--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
    # Length Density
    elif Patches == "Length":
        max_value = max(
            [np.max(np.sum(classs.L, axis=1)) for classs in FracAnalyzed])

        for classs in FracAnalyzed:
            X = classs.X
            Y = classs.Y
            ZZ = np.sum(classs.L, axis=1)

            s = plt.scatter(X + (500 * cell_size),
                            Y + (500 * cell_size),
                            c=ZZ,
                            cmap=cmapp,
                            s=1)

            for x, y, c, number in zip(X, Y, ZZ, range(len(ZZ))):
                ax.add_artist(
                    plt.Rectangle(xy=(x, y),
                                  color=cmapp(c / max_value),
                                  width=cell_size * 1000,
                                  height=cell_size * 1000,
                                  alpha=0.8))

        cbar = plt.colorbar(s)
        cbar.set_label(
            "Length of Fractures (km) per\n   {} km squared".format(cell_size),
            fontsize=16,
            rotation=90)
        plt.clim(0, max_value)

    #--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#
    # Number Anisotropy
    elif Patches == "NumberAnisotropy":
        max_value = max(
            [np.max(classs.Number_Anisotropy) for classs in FracAnalyzed])

        for classs in FracAnalyzed:
            X = classs.X
            Y = classs.Y
            ZZ = classs.Number_Anisotropy

            s = plt.scatter(X + (500 * cell_size),
                            Y + (500 * cell_size),
                            c=ZZ,
                            cmap=cmapp,
                            s=1)

            for x, y, c, number in zip(X, Y, ZZ, range(len(ZZ))):
                ax.add_artist(
                    plt.Rectangle(xy=(x, y),
                                  color=cmapp(c / max_value),
                                  width=cell_size * 1000,
                                  height=cell_size * 1000,
                                  alpha=0.8))

        cbar = plt.colorbar(s)
        cbar.set_label(
            "Number Anisotropy per\n   {} km squared".format(cell_size),
            fontsize=16,
            rotation=90)
        plt.clim(0, max_value)

    ############################################################################
    # Add numbers to the squares
    if SquareNumbers:
        for classs in FracAnalyzed:
            X = classs.X
            Y = classs.Y

            for number, (x, y) in enumerate(zip(X, Y)):
                plt.text(x,
                         y + ((0.7 * cell_size) * 1000),
                         str(number),
                         color="white",
                         zorder=100000,
                         fontsize=10)

    ############################################################################
    # plot rose diagram
    if Rose:
        for classs in FracAnalyzed:

            X = classs.X
            Y = classs.Y
            Z = classs.N

            patches = []
            for x, y, l in zip(X, Y, Z):
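                # sqrt scaling: wedge area, not radius, is proportional to
                # the bin's share of fractures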
                L = np.sqrt(l / sum(l)) * cell_size * 500
                increment = 180. / len(L)
                start = 0

                startx, starty = (x + (0.5 * cell_size) * 1000), (
                    y + (0.5 * cell_size) * 1000)
                for i in L:
                    end = start + increment
                    wedgez = Wedge((startx, starty), i, 90 - end, 90 - start)
                    patches.append(wedgez)

                    wedgez = Wedge((startx, starty), i, (270 - end),
                                   (270 - start))
                    patches.append(wedgez)
                    start = start + increment

                if Circles:
                    radius = cell_size * 0.5
                    # Concentric reference circles around the rose plot;
                    # sqrt spacing gives equal area between successive rings
                    for i in (.25, .5, .75, 1):
                        circle1 = plt.Circle((startx, starty),
                                             np.sqrt(i) * 1000.0 * radius,
                                             color="black",
                                             fill=False,
                                             lw=1.5,
                                             alpha=0.25)
                        fig.gca().add_artist(circle1)

                    my_angle = 0.

                    # Spokes radiating through the circles, one per wedge
                    for i in range(len(L) * 2):
                        plt.plot([startx,
                                  startx + (1000. * radius *
                                            np.sin(np.deg2rad(my_angle)))],
                                 [starty,
                                  starty + (1000. * radius *
                                            np.cos(np.deg2rad(my_angle)))],
                                 color="k",
                                 lw=1.5,
                                 alpha=0.25)
                        my_angle += increment

            p = PatchCollection(patches, alpha=0.5, color="black", zorder=1000)
            ax.add_collection(p)
    plt.show()
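
# The rose diagram above mirrors every orientation bin into two wedges 180
# degrees apart, with radius proportional to the square root of the bin's
# share so that wedge *area* tracks frequency. A minimal self-contained
# sketch of the same technique, with synthetic bin counts (all names here
# are illustrative, not taken from the code above):
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Wedge
from matplotlib.collections import PatchCollection

counts = np.array([5, 12, 20, 9, 3, 7])  # fractures per orientation bin
radii = np.sqrt(counts / counts.sum())   # sqrt so wedge area ~ frequency
increment = 180.0 / len(counts)

patches, start = [], 0.0
for r in radii:
    end = start + increment
    # one wedge per bin, plus its mirror image 180 degrees away
    patches.append(Wedge((0, 0), r, 90 - end, 90 - start))
    patches.append(Wedge((0, 0), r, 270 - end, 270 - start))
    start = end

fig, ax = plt.subplots()
ax.add_collection(PatchCollection(patches, alpha=0.5, color="black"))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_aspect("equal")
plt.show()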
Example #57
# Imports needed to make this example runnable on its own
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels


def plot_confusion_matrix(y_true,
                          y_pred,
                          classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues,
                          figsize=None,
                          save_as=None):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    rcParams['font.family'] = 'sans-serif'
    rcParams['font.sans-serif'] = ['Tahoma']

    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'

    # Compute confusion matrix

    cm_obs = confusion_matrix(y_true, y_pred)
    cm_pct = cm_obs.astype('float') / cm_obs.sum(axis=1)[:, np.newaxis] * 100

    # Only use the labels that appear in the data
    classes = np.array(classes)
    classes = classes[unique_labels(y_true, y_pred)]

    fig, ax = plt.subplots(figsize=figsize)
    if normalize:
        im = ax.imshow(cm_pct, interpolation='nearest', cmap=cmap)
        thresh = 50
        for i in range(cm_pct.shape[0]):
            for j in range(cm_pct.shape[1]):
                ax.text(j,
                        i + .1,
                        f'{cm_pct[i, j]:3.2f}%',
                        ha="center",
                        va="center",
                        color="white" if cm_pct[i, j] > thresh else "black")
                #color="black")
        for i in range(cm_pct.shape[0]):
            for j in range(cm_pct.shape[1]):
                ax.text(j,
                        i - .1,
                        f'({cm_obs[i, j]:,d})',
                        ha="center",
                        va="center",
                        color="white" if cm_pct[i, j] > thresh else "black")
                #color="black")
    else:
        im = ax.imshow(cm_obs, interpolation='nearest', cmap=cmap)
        thresh = cm_obs.max() / 2.
        for i in range(cm_obs.shape[0]):
            for j in range(cm_obs.shape[1]):
                ax.text(j,
                        i,
                        f'{cm_obs[i, j]}',
                        ha="center",
                        va="center",
                        color="white" if cm_obs[i, j] > thresh else "black")

    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(
        xticks=np.arange(cm_obs.shape[1]),
        yticks=np.arange(cm_obs.shape[0]),
        # ... and label them with the respective list entries
        xticklabels=classes,
        yticklabels=classes,
        title=title,
        ylabel='Actual',
        xlabel='Predicted')

    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(),
             rotation=45,
             ha="right",
             rotation_mode="anchor")

    plt.xlim(-0.5, cm_obs.shape[1] - .5)
    plt.ylim(-0.5, cm_obs.shape[0] - .5)
    fig.tight_layout()
    if save_as is not None:
        plt.savefig(save_as)
    plt.show()

    return cm_pct, cm_obs
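
# A minimal usage sketch for the function above, with toy labels; assumes
# scikit-learn is installed and the imports at the top of this example.
y_true = [0, 0, 1, 1, 2, 2, 2]
y_pred = [0, 1, 1, 1, 2, 0, 2]
pct, obs = plot_confusion_matrix(y_true, y_pred,
                                 classes=['cat', 'dog', 'bird'],
                                 normalize=True,
                                 figsize=(4, 4))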
Example #58
    mel_dbs[c] = mel_db


# Plots
plt.style.use('ggplot')
x_ax = np.arange(0, config.new_len) / config.sample_rate
plt.figure(figsize=(5.9, 4),dpi=400)

plt.subplot(3,2,1)
plt.text(0.35, 0.6, 'Öffnen', fontsize=20)  # German for 'open'
plt.title('Signal')
plt.ylabel('Amplitude')
plt.ylim(-0.4, 0.4)
plt.xticks([])
plt.xlim(0, max(x_ax))
plt.plot(x_ax, list(signals.values())[0], color='black', linewidth=0.5)

plt.subplot(3,2,2)
plt.text(0.35, 0.6, 'Schließen', fontsize=20)  # German for 'close'
plt.title('Signal')
plt.ylim(-0.4, 0.4)
plt.xlim(0, max(x_ax))
plt.xticks([])
plt.yticks([])
plt.plot(x_ax, list(signals.values())[1], color='black', linewidth=0.5)

plt.subplot(3,2,3)
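
# The fragment above assumes a dict `signals` of time-domain arrays and
# precomputed mel-dB spectrograms in `mel_dbs`. A hedged sketch of how such
# values are typically produced (librosa-based; the function name and the
# n_mels default are assumptions, not from the original):
import numpy as np
import librosa

def mel_db_of(signal, sample_rate, n_mels=64):
    # power mel spectrogram, converted to decibels relative to the peak
    mel = librosa.feature.melspectrogram(y=signal, sr=sample_rate, n_mels=n_mels)
    return librosa.power_to_db(mel, ref=np.max)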
Example #59
    Y_mix = spectral_mixing(Y, fft_cc, 1)
    # keep only the real part: the ifft of a mixed spectrum is numerically complex
    mix = scipy.fftpack.ifft(Y_mix).real

    new_audio = np.concatenate((mix, new_audio))

scipy.io.wavfile.write('GM001.wav', fs, new_audio.astype('int16'))


# Plotting
plt.plot(freqs_ga, ampl_ga)
plt.xlim(0, fs / 2)
plt.title('GA wav amplitude spectrum')

plt.figure()
plt.plot(ampl_ga)
plt.xlim(0, fs)
plt.plot(peaks_ga, ampl_ga[peaks_ga], 'x')
plt.title('GA wav amplitude spectrum with peaks')

plt.figure()
plt.plot(ampl_cc)
plt.xlim(0, N_cc / 2)
plt.plot(peaks_cc, ampl_cc[peaks_cc], 'x')
plt.title('CC frame amplitude spectrum with peaks')

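
# spectral_mixing() is not defined in this fragment. A plausible, clearly
# hypothetical stand-in consistent with how it is called above (two complex
# spectra of equal length in, one mixed complex spectrum out); the original
# helper's actual behavior is unknown:
import numpy as np

def spectral_mixing(spec_a, spec_b, alpha):
    # blend the magnitudes with weight `alpha`, keep the phase of spec_a
    mag = (1 - alpha) * np.abs(spec_a) + alpha * np.abs(spec_b)
    return mag * np.exp(1j * np.angle(spec_a))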
Example #60
        jd = json.load(open('./data/' + f, 'r'))
        mtime = jd['starttime']
        endtime = jd['endtime']
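        # offset = seconds per sample, from the recording span and the number
        # of 'steps' samples (assumes every metric is sampled at that rate)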
        offset = (1 +
                  (endtime - mtime)) / len(jd['metrics']['steps']['values'])
        if metric_name not in jd['metrics']:
            print "Could not find %s in data file (%s)" % (metric_name, f)
            continue
        for v in jd['metrics'][metric_name]['values']:
            if v is not None:
                dto = datetime.datetime.fromtimestamp(mtime)
                metric_vals.append((dto, v))
                daygraph_metric_vals.append((dto, v))
            mtime += offset
        if len(daygraph_metric_vals) > 0:
            dp = plt.scatter(*zip(*daygraph_metric_vals), marker=',')
            plt.setp(plt.xticks()[1], rotation=30, ha='right')
            plt.xlim(datetime.datetime.fromtimestamp(jd['starttime']),
                     datetime.datetime.fromtimestamp(jd['endtime']))
            plt.gca().format_xdata = mdates.DateFormatter('%Y-%m-%d %H:%M')
            plt.title('%s for %s' % (metric_name, today))
            plt.xlabel('Time')
            plt.savefig('graphs/%s_%s.png' % (metric_name, today))

plt.close('all')
if len(metric_vals) > 0:
    p = plt.scatter(*zip(*metric_vals), marker=',', vmin=40, vmax=180, s=100)
    plt.title('%s over time' % (metric_name))
    plt.xlabel('Timestamp')
    plt.savefig('graphs/%s_all_time.png' % metric_name)
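
# Illustrative shape of the JSON files this example reads, reconstructed
# from the keys accessed above; timestamps are UNIX seconds and metric
# values may be null (None in Python). The numbers are made up.
example_record = {
    "starttime": 1514764800,
    "endtime": 1514768400,
    "metrics": {
        "steps": {"values": [12, 0, 34, None]},
        "heartrate": {"values": [72, 75, None, 80]},
    },
}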